import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.file_utils import is_datasets_available, is_faiss_available, is_torch_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
self.retrieval_vector_size = 8
# DPR tok
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
os.makedirs(dpr_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
os.makedirs(bart_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
def get_bart_tokenizer(self) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
@require_tokenizers
def test_save_load_pretrained_with_saved_config(self):
save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
rag_config.save_pretrained(save_dir)
rag_tokenizer.save_pretrained(save_dir)
new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
@slow
def test_pretrained_token_nq_tokenizer(self):
tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
input_dict = tokenizer(input_strings)
self.assertIsNotNone(input_dict)
@slow
def test_pretrained_sequence_nq_tokenizer(self):
tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
input_dict = tokenizer(input_strings)
self.assertIsNotNone(input_dict)
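# Quick reference (a hedged sketch, not part of the test suite): RagTokenizer is
# a thin wrapper around the two tokenizers built in setUp() -- calling it encodes
# question text with the DPR question-encoder tokenizer, while the BART tokenizer
# is exposed on the .generator attribute for target/generation text:
#
#   rag_tokenizer = RagTokenizer(question_encoder=dpr_tokenizer, generator=bart_tokenizer)
#   inputs = rag_tokenizer("who got the first nobel prize in physics")
#   generator_vocab = rag_tokenizer.generator.get_vocab()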
|
import copy
import unittest
import numpy as np
from transformers.file_utils import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import IterableDataset
from transformers.modeling_outputs import SequenceClassifierOutput
from transformers.tokenization_utils_base import BatchEncoding
from transformers.trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
IterableDatasetShard,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
ShardSampler,
get_parameter_names,
)
class TstLayer(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.linear1 = nn.Linear(hidden_size, hidden_size)
self.ln1 = nn.LayerNorm(hidden_size)
self.linear2 = nn.Linear(hidden_size, hidden_size)
self.ln2 = nn.LayerNorm(hidden_size)
self.bias = nn.Parameter(torch.zeros(hidden_size))
def forward(self, x):
h = self.ln1(nn.functional.relu(self.linear1(x)))
h = nn.functional.relu(self.linear2(x))
return self.ln2(x + h + self.bias)
class RandomIterableDataset(IterableDataset):
# For testing, an iterable dataset of random length
def __init__(self, p_stop=0.01, max_length=1000):
self.p_stop = p_stop
self.max_length = max_length
self.generator = torch.Generator()
def __iter__(self):
count = 0
stop = False
while not stop and count < self.max_length:
yield count
count += 1
number = torch.rand(1, generator=self.generator).item()
stop = number < self.p_stop
@require_torch
class TrainerUtilsTest(unittest.TestCase):
def test_distributed_tensor_gatherer(self):
# Simulate a result with a dataset of size 21, 4 processes and chunks of lengths 2, 3, 1
world_size = 4
num_samples = 21
input_indices = [
[0, 1, 6, 7, 12, 13, 18, 19],
[2, 3, 4, 8, 9, 10, 14, 15, 16, 20, 0, 1],
[5, 11, 17, 2],
]
predictions = np.random.normal(size=(num_samples, 13))
gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples)
for indices in input_indices:
gatherer.add_arrays(predictions[indices])
result = gatherer.finalize()
self.assertTrue(np.array_equal(result, predictions))
# With nested tensors
gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples)
for indices in input_indices:
gatherer.add_arrays([predictions[indices], [predictions[indices], predictions[indices]]])
result = gatherer.finalize()
self.assertTrue(isinstance(result, list))
self.assertEqual(len(result), 2)
self.assertTrue(isinstance(result[1], list))
self.assertEqual(len(result[1]), 2)
self.assertTrue(np.array_equal(result[0], predictions))
self.assertTrue(np.array_equal(result[1][0], predictions))
self.assertTrue(np.array_equal(result[1][1], predictions))
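# A minimal sketch (not part of the original test) of where the hard-coded
# input_indices above come from: 21 samples are padded to 24 (a multiple of the
# 4 processes), each process owns a contiguous block of 6 indices (wrapping back
# to the start for the padding), and results arrive in per-process chunks of
# 2, 3 and 1 samples.
num_samples, world_size = 21, 4
samples_per_process = -(-num_samples // world_size)  # ceil division -> 6
padded = list(range(num_samples)) + list(range(samples_per_process * world_size - num_samples))
blocks = [padded[i * samples_per_process:(i + 1) * samples_per_process] for i in range(world_size)]
chunks = []
offset = 0
for size in (2, 3, 1):
    chunks.append([idx for block in blocks for idx in block[offset:offset + size]])
    offset += size
# chunks == [[0, 1, 6, 7, 12, 13, 18, 19],
#            [2, 3, 4, 8, 9, 10, 14, 15, 16, 20, 0, 1],
#            [5, 11, 17, 2]]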
def test_distributed_tensor_gatherer_different_shapes(self):
# Simulate a result with a dataset of size 21, 4 processes and chunks of lengths 2, 3, 1
world_size = 4
num_samples = 21
input_indices = [
[0, 1, 6, 7, 12, 13, 18, 19],
[2, 3, 4, 8, 9, 10, 14, 15, 16, 20, 0, 1],
[5, 11, 17, 2],
]
sequence_lengths = [8, 10, 13]
predictions = np.random.normal(size=(num_samples, 13))
gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples)
for indices, seq_length in zip(input_indices, sequence_lengths):
gatherer.add_arrays(predictions[indices, :seq_length])
result = gatherer.finalize()
# Remove the extra samples added at the end for a round multiple of num processes.
actual_indices = [input_indices[0], input_indices[1][:-2], input_indices[2][:-1]]
for indices, seq_length in zip(actual_indices, sequence_lengths):
self.assertTrue(np.array_equal(result[indices, :seq_length], predictions[indices, :seq_length]))
# With nested tensors
predictions = np.random.normal(size=(num_samples, 13))
gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples)
for indices, seq_length in zip(input_indices, sequence_lengths):
gatherer.add_arrays([predictions[indices, :seq_length], predictions[indices]])
result = gatherer.finalize()
for indices, seq_length in zip(actual_indices, sequence_lengths):
self.assertTrue(np.array_equal(result[0][indices, :seq_length], predictions[indices, :seq_length]))
self.assertTrue(np.array_equal(result[1], predictions))
# Check that it also works when the varying seq_length comes second
gatherer = DistributedTensorGatherer(world_size=world_size, num_samples=num_samples)
for indices, seq_length in zip(input_indices, sequence_lengths):
gatherer.add_arrays([predictions[indices], predictions[indices, :seq_length]])
result = gatherer.finalize()
self.assertTrue(np.array_equal(result[0], predictions))
for indices, seq_length in zip(actual_indices, sequence_lengths):
self.assertTrue(np.array_equal(result[1][indices, :seq_length], predictions[indices, :seq_length]))
def test_label_smoothing(self):
epsilon = 0.1
num_labels = 12
random_logits = torch.randn(4, 5, num_labels)
random_labels = torch.randint(0, num_labels, (4, 5))
loss = nn.functional.cross_entropy(random_logits.view(-1, num_labels), random_labels.view(-1))
model_output = SequenceClassifierOutput(logits=random_logits)
label_smoothed_loss = LabelSmoother(0.1)(model_output, random_labels)
log_probs = -nn.functional.log_softmax(random_logits, dim=-1)
expected_loss = (1 - epsilon) * loss + epsilon * log_probs.mean()
self.assertTrue(torch.allclose(label_smoothed_loss, expected_loss))
# With a few -100 labels
random_labels[0, 1] = -100
random_labels[2, 1] = -100
random_labels[2, 3] = -100
loss = nn.functional.cross_entropy(random_logits.view(-1, num_labels), random_labels.view(-1))
model_output = SequenceClassifierOutput(logits=random_logits)
label_smoothed_loss = LabelSmoother(0.1)(model_output, random_labels)
log_probs = -nn.functional.log_softmax(random_logits, dim=-1)
# Mask the log probs with the -100 labels
log_probs[0, 1] = 0.0
log_probs[2, 1] = 0.0
log_probs[2, 3] = 0.0
expected_loss = (1 - epsilon) * loss + epsilon * log_probs.sum() / (num_labels * 17)
self.assertTrue(torch.allclose(label_smoothed_loss, expected_loss))
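# Worked illustration of the expected_loss formula above (an aside, not taken
# from LabelSmoother itself): with smoothing factor eps, the smoothed loss is
#   (1 - eps) * cross_entropy(logits, labels) + eps * mean(-log_softmax(logits))
# e.g. for one example with three classes (using the torch/nn imported above):
#
#   eps = 0.1
#   logits = torch.tensor([[2.0, 0.5, -1.0]])
#   labels = torch.tensor([0])
#   nll = nn.functional.cross_entropy(logits, labels)
#   smoothed = (1 - eps) * nll + eps * (-nn.functional.log_softmax(logits, dim=-1)).mean()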
def test_group_by_length(self):
# Get some inputs of random lengths
lengths = torch.randint(0, 25, (100,)).tolist()
# Put one bigger than the others to check it ends up in first position
lengths[32] = 50
indices = list(LengthGroupedSampler(4, lengths=lengths))
# The biggest element should be first
self.assertEqual(lengths[indices[0]], 50)
# The indices should be a permutation of range(100)
self.assertEqual(list(sorted(indices)), list(range(100)))
def test_group_by_length_with_dict(self):
# Get some inputs of random lengths
data = []
for _ in range(6):
input_ids = torch.randint(0, 25, (100,)).tolist()
data.append({"input_ids": input_ids})
# Put one bigger than the others to check it ends up in first position
data[3]["input_ids"] = torch.randint(0, 25, (105,)).tolist()
indices = list(LengthGroupedSampler(4, dataset=data))
# The biggest element should be first
self.assertEqual(len(data[indices[0]]["input_ids"]), 105)
# The indices should be a permutation of range(6)
self.assertEqual(list(sorted(indices)), list(range(6)))
def test_group_by_length_with_batch_encoding(self):
# Get some inputs of random lengths
data = []
for _ in range(6):
input_ids = torch.randint(0, 25, (100,)).tolist()
data.append(BatchEncoding({"input_ids": input_ids}))
# Put one bigger than the others to check it ends up in first position
data[3]["input_ids"] = torch.randint(0, 25, (105,)).tolist()
indices = list(LengthGroupedSampler(4, dataset=data))
# The biggest element should be first
self.assertEqual(len(data[indices[0]]["input_ids"]), 105)
# The indices should be a permutation of range(6)
self.assertEqual(list(sorted(indices)), list(range(6)))
def test_distributed_length_grouped(self):
# Get some inputs of random lengths
lengths = torch.randint(0, 25, (100,)).tolist()
# Put one bigger than the others to check it ends up in first position
lengths[32] = 50
indices_process_0 = list(DistributedLengthGroupedSampler(4, num_replicas=2, rank=0, lengths=lengths))
indices_process_1 = list(DistributedLengthGroupedSampler(4, num_replicas=2, rank=1, lengths=lengths))
# The biggest element should be first
self.assertEqual(lengths[indices_process_0[0]], 50)
# The indices should be a permutation of range(100)
self.assertEqual(list(sorted(indices_process_0 + indices_process_1)), list(range(100)))
def test_get_parameter_names(self):
model = nn.Sequential(TstLayer(128), nn.ModuleList([TstLayer(128), TstLayer(128)]))
# fmt: off
self.assertEqual(
get_parameter_names(model, [nn.LayerNorm]),
['0.linear1.weight', '0.linear1.bias', '0.linear2.weight', '0.linear2.bias', '0.bias', '1.0.linear1.weight', '1.0.linear1.bias', '1.0.linear2.weight', '1.0.linear2.bias', '1.0.bias', '1.1.linear1.weight', '1.1.linear1.bias', '1.1.linear2.weight', '1.1.linear2.bias', '1.1.bias']
)
# fmt: on
def test_distributed_sampler_with_loop(self):
batch_size = 16
for length in [23, 64, 123]:
dataset = list(range(length))
shard1 = DistributedSamplerWithLoop(dataset, batch_size, num_replicas=2, rank=0)
shard2 = DistributedSamplerWithLoop(dataset, batch_size, num_replicas=2, rank=1)
# Set seeds
shard1.set_epoch(0)
shard2.set_epoch(0)
# Sample
samples1 = list(shard1)
samples2 = list(shard2)
self.assertTrue(len(samples1) % batch_size == 0)
self.assertTrue(len(samples2) % batch_size == 0)
total = []
for sample1, sample2 in zip(samples1, samples2):
total += [sample1, sample2]
self.assertEqual(set(total[:length]), set(dataset))
self.assertEqual(set(total[length:]), set(total[: (len(total) - length)]))
def test_sequential_distributed_sampler(self):
batch_size = 16
for length in [23, 64, 123]:
dataset = list(range(length))
shard1 = SequentialDistributedSampler(dataset, num_replicas=2, rank=0)
shard2 = SequentialDistributedSampler(dataset, num_replicas=2, rank=1)
# Sample
samples1 = list(shard1)
samples2 = list(shard2)
total = samples1 + samples2
self.assertListEqual(total[:length], dataset)
self.assertListEqual(total[length:], dataset[: (len(total) - length)])
# With a batch_size passed
shard1 = SequentialDistributedSampler(dataset, num_replicas=2, rank=0, batch_size=batch_size)
shard2 = SequentialDistributedSampler(dataset, num_replicas=2, rank=1, batch_size=batch_size)
# Sample
samples1 = list(shard1)
samples2 = list(shard2)
self.assertTrue(len(samples1) % batch_size == 0)
self.assertTrue(len(samples2) % batch_size == 0)
total = samples1 + samples2
self.assertListEqual(total[:length], dataset)
self.assertListEqual(total[length:], dataset[: (len(total) - length)])
def check_iterable_dataset_shard(self, dataset, batch_size, drop_last, num_processes=2, epoch=0):
# Set the seed for the base dataset to get the proper reference.
dataset.generator.manual_seed(epoch)
reference = list(dataset)
shards = [
IterableDatasetShard(
dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i
)
for i in range(num_processes)
]
for shard in shards:
shard.set_epoch(epoch)
shard_lists = [list(shard) for shard in shards]
for shard in shard_lists:
# All shards have a number of samples that is a round multiple of batch size
self.assertTrue(len(shard) % batch_size == 0)
# All shards have the same number of samples
self.assertEqual(len(shard), len(shard_lists[0]))
for shard in shards:
# All shards know the total number of samples
self.assertEqual(shard.num_examples, len(reference))
observed = []
for idx in range(0, len(shard_lists[0]), batch_size):
for shard in shard_lists:
observed += shard[idx : idx + batch_size]
# If drop_last is False we loop through samples at the beginning to have a size that is a round multiple of
# batch_size
if not drop_last:
while len(reference) < len(observed):
reference += reference
self.assertListEqual(observed, reference[: len(observed)])
# Check equivalence between IterableDataset and ShardSampler
dataset.generator.manual_seed(epoch)
reference = list(dataset)
sampler_shards = [
ShardSampler(
reference, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i
)
for i in range(num_processes)
]
for shard, sampler_shard in zip(shard_lists, sampler_shards):
self.assertListEqual(shard, list(sampler_shard))
def test_iterable_dataset_shard(self):
dataset = RandomIterableDataset()
self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=2, epoch=0)
self.check_iterable_dataset_shard(dataset, 4, drop_last=False, num_processes=2, epoch=0)
self.check_iterable_dataset_shard(dataset, 4, drop_last=True, num_processes=3, epoch=42)
self.check_iterable_dataset_shard(dataset, 4, drop_last=False, num_processes=3, epoch=42)
def test_iterable_dataset_shard_with_length(self):
sampler_shards = [
IterableDatasetShard(list(range(100)), batch_size=4, drop_last=True, num_processes=2, process_index=i)
for i in range(2)
]
# Build expected shards: each process will have batches of size 4 until there is not enough elements to
# form two full batches (so we stop at 96 = (100 // (4 * 2)) * 4)
expected_shards = [[], []]
current_shard = 0
for i in range(0, 96, 4):
expected_shards[current_shard].extend(list(range(i, i + 4)))
current_shard = 1 - current_shard
self.assertListEqual([list(shard) for shard in sampler_shards], expected_shards)
self.assertListEqual([len(shard) for shard in sampler_shards], [len(shard) for shard in expected_shards])
sampler_shards = [
IterableDatasetShard(list(range(100)), batch_size=4, drop_last=False, num_processes=2, process_index=i)
for i in range(2)
]
# When drop_last=False, we get two last full batches by looping back to the beginning.
expected_shards[0].extend(list(range(96, 100)))
expected_shards[1].extend(list(range(0, 4)))
self.assertListEqual([list(shard) for shard in sampler_shards], expected_shards)
self.assertListEqual([len(shard) for shard in sampler_shards], [len(shard) for shard in expected_shards])
def check_shard_sampler(self, dataset, batch_size, drop_last, num_processes=2):
shards = [
ShardSampler(
dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i
)
for i in range(num_processes)
]
shard_lists = [list(shard) for shard in shards]
for shard in shard_lists:
# All shards have a number of samples that is a round multiple of batch size
self.assertTrue(len(shard) % batch_size == 0)
# All shards have the same number of samples
self.assertEqual(len(shard), len(shard_lists[0]))
observed = []
for idx in range(0, len(shard_lists[0]), batch_size):
for shard in shard_lists:
observed += shard[idx : idx + batch_size]
# If drop_last is False we loop through samples at the beginning to have a size that is a round multiple of
# batch_size
reference = copy.copy(dataset)
if not drop_last:
while len(reference) < len(observed):
reference += reference
self.assertListEqual(observed, reference[: len(observed)])
def test_shard_sampler(self):
for n_elements in [64, 123]:
dataset = list(range(n_elements))
self.check_shard_sampler(dataset, 4, drop_last=True, num_processes=2)
self.check_shard_sampler(dataset, 4, drop_last=False, num_processes=2)
self.check_shard_sampler(dataset, 4, drop_last=True, num_processes=3)
self.check_shard_sampler(dataset, 4, drop_last=False, num_processes=3)
|
from models.RepPoints.builder import RepPoints as Detector
from models.dcn.builder import DCNResNetFPN as Backbone
from models.RepPoints.builder import RepPointsNeck as Neck
from models.RepPoints.builder import RepPointsHead as Head
from mxnext.complicate import normalizer_factory
def get_config(is_train):
class General:
log_frequency = 10
name = __name__.rsplit("/")[-1].rsplit(".")[-1]
batch_image = 2 if is_train else 1
fp16 = False
class KvstoreParam:
kvstore = "nccl"
batch_image = General.batch_image
gpus = [0, 1, 2, 3, 4, 5, 6, 7]
fp16 = General.fp16
class NormalizeParam:
# normalizer = normalizer_factory(type="syncbn", ndev=8, wd_mult=1.0)
normalizer = normalizer_factory(type="gn")
class BackboneParam:
fp16 = General.fp16
# normalizer = NormalizeParam.normalizer
normalizer = normalizer_factory(type="fixbn")
depth = 101
num_c3_block = 0
num_c4_block = 3
class NeckParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
class HeadParam:
num_class = 1 + 80
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
batch_image = General.batch_image
class point_generate:
num_points = 9
scale = 4
stride = (8, 16, 32, 64, 128)
transform = "moment"
class head:
conv_channel = 256
point_conv_channel = 256
mean = None
std = None
class proposal:
pre_nms_top_n = 1000
post_nms_top_n = None
nms_thr = None
min_bbox_side = None
class point_target:
target_scale = 4
num_pos = 1
class bbox_target:
pos_iou_thr = 0.5
neg_iou_thr = 0.5
min_pos_iou = 0.0
class focal_loss:
alpha = 0.25
gamma = 2.0
class BboxParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
num_class = None
image_roi = None
batch_image = None
class regress_target:
class_agnostic = None
mean = None
std = None
class RoiParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
out_size = None
stride = None
class DatasetParam:
if is_train:
image_set = ("coco_train2017", )
else:
image_set = ("coco_val2017", )
backbone = Backbone(BackboneParam)
neck = Neck(NeckParam)
head = Head(HeadParam)
detector = Detector()
if is_train:
train_sym = detector.get_train_symbol(backbone, neck, head)
test_sym = None
else:
train_sym = None
test_sym = detector.get_test_symbol(backbone, neck, head)
class ModelParam:
train_symbol = train_sym
test_symbol = test_sym
from_scratch = False
random = True
memonger = False
memonger_until = "stage3_unit21_plus"
class pretrain:
prefix = "pretrain_model/resnet%s_v1b" % BackboneParam.depth
epoch = 0
fixed_param = ["conv0", "stage1", "gamma", "beta"]
excluded_param = ["gn"]
class OptimizeParam:
class optimizer:
type = "sgd"
lr = 0.005 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image
momentum = 0.9
wd = 0.0001
clip_gradient = 35
class schedule:
begin_epoch = 0
end_epoch = 12
lr_iter = [120000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image),
160000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)]
class warmup:
type = "gradual"
lr = 0.005 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image / 3
iter = 2000
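# Worked example (explanatory note, not part of the original config): with the
# 8 GPUs and batch_image = 2 above, the effective batch size is 16, so the
# linear scaling rule gives
#   base lr   = 0.005 / 8 * 8 * 2 = 0.01
#   warmup lr = 0.01 / 3
#   lr_iter   = [120000 * 16 // 16, 160000 * 16 // 16] = [120000, 160000]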
class TestScaleParam:
short_ranges = [600, 800, 1000, 1200]
long_ranges = [2000, 2000, 2000, 2000]
@staticmethod
def add_resize_info(roidb):
ms_roidb = []
for r_ in roidb:
for short, long in zip(TestScaleParam.short_ranges, TestScaleParam.long_ranges):
r = r_.copy()
r["resize_long"] = long
r["resize_short"] = short
ms_roidb.append(r)
return ms_roidb
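# Note (explanatory, not part of the original config): add_resize_info expands
# every roidb record into one copy per (short, long) pair above, so each image
# is evaluated at four scales (short side 600/800/1000/1200, long side capped
# at 2000) for multi-scale testing.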
class TestParam:
min_det_score = 0.05 # filter appended boxes
max_det_per_image = 100
process_roidb = TestScaleParam.add_resize_info
def process_output(x, y):
return x
class model:
prefix = "experiments/{}/checkpoint".format(General.name)
epoch = OptimizeParam.schedule.end_epoch
class nms:
type = "nms"
thr = 0.5
class coco:
annotation = "data/coco/annotations/instances_minival2014.json"
# data processing
class NormParam:
mean = tuple(i * 255 for i in (0.485, 0.456, 0.406)) # RGB order
std = tuple(i * 255 for i in (0.229, 0.224, 0.225))
class RandResizeParam:
short = None # generate on the fly
long = None
short_ranges = [600, 800, 1000, 1200]
long_ranges = [2000, 2000, 2000, 2000]
class RandCropParam:
mode = "center" # random or center
short = 800
long = 1333
class ResizeParam:
short = 800
long = 1333
class PadParam:
short = 800
long = 1333
max_num_gt = 100
class RandPadParam:
short = 1200
long = 2000
max_num_gt = 100
class RenameParam:
mapping = dict(image="data")
from core.detection_input import ReadRoiRecord, \
RandResize2DImageBbox, RandCrop2DImageBbox, Resize2DImageByRoidb, \
ConvertImageFromHwcToChw, Flip2DImageBbox, Pad2DImageBbox, \
RenameRecord
from models.retinanet.input import Norm2DImage
if is_train:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
# Resize2DImageBbox(ResizeParam),
RandResize2DImageBbox(RandResizeParam),
RandCrop2DImageBbox(RandCropParam),
Flip2DImageBbox(),
Pad2DImageBbox(PadParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data"]
label_name = ["gt_bbox"]
else:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
# Resize2DImageBbox(ResizeParam),
Resize2DImageByRoidb(),
Pad2DImageBbox(RandPadParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data", "im_info", "im_id", "rec_id"]
label_name = []
from models.retinanet import metric as cls_metric
import core.detection_metric as box_metric
cls_acc_metric = cls_metric.FGAccMetric(
"FGAcc",
["cls_loss_output", "point_refine_labels_output"],
[]
)
box_init_l1_metric = box_metric.L1(
"InitL1",
["pts_init_loss_output", "points_init_labels_output"],
[]
)
box_refine_l1_metric = box_metric.L1(
"RefineL1",
["pts_refine_loss_output", "point_refine_labels_output"],
[]
)
metric_list = [cls_acc_metric, box_init_l1_metric, box_refine_l1_metric]
return General, KvstoreParam, HeadParam, RoiParam, BboxParam, DatasetParam, \
ModelParam, OptimizeParam, TestParam, \
transform, data_name, label_name, metric_list
|
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.shortcuts import redirect
from main.models import Link
from main.models import Tag


def index(request):
    context = RequestContext(request)
    links = Link.objects.all()
    return render_to_response('main/index.html', {'links': links}, context)


def tags(request):
    context = RequestContext(request)
    tags = Tag.objects.all()
    return render_to_response('main/tags.html', {'tags': tags}, context)


def tag(request, tag_name):
    context = RequestContext(request)
    the_tag = Tag.objects.get(name=tag_name)
    links = the_tag.link_set.all()
    return render_to_response('main/index.html', {'links': links, 'tag_name': '#' + tag_name}, context)


def add_link(request):
    context = RequestContext(request)
    if request.method == 'POST':
        url = request.POST.get("url", "")
        tags = request.POST.get("tags", "")
        title = request.POST.get("title", "")
        tags = tags.split(',')
        l = Link.objects.get_or_create(title=title, url=url)[0]
        for x in tags:
            l.tags.add(Tag.objects.get_or_create(name=x)[0])
    return redirect(index)
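# Example of the form data add_link expects (illustrative; the field names come
# from the POST lookups above, the URL path is an assumption, not defined here):
#
#   POST /add/
#   url=https://example.com&title=Example&tags=python,django
#
# Each comma-separated tag is created with get_or_create if needed and attached
# to the (possibly new) Link before redirecting back to the index view.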
|
import os
import time
import uuid
import concurrent.futures
from oslo_config import cfg
import six.moves
from testtools import matchers
import oslo_messaging
from oslo_messaging.tests.functional import utils
class CallTestCase(utils.SkipIfNoTransportURL):
def setUp(self):
super(CallTestCase, self).setUp(conf=cfg.ConfigOpts())
if self.url.startswith("kafka://"):
self.skipTest("kafka does not support RPC API")
self.conf.prog = "test_prog"
self.conf.project = "test_project"
self.config(heartbeat_timeout_threshold=0,
group='oslo_messaging_rabbit')
def test_specific_server(self):
group = self.useFixture(utils.RpcServerGroupFixture(
self.conf, self.url)
)
client = group.client(1)
client.append(text='open')
self.assertEqual('openstack', client.append(text='stack'))
client.add(increment=2)
self.assertEqual(12, client.add(increment=10))
self.assertEqual(9, client.subtract(increment=3))
self.assertEqual('openstack', group.servers[1].endpoint.sval)
self.assertEqual(9, group.servers[1].endpoint.ival)
for i in [0, 2]:
self.assertEqual('', group.servers[i].endpoint.sval)
self.assertEqual(0, group.servers[i].endpoint.ival)
def test_server_in_group(self):
group = self.useFixture(
utils.RpcServerGroupFixture(self.conf, self.url)
)
client = group.client()
data = [c for c in 'abcdefghijklmn']
for i in data:
client.append(text=i)
for s in group.servers:
self.assertThat(len(s.endpoint.sval), matchers.GreaterThan(0))
actual = [[c for c in s.endpoint.sval] for s in group.servers]
self.assertThat(actual, utils.IsValidDistributionOf(data))
def test_different_exchanges(self):
# If the different exchanges are not honoured, then the
# teardown may hang unless we broadcast all control messages
# to each server
group1 = self.useFixture(
utils.RpcServerGroupFixture(self.conf, self.url,
use_fanout_ctrl=True))
group2 = self.useFixture(
utils.RpcServerGroupFixture(self.conf, self.url, exchange="a",
use_fanout_ctrl=True))
group3 = self.useFixture(
utils.RpcServerGroupFixture(self.conf, self.url, exchange="b",
use_fanout_ctrl=True))
client1 = group1.client(1)
data1 = [c for c in 'abcdefghijklmn']
for i in data1:
client1.append(text=i)
client2 = group2.client()
data2 = [c for c in 'opqrstuvwxyz']
for i in data2:
client2.append(text=i)
actual1 = [[c for c in s.endpoint.sval] for s in group1.servers]
self.assertThat(actual1, utils.IsValidDistributionOf(data1))
actual1 = [c for c in group1.servers[1].endpoint.sval]
self.assertThat([actual1], utils.IsValidDistributionOf(data1))
for s in group1.servers:
expected = len(data1) if group1.servers.index(s) == 1 else 0
self.assertEqual(expected, len(s.endpoint.sval))
self.assertEqual(0, s.endpoint.ival)
actual2 = [[c for c in s.endpoint.sval] for s in group2.servers]
for s in group2.servers:
self.assertThat(len(s.endpoint.sval), matchers.GreaterThan(0))
self.assertEqual(0, s.endpoint.ival)
self.assertThat(actual2, utils.IsValidDistributionOf(data2))
for s in group3.servers:
self.assertEqual(0, len(s.endpoint.sval))
self.assertEqual(0, s.endpoint.ival)
def test_timeout(self):
transport = self.useFixture(
utils.TransportFixture(self.conf, self.url)
)
target = oslo_messaging.Target(topic="no_such_topic")
c = utils.ClientStub(transport.transport, target, timeout=1)
self.assertThat(c.ping,
matchers.raises(oslo_messaging.MessagingTimeout))
def test_exception(self):
group = self.useFixture(
utils.RpcServerGroupFixture(self.conf, self.url)
)
client = group.client(1)
client.add(increment=2)
self.assertRaises(ValueError, client.subtract, increment=3)
def test_timeout_with_concurrently_queues(self):
transport = self.useFixture(
utils.TransportFixture(self.conf, self.url)
)
target = oslo_messaging.Target(topic="topic_" + str(uuid.uuid4()),
server="server_" + str(uuid.uuid4()))
server = self.useFixture(
utils.RpcServerFixture(self.conf, self.url, target,
executor="threading"))
client = utils.ClientStub(transport.transport, target,
cast=False, timeout=5)
def short_periodical_tasks():
for i in range(10):
client.add(increment=1)
time.sleep(1)
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
future = executor.submit(client.long_running_task, seconds=10)
executor.submit(short_periodical_tasks)
self.assertRaises(oslo_messaging.MessagingTimeout, future.result)
self.assertEqual(10, server.endpoint.ival)
class CastTestCase(utils.SkipIfNoTransportURL):
# Note: casts return immediately, so these tests utilise a special
# internal sync() cast to ensure prior casts are complete before
# making the necessary assertions.
def setUp(self):
super(CastTestCase, self).setUp()
if self.url.startswith("kafka://"):
self.skipTest("kafka does not support RPC API")
def test_specific_server(self):
group = self.useFixture(
utils.RpcServerGroupFixture(self.conf, self.url)
)
client = group.client(1, cast=True)
client.append(text='open')
client.append(text='stack')
client.add(increment=2)
client.add(increment=10)
time.sleep(0.3)
client.sync()
group.sync(1)
self.assertIn(group.servers[1].endpoint.sval,
["openstack", "stackopen"])
self.assertEqual(12, group.servers[1].endpoint.ival)
for i in [0, 2]:
self.assertEqual('', group.servers[i].endpoint.sval)
self.assertEqual(0, group.servers[i].endpoint.ival)
def test_server_in_group(self):
if self.url.startswith("amqp:"):
self.skipTest("QPID-6307")
group = self.useFixture(
utils.RpcServerGroupFixture(self.conf, self.url)
)
client = group.client(cast=True)
for i in range(20):
client.add(increment=1)
for i in range(len(group.servers)):
# expect each server to get a sync
client.sync()
group.sync(server="all")
total = 0
for s in group.servers:
ival = s.endpoint.ival
self.assertThat(ival, matchers.GreaterThan(0))
self.assertThat(ival, matchers.LessThan(20))
total += ival
self.assertEqual(20, total)
def test_fanout(self):
group = self.useFixture(
utils.RpcServerGroupFixture(self.conf, self.url)
)
client = group.client('all', cast=True)
client.append(text='open')
client.append(text='stack')
client.add(increment=2)
client.add(increment=10)
time.sleep(0.3)
client.sync()
group.sync(server='all')
for s in group.servers:
self.assertIn(s.endpoint.sval, ["openstack", "stackopen"])
self.assertEqual(12, s.endpoint.ival)
class NotifyTestCase(utils.SkipIfNoTransportURL):
# NOTE(sileht): Each test must use its own topics so that
# the tests can safely run in parallel
def test_simple(self):
listener = self.useFixture(
utils.NotificationFixture(self.conf, self.url, ['test_simple']))
notifier = listener.notifier('abc')
notifier.info({}, 'test', 'Hello World!')
event = listener.events.get(timeout=1)
self.assertEqual('info', event[0])
self.assertEqual('test', event[1])
self.assertEqual('Hello World!', event[2])
self.assertEqual('abc', event[3])
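# For reference, the underlying oslo.messaging notification API that the
# fixture presumably wraps (a hedged sketch; the calls below are standard
# library API, not taken from the fixture itself):
#
#   transport = oslo_messaging.get_notification_transport(conf, url)
#   notifier = oslo_messaging.Notifier(transport, publisher_id='abc',
#                                      driver='messaging', topics=['test_simple'])
#   notifier.info({}, 'test', 'Hello World!')  # (context, event_type, payload)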
def test_multiple_topics(self):
listener = self.useFixture(
utils.NotificationFixture(self.conf, self.url, ['a', 'b']))
a = listener.notifier('pub-a', topic='a')
b = listener.notifier('pub-b', topic='b')
sent = {
'pub-a': [a, 'test-a', 'payload-a'],
'pub-b': [b, 'test-b', 'payload-b']
}
for e in sent.values():
e[0].info({}, e[1], e[2])
received = {}
while len(received) < len(sent):
e = listener.events.get(timeout=1)
received[e[3]] = e
for key in received:
actual = received[key]
expected = sent[key]
self.assertEqual('info', actual[0])
self.assertEqual(expected[1], actual[1])
self.assertEqual(expected[2], actual[2])
def test_multiple_servers(self):
if self.url.startswith("amqp:"):
self.skipTest("QPID-6307")
if self.url.startswith("zmq"):
self.skipTest("ZeroMQ-PUB-SUB")
if self.url.startswith("kafka"):
self.skipTest("Kafka: Need to be fixed")
listener_a = self.useFixture(
utils.NotificationFixture(self.conf, self.url, ['test-topic']))
listener_b = self.useFixture(
utils.NotificationFixture(self.conf, self.url, ['test-topic']))
n = listener_a.notifier('pub')
events_out = [('test-%s' % c, 'payload-%s' % c) for c in 'abcdefgh']
for event_type, payload in events_out:
n.info({}, event_type, payload)
events_in = [[(e[1], e[2]) for e in listener_a.get_events()],
[(e[1], e[2]) for e in listener_b.get_events()]]
self.assertThat(events_in, utils.IsValidDistributionOf(events_out))
for stream in events_in:
self.assertThat(len(stream), matchers.GreaterThan(0))
def test_independent_topics(self):
listener_a = self.useFixture(
utils.NotificationFixture(self.conf, self.url, ['1']))
listener_b = self.useFixture(
utils.NotificationFixture(self.conf, self.url, ['2']))
a = listener_a.notifier('pub-1', topic='1')
b = listener_b.notifier('pub-2', topic='2')
a_out = [('test-1-%s' % c, 'payload-1-%s' % c) for c in 'abcdefgh']
for event_type, payload in a_out:
a.info({}, event_type, payload)
b_out = [('test-2-%s' % c, 'payload-2-%s' % c) for c in 'ijklmnop']
for event_type, payload in b_out:
b.info({}, event_type, payload)
def check_received(listener, publisher, messages):
actuals = sorted([listener.events.get(timeout=0.5)
for __ in range(len(messages))])
expected = sorted([['info', m[0], m[1], publisher]
for m in messages])
self.assertEqual(expected, actuals)
check_received(listener_a, "pub-1", a_out)
check_received(listener_b, "pub-2", b_out)
def test_all_categories(self):
listener = self.useFixture(utils.NotificationFixture(
self.conf, self.url, ['test_all_categories']))
n = listener.notifier('abc')
cats = ['debug', 'audit', 'info', 'warn', 'error', 'critical']
events = [(getattr(n, c), c, 'type-' + c, c + '-data') for c in cats]
for e in events:
e[0]({}, e[2], e[3])
# order between events with different categories is not guaranteed
received = {}
for expected in events:
e = listener.events.get(timeout=1)
received[e[0]] = e
for expected in events:
actual = received[expected[1]]
self.assertEqual(expected[1], actual[0])
self.assertEqual(expected[2], actual[1])
self.assertEqual(expected[3], actual[2])
def test_simple_batch(self):
if self.url.startswith("amqp:"):
backend = os.environ.get("AMQP1_BACKEND")
if backend == "qdrouterd":
# end-to-end acknowledgement with router intermediary
# sender pends until batch_size or timeout reached
self.skipTest("qdrouterd backend")
listener = self.useFixture(
utils.BatchNotificationFixture(self.conf, self.url,
['test_simple_batch'],
batch_size=100, batch_timeout=2))
notifier = listener.notifier('abc')
for i in six.moves.range(0, 205):
notifier.info({}, 'test%s' % i, 'Hello World!')
events = listener.get_events(timeout=3)
self.assertEqual(3, len(events))
self.assertEqual(100, len(events[0][1]))
self.assertEqual(100, len(events[1][1]))
self.assertEqual(5, len(events[2][1]))
|
from __future__ import annotations
from typing import *
from edb.edgeql import ast as qlast
from edb.edgeql import qltypes
from edb import errors
from . import abc as s_abc
from . import constraints
from . import delta as sd
from . import indexes
from . import inheriting
from . import properties
from . import name as sn
from . import objects as so
from . import pointers
from . import referencing
from . import sources
from . import utils
if TYPE_CHECKING:
from . import objtypes as s_objtypes
from . import types as s_types
from . import schema as s_schema
LinkTargetDeleteAction = qltypes.LinkTargetDeleteAction
def merge_actions(
target: so.InheritingObject,
sources: List[so.Object],
field_name: str,
*,
ignore_local: bool = False,
schema: s_schema.Schema,
) -> Any:
if not ignore_local:
ours = target.get_explicit_local_field_value(schema, field_name, None)
else:
ours = None
if ours is None:
current = None
current_from = None
for source in sources:
theirs = source.get_explicit_field_value(schema, field_name, None)
if theirs is not None:
if current is None:
current = theirs
current_from = source
elif current != theirs:
target_source = target.get_source(schema)
current_from_source = current_from.get_source(schema)
source_source = source.get_source(schema)
tgt_repr = (
f'{target_source.get_displayname(schema)}.'
f'{target.get_displayname(schema)}'
)
cf_repr = (
f'{current_from_source.get_displayname(schema)}.'
f'{current_from.get_displayname(schema)}'
)
other_repr = (
f'{source_source.get_displayname(schema)}.'
f'{source.get_displayname(schema)}'
)
raise errors.SchemaError(
f'cannot implicitly resolve the '
f'`on target delete` action for '
f'{tgt_repr!r}: it is defined as {current} in '
f'{cf_repr!r} and as {theirs} in {other_repr!r}; '
f'to resolve, declare `on target delete` '
f'explicitly on {tgt_repr!r}'
)
return current
else:
return ours
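# Illustration (hedged; the schema below is an invented example, not taken from
# this module): merge_actions() only raises when two bases disagree, e.g. when a
# link is inherited from one base declaring `on target delete restrict` and
# another declaring `on target delete delete source`.  The error asks the user
# to settle it explicitly on the child, along the lines of:
#
#   type C extending A, B {
#       overloaded link tgt -> Target {
#           on target delete restrict;
#       }
#   }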
class Link(
sources.Source,
pointers.Pointer,
s_abc.Link,
qlkind=qltypes.SchemaObjectClass.LINK,
data_safe=False,
):
on_target_delete = so.SchemaField(
LinkTargetDeleteAction,
default=LinkTargetDeleteAction.Restrict,
coerce=True,
compcoef=0.9,
merge_fn=merge_actions)
def get_target(self, schema: s_schema.Schema) -> s_objtypes.ObjectType:
return self.get_field_value( # type: ignore[no-any-return]
schema, 'target')
def is_link_property(self, schema: s_schema.Schema) -> bool:
return False
def is_property(self, schema: s_schema.Schema) -> bool:
return False
def scalar(self) -> bool:
return False
def has_user_defined_properties(self, schema: s_schema.Schema) -> bool:
return bool([p for p in self.get_pointers(schema).objects(schema)
if not p.is_special_pointer(schema)])
def get_source_type(
self,
schema: s_schema.Schema
) -> s_types.Type:
from . import types as s_types
source = self.get_source(schema)
assert isinstance(source, s_types.Type)
return source
def compare(
self,
other: so.Object,
*,
our_schema: s_schema.Schema,
their_schema: s_schema.Schema,
context: so.ComparisonContext,
) -> float:
if not isinstance(other, Link):
if isinstance(other, pointers.Pointer):
return 0.0
else:
raise NotImplementedError()
return super().compare(
other, our_schema=our_schema,
their_schema=their_schema, context=context)
def set_target(
self,
schema: s_schema.Schema,
target: s_types.Type,
) -> s_schema.Schema:
schema = super().set_target(schema, target)
tgt_prop = self.getptr(schema, sn.UnqualName('target'))
schema = tgt_prop.set_target(schema, target)
return schema
@classmethod
def get_root_classes(cls) -> Tuple[sn.QualName, ...]:
return (
sn.QualName(module='std', name='link'),
sn.QualName(module='schema', name='__type__'),
)
@classmethod
def get_default_base_name(self) -> sn.QualName:
return sn.QualName('std', 'link')
class LinkSourceCommandContext(sources.SourceCommandContext):
pass
class LinkSourceCommand(inheriting.InheritingObjectCommand[sources.Source_T]):
pass
class LinkCommandContext(pointers.PointerCommandContext[Link],
constraints.ConsistencySubjectCommandContext,
properties.PropertySourceContext,
indexes.IndexSourceCommandContext):
pass
class LinkCommand(
properties.PropertySourceCommand[Link],
pointers.PointerCommand[Link],
context_class=LinkCommandContext,
referrer_context_class=LinkSourceCommandContext,
):
def _append_subcmd_ast(
self,
schema: s_schema.Schema,
node: qlast.DDLOperation,
subcmd: sd.Command,
context: sd.CommandContext,
) -> None:
if (
isinstance(subcmd, pointers.PointerCommand)
and subcmd.classname != self.classname
):
pname = sn.shortname_from_fullname(subcmd.classname)
if pname.name in {'source', 'target'}:
return
super()._append_subcmd_ast(schema, node, subcmd, context)
def validate_object(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> None:
"""Check that link definition is sound."""
super().validate_object(schema, context)
scls = self.scls
assert isinstance(scls, Link)
if not scls.get_owned(schema):
return
target = scls.get_target(schema)
assert target is not None
if not target.is_object_type():
srcctx = self.get_attribute_source_context('target')
raise errors.InvalidLinkTargetError(
f'invalid link target type, expected object type, got '
f'{target.get_verbosename(schema)}',
context=srcctx,
)
if target.is_free_object_type(schema):
srcctx = self.get_attribute_source_context('target')
raise errors.InvalidLinkTargetError(
f'{target.get_verbosename(schema)} is not a valid link target',
context=srcctx,
)
if (
not scls.is_pure_computable(schema)
and not scls.get_from_alias(schema)
and target.is_view(schema)
):
srcctx = self.get_attribute_source_context('target')
raise errors.InvalidLinkTargetError(
f'invalid link type: {target.get_displayname(schema)!r}'
f' is an expression alias, not a proper object type',
context=srcctx,
)
def _get_ast(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
*,
parent_node: Optional[qlast.DDLOperation] = None,
) -> Optional[qlast.DDLOperation]:
node = super()._get_ast(schema, context, parent_node=parent_node)
# __type__ link is special, and while it exists on every object
# it does not have a defined default in the schema (and therefore
# it isn't marked as required.) We intervene here to mark all
# __type__ links required when rendering for SDL/TEXT.
if context.declarative and node is not None:
assert isinstance(node, (qlast.CreateConcreteLink,
qlast.CreateLink))
if node.name.name == '__type__':
assert isinstance(node, qlast.CreateConcretePointer)
node.is_required = True
return node
def _reinherit_classref_dict(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
refdict: so.RefDict,
) -> Tuple[s_schema.Schema,
Dict[sn.Name, Type[sd.ObjectCommand[so.Object]]]]:
if self.scls.get_computable(schema) and refdict.attr != 'pointers':
# If the link is a computable, the inheritance would only
# happen in the case of aliasing, and in that case we only
# need to inherit the link properties and nothing else.
return schema, {}
return super()._reinherit_classref_dict(schema, context, refdict)
class CreateLink(
pointers.CreatePointer[Link],
LinkCommand,
):
astnode = [qlast.CreateConcreteLink, qlast.CreateLink]
referenced_astnode = qlast.CreateConcreteLink
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> sd.Command:
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
if isinstance(astnode, qlast.CreateConcreteLink):
assert isinstance(cmd, pointers.PointerCommand)
cmd._process_create_or_alter_ast(schema, astnode, context)
else:
# this is an abstract link then
if cmd.get_attribute_value('default') is not None:
raise errors.SchemaDefinitionError(
f"'default' is not a valid field for an abstract link",
context=astnode.context)
assert isinstance(cmd, sd.Command)
return cmd
def get_ast_attr_for_field(
self,
field: str,
astnode: Type[qlast.DDLOperation],
) -> Optional[str]:
if (
field == 'required'
and issubclass(astnode, qlast.CreateConcreteLink)
):
return 'is_required'
elif (
field == 'cardinality'
and issubclass(astnode, qlast.CreateConcreteLink)
):
return 'cardinality'
else:
return super().get_ast_attr_for_field(field, astnode)
def _apply_field_ast(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
node: qlast.DDLOperation,
op: sd.AlterObjectProperty,
) -> None:
objtype = self.get_referrer_context(context)
if op.property == 'target' and objtype:
# Due to how SDL is processed the underlying AST may be an
# AlterConcreteLink, which requires different handling.
if isinstance(node, qlast.CreateConcreteLink):
if not node.target:
expr = self.get_attribute_value('expr')
if expr is not None:
node.target = expr.qlast
else:
t = op.new_value
assert isinstance(t, (so.Object, so.ObjectShell))
node.target = utils.typeref_to_ast(schema, t)
else:
old_type = pointers.merge_target(
self.scls,
list(self.scls.get_bases(schema).objects(schema)),
'target',
ignore_local=True,
schema=schema,
)
assert isinstance(op.new_value, (so.Object, so.ObjectShell))
new_type = (
op.new_value.resolve(schema)
if isinstance(op.new_value, so.ObjectShell)
else op.new_value)
new_type_ast = utils.typeref_to_ast(schema, op.new_value)
cast_expr = None
# If the type isn't assignment castable, generate a
# USING with a nonsense cast. It shouldn't matter,
# since there should be no data to cast, but the DDL side
# of things doesn't know that since the command is split up.
if old_type and not old_type.assignment_castable_to(
new_type, schema):
cast_expr = qlast.TypeCast(
type=new_type_ast,
expr=qlast.Set(elements=[]),
)
node.commands.append(
qlast.SetPointerType(
value=new_type_ast,
cast_expr=cast_expr,
)
)
elif op.property == 'on_target_delete':
node.commands.append(qlast.OnTargetDelete(cascade=op.new_value))
else:
super()._apply_field_ast(schema, context, node, op)
def inherit_classref_dict(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
refdict: so.RefDict,
) -> sd.CommandGroup:
if self.scls.get_computable(schema) and refdict.attr != 'pointers':
# If the link is a computable, the inheritance would only
# happen in the case of aliasing, and in that case we only
# need to inherit the link properties and nothing else.
return sd.CommandGroup()
cmd = super().inherit_classref_dict(schema, context, refdict)
if refdict.attr != 'pointers':
return cmd
parent_ctx = self.get_referrer_context(context)
if parent_ctx is None:
return cmd
base_prop_name = sn.QualName('std', 'source')
s_name = sn.get_specialized_name(
sn.QualName('__', 'source'), str(self.classname))
src_prop_name = sn.QualName(
name=s_name, module=self.classname.module)
src_prop = properties.CreateProperty(
classname=src_prop_name,
is_strong_ref=True,
)
src_prop.set_attribute_value('name', src_prop_name)
src_prop.set_attribute_value(
'bases',
so.ObjectList.create(schema, [schema.get(base_prop_name)]),
)
src_prop.set_attribute_value(
'source',
self.scls,
)
src_prop.set_attribute_value(
'target',
parent_ctx.op.scls,
)
src_prop.set_attribute_value('required', True)
src_prop.set_attribute_value('readonly', True)
src_prop.set_attribute_value('owned', True)
src_prop.set_attribute_value('from_alias',
self.scls.get_from_alias(schema))
src_prop.set_attribute_value('cardinality',
qltypes.SchemaCardinality.One)
cmd.prepend(src_prop)
base_prop_name = sn.QualName('std', 'target')
s_name = sn.get_specialized_name(
sn.QualName('__', 'target'), str(self.classname))
tgt_prop_name = sn.QualName(
name=s_name, module=self.classname.module)
tgt_prop = properties.CreateProperty(
classname=tgt_prop_name,
is_strong_ref=True,
)
tgt_prop.set_attribute_value('name', tgt_prop_name)
tgt_prop.set_attribute_value(
'bases',
so.ObjectList.create(schema, [schema.get(base_prop_name)]),
)
tgt_prop.set_attribute_value(
'source',
self.scls,
)
tgt_prop.set_attribute_value(
'target',
self.get_attribute_value('target'),
)
tgt_prop.set_attribute_value('required', False)
tgt_prop.set_attribute_value('readonly', True)
tgt_prop.set_attribute_value('owned', True)
tgt_prop.set_attribute_value('from_alias',
self.scls.get_from_alias(schema))
tgt_prop.set_attribute_value('cardinality',
qltypes.SchemaCardinality.One)
cmd.prepend(tgt_prop)
return cmd
class RenameLink(
LinkCommand,
referencing.RenameReferencedInheritingObject[Link],
):
pass
class RebaseLink(
LinkCommand,
referencing.RebaseReferencedInheritingObject[Link],
):
pass
class SetLinkType(
pointers.SetPointerType[Link],
referrer_context_class=LinkSourceCommandContext,
field='target',
):
def _alter_begin(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super()._alter_begin(schema, context)
scls = self.scls
new_target = scls.get_target(schema)
if not context.canonical:
# We need to update the target link prop as well
tgt_prop = scls.getptr(schema, sn.UnqualName('target'))
tgt_prop_alter = tgt_prop.init_delta_command(
schema, sd.AlterObject)
tgt_prop_alter.set_attribute_value('target', new_target)
self.add(tgt_prop_alter)
return schema
class AlterLinkUpperCardinality(
pointers.AlterPointerUpperCardinality[Link],
referrer_context_class=LinkSourceCommandContext,
field='cardinality',
):
pass
class AlterLinkLowerCardinality(
pointers.AlterPointerLowerCardinality[Link],
referrer_context_class=LinkSourceCommandContext,
field='required',
):
pass
class AlterLinkOwned(
referencing.AlterOwned[Link],
pointers.PointerCommandOrFragment[Link],
referrer_context_class=LinkSourceCommandContext,
field='owned',
):
pass
class SetTargetDeletePolicy(sd.Command):
astnode = qlast.OnTargetDelete
@classmethod
def _cmd_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> sd.AlterObjectProperty:
return sd.AlterObjectProperty(
property='on_target_delete'
)
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> sd.Command:
assert isinstance(astnode, qlast.OnTargetDelete)
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
assert isinstance(cmd, sd.AlterObjectProperty)
cmd.new_value = astnode.cascade
return cmd
class AlterLink(
LinkCommand,
pointers.AlterPointer[Link],
):
astnode = [qlast.AlterConcreteLink, qlast.AlterLink]
referenced_astnode = qlast.AlterConcreteLink
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> AlterLink:
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
assert isinstance(cmd, AlterLink)
if isinstance(astnode, qlast.CreateConcreteLink):
cmd._process_create_or_alter_ast(schema, astnode, context)
else:
cmd._process_alter_ast(schema, astnode, context)
return cmd
def _apply_field_ast(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
node: qlast.DDLOperation,
op: sd.AlterObjectProperty,
) -> None:
if op.property == 'target':
if op.new_value:
assert isinstance(op.new_value, so.ObjectShell)
node.commands.append(
qlast.SetPointerType(
value=utils.typeref_to_ast(schema, op.new_value),
),
)
elif op.property == 'computable':
if not op.new_value:
node.commands.append(
qlast.SetField(
name='expr',
value=None,
special_syntax=True,
),
)
elif op.property == 'on_target_delete':
node.commands.append(qlast.OnTargetDelete(cascade=op.new_value))
else:
super()._apply_field_ast(schema, context, node, op)
class DeleteLink(
LinkCommand,
pointers.DeletePointer[Link],
):
astnode = [qlast.DropConcreteLink, qlast.DropLink]
referenced_astnode = qlast.DropConcreteLink
# NB: target type cleanup (e.g. target compound type) is done by
# the DeleteProperty handler for the @target property.
def _get_ast(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
*,
parent_node: Optional[qlast.DDLOperation] = None,
) -> Optional[qlast.DDLOperation]:
if self.get_orig_attribute_value('from_alias'):
# This is an alias type, appropriate DDL would be generated
# from the corresponding Alter/DeleteAlias node.
return None
else:
return super()._get_ast(schema, context, parent_node=parent_node)
|
import bisect
import string
from abc import ABC, abstractmethod
from typing import Optional

from django.conf import settings


class AbstractGrid(ABC):
    enabled = False

    @abstractmethod
    def get_square_for_point(self, x, y) -> Optional[str]:
        pass

    @abstractmethod
    def get_squares_for_bounds(self, bounds) -> Optional[str]:
        pass


class Grid(AbstractGrid):
    enabled = True

    def __init__(self, rows, cols):
        rows = tuple(float(y) for y in rows)
        cols = tuple(float(x) for x in cols)
        self.rows = tuple(sorted(rows))
        self.cols = tuple(sorted(cols))
        if self.rows == rows:
            self.invert_y = False
        elif self.rows == tuple(reversed(rows)):
            self.invert_y = True
        else:
            raise ValueError('row coordinates are not ordered')
        if self.cols == cols:
            self.invert_x = False
        elif self.cols == tuple(reversed(cols)):
            self.invert_x = True
        else:
            raise ValueError('column coordinates are not ordered')

    def get_square_for_point(self, x, y):
        x = bisect.bisect(self.cols, x)
        if x <= 0 or x >= len(self.cols):
            return None
        y = bisect.bisect(self.rows, y)
        if y <= 0 or y >= len(self.rows):
            return None
        if self.invert_x:
            x = len(self.cols) - x
        if self.invert_y:
            y = len(self.rows) - y
        return '%s%d' % (string.ascii_uppercase[x - 1], y)

    def get_squares_for_bounds(self, bounds):
        minx, miny, maxx, maxy = bounds
        if self.invert_x:
            minx, maxx = maxx, minx
        if self.invert_y:
            miny, maxy = maxy, miny
        min_square = self.get_square_for_point(minx, miny)
        max_square = self.get_square_for_point(maxx, maxy)
        if not min_square or not max_square:
            return None
        if min_square == max_square:
            return min_square
        return '%s-%s' % (min_square, max_square)


class DummyGrid(AbstractGrid):
    def get_square_for_point(self, x, y):
        return None

    def get_squares_for_bounds(self, bounds):
        return None


if settings.GRID_COLS and settings.GRID_ROWS:
    grid = Grid(settings.GRID_ROWS.split(','), settings.GRID_COLS.split(','))
else:
    grid = DummyGrid()
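# Example usage (illustrative values, not from settings): a 2x2 grid whose row
# and column boundaries both span 0-200 maps points to lettered/numbered squares:
#
#   g = Grid(rows=('0', '100', '200'), cols=('0', '100', '200'))
#   g.get_square_for_point(50, 150)               # -> 'A2'
#   g.get_squares_for_bounds((50, 50, 150, 150))  # -> 'A1-B2'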
|
from model.contact import Contact
from random import randrange


def test_edit_contact(app, db, check_ui):
    if len(db.get_contact_list()) == 0:
        app.contact.create(Contact(first_name="Sabina", last_name="test", company="Pewex",
                                   address="osiedle", phone_home="123456789", e_mail="sabina@sabina.pl",
                                   year="2016"))
    old_contact = db.get_contact_list()
    index = randrange(len(old_contact))
    contact = Contact(first_name='Kasia', last_name='Bober')
    contact.id = old_contact[index].id
    app.contact.edit_contact_by_index(index, contact)
    assert len(old_contact) == app.contact.count()
    new_contact = db.get_contact_list()
    old_contact[index] = contact
    assert old_contact == new_contact
    if check_ui:
        assert sorted(new_contact, key=Contact.id_or_max) == sorted(
            app.group.get_contact_list(), key=Contact.id_or_max
        )
|
import os
import socket
from unittest import mock

from oslotest import base as test_base

from oslo_service import systemd


class SystemdTestCase(test_base.BaseTestCase):
    """Test case for Systemd service readiness."""

    def test__abstractify(self):
        sock_name = '@fake_socket'
        res = systemd._abstractify(sock_name)
        self.assertEqual('\0{0}'.format(sock_name[1:]), res)

    @mock.patch.object(os, 'getenv', return_value='@fake_socket')
    def _test__sd_notify(self, getenv_mock, unset_env=False):
        self.ready = False
        self.closed = False

        class FakeSocket(object):
            def __init__(self, family, type):
                pass

            def connect(fs, socket):
                pass

            def close(fs):
                self.closed = True

            def sendall(fs, data):
                if data == b'READY=1':
                    self.ready = True

        with mock.patch.object(socket, 'socket', new=FakeSocket):
            if unset_env:
                systemd.notify_once()
            else:
                systemd.notify()
            self.assertTrue(self.ready)
            self.assertTrue(self.closed)

    def test_notify(self):
        self._test__sd_notify()

    def test_notify_once(self):
        os.environ['NOTIFY_SOCKET'] = '@fake_socket'
        self._test__sd_notify(unset_env=True)
        self.assertRaises(KeyError, os.environ.__getitem__, 'NOTIFY_SOCKET')

    @mock.patch("socket.socket")
    def test_onready(self, sock_mock):
        recv_results = [b'READY=1', '', socket.timeout]
        expected_results = [0, 1, 2]
        for recv, expected in zip(recv_results, expected_results):
            if recv == socket.timeout:
                sock_mock.return_value.recv.side_effect = recv
            else:
                sock_mock.return_value.recv.return_value = recv
            actual = systemd.onready('@fake_socket', 1)
            self.assertEqual(expected, actual)
|
class Solution:
    def dailyTemperatures(self, T):
        # Scan right to left. m[t] holds the nearest index at or to the right of
        # the current day where temperature t was seen; the wait for day i is the
        # closest such index over all strictly warmer temperatures, or 0 if none.
        ans = []
        m = [None] * 101  # temperatures are bounded above by 100
        for i in range(len(T) - 1, -1, -1):
            x = T[i]
            m[x] = i
            ans.append(min([j for j in m[x + 1:] if j is not None], default=i) - i)
ans.reverse()
return ans
print(Solution().dailyTemperatures([73, 74, 75, 71, 69, 72, 76, 73]))
|
"""
Print the number of bases in a nib file.
usage: %prog nib_file
"""
from bx.seq import nib as seq_nib
import sys
nib = seq_nib.NibFile(open(sys.argv[1], "rb"))
print(nib.length)
|
'''
Created on Feb 3, 2013
@author: bpurgaso
'''
from twisted.words.protocols import irc
from twisted.internet import protocol
from twisted.internet import reactor
from twisted.internet import threads
from ConfigManager import ConfigManager
from Authenticator import Authenticator
from subprocess import PIPE, STDOUT, Popen
class bot(irc.IRCClient):
"""
irc bots, yay
"""
def _get_nickname(self):
return self.factory.nickname
nickname = property(_get_nickname)
def reloadConfig(self):
self.config = self.configManager.getConfig()
def signedOn(self):
#Initial Setup
self.configManager = self.factory.configManager
self.configManager.registerListener(self)
self.config = self.configManager.getConfig()
self.auth = self.factory.auth
print "Signed on as %s." % (self.nickname)
for i in self.config['channels'].keys():
if self.config['channels'][i]['autojoin']:
irc.IRCClient.join(self, i, self.config['channels'][i]['key'])
def joined(self, channel):
print "Joined %s." % (channel)
def irc_INVITE(self, prefix, params):
""" called by twisted,
if the bot was invited
"""
channel = params[-1].lower().replace('#', '')
if channel not in self.config['channels'].keys():
self.auth.createChannelEntry(channel)
self.join(channel, self.config['channels'][channel]['key'])
def privmsg(self, user, channel, msg):
'''
Called whenever an inbound message arrives
'''
print user, channel, msg
user = user.rsplit('!', 1)[0]
# Check to see if they're sending me a private message
if channel == self.nickname:
channel = user
index = 0
else:
index = 1
        # See if the message is directed at me
if msg.startswith(self.nickname + ":") or index == 0:
'''
embedded commands go here
'''
command = msg.rsplit()[index].lower()
#REGISTER
if command == 'register':
if self.auth.isUserAuthorized('register', user):
self.msg(channel, self.auth.registerUser(user, 'default'))
else:
self.msg(channel, "You aren't authorized for register.")
#PROMOTE
elif command == 'promote':
if self.auth.isUserAuthorized('promote', user):
try:
target_uname = msg.rsplit()[index + 1].lower()
target_group = msg.rsplit()[index + 2].lower()
if self.auth.getPowerOfUser(user) <=\
self.auth.getPowerOfGroup(target_group):
self.postToIRC((channel, [self.auth.registerUser(\
target_uname, target_group)]))
else:
self.postToIRC((channel, ['%s, your power level'\
' is'\
' insufficient.' % user]))
except:
self.postToIRC((channel, ['Check your formatting and'\
' try again.']))
else:
self.msg(channel, "You aren't authorized for register.")
#WHOAMI
elif command == 'whoami':
if self.auth.isUserAuthorized('whoami', user):
self.postToIRC((channel, [self.auth.whoami(user)]))
else:
self.msg(channel, "You aren't authorized for register.")
#OPME
elif command == 'opme':
if self.auth.isUserAuthorized('opme', user):
                    self.mode(channel, True, 'o', None, user)
else:
self.msg(channel, "You aren't authorized for opme.")
#AUTOOP
elif command == 'autoop':
if self.auth.isUserAuthorized('autoop', user):
                    if msg.rsplit()[index + 1].lower() == 'on':
self.postToIRC((channel, self.auth.toggleAutoOp(\
user, channel, True)))
else:
self.postToIRC((channel, self.auth.toggleAutoOp(\
user, channel, False)))
else:
self.msg(channel, "You aren't authorized for autoop.")
#HELP
elif command == 'help':
if self.auth.isUserAuthorized('help', user):
for i in self.auth.getAvailableCommandsForUser(user):
self.msg(user, '%s: %s' %\
(i, self.auth.getHelpForCommand(i)))
self.msg(channel, 'I\'ve sent you a pm.')
else:
self.msg(channel, "You aren't authorized for help.")
#RELOAD
elif command == 'reload':
if self.auth.isUserAuthorized('reload', user):
self.configManager.reload()
self.msg(channel, "Configuration Reloaded")
if not self.auth.sanityCheck(False):
self.msg(channel, "Configuration Sanity is suspect, "\
"rolling back.")
else:
self.msg(channel, "You aren't authorized for reload.")
#KICK
elif command == 'kick':
if self.auth.isUserAuthorized('kick', user):
if self.nickname not in msg.rsplit()[index + 1:]:
for i in msg.rsplit()[index + 1:]:
self.kick(channel, i, 'Later broseph.')
else:
self.msg(channel, "Nope, not happening.")
else:
self.kick(channel, user, 'Sorry bro, nothing personal.')
else:
'''
External script execution goes here
'''
if self.auth.isUserAuthorized(msg.rsplit()[index].lower(),\
user):
#kick off the async call
#channel, command, params
self.invokeCommand(channel,\
command,\
(" ".join(msg.rsplit()[index + 1:])))
else:
self.msg(channel, "You aren't authorized for %s." %\
(command))
else:
'''
            filter processing goes here
'''
pass
def invokeCommand(self, channel, command, params):
tmp = threads.deferToThread(self.__shellCall, channel, command, params)
tmp.addCallback(self.postToIRC)
def __shellCall(self, channel, command, params):
command = self.sanitize(command)
params = self.sanitize(params)
command = "exec python ./bin/%s.py %s 2> /dev/null" % (command, params)
self.p = Popen(
command,
stderr=STDOUT,
stdout=PIPE,
close_fds=True,
shell=True)
out, err = self.p.communicate() # @UnusedVariable
return (channel, out.splitlines())
def sanitize(self, s):
for i in self.config['sanitize']:
s = s.replace(i, '')
return s
def postToIRC(self, tpl):
for i in tpl[1]:
self.msg(tpl[0], i)
def userJoined(self, user, channel):
channel_dict = channel.replace('#', '')
if self.config['channels'][channel_dict]['enable_autoop'] and\
user in self.config['channels'][channel_dict]['autoop']:
            self.mode(channel, True, 'o', None, user)
if self.config['channels'][channel_dict]['enable_greeting']:
self.msg(channel, "%s: %s" % (user,\
self.config['channels'][channel_dict]['greeting']))
def kickedFrom(self, channel, kicker, message):
""" called by twisted,
if the bot was kicked
"""
channel = channel.replace('#', '')
if channel in self.config['channels'].keys() and\
self.config['channels'][channel]['autojoin']:
self.join(channel, self.config['channels'][channel]['key'])
self.msg(kicker, "Why would you do that to me brah?")
class botFactory(protocol.ClientFactory):
"""
Factory for producing "bot"
"""
protocol = bot
def __init__(self, channel, configManager, auth):
self.startChannel = channel
self.configManager = configManager
self.config = self.configManager.getConfig()
self.auth = auth
#required
self.nickname = self.config['nick']
def clientConnectionLost(self, connector, reason):
print "Lost connection (%s), reconnecting." % (reason)
connector.connect()
def clientConnectionFailed(self, connector, reason):
print "Could not connect: %s" % (reason)
class Hydra(object):
'''
The big bad scary bot
'''
def __init__(self):
self.startChannel = '#hydra'
self.configManager = ConfigManager()
self.config = self.configManager.getConfig()
self.configManager.registerListener(self)
self.auth = Authenticator(self.configManager)
n = self.config['network']
p = self.config['port']
b = botFactory(self.startChannel, self.configManager, self.auth)
reactor.connectTCP(n, p, b) # @UndefinedVariable
reactor.run() # @UndefinedVariable
def reloadConfig(self):
self.config = self.configManager.getConfig()
h = Hydra()
|
"""Support for Tibber."""
import asyncio
import logging
import aiohttp
import tibber
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_NAME,
EVENT_HOMEASSISTANT_STOP,
Platform,
)
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import discovery
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.util import dt as dt_util
from .const import DATA_HASS_CONFIG, DOMAIN
PLATFORMS = [Platform.SENSOR]
CONFIG_SCHEMA = cv.removed(DOMAIN, raise_if_present=False)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
"""Set up the Tibber component."""
hass.data[DATA_HASS_CONFIG] = config
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
tibber_connection = tibber.Tibber(
access_token=entry.data[CONF_ACCESS_TOKEN],
websession=async_get_clientsession(hass),
time_zone=dt_util.DEFAULT_TIME_ZONE,
)
hass.data[DOMAIN] = tibber_connection
async def _close(event):
await tibber_connection.rt_disconnect()
entry.async_on_unload(hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _close))
try:
await tibber_connection.update_info()
except asyncio.TimeoutError as err:
raise ConfigEntryNotReady from err
except aiohttp.ClientError as err:
_LOGGER.error("Error connecting to Tibber: %s ", err)
return False
except tibber.InvalidLogin as exp:
_LOGGER.error("Failed to login. %s", exp)
return False
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
# set up notify platform, no entry support for notify component yet,
# have to use discovery to load platform.
hass.async_create_task(
discovery.async_load_platform(
hass, "notify", DOMAIN, {CONF_NAME: DOMAIN}, hass.data[DATA_HASS_CONFIG]
)
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(
config_entry, PLATFORMS
)
if unload_ok:
tibber_connection = hass.data.get(DOMAIN)
await tibber_connection.rt_disconnect()
return unload_ok
|
import pytest
from collections import OrderedDict
from insights.parsers import (calc_offset, keyword_search, optlist_to_dict, parse_delimited_table, parse_fixed_table,
split_kv_pairs, unsplit_lines, ParseException, SkipException)
SPLIT_TEST_1 = """
keyword1 = value1 # Inline comments
# Comment indented
keyword3 # Key with no separator
keyword2 = value2a=True, value2b=100M
""".strip()
SPLIT_TEST_1_OD = OrderedDict([
('keyword1', 'value1'),
('keyword3', ''),
('keyword2', 'value2a=True, value2b=100M')
])
SPLIT_TEST_2 = """
@ Comment line
keyword1: value1 @ Inline comments
keyword2 : value2a=True, value2b=100M
@ Comment indented
keyword3 @ Key with no separator
""".strip()
OFFSET_CONTENT_1 = """
data 1 line
data 2 line
""".strip()
OFFSET_CONTENT_2 = """
Warning line
Error line
data 1 line
data 2 line
Trailing line
Blank line above
Another trailing line
Yet another trailing line
Yet yet another trailing line
""".strip()
def test_split_kv_pairs():
kv_pairs = split_kv_pairs(SPLIT_TEST_1.splitlines())
assert len(kv_pairs) == 2
assert kv_pairs == {
'keyword1': 'value1',
'keyword2': 'value2a=True, value2b=100M'
}
kv_pairs = split_kv_pairs(SPLIT_TEST_1.splitlines(), filter_string='value2')
assert len(kv_pairs) == 1
assert kv_pairs == {
'keyword2': 'value2a=True, value2b=100M'
}
kv_pairs = split_kv_pairs(SPLIT_TEST_1.splitlines(), use_partition=True)
assert len(kv_pairs) == 3
assert kv_pairs == {
'keyword1': 'value1',
'keyword2': 'value2a=True, value2b=100M',
'keyword3': ''
}
kv_pairs = split_kv_pairs(SPLIT_TEST_1.splitlines(), use_partition=True, ordered=True)
assert len(kv_pairs) == 3
assert kv_pairs == SPLIT_TEST_1_OD
kv_pairs = split_kv_pairs(SPLIT_TEST_2.splitlines(), comment_char='@', split_on=':')
assert len(kv_pairs) == 2
assert kv_pairs == {
'keyword1': 'value1',
'keyword2': 'value2a=True, value2b=100M'
}
kv_pairs = split_kv_pairs(SPLIT_TEST_2.splitlines(), comment_char='@', split_on=':', filter_string='value2')
assert len(kv_pairs) == 1
assert kv_pairs == {
'keyword2': 'value2a=True, value2b=100M'
}
kv_pairs = split_kv_pairs(SPLIT_TEST_2.splitlines(), comment_char='@', split_on=':', use_partition=True)
assert len(kv_pairs) == 3
assert kv_pairs == {
'keyword1': 'value1',
'keyword2': 'value2a=True, value2b=100M',
'keyword3': ''
}
SPLIT_LINES = """
Line one
Line two part 1 \\
line two part 2\\
line two part 3
Line three
""".strip()
SPLIT_LINES_2 = """
Line one
Line two part 1 ^
line two part 2^
line two part 3
Line three^
""".strip()
SPLIT_LINES_3 = """
web.default_taskmaster_tasks = RHN::Task::SessionCleanup, RHN::Task::ErrataQueue,
RHN::Task::ErrataEngine,
RHN::Task::DailySummary, RHN::Task::SummaryPopulation,
RHN::Task::RHNProc,
RHN::Task::PackageCleanup
db_host ="""
def test_unsplit_lines():
lines = list(unsplit_lines(SPLIT_LINES.splitlines()))
assert len(lines) == 3
assert lines[0] == 'Line one'
assert lines[1] == 'Line two part 1 line two part 2 line two part 3'
assert lines[2] == 'Line three'
lines = list(unsplit_lines(SPLIT_LINES_2.splitlines(), cont_char='^'))
assert len(lines) == 3
assert lines[0] == 'Line one'
assert lines[1] == 'Line two part 1 line two part 2 line two part 3'
assert lines[2] == 'Line three' # test continuation on last line
# Test keeping continuation character on line
lines = list(unsplit_lines(
SPLIT_LINES_3.splitlines(), cont_char=',', keep_cont_char=True
))
assert len(lines) == 4
assert lines[0] == ''
assert lines[1] == 'web.default_taskmaster_tasks = RHN::Task::SessionCleanup, RHN::Task::ErrataQueue, RHN::Task::ErrataEngine, RHN::Task::DailySummary, RHN::Task::SummaryPopulation, RHN::Task::RHNProc, RHN::Task::PackageCleanup'
assert lines[2] == ''
assert lines[3] == 'db_host ='
def test_calc_offset():
assert calc_offset(OFFSET_CONTENT_1.splitlines(), target=[]) == 0
assert calc_offset(OFFSET_CONTENT_1.splitlines(), target=[None]) == 0
assert calc_offset(OFFSET_CONTENT_1.splitlines(), target=['data ']) == 0
with pytest.raises(ValueError):
calc_offset(OFFSET_CONTENT_1.splitlines(), target=['xdata '])
with pytest.raises(ValueError):
calc_offset(OFFSET_CONTENT_1.splitlines(),
target=['data '],
invert_search=True)
assert calc_offset(OFFSET_CONTENT_1.splitlines(),
target=['Trailing', 'Blank', 'Another '],
invert_search=True) == 0
assert calc_offset(OFFSET_CONTENT_2.splitlines(), target=[]) == 0
assert calc_offset(OFFSET_CONTENT_2.splitlines(), target=['data ']) == 3
assert calc_offset(reversed(OFFSET_CONTENT_2.splitlines()),
target=['Trailing', 'Blank', 'Another ', 'Yet'],
invert_search=True) == 6
assert calc_offset(OFFSET_CONTENT_2.splitlines(),
target=['data', '2']) == 3
assert calc_offset(OFFSET_CONTENT_2.splitlines(),
target=['data', '2'],
require_all=True) == 4
assert calc_offset(
reversed(OFFSET_CONTENT_2.splitlines()),
target=['Trailing', 'Blank', 'Another ', 'Yet'],
invert_search=True) == 6
assert calc_offset(
reversed(OFFSET_CONTENT_2.splitlines()),
target=['Trailing', 'Blank', 'Another ', 'Yet'],
invert_search=True,
require_all=True) == 6
FIXED_CONTENT_1 = """
Column1 Column2 Column3
data1 data 2 data 3
data4 data5 data6
data 7 data 9
""".strip()
FIXED_CONTENT_1A = """
WARNING
Column1 Column2 Column3
data1 data 2 data 3
data4 data5 data6
data 7 data 9
""".strip()
FIXED_CONTENT_1B = """
Column1 Column2 Column3
data1 data 2
data4 data5 data6
data 7 data 9
""".strip()
FIXED_CONTENT_2 = """
WARNING WARNING WARNING
Some message
Another message
Column1 Column2 Column3
data1 data 2 data 3
data4 data5 data6
data 7 data 9
""".strip()
FIXED_CONTENT_3 = """
WARNING WARNING WARNING
Some message
Another message
Column1 Column2 Column3
data1 data 2 data 3
data4 data5 data6
data 7 data 9
Trailing non-data line
Another trailing non-data line
""".strip()
FIXED_CONTENT_4 = """
WARNING WARNING WARNING
Some message
Another message
Column1 Column 2 Column 3
data1 data 2 data 3
data4 data5 data6
data 7 data 9
data10
Trailing non-data line
Another trailing non-data line
""".strip()
FIXED_CONTENT_5 = """
Column1 Column 2 Column 3
data1 data 2 data 3
data 7 data 9
data10
""".strip()
FIXED_CONTENT_DUP_HEADER_PREFIXES = """
NAMESPACE NAME LABELS
default foo app=superawesome
""".strip()
def test_parse_fixed_table():
data = parse_fixed_table(FIXED_CONTENT_1.splitlines())
assert len(data) == 3
assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'}
assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}
assert data[2] == {'Column1': 'data 7', 'Column2': '', 'Column3': 'data 9'}
data = parse_fixed_table(FIXED_CONTENT_1A.splitlines(), heading_ignore=['Column1 '])
assert len(data) == 3
assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'}
assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}
assert data[2] == {'Column1': 'data 7', 'Column2': '', 'Column3': 'data 9'}
data = parse_fixed_table(FIXED_CONTENT_1B.splitlines())
assert len(data) == 3
assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': ''}
assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}
assert data[2] == {'Column1': 'data 7', 'Column2': '', 'Column3': 'data 9'}
data = parse_fixed_table(FIXED_CONTENT_2.splitlines(), heading_ignore=['Column1 '])
assert len(data) == 3
assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'}
assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}
assert data[2] == {'Column1': 'data 7', 'Column2': '', 'Column3': 'data 9'}
data = parse_fixed_table(FIXED_CONTENT_3.splitlines(),
heading_ignore=['Column1 '],
trailing_ignore=['Trailing', 'Another'])
assert len(data) == 3
assert data[0] == {'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'}
assert data[1] == {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}
assert data[2] == {'Column1': 'data 7', 'Column2': '', 'Column3': 'data 9'}
data = parse_fixed_table(FIXED_CONTENT_4.splitlines(),
heading_ignore=['Column1 '],
header_substitute=[('Column 2', 'Column_2'), ('Column 3', 'Column_3')],
trailing_ignore=['Trailing', 'Another'])
assert len(data) == 4
assert data[0] == {'Column1': 'data1', 'Column_2': 'data 2', 'Column_3': 'data 3'}
assert data[1] == {'Column1': 'data4', 'Column_2': 'data5', 'Column_3': 'data6'}
assert data[2] == {'Column1': 'data 7', 'Column_2': '', 'Column_3': 'data 9'}
assert data[3] == {'Column1': 'data10', 'Column_2': '', 'Column_3': ''}
# Test that if we search for trailing data that is always found, then we
# should get the whole thing parsed as a table from the header line
data = parse_fixed_table(
['foo' + line for line in FIXED_CONTENT_4.splitlines()],
heading_ignore=['fooColumn1 '],
header_substitute=[('fooColumn1', 'Column1'), ('Column 2', 'Column_2'), ('Column 3', 'Column_3')],
trailing_ignore=['foo']
)
assert len(data) == 6
assert data[4] == {'Column1': 'fooTrailing', 'Column_2': 'non-data li', 'Column_3': 'ne'}
assert data[5] == {'Column1': 'foo Another', 'Column_2': 'trailing no', 'Column_3': 'n-data line'}
data = parse_fixed_table(FIXED_CONTENT_DUP_HEADER_PREFIXES.splitlines())
assert data[0] == {'NAMESPACE': 'default', 'NAME': 'foo', 'LABELS': 'app=superawesome'}
data = parse_fixed_table(FIXED_CONTENT_5.splitlines())
assert len(data) == 3
def test_parse_fixed_table_empty_exception():
with pytest.raises(ParseException) as pe:
parse_fixed_table(FIXED_CONTENT_1B.splitlines(), empty_exception=True)
assert "Incorrect line:" in str(pe.value)
def test_optlist_standard():
d = optlist_to_dict('key1,key2=val2,key1=val1,key3')
assert sorted(d.keys()) == sorted(['key1', 'key2', 'key3'])
assert d['key1'] == 'val1'
assert d['key2'] == 'val2'
assert d['key3'] is True
def test_optlist_no_vals():
d = optlist_to_dict('key1,key2=val2,key1=val1,key3', kv_sep=None)
assert sorted(d.keys()) == sorted(['key1', 'key1=val1', 'key2=val2', 'key3'])
assert d['key1'] is True
assert d['key1=val1'] is True
assert d['key2=val2'] is True
assert d['key3'] is True
def test_optlist_strip_quotes():
d = optlist_to_dict(
'''key1="foo",key2='bar',key3="mismatched quotes',key4="inner'quotes"''',
strip_quotes=True
)
assert sorted(d.keys()) == sorted(['key1', 'key2', 'key3', 'key4'])
assert d['key1'] == 'foo'
assert d['key2'] == 'bar'
assert d['key3'] == '"mismatched quotes\''
assert d['key4'] == "inner'quotes"
def test_optlist_with_spaces():
d = optlist_to_dict(
'''key1=foo, key2=bar'''
)
assert 'key1' in d
assert 'key2' in d
PS_AUX_TEST = """
USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
root 1 0.0 0.0 19356 1544 ? Ss May31 0:01 /sbin/init
root 1821 0.0 0.0 0 0 ? S May31 0:25 [kondemand/0]
root 1864 0.0 0.0 18244 668 ? Ss May31 0:05 irqbalance --pid=/var/run/irqbalance.pid
user1 20160 0.0 0.0 108472 1896 pts/3 Ss 10:09 0:00 bash
root 20357 0.0 0.0 9120 760 ? Ss 10:09 0:00 /sbin/dhclient -1 -q -lf /var/lib/dhclient/dhclient-extbr0.leases -pf /var/run/dhclient-extbr0.pid extbr0
qemu 22673 0.8 10.2 1618556 805636 ? Sl 11:38 1:07 /usr/libexec/qemu-kvm -name rhel7 -S -M rhel6.5.0 -enable-kvm -m 1024 -smp 2,sockets=2,cores=1,threads=1 -uuid 13798ffc-bc1e-d437-4f3f-2e0fa6c923ad
"""
MISSING_DATA_TEST = """
WARNING: Locking disabled. Be careful! This could corrupt your metadata.
LVM2_PV_FMT|LVM2_PV_UUID|LVM2_DEV_SIZE|LVM2_PV_NAME|LVM2_PV_MAJOR|LVM2_PV_MINOR|LVM2_PV_MDA_FREE|LVM2_PV_MDA_SIZE|LVM2_PV_EXT_VSN|LVM2_PE_START|LVM2_PV_SIZE|LVM2_PV_FREE|LVM2_PV_USED|LVM2_PV_ATTR|LVM2_PV_ALLOCATABLE|LVM2_PV_EXPORTED|LVM2_PV_MISSING|LVM2_PV_PE_COUNT|LVM2_PV_PE_ALLOC_COUNT|LVM2_PV_TAGS|LVM2_PV_MDA_COUNT|LVM2_PV_MDA_USED_COUNT|LVM2_PV_BA_START|LVM2_PV_BA_SIZE|LVM2_PV_IN_USE|LVM2_PV_DUPLICATE|LVM2_VG_NAME
WARNING: Locking disabled. Be careful! This could corrupt your metadata.
"""
SUBSTITUTE_HEADERS_TEST = """
address,port,state,read-only
0.0.0.0,3000,LISTEN,N
10.76.19.184,37500,ESTAB,Y
""".strip()
POSTGRESQL_LOG = """
schema | table | rows
public | rhnsnapshotpackage | 47428950
public | rhnpackagefile | 32174333
public | rhnpackagecapability | 12934215
public | rhnpackagechangelogrec | 11269933
public | rhnchecksum | 10129746
public | rhnactionconfigrevision | 2894957
public | rhnpackageprovides | 2712442
public | rhnpackagerequires | 2532861
public | rhn_command_target | 1009152
public | rhnconfigfilename | 0
public | rhnxccdfidentsystem | 0
public | rhndistchannelmap | 0
public | rhnactionvirtshutdown | 0
public | rhnpublicchannelfamily | 0
(402 rows)
""".strip() # Normally has a --- separator line, which is ignored using get_active_lines
TABLE1 = """
THIS IS A HEADER
this is some content_with_blank_prefix
This is more content
""".strip()
TABLE2 = [
"SID Nr Instance SAPLOCALHOST Version DIR_EXECUTABLE",
"HA2| 16| D16| lu0417|749, patch 10, changelist 1698137| /usr/sap/HA2/D16/exe",
"HA2| 22| D22| lu0417|749, patch 10, changelist 1698137| /usr/sap/HA2/D22/exe"
]
TABLE3 = """
THIS | IS | A | HEADER
this ^ is ^ some ^ content
This ^ is ^ more ^ content
""".strip()
def test_parse_delimited_table():
# No content? No table.
assert parse_delimited_table([]) == []
# Test maximum splits and header 'ignore', which should actually be
# called 'header_startswith'
tbl = parse_delimited_table(
PS_AUX_TEST.splitlines(), max_splits=10, heading_ignore=['USER']
)
assert tbl
assert isinstance(tbl, list)
assert len(tbl) == 6
assert isinstance(tbl[0], dict)
assert tbl[0] == {
'%MEM': '0.0', 'TTY': '?', 'VSZ': '19356', 'PID': '1', '%CPU': '0.0',
'START': 'May31', 'COMMAND': '/sbin/init', 'USER': 'root',
'STAT': 'Ss', 'TIME': '0:01', 'RSS': '1544'
}
assert tbl[5]['COMMAND'] == \
'/usr/libexec/qemu-kvm -name rhel7 -S -M rhel6.5.0 -enable-kvm -m 1024 -smp 2,sockets=2,cores=1,threads=1 -uuid 13798ffc-bc1e-d437-4f3f-2e0fa6c923ad'
# Test trailing ignore not found
tbl = parse_delimited_table(
MISSING_DATA_TEST.splitlines(), delim='|',
heading_ignore=['LVM2_PV_FMT'],
trailing_ignore=['WARNING', 'ERROR', 'Cannot get lock']
)
assert isinstance(tbl, list)
assert len(tbl) == 0
# Header substitution
tbl = parse_delimited_table(
SUBSTITUTE_HEADERS_TEST.splitlines(), delim=',', strip=False,
header_substitute=[('read-only', 'read_only')]
)
assert tbl
assert isinstance(tbl, list)
assert len(tbl) == 2
assert isinstance(tbl[1], dict)
assert tbl[1] == {
'address': '10.76.19.184', 'port': '37500', 'state': 'ESTAB', 'read_only': 'Y'
}
# Test change of delimiter and trailing_ignore
tbl = parse_delimited_table(POSTGRESQL_LOG.splitlines(), delim='|', trailing_ignore=['('])
assert isinstance(tbl, list)
assert len(tbl) == 14
assert isinstance(tbl[0], dict)
assert tbl[0] == {
'schema': 'public', 'table': 'rhnsnapshotpackage', 'rows': '47428950'
}
# Test using different header delimiter
result = parse_delimited_table(TABLE3.splitlines(), delim="^", header_delim="|")
assert isinstance(result, list)
assert len(result) == 2
assert isinstance(result[0], dict)
expected = [{"THIS": "this", "IS": "is", "A": "some", "HEADER": "content"},
{"THIS": "This", "IS": "is", "A": "more", "HEADER": "content"}]
assert expected == result
# Test explicit None as header delimiter, different from content delimiter
result = parse_delimited_table(TABLE2, delim='|', header_delim=None)
assert isinstance(result, list)
assert len(result) == 2
assert isinstance(result[0], dict)
expected = [{"SID": "HA2", "Nr": "16", "Instance": "D16", "SAPLOCALHOST": "lu0417",
"Version": "749, patch 10, changelist 1698137",
"DIR_EXECUTABLE": "/usr/sap/HA2/D16/exe"},
{"SID": "HA2", "Nr": "22", "Instance": "D22", "SAPLOCALHOST": "lu0417",
"Version": "749, patch 10, changelist 1698137",
"DIR_EXECUTABLE": "/usr/sap/HA2/D22/exe"}]
assert expected == result
# Test raw_line_key
TABLE1_SP = TABLE1.splitlines()
result = parse_delimited_table(TABLE1_SP, raw_line_key='raw_line')
assert isinstance(result, list)
assert len(result) == 2
assert isinstance(result[0], dict)
# Get the RAW line
assert result[0]['raw_line'] == TABLE1_SP[1]
DATA_LIST = [
{'name': 'test 1', 'role': 'server', 'memory_gb': 16, 'ssd': True},
{'name': 'test 2', 'role': 'server', 'memory_gb': 256, 'ssd': False},
{'name': 'test 3', 'role': 'server', 'memory_gb': 16, 'ssd': False},
{'name': 'test 4', 'role': 'embedded', 'memory_gb': 1, 'ssd': False},
{'name': 'test 5', 'role': 'workstation', 'memory_gb': 16, 'ssd': True},
]
CERT_LIST = [
{
'status': 'MONITORING',
'stuck': 'no',
'key pair storage': "type=NSSDB,location='/etc/dirsrv/slapd-LDAP-EXAMPLE-COM',nickname='Server-Cert',token='NSS Certificate DB',pinfile='/etc/dirsrv/slapd-LDAP-EXAMPLE-COM/pwdfile.txt'",
'certificate': {
'type': 'NSSDB',
'location': '/etc/dirsrv/slapd-LDAP-EXAMPLE-COM',
'nickname': 'Server-Cert',
'token': 'NSS Certificate DB',
},
'CA': 'IPA',
'issuer': 'CN=Certificate Authority,O=LDAP.EXAMPLE.COM',
'subject': 'CN=master.LDAP.EXAMPLE.COM,O=LDAP.EXAMPLE.COM',
'expires': '2017-06-28 12:52:12 UTC',
'eku': 'id-kp-serverAuth,id-kp-clientAuth',
'pre-save command': '',
'post-save command': '/usr/lib64/ipa/certmonger/restart_dirsrv LDAP-EXAMPLE-COM',
'track': 'yes',
'auto-renew': 'yes',
}, {
'status': 'MONITORING',
'stuck': 'no',
'key pair storage': "type=NSSDB,location='/etc/dirsrv/slapd-PKI-IPA',nickname='Server-Cert',token='NSS Certificate DB',pinfile='/etc/dirsrv/slapd-PKI-IPA/pwdfile.txt'",
'certificate': {
'type': 'NSSDB',
'location': '/etc/dirsrv/slapd-PKI-IPA',
'nickname': 'Server-Cert',
'token': 'NSS Certificate DB',
},
'CA': 'IPA',
'issuer': 'CN=Certificate Authority,O=EXAMPLE.COM',
'subject': 'CN=ldap.EXAMPLE.COM,O=EXAMPLE.COM',
'expires': '2017-06-28 12:52:13 UTC',
'eku': 'id-kp-serverAuth,id-kp-clientAuth',
'pre-save command': '',
'post-save command': '/usr/lib64/ipa/certmonger/restart_dirsrv PKI-IPA',
'track': 'yes',
'auto-renew': 'yes',
'dash- space': 'tested',
}
]
def test_keyword_search():
# No keywords, no result
assert len(keyword_search(DATA_LIST)) == 0
# Search on absent keywords produces empty list
assert keyword_search(DATA_LIST, cpu_count=4) == []
# Search on present but non-matching keyword produces empty list
assert keyword_search(DATA_LIST, memory_gb=8) == []
# Single result - search on string
results = keyword_search(DATA_LIST, role='embedded')
assert len(results) == 1
assert results[0] == DATA_LIST[3]
# Multiple results, name has underscore - search on integer
results = keyword_search(DATA_LIST, memory_gb=16)
assert len(results) == 3
assert results == [DATA_LIST[i] for i in (0, 2, 4)]
# Search on boolean
results = keyword_search(DATA_LIST, ssd=False)
assert len(results) == 3
assert results == [DATA_LIST[i] for i in (1, 2, 3)]
# No data, no results.
assert len(keyword_search([], role='server')) == 0
# Search with contains
results = keyword_search(DATA_LIST, role__contains='e')
assert len(results) == 4
assert results == [DATA_LIST[i] for i in (0, 1, 2, 3)]
# Search with startswith
results = keyword_search(DATA_LIST, role__startswith='e')
assert len(results) == 1
assert results[0] == DATA_LIST[3]
# Search for multiple keys, with spaces and dashes, and search operators
results = keyword_search(
CERT_LIST,
pre_save_command='',
key_pair_storage__startswith="type=NSSDB,location='/etc/dirsrv/slapd-PKI-IPA'"
)
assert len(results) == 1
assert results[0] == CERT_LIST[1]
# Make sure contains can also apply to keys with dashes and spaces
results = keyword_search(
CERT_LIST,
post_save_command__contains='PKI-IPA',
)
assert len(results) == 1
assert results[0] == CERT_LIST[1]
# Lower case value matching
results = keyword_search(
CERT_LIST,
status__lower_value='Monitoring',
)
assert len(results) == 2
assert results == CERT_LIST
# Check that searches for keys with two underscores that aren't matcher
# suffixes still work
results = keyword_search(
CERT_LIST,
dash__space='tested',
)
assert len(results) == 1
assert results[0] == CERT_LIST[1]
# Check that we can use contains to check the contents of a dictionary
# in a value
results = keyword_search(
CERT_LIST,
certificate__contains='type'
)
assert len(results) == 2
assert results == CERT_LIST
assert keyword_search(
CERT_LIST,
certificate__contains='encryption'
) == []
PS_LIST = [
{'PID': '692', 'PPID': '2', 'COMMAND': 'kdmflush', '_line': ' 692 2 kdmflush'},
{'PID': '701', 'PPID': '2', 'COMMAND': 'kdmflush', '_line': ' 701 2 kdmflush'},
{'PID': '725', 'PPID': '2', 'COMMAND': 'xfsalloc', '_line': ' 725 2 xfsalloc'},
{'PID': '726', 'PPID': '2', 'COMMAND': None, '_line': ' 726 2 grep -F xx'},
]
def test_keyword_search_None():
# Normal search
assert keyword_search(PS_LIST, COMMAND__default=None)[0]['PID'] == '726'
assert keyword_search(PS_LIST, _line__contains='alloc')[0]['PID'] == '725'
assert keyword_search(PS_LIST, COMMAND__startswith='xfs')[0]['PID'] == '725'
assert len(keyword_search(PS_LIST, COMMAND__lower_value='KDMFLUSH')) == 2
# Check that searches for non-existing keys
assert keyword_search(PS_LIST, NONE__default=None) == []
assert keyword_search(PS_LIST, NONE__startswith='xfs') == []
def test_parse_exception():
with pytest.raises(ParseException) as e_info:
raise ParseException('This is a parse exception')
assert 'This is a parse exception' == str(e_info.value)
def test_skip_exception():
with pytest.raises(SkipException) as e_info:
raise SkipException('This is a skip exception')
assert 'This is a skip exception' == str(e_info.value)
|
"""
@date 2014-11-16
@author Hong-She Liang <starofrainnight@gmail.com>
"""
from selenium.common.exceptions import *
|
"""
Runs either `.fit()` or `.test()` on a single node across multiple gpus.
"""
import os
from argparse import ArgumentParser
import torch
from pytorch_lightning import seed_everything, Trainer
from tests.helpers.datamodules import ClassifDataModule
from tests.helpers.simple_models import ClassificationModel
def main():
seed_everything(4321)
parser = ArgumentParser(add_help=False)
parser = Trainer.add_argparse_args(parser)
parser.add_argument("--trainer_method", default="fit")
parser.add_argument("--tmpdir")
parser.add_argument("--workdir")
parser.set_defaults(gpus=2)
parser.set_defaults(accelerator="ddp")
args = parser.parse_args()
dm = ClassifDataModule()
model = ClassificationModel()
trainer = Trainer.from_argparse_args(args)
if args.trainer_method == "fit":
trainer.fit(model, datamodule=dm)
result = None
elif args.trainer_method == "test":
result = trainer.test(model, datamodule=dm)
elif args.trainer_method == "fit_test":
trainer.fit(model, datamodule=dm)
result = trainer.test(model, datamodule=dm)
else:
raise ValueError(f"Unsupported: {args.trainer_method}")
result_ext = {"status": "complete", "method": args.trainer_method, "result": result}
file_path = os.path.join(args.tmpdir, "ddp.result")
torch.save(result_ext, file_path)
if __name__ == "__main__":
main()
|
import csv
import os
import color
def _GetDataDirPath():
return os.path.join(os.path.dirname(__file__), 'data')
def _GetCsvPath():
return os.path.join(_GetDataDirPath(), 'dmccolors.csv')
def _GetCsvString():
with open(_GetCsvPath()) as f:
return f.read().strip()
def _CreateDmcColorFromRow(row):
number = int(row[0])
name = row[1]
hex_color = row[5]
rgb_color = color.RGBColorFromHexString(hex_color)
return DMCColor(number, name, rgb_color)
_dmc_colors = None
def _CreateDMCColors():
global _dmc_colors
csv_data = _GetCsvString()
lines = csv_data.splitlines()
# Skip first line
lines = lines[1:]
reader = csv.reader(lines, delimiter='\t')
dmc_colors = set()
for row in reader:
dmc_colors.add(_CreateDmcColorFromRow(row))
return dmc_colors
def GetDMCColors():
global _dmc_colors
if not _dmc_colors:
_dmc_colors = frozenset(_CreateDMCColors())
return _dmc_colors
def GetClosestDMCColorsPairs(rgb_color):
pairs = list()
for dcolor in GetDMCColors():
pairs.append((dcolor, color.RGBColor.distance(rgb_color, dcolor.color)))
return sorted(pairs, key=lambda pair: pair[1])
def GetClosestDMCColors(rgb_color):
return [pair[0] for pair in GetClosestDMCColorsPairs(rgb_color)]
class DMCColor(object):
def __init__(self, number, name, color):
self.number = number
self.name = name
self.color = color
def __str__(self):
return super(DMCColor, self).__str__() + str((self.number, self.name, self.color))
def GetStringForDMCColor(dmc_color):
return "%s %s %s" % (dmc_color.number, dmc_color.name, dmc_color.color)
def main():
    for dmc_color in GetDMCColors():  # avoid shadowing the imported color module
        print(dmc_color)
if __name__ == '__main__':
main()
|
"""A simple memcache-like server.
The basic data structure maintained is a single in-memory dictionary
mapping string keys to string values, with operations get, set and
delete. (Both keys and values may contain Unicode.)
This is a TCP server listening on port 54321. There is no
authentication.
Requests provide an operation and return a response. A connection may
be used for multiple requests. The connection is closed when a client
sends a bad request.
If a client is idle for over 5 seconds (i.e., it does not send another
request, or fails to read the whole response, within this time), it is
disconnected.
Framing of requests and responses within a connection uses a
line-based protocol. The first line of a request is the frame header
and contains three whitespace-delimited tokens followed by LF or CRLF:
- the keyword 'request'
- a decimal request ID; the first request is '1', the second '2', etc.
- a decimal byte count giving the size of the rest of the request
Note that the request IDs *must* be consecutive and start at '1' for
each connection.
Response frames look the same except the keyword is 'response'. The
response ID matches the request ID. There should be exactly one
response to each request and responses should be seen in the same
order as the requests.
After the frame, individual requests and responses are JSON encoded.
If the frame header or the JSON request body cannot be parsed, an
unframed error message (always starting with 'error') is written back
and the connection is closed.
JSON-encoded requests can be:
- {"type": "get", "key": <string>}
- {"type": "set", "key": <string>, "value": <string>}
- {"type": "delete", "key": <string>}
Responses are also JSON-encoded:
- {"status": "ok", "value": <string>} # Successful get request
- {"status": "ok"} # Successful set or delete request
- {"status": "notfound"} # Key not found for get or delete request
If the request is valid JSON but cannot be handled (e.g., the type or
key field is absent or invalid), an error response of the following
form is returned, but the connection is not closed:
- {"error": <string>}
"""
import argparse
import asyncio
import json
import logging
import os
import random
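# Illustrative sketch (an assumption for demonstration; not used by the server):
# one frame is a 'request <id> <byte count>' header line followed by the JSON
# body, exactly as the module docstring above describes.
def _example_request_frame(request_id, payload):
    """Return the framed bytes for one request, mirroring how responses are
    framed in Cache.frame_parser() below (the body is the JSON text plus a
    trailing CRLF, and the byte count covers that whole body)."""
    body = json.dumps(payload).encode('utf8') + b'\r\n'
    header = 'request {} {}\r\n'.format(request_id, len(body))
    return header.encode('ascii') + body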
ARGS = argparse.ArgumentParser(description='Cache server example.')
ARGS.add_argument(
'--tls', action='store_true', dest='tls',
default=False, help='Use TLS')
ARGS.add_argument(
'--iocp', action='store_true', dest='iocp',
default=False, help='Use IOCP event loop (Windows only)')
ARGS.add_argument(
'--host', action='store', dest='host',
default='localhost', help='Host name')
ARGS.add_argument(
'--port', action='store', dest='port',
default=54321, type=int, help='Port number')
ARGS.add_argument(
'--timeout', action='store', dest='timeout',
default=5, type=float, help='Timeout')
ARGS.add_argument(
'--random_failure_percent', action='store', dest='fail_percent',
default=0, type=float, help='Fail randomly N percent of the time')
ARGS.add_argument(
'--random_failure_sleep', action='store', dest='fail_sleep',
default=0, type=float, help='Sleep time when randomly failing')
ARGS.add_argument(
'--random_response_sleep', action='store', dest='resp_sleep',
default=0, type=float, help='Sleep time before responding')
args = ARGS.parse_args()
class Cache:
def __init__(self, loop):
self.loop = loop
self.table = {}
@asyncio.coroutine
def handle_client(self, reader, writer):
# Wrapper to log stuff and close writer (i.e., transport).
peer = writer.get_extra_info('socket').getpeername()
logging.info('got a connection from %s', peer)
try:
yield from self.frame_parser(reader, writer)
except Exception as exc:
logging.error('error %r from %s', exc, peer)
else:
logging.info('end connection from %s', peer)
finally:
writer.close()
@asyncio.coroutine
def frame_parser(self, reader, writer):
# This takes care of the framing.
last_request_id = 0
while True:
# Read the frame header, parse it, read the data.
# NOTE: The readline() and readexactly() calls will hang
# if the client doesn't send enough data but doesn't
# disconnect either. We add a timeout to each. (But the
# timeout should really be implemented by StreamReader.)
framing_b = yield from asyncio.wait_for(
reader.readline(),
timeout=args.timeout, loop=self.loop)
if random.random()*100 < args.fail_percent:
logging.warn('Inserting random failure')
yield from asyncio.sleep(args.fail_sleep*random.random(),
loop=self.loop)
writer.write(b'error random failure\r\n')
break
logging.debug('framing_b = %r', framing_b)
if not framing_b:
break # Clean close.
try:
frame_keyword, request_id_b, byte_count_b = framing_b.split()
except ValueError:
writer.write(b'error unparseable frame\r\n')
break
if frame_keyword != b'request':
writer.write(b'error frame does not start with request\r\n')
break
try:
request_id, byte_count = int(request_id_b), int(byte_count_b)
except ValueError:
writer.write(b'error unparsable frame parameters\r\n')
break
if request_id != last_request_id + 1 or byte_count < 2:
writer.write(b'error invalid frame parameters\r\n')
break
last_request_id = request_id
request_b = yield from asyncio.wait_for(
reader.readexactly(byte_count),
timeout=args.timeout, loop=self.loop)
try:
request = json.loads(request_b.decode('utf8'))
except ValueError:
writer.write(b'error unparsable json\r\n')
break
response = self.handle_request(request) # Not a coroutine.
if response is None:
writer.write(b'error unhandlable request\r\n')
break
response_b = json.dumps(response).encode('utf8') + b'\r\n'
byte_count = len(response_b)
framing_s = 'response {} {}\r\n'.format(request_id, byte_count)
writer.write(framing_s.encode('ascii'))
yield from asyncio.sleep(args.resp_sleep*random.random(),
loop=self.loop)
writer.write(response_b)
def handle_request(self, request):
# This parses one request and farms it out to a specific handler.
# Return None for all errors.
if not isinstance(request, dict):
return {'error': 'request is not a dict'}
request_type = request.get('type')
if request_type is None:
return {'error': 'no type in request'}
if request_type not in {'get', 'set', 'delete'}:
return {'error': 'unknown request type'}
key = request.get('key')
if not isinstance(key, str):
return {'error': 'key is not a string'}
if request_type == 'get':
return self.handle_get(key)
if request_type == 'set':
value = request.get('value')
if not isinstance(value, str):
return {'error': 'value is not a string'}
return self.handle_set(key, value)
if request_type == 'delete':
return self.handle_delete(key)
assert False, 'bad request type' # Should have been caught above.
def handle_get(self, key):
value = self.table.get(key)
if value is None:
return {'status': 'notfound'}
else:
return {'status': 'ok', 'value': value}
def handle_set(self, key, value):
self.table[key] = value
return {'status': 'ok'}
def handle_delete(self, key):
if key not in self.table:
return {'status': 'notfound'}
else:
del self.table[key]
return {'status': 'ok'}
def main():
asyncio.set_event_loop(None)
if args.iocp:
from asyncio.windows_events import ProactorEventLoop
loop = ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
sslctx = None
if args.tls:
import ssl
# TODO: take cert/key from args as well.
here = os.path.join(os.path.dirname(__file__), '..', 'tests')
sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslctx.options |= ssl.OP_NO_SSLv2
sslctx.load_cert_chain(
certfile=os.path.join(here, 'ssl_cert.pem'),
keyfile=os.path.join(here, 'ssl_key.pem'))
cache = Cache(loop)
task = asyncio.streams.start_server(cache.handle_client,
args.host, args.port,
ssl=sslctx, loop=loop)
svr = loop.run_until_complete(task)
for sock in svr.sockets:
logging.info('socket %s', sock.getsockname())
loop.run_forever()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()
|
import operator
from pyspark import since, keyword_only
from pyspark.ml import Estimator, Model
from pyspark.ml.param.shared import *
from pyspark.ml.regression import DecisionTreeModel, DecisionTreeRegressionModel, \
RandomForestParams, TreeEnsembleModel, TreeEnsembleParams
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams
from pyspark.ml.wrapper import JavaWrapper
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame
from pyspark.sql.functions import udf, when
from pyspark.sql.types import ArrayType, DoubleType
from pyspark.storagelevel import StorageLevel
__all__ = ['LinearSVC', 'LinearSVCModel',
'LogisticRegression', 'LogisticRegressionModel',
'LogisticRegressionSummary', 'LogisticRegressionTrainingSummary',
'BinaryLogisticRegressionSummary', 'BinaryLogisticRegressionTrainingSummary',
'DecisionTreeClassifier', 'DecisionTreeClassificationModel',
'GBTClassifier', 'GBTClassificationModel',
'RandomForestClassifier', 'RandomForestClassificationModel',
'NaiveBayes', 'NaiveBayesModel',
'MultilayerPerceptronClassifier', 'MultilayerPerceptronClassificationModel',
'OneVsRest', 'OneVsRestModel']
@inherit_doc
class JavaClassificationModel(JavaPredictionModel):
"""
(Private) Java Model produced by a ``Classifier``.
Classes are indexed {0, 1, ..., numClasses - 1}.
    To be mixed in with :class:`pyspark.ml.JavaModel`
"""
@property
@since("2.1.0")
def numClasses(self):
"""
Number of classes (values which the label can take).
"""
return self._call_java("numClasses")
@inherit_doc
class LinearSVC(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter,
HasRegParam, HasTol, HasRawPredictionCol, HasFitIntercept, HasStandardization,
HasWeightCol, HasAggregationDepth, JavaMLWritable, JavaMLReadable):
"""
.. note:: Experimental
`Linear SVM Classifier <https://en.wikipedia.org/wiki/Support_vector_machine#Linear_SVM>`_
This binary classifier optimizes the Hinge Loss using the OWLQN optimizer.
Only supports L2 regularization currently.
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> df = sc.parallelize([
... Row(label=1.0, features=Vectors.dense(1.0, 1.0, 1.0)),
... Row(label=0.0, features=Vectors.dense(1.0, 2.0, 3.0))]).toDF()
>>> svm = LinearSVC(maxIter=5, regParam=0.01)
>>> model = svm.fit(df)
>>> model.coefficients
DenseVector([0.0, -0.2792, -0.1833])
>>> model.intercept
1.0206118982229047
>>> model.numClasses
2
>>> model.numFeatures
3
>>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, -1.0, -1.0))]).toDF()
>>> result = model.transform(test0).head()
>>> result.prediction
1.0
>>> result.rawPrediction
DenseVector([-1.4831, 1.4831])
>>> svm_path = temp_path + "/svm"
>>> svm.save(svm_path)
>>> svm2 = LinearSVC.load(svm_path)
>>> svm2.getMaxIter()
5
>>> model_path = temp_path + "/svm_model"
>>> model.save(model_path)
>>> model2 = LinearSVCModel.load(model_path)
>>> model.coefficients[0] == model2.coefficients[0]
True
>>> model.intercept == model2.intercept
True
.. versionadded:: 2.2.0
"""
threshold = Param(Params._dummy(), "threshold",
"The threshold in binary classification applied to the linear model"
" prediction. This threshold can be any real number, where Inf will make"
" all predictions 0.0 and -Inf will make all predictions 1.0.",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction",
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None,
aggregationDepth=2):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", \
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, \
aggregationDepth=2):
"""
super(LinearSVC, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.LinearSVC", self.uid)
self._setDefault(maxIter=100, regParam=0.0, tol=1e-6, fitIntercept=True,
standardization=True, threshold=0.0, aggregationDepth=2)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.2.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction",
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None,
aggregationDepth=2):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", \
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, \
aggregationDepth=2):
Sets params for Linear SVM Classifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return LinearSVCModel(java_model)
def setThreshold(self, value):
"""
Sets the value of :py:attr:`threshold`.
"""
return self._set(threshold=value)
def getThreshold(self):
"""
Gets the value of threshold or its default value.
"""
return self.getOrDefault(self.threshold)
class LinearSVCModel(JavaModel, JavaClassificationModel, JavaMLWritable, JavaMLReadable):
"""
.. note:: Experimental
Model fitted by LinearSVC.
.. versionadded:: 2.2.0
"""
@property
@since("2.2.0")
def coefficients(self):
"""
Model coefficients of Linear SVM Classifier.
"""
return self._call_java("coefficients")
@property
@since("2.2.0")
def intercept(self):
"""
Model intercept of Linear SVM Classifier.
"""
return self._call_java("intercept")
@inherit_doc
class LogisticRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter,
HasRegParam, HasTol, HasProbabilityCol, HasRawPredictionCol,
HasElasticNetParam, HasFitIntercept, HasStandardization, HasThresholds,
HasWeightCol, HasAggregationDepth, JavaMLWritable, JavaMLReadable):
"""
Logistic regression.
This class supports multinomial logistic (softmax) and binomial logistic regression.
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> bdf = sc.parallelize([
... Row(label=1.0, weight=1.0, features=Vectors.dense(0.0, 5.0)),
... Row(label=0.0, weight=2.0, features=Vectors.dense(1.0, 2.0)),
... Row(label=1.0, weight=3.0, features=Vectors.dense(2.0, 1.0)),
... Row(label=0.0, weight=4.0, features=Vectors.dense(3.0, 3.0))]).toDF()
>>> blor = LogisticRegression(regParam=0.01, weightCol="weight")
>>> blorModel = blor.fit(bdf)
>>> blorModel.coefficients
DenseVector([-1.080..., -0.646...])
>>> blorModel.intercept
3.112...
>>> data_path = "data/mllib/sample_multiclass_classification_data.txt"
>>> mdf = spark.read.format("libsvm").load(data_path)
>>> mlor = LogisticRegression(regParam=0.1, elasticNetParam=1.0, family="multinomial")
>>> mlorModel = mlor.fit(mdf)
>>> mlorModel.coefficientMatrix
SparseMatrix(3, 4, [0, 1, 2, 3], [3, 2, 1], [1.87..., -2.75..., -0.50...], 1)
>>> mlorModel.interceptVector
DenseVector([0.04..., -0.42..., 0.37...])
>>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, 1.0))]).toDF()
>>> result = blorModel.transform(test0).head()
>>> result.prediction
1.0
>>> result.probability
DenseVector([0.02..., 0.97...])
>>> result.rawPrediction
DenseVector([-3.54..., 3.54...])
>>> test1 = sc.parallelize([Row(features=Vectors.sparse(2, [0], [1.0]))]).toDF()
>>> blorModel.transform(test1).head().prediction
1.0
>>> blor.setParams("vector")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
>>> lr_path = temp_path + "/lr"
>>> blor.save(lr_path)
>>> lr2 = LogisticRegression.load(lr_path)
>>> lr2.getRegParam()
0.01
>>> model_path = temp_path + "/lr_model"
>>> blorModel.save(model_path)
>>> model2 = LogisticRegressionModel.load(model_path)
>>> blorModel.coefficients[0] == model2.coefficients[0]
True
>>> blorModel.intercept == model2.intercept
True
.. versionadded:: 1.3.0
"""
threshold = Param(Params._dummy(), "threshold",
"Threshold in binary classification prediction, in range [0, 1]." +
" If threshold and thresholds are both set, they must match." +
"e.g. if threshold is p, then thresholds must be equal to [1-p, p].",
typeConverter=TypeConverters.toFloat)
family = Param(Params._dummy(), "family",
"The name of family which is a description of the label distribution to " +
"be used in the model. Supported options: auto, binomial, multinomial",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
threshold=0.5, thresholds=None, probabilityCol="probability",
rawPredictionCol="rawPrediction", standardization=True, weightCol=None,
aggregationDepth=2, family="auto"):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
threshold=0.5, thresholds=None, probabilityCol="probability", \
rawPredictionCol="rawPrediction", standardization=True, weightCol=None, \
aggregationDepth=2, family="auto")
If the threshold and thresholds Params are both set, they must be equivalent.
"""
super(LogisticRegression, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.LogisticRegression", self.uid)
self._setDefault(maxIter=100, regParam=0.0, tol=1E-6, threshold=0.5, family="auto")
kwargs = self._input_kwargs
self.setParams(**kwargs)
self._checkThresholdConsistency()
@keyword_only
@since("1.3.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
threshold=0.5, thresholds=None, probabilityCol="probability",
rawPredictionCol="rawPrediction", standardization=True, weightCol=None,
aggregationDepth=2, family="auto"):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
threshold=0.5, thresholds=None, probabilityCol="probability", \
rawPredictionCol="rawPrediction", standardization=True, weightCol=None, \
aggregationDepth=2, family="auto")
Sets params for logistic regression.
If the threshold and thresholds Params are both set, they must be equivalent.
"""
kwargs = self._input_kwargs
self._set(**kwargs)
self._checkThresholdConsistency()
return self
def _create_model(self, java_model):
return LogisticRegressionModel(java_model)
@since("1.4.0")
def setThreshold(self, value):
"""
Sets the value of :py:attr:`threshold`.
Clears value of :py:attr:`thresholds` if it has been set.
"""
self._set(threshold=value)
self._clear(self.thresholds)
return self
@since("1.4.0")
def getThreshold(self):
"""
Get threshold for binary classification.
If :py:attr:`thresholds` is set with length 2 (i.e., binary classification),
this returns the equivalent threshold:
:math:`\\frac{1}{1 + \\frac{thresholds(0)}{thresholds(1)}}`.
Otherwise, returns :py:attr:`threshold` if set or its default value if unset.
"""
self._checkThresholdConsistency()
if self.isSet(self.thresholds):
ts = self.getOrDefault(self.thresholds)
if len(ts) != 2:
raise ValueError("Logistic Regression getThreshold only applies to" +
" binary classification, but thresholds has length != 2." +
" thresholds: " + ",".join(ts))
return 1.0/(1.0 + ts[0]/ts[1])
else:
return self.getOrDefault(self.threshold)
@since("1.5.0")
def setThresholds(self, value):
"""
Sets the value of :py:attr:`thresholds`.
Clears value of :py:attr:`threshold` if it has been set.
"""
self._set(thresholds=value)
self._clear(self.threshold)
return self
@since("1.5.0")
def getThresholds(self):
"""
If :py:attr:`thresholds` is set, return its value.
Otherwise, if :py:attr:`threshold` is set, return the equivalent thresholds for binary
classification: (1-threshold, threshold).
If neither are set, throw an error.
"""
self._checkThresholdConsistency()
if not self.isSet(self.thresholds) and self.isSet(self.threshold):
t = self.getOrDefault(self.threshold)
return [1.0-t, t]
else:
return self.getOrDefault(self.thresholds)
def _checkThresholdConsistency(self):
if self.isSet(self.threshold) and self.isSet(self.thresholds):
ts = self.getOrDefault(self.thresholds)
if len(ts) != 2:
raise ValueError("Logistic Regression getThreshold only applies to" +
" binary classification, but thresholds has length != 2." +
" thresholds: {0}".format(str(ts)))
t = 1.0/(1.0 + ts[0]/ts[1])
t2 = self.getOrDefault(self.threshold)
if abs(t2 - t) >= 1E-5:
raise ValueError("Logistic Regression getThreshold found inconsistent values for" +
" threshold (%g) and thresholds (equivalent to %g)" % (t2, t))
@since("2.1.0")
def setFamily(self, value):
"""
Sets the value of :py:attr:`family`.
"""
return self._set(family=value)
@since("2.1.0")
def getFamily(self):
"""
Gets the value of :py:attr:`family` or its default value.
"""
return self.getOrDefault(self.family)
class LogisticRegressionModel(JavaModel, JavaClassificationModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by LogisticRegression.
.. versionadded:: 1.3.0
"""
@property
@since("2.0.0")
def coefficients(self):
"""
Model coefficients of binomial logistic regression.
An exception is thrown in the case of multinomial logistic regression.
"""
return self._call_java("coefficients")
@property
@since("1.4.0")
def intercept(self):
"""
Model intercept of binomial logistic regression.
An exception is thrown in the case of multinomial logistic regression.
"""
return self._call_java("intercept")
@property
@since("2.1.0")
def coefficientMatrix(self):
"""
Model coefficients.
"""
return self._call_java("coefficientMatrix")
@property
@since("2.1.0")
def interceptVector(self):
"""
Model intercept.
"""
return self._call_java("interceptVector")
@property
@since("2.0.0")
def summary(self):
"""
Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
trained on the training set. An exception is thrown if `trainingSummary is None`.
"""
if self.hasSummary:
java_blrt_summary = self._call_java("summary")
# Note: Once multiclass is added, update this to return correct summary
return BinaryLogisticRegressionTrainingSummary(java_blrt_summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@property
@since("2.0.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model
instance.
"""
return self._call_java("hasSummary")
@since("2.0.0")
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
:param dataset:
Test dataset to evaluate model on, where dataset is an
instance of :py:class:`pyspark.sql.DataFrame`
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_blr_summary = self._call_java("evaluate", dataset)
return BinaryLogisticRegressionSummary(java_blr_summary)
class LogisticRegressionSummary(JavaWrapper):
"""
.. note:: Experimental
Abstraction for Logistic Regression Results for a given model.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def predictions(self):
"""
DataFrame produced by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.0.0")
def probabilityCol(self):
"""
Field in "predictions" which gives the probability
of each class as a vector.
"""
return self._call_java("probabilityCol")
@property
@since("2.0.0")
def labelCol(self):
"""
Field in "predictions" which gives the true label of each
instance.
"""
return self._call_java("labelCol")
@property
@since("2.0.0")
def featuresCol(self):
"""
Field in "predictions" which gives the features of each instance
as a vector.
"""
return self._call_java("featuresCol")
@inherit_doc
class LogisticRegressionTrainingSummary(LogisticRegressionSummary):
"""
.. note:: Experimental
Abstraction for multinomial Logistic Regression Training results.
Currently, the training summary ignores the training weights except
for the objective trace.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def objectiveHistory(self):
"""
Objective function (scaled loss + regularization) at each
iteration.
"""
return self._call_java("objectiveHistory")
@property
@since("2.0.0")
def totalIterations(self):
"""
Number of training iterations until termination.
"""
return self._call_java("totalIterations")
@inherit_doc
class BinaryLogisticRegressionSummary(LogisticRegressionSummary):
"""
.. note:: Experimental
Binary Logistic regression results for a given model.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def roc(self):
"""
Returns the receiver operating characteristic (ROC) curve,
which is a DataFrame with two fields (FPR, TPR), with
(0.0, 0.0) prepended and (1.0, 1.0) appended to it.
.. seealso:: `Wikipedia reference \
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
.. note:: This ignores instance weights (setting all to 1.0) from
`LogisticRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("roc")
@property
@since("2.0.0")
def areaUnderROC(self):
"""
Computes the area under the receiver operating characteristic
(ROC) curve.
.. note:: This ignores instance weights (setting all to 1.0) from
`LogisticRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("areaUnderROC")
@property
@since("2.0.0")
def pr(self):
"""
Returns the precision-recall curve, which is a DataFrame
containing two fields (recall, precision), with (0.0, 1.0) prepended
to it.
.. note:: This ignores instance weights (setting all to 1.0) from
`LogisticRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("pr")
@property
@since("2.0.0")
def fMeasureByThreshold(self):
"""
Returns a DataFrame with two fields (threshold, F-Measure),
computed with beta = 1.0.
.. note:: This ignores instance weights (setting all to 1.0) from
`LogisticRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("fMeasureByThreshold")
@property
@since("2.0.0")
def precisionByThreshold(self):
"""
Returns a DataFrame with two fields (threshold, precision).
Every possible probability obtained when transforming the dataset
is used as a threshold for calculating the precision.
.. note:: This ignores instance weights (setting all to 1.0) from
`LogisticRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("precisionByThreshold")
@property
@since("2.0.0")
def recallByThreshold(self):
"""
Returns a DataFrame with two fields (threshold, recall).
Every possible probability obtained when transforming the dataset
is used as a threshold for calculating the recall.
.. note:: This ignores instance weights (setting all to 1.0) from
`LogisticRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("recallByThreshold")
@inherit_doc
class BinaryLogisticRegressionTrainingSummary(BinaryLogisticRegressionSummary,
LogisticRegressionTrainingSummary):
"""
.. note:: Experimental
Binary Logistic regression training results for a given model.
.. versionadded:: 2.0.0
"""
pass
class TreeClassifierParams(object):
"""
Private class to track supported impurity measures.
.. versionadded:: 1.4.0
"""
supportedImpurities = ["entropy", "gini"]
impurity = Param(Params._dummy(), "impurity",
"Criterion used for information gain calculation (case-insensitive). " +
"Supported options: " +
", ".join(supportedImpurities), typeConverter=TypeConverters.toString)
def __init__(self):
super(TreeClassifierParams, self).__init__()
@since("1.6.0")
def setImpurity(self, value):
"""
Sets the value of :py:attr:`impurity`.
"""
return self._set(impurity=value)
@since("1.6.0")
def getImpurity(self):
"""
Gets the value of impurity or its default value.
"""
return self.getOrDefault(self.impurity)
class GBTParams(TreeEnsembleParams):
"""
Private class to track supported GBT params.
.. versionadded:: 1.4.0
"""
supportedLossTypes = ["logistic"]
@inherit_doc
class DecisionTreeClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
HasProbabilityCol, HasRawPredictionCol, DecisionTreeParams,
TreeClassifierParams, HasCheckpointInterval, HasSeed, JavaMLWritable,
JavaMLReadable):
"""
`Decision tree <http://en.wikipedia.org/wiki/Decision_tree_learning>`_
learning algorithm for classification.
It supports both binary and multiclass labels, as well as both continuous and categorical
features.
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.feature import StringIndexer
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model = stringIndexer.fit(df)
>>> td = si_model.transform(df)
>>> dt = DecisionTreeClassifier(maxDepth=2, labelCol="indexed")
>>> model = dt.fit(td)
>>> model.numNodes
3
>>> model.depth
1
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> model.numFeatures
1
>>> model.numClasses
2
>>> print(model.toDebugString)
DecisionTreeClassificationModel (uid=...) of depth 1 with 3 nodes...
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> result.probability
DenseVector([1.0, 0.0])
>>> result.rawPrediction
DenseVector([1.0, 0.0])
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> dtc_path = temp_path + "/dtc"
>>> dt.save(dtc_path)
>>> dt2 = DecisionTreeClassifier.load(dtc_path)
>>> dt2.getMaxDepth()
2
>>> model_path = temp_path + "/dtc_model"
>>> model.save(model_path)
>>> model2 = DecisionTreeClassificationModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
.. versionadded:: 1.4.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini",
seed=None):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
seed=None)
"""
super(DecisionTreeClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.DecisionTreeClassifier", self.uid)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="gini")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="gini", seed=None):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
seed=None)
Sets params for the DecisionTreeClassifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return DecisionTreeClassificationModel(java_model)
@inherit_doc
class DecisionTreeClassificationModel(DecisionTreeModel, JavaClassificationModel, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by DecisionTreeClassifier.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def featureImportances(self):
"""
Estimate of the importance of each feature.
This generalizes the idea of "Gini" importance to other losses,
following the explanation of Gini importance from "Random Forests" documentation
by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn.
This feature importance is calculated as follows:
- importance(feature j) = sum (over nodes which split on feature j) of the gain,
where the gain is scaled by the number of instances passing through the node
- Normalize the importances for the tree to sum to 1.
.. note:: Feature importance for single decision trees can have high variance due to
correlated predictor variables. Consider using a :py:class:`RandomForestClassifier`
to determine feature importance instead.
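For example, the depth-1 tree fitted in the class doctest above assigns all
importance to its single feature: SparseVector(1, {0: 1.0}).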
"""
return self._call_java("featureImportances")
@inherit_doc
class RandomForestClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasSeed,
HasRawPredictionCol, HasProbabilityCol,
RandomForestParams, TreeClassifierParams, HasCheckpointInterval,
JavaMLWritable, JavaMLReadable):
"""
`Random Forest <http://en.wikipedia.org/wiki/Random_forest>`_
learning algorithm for classification.
It supports both binary and multiclass labels, as well as both continuous and categorical
features.
>>> import numpy
>>> from numpy import allclose
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.feature import StringIndexer
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model = stringIndexer.fit(df)
>>> td = si_model.transform(df)
>>> rf = RandomForestClassifier(numTrees=3, maxDepth=2, labelCol="indexed", seed=42)
>>> model = rf.fit(td)
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> allclose(model.treeWeights, [1.0, 1.0, 1.0])
True
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> numpy.argmax(result.probability)
0
>>> numpy.argmax(result.rawPrediction)
0
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> model.trees
[DecisionTreeClassificationModel (uid=...) of depth..., DecisionTreeClassificationModel...]
>>> rfc_path = temp_path + "/rfc"
>>> rf.save(rfc_path)
>>> rf2 = RandomForestClassifier.load(rfc_path)
>>> rf2.getNumTrees()
3
>>> model_path = temp_path + "/rfc_model"
>>> model.save(model_path)
>>> model2 = RandomForestClassificationModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
.. versionadded:: 1.4.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini",
numTrees=20, featureSubsetStrategy="auto", seed=None, subsamplingRate=1.0):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
numTrees=20, featureSubsetStrategy="auto", seed=None, subsamplingRate=1.0)
"""
super(RandomForestClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.RandomForestClassifier", self.uid)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="gini", numTrees=20, featureSubsetStrategy="auto",
subsamplingRate=1.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, seed=None,
impurity="gini", numTrees=20, featureSubsetStrategy="auto", subsamplingRate=1.0):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, seed=None, \
impurity="gini", numTrees=20, featureSubsetStrategy="auto", subsamplingRate=1.0)
Sets params for the RandomForestClassifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return RandomForestClassificationModel(java_model)
class RandomForestClassificationModel(TreeEnsembleModel, JavaClassificationModel, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by RandomForestClassifier.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def featureImportances(self):
"""
Estimate of the importance of each feature.
Each feature's importance is the average of its importance across all trees in the ensemble.
The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
(Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
and follows the implementation from scikit-learn.
.. seealso:: :py:attr:`DecisionTreeClassificationModel.featureImportances`
"""
return self._call_java("featureImportances")
@property
@since("2.0.0")
def trees(self):
"""Trees in this ensemble. Warning: These have null parent Estimators."""
return [DecisionTreeClassificationModel(m) for m in list(self._call_java("trees"))]
@inherit_doc
class GBTClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter,
GBTParams, HasCheckpointInterval, HasStepSize, HasSeed, JavaMLWritable,
JavaMLReadable):
"""
`Gradient-Boosted Trees (GBTs) <http://en.wikipedia.org/wiki/Gradient_boosting>`_
learning algorithm for classification.
It supports binary labels, as well as both continuous and categorical features.
The implementation is based upon: J.H. Friedman. "Stochastic Gradient Boosting." 1999.
Notes on Gradient Boosting vs. TreeBoost:
- This implementation is for Stochastic Gradient Boosting, not for TreeBoost.
- Both algorithms learn tree ensembles by minimizing loss functions.
- TreeBoost (Friedman, 1999) additionally modifies the outputs at tree leaf nodes
based on the loss function, whereas the original gradient boosting method does not.
- We expect to implement TreeBoost in the future:
`SPARK-4240 <https://issues.apache.org/jira/browse/SPARK-4240>`_
.. note:: Multiclass labels are not currently supported.
>>> from numpy import allclose
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.feature import StringIndexer
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model = stringIndexer.fit(df)
>>> td = si_model.transform(df)
>>> gbt = GBTClassifier(maxIter=5, maxDepth=2, labelCol="indexed", seed=42)
>>> model = gbt.fit(td)
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> allclose(model.treeWeights, [1.0, 0.1, 0.1, 0.1, 0.1])
True
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.transform(test0).head().prediction
0.0
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> model.totalNumNodes
15
>>> print(model.toDebugString)
GBTClassificationModel (uid=...)...with 5 trees...
>>> gbtc_path = temp_path + "/gbtc"
>>> gbt.save(gbtc_path)
>>> gbt2 = GBTClassifier.load(gbtc_path)
>>> gbt2.getMaxDepth()
2
>>> model_path = temp_path + "/gbtc_model"
>>> model.save(model_path)
>>> model2 = GBTClassificationModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
>>> model.treeWeights == model2.treeWeights
True
>>> model.trees
[DecisionTreeRegressionModel (uid=...) of depth..., DecisionTreeRegressionModel...]
.. versionadded:: 1.4.0
"""
lossType = Param(Params._dummy(), "lossType",
"Loss function which GBT tries to minimize (case-insensitive). " +
"Supported options: " + ", ".join(GBTParams.supportedLossTypes),
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, lossType="logistic",
maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0)
"""
super(GBTClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.GBTClassifier", self.uid)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
lossType="logistic", maxIter=20, stepSize=0.1, subsamplingRate=1.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0)
Sets params for Gradient Boosted Tree Classification.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return GBTClassificationModel(java_model)
@since("1.4.0")
def setLossType(self, value):
"""
Sets the value of :py:attr:`lossType`.
"""
return self._set(lossType=value)
@since("1.4.0")
def getLossType(self):
"""
Gets the value of lossType or its default value.
"""
return self.getOrDefault(self.lossType)
class GBTClassificationModel(TreeEnsembleModel, JavaPredictionModel, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by GBTClassifier.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def featureImportances(self):
"""
Estimate of the importance of each feature.
Each feature's importance is the average of its importance across all trees in the ensemble.
The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
(Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
and follows the implementation from scikit-learn.
.. seealso:: :py:attr:`DecisionTreeClassificationModel.featureImportances`
"""
return self._call_java("featureImportances")
@property
@since("2.0.0")
def trees(self):
"""Trees in this ensemble. Warning: These have null parent Estimators."""
return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]
@inherit_doc
class NaiveBayes(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasProbabilityCol,
HasRawPredictionCol, HasThresholds, HasWeightCol, JavaMLWritable, JavaMLReadable):
"""
Naive Bayes Classifiers.
It supports both Multinomial and Bernoulli NB. `Multinomial NB
<http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html>`_
can handle finitely supported discrete data. For example, by converting documents into
TF-IDF vectors, it can be used for document classification. By converting every vector
to binary (0/1) data, it can also be used as `Bernoulli NB
<http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html>`_.
The input feature values must be nonnegative.
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... Row(label=0.0, weight=0.1, features=Vectors.dense([0.0, 0.0])),
... Row(label=0.0, weight=0.5, features=Vectors.dense([0.0, 1.0])),
... Row(label=1.0, weight=1.0, features=Vectors.dense([1.0, 0.0]))])
>>> nb = NaiveBayes(smoothing=1.0, modelType="multinomial", weightCol="weight")
>>> model = nb.fit(df)
>>> model.pi
DenseVector([-0.81..., -0.58...])
>>> model.theta
DenseMatrix(2, 2, [-0.91..., -0.51..., -0.40..., -1.09...], 1)
>>> test0 = sc.parallelize([Row(features=Vectors.dense([1.0, 0.0]))]).toDF()
>>> result = model.transform(test0).head()
>>> result.prediction
1.0
>>> result.probability
DenseVector([0.32..., 0.67...])
>>> result.rawPrediction
DenseVector([-1.72..., -0.99...])
>>> test1 = sc.parallelize([Row(features=Vectors.sparse(2, [0], [1.0]))]).toDF()
>>> model.transform(test1).head().prediction
1.0
>>> nb_path = temp_path + "/nb"
>>> nb.save(nb_path)
>>> nb2 = NaiveBayes.load(nb_path)
>>> nb2.getSmoothing()
1.0
>>> model_path = temp_path + "/nb_model"
>>> model.save(model_path)
>>> model2 = NaiveBayesModel.load(model_path)
>>> model.pi == model2.pi
True
>>> model.theta == model2.theta
True
>>> nb = nb.setThresholds([0.01, 10.00])
>>> model3 = nb.fit(df)
>>> result = model3.transform(test0).head()
>>> result.prediction
0.0
.. versionadded:: 1.5.0
"""
smoothing = Param(Params._dummy(), "smoothing", "The smoothing parameter, should be >= 0, " +
"default is 1.0", typeConverter=TypeConverters.toFloat)
modelType = Param(Params._dummy(), "modelType", "The model type which is a string " +
"(case-sensitive). Supported options: multinomial (default) and bernoulli.",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0,
modelType="multinomial", thresholds=None, weightCol=None):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0, \
modelType="multinomial", thresholds=None, weightCol=None)
"""
super(NaiveBayes, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.NaiveBayes", self.uid)
self._setDefault(smoothing=1.0, modelType="multinomial")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.5.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0,
modelType="multinomial", thresholds=None, weightCol=None):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0, \
modelType="multinomial", thresholds=None, weightCol=None)
Sets params for Naive Bayes.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return NaiveBayesModel(java_model)
@since("1.5.0")
def setSmoothing(self, value):
"""
Sets the value of :py:attr:`smoothing`.
"""
return self._set(smoothing=value)
@since("1.5.0")
def getSmoothing(self):
"""
Gets the value of smoothing or its default value.
"""
return self.getOrDefault(self.smoothing)
@since("1.5.0")
def setModelType(self, value):
"""
Sets the value of :py:attr:`modelType`.
"""
return self._set(modelType=value)
@since("1.5.0")
def getModelType(self):
"""
Gets the value of modelType or its default value.
"""
return self.getOrDefault(self.modelType)
class NaiveBayesModel(JavaModel, JavaClassificationModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by NaiveBayes.
.. versionadded:: 1.5.0
"""
@property
@since("2.0.0")
def pi(self):
"""
log of class priors.
"""
return self._call_java("pi")
@property
@since("2.0.0")
def theta(self):
"""
log of class conditional probabilities.
"""
return self._call_java("theta")
@inherit_doc
class MultilayerPerceptronClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
HasMaxIter, HasTol, HasSeed, HasStepSize, JavaMLWritable,
JavaMLReadable):
"""
Classifier trainer based on the Multilayer Perceptron.
Each layer has a sigmoid activation function, and the output layer uses softmax.
The number of inputs has to be equal to the size of the feature vectors.
The number of outputs has to be equal to the total number of labels.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (0.0, Vectors.dense([0.0, 0.0])),
... (1.0, Vectors.dense([0.0, 1.0])),
... (1.0, Vectors.dense([1.0, 0.0])),
... (0.0, Vectors.dense([1.0, 1.0]))], ["label", "features"])
>>> mlp = MultilayerPerceptronClassifier(maxIter=100, layers=[2, 2, 2], blockSize=1, seed=123)
>>> model = mlp.fit(df)
>>> model.layers
[2, 2, 2]
>>> model.weights.size
12
>>> testDF = spark.createDataFrame([
... (Vectors.dense([1.0, 0.0]),),
... (Vectors.dense([0.0, 0.0]),)], ["features"])
>>> model.transform(testDF).show()
+---------+----------+
| features|prediction|
+---------+----------+
|[1.0,0.0]| 1.0|
|[0.0,0.0]| 0.0|
+---------+----------+
...
>>> mlp_path = temp_path + "/mlp"
>>> mlp.save(mlp_path)
>>> mlp2 = MultilayerPerceptronClassifier.load(mlp_path)
>>> mlp2.getBlockSize()
1
>>> model_path = temp_path + "/mlp_model"
>>> model.save(model_path)
>>> model2 = MultilayerPerceptronClassificationModel.load(model_path)
>>> model.layers == model2.layers
True
>>> model.weights == model2.weights
True
>>> mlp2 = mlp2.setInitialWeights(list(range(0, 12)))
>>> model3 = mlp2.fit(df)
>>> model3.weights != model2.weights
True
>>> model3.layers == model.layers
True
.. versionadded:: 1.6.0
"""
layers = Param(Params._dummy(), "layers", "Sizes of layers from input layer to output layer. " +
"E.g., Array(780, 100, 10) means 780 inputs, one hidden layer with 100 " +
"neurons, and an output layer of 10 neurons.",
typeConverter=TypeConverters.toListInt)
blockSize = Param(Params._dummy(), "blockSize", "Block size for stacking input data in " +
"matrices. Data is stacked within partitions. If block size is more than " +
"remaining data in a partition then it is adjusted to the size of this " +
"data. Recommended size is between 10 and 1000, default is 128.",
typeConverter=TypeConverters.toInt)
solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
"options: l-bfgs, gd.", typeConverter=TypeConverters.toString)
initialWeights = Param(Params._dummy(), "initialWeights", "The initial weights of the model.",
typeConverter=TypeConverters.toVector)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03,
solver="l-bfgs", initialWeights=None):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03, \
solver="l-bfgs", initialWeights=None)
"""
super(MultilayerPerceptronClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.MultilayerPerceptronClassifier", self.uid)
self._setDefault(maxIter=100, tol=1E-4, blockSize=128, stepSize=0.03, solver="l-bfgs")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03,
solver="l-bfgs", initialWeights=None):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03, \
solver="l-bfgs", initialWeights=None)
Sets params for MultilayerPerceptronClassifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return MultilayerPerceptronClassificationModel(java_model)
@since("1.6.0")
def setLayers(self, value):
"""
Sets the value of :py:attr:`layers`.
"""
return self._set(layers=value)
@since("1.6.0")
def getLayers(self):
"""
Gets the value of layers or its default value.
"""
return self.getOrDefault(self.layers)
@since("1.6.0")
def setBlockSize(self, value):
"""
Sets the value of :py:attr:`blockSize`.
"""
return self._set(blockSize=value)
@since("1.6.0")
def getBlockSize(self):
"""
Gets the value of blockSize or its default value.
"""
return self.getOrDefault(self.blockSize)
@since("2.0.0")
def setStepSize(self, value):
"""
Sets the value of :py:attr:`stepSize`.
"""
return self._set(stepSize=value)
@since("2.0.0")
def getStepSize(self):
"""
Gets the value of stepSize or its default value.
"""
return self.getOrDefault(self.stepSize)
@since("2.0.0")
def setSolver(self, value):
"""
Sets the value of :py:attr:`solver`.
"""
return self._set(solver=value)
@since("2.0.0")
def getSolver(self):
"""
Gets the value of solver or its default value.
"""
return self.getOrDefault(self.solver)
@since("2.0.0")
def setInitialWeights(self, value):
"""
Sets the value of :py:attr:`initialWeights`.
"""
return self._set(initialWeights=value)
@since("2.0.0")
def getInitialWeights(self):
"""
Gets the value of initialWeights or its default value.
"""
return self.getOrDefault(self.initialWeights)
class MultilayerPerceptronClassificationModel(JavaModel, JavaPredictionModel, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by MultilayerPerceptronClassifier.
.. versionadded:: 1.6.0
"""
@property
@since("1.6.0")
def layers(self):
"""
array of layer sizes including input and output layers.
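For the model fitted in the class doctest above this is [2, 2, 2].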
"""
return self._call_java("javaLayers")
@property
@since("2.0.0")
def weights(self):
"""
the weights of layers.
"""
return self._call_java("weights")
class OneVsRestParams(HasFeaturesCol, HasLabelCol, HasWeightCol, HasPredictionCol):
"""
Parameters for OneVsRest and OneVsRestModel.
"""
classifier = Param(Params._dummy(), "classifier", "base binary classifier")
@since("2.0.0")
def setClassifier(self, value):
"""
Sets the value of :py:attr:`classifier`.
.. note:: Only LogisticRegression and NaiveBayes are supported now.
"""
return self._set(classifier=value)
@since("2.0.0")
def getClassifier(self):
"""
Gets the value of classifier or its default value.
"""
return self.getOrDefault(self.classifier)
@inherit_doc
class OneVsRest(Estimator, OneVsRestParams, MLReadable, MLWritable):
"""
.. note:: Experimental
Reduction of Multiclass Classification to Binary Classification.
Performs reduction using the one-vs-all strategy.
For a multiclass classification with k classes, train k models (one per class).
Each example is scored against all k models, and the model with the highest score
is picked to label the example.
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> data_path = "data/mllib/sample_multiclass_classification_data.txt"
>>> df = spark.read.format("libsvm").load(data_path)
>>> lr = LogisticRegression(regParam=0.01)
>>> ovr = OneVsRest(classifier=lr)
>>> model = ovr.fit(df)
>>> model.models[0].coefficients
DenseVector([0.5..., -1.0..., 3.4..., 4.2...])
>>> model.models[1].coefficients
DenseVector([-2.1..., 3.1..., -2.6..., -2.3...])
>>> model.models[2].coefficients
DenseVector([0.3..., -3.4..., 1.0..., -1.1...])
>>> [x.intercept for x in model.models]
[-2.7..., -2.5..., -1.3...]
>>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, 0.0, 1.0, 1.0))]).toDF()
>>> model.transform(test0).head().prediction
0.0
>>> test1 = sc.parallelize([Row(features=Vectors.sparse(4, [0], [1.0]))]).toDF()
>>> model.transform(test1).head().prediction
2.0
>>> test2 = sc.parallelize([Row(features=Vectors.dense(0.5, 0.4, 0.3, 0.2))]).toDF()
>>> model.transform(test2).head().prediction
0.0
>>> model_path = temp_path + "/ovr_model"
>>> model.save(model_path)
>>> model2 = OneVsRestModel.load(model_path)
>>> model2.transform(test0).head().prediction
0.0
.. versionadded:: 2.0.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
classifier=None, weightCol=None):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
classifier=None, weightCol=None)
"""
super(OneVsRest, self).__init__()
kwargs = self._input_kwargs
self._set(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol=None, labelCol=None, predictionCol=None,
classifier=None, weightCol=None):
"""
setParams(self, featuresCol=None, labelCol=None, predictionCol=None, \
classifier=None, weightCol=None):
Sets params for OneVsRest.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _fit(self, dataset):
labelCol = self.getLabelCol()
featuresCol = self.getFeaturesCol()
predictionCol = self.getPredictionCol()
classifier = self.getClassifier()
assert isinstance(classifier, HasRawPredictionCol),\
"Classifier %s doesn't extend from HasRawPredictionCol." % type(classifier)
numClasses = int(dataset.agg({labelCol: "max"}).head()["max("+labelCol+")"]) + 1
weightCol = None
if (self.isDefined(self.weightCol) and self.getWeightCol()):
if isinstance(classifier, HasWeightCol):
weightCol = self.getWeightCol()
else:
warnings.warn("weightCol is ignored, "
"as it is not supported by {} now.".format(classifier))
if weightCol:
multiclassLabeled = dataset.select(labelCol, featuresCol, weightCol)
else:
multiclassLabeled = dataset.select(labelCol, featuresCol)
# persist if underlying dataset is not persistent.
handlePersistence = \
dataset.rdd.getStorageLevel() == StorageLevel(False, False, False, False)
if handlePersistence:
multiclassLabeled.persist(StorageLevel.MEMORY_AND_DISK)
def trainSingleClass(index):
binaryLabelCol = "mc2b$" + str(index)
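# Relabel examples of the current class as 1.0 and all other classes as 0.0,
# turning the multiclass problem into a binary one for this classifier.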
trainingDataset = multiclassLabeled.withColumn(
binaryLabelCol,
when(multiclassLabeled[labelCol] == float(index), 1.0).otherwise(0.0))
paramMap = dict([(classifier.labelCol, binaryLabelCol),
(classifier.featuresCol, featuresCol),
(classifier.predictionCol, predictionCol)])
if weightCol:
paramMap[classifier.weightCol] = weightCol
return classifier.fit(trainingDataset, paramMap)
# TODO: Parallel training for all classes.
models = [trainSingleClass(i) for i in range(numClasses)]
if handlePersistence:
multiclassLabeled.unpersist()
return self._copyValues(OneVsRestModel(models=models))
@since("2.0.0")
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This creates a deep copy of the embedded paramMap,
and copies the embedded and extra parameters over.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
newOvr = Params.copy(self, extra)
if self.isSet(self.classifier):
newOvr.setClassifier(self.getClassifier().copy(extra))
return newOvr
@since("2.0.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@since("2.0.0")
def save(self, path):
"""Save this ML instance to the given path, a shortcut of `write().save(path)`."""
self.write().save(path)
@classmethod
@since("2.0.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return JavaMLReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java OneVsRest, create and return a Python wrapper of it.
Used for ML persistence.
"""
featuresCol = java_stage.getFeaturesCol()
labelCol = java_stage.getLabelCol()
predictionCol = java_stage.getPredictionCol()
classifier = JavaParams._from_java(java_stage.getClassifier())
py_stage = cls(featuresCol=featuresCol, labelCol=labelCol, predictionCol=predictionCol,
classifier=classifier)
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java OneVsRest. Used for ML persistence.
:return: Java object equivalent to this instance.
"""
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRest",
self.uid)
_java_obj.setClassifier(self.getClassifier()._to_java())
_java_obj.setFeaturesCol(self.getFeaturesCol())
_java_obj.setLabelCol(self.getLabelCol())
_java_obj.setPredictionCol(self.getPredictionCol())
return _java_obj
class OneVsRestModel(Model, OneVsRestParams, MLReadable, MLWritable):
"""
.. note:: Experimental
Model fitted by OneVsRest.
This stores the models resulting from training k binary classifiers: one for each class.
Each example is scored against all k models, and the model with the highest score
is picked to label the example.
.. versionadded:: 2.0.0
"""
def __init__(self, models):
super(OneVsRestModel, self).__init__()
self.models = models
def _transform(self, dataset):
# determine the input columns: these need to be passed through
origCols = dataset.columns
# add an accumulator column to store predictions of all the models
accColName = "mbc$acc" + str(uuid.uuid4())
initUDF = udf(lambda _: [], ArrayType(DoubleType()))
newDataset = dataset.withColumn(accColName, initUDF(dataset[origCols[0]]))
# persist if underlying dataset is not persistent.
handlePersistence = \
dataset.rdd.getStorageLevel() == StorageLevel(False, False, False, False)
if handlePersistence:
newDataset.persist(StorageLevel.MEMORY_AND_DISK)
# update the accumulator column with the result of prediction of models
aggregatedDataset = newDataset
for index, model in enumerate(self.models):
rawPredictionCol = model._call_java("getRawPredictionCol")
columns = origCols + [rawPredictionCol, accColName]
# add temporary column to store intermediate scores and update
tmpColName = "mbc$tmp" + str(uuid.uuid4())
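# rawPrediction is a vector of class scores; element [1] is the score for the
# positive (current) class, which is appended to the accumulator column.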
updateUDF = udf(
lambda predictions, prediction: predictions + [prediction.tolist()[1]],
ArrayType(DoubleType()))
transformedDataset = model.transform(aggregatedDataset).select(*columns)
updatedDataset = transformedDataset.withColumn(
tmpColName,
updateUDF(transformedDataset[accColName], transformedDataset[rawPredictionCol]))
newColumns = origCols + [tmpColName]
# switch out the intermediate column with the accumulator column
aggregatedDataset = updatedDataset\
.select(*newColumns).withColumnRenamed(tmpColName, accColName)
if handlePersistence:
newDataset.unpersist()
# output the index of the classifier with highest confidence as prediction
labelUDF = udf(
lambda predictions: float(max(enumerate(predictions), key=operator.itemgetter(1))[0]),
DoubleType())
# output label and label metadata as prediction
return aggregatedDataset.withColumn(
self.getPredictionCol(), labelUDF(aggregatedDataset[accColName])).drop(accColName)
@since("2.0.0")
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This creates a deep copy of the embedded paramMap,
and copies the embedded and extra parameters over.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
newModel = Params.copy(self, extra)
newModel.models = [model.copy(extra) for model in self.models]
return newModel
@since("2.0.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@since("2.0.0")
def save(self, path):
"""Save this ML instance to the given path, a shortcut of `write().save(path)`."""
self.write().save(path)
@classmethod
@since("2.0.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return JavaMLReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java OneVsRestModel, create and return a Python wrapper of it.
Used for ML persistence.
"""
featuresCol = java_stage.getFeaturesCol()
labelCol = java_stage.getLabelCol()
predictionCol = java_stage.getPredictionCol()
classifier = JavaParams._from_java(java_stage.getClassifier())
models = [JavaParams._from_java(model) for model in java_stage.models()]
py_stage = cls(models=models).setPredictionCol(predictionCol).setLabelCol(labelCol)\
.setFeaturesCol(featuresCol).setClassifier(classifier)
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java OneVsRestModel. Used for ML persistence.
:return: Java object equivalent to this instance.
"""
sc = SparkContext._active_spark_context
java_models = [model._to_java() for model in self.models]
java_models_array = JavaWrapper._new_java_array(
java_models, sc._gateway.jvm.org.apache.spark.ml.classification.ClassificationModel)
metadata = JavaParams._new_java_obj("org.apache.spark.sql.types.Metadata")
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRestModel",
self.uid, metadata.empty(), java_models_array)
_java_obj.set("classifier", self.getClassifier()._to_java())
_java_obj.set("featuresCol", self.getFeaturesCol())
_java_obj.set("labelCol", self.getLabelCol())
_java_obj.set("predictionCol", self.getPredictionCol())
return _java_obj
if __name__ == "__main__":
import doctest
import pyspark.ml.classification
from pyspark.sql import SparkSession
globs = pyspark.ml.classification.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.classification tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
exit(-1)
|
"""
Test lldb breakpoint command add/list/delete.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
import side_effect
class BreakpointCommandTestCase(TestBase):
NO_DEBUG_INFO_TESTCASE = True
mydir = TestBase.compute_mydir(__file__)
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24528")
def test_breakpoint_command_sequence(self):
"""Test a sequence of breakpoint command add, list, and delete."""
self.build()
self.breakpoint_command_sequence()
def test_script_parameters(self):
"""Test a sequence of breakpoint command add, list, and delete."""
self.build()
self.breakpoint_command_script_parameters()
def test_commands_on_creation(self):
self.build()
self.breakpoint_commands_on_creation()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.line = line_number('main.c', '// Set break point at this line.')
# disable "There is a running process, kill it and restart?" prompt
self.runCmd("settings set auto-confirm true")
self.addTearDownHook(
lambda: self.runCmd("settings clear auto-confirm"))
def test_delete_all_breakpoints(self):
"""Test that deleting all breakpoints works."""
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_symbol(self, "main")
lldbutil.run_break_set_by_file_and_line(
self, "main.c", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
self.runCmd("breakpoint delete")
self.runCmd("process continue")
self.expect("process status", PROCESS_STOPPED,
patterns=['Process .* exited with status = 0'])
def breakpoint_command_sequence(self):
"""Test a sequence of breakpoint command add, list, and delete."""
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Add three breakpoints on the same line. The first time we don't specify the file,
# since the default file is the one containing main:
lldbutil.run_break_set_by_file_and_line(
self, None, self.line, num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "main.c", self.line, num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "main.c", self.line, num_expected_locations=1, loc_exact=True)
# Breakpoint 4 - set at the same location as breakpoint 1 to test
# setting breakpoint commands on two breakpoints at a time
lldbutil.run_break_set_by_file_and_line(
self, None, self.line, num_expected_locations=1, loc_exact=True)
# Make sure relative path source breakpoints work as expected. We test
# with partial paths with and without "./" prefixes.
lldbutil.run_break_set_by_file_and_line(
self, "./main.c", self.line,
num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "basic/main.c", self.line,
num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "./basic/main.c", self.line,
num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "breakpoint/basic/main.c", self.line,
num_expected_locations=1, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "./breakpoint/basic/main.c", self.line,
num_expected_locations=1, loc_exact=True)
# Test relative breakpoints with incorrect paths and make sure we get
# no breakpoint locations
lldbutil.run_break_set_by_file_and_line(
self, "invalid/main.c", self.line,
num_expected_locations=0, loc_exact=True)
lldbutil.run_break_set_by_file_and_line(
self, "./invalid/main.c", self.line,
num_expected_locations=0, loc_exact=True)
# Now add callbacks for the breakpoints just created.
self.runCmd(
"breakpoint command add -s command -o 'frame variable --show-types --scope' 1 4")
self.runCmd(
"breakpoint command add -s python -o 'import side_effect; side_effect.one_liner = \"one liner was here\"' 2")
self.runCmd(
"breakpoint command add --python-function bktptcmd.function 3")
# Check that the breakpoint commands are correctly set.
# The breakpoint list should report breakpoints 1 and 2 at the expected file and line.
self.expect(
"breakpoint list", "Breakpoints 1 & 2 created", substrs=[
"2: file = 'main.c', line = %d, exact_match = 0, locations = 1" %
self.line], patterns=[
"1: file = '.*main.c', line = %d, exact_match = 0, locations = 1" %
self.line])
self.expect(
"breakpoint list -f",
"Breakpoints 1 & 2 created",
substrs=[
"2: file = 'main.c', line = %d, exact_match = 0, locations = 1" %
self.line],
patterns=[
"1: file = '.*main.c', line = %d, exact_match = 0, locations = 1" %
self.line,
"1.1: .+at main.c:%d:?[0-9]*, .+unresolved, hit count = 0" %
self.line,
"2.1: .+at main.c:%d:?[0-9]*, .+unresolved, hit count = 0" %
self.line])
self.expect("breakpoint command list 1", "Breakpoint 1 command ok",
substrs=["Breakpoint commands:",
"frame variable --show-types --scope"])
self.expect("breakpoint command list 2", "Breakpoint 2 command ok",
substrs=["Breakpoint commands (Python):",
"import side_effect",
"side_effect.one_liner"])
self.expect("breakpoint command list 3", "Breakpoint 3 command ok",
substrs=["Breakpoint commands (Python):",
"bktptcmd.function(frame, bp_loc, internal_dict)"])
self.expect("breakpoint command list 4", "Breakpoint 4 command ok",
substrs=["Breakpoint commands:",
"frame variable --show-types --scope"])
self.runCmd("breakpoint delete 4")
self.runCmd("command script import --allow-reload ./bktptcmd.py")
# Next lets try some other breakpoint kinds. First break with a regular expression
# and then specify only one file. The first time we should get two locations,
# the second time only one:
lldbutil.run_break_set_by_regexp(
self, r"._MyFunction", num_expected_locations=2)
lldbutil.run_break_set_by_regexp(
self,
r"._MyFunction",
extra_options="-f a.c",
num_expected_locations=1)
lldbutil.run_break_set_by_regexp(
self,
r"._MyFunction",
extra_options="-f a.c -f b.c",
num_expected_locations=2)
# Now try a source regex breakpoint:
lldbutil.run_break_set_by_source_regexp(
self,
r"is about to return [12]0",
extra_options="-f a.c -f b.c",
num_expected_locations=2)
lldbutil.run_break_set_by_source_regexp(
self,
r"is about to return [12]0",
extra_options="-f a.c",
num_expected_locations=1)
# Reset our canary variables and run the program.
side_effect.one_liner = None
side_effect.bktptcmd = None
self.runCmd("run", RUN_SUCCEEDED)
# Check the value of canary variables.
self.assertEquals("one liner was here", side_effect.one_liner)
self.assertEquals("function was here", side_effect.bktptcmd)
# Finish the program.
self.runCmd("process continue")
# Remove the breakpoint command associated with breakpoint 1.
self.runCmd("breakpoint command delete 1")
# Remove breakpoint 2.
self.runCmd("breakpoint delete 2")
self.expect(
"breakpoint command list 1",
startstr="Breakpoint 1 does not have an associated command.")
self.expect(
"breakpoint command list 2",
error=True,
startstr="error: '2' is not a currently valid breakpoint ID.")
# The breakpoint list now only contains breakpoint 1.
self.expect(
"breakpoint list -f",
"Breakpoint 1 exists",
patterns=[
"1: file = '.*main.c', line = %d, exact_match = 0, locations = 1, resolved = 1" %
self.line,
"hit count = 1"])
# Not breakpoint 2.
self.expect(
"breakpoint list -f",
"No more breakpoint 2",
matching=False,
substrs=[
"2: file = 'main.c', line = %d, exact_match = 0, locations = 1, resolved = 1" %
self.line])
# Run the program again, with breakpoint 1 remaining.
self.runCmd("run", RUN_SUCCEEDED)
# We should be stopped again due to breakpoint 1.
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# The breakpoint should have a hit count of 2.
self.expect("breakpoint list -f", BREAKPOINT_HIT_TWICE,
substrs=['resolved, hit count = 2'])
def breakpoint_command_script_parameters(self):
"""Test that the frame and breakpoint location are being properly passed to the script breakpoint command function."""
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Add a breakpoint.
lldbutil.run_break_set_by_file_and_line(
self, "main.c", self.line, num_expected_locations=1, loc_exact=True)
# Now add callbacks for the breakpoints just created.
self.runCmd("breakpoint command add -s python -o 'import side_effect; side_effect.frame = str(frame); side_effect.bp_loc = str(bp_loc)' 1")
# Reset canary variables and run.
side_effect.frame = None
side_effect.bp_loc = None
self.runCmd("run", RUN_SUCCEEDED)
self.expect(side_effect.frame, exe=False, startstr="frame #0:")
self.expect(side_effect.bp_loc, exe=False,
patterns=["1.* where = .*main .* resolved, hit count = 1"])
def breakpoint_commands_on_creation(self):
"""Test that setting breakpoint commands when creating the breakpoint works"""
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target.IsValid(), "Created an invalid target.")
# Add a breakpoint.
lldbutil.run_break_set_by_file_and_line(
self, "main.c", self.line, num_expected_locations=1, loc_exact=True,
extra_options='-C bt -C "thread list" -C continue')
bkpt = target.FindBreakpointByID(1)
self.assertTrue(bkpt.IsValid(), "Couldn't find breakpoint 1")
com_list = lldb.SBStringList()
bkpt.GetCommandLineCommands(com_list)
self.assertEqual(com_list.GetSize(), 3, "Got the wrong number of commands")
self.assertEqual(com_list.GetStringAtIndex(0), "bt", "First bt")
self.assertEqual(com_list.GetStringAtIndex(1), "thread list", "Next thread list")
self.assertEqual(com_list.GetStringAtIndex(2), "continue", "Last continue")
|
"""Functional tests for Stack and ParallelStack Ops."""
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.platform import test
def np_split_squeeze(array, axis):
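"""Split `array` along `axis` and squeeze that axis out of each resulting slice."""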
axis_len = array.shape[axis]
return [
np.squeeze(
arr, axis=(axis,)) for arr in np.split(
array, axis_len, axis=axis)
]
class StackOpTest(test.TestCase):
def randn(self, shape, dtype):
data = np.random.randn(*shape)
if dtype == np.bool_:
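# astype(np.bool_) would map almost every nonzero sample to True, so threshold
# at zero instead to get a roughly balanced boolean array.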
return data < 0 # Naive casting yields True with P(1)!
else:
return data.astype(dtype)
def testSimple(self):
np.random.seed(7)
for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
rank = len(shape)
for axis in range(-rank, rank):
for dtype in [np.bool_, np.float32, np.int32, np.int64]:
data = self.randn(shape, dtype)
xs = np_split_squeeze(data, axis)
# Stack back into a single tensorflow tensor
with self.subTest(shape=shape, axis=axis, dtype=dtype):
c = array_ops.stack(xs, axis=axis)
self.assertAllEqual(c, data)
def testSimpleParallelCPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
np.random.seed(7)
with test_util.device(use_gpu=False):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
xs = list(map(constant_op.constant, data))
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c, data)
def testParallelConcatShapeZero(self):
if not tf2.enabled():
self.skipTest("only fails in TF2")
@def_function.function
def f():
y = gen_array_ops.parallel_concat(values=[["tf"]], shape=0)
return y
with self.assertRaisesRegex(errors.InvalidArgumentError,
r"0th dimension of value .* is less than"):
f()
def testSimpleParallelGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
with test_util.device(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
xs = list(map(constant_op.constant, data))
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c, data)
def testConst(self):
np.random.seed(7)
with test_util.use_gpu():
# Verify that shape induction works with shapes produced via const stack
a = constant_op.constant([1, 2, 3, 4, 5, 6])
b = array_ops.reshape(a, array_ops.stack([2, 3]))
self.assertAllEqual(b.get_shape(), [2, 3])
# Check on a variety of shapes and types
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10):
for dtype in [np.bool_, np.float32, np.int16, np.int32, np.int64]:
with self.subTest(shape=shape, dtype=dtype):
data = self.randn(shape, dtype)
# Stack back into a single tensorflow tensor directly using np array
c = array_ops.stack(data)
if not context.executing_eagerly():
# This is implemented via a Const:
self.assertEqual(c.op.type, "Const")
self.assertAllEqual(c, data)
# Python lists also work for 1-D case:
if len(shape) == 1:
data_list = list(data)
cl = array_ops.stack(data_list)
if not context.executing_eagerly():
self.assertEqual(cl.op.type, "Const")
self.assertAllEqual(cl, data)
def testConstParallelCPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
np.random.seed(7)
with test_util.device(use_gpu=False):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
if len(shape) == 1:
data_list = list(data)
cl = array_ops.parallel_stack(data_list)
self.assertAllEqual(cl, data)
data = self.randn(shape, np.float32)
c = array_ops.parallel_stack(data)
self.assertAllEqual(c, data)
def testConstParallelGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
np.random.seed(7)
with test_util.device(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
with self.subTest(shape=shape):
data = self.randn(shape, np.float32)
if len(shape) == 1:
data_list = list(data)
cl = array_ops.parallel_stack(data_list)
self.assertAllEqual(cl, data)
data = self.randn(shape, np.float32)
c = array_ops.parallel_stack(data)
self.assertAllEqual(c, data)
def testGradientsAxis0(self):
np.random.seed(7)
for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
data = np.random.randn(*shape)
with self.subTest(shape=shape):
with self.cached_session():
def func(*xs):
return array_ops.stack(xs)
# TODO(irving): Remove list() once we handle maps correctly
xs = list(map(constant_op.constant, data))
theoretical, numerical = gradient_checker_v2.compute_gradient(
func, xs)
self.assertAllClose(theoretical, numerical)
def testGradientsAxis1(self):
np.random.seed(7)
for shape in (2, 3), (3, 2), (8, 2, 10):
data = np.random.randn(*shape)
out_shape = list(shape[1:])
out_shape.insert(1, shape[0])
with self.subTest(shape=shape):
with self.cached_session():
def func(*inp):
return array_ops.stack(inp, axis=1)
# TODO(irving): Remove list() once we handle maps correctly
xs = list(map(constant_op.constant, data))
theoretical, numerical = gradient_checker_v2.compute_gradient(
func, xs)
self.assertAllClose(theoretical, numerical)
def testZeroSizeCPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
# Verify that stack doesn't crash for zero size inputs
with test_util.device(use_gpu=False):
for shape in (0,), (3, 0), (0, 3):
with self.subTest(shape=shape):
x = np.zeros((2,) + shape).astype(np.int32)
p = self.evaluate(array_ops.stack(list(x)))
self.assertAllEqual(p, x)
p = self.evaluate(array_ops.parallel_stack(list(x)))
self.assertAllEqual(p, x)
def testZeroSizeGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
# Verify that stack doesn't crash for zero size inputs
with test_util.device(use_gpu=True):
for shape in (0,), (3, 0), (0, 3):
with self.subTest(shape=shape):
x = np.zeros((2,) + shape).astype(np.int32)
p = self.evaluate(array_ops.stack(list(x)))
self.assertAllEqual(p, x)
p = self.evaluate(array_ops.parallel_stack(list(x)))
self.assertAllEqual(p, x)
def testAxis0DefaultCPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
with test_util.device(use_gpu=False):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
stacked = self.evaluate(array_ops.stack(t))
parallel_stacked = self.evaluate(array_ops.parallel_stack(t))
expected = np.array([[1, 2, 3], [4, 5, 6]])
self.assertAllEqual(stacked, expected)
self.assertAllEqual(parallel_stacked, expected)
def testAxis0DefaultGPU(self):
# tf.parallel_stack is only supported in graph mode.
with ops.Graph().as_default():
with test_util.device(use_gpu=True):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
stacked = self.evaluate(array_ops.stack(t))
parallel_stacked = self.evaluate(array_ops.parallel_stack(t))
expected = np.array([[1, 2, 3], [4, 5, 6]])
self.assertAllEqual(stacked, expected)
self.assertAllEqual(parallel_stacked, expected)
def testAgainstNumpy(self):
# For 1 to 5 dimensions.
for shape in (3,), (2, 2, 3), (4, 1, 2, 2), (8, 2, 10):
rank = len(shape)
expected = self.randn(shape, np.float32)
for dtype in [np.bool_, np.float32, np.int32, np.int64]:
# For all the possible axis to split it, including negative indices.
for axis in range(-rank, rank):
test_arrays = np_split_squeeze(expected, axis)
with self.cached_session():
with self.subTest(shape=shape, dtype=dtype, axis=axis):
actual_pack = array_ops.stack(test_arrays, axis=axis)
self.assertEqual(expected.shape, actual_pack.get_shape())
actual_pack = self.evaluate(actual_pack)
actual_stack = array_ops.stack(test_arrays, axis=axis)
self.assertEqual(expected.shape, actual_stack.get_shape())
actual_stack = self.evaluate(actual_stack)
self.assertNDArrayNear(expected, actual_stack, 1e-6)
def testDimOutOfRange(self):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
with self.assertRaisesRegex(ValueError,
r"Argument `axis` = 2 not in range \[-2, 2\)"):
array_ops.stack(t, axis=2)
def testDimOutOfNegativeRange(self):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
with self.assertRaisesRegex(ValueError,
r"Argument `axis` = -3 not in range \[-2, 2\)"):
array_ops.stack(t, axis=-3)
def testComplex(self):
np.random.seed(7)
with self.session():
for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
for dtype in [np.complex64, np.complex128]:
with self.subTest(shape=shape, dtype=dtype):
data = self.randn(shape, dtype)
xs = list(map(constant_op.constant, data))
c = array_ops.stack(xs)
self.assertAllEqual(self.evaluate(c), data)
class AutomaticStackingTest(test.TestCase):
def testSimple(self):
self.assertAllEqual([1, 0, 2],
ops.convert_to_tensor([1, constant_op.constant(0), 2]))
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor([[0, 0, 0],
[0,
constant_op.constant(1), 0],
[0, 0, 0]]))
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor([[0, 0, 0],
constant_op.constant([0, 1, 0]),
[0, 0, 0]]))
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor([
constant_op.constant([0, 0, 0]),
constant_op.constant([0, 1, 0]),
constant_op.constant([0, 0, 0])
]))
def testWithNDArray(self):
with self.session():
result = ops.convert_to_tensor([[[0., 0.],
constant_op.constant([1., 1.])],
np.array(
[[2., 2.], [3., 3.]],
dtype=np.float32)])
self.assertAllEqual([[[0., 0.], [1., 1.]], [[2., 2.], [3., 3.]]],
self.evaluate(result))
def testDtype(self):
t_0 = ops.convert_to_tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]])
self.assertEqual(dtypes.float32, t_0.dtype)
t_1 = ops.convert_to_tensor([[0., 0., 0.], constant_op.constant(
[0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]])
self.assertEqual(dtypes.float64, t_1.dtype)
t_2 = ops.convert_to_tensor(
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=dtypes.float64)
self.assertEqual(dtypes.float64, t_2.dtype)
t_3 = ops.convert_to_tensor(
[[0., 0., 0.],
constant_op.constant([0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]
],
dtype=dtypes.float32)
self.assertEqual(dtypes.float32, t_3.dtype)
t_4 = ops.convert_to_tensor(
[constant_op.constant([0., 0., 0.], dtype=dtypes.float64)],
dtype=dtypes.float32)
self.assertEqual(dtypes.float32, t_4.dtype)
with self.assertRaises(TypeError):
ops.convert_to_tensor([
constant_op.constant(
[0., 0., 0.], dtype=dtypes.float32), constant_op.constant(
[0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]
])
def testDtypeConversionWhenTensorDtypeMismatch(self):
t_0 = ops.convert_to_tensor([0., 0., 0.])
self.assertEqual(dtypes.float32, t_0.dtype)
t_1 = ops.convert_to_tensor([0, 0, 0])
self.assertEqual(dtypes.int32, t_1.dtype)
t_2 = ops.convert_to_tensor([t_0, t_0, t_1], dtype=dtypes.float64)
self.assertEqual(dtypes.float64, t_2.dtype)
if __name__ == "__main__":
test.main()
|
from flask import abort
from flask import Blueprint
from flask import jsonify
from flask import request
from keystoneauth1 import exceptions as exc
from keystoneauth1 import session as ks_session
from keystoneclient.auth.identity import v3
from keystoneclient.v3 import client as ks_client
import logging
import os
from oslo_config import cfg
import pbr.version
import pwd
import threading
import time
from .util import ping
from . import config
from . import policy
bp = Blueprint('admin', __name__)
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
USER_AGENT = 'Installer UI'
@bp.route("/api/v2/version")
def version():
"""Returns the version of the service
.. :quickref: Admin; Returns the version of the service
**Example valid response**:
.. sourcecode:: http
HTTP/1.1 200 OK
0.0.1.dev16
"""
version_info = pbr.version.VersionInfo('ardana-service')
return version_info.version_string_with_vcs()
@bp.route("/api/v2/heartbeat")
def heartbeat():
"""Returns the epoch time
Simple API to verify that the service is up and responding. Returns
the number of seconds since 1970-01-01 00:00:00 GMT.
.. :quickref: Admin; Returns the epoch time
**Example valid response**:
.. sourcecode:: http
HTTP/1.1 200 OK
1502745650
"""
return jsonify(int(time.time()))
@bp.route("/api/v2/user")
@policy.enforce('lifecycle:get_user')
def user():
"""Returns the username the service is running under
.. :quickref: Admin; Returns the username the service is running under
**Example valid response**:
.. sourcecode:: http
HTTP/1.1 200 OK
{"username": "myusername"}
"""
user_dict = {'username': pwd.getpwuid(os.getuid()).pw_name}
return jsonify(user_dict)
def update_trigger_file():
trigger_file = os.path.join(CONF.paths.log_dir, 'trigger.txt')
with open(trigger_file, 'w') as f:
f.write("Triggered restart at %s\n" % time.asctime())
@bp.route("/api/v2/restart", methods=['POST'])
@policy.enforce('lifecycle:restart')
def restart():
"""Requests the service to restart after a specified delay, in seconds
.. :quickref: Admin; Requests a service restart after a delay
**Example Request**:
.. sourcecode:: http
POST /api/v2/user HTTP/1.1
Content-Type: application/json
{
"delay": 60
}
"""
info = request.get_json() or {}
delay_secs = int(info.get('delay', 0))
t = threading.Timer(delay_secs, update_trigger_file)
t.start()
return jsonify('Success')
@bp.route("/api/v2/login", methods=['POST'])
def login():
"""Authenticates with keystone and returns a token
.. :quickref: Admin; Authenticates with keystone
**Example Request**:
.. sourcecode:: http
POST /api/v2/login HTTP/1.1
Content-Type: application/json
{
"username": "admin",
"password": "secret"
}
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"token": "gAAAAABbEaruZDQGIH5KmKWHlDZIw7CLq",
"expires": "2018-06-01T21:22:06+00:00"
}
:status 200: successful authentication
:status 401: invalid credentials
:status 403: authentication not permitted, or user not authorized for any
projects
"""
if not config.requires_auth():
abort(403,
"authentication not permitted since service is in insecure mode")
info = request.get_json() or {}
username = info.get('username')
password = info.get('password')
user_domain_name = info.get('user_domain_name', 'Default')
token = _authenticate(CONF.keystone_authtoken.auth_url,
username,
password,
user_domain_name)
return jsonify(token)
def _authenticate(auth_url, username=None, password=None,
user_domain_name='Default'):
"""Authenticate with keystone
Creates an unscoped token using the given credentials (which validates
them), and then uses that token to get a project-scoped token.
"""
unscoped_auth = v3.Password(auth_url,
username=username,
password=password,
user_domain_name=user_domain_name,
unscoped=True)
session = ks_session.Session(user_agent=USER_AGENT,
verify=not CONF.keystone_authtoken.insecure)
try:
# Trigger keystone to verify the credentials
unscoped_auth_ref = unscoped_auth.get_access(session)
except exc.connection.ConnectFailure as e:
abort(503, str(e))
except exc.http.HttpError as e:
abort(e.http_status, e.message)
except exc.ClientException as e:
abort(401, str(e))
except Exception as e:
LOG.exception(e)
abort(500, "Unable to authenticate")
client = ks_client.Client(session=session,
auth=unscoped_auth,
user_agent=USER_AGENT)
auth_url = unscoped_auth.auth_url
projects = client.projects.list(user=unscoped_auth_ref.user_id)
# Filter out disabled projects
projects = [project for project in projects if project.enabled]
# Prioritize the admin project by putting it at the beginning of the list
for pos, project in enumerate(projects):
if project.name == 'admin':
projects.pop(pos)
projects.insert(0, project)
break
# Return the first project token that we have the admin role on, otherwise
# return the first project token we have any role on.
fallback_auth_ref = None
for project in projects:
auth = v3.Token(auth_url=auth_url,
token=unscoped_auth_ref.auth_token,
project_id=project.id,
reauthenticate=False)
try:
auth_ref = auth.get_access(session)
if 'admin' in auth_ref.role_names:
return {'token': auth_ref.auth_token,
'expires': auth_ref.expires.isoformat()}
elif not fallback_auth_ref:
fallback_auth_ref = auth_ref
except Exception as e:
pass
if fallback_auth_ref:
return {'token': fallback_auth_ref.auth_token,
'expires': fallback_auth_ref.expires.isoformat()}
# TODO(gary): Consider as a secondary fallback to return a domain-scoped
# token
abort(403, "Not authorized for any project")
@bp.route("/api/v2/is_secured")
def get_secured():
"""Returns whether authentication is required
Returns a json object indicating whether the service is configured to
enforce authentication
.. :quickref: Model; Returns whether authentication is required
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"isSecured": false
}
:status 200: success
"""
return jsonify({'isSecured': config.requires_auth()})
@bp.route("/api/v2/connection_test", methods=['POST'])
def connection_test():
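    """Tests connectivity to a host
    Attempts a TCP connection to port 22 (ssh) on the given host and reports
    whether it succeeded.
    .. :quickref: Admin; Tests ssh connectivity to a host
    **Example Request**:
    .. sourcecode:: http
       POST /api/v2/connection_test HTTP/1.1
       Content-Type: application/json
       {
          "host": "192.168.1.1"
       }
    :status 200: host is reachable
    :status 404: host is not reachable
    """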
body = request.get_json() or {}
host = body['host']
try:
ping(host, 22)
return jsonify('Success')
except Exception as e:
return jsonify(error=str(e)), 404
|
import experiment
from ..util import dirs
from ..util import file_handling as fh
from optparse import OptionParser
import sys
def main():
usage = "%prog project logfile "
parser = OptionParser(usage=usage)
parser.add_option('-n', dest='new_name', default=None,
help='New name for experiment: default= old name + _rerun')
#parser.add_option('--boolarg', action="store_true", dest="boolarg", default=False,
# help='Keyword argument: default=%default')
(options, args) = parser.parse_args()
project = args[0]
log_filename = args[1]
new_name = options.new_name
log = fh.read_json(log_filename)
if new_name is None:
new_name = log['name'] + '_rerun'
log['name'] = new_name
float_vars = ['best_alpha', 'alpha_exp_base', 'max_alpha_exp', 'min_alpha_exp', 'orig_T', 'tau']
for v in float_vars:
if v in log:
if log[v] is not None:
log[v] = float(log[v])
else:
log[v] = None
#if log['reuse'] == 'False':
# log['reuse'] = False
#else:
# log['reuse'] = True
    # convert list string to list
#list_vars = ['feature_list', 'additional_label_files', 'additional_label_weights']
#for v in list_vars:
# if v in log:
# print v
# print log[v]
# quoted_strings = [p.strip() for p in log[v][1:-1].split(',')]
# print quoted_strings
# log[v] = [p[1:-1] for p in quoted_strings]
# print log[v]
# print '\n'
#print log
#if 'additional_label_weights' in log:
# log['additional_label_weights'] = [float(w) for w in log['additional_label_weights']]
dirs.make_base_dir(project)
    print(log)
result = experiment.run_experiment(**log)
    print(result)
if __name__ == '__main__':
main()
|
from __future__ import with_statement
import functools
import errno
import os
import resource
import signal
import time
import subprocess
import re
from swift.common.utils import search_tree, remove_file, write_file
SWIFT_DIR = '/etc/swift'
RUN_DIR = '/var/run/swift'
ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor',
'container-replicator', 'container-server', 'container-sync',
'container-updater', 'object-auditor', 'object-server',
'object-expirer', 'object-replicator', 'object-updater',
'proxy-server', 'account-replicator', 'account-reaper']
MAIN_SERVERS = ['proxy-server', 'account-server', 'container-server',
'object-server']
REST_SERVERS = [s for s in ALL_SERVERS if s not in MAIN_SERVERS]
GRACEFUL_SHUTDOWN_SERVERS = MAIN_SERVERS + ['auth-server']
START_ONCE_SERVERS = REST_SERVERS
STANDALONE_SERVERS = ['object-expirer']
KILL_WAIT = 15 # seconds to wait for servers to die (by default)
WARNING_WAIT = 3 # seconds to wait after message that may just be a warning
MAX_DESCRIPTORS = 32768
MAX_MEMORY = (1024 * 1024 * 1024) * 2 # 2 GB
def setup_env():
"""Try to increase resource limits of the OS. Move PYTHON_EGG_CACHE to /tmp
"""
try:
resource.setrlimit(resource.RLIMIT_NOFILE,
(MAX_DESCRIPTORS, MAX_DESCRIPTORS))
resource.setrlimit(resource.RLIMIT_DATA,
(MAX_MEMORY, MAX_MEMORY))
except ValueError:
print _("WARNING: Unable to increase file descriptor limit. "
"Running as non-root?")
os.environ['PYTHON_EGG_CACHE'] = '/tmp'
def command(func):
"""
    Decorator to declare which methods are accessible as commands; commands
    always return 1 or 0, where 0 should indicate success.
:param func: function to make public
"""
func.publicly_accessible = True
@functools.wraps(func)
def wrapped(*a, **kw):
rv = func(*a, **kw)
return 1 if rv else 0
return wrapped
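# Example (illustrative): methods decorated with @command are exposed through
# Manager.get_command / Manager.run_command, e.g.
#
#   manager = Manager(['proxy-server'])
#   rc = manager.run_command('status')  # 0 indicates success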
def watch_server_pids(server_pids, interval=1, **kwargs):
"""Monitor a collection of server pids yeilding back those pids that
aren't responding to signals.
:param server_pids: a dict, lists of pids [int,...] keyed on
Server objects
"""
status = {}
start = time.time()
end = start + interval
server_pids = dict(server_pids) # make a copy
while True:
for server, pids in server_pids.items():
for pid in pids:
try:
# let pid stop if it wants to
os.waitpid(pid, os.WNOHANG)
except OSError, e:
if e.errno not in (errno.ECHILD, errno.ESRCH):
raise # else no such child/process
# check running pids for server
status[server] = server.get_running_pids(**kwargs)
for pid in pids:
# original pids no longer in running pids!
if pid not in status[server]:
yield server, pid
# update active pids list using running_pids
server_pids[server] = status[server]
if not [p for server, pids in status.items() for p in pids]:
# no more running pids
break
if time.time() > end:
break
else:
time.sleep(0.1)
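# Example (illustrative): Manager.stop() below consumes this generator to report
# which pids have died within the kill_wait interval, e.g.
#
#   for server, pid in watch_server_pids(server_pids, interval=KILL_WAIT):
#       print _("%s (%s) appears to have stopped") % (server, pid)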
class UnknownCommandError(Exception):
pass
class Manager():
"""Main class for performing commands on groups of servers.
:param servers: list of server names as strings
"""
def __init__(self, servers, run_dir=RUN_DIR):
server_names = set()
for server in servers:
if server == 'all':
server_names.update(ALL_SERVERS)
elif server == 'main':
server_names.update(MAIN_SERVERS)
elif server == 'rest':
server_names.update(REST_SERVERS)
elif '*' in server:
# convert glob to regex
server_names.update([s for s in ALL_SERVERS if
re.match(server.replace('*', '.*'), s)])
else:
server_names.add(server)
self.servers = set()
for name in server_names:
self.servers.add(Server(name, run_dir))
@command
def status(self, **kwargs):
"""display status of tracked pids for server
"""
status = 0
for server in self.servers:
status += server.status(**kwargs)
return status
@command
def start(self, **kwargs):
"""starts a server
"""
setup_env()
status = 0
for server in self.servers:
server.launch(**kwargs)
if not kwargs.get('daemon', True):
for server in self.servers:
try:
status += server.interact(**kwargs)
except KeyboardInterrupt:
print _('\nuser quit')
self.stop(**kwargs)
break
elif kwargs.get('wait', True):
for server in self.servers:
status += server.wait(**kwargs)
return status
@command
def no_wait(self, **kwargs):
"""spawn server and return immediately
"""
kwargs['wait'] = False
return self.start(**kwargs)
@command
def no_daemon(self, **kwargs):
"""start a server interactively
"""
kwargs['daemon'] = False
return self.start(**kwargs)
@command
def once(self, **kwargs):
"""start server and run one pass on supporting daemons
"""
kwargs['once'] = True
return self.start(**kwargs)
@command
def stop(self, **kwargs):
"""stops a server
"""
server_pids = {}
for server in self.servers:
signaled_pids = server.stop(**kwargs)
if not signaled_pids:
print _('No %s running') % server
else:
server_pids[server] = signaled_pids
# all signaled_pids, i.e. list(itertools.chain(*server_pids.values()))
signaled_pids = [p for server, pids in server_pids.items()
for p in pids]
        # keep track of the pids yielded back as killed for all servers
killed_pids = set()
kill_wait = kwargs.get('kill_wait', KILL_WAIT)
for server, killed_pid in watch_server_pids(server_pids,
interval=kill_wait,
**kwargs):
print _("%s (%s) appears to have stopped") % (server, killed_pid)
killed_pids.add(killed_pid)
if not killed_pids.symmetric_difference(signaled_pids):
                # all processes have been stopped
return 0
        # the kill_wait interval elapsed in watch_server_pids without all servers dying
for server, pids in server_pids.items():
if not killed_pids.issuperset(pids):
# some pids of this server were not killed
print _('Waited %s seconds for %s to die; giving up') % (
kill_wait, server)
return 1
@command
def shutdown(self, **kwargs):
"""allow current requests to finish on supporting servers
"""
kwargs['graceful'] = True
status = 0
status += self.stop(**kwargs)
return status
@command
def restart(self, **kwargs):
"""stops then restarts server
"""
status = 0
status += self.stop(**kwargs)
status += self.start(**kwargs)
return status
@command
def reload(self, **kwargs):
"""graceful shutdown then restart on supporting servers
"""
kwargs['graceful'] = True
status = 0
for server in self.servers:
m = Manager([server.server])
status += m.stop(**kwargs)
status += m.start(**kwargs)
return status
@command
def force_reload(self, **kwargs):
"""alias for reload
"""
return self.reload(**kwargs)
def get_command(self, cmd):
"""Find and return the decorated method named like cmd
:param cmd: the command to get, a string, if not found raises
UnknownCommandError
"""
cmd = cmd.lower().replace('-', '_')
try:
f = getattr(self, cmd)
except AttributeError:
raise UnknownCommandError(cmd)
if not hasattr(f, 'publicly_accessible'):
raise UnknownCommandError(cmd)
return f
@classmethod
def list_commands(cls):
"""Get all publicly accessible commands
        :returns: a list of string tuples (cmd, help), the method names that are
decorated as commands
"""
get_method = lambda cmd: getattr(cls, cmd)
return sorted([(x.replace('_', '-'), get_method(x).__doc__.strip())
for x in dir(cls) if
getattr(get_method(x), 'publicly_accessible', False)])
def run_command(self, cmd, **kwargs):
"""Find the named command and run it
:param cmd: the command name to run
"""
f = self.get_command(cmd)
return f(**kwargs)
class Server():
"""Manage operations on a server or group of servers of similar type
:param server: name of server
"""
def __init__(self, server, run_dir=RUN_DIR):
if '-' not in server:
server = '%s-server' % server
self.server = server.lower()
self.type = server.rsplit('-', 1)[0]
self.cmd = 'swift-%s' % server
self.procs = []
self.run_dir = run_dir
def __str__(self):
return self.server
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(str(self)))
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
try:
return self.server == other.server
except AttributeError:
return False
def get_pid_file_name(self, conf_file):
"""Translate conf_file to a corresponding pid_file
        :param conf_file: a conf_file for this server, a string
:returns: the pid_file for this conf_file
"""
return conf_file.replace(
os.path.normpath(SWIFT_DIR), self.run_dir, 1).replace(
'%s-server' % self.type, self.server, 1).rsplit(
'.conf', 1)[0] + '.pid'
def get_conf_file_name(self, pid_file):
"""Translate pid_file to a corresponding conf_file
:param pid_file: a pid_file for this server, a string
:returns: the conf_file for this pid_file
"""
if self.server in STANDALONE_SERVERS:
return pid_file.replace(
os.path.normpath(self.run_dir), SWIFT_DIR, 1)\
.rsplit('.pid', 1)[0] + '.conf'
else:
return pid_file.replace(
os.path.normpath(self.run_dir), SWIFT_DIR, 1).replace(
self.server, '%s-server' % self.type, 1).rsplit(
'.pid', 1)[0] + '.conf'
def conf_files(self, **kwargs):
"""Get conf files for this server
        :param: number, if supplied will only look up the nth server
:returns: list of conf files
"""
if self.server in STANDALONE_SERVERS:
found_conf_files = search_tree(SWIFT_DIR, self.server + '*',
'.conf')
else:
found_conf_files = search_tree(SWIFT_DIR, '%s-server*' % self.type,
'.conf')
number = kwargs.get('number')
if number:
try:
conf_files = [found_conf_files[number - 1]]
except IndexError:
conf_files = []
else:
conf_files = found_conf_files
if not conf_files:
# maybe there's a config file(s) out there, but I couldn't find it!
if not kwargs.get('quiet'):
print _('Unable to locate config %sfor %s') % (
('number %s ' % number if number else ''), self.server)
if kwargs.get('verbose') and not kwargs.get('quiet'):
if found_conf_files:
print _('Found configs:')
for i, conf_file in enumerate(found_conf_files):
print ' %d) %s' % (i + 1, conf_file)
return conf_files
def pid_files(self, **kwargs):
"""Get pid files for this server
        :param: number, if supplied will only look up the nth server
:returns: list of pid files
"""
pid_files = search_tree(self.run_dir, '%s*' % self.server, '.pid')
if kwargs.get('number', 0):
conf_files = self.conf_files(**kwargs)
# filter pid_files to match the index of numbered conf_file
pid_files = [pid_file for pid_file in pid_files if
self.get_conf_file_name(pid_file) in conf_files]
return pid_files
def iter_pid_files(self, **kwargs):
"""Generator, yields (pid_file, pids)
"""
for pid_file in self.pid_files(**kwargs):
yield pid_file, int(open(pid_file).read().strip())
def signal_pids(self, sig, **kwargs):
"""Send a signal to pids for this server
:param sig: signal to send
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
pids = {}
for pid_file, pid in self.iter_pid_files(**kwargs):
try:
if sig != signal.SIG_DFL:
print _('Signal %s pid: %s signal: %s') % (self.server,
pid, sig)
os.kill(pid, sig)
except OSError, e:
if e.errno == errno.ESRCH:
# pid does not exist
if kwargs.get('verbose'):
print _("Removing stale pid file %s") % pid_file
remove_file(pid_file)
elif e.errno == errno.EPERM:
print _("No permission to signal PID %d") % pid
else:
# process exists
pids[pid] = pid_file
return pids
def get_running_pids(self, **kwargs):
"""Get running pids
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
return self.signal_pids(signal.SIG_DFL, **kwargs) # send noop
def kill_running_pids(self, **kwargs):
"""Kill running pids
:param graceful: if True, attempt SIGHUP on supporting servers
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
graceful = kwargs.get('graceful')
if graceful and self.server in GRACEFUL_SHUTDOWN_SERVERS:
sig = signal.SIGHUP
else:
sig = signal.SIGTERM
return self.signal_pids(sig, **kwargs)
def status(self, pids=None, **kwargs):
"""Display status of server
:param: pids, if not supplied pids will be populated automatically
        :param: number, if supplied will only look up the nth server
:returns: 1 if server is not running, 0 otherwise
"""
if pids is None:
pids = self.get_running_pids(**kwargs)
if not pids:
number = kwargs.get('number', 0)
if number:
kwargs['quiet'] = True
conf_files = self.conf_files(**kwargs)
if conf_files:
print _("%s #%d not running (%s)") % (self.server, number,
conf_files[0])
else:
print _("No %s running") % self.server
return 1
for pid, pid_file in pids.items():
conf_file = self.get_conf_file_name(pid_file)
print _("%s running (%s - %s)") % (self.server, pid, conf_file)
return 0
def spawn(self, conf_file, once=False, wait=True, daemon=True, **kwargs):
"""Launch a subprocess for this server.
:param conf_file: path to conf_file to use as first arg
:param once: boolean, add once argument to command
:param wait: boolean, if true capture stdout with a pipe
        :param daemon: boolean, if false ask server to log to console
        :returns: the pid of the spawned process
"""
args = [self.cmd, conf_file]
if once:
args.append('once')
if not daemon:
# ask the server to log to console
args.append('verbose')
# figure out what we're going to do with stdio
if not daemon:
# do nothing, this process is open until the spawns close anyway
re_out = None
re_err = None
else:
re_err = subprocess.STDOUT
if wait:
# we're going to need to block on this...
re_out = subprocess.PIPE
else:
re_out = open(os.devnull, 'w+b')
proc = subprocess.Popen(args, stdout=re_out, stderr=re_err)
pid_file = self.get_pid_file_name(conf_file)
write_file(pid_file, proc.pid)
self.procs.append(proc)
return proc.pid
def wait(self, **kwargs):
"""
wait on spawned procs to start
"""
status = 0
for proc in self.procs:
# wait for process to close its stdout
output = proc.stdout.read()
if output:
print output
start = time.time()
# wait for process to die (output may just be a warning)
while time.time() - start < WARNING_WAIT:
time.sleep(0.1)
if proc.poll() is not None:
status += proc.returncode
break
return status
def interact(self, **kwargs):
"""
wait on spawned procs to terminate
"""
status = 0
for proc in self.procs:
# wait for process to terminate
proc.communicate()
if proc.returncode:
status += 1
return status
def launch(self, **kwargs):
"""
Collect conf files and attempt to spawn the processes for this server
"""
conf_files = self.conf_files(**kwargs)
if not conf_files:
return []
pids = self.get_running_pids(**kwargs)
already_started = False
for pid, pid_file in pids.items():
conf_file = self.get_conf_file_name(pid_file)
# for legacy compat you can't start other servers if one server is
# already running (unless -n specifies which one you want), this
# restriction could potentially be lifted, and launch could start
# any unstarted instances
if conf_file in conf_files:
already_started = True
print _("%s running (%s - %s)") % (self.server, pid, conf_file)
elif not kwargs.get('number', 0):
already_started = True
print _("%s running (%s - %s)") % (self.server, pid, pid_file)
if already_started:
print _("%s already started...") % self.server
return []
if self.server not in START_ONCE_SERVERS:
kwargs['once'] = False
pids = {}
for conf_file in conf_files:
if kwargs.get('once'):
msg = _('Running %s once') % self.server
else:
msg = _('Starting %s') % self.server
print '%s...(%s)' % (msg, conf_file)
try:
pid = self.spawn(conf_file, **kwargs)
except OSError, e:
if e.errno == errno.ENOENT:
# TODO: should I check if self.cmd exists earlier?
print _("%s does not exist") % self.cmd
break
pids[pid] = conf_file
return pids
def stop(self, **kwargs):
"""Send stop signals to pids for this server
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
return self.kill_running_pids(**kwargs)
|
"""App name"""
from django.apps import AppConfig
class CertificateEngineConfig(AppConfig):
name = "certificate_engine"
|
"""Base classes and functions for dynamic decoding."""
import abc
import tensorflow as tf
from tensorflow_addons.utils.types import TensorLike
from typeguard import typechecked
from typing import Any, Optional, Tuple, Union
from tensorflow.python.ops import control_flow_util
class Decoder(metaclass=abc.ABCMeta):
"""An RNN Decoder abstract interface object.
Concepts used by this interface:
- `inputs`: (structure of) tensors and TensorArrays that is passed as input
to the RNN cell composing the decoder, at each time step.
- `state`: (structure of) tensors and TensorArrays that is passed to the
RNN cell instance as the state.
- `finished`: boolean tensor telling whether each sequence in the batch is
finished.
- `training`: boolean whether it should behave in training mode or in
inference mode.
- `outputs`: instance of `tfa.seq2seq.BasicDecoderOutput`. Result of the decoding, at
each time step.
"""
@property
def batch_size(self):
"""The batch size of input values."""
raise NotImplementedError
@property
def output_size(self):
"""A (possibly nested tuple of...) integer[s] or `TensorShape`
object[s]."""
raise NotImplementedError
@property
def output_dtype(self):
"""A (possibly nested tuple of...) dtype[s]."""
raise NotImplementedError
@abc.abstractmethod
def initialize(self, name=None):
"""Called before any decoding iterations.
        This method must compute initial input values and initial state.
Args:
name: Name scope for any created operations.
Returns:
`(finished, initial_inputs, initial_state)`: initial values of
'finished' flags, inputs and state.
"""
raise NotImplementedError
@abc.abstractmethod
def step(self, time, inputs, state, training=None, name=None):
"""Called per step of decoding (but only once for dynamic decoding).
Args:
time: Scalar `int32` tensor. Current step number.
inputs: RNN cell input (possibly nested tuple of) tensor[s] for this
time step.
state: RNN cell state (possibly nested tuple of) tensor[s] from
previous time step.
training: Python boolean. Indicates whether the layer should behave
in training mode or in inference mode. Only relevant
when `dropout` or `recurrent_dropout` is used.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`: `outputs` is an
object containing the decoder output, `next_state` is a (structure
of) state tensors and TensorArrays, `next_inputs` is the tensor that
should be used as input for the next step, `finished` is a boolean
tensor telling whether the sequence is complete, for each sequence in
the batch.
"""
raise NotImplementedError
def finalize(self, outputs, final_state, sequence_lengths):
raise NotImplementedError
@property
def tracks_own_finished(self):
"""Describes whether the Decoder keeps track of finished states.
Most decoders will emit a true/false `finished` value independently
at each time step. In this case, the `tfa.seq2seq.dynamic_decode` function keeps
track of which batch entries are already finished, and performs a
        logical OR to add newly finished entries to the finished set.
Some decoders, however, shuffle batches / beams between time steps and
`tfa.seq2seq.dynamic_decode` will mix up the finished state across these entries
because it does not track the reshuffle across time steps. In this
case, it is up to the decoder to declare that it will keep track of its
own finished state by setting this property to `True`.
Returns:
Python bool.
"""
return False
class BaseDecoder(tf.keras.layers.Layer):
"""An RNN Decoder that is based on a Keras layer.
Concepts used by this interface:
- `inputs`: (structure of) Tensors and TensorArrays that is passed as input
to the RNN cell composing the decoder, at each time step.
- `state`: (structure of) Tensors and TensorArrays that is passed to the
RNN cell instance as the state.
- `memory`: tensor that is usually the full output of the encoder, which
will be used for the attention wrapper for the RNN cell.
- `finished`: boolean tensor telling whether each sequence in the batch is
finished.
- `training`: boolean whether it should behave in training mode or in
inference mode.
- `outputs`: instance of `tfa.seq2seq.BasicDecoderOutput`. Result of the decoding, at
each time step.
"""
@typechecked
def __init__(
self,
output_time_major: bool = False,
impute_finished: bool = False,
maximum_iterations: Optional[TensorLike] = None,
parallel_iterations: int = 32,
swap_memory: bool = False,
**kwargs,
):
self.output_time_major = output_time_major
self.impute_finished = impute_finished
self.maximum_iterations = maximum_iterations
self.parallel_iterations = parallel_iterations
self.swap_memory = swap_memory
super().__init__(**kwargs)
def call(self, inputs, initial_state=None, training=None, **kwargs):
init_kwargs = kwargs
init_kwargs["initial_state"] = initial_state
return dynamic_decode(
self,
output_time_major=self.output_time_major,
impute_finished=self.impute_finished,
maximum_iterations=self.maximum_iterations,
parallel_iterations=self.parallel_iterations,
swap_memory=self.swap_memory,
training=training,
decoder_init_input=inputs,
decoder_init_kwargs=init_kwargs,
)
@property
def batch_size(self):
"""The batch size of input values."""
raise NotImplementedError
@property
def output_size(self):
"""A (possibly nested tuple of...) integer[s] or `TensorShape`
object[s]."""
raise NotImplementedError
@property
def output_dtype(self):
"""A (possibly nested tuple of...) dtype[s]."""
raise NotImplementedError
def initialize(self, inputs, initial_state=None, **kwargs):
"""Called before any decoding iterations.
        This method must compute initial input values and initial state.
Args:
inputs: (structure of) tensors that contains the input for the
decoder. In the normal case, it's a tensor with shape
[batch, timestep, embedding].
initial_state: (structure of) tensors that contains the initial state
for the RNN cell.
**kwargs: Other arguments that are passed in from layer.call()
            method. It could contain items like the input `sequence_length`, or
masking for input.
Returns:
`(finished, initial_inputs, initial_state)`: initial values of
'finished' flags, inputs and state.
"""
raise NotImplementedError
def step(self, time, inputs, state, training):
"""Called per step of decoding (but only once for dynamic decoding).
Args:
time: Scalar `int32` tensor. Current step number.
inputs: RNN cell input (possibly nested tuple of) tensor[s] for this
time step.
state: RNN cell state (possibly nested tuple of) tensor[s] from
previous time step.
training: Python boolean. Indicates whether the layer should
behave in training mode or in inference mode.
Returns:
`(outputs, next_state, next_inputs, finished)`: `outputs` is an
object containing the decoder output, `next_state` is a
(structure of) state tensors and TensorArrays, `next_inputs` is the
tensor that should be used as input for the next step, `finished` is
a boolean tensor telling whether the sequence is complete, for each
sequence in the batch.
"""
raise NotImplementedError
def finalize(self, outputs, final_state, sequence_lengths):
raise NotImplementedError
@property
def tracks_own_finished(self):
"""Describes whether the Decoder keeps track of finished states.
Most decoders will emit a true/false `finished` value independently
at each time step. In this case, the `tfa.seq2seq.dynamic_decode` function keeps
track of which batch entries are already finished, and performs a
        logical OR to add newly finished entries to the finished set.
Some decoders, however, shuffle batches / beams between time steps and
`tfa.seq2seq.dynamic_decode` will mix up the finished state across these entries
because it does not track the reshuffle across time steps. In this
case, it is up to the decoder to declare that it will keep track of its
own finished state by setting this property to `True`.
Returns:
Python bool.
"""
return False
# TODO(scottzhu): Add build/get_config/from_config and other layer methods.
@typechecked
def dynamic_decode(
decoder: Union[Decoder, BaseDecoder],
output_time_major: bool = False,
impute_finished: bool = False,
maximum_iterations: Optional[TensorLike] = None,
parallel_iterations: int = 32,
swap_memory: bool = False,
training: Optional[bool] = None,
scope: Optional[str] = None,
enable_tflite_convertible: bool = False,
**kwargs,
) -> Tuple[Any, Any, Any]:
"""Runs dynamic decoding with a decoder.
Calls `initialize()` once and `step()` repeatedly on the decoder object.
Args:
decoder: A `tfa.seq2seq.Decoder` or `tfa.seq2seq.BaseDecoder` instance.
output_time_major: Python boolean. Default: `False` (batch major). If
`True`, outputs are returned as time major tensors (this mode is
faster). Otherwise, outputs are returned as batch major tensors (this
adds extra time to the computation).
impute_finished: Python boolean. If `True`, then states for batch
entries which are marked as finished get copied through and the
corresponding outputs get zeroed out. This causes some slowdown at
each time step, but ensures that the final state and outputs have
the correct values and that backprop ignores time steps that were
marked as finished.
maximum_iterations: A strictly positive `int32` scalar, the maximum
allowed number of decoding steps. Default is `None` (decode until the
decoder is fully done).
parallel_iterations: Argument passed to `tf.while_loop`.
swap_memory: Argument passed to `tf.while_loop`.
training: Python boolean. Indicates whether the layer should behave
in training mode or in inference mode. Only relevant
when `dropout` or `recurrent_dropout` is used.
scope: Optional name scope to use.
enable_tflite_convertible: Python boolean. If `True`, then the variables
of `TensorArray` become of 1-D static shape. Also zero pads in the
output tensor will be discarded. Default: `False`.
**kwargs: dict, other keyword arguments for dynamic_decode. It might
contain arguments for `BaseDecoder` to initialize, which takes all
tensor inputs during call().
Returns:
`(final_outputs, final_state, final_sequence_lengths)`.
Raises:
ValueError: if `maximum_iterations` is provided but is not a scalar.
"""
with tf.name_scope(scope or "decoder"):
is_xla = (
not tf.executing_eagerly()
and control_flow_util.GraphOrParentsInXlaContext(
tf.compat.v1.get_default_graph()
)
)
if maximum_iterations is not None:
maximum_iterations = tf.convert_to_tensor(
maximum_iterations, dtype=tf.int32, name="maximum_iterations"
)
if maximum_iterations.shape.ndims != 0:
raise ValueError("maximum_iterations must be a scalar")
tf.debugging.assert_greater(
maximum_iterations,
0,
message="maximum_iterations should be greater than 0",
)
elif is_xla:
raise ValueError("maximum_iterations is required for XLA compilation.")
if isinstance(decoder, Decoder):
initial_finished, initial_inputs, initial_state = decoder.initialize()
else:
# For BaseDecoder that takes tensor inputs during call.
decoder_init_input = kwargs.pop("decoder_init_input", None)
decoder_init_kwargs = kwargs.pop("decoder_init_kwargs", {})
initial_finished, initial_inputs, initial_state = decoder.initialize(
decoder_init_input, **decoder_init_kwargs
)
if enable_tflite_convertible:
# Assume the batch_size = 1 for inference.
# So we can change 2-D TensorArray into 1-D by reshaping it.
tf.debugging.assert_equal(
decoder.batch_size,
1,
message="TFLite conversion requires a batch size of 1",
)
zero_outputs = tf.nest.map_structure(
lambda shape, dtype: tf.reshape(
tf.zeros(_prepend_batch(decoder.batch_size, shape), dtype=dtype),
[-1],
),
decoder.output_size,
decoder.output_dtype,
)
else:
zero_outputs = tf.nest.map_structure(
lambda shape, dtype: tf.zeros(
_prepend_batch(decoder.batch_size, shape), dtype=dtype
),
decoder.output_size,
decoder.output_dtype,
)
if maximum_iterations is not None:
initial_finished = tf.logical_or(initial_finished, 0 >= maximum_iterations)
initial_sequence_lengths = tf.zeros_like(initial_finished, dtype=tf.int32)
initial_time = tf.constant(0, dtype=tf.int32)
def _shape(batch_size, from_shape):
if not isinstance(from_shape, tf.TensorShape) or from_shape.ndims == 0:
return None
else:
batch_size = tf.get_static_value(
tf.convert_to_tensor(batch_size, name="batch_size")
)
return tf.TensorShape([batch_size]).concatenate(from_shape)
dynamic_size = maximum_iterations is None or not is_xla
# The dynamic shape `TensorArray` is not allowed in TFLite yet.
dynamic_size = dynamic_size and (not enable_tflite_convertible)
def _create_ta(s, d):
if enable_tflite_convertible:
# TFLite requires 1D element_shape.
if isinstance(s, tf.TensorShape) and s.ndims == 0:
s = (1,)
element_shape = s
else:
element_shape = _shape(decoder.batch_size, s)
return tf.TensorArray(
dtype=d,
size=0 if dynamic_size else maximum_iterations,
dynamic_size=dynamic_size,
element_shape=element_shape,
)
initial_outputs_ta = tf.nest.map_structure(
_create_ta, decoder.output_size, decoder.output_dtype
)
def condition(
unused_time,
unused_outputs_ta,
unused_state,
unused_inputs,
finished,
unused_sequence_lengths,
):
return tf.logical_not(tf.reduce_all(finished))
def body(time, outputs_ta, state, inputs, finished, sequence_lengths):
"""Internal while_loop body.
Args:
time: scalar int32 tensor.
outputs_ta: structure of TensorArray.
state: (structure of) state tensors and TensorArrays.
inputs: (structure of) input tensors.
finished: bool tensor (keeping track of what's finished).
sequence_lengths: int32 tensor (keeping track of time of finish).
Returns:
`(time + 1, outputs_ta, next_state, next_inputs, next_finished,
next_sequence_lengths)`.
"""
(next_outputs, decoder_state, next_inputs, decoder_finished) = decoder.step(
time, inputs, state, training
)
decoder_state_sequence_lengths = False
if decoder.tracks_own_finished:
next_finished = decoder_finished
lengths = getattr(decoder_state, "lengths", None)
if lengths is not None:
# sequence lengths are provided by decoder_state.lengths;
# overwrite our sequence lengths.
decoder_state_sequence_lengths = True
sequence_lengths = tf.cast(lengths, tf.int32)
else:
next_finished = tf.logical_or(decoder_finished, finished)
if decoder_state_sequence_lengths:
# Just pass something through the loop; at the next iteration
# we'll pull the sequence lengths from the decoder_state again.
next_sequence_lengths = sequence_lengths
else:
next_sequence_lengths = tf.where(
tf.logical_not(finished),
tf.fill(tf.shape(sequence_lengths), time + 1),
sequence_lengths,
)
tf.nest.assert_same_structure(state, decoder_state)
tf.nest.assert_same_structure(outputs_ta, next_outputs)
tf.nest.assert_same_structure(inputs, next_inputs)
# Zero out output values past finish
if impute_finished:
def zero_out_finished(out, zero):
if finished.shape.rank < zero.shape.rank:
broadcast_finished = tf.broadcast_to(
tf.expand_dims(finished, axis=-1), zero.shape
)
return tf.where(broadcast_finished, zero, out)
else:
return tf.where(finished, zero, out)
emit = tf.nest.map_structure(
zero_out_finished, next_outputs, zero_outputs
)
else:
emit = next_outputs
# Copy through states past finish
def _maybe_copy_state(new, cur):
# TensorArrays and scalar states get passed through.
if isinstance(cur, tf.TensorArray):
pass_through = True
else:
new.set_shape(cur.shape)
pass_through = new.shape.ndims == 0
if not pass_through:
broadcast_finished = tf.broadcast_to(
tf.expand_dims(finished, axis=-1), new.shape
)
return tf.where(broadcast_finished, cur, new)
else:
return new
if impute_finished:
next_state = tf.nest.map_structure(
_maybe_copy_state, decoder_state, state
)
else:
next_state = decoder_state
if enable_tflite_convertible:
# Reshape to 1-D.
emit = tf.nest.map_structure(lambda x: tf.reshape(x, [-1]), emit)
outputs_ta = tf.nest.map_structure(
lambda ta, out: ta.write(time, out), outputs_ta, emit
)
return (
time + 1,
outputs_ta,
next_state,
next_inputs,
next_finished,
next_sequence_lengths,
)
res = tf.while_loop(
condition,
body,
loop_vars=(
initial_time,
initial_outputs_ta,
initial_state,
initial_inputs,
initial_finished,
initial_sequence_lengths,
),
parallel_iterations=parallel_iterations,
maximum_iterations=maximum_iterations,
swap_memory=swap_memory,
)
final_outputs_ta = res[1]
final_state = res[2]
final_sequence_lengths = res[5]
final_outputs = tf.nest.map_structure(lambda ta: ta.stack(), final_outputs_ta)
try:
final_outputs, final_state = decoder.finalize(
final_outputs, final_state, final_sequence_lengths
)
except NotImplementedError:
pass
if not output_time_major:
if enable_tflite_convertible:
# Reshape the output to the original shape.
def _restore_batch(x):
return tf.expand_dims(x, [1])
final_outputs = tf.nest.map_structure(_restore_batch, final_outputs)
final_outputs = tf.nest.map_structure(_transpose_batch_time, final_outputs)
return final_outputs, final_state, final_sequence_lengths
def _prepend_batch(batch_size, shape):
"""Prepends the batch dimension to the shape.
If the batch_size value is known statically, this function returns a
TensorShape, otherwise a Tensor.
"""
if isinstance(batch_size, tf.Tensor):
static_batch_size = tf.get_static_value(batch_size)
else:
static_batch_size = batch_size
if static_batch_size is None:
return tf.concat(([batch_size], shape), axis=0)
return [static_batch_size] + shape
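# For example (illustrative): _prepend_batch(4, tf.TensorShape([3])) above yields
# the static shape [4, 3], while a batch size held in a dynamic Tensor (with no
# static value) yields a concatenated shape Tensor instead.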
def _transpose_batch_time(tensor):
"""Transposes the batch and time dimension of tensor if its rank is at
least 2."""
shape = tensor.shape
if shape.rank is not None and shape.rank < 2:
return tensor
perm = tf.concat(([1, 0], tf.range(2, tf.rank(tensor))), axis=0)
return tf.transpose(tensor, perm)
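# Usage sketch (illustrative, not part of this module): `tfa.seq2seq.BasicDecoder`
# is a `BaseDecoder` subclass, so calling it as a Keras layer runs the
# `dynamic_decode` loop defined above. Shapes and hyperparameters are made up.
#
#   import tensorflow as tf
#   import tensorflow_addons as tfa
#
#   batch_size, max_time, units = 4, 7, 16
#   cell = tf.keras.layers.LSTMCell(units)
#   sampler = tfa.seq2seq.TrainingSampler()
#   basic_decoder = tfa.seq2seq.BasicDecoder(cell, sampler)
#   inputs = tf.random.uniform([batch_size, max_time, units])
#   outputs, state, lengths = basic_decoder(
#       inputs,
#       initial_state=cell.get_initial_state(batch_size=batch_size, dtype=tf.float32),
#       sequence_length=tf.fill([batch_size], max_time))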
|
from collections import OrderedDict
import functools
import re
from typing import (
Dict,
Optional,
AsyncIterable,
Awaitable,
AsyncIterator,
Sequence,
Tuple,
Type,
Union,
)
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.bigquery_storage_v1beta2.types import storage
from google.cloud.bigquery_storage_v1beta2.types import stream
from google.cloud.bigquery_storage_v1beta2.types import table
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
from .transports.base import BigQueryWriteTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import BigQueryWriteGrpcAsyncIOTransport
from .client import BigQueryWriteClient
class BigQueryWriteAsyncClient:
"""BigQuery Write API.
The Write API can be used to write data to BigQuery.
"""
_client: BigQueryWriteClient
DEFAULT_ENDPOINT = BigQueryWriteClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = BigQueryWriteClient.DEFAULT_MTLS_ENDPOINT
table_path = staticmethod(BigQueryWriteClient.table_path)
parse_table_path = staticmethod(BigQueryWriteClient.parse_table_path)
write_stream_path = staticmethod(BigQueryWriteClient.write_stream_path)
parse_write_stream_path = staticmethod(BigQueryWriteClient.parse_write_stream_path)
common_billing_account_path = staticmethod(
BigQueryWriteClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
BigQueryWriteClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(BigQueryWriteClient.common_folder_path)
parse_common_folder_path = staticmethod(
BigQueryWriteClient.parse_common_folder_path
)
common_organization_path = staticmethod(
BigQueryWriteClient.common_organization_path
)
parse_common_organization_path = staticmethod(
BigQueryWriteClient.parse_common_organization_path
)
common_project_path = staticmethod(BigQueryWriteClient.common_project_path)
parse_common_project_path = staticmethod(
BigQueryWriteClient.parse_common_project_path
)
common_location_path = staticmethod(BigQueryWriteClient.common_location_path)
parse_common_location_path = staticmethod(
BigQueryWriteClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
BigQueryWriteAsyncClient: The constructed client.
"""
return BigQueryWriteClient.from_service_account_info.__func__(BigQueryWriteAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
BigQueryWriteAsyncClient: The constructed client.
"""
return BigQueryWriteClient.from_service_account_file.__func__(BigQueryWriteAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return BigQueryWriteClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> BigQueryWriteTransport:
"""Returns the transport used by the client instance.
Returns:
BigQueryWriteTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(BigQueryWriteClient).get_transport_class, type(BigQueryWriteClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, BigQueryWriteTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the big query write client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.BigQueryWriteTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = BigQueryWriteClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def create_write_stream(
self,
request: Union[storage.CreateWriteStreamRequest, dict] = None,
*,
parent: str = None,
write_stream: stream.WriteStream = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> stream.WriteStream:
r"""Creates a write stream to the given table. Additionally, every
table has a special COMMITTED stream named '_default' to which
data can be written. This stream doesn't need to be created
using CreateWriteStream. It is a stream that can be used
simultaneously by any number of clients. Data written to this
stream is considered committed as soon as an acknowledgement is
received.
.. code-block:: python
from google.cloud import bigquery_storage_v1beta2
def sample_create_write_stream():
# Create a client
client = bigquery_storage_v1beta2.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1beta2.CreateWriteStreamRequest(
parent="parent_value",
)
# Make the request
response = client.create_write_stream(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bigquery_storage_v1beta2.types.CreateWriteStreamRequest, dict]):
The request object. Request message for
`CreateWriteStream`.
parent (:class:`str`):
Required. Reference to the table to which the stream
belongs, in the format of
``projects/{project}/datasets/{dataset}/tables/{table}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
write_stream (:class:`google.cloud.bigquery_storage_v1beta2.types.WriteStream`):
Required. Stream to be created.
This corresponds to the ``write_stream`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_storage_v1beta2.types.WriteStream:
Information about a single stream
that gets data inside the storage
system.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, write_stream])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = storage.CreateWriteStreamRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if write_stream is not None:
request.write_stream = write_stream
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_write_stream,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ResourceExhausted,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def append_rows(
self,
requests: AsyncIterator[storage.AppendRowsRequest] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Awaitable[AsyncIterable[storage.AppendRowsResponse]]:
r"""Appends data to the given stream.
If ``offset`` is specified, the ``offset`` is checked against
the end of stream. The server returns ``OUT_OF_RANGE`` in
``AppendRowsResponse`` if an attempt is made to append to an
offset beyond the current end of the stream or
        ``ALREADY_EXISTS`` if the user provides an ``offset`` that has
        already been written to. The user can retry with an adjusted offset
within the same RPC stream. If ``offset`` is not specified,
append happens at the end of the stream.
The response contains the offset at which the append happened.
Responses are received in the same order in which requests are
sent. There will be one response for each successful request. If
        the ``offset`` is not set in the response, it means the append did not
        happen due to an error. If one request fails, all the
        subsequent requests will also fail until a successful request is
        made again.
If the stream is of ``PENDING`` type, data will only be
available for read operations after the stream is committed.
.. code-block:: python
from google.cloud import bigquery_storage_v1beta2
def sample_append_rows():
# Create a client
client = bigquery_storage_v1beta2.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1beta2.AppendRowsRequest(
write_stream="write_stream_value",
)
# This method expects an iterator which contains
# 'bigquery_storage_v1beta2.AppendRowsRequest' objects
# Here we create a generator that yields a single `request` for
# demonstrative purposes.
requests = [request]
def request_generator():
for request in requests:
yield request
# Make the request
stream = client.append_rows(requests=request_generator())
# Handle the response
for response in stream:
print(response)
Args:
requests (AsyncIterator[`google.cloud.bigquery_storage_v1beta2.types.AppendRowsRequest`]):
The request object AsyncIterator. Request message for `AppendRows`.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
AsyncIterable[google.cloud.bigquery_storage_v1beta2.types.AppendRowsResponse]:
Response message for AppendRows.
"""
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.append_rows,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.ResourceExhausted,
core_exceptions.ServiceUnavailable,
),
deadline=86400.0,
),
default_timeout=86400.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (gapic_v1.routing_header.to_grpc_metadata(()),)
# Send the request.
response = rpc(requests, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def get_write_stream(
self,
request: Union[storage.GetWriteStreamRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> stream.WriteStream:
r"""Gets a write stream.
.. code-block:: python
from google.cloud import bigquery_storage_v1beta2
def sample_get_write_stream():
# Create a client
client = bigquery_storage_v1beta2.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1beta2.GetWriteStreamRequest(
name="name_value",
)
# Make the request
response = client.get_write_stream(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bigquery_storage_v1beta2.types.GetWriteStreamRequest, dict]):
The request object. Request message for
                `GetWriteStream`.
name (:class:`str`):
Required. Name of the stream to get, in the form of
``projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_storage_v1beta2.types.WriteStream:
Information about a single stream
that gets data inside the storage
system.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = storage.GetWriteStreamRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_write_stream,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def finalize_write_stream(
self,
request: Union[storage.FinalizeWriteStreamRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> storage.FinalizeWriteStreamResponse:
r"""Finalize a write stream so that no new data can be appended to
the stream. Finalize is not supported on the '_default' stream.
.. code-block:: python
from google.cloud import bigquery_storage_v1beta2
def sample_finalize_write_stream():
# Create a client
client = bigquery_storage_v1beta2.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1beta2.FinalizeWriteStreamRequest(
name="name_value",
)
# Make the request
response = client.finalize_write_stream(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bigquery_storage_v1beta2.types.FinalizeWriteStreamRequest, dict]):
The request object. Request message for invoking
`FinalizeWriteStream`.
name (:class:`str`):
Required. Name of the stream to finalize, in the form of
``projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_storage_v1beta2.types.FinalizeWriteStreamResponse:
Response message for FinalizeWriteStream.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = storage.FinalizeWriteStreamRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.finalize_write_stream,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def batch_commit_write_streams(
self,
request: Union[storage.BatchCommitWriteStreamsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> storage.BatchCommitWriteStreamsResponse:
r"""Atomically commits a group of ``PENDING`` streams that belong to
the same ``parent`` table. Streams must be finalized before
commit and cannot be committed multiple times. Once a stream is
committed, data in the stream becomes available for read
operations.
.. code-block:: python
from google.cloud import bigquery_storage_v1beta2
def sample_batch_commit_write_streams():
# Create a client
client = bigquery_storage_v1beta2.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1beta2.BatchCommitWriteStreamsRequest(
parent="parent_value",
write_streams=['write_streams_value_1', 'write_streams_value_2'],
)
# Make the request
response = client.batch_commit_write_streams(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bigquery_storage_v1beta2.types.BatchCommitWriteStreamsRequest, dict]):
The request object. Request message for
`BatchCommitWriteStreams`.
parent (:class:`str`):
Required. Parent table that all the streams should
belong to, in the form of
``projects/{project}/datasets/{dataset}/tables/{table}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_storage_v1beta2.types.BatchCommitWriteStreamsResponse:
Response message for BatchCommitWriteStreams.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = storage.BatchCommitWriteStreamsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.batch_commit_write_streams,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def flush_rows(
self,
request: Union[storage.FlushRowsRequest, dict] = None,
*,
write_stream: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> storage.FlushRowsResponse:
r"""Flushes rows to a BUFFERED stream. If users are appending rows
to BUFFERED stream, flush operation is required in order for the
rows to become available for reading. A Flush operation flushes
up to any previously flushed offset in a BUFFERED stream, to the
offset specified in the request. Flush is not supported on the
\_default stream, since it is not BUFFERED.
.. code-block:: python
from google.cloud import bigquery_storage_v1beta2
def sample_flush_rows():
# Create a client
client = bigquery_storage_v1beta2.BigQueryWriteClient()
# Initialize request argument(s)
request = bigquery_storage_v1beta2.FlushRowsRequest(
write_stream="write_stream_value",
)
# Make the request
response = client.flush_rows(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.bigquery_storage_v1beta2.types.FlushRowsRequest, dict]):
The request object. Request message for `FlushRows`.
write_stream (:class:`str`):
Required. The stream that is the
target of the flush operation.
This corresponds to the ``write_stream`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.bigquery_storage_v1beta2.types.FlushRowsResponse:
                Response message for FlushRows.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([write_stream])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = storage.FlushRowsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if write_stream is not None:
request.write_stream = write_stream
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.flush_rows,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("write_stream", request.write_stream),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
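# Illustrative usage sketch (not part of the generated surface): the async client can be
# used as an async context manager, which closes the underlying transport on exit (see
# __aexit__ above). The helper name and the stream path below are hypothetical and assume
# application-default credentials are available.
async def _example_async_usage():
    async with BigQueryWriteAsyncClient() as client:
        write_stream = await client.get_write_stream(
            request=storage.GetWriteStreamRequest(
                name="projects/my-project/datasets/my_dataset/tables/my_table/streams/_default",
            )
        )
        print(write_stream.name)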
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-bigquery-storage",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("BigQueryWriteAsyncClient",)
|
"""Testing.
See the [Testing](https://tensorflow.org/api_docs/python/tf/test) guide.
Note: `tf.compat.v1.test.mock` is an alias to the python `mock` or
`unittest.mock` depending on the python version.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import test_util as _test_util
from tensorflow.python.platform import googletest as _googletest
from tensorflow.python.framework.test_util import assert_equal_graph_def
from tensorflow.python.framework.test_util import create_local_cluster
from tensorflow.python.framework.test_util import TensorFlowTestCase as TestCase
from tensorflow.python.framework.test_util import gpu_device_name
from tensorflow.python.framework.test_util import is_gpu_available
from tensorflow.python.ops.gradient_checker import compute_gradient_error
from tensorflow.python.ops.gradient_checker import compute_gradient
import sys
from tensorflow.python.util.tf_export import tf_export
if sys.version_info.major == 2:
import mock # pylint: disable=g-import-not-at-top,unused-import
else:
from unittest import mock # pylint: disable=g-import-not-at-top,g-importing-member
tf_export(v1=['test.mock'])(mock)
Benchmark = _googletest.Benchmark # pylint: disable=invalid-name
StubOutForTesting = _googletest.StubOutForTesting # pylint: disable=invalid-name
@tf_export('test.main')
def main(argv=None):
"""Runs all unit tests."""
_test_util.InstallStackTraceHandler()
return _googletest.main(argv)
@tf_export(v1=['test.get_temp_dir'])
def get_temp_dir():
"""Returns a temporary directory for use during tests.
There is no need to delete the directory after the test.
Returns:
The temporary directory.
"""
return _googletest.GetTempDir()
@tf_export(v1=['test.test_src_dir_path'])
def test_src_dir_path(relative_path):
"""Creates an absolute test srcdir path given a relative path.
Args:
relative_path: a path relative to tensorflow root.
e.g. "core/platform".
Returns:
An absolute path to the linked in runfiles.
"""
return _googletest.test_src_dir_path(relative_path)
@tf_export('test.is_built_with_cuda')
def is_built_with_cuda():
"""Returns whether TensorFlow was built with CUDA (GPU) support."""
return _test_util.IsGoogleCudaEnabled()
@tf_export('test.is_built_with_rocm')
def is_built_with_rocm():
"""Returns whether TensorFlow was built with ROCm (GPU) support."""
return _test_util.IsBuiltWithROCm()
@tf_export('test.is_built_with_gpu_support')
def is_built_with_gpu_support():
"""Returns whether TensorFlow was built with GPU (i.e. CUDA or ROCm) support."""
return is_built_with_cuda() or is_built_with_rocm()
@tf_export('test.is_built_with_xla')
def is_built_with_xla():
"""Returns whether TensorFlow was built with XLA support."""
return _test_util.IsBuiltWithXLA()
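# Illustrative usage sketch (not part of this module): the helpers above are normally
# consumed through the public `tf.test` namespace, e.g.
#
#   import tensorflow as tf
#
#   class SquareTest(tf.test.TestCase):
#       def test_square(self):
#           self.assertAllEqual(self.evaluate(tf.square([2, 3])), [4, 9])
#
#   if __name__ == "__main__":
#       tf.test.main()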
|
import click
from copy import copy
from netCDF4 import Dataset
import numpy as np
import numpy.ma as ma
import os
import rasterio
import rasterio.warp as rwarp
import time
import osr
from .. import geotools
from .. import utils
def earth_radius():
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
return srs.GetSemiMajor()
def init_nc(dst_ds, transform, lats, lons, years, variables):
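    # Lay out a CF-1.5 style netCDF file: time/lat/lon dimensions, coordinate variables,
    # a `crs` grid-mapping variable carrying the WGS84 definition and GeoTransform, and
    # one (time, lat, lon) data variable per entry in `variables`. Returns the data
    # variables keyed by name so callers can fill them year by year.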
# Set attributes
dst_ds.setncattr('Conventions', u'CF-1.5')
dst_ds.setncattr('GDAL', u'GDAL 1.11.3, released 2015/09/16')
# Create dimensions
dst_ds.createDimension('time', None)
dst_ds.createDimension('lat', len(lats))
dst_ds.createDimension('lon', len(lons))
# Create variables
times = dst_ds.createVariable("time", "f8", ("time"), zlib=True,
least_significant_digit=3)
latitudes = dst_ds.createVariable("lat", "f4", ("lat"), zlib=True,
least_significant_digit = 3)
longitudes = dst_ds.createVariable("lon", "f4", ("lon"), zlib=True,
least_significant_digit=3)
crs = dst_ds.createVariable('crs', "S1", ())
# Add metadata
dst_ds.history = "Created at " + time.ctime(time.time())
dst_ds.source = "gen-sps.py"
latitudes.units = "degrees_north"
latitudes.long_name = 'latitude'
longitudes.units = "degrees_east"
longitudes.long_name = "longitude"
times.units = "years since 2010-01-01 00:00:00.0"
times.calendar = "gregorian"
times.standard_name = "time"
times.axis = 'T'
# Assign data to variables
latitudes[:] = lats
longitudes[:] = lons
times[:] = years
srs = osr.SpatialReference()
srs.ImportFromWkt(geotools.WGS84_WKT)
crs.grid_mapping_name = 'latitude_longitude'
crs.spatial_ref = srs.ExportToWkt()
crs.GeoTransform = ' '.join(map(str, transform))
crs.longitude_of_prime_meridian = srs.GetPrimeMeridian()
crs.semi_major_axis = srs.GetSemiMajor()
crs.inverse_flattening = srs.GetInvFlattening()
out = {}
for name, dtype, units, fill in variables:
dst_data = dst_ds.createVariable(name, dtype,
("time", "lat","lon"), zlib = True,
least_significant_digit = 4,
fill_value = fill)
dst_data.units = units
dst_data.grid_mapping = 'crs'
out[name] = dst_data
return out
def get_transform(r1, r2):
# Get the geo transform using r1 resolution but r2 bounds
dst = rasterio.open(r1)
src = rasterio.open(r2)
#src_bounds = np.around(src.bounds, decimals=3)
affine, width, height = rwarp.calculate_default_transform(src.crs,
dst.crs,
src.width,
src.height,
*src.bounds,
resolution=dst.res)
ul = affine * (0.5, 0.5)
lr = affine * (width - 0.5, height - 0.5)
lats = np.linspace(ul[1], lr[1], height)
lons = np.linspace(ul[0], lr[0], width)
cratio = np.prod(dst.res) / np.prod(src.res)
#cratio = 1.0
static = rasterio.open(utils.luh2_static('carea'))
carea = static.read(1, window=static.window(*src.bounds))
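    # rcs is the area of one grid cell (km^2) on a spherical Earth: the band between the
    # cell's bounding parallels times its angular width in radians, scaled by the squared
    # Earth radius and converted from m^2 to km^2.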
rcs = (np.sin(np.radians(lats + dst.res[0] / 2.0)) -
np.sin(np.radians(lats - dst.res[0] / 2.0))) * \
(dst.res[0] * np.pi/180) * earth_radius() ** 2 / 1e6
#carea *= rcs.reshape(carea.shape[0], 1)
return affine, lats, lons, dst.res, cratio# / carea
def mixing(year):
if year % 10 == 0:
return [year]
y0 = year - (year % 10)
return (y0, y0 + 10)
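# Worked example of the decade interpolation used in main() below: mixing(2010) -> [2010]
# (a single raster), while mixing(2013) -> (2010, 2020) with blend weight
# f0 = (2013 % 10) / 10.0 = 0.3, i.e. the 2013 grid is 0.7 * raster(2010) + 0.3 * raster(2020).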
def resample(ds, bidx, resolution, resampling, out):
arr = ds.read(bidx, masked=True)
nodata = ds.nodatavals[bidx - 1]
    if nodata is None:  # the source has no nodata value set; fall back to a sentinel
nodata = -9999
if ds.crs.data == {}:
crs = ds.crs.from_string(u'epsg:4326')
else:
crs = ds.crs
newaff, width, height = rwarp.calculate_default_transform(crs, crs, ds.width,
ds.height,
*ds.bounds,
resolution=resolution)
out.mask.fill(False)
rwarp.reproject(arr, out,
src_transform = ds.affine,
dst_transform = newaff,
width = width,
height = height,
src_nodata = nodata,
dst_nodata = nodata,
src_crs = crs,
resampling = resampling)
out.mask = np.where(out == nodata, 1, 0)
def main():
years = range(2010, 2101)
ssps = ['ssp%d' % i for i in range(1, 6)]
variables = [(ssp, 'f4', 'ppl/km^2', -9999) for ssp in ssps]
fname = '%s/luh2/un_codes-full.tif' % utils.outdir()
affine, lats, lons, res, cfudge = get_transform(fname,
utils.sps(ssps[0], 2010))
arr = (ma.empty((len(lats), len(lons)), fill_value=-9999),
ma.empty((len(lats), len(lons)), fill_value=-9999))
oname = '%s/luh2/sps.nc' % utils.outdir()
with Dataset(oname, 'w') as out:
data = init_nc(out, affine.to_gdal(), lats, lons, years, variables)
for ssp in ssps:
print(ssp)
with click.progressbar(enumerate(years), length=len(years)) as bar:
for idx, year in bar:
yy = mixing(year)
                    files = [utils.sps(ssp, y) for y in yy]
                    # materialize the rasters so len() below also works under Python 3
                    rasters = [rasterio.open(f) for f in files]
if len(rasters) == 1:
resample(rasters[0], 1, res,
rwarp.Resampling.average, arr[0])
data[ssp][idx, :, :] = np.clip(arr[0], 0, None) * cfudge
else:
f0 = (year % 10) / 10.0
resample(rasters[0], 1, res,
rwarp.Resampling.average, arr[0])
resample(rasters[1], 1, res,
rwarp.Resampling.average, arr[1])
data[ssp][idx, :, :] = ((1 - f0) * np.clip(arr[0], 0, None) +
f0 * np.clip(arr[1], 0, None)) * cfudge
if __name__ == '__main__':
main()
|
import itertools
import json
import logging
import os
import time
from collections import deque
from six import iteritems, itervalues
from six.moves import queue as Queue
from pyspider.libs import counter, utils
from pyspider.libs.base_handler import BaseHandler
from .task_queue import TaskQueue
logger = logging.getLogger('scheduler')
class Project(object):
'''
project for scheduler
'''
def __init__(self, scheduler, project_info):
'''
'''
self.scheduler = scheduler
self.active_tasks = deque(maxlen=scheduler.ACTIVE_TASKS)
self.task_queue = TaskQueue()
self.task_loaded = False
self._selected_tasks = False # selected tasks after recent pause
self._send_finished_event_wait = 0 # wait for scheduler.FAIL_PAUSE_NUM loop steps before sending the event
self.md5sum = None
self._send_on_get_info = False
self.waiting_get_info = True
self._paused = False
self._paused_time = 0
self._unpause_last_seen = None
self.update(project_info)
@property
def paused(self):
# unpaused --(last FAIL_PAUSE_NUM task failed)--> paused --(PAUSE_TIME)--> unpause_checking
# unpaused <--(last UNPAUSE_CHECK_NUM task have success)--|
# paused <--(last UNPAUSE_CHECK_NUM task no success)--|
if not self._paused:
fail_cnt = 0
for _, task in self.active_tasks:
# ignore select task
if task.get('type') == self.scheduler.TASK_PACK:
continue
if 'process' not in task['track']:
logger.error('process not in task, %r', task)
if task['track']['process']['ok']:
break
else:
fail_cnt += 1
if fail_cnt >= self.scheduler.FAIL_PAUSE_NUM:
break
if fail_cnt >= self.scheduler.FAIL_PAUSE_NUM:
self._paused = True
self._paused_time = time.time()
elif self._paused is True and (self._paused_time + self.scheduler.PAUSE_TIME < time.time()):
self._paused = 'checking'
self._unpause_last_seen = self.active_tasks[0][1] if len(self.active_tasks) else None
elif self._paused == 'checking':
cnt = 0
fail_cnt = 0
for _, task in self.active_tasks:
if task is self._unpause_last_seen:
break
# ignore select task
if task.get('type') == self.scheduler.TASK_PACK:
continue
cnt += 1
if task['track']['process']['ok']:
# break with enough check cnt
cnt = max(cnt, self.scheduler.UNPAUSE_CHECK_NUM)
break
else:
fail_cnt += 1
if cnt >= self.scheduler.UNPAUSE_CHECK_NUM:
if fail_cnt == cnt:
self._paused = True
self._paused_time = time.time()
else:
self._paused = False
return self._paused is True
def update(self, project_info):
self.project_info = project_info
self.name = project_info['name']
self.group = project_info['group']
self.db_status = project_info['status']
self.updatetime = project_info['updatetime']
md5sum = utils.md5string(project_info['script'])
if (self.md5sum != md5sum or self.waiting_get_info) and self.active:
self._send_on_get_info = True
self.waiting_get_info = True
self.md5sum = md5sum
if self.active:
self.task_queue.rate = project_info['rate']
self.task_queue.burst = project_info['burst']
else:
self.task_queue.rate = 0
self.task_queue.burst = 0
logger.info('project %s updated, status:%s, paused:%s, %d tasks',
self.name, self.db_status, self.paused, len(self.task_queue))
def on_get_info(self, info):
self.waiting_get_info = False
self.min_tick = info.get('min_tick', 0)
self.retry_delay = info.get('retry_delay', {})
self.crawl_config = info.get('crawl_config', {})
@property
def active(self):
return self.db_status in ('RUNNING', 'DEBUG')
class Scheduler(object):
UPDATE_PROJECT_INTERVAL = 5 * 60
default_schedule = {
'priority': 0,
'retries': 3,
'exetime': 0,
'age': -1,
'itag': None,
}
LOOP_LIMIT = 1000
LOOP_INTERVAL = 0.1
ACTIVE_TASKS = 100
INQUEUE_LIMIT = 0
EXCEPTION_LIMIT = 3
DELETE_TIME = 24 * 60 * 60
DEFAULT_RETRY_DELAY = {
0: 30,
1: 1*60*60,
2: 6*60*60,
3: 12*60*60,
'': 24*60*60
}
FAIL_PAUSE_NUM = 10
PAUSE_TIME = 5*60
UNPAUSE_CHECK_NUM = 3
TASK_PACK = 1
    STATUS_PACK = 2  # currently not used
    REQUEST_PACK = 3  # currently not used
def __init__(self, taskdb, projectdb, newtask_queue, status_queue,
out_queue, data_path='./data', resultdb=None):
self.taskdb = taskdb
self.projectdb = projectdb
self.resultdb = resultdb
self.newtask_queue = newtask_queue
self.status_queue = status_queue
self.out_queue = out_queue
self.data_path = data_path
self._send_buffer = deque()
self._quit = False
self._exceptions = 0
self.projects = dict()
self._force_update_project = False
self._last_update_project = 0
self._last_tick = int(time.time())
self._postpone_request = []
self._cnt = {
"5m_time": counter.CounterManager(
lambda: counter.TimebaseAverageEventCounter(30, 10)),
"5m": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(30, 10)),
"1h": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(60, 60)),
"1d": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(10 * 60, 24 * 6)),
"all": counter.CounterManager(
lambda: counter.TotalCounter()),
}
self._cnt['1h'].load(os.path.join(self.data_path, 'scheduler.1h'))
self._cnt['1d'].load(os.path.join(self.data_path, 'scheduler.1d'))
self._cnt['all'].load(os.path.join(self.data_path, 'scheduler.all'))
self._last_dump_cnt = 0
def _update_projects(self):
'''Check project update'''
now = time.time()
if (
not self._force_update_project
and self._last_update_project + self.UPDATE_PROJECT_INTERVAL > now
):
return
for project in self.projectdb.check_update(self._last_update_project):
self._update_project(project)
logger.debug("project: %s updated.", project['name'])
self._force_update_project = False
self._last_update_project = now
get_info_attributes = ['min_tick', 'retry_delay', 'crawl_config']
def _update_project(self, project):
'''update one project'''
if project['name'] not in self.projects:
self.projects[project['name']] = Project(self, project)
else:
self.projects[project['name']].update(project)
project = self.projects[project['name']]
if project._send_on_get_info:
# update project runtime info from processor by sending a _on_get_info
# request, result is in status_page.track.save
project._send_on_get_info = False
self.on_select_task({
'taskid': '_on_get_info',
'project': project.name,
'url': 'data:,_on_get_info',
'status': self.taskdb.SUCCESS,
'fetch': {
'save': self.get_info_attributes,
},
'process': {
'callback': '_on_get_info',
},
})
        # load the task queue when the project is running and delete it when the project is stopped
if project.active:
if not project.task_loaded:
self._load_tasks(project)
project.task_loaded = True
else:
if project.task_loaded:
project.task_queue = TaskQueue()
project.task_loaded = False
if project not in self._cnt['all']:
self._update_project_cnt(project.name)
scheduler_task_fields = ['taskid', 'project', 'schedule', ]
def _load_tasks(self, project):
'''load tasks from database'''
task_queue = project.task_queue
for task in self.taskdb.load_tasks(
self.taskdb.ACTIVE, project.name, self.scheduler_task_fields
):
taskid = task['taskid']
_schedule = task.get('schedule', self.default_schedule)
priority = _schedule.get('priority', self.default_schedule['priority'])
exetime = _schedule.get('exetime', self.default_schedule['exetime'])
task_queue.put(taskid, priority, exetime)
project.task_loaded = True
logger.debug('project: %s loaded %d tasks.', project.name, len(task_queue))
if project not in self._cnt['all']:
            self._update_project_cnt(project.name)
self._cnt['all'].value((project.name, 'pending'), len(project.task_queue))
def _update_project_cnt(self, project_name):
status_count = self.taskdb.status_count(project_name)
self._cnt['all'].value(
(project_name, 'success'),
status_count.get(self.taskdb.SUCCESS, 0)
)
self._cnt['all'].value(
(project_name, 'failed'),
status_count.get(self.taskdb.FAILED, 0) + status_count.get(self.taskdb.BAD, 0)
)
self._cnt['all'].value(
(project_name, 'pending'),
status_count.get(self.taskdb.ACTIVE, 0)
)
def task_verify(self, task):
'''
return False if any of 'taskid', 'project', 'url' is not in task dict
        or the project is not in self.projects or is not active
'''
for each in ('taskid', 'project', 'url', ):
if each not in task or not task[each]:
logger.error('%s not in task: %.200r', each, task)
return False
if task['project'] not in self.projects:
logger.error('unknown project: %s', task['project'])
return False
project = self.projects[task['project']]
if not project.active:
logger.error('project %s not started, please set status to RUNNING or DEBUG',
task['project'])
return False
return True
def insert_task(self, task):
'''insert task into database'''
return self.taskdb.insert(task['project'], task['taskid'], task)
def update_task(self, task):
'''update task in database'''
return self.taskdb.update(task['project'], task['taskid'], task)
def put_task(self, task):
'''put task to task queue'''
_schedule = task.get('schedule', self.default_schedule)
self.projects[task['project']].task_queue.put(
task['taskid'],
priority=_schedule.get('priority', self.default_schedule['priority']),
exetime=_schedule.get('exetime', self.default_schedule['exetime'])
)
def send_task(self, task, force=True):
'''
dispatch task to fetcher
out queue may have size limit to prevent block, a send_buffer is used
'''
try:
self.out_queue.put_nowait(task)
except Queue.Full:
if force:
self._send_buffer.appendleft(task)
else:
raise
def _check_task_done(self):
'''Check status queue'''
cnt = 0
try:
while True:
task = self.status_queue.get_nowait()
# check _on_get_info result here
if task.get('taskid') == '_on_get_info' and 'project' in task and 'track' in task:
if task['project'] not in self.projects:
continue
project = self.projects[task['project']]
project.on_get_info(task['track'].get('save') or {})
logger.info(
'%s on_get_info %r', task['project'], task['track'].get('save', {})
)
continue
elif not self.task_verify(task):
continue
self.on_task_status(task)
cnt += 1
except Queue.Empty:
pass
return cnt
merge_task_fields = ['taskid', 'project', 'url', 'status', 'schedule', 'lastcrawltime']
def _check_request(self):
'''Check new task queue'''
# check _postpone_request first
todo = []
for task in self._postpone_request:
if task['project'] not in self.projects:
continue
if self.projects[task['project']].task_queue.is_processing(task['taskid']):
todo.append(task)
else:
self.on_request(task)
self._postpone_request = todo
tasks = {}
while len(tasks) < self.LOOP_LIMIT:
try:
task = self.newtask_queue.get_nowait()
except Queue.Empty:
break
if isinstance(task, list):
_tasks = task
else:
_tasks = (task, )
for task in _tasks:
if not self.task_verify(task):
continue
if task['taskid'] in self.projects[task['project']].task_queue:
if not task.get('schedule', {}).get('force_update', False):
logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task)
continue
if task['taskid'] in tasks:
if not task.get('schedule', {}).get('force_update', False):
continue
tasks[task['taskid']] = task
for task in itervalues(tasks):
self.on_request(task)
return len(tasks)
def _check_cronjob(self):
"""Check projects cronjob tick, return True when a new tick is sended"""
now = time.time()
self._last_tick = int(self._last_tick)
if now - self._last_tick < 1:
return False
self._last_tick += 1
for project in itervalues(self.projects):
if not project.active:
continue
if project.waiting_get_info:
continue
if project.min_tick == 0:
continue
if self._last_tick % int(project.min_tick) != 0:
continue
self.on_select_task({
'taskid': '_on_cronjob',
'project': project.name,
'url': 'data:,_on_cronjob',
'status': self.taskdb.SUCCESS,
'fetch': {
'save': {
'tick': self._last_tick,
},
},
'process': {
'callback': '_on_cronjob',
},
})
return True
request_task_fields = [
'taskid',
'project',
'url',
'status',
'schedule',
'fetch',
'process',
'track',
'lastcrawltime'
]
def _check_select(self):
'''Select task to fetch & process'''
while self._send_buffer:
_task = self._send_buffer.pop()
try:
# use force=False here to prevent automatic send_buffer append and get exception
self.send_task(_task, False)
except Queue.Full:
self._send_buffer.append(_task)
break
if self.out_queue.full():
return {}
taskids = []
cnt = 0
cnt_dict = dict()
limit = self.LOOP_LIMIT
for project in itervalues(self.projects):
if not project.active:
continue
            # only check project pause when selecting new tasks; cronjob and new requests keep working
if project.paused:
continue
if project.waiting_get_info:
continue
if cnt >= limit:
break
# task queue
task_queue = project.task_queue
task_queue.check_update()
project_cnt = 0
            # check send_buffer here: when it is not empty, out_queue may be blocked, so do not send tasks
while cnt < limit and project_cnt < limit / 10:
taskid = task_queue.get()
if not taskid:
break
taskids.append((project.name, taskid))
if taskid != 'on_finished':
project_cnt += 1
cnt += 1
cnt_dict[project.name] = project_cnt
if project_cnt:
project._selected_tasks = True
project._send_finished_event_wait = 0
# check and send finished event to project
if not project_cnt and len(task_queue) == 0 and project._selected_tasks:
# wait for self.FAIL_PAUSE_NUM steps to make sure all tasks in queue have been processed
if project._send_finished_event_wait < self.FAIL_PAUSE_NUM:
project._send_finished_event_wait += 1
else:
project._selected_tasks = False
project._send_finished_event_wait = 0
self.newtask_queue.put({
'project': project.name,
'taskid': 'on_finished',
'url': 'data:,on_finished',
'process': {
'callback': 'on_finished',
},
"schedule": {
"age": 0,
"priority": 9,
"force_update": True,
},
})
for project, taskid in taskids:
self._load_put_task(project, taskid)
return cnt_dict
def _load_put_task(self, project, taskid):
try:
task = self.taskdb.get_task(project, taskid, fields=self.request_task_fields)
except ValueError:
logger.error('bad task pack %s:%s', project, taskid)
return
if not task:
return
task = self.on_select_task(task)
def _print_counter_log(self):
# print top 5 active counters
keywords = ('pending', 'success', 'retry', 'failed')
total_cnt = {}
project_actives = []
project_fails = []
for key in keywords:
total_cnt[key] = 0
for project, subcounter in iteritems(self._cnt['5m']):
actives = 0
for key in keywords:
cnt = subcounter.get(key, None)
if cnt:
cnt = cnt.sum
total_cnt[key] += cnt
actives += cnt
project_actives.append((actives, project))
fails = subcounter.get('failed', None)
if fails:
project_fails.append((fails.sum, project))
top_2_fails = sorted(project_fails, reverse=True)[:2]
top_3_actives = sorted([x for x in project_actives if x[1] not in top_2_fails],
reverse=True)[:5 - len(top_2_fails)]
log_str = ("in 5m: new:%(pending)d,success:%(success)d,"
"retry:%(retry)d,failed:%(failed)d" % total_cnt)
for _, project in itertools.chain(top_3_actives, top_2_fails):
subcounter = self._cnt['5m'][project].to_dict(get_value='sum')
log_str += " %s:%d,%d,%d,%d" % (project,
subcounter.get('pending', 0),
subcounter.get('success', 0),
subcounter.get('retry', 0),
subcounter.get('failed', 0))
logger.info(log_str)
def _dump_cnt(self):
'''Dump counters to file'''
self._cnt['1h'].dump(os.path.join(self.data_path, 'scheduler.1h'))
self._cnt['1d'].dump(os.path.join(self.data_path, 'scheduler.1d'))
self._cnt['all'].dump(os.path.join(self.data_path, 'scheduler.all'))
def _try_dump_cnt(self):
'''Dump counters every 60 seconds'''
now = time.time()
if now - self._last_dump_cnt > 60:
self._last_dump_cnt = now
self._dump_cnt()
self._print_counter_log()
def _check_delete(self):
'''Check project delete'''
now = time.time()
for project in list(itervalues(self.projects)):
if project.db_status != 'STOP':
continue
if now - project.updatetime < self.DELETE_TIME:
continue
if 'delete' not in self.projectdb.split_group(project.group):
continue
logger.warning("deleting project: %s!", project.name)
del self.projects[project.name]
self.taskdb.drop(project.name)
self.projectdb.drop(project.name)
if self.resultdb:
self.resultdb.drop(project.name)
for each in self._cnt.values():
del each[project.name]
def __len__(self):
return sum(len(x.task_queue) for x in itervalues(self.projects))
def quit(self):
'''Set quit signal'''
self._quit = True
# stop xmlrpc server
if hasattr(self, 'xmlrpc_server'):
self.xmlrpc_ioloop.add_callback(self.xmlrpc_server.stop)
self.xmlrpc_ioloop.add_callback(self.xmlrpc_ioloop.stop)
def run_once(self):
        '''consume queues and feed tasks to the fetcher, once'''
self._update_projects()
self._check_task_done()
self._check_request()
while self._check_cronjob():
pass
self._check_select()
self._check_delete()
self._try_dump_cnt()
def run(self):
'''Start scheduler loop'''
logger.info("scheduler starting...")
while not self._quit:
try:
time.sleep(self.LOOP_INTERVAL)
self.run_once()
self._exceptions = 0
except KeyboardInterrupt:
break
except Exception as e:
logger.exception(e)
self._exceptions += 1
if self._exceptions > self.EXCEPTION_LIMIT:
break
continue
logger.info("scheduler exiting...")
self._dump_cnt()
def trigger_on_start(self, project):
'''trigger an on_start callback of project'''
self.newtask_queue.put({
"project": project,
"taskid": "on_start",
"url": "data:,on_start",
"process": {
"callback": "on_start",
},
})
def xmlrpc_run(self, port=23333, bind='127.0.0.1', logRequests=False):
'''Start xmlrpc interface'''
from pyspider.libs.wsgi_xmlrpc import WSGIXMLRPCApplication
application = WSGIXMLRPCApplication()
application.register_function(self.quit, '_quit')
application.register_function(self.__len__, 'size')
def dump_counter(_time, _type):
try:
return self._cnt[_time].to_dict(_type)
except:
logger.exception('')
application.register_function(dump_counter, 'counter')
def new_task(task):
if self.task_verify(task):
self.newtask_queue.put(task)
return True
return False
application.register_function(new_task, 'newtask')
def send_task(task):
'''dispatch task to fetcher'''
self.send_task(task)
return True
application.register_function(send_task, 'send_task')
def update_project():
self._force_update_project = True
application.register_function(update_project, 'update_project')
def get_active_tasks(project=None, limit=100):
allowed_keys = set((
'type',
'taskid',
'project',
'status',
'url',
'lastcrawltime',
'updatetime',
'track',
))
track_allowed_keys = set((
'ok',
'time',
'follows',
'status_code',
))
iters = [iter(x.active_tasks) for k, x in iteritems(self.projects)
if x and (k == project if project else True)]
tasks = [next(x, None) for x in iters]
result = []
while len(result) < limit and tasks and not all(x is None for x in tasks):
updatetime, task = t = max(t for t in tasks if t)
i = tasks.index(t)
tasks[i] = next(iters[i], None)
for key in list(task):
if key == 'track':
for k in list(task[key].get('fetch', [])):
if k not in track_allowed_keys:
del task[key]['fetch'][k]
for k in list(task[key].get('process', [])):
if k not in track_allowed_keys:
del task[key]['process'][k]
if key in allowed_keys:
continue
del task[key]
result.append(t)
# fix for "<type 'exceptions.TypeError'>:dictionary key must be string"
# have no idea why
return json.loads(json.dumps(result))
application.register_function(get_active_tasks, 'get_active_tasks')
def get_projects_pause_status():
result = {}
for project_name, project in iteritems(self.projects):
result[project_name] = project.paused
return result
application.register_function(get_projects_pause_status, 'get_projects_pause_status')
def webui_update():
return {
'pause_status': get_projects_pause_status(),
'counter': {
'5m_time': dump_counter('5m_time', 'avg'),
'5m': dump_counter('5m', 'sum'),
'1h': dump_counter('1h', 'sum'),
'1d': dump_counter('1d', 'sum'),
'all': dump_counter('all', 'sum'),
},
}
application.register_function(webui_update, 'webui_update')
import tornado.wsgi
import tornado.ioloop
import tornado.httpserver
container = tornado.wsgi.WSGIContainer(application)
self.xmlrpc_ioloop = tornado.ioloop.IOLoop()
self.xmlrpc_server = tornado.httpserver.HTTPServer(container, io_loop=self.xmlrpc_ioloop)
self.xmlrpc_server.listen(port=port, address=bind)
logger.info('scheduler.xmlrpc listening on %s:%s', bind, port)
self.xmlrpc_ioloop.start()
def on_request(self, task):
if self.INQUEUE_LIMIT and len(self.projects[task['project']].task_queue) >= self.INQUEUE_LIMIT:
logger.debug('overflow task %(project)s:%(taskid)s %(url)s', task)
return
oldtask = self.taskdb.get_task(task['project'], task['taskid'],
fields=self.merge_task_fields)
if oldtask:
return self.on_old_request(task, oldtask)
else:
return self.on_new_request(task)
def on_new_request(self, task):
'''Called when a new request is arrived'''
task['status'] = self.taskdb.ACTIVE
self.insert_task(task)
self.put_task(task)
project = task['project']
self._cnt['5m'].event((project, 'pending'), +1)
self._cnt['1h'].event((project, 'pending'), +1)
self._cnt['1d'].event((project, 'pending'), +1)
self._cnt['all'].event((project, 'pending'), +1)
logger.info('new task %(project)s:%(taskid)s %(url)s', task)
return task
def on_old_request(self, task, old_task):
'''Called when a crawled task is arrived'''
now = time.time()
_schedule = task.get('schedule', self.default_schedule)
old_schedule = old_task.get('schedule', {})
if _schedule.get('force_update') and self.projects[task['project']].task_queue.is_processing(task['taskid']):
            # when a task is being processed, the modification may conflict with the running task;
            # postpone the modification until the task finishes.
logger.info('postpone modify task %(project)s:%(taskid)s %(url)s', task)
self._postpone_request.append(task)
return
restart = False
schedule_age = _schedule.get('age', self.default_schedule['age'])
if _schedule.get('itag') and _schedule['itag'] != old_schedule.get('itag'):
restart = True
elif schedule_age >= 0 and schedule_age + (old_task.get('lastcrawltime', 0) or 0) < now:
restart = True
elif _schedule.get('force_update'):
restart = True
if not restart:
logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task)
return
if _schedule.get('cancel'):
logger.info('cancel task %(project)s:%(taskid)s %(url)s', task)
task['status'] = self.taskdb.BAD
self.update_task(task)
self.projects[task['project']].task_queue.delete(task['taskid'])
return task
task['status'] = self.taskdb.ACTIVE
self.update_task(task)
self.put_task(task)
project = task['project']
if old_task['status'] != self.taskdb.ACTIVE:
self._cnt['5m'].event((project, 'pending'), +1)
self._cnt['1h'].event((project, 'pending'), +1)
self._cnt['1d'].event((project, 'pending'), +1)
if old_task['status'] == self.taskdb.SUCCESS:
self._cnt['all'].event((project, 'success'), -1).event((project, 'pending'), +1)
elif old_task['status'] == self.taskdb.FAILED:
self._cnt['all'].event((project, 'failed'), -1).event((project, 'pending'), +1)
logger.info('restart task %(project)s:%(taskid)s %(url)s', task)
return task
def on_task_status(self, task):
'''Called when a status pack is arrived'''
try:
procesok = task['track']['process']['ok']
if not self.projects[task['project']].task_queue.done(task['taskid']):
logging.error('not processing pack: %(project)s:%(taskid)s %(url)s', task)
return None
except KeyError as e:
logger.error("Bad status pack: %s", e)
return None
if procesok:
ret = self.on_task_done(task)
else:
ret = self.on_task_failed(task)
if task['track']['fetch'].get('time'):
self._cnt['5m_time'].event((task['project'], 'fetch_time'),
task['track']['fetch']['time'])
if task['track']['process'].get('time'):
self._cnt['5m_time'].event((task['project'], 'process_time'),
task['track']['process'].get('time'))
self.projects[task['project']].active_tasks.appendleft((time.time(), task))
return ret
def on_task_done(self, task):
'''Called when a task is done and success, called by `on_task_status`'''
task['status'] = self.taskdb.SUCCESS
task['lastcrawltime'] = time.time()
if 'schedule' in task:
if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
task['status'] = self.taskdb.ACTIVE
next_exetime = task['schedule'].get('age')
task['schedule']['exetime'] = time.time() + next_exetime
self.put_task(task)
else:
del task['schedule']
self.update_task(task)
project = task['project']
self._cnt['5m'].event((project, 'success'), +1)
self._cnt['1h'].event((project, 'success'), +1)
self._cnt['1d'].event((project, 'success'), +1)
self._cnt['all'].event((project, 'success'), +1).event((project, 'pending'), -1)
logger.info('task done %(project)s:%(taskid)s %(url)s', task)
return task
def on_task_failed(self, task):
'''Called when a task is failed, called by `on_task_status`'''
if 'schedule' not in task:
old_task = self.taskdb.get_task(task['project'], task['taskid'], fields=['schedule'])
if old_task is None:
logging.error('unknown status pack: %s' % task)
return
task['schedule'] = old_task.get('schedule', {})
retries = task['schedule'].get('retries', self.default_schedule['retries'])
retried = task['schedule'].get('retried', 0)
project_info = self.projects[task['project']]
retry_delay = project_info.retry_delay or self.DEFAULT_RETRY_DELAY
next_exetime = retry_delay.get(retried, retry_delay.get('', self.DEFAULT_RETRY_DELAY['']))
if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
next_exetime = min(next_exetime, task['schedule'].get('age'))
else:
if retried >= retries:
next_exetime = -1
elif 'age' in task['schedule'] and next_exetime > task['schedule'].get('age'):
next_exetime = task['schedule'].get('age')
if next_exetime < 0:
task['status'] = self.taskdb.FAILED
task['lastcrawltime'] = time.time()
self.update_task(task)
project = task['project']
self._cnt['5m'].event((project, 'failed'), +1)
self._cnt['1h'].event((project, 'failed'), +1)
self._cnt['1d'].event((project, 'failed'), +1)
self._cnt['all'].event((project, 'failed'), +1).event((project, 'pending'), -1)
logger.info('task failed %(project)s:%(taskid)s %(url)s' % task)
return task
else:
task['schedule']['retried'] = retried + 1
task['schedule']['exetime'] = time.time() + next_exetime
task['lastcrawltime'] = time.time()
self.update_task(task)
self.put_task(task)
project = task['project']
self._cnt['5m'].event((project, 'retry'), +1)
self._cnt['1h'].event((project, 'retry'), +1)
self._cnt['1d'].event((project, 'retry'), +1)
# self._cnt['all'].event((project, 'retry'), +1)
logger.info('task retry %d/%d %%(project)s:%%(taskid)s %%(url)s' % (
retried, retries), task)
return task
def on_select_task(self, task):
'''Called when a task is selected to fetch & process'''
        # inject information about the project
logger.info('select %(project)s:%(taskid)s %(url)s', task)
project_info = self.projects.get(task['project'])
assert project_info, 'no such project'
task['type'] = self.TASK_PACK
task['group'] = project_info.group
task['project_md5sum'] = project_info.md5sum
task['project_updatetime'] = project_info.updatetime
# lazy join project.crawl_config
if getattr(project_info, 'crawl_config', None):
task = BaseHandler.task_join_crawl_config(task, project_info.crawl_config)
project_info.active_tasks.appendleft((time.time(), task))
self.send_task(task)
return task
from tornado import gen
class OneScheduler(Scheduler):
"""
    Scheduler mixin class for "one" mode:
    overrides the send_task method to
    call processor.on_task(fetcher.fetch(task)) instead of consuming a queue
"""
def _check_select(self):
"""
interactive mode of select tasks
"""
if not self.interactive:
return super(OneScheduler, self)._check_select()
# waiting for running tasks
if self.running_task > 0:
return
is_crawled = []
def run(project=None):
return crawl('on_start', project=project)
def crawl(url, project=None, **kwargs):
"""
Crawl given url, same parameters as BaseHandler.crawl
url - url or taskid, parameters will be used if in taskdb
project - can be ignored if only one project exists.
"""
# looking up the project instance
if project is None:
if len(self.projects) == 1:
project = list(self.projects.keys())[0]
else:
                    raise LookupError('You need to specify the project: %r'
% list(self.projects.keys()))
project_data = self.processor.project_manager.get(project)
if not project_data:
raise LookupError('no such project: %s' % project)
# get task package
instance = project_data['instance']
instance._reset()
task = instance.crawl(url, **kwargs)
if isinstance(task, list):
raise Exception('url list is not allowed in interactive mode')
# check task in taskdb
if not kwargs:
dbtask = self.taskdb.get_task(task['project'], task['taskid'],
fields=self.request_task_fields)
if not dbtask:
dbtask = self.taskdb.get_task(task['project'], task['url'],
fields=self.request_task_fields)
if dbtask:
task = dbtask
# select the task
self.on_select_task(task)
is_crawled.append(True)
shell.ask_exit()
def quit_interactive():
'''Quit interactive mode'''
is_crawled.append(True)
self.interactive = False
shell.ask_exit()
def quit_pyspider():
'''Close pyspider'''
is_crawled[:] = []
shell.ask_exit()
shell = utils.get_python_console()
banner = (
'pyspider shell - Select task\n'
'crawl(url, project=None, **kwargs) - same parameters as BaseHandler.crawl\n'
'quit_interactive() - Quit interactive mode\n'
'quit_pyspider() - Close pyspider'
)
if hasattr(shell, 'show_banner'):
shell.show_banner(banner)
shell.interact()
else:
shell.interact(banner)
if not is_crawled:
self.ioloop.add_callback(self.ioloop.stop)
def __getattr__(self, name):
"""patch for crawl(url, callback=self.index_page) API"""
if self.interactive:
return name
raise AttributeError(name)
def on_task_status(self, task):
"""Ignore not processing error in interactive mode"""
if not self.interactive:
super(OneScheduler, self).on_task_status(task)
try:
procesok = task['track']['process']['ok']
except KeyError as e:
logger.error("Bad status pack: %s", e)
return None
if procesok:
ret = self.on_task_done(task)
else:
ret = self.on_task_failed(task)
if task['track']['fetch'].get('time'):
self._cnt['5m_time'].event((task['project'], 'fetch_time'),
task['track']['fetch']['time'])
if task['track']['process'].get('time'):
self._cnt['5m_time'].event((task['project'], 'process_time'),
task['track']['process'].get('time'))
self.projects[task['project']].active_tasks.appendleft((time.time(), task))
return ret
def init_one(self, ioloop, fetcher, processor,
result_worker=None, interactive=False):
self.ioloop = ioloop
self.fetcher = fetcher
self.processor = processor
self.result_worker = result_worker
self.interactive = interactive
self.running_task = 0
@gen.coroutine
def do_task(self, task):
self.running_task += 1
result = yield gen.Task(self.fetcher.fetch, task)
type, task, response = result.args
self.processor.on_task(task, response)
        # handle messages produced by the processor
while not self.processor.inqueue.empty():
_task, _response = self.processor.inqueue.get()
self.processor.on_task(_task, _response)
        # handle results produced by the processor
while not self.processor.result_queue.empty():
_task, _result = self.processor.result_queue.get()
if self.result_worker:
self.result_worker.on_result(_task, _result)
self.running_task -= 1
def send_task(self, task, force=True):
if self.fetcher.http_client.free_size() <= 0:
if force:
self._send_buffer.appendleft(task)
else:
raise self.outqueue.Full
self.ioloop.add_future(self.do_task(task), lambda x: x.result())
def run(self):
import tornado.ioloop
tornado.ioloop.PeriodicCallback(self.run_once, 100,
io_loop=self.ioloop).start()
self.ioloop.start()
def quit(self):
self.ioloop.stop()
logger.info("scheduler exiting...")
import random
import threading
from pyspider.database.sqlite.sqlitebase import SQLiteMixin
class ThreadBaseScheduler(Scheduler):
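    # Runs scheduler callbacks in a small pool of worker threads, giving each thread its own
    # copy of the task/project/result databases via thread-local storage; SQLite connections
    # are not safe to share across threads, so the pool is limited to a single worker when the
    # task db is SQLite-based.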
def __init__(self, threads=4, *args, **kwargs):
self.local = threading.local()
super(ThreadBaseScheduler, self).__init__(*args, **kwargs)
if isinstance(self.taskdb, SQLiteMixin):
self.threads = 1
else:
self.threads = threads
self._taskdb = self.taskdb
self._projectdb = self.projectdb
self._resultdb = self.resultdb
self.thread_objs = []
self.thread_queues = []
self._start_threads()
assert len(self.thread_queues) > 0
@property
def taskdb(self):
if not hasattr(self.local, 'taskdb'):
self.taskdb = self._taskdb.copy()
return self.local.taskdb
@taskdb.setter
def taskdb(self, taskdb):
self.local.taskdb = taskdb
@property
def projectdb(self):
if not hasattr(self.local, 'projectdb'):
self.projectdb = self._projectdb.copy()
return self.local.projectdb
@projectdb.setter
def projectdb(self, projectdb):
self.local.projectdb = projectdb
@property
def resultdb(self):
if not hasattr(self.local, 'resultdb'):
self.resultdb = self._resultdb.copy()
return self.local.resultdb
@resultdb.setter
def resultdb(self, resultdb):
self.local.resultdb = resultdb
def _start_threads(self):
for i in range(self.threads):
queue = Queue.Queue()
thread = threading.Thread(target=self._thread_worker, args=(queue, ))
thread.daemon = True
thread.start()
self.thread_objs.append(thread)
self.thread_queues.append(queue)
def _thread_worker(self, queue):
while True:
method, args, kwargs = queue.get()
try:
method(*args, **kwargs)
except Exception as e:
logger.exception(e)
def _run_in_thread(self, method, *args, **kwargs):
i = kwargs.pop('_i', None)
block = kwargs.pop('_block', False)
if i is None:
while True:
for queue in self.thread_queues:
if queue.empty():
break
else:
if block:
time.sleep(0.1)
continue
else:
queue = self.thread_queues[random.randint(0, len(self.thread_queues)-1)]
break
else:
queue = self.thread_queues[i % len(self.thread_queues)]
queue.put((method, args, kwargs))
if block:
self._wait_thread()
def _wait_thread(self):
while True:
if all(queue.empty() for queue in self.thread_queues):
break
time.sleep(0.1)
def _update_project(self, project):
self._run_in_thread(Scheduler._update_project, self, project)
def on_task_status(self, task):
i = hash(task['taskid'])
self._run_in_thread(Scheduler.on_task_status, self, task, _i=i)
def on_request(self, task):
i = hash(task['taskid'])
self._run_in_thread(Scheduler.on_request, self, task, _i=i)
def _load_put_task(self, project, taskid):
i = hash(taskid)
self._run_in_thread(Scheduler._load_put_task, self, project, taskid, _i=i)
def run_once(self):
super(ThreadBaseScheduler, self).run_once()
self._wait_thread()
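# A minimal, self-contained sketch (not part of pyspider) of the dispatch rule used by
# ThreadBaseScheduler.on_task_status / on_request above: hashing the taskid and taking it
# modulo the number of worker queues pins every update for the same task to the same worker,
# which preserves per-task ordering. The queue count and taskids below are made up.
def _demo_hash_dispatch(taskids, n_queues=4):
    queues = [[] for _ in range(n_queues)]
    for taskid in taskids:
        queues[hash(taskid) % n_queues].append(taskid)
    return queues
# _demo_hash_dispatch(['task-a', 'task-b', 'task-a'])  # every 'task-a' lands in the same list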
|
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from wavefront_api_client.configuration import Configuration
class ResponseContainerPagedRecentTracesSearch(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'response': 'PagedRecentTracesSearch',
'status': 'ResponseStatus'
}
attribute_map = {
'response': 'response',
'status': 'status'
}
def __init__(self, response=None, status=None, _configuration=None): # noqa: E501
"""ResponseContainerPagedRecentTracesSearch - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._response = None
self._status = None
self.discriminator = None
if response is not None:
self.response = response
self.status = status
@property
def response(self):
"""Gets the response of this ResponseContainerPagedRecentTracesSearch. # noqa: E501
:return: The response of this ResponseContainerPagedRecentTracesSearch. # noqa: E501
:rtype: PagedRecentTracesSearch
"""
return self._response
@response.setter
def response(self, response):
"""Sets the response of this ResponseContainerPagedRecentTracesSearch.
:param response: The response of this ResponseContainerPagedRecentTracesSearch. # noqa: E501
:type: PagedRecentTracesSearch
"""
self._response = response
@property
def status(self):
"""Gets the status of this ResponseContainerPagedRecentTracesSearch. # noqa: E501
:return: The status of this ResponseContainerPagedRecentTracesSearch. # noqa: E501
:rtype: ResponseStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ResponseContainerPagedRecentTracesSearch.
:param status: The status of this ResponseContainerPagedRecentTracesSearch. # noqa: E501
:type: ResponseStatus
"""
if self._configuration.client_side_validation and status is None:
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResponseContainerPagedRecentTracesSearch, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResponseContainerPagedRecentTracesSearch):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ResponseContainerPagedRecentTracesSearch):
return True
return self.to_dict() != other.to_dict()
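# A hedged sketch of the raw REST call described in the module docstring above: outside the
# generated client you authenticate by sending an "Authorization: Bearer <API token>" header
# yourself. The cluster URL, the endpoint and the `requests` dependency are assumptions for
# illustration only; the generated client handles authentication via Configuration instead.
def _example_raw_wavefront_request(cluster_url, api_token):
    import requests  # assumed to be installed; not a dependency of this generated module
    return requests.get(
        cluster_url.rstrip('/') + '/api/v2/source',
        headers={'Authorization': 'Bearer ' + api_token},
    )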
|
"""
- 1. This model has 1,068,298 parameters and uses a quantization compression strategy
  (weights: 8 bits, activations: 8 bits here; you can change the setting). After 705
  epochs of training on a GPU, a test accuracy of 84.0% was reached. (A toy sketch of
  the uniform quantization idea appears right after the model definition below.)
- 2. For simplified CNN layers see "Convolutional layer (Simplified)"
  in the Read the Docs documentation.
- 3. For data augmentation without TFRecord, see `tutorial_image_preprocess.py`.
Links
-------
.. paper:https://arxiv.org/abs/1712.05877
Note
------
The optimizer used here differs from the one used in the official code.
Description
-----------
The images are processed as follows:
.. They are cropped to 24 x 24 pixels, centrally for evaluation or randomly for training.
.. They are approximately whitened to make the model insensitive to dynamic range.
For training, we additionally apply a series of random distortions to
artificially increase the data set size:
.. Randomly flip the image from left to right.
.. Randomly distort the image brightness.
.. Randomly distort the image contrast.
Speed Up
--------
Reading images from disk and distorting them can use a non-trivial amount
of processing time. To prevent these operations from slowing down training,
we run them inside 16 separate threads which continuously fill a TensorFlow queue.
"""
import multiprocessing
import time
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import (Dense, Flatten, Input, MaxPool2d, QuanConv2dWithBN, QuanDense)
from tensorlayer.models import Model
tl.logging.set_verbosity(tl.logging.DEBUG)
X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
def model(input_shape, n_classes, bitW, bitA):
in_net = Input(shape=input_shape, name='input')
net = QuanConv2dWithBN(64, (5, 5), (1, 1), act='relu', padding='SAME', bitW=bitW, bitA=bitA, name='qcnnbn1')(in_net)
net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(net)
net = QuanConv2dWithBN(64, (5, 5), (1, 1), padding='SAME', act='relu', bitW=bitW, bitA=bitA, name='qcnnbn2')(net)
net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(net)
net = Flatten(name='flatten')(net)
net = QuanDense(384, act=tf.nn.relu, bitW=bitW, bitA=bitA, name='qd1relu')(net)
net = QuanDense(192, act=tf.nn.relu, bitW=bitW, bitA=bitA, name='qd2relu')(net)
net = Dense(n_classes, act=None, name='output')(net)
net = Model(inputs=in_net, outputs=net, name='dorefanet')
return net
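# A toy sketch (not TensorLayer's implementation) of the uniform quantization idea from the
# paper linked in the docstring: values are mapped onto 2**bits evenly spaced levels over
# their observed range. QuanConv2dWithBN / QuanDense above do the real work during training;
# this helper only illustrates the rounding step on a NumPy array.
def _toy_uniform_quantize(x, bits):
    levels = (2 ** bits) - 1
    x_min, x_max = np.min(x), np.max(x)
    scale = max(x_max - x_min, 1e-8)
    x01 = (x - x_min) / scale                     # normalize to [0, 1]
    return np.round(x01 * levels) / levels * scale + x_min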
bitW = 8
bitA = 8
net = model([None, 24, 24, 3], n_classes=10, bitW=bitW, bitA=bitA)
batch_size = 128
n_epoch = 50000
learning_rate = 0.0001
print_freq = 5
n_step_epoch = int(len(y_train) / batch_size)
n_step = n_epoch * n_step_epoch
shuffle_buffer_size = 128
optimizer = tf.optimizers.Adam(learning_rate)
cost = tl.cost.cross_entropy
def generator_train():
inputs = X_train
targets = y_train
if len(inputs) != len(targets):
raise AssertionError("The length of inputs and targets should be equal")
for _input, _target in zip(inputs, targets):
# yield _input.encode('utf-8'), _target.encode('utf-8')
yield _input, _target
def generator_test():
inputs = X_test
targets = y_test
if len(inputs) != len(targets):
raise AssertionError("The length of inputs and targets should be equal")
for _input, _target in zip(inputs, targets):
# yield _input.encode('utf-8'), _target.encode('utf-8')
yield _input, _target
def _map_fn_train(img, target):
# 1. Randomly crop a [height, width] section of the image.
img = tf.image.random_crop(img, [24, 24, 3])
# 2. Randomly flip the image horizontally.
img = tf.image.random_flip_left_right(img)
# 3. Randomly change brightness.
img = tf.image.random_brightness(img, max_delta=63)
# 4. Randomly change contrast.
img = tf.image.random_contrast(img, lower=0.2, upper=1.8)
# 5. Subtract off the mean and divide by the variance of the pixels.
img = tf.image.per_image_standardization(img)
target = tf.reshape(target, ())
return img, target
def _map_fn_test(img, target):
# 1. Crop the central [height, width] of the image.
img = tf.image.resize_with_pad(img, 24, 24)
# 2. Subtract off the mean and divide by the variance of the pixels.
img = tf.image.per_image_standardization(img)
img = tf.reshape(img, (24, 24, 3))
target = tf.reshape(target, ())
return img, target
def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
with tf.GradientTape() as tape:
y_pred = network(X_batch)
_loss = cost(y_pred, y_batch)
grad = tape.gradient(_loss, network.trainable_weights)
train_op.apply_gradients(zip(grad, network.trainable_weights))
if acc is not None:
_acc = acc(y_pred, y_batch)
return _loss, _acc
else:
return _loss, None
def accuracy(_logits, y_batch):
return np.mean(np.equal(np.argmax(_logits, 1), y_batch))
train_ds = tf.data.Dataset.from_generator(
generator_train, output_types=(tf.float32, tf.int32)
) # , output_shapes=((24, 24, 3), (1)))
# map the per-image augmentation before batching so _map_fn_train receives single images
train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count())
train_ds = train_ds.shuffle(shuffle_buffer_size)
train_ds = train_ds.prefetch(buffer_size=4096)
train_ds = train_ds.batch(batch_size)
test_ds = tf.data.Dataset.from_generator(
generator_test, output_types=(tf.float32, tf.int32)
) # , output_shapes=((24, 24, 3), (1)))
# likewise apply the per-image preprocessing before batching
test_ds = test_ds.map(_map_fn_test, num_parallel_calls=multiprocessing.cpu_count())
test_ds = test_ds.prefetch(buffer_size=4096)
test_ds = test_ds.batch(batch_size)
for epoch in range(n_epoch):
start_time = time.time()
train_loss, train_acc, n_iter = 0, 0, 0
net.train()
for X_batch, y_batch in train_ds:
_loss, acc = _train_step(net, X_batch, y_batch, cost=cost, train_op=optimizer, acc=accuracy)
train_loss += _loss
train_acc += acc
n_iter += 1
# use training and evaluation sets to evaluate the model every print_freq epoch
if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
print(" train loss: {}".format(train_loss / n_iter))
print(" train acc: {}".format(train_acc / n_iter))
net.eval()
val_loss, val_acc, n_val_iter = 0, 0, 0
for X_batch, y_batch in test_ds:
_logits = net(X_batch) # is_train=False, disable dropout
val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss')
val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
n_val_iter += 1
print(" val loss: {}".format(val_loss / n_val_iter))
print(" val acc: {}".format(val_acc / n_val_iter))
net.eval()
test_loss, test_acc, n_iter = 0, 0, 0
for X_batch, y_batch in test_ds:
_logits = net(X_batch)
test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss')
test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
n_iter += 1
print(" test loss: {}".format(test_loss / n_iter))
print(" test acc: {}".format(test_acc / n_iter))
|
__author__ = 'thorsteinn'
def get_all_ship_fields(db):
ships = db.keys()
fields = []
for ship in ships:
shipDB = db[ship]
shipKeys = shipDB.keys()
for oneKey in shipKeys:
if oneKey not in fields:
fields.append(oneKey)
return fields
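# Example usage with made-up data: the function returns the union of all field names seen
# across every ship record, in first-seen order (on Python 3.7+ dict iteration order).
if __name__ == '__main__':
    example_db = {
        'titanic': {'name': 'Titanic', 'year': 1912},
        'vasa': {'name': 'Vasa', 'displacement_tons': 1210},
    }
    print(get_all_ship_fields(example_db))  # ['name', 'year', 'displacement_tons']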
|
print(2 + 2) # 4
print(50 - 5*6) # 20
print((50 - 5*6) / 4) # 5.0
print(8/5) # 1.6
print(17 / 3)   # 5.666666666666667, / always returns a float
print(17 // 3)  # 5, floor division
print(17 % 3)   # 2, remainder (modulo)
print(5*3+2)    # 17, multiplication before addition
print(2+5*3)    # 17, multiplication before addition
print(5**2)     # 25, 5 squared
print(5**3)     # 125, 5 cubed
print(2**7)     # 128, 2 to the 7th power
print("--- divider ---")
width=50
height=10*10
print(width*height) # 5000
print(4 * 3.75 - 1)
tax = 12.5 / 100
price = 100.50
print(price * tax)
print("--华丽的分割线--")
print('spam eggs')
print( 'doesn\'t') # \' 会进行转义
print("doesn't") # 也可使用双引号来输出,此时‘ 就不需要转义
print('"Yes," he said.') # 被单引号包含的双引号会被当成字符处理
print("\"Yes,\" he said.") # 被双引号包含中的双银行需要转义
print('"Isn\'t," she said.') #被单引号包含的单引号需要进行转义,不是用print函数打印时'"Isn\'t," she said.'
s = 'First line.\nSecond line.'
print(s) # 使用print打印\n会被转义换行 ,使用命令行是\n不会被转义 First line.\nSecond line.
print("----")
print('C:\some\name') # 这里 \n 会被转义
print(r'C:\some\name' ) # 声明 r' 后面的字符串不会被转义
print("""\
Usage: thingy [OPTIONS]
-h Display this usage message
-H hostname Hostname to connect to
""")
# """...""" or '''...''' 相当于html中p标签的作用,允许多行,排列格式
# 不加\ : 空一行
print(3 * 'un' + 'ium') # 字符串可以跟数字进行相乘
print('Py' 'thon') # Python ,在同一个print方法中,用多个空格隔开,最终会拼接成一个
prefix = 'Py'
print(prefix + 'thon') # 字符串用+ 进行拼接
text = ('Put several strings within parentheses to have them joined together.')
print(text)
word = 'Python'
print(word[0])   # strings can be indexed like sequences
print(word[5])
print(word[-1])  # the last character
print(word[-2])  # the second-to-last character
print(word[-6])  # the sixth character from the end
print(word[0:2])  # characters from index 0 up to (but not including) index 2
print(word[2:5])  # characters from index 2 up to (but not including) index 5
print(word[:2] + word[2:])  # s[:i] + s[i:] == s for any integer i
print(word[:4] + word[4:])
print(word[:70] + word[70:])
print(word[:2])   # the first two characters
print(word[4:])   # from index 4 to the end
print(word[-2:])  # the last two characters
print("---------")
print(word[4:42])  # an out-of-range end index is clamped to the end of the string
print(word[42:])   # an out-of-range start index gives an empty string
print('J' + word[1:])  # build a new string by concatenation (strings are immutable)
print(word[:2] + 'py')
s = 'supercalifragilisticexpialidocious'
print(len(s))  # len() returns the length of the string
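# A quick check of the slicing identity noted above: for any integer i,
# word[:i] + word[i:] reassembles the original string, even when i is negative
# or past the end of the string.
for i in range(-10, 11):
    assert word[:i] + word[i:] == word
print('slicing identity holds for', word)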
|
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class DirectorsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def attach_director_to_category(self, category_id, director_id, **kwargs):
"""
Attach director to category
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.attach_director_to_category(category_id, director_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int category_id: Category ID to fetch (required)
:param int director_id: Director ID to attach (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.attach_director_to_category_with_http_info(category_id, director_id, **kwargs)
else:
(data) = self.attach_director_to_category_with_http_info(category_id, director_id, **kwargs)
return data
def attach_director_to_category_with_http_info(self, category_id, director_id, **kwargs):
"""
Attach director to category
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.attach_director_to_category_with_http_info(category_id, director_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int category_id: Category ID to fetch (required)
:param int director_id: Director ID to attach (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['category_id', 'director_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method attach_director_to_category" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'category_id' is set
if ('category_id' not in params) or (params['category_id'] is None):
raise ValueError("Missing the required parameter `category_id` when calling `attach_director_to_category`")
# verify the required parameter 'director_id' is set
if ('director_id' not in params) or (params['director_id'] is None):
raise ValueError("Missing the required parameter `director_id` when calling `attach_director_to_category`")
collection_formats = {}
resource_path = '/categories/{category_id}/directors'.replace('{format}', 'json')
path_params = {}
if 'category_id' in params:
path_params['category_id'] = params['category_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
if 'director_id' in params:
form_params.append(('director_id', params['director_id']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def attach_director_to_product(self, product_id, director_id, **kwargs):
"""
Attach director to product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.attach_director_to_product(product_id, director_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
:param int director_id: Director ID to attach (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.attach_director_to_product_with_http_info(product_id, director_id, **kwargs)
else:
(data) = self.attach_director_to_product_with_http_info(product_id, director_id, **kwargs)
return data
def attach_director_to_product_with_http_info(self, product_id, director_id, **kwargs):
"""
Attach director to product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.attach_director_to_product_with_http_info(product_id, director_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
:param int director_id: Director ID to attach (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['product_id', 'director_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method attach_director_to_product" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'product_id' is set
if ('product_id' not in params) or (params['product_id'] is None):
raise ValueError("Missing the required parameter `product_id` when calling `attach_director_to_product`")
# verify the required parameter 'director_id' is set
if ('director_id' not in params) or (params['director_id'] is None):
raise ValueError("Missing the required parameter `director_id` when calling `attach_director_to_product`")
collection_formats = {}
resource_path = '/products/{product_id}/directors'.replace('{format}', 'json')
path_params = {}
if 'product_id' in params:
path_params['product_id'] = params['product_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
if 'director_id' in params:
form_params.append(('director_id', params['director_id']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_director(self, body, **kwargs):
"""
Create new director
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_director(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
        :param CreateDirectorRequest body: Director settings (required)
:return: Director
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_director_with_http_info(body, **kwargs)
else:
(data) = self.create_director_with_http_info(body, **kwargs)
return data
def create_director_with_http_info(self, body, **kwargs):
"""
Create new director
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_director_with_http_info(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
        :param CreateDirectorRequest body: Director settings (required)
:return: Director
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_director" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_director`")
collection_formats = {}
resource_path = '/directors'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
self.api_client.set_default_header('Content-Type', 'application/json')
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Director',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_director(self, director_id, **kwargs):
"""
Delete director
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_director(director_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int director_id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_director_with_http_info(director_id, **kwargs)
else:
(data) = self.delete_director_with_http_info(director_id, **kwargs)
return data
def delete_director_with_http_info(self, director_id, **kwargs):
"""
Delete director
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_director_with_http_info(director_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int director_id: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['director_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_director" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'director_id' is set
if ('director_id' not in params) or (params['director_id'] is None):
raise ValueError("Missing the required parameter `director_id` when calling `delete_director`")
collection_formats = {}
resource_path = '/directors/{director_id}'.replace('{format}', 'json')
path_params = {}
if 'director_id' in params:
path_params['director_id'] = params['director_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def detach_director_from_category(self, category_id, director_id, **kwargs):
"""
Detach director from category
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.detach_director_from_category(category_id, director_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int category_id: Category ID to fetch (required)
:param int director_id: Director ID to detach (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.detach_director_from_category_with_http_info(category_id, director_id, **kwargs)
else:
(data) = self.detach_director_from_category_with_http_info(category_id, director_id, **kwargs)
return data
def detach_director_from_category_with_http_info(self, category_id, director_id, **kwargs):
"""
Detach director from category
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.detach_director_from_category_with_http_info(category_id, director_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int category_id: Category ID to fetch (required)
:param int director_id: Director ID to detach (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['category_id', 'director_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method detach_director_from_category" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'category_id' is set
if ('category_id' not in params) or (params['category_id'] is None):
raise ValueError("Missing the required parameter `category_id` when calling `detach_director_from_category`")
# verify the required parameter 'director_id' is set
if ('director_id' not in params) or (params['director_id'] is None):
raise ValueError("Missing the required parameter `director_id` when calling `detach_director_from_category`")
collection_formats = {}
resource_path = '/categories/{category_id}/directors/{director_id}'.replace('{format}', 'json')
path_params = {}
if 'category_id' in params:
path_params['category_id'] = params['category_id']
if 'director_id' in params:
path_params['director_id'] = params['director_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_category_directors(self, category_id, **kwargs):
"""
Get directors attached to category
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_category_directors(category_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int category_id: Category ID to fetch (required)
:param int page:
:param int per_page:
:return: CategoryDirectorsListResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_category_directors_with_http_info(category_id, **kwargs)
else:
(data) = self.get_category_directors_with_http_info(category_id, **kwargs)
return data
def get_category_directors_with_http_info(self, category_id, **kwargs):
"""
Get directors attached to category
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_category_directors_with_http_info(category_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int category_id: Category ID to fetch (required)
:param int page:
:param int per_page:
:return: CategoryDirectorsListResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['category_id', 'page', 'per_page']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_category_directors" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'category_id' is set
if ('category_id' not in params) or (params['category_id'] is None):
raise ValueError("Missing the required parameter `category_id` when calling `get_category_directors`")
collection_formats = {}
resource_path = '/categories/{category_id}/directors'.replace('{format}', 'json')
path_params = {}
if 'category_id' in params:
path_params['category_id'] = params['category_id']
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CategoryDirectorsListResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_director(self, director_id, **kwargs):
"""
Get Director
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_director(director_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int director_id: Director ID to fetch (required)
:return: DirectorResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_director_with_http_info(director_id, **kwargs)
else:
(data) = self.get_director_with_http_info(director_id, **kwargs)
return data
def get_director_with_http_info(self, director_id, **kwargs):
"""
Get Director
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_director_with_http_info(director_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int director_id: Director ID to fetch (required)
:return: DirectorResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['director_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_director" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'director_id' is set
if ('director_id' not in params) or (params['director_id'] is None):
raise ValueError("Missing the required parameter `director_id` when calling `get_director`")
collection_formats = {}
resource_path = '/directors/{director_id}'.replace('{format}', 'json')
path_params = {}
if 'director_id' in params:
path_params['director_id'] = params['director_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DirectorResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_director_cover_image(self, director_id, **kwargs):
"""
Get cover image of a director
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_director_cover_image(director_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int director_id: Director ID to fetch (required)
:return: ImageResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_director_cover_image_with_http_info(director_id, **kwargs)
else:
(data) = self.get_director_cover_image_with_http_info(director_id, **kwargs)
return data
def get_director_cover_image_with_http_info(self, director_id, **kwargs):
"""
Get cover image of a director
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_director_cover_image_with_http_info(director_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int director_id: Director ID to fetch (required)
:return: ImageResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['director_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_director_cover_image" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'director_id' is set
if ('director_id' not in params) or (params['director_id'] is None):
raise ValueError("Missing the required parameter `director_id` when calling `get_director_cover_image`")
collection_formats = {}
resource_path = '/directors/{director_id}/cover'.replace('{format}', 'json')
path_params = {}
if 'director_id' in params:
path_params['director_id'] = params['director_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ImageResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_director_products(self, director_id, **kwargs):
"""
Get director products
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_director_products(director_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int director_id: Director ID to fetch (required)
:param int page:
:param int per_page:
:param str sort_by: Sort by this attribute (id by default)
:param str sort_direction: Sorting direction (asc by default)
:param str ip: Filter by user IP
:param str features: ``` features[*][value]=string&features[*][operator]=strict&features[1][value]=string&features[1][operator]=strict _______________ { \"*\": { \"value\": \"string\", \"operator\": \"strict\" }, \"1\": { \"value\": \"string\", \"operator\": \"contains\" } } ``` Operator can be: strict, contains, between, in, gt (greater than), lt (lower than). To search on all features, you can pass * as featureId.
        :param str filters: ``` name[value]=string&name[operator]=contains&date_add[value]=string&date_add[operator]=lt _______________ { \"name\": { \"value\": \"string\", \"operator\": \"contains\" }, \"date_add\": { \"value\": \"string\", \"operator\": \"lt\" } } ``` Operator can be: strict, contains, between, in, gt (greater than), lt (lower than).
:return: DirectorProductListResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_director_products_with_http_info(director_id, **kwargs)
else:
(data) = self.get_director_products_with_http_info(director_id, **kwargs)
return data
def get_director_products_with_http_info(self, director_id, **kwargs):
"""
Get director products
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_director_products_with_http_info(director_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int director_id: Director ID to fetch (required)
:param int page:
:param int per_page:
:param str sort_by: Sort by this attribute (id by default)
:param str sort_direction: Sorting direction (asc by default)
:param str ip: Filter by user IP
:param str features: ``` features[*][value]=string&features[*][operator]=strict&features[1][value]=string&features[1][operator]=strict _______________ { \"*\": { \"value\": \"string\", \"operator\": \"strict\" }, \"1\": { \"value\": \"string\", \"operator\": \"contains\" } } ``` Operator can be: strict, contains, between, in, gt (greater than), lt (lower than). To search on all features, you can pass * as featureId.
        :param str filters: ``` name[value]=string&name[operator]=contains&date_add[value]=string&date_add[operator]=lt _______________ { \"name\": { \"value\": \"string\", \"operator\": \"contains\" }, \"date_add\": { \"value\": \"string\", \"operator\": \"lt\" } } ``` Operator can be: strict, contains, between, in, gt (greater than), lt (lower than).
:return: DirectorProductListResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['director_id', 'page', 'per_page', 'sort_by', 'sort_direction', 'ip', 'features', 'filters']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_director_products" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'director_id' is set
if ('director_id' not in params) or (params['director_id'] is None):
raise ValueError("Missing the required parameter `director_id` when calling `get_director_products`")
collection_formats = {}
resource_path = '/directors/{director_id}/products'.replace('{format}', 'json')
path_params = {}
if 'director_id' in params:
path_params['director_id'] = params['director_id']
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
if 'sort_by' in params:
query_params['sort_by'] = params['sort_by']
if 'sort_direction' in params:
query_params['sort_direction'] = params['sort_direction']
if 'ip' in params:
query_params['ip'] = params['ip']
if 'features' in params:
query_params['features'] = params['features']
if 'filters' in params:
query_params['filters'] = params['filters']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DirectorProductListResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
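    # A hedged usage sketch for the `filters` / `features` query syntax documented in the
    # get_director_products docstrings above (the field name, operator values and IDs are
    # hypothetical; only the syntax itself is taken from the docstring):
    #
    #     api = DirectorsApi()
    #     products = api.get_director_products(
    #         42,
    #         filters="name[value]=Kubrick&name[operator]=contains",
    #         sort_by="date_add",
    #         sort_direction="desc",
    #     )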
def get_director_products_role(self, director_id, **kwargs):
"""
        Get Products linked to Director with their role
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_director_products_role(director_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int director_id: Director ID to fetch (required)
:param int page:
:param int per_page:
:return: DirectorProductRoleListResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_director_products_role_with_http_info(director_id, **kwargs)
else:
(data) = self.get_director_products_role_with_http_info(director_id, **kwargs)
return data
def get_director_products_role_with_http_info(self, director_id, **kwargs):
"""
        Get Products linked to Director with their role
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_director_products_role_with_http_info(director_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int director_id: Director ID to fetch (required)
:param int page:
:param int per_page:
:return: DirectorProductRoleListResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['director_id', 'page', 'per_page']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_director_products_role" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'director_id' is set
if ('director_id' not in params) or (params['director_id'] is None):
raise ValueError("Missing the required parameter `director_id` when calling `get_director_products_role`")
collection_formats = {}
resource_path = '/directors/{director_id}/products-role'.replace('{format}', 'json')
path_params = {}
if 'director_id' in params:
path_params['director_id'] = params['director_id']
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DirectorProductRoleListResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_directors(self, **kwargs):
"""
Get directors list
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_directors(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page:
:param int per_page:
:return: DirectorListResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_directors_with_http_info(**kwargs)
else:
(data) = self.get_directors_with_http_info(**kwargs)
return data
def get_directors_with_http_info(self, **kwargs):
"""
Get directors list
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_directors_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page:
:param int per_page:
:return: DirectorListResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'per_page']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_directors" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/directors'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DirectorListResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_product_directors(self, product_id, **kwargs):
"""
Get directors of a product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_product_directors(product_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
:param int page:
:param int per_page:
:param str image_type:
:return: DirectorListResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_product_directors_with_http_info(product_id, **kwargs)
else:
(data) = self.get_product_directors_with_http_info(product_id, **kwargs)
return data
def get_product_directors_with_http_info(self, product_id, **kwargs):
"""
Get directors of a product
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_product_directors_with_http_info(product_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
:param int page:
:param int per_page:
:param str image_type:
:return: DirectorListResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['product_id', 'page', 'per_page', 'image_type']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_product_directors" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'product_id' is set
if ('product_id' not in params) or (params['product_id'] is None):
raise ValueError("Missing the required parameter `product_id` when calling `get_product_directors`")
collection_formats = {}
resource_path = '/products/{product_id}/directors'.replace('{format}', 'json')
path_params = {}
if 'product_id' in params:
path_params['product_id'] = params['product_id']
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
if 'image_type' in params:
query_params['image_type'] = params['image_type']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DirectorListResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_product_directors_role(self, product_id, **kwargs):
"""
Get Directors attached to Product with their role
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_product_directors_role(product_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
:param int page:
:param int per_page:
:return: DirectorRoleListResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_product_directors_role_with_http_info(product_id, **kwargs)
else:
(data) = self.get_product_directors_role_with_http_info(product_id, **kwargs)
return data
def get_product_directors_role_with_http_info(self, product_id, **kwargs):
"""
Get Directors attached to Product with their role
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_product_directors_role_with_http_info(product_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int product_id: Product ID to fetch (required)
:param int page:
:param int per_page:
:return: DirectorRoleListResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['product_id', 'page', 'per_page']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_product_directors_role" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'product_id' is set
if ('product_id' not in params) or (params['product_id'] is None):
raise ValueError("Missing the required parameter `product_id` when calling `get_product_directors_role`")
collection_formats = {}
resource_path = '/products/{product_id}/directors-role'.replace('{format}', 'json')
path_params = {}
if 'product_id' in params:
path_params['product_id'] = params['product_id']
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='DirectorRoleListResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_director(self, director_id, body, **kwargs):
"""
Update director
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_director(director_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int director_id: (required)
:param UpdateDirectorRequest body: Director settings (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_director_with_http_info(director_id, body, **kwargs)
else:
(data) = self.update_director_with_http_info(director_id, body, **kwargs)
return data
def update_director_with_http_info(self, director_id, body, **kwargs):
"""
Update director
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_director_with_http_info(director_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int director_id: (required)
:param UpdateDirectorRequest body: Director settings (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['director_id', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_director" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'director_id' is set
if ('director_id' not in params) or (params['director_id'] is None):
raise ValueError("Missing the required parameter `director_id` when calling `update_director`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `update_director`")
collection_formats = {}
resource_path = '/directors/{director_id}'.replace('{format}', 'json')
path_params = {}
if 'director_id' in params:
path_params['director_id'] = params['director_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
self.api_client.set_default_header('Content-Type', 'application/json')
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def upload_director_cover(self, director_id, **kwargs):
"""
Upload director cover
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.upload_director_cover(director_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param float director_id: Director ID to fetch (required)
:param file file:
:param str hash:
:param str hash_algorithm: Hash algorithm used to check the file hash (default: sha256)
:return: ImageResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.upload_director_cover_with_http_info(director_id, **kwargs)
else:
(data) = self.upload_director_cover_with_http_info(director_id, **kwargs)
return data
def upload_director_cover_with_http_info(self, director_id, **kwargs):
"""
Upload director cover
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.upload_director_cover_with_http_info(director_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param float director_id: Director ID to fetch (required)
:param file file:
:param str hash:
:param str hash_algorithm: Hash algorithm used to check the file hash (default: sha256)
:return: ImageResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['director_id', 'file', 'hash', 'hash_algorithm']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method upload_director_cover" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'director_id' is set
if ('director_id' not in params) or (params['director_id'] is None):
raise ValueError("Missing the required parameter `director_id` when calling `upload_director_cover`")
collection_formats = {}
resource_path = '/directors/{director_id}/cover'.replace('{format}', 'json')
path_params = {}
if 'director_id' in params:
path_params['director_id'] = params['director_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
if 'file' in params:
local_var_files['file'] = params['file']
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
if 'hash' in params:
form_params.append(('hash', params['hash']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
if 'hash_algorithm' in params:
form_params.append(('hash_algorithm', params['hash_algorithm']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
body_params = None
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['multipart/form-data'])
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ImageResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
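# Illustrative usage of the generated client (the API class and client names below
# are assumptions; they depend on how this Swagger/OpenAPI client was generated):
#
#   api = DirectorsApi(api_client)
#   # Synchronous call: returns the deserialized DirectorListResponse
#   directors = api.get_product_directors(product_id=42, per_page=50)
#   # Asynchronous call: returns the request thread and invokes the callback later
#   thread = api.get_product_directors(product_id=42,
#                                      callback=lambda response: pprint(response))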
|
"""
.. module:: shellscribe
Shell-Scribe run.py
@author: Keith E. Miller <keithmiller@umass.edu>
Expected issues:
- the cd command is implemented by shell-scribe itself, so commands that use cd
in a non-trivial way may break the cd handling
"""
import cmd
import os
import sys
import argparse as ap
import datetime
import json
from twilio.rest import TwilioRestClient
DEBUG = False
def bashinator_9000(filename):
dic={}
inc=1
title = ''
author = ''
date = datetime.datetime.now()
title = raw_input("What is the title: ")
author = raw_input("Who is the author: ")
dic['welcome']= raw_input("Input a description for the lesson: ")
date = datetime.datetime.now()
if title =="": title = 'lesson'
if author=="": author = 'N/A'
dic["title"] = title
dic["author"] = author
with open(filename,'r') as file:
for row in file:
print '\033[91m' + "\nCode for the row: " + '\033[96m' + row + '\033[92m'
comment=raw_input('- ')
tempDic = {'comment':comment,'command':row}
dic.update({inc:tempDic})
inc+=1
print('\033[0m')
dic['command_count'] = inc - 1
with open(title+'.json','w') as file:
json.dump(dic,file)
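# For reference, bashinator_9000 writes a lesson file shaped roughly like this
# (illustrative values only; numeric keys become strings when dumped to JSON):
#
#   {
#     "title": "lesson",
#     "author": "N/A",
#     "welcome": "Description of the lesson",
#     "command_count": 2,
#     "1": {"comment": "list files", "command": "ls -l\n"},
#     "2": {"comment": "show date", "command": "date\n"}
#   }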
def bashinator_10000(filename): #need sleeeeeep
#fname = filename.readFile() #attempting to have json file read-in
with open(filename, 'r') as f:
json_dict = json.load(f)
print json_dict
inc=1
# Welcomes them to Hell
print json_dict["welcome"], "\n"
for x in range(json_dict["command_count"]):
x = x + 1
print '\033[91m' +"Line: ", x,'\n'
print '\033[92m'+ "Comment: ", json_dict[str(x)]["comment"],'\n'
print '\033[96m' + "Input: ", json_dict[str(x)]["command"][:-1]
outfile = os.popen(json_dict[str(x)]["command"])
output = outfile.read()
return_val = outfile.close()
if return_val != None:
Shell_Scribe().send_call()
print '\033[93m' + "Output: ", os.popen(json_dict[str(x)]["command"]).read() + '\033[0m'
raw_input("-Press Enter-\n")
#not sure what to do with the rest of this code. whether or not it is even necessary
#with open('test.sh','r') as file:
# for row in file:
# print '\033[91m' + "\nCode for the row: " + '\033[96m' + row + '\033[92m'
# comment=raw_input('- ')
# tempDic = {'comment':comment,'command':row}
# dic.update({inc:tempDic})
# inc+=1
#dic['welcome']="""This is a welcome message"""
#print('\033[0m')
#with open(title+'.json','w') as file:
# json.dump(dic,file)
class Shell_Scribe(cmd.Cmd):
"""
Shell_Scribe is a commandline interface that automatically saves a history
of what commands were typed to a text file as well as creating a shell
script for them.
"""
## Return value for each command (None == 0)
return_value = None
## The prompt to the user
prompt = '\033[96m'+'S'+'\033[33m'+'hell-'+'\033[96m'+'S'+'\033[33m'+ \
'cribe>'+'\033[0m'
## Set to True for Working Directory as prompt"
location_prompt = False
## This is a list of commands that will not be stored by Shell-Scribe
storage_blacklist = ["ls", "pwd", ""]
## Config File Name
config_filename = "config.json"
## Twilio Attributes
TWILIO = False
ACCOUNT_SID = None
AUTH_TOKEN = None
message_recipient = None
message_sender = None
call_url = None
alert_type = None
## Properties
script_filename = "shell-scribe.sh"
script = None
def bashinator_9000(self, filename):
dic={}
inc=1
title = ''
author = ''
date = datetime.datetime.now()
title = raw_input("What is the title: ")
author = raw_input("Who is the author: ")
dic['welcome']= raw_input("Input a description for the lesson: ")
date = datetime.datetime.now()
if title =="": title = 'lesson'
if author=="": author = 'N/A'
dic["title"] = title
dic["author"] = author
with open(filename,'r') as file:
for row in file:
print '\033[91m' + "\nCode for the row: " + '\033[96m' + row + '\033[92m'
comment=raw_input('- ')
tempDic = {'comment':comment,'command':row}
dic.update({inc:tempDic})
inc+=1
print('\033[0m')
dic['command_count'] = inc - 1
with open(title+'.json','w') as file:
json.dump(dic,file)
def bashinator_10000(self, filename): #need sleeeeeep
#fname = filename.readFile() #attempting to have json file read-in
with open(filename, 'r') as f:
json_dict = json.load(f)
print json_dict
inc=1
# Welcomes them to Hell
print json_dict["welcome"], "\n"
for x in range(json_dict["command_count"]):
x = x + 1
print '\033[91m' +"Line: ", x,'\n'
print '\033[92m'+ "Comment: ", json_dict[str(x)]["comment"],'\n'
print '\033[96m' + "Input: ", json_dict[str(x)]["command"][:-1]
outfile = os.popen(json_dict[str(x)]["command"])
output = outfile.read()
return_val = outfile.close()
if return_val != None:
self.send_call()
print '\033[93m' + "Output: ", os.popen(json_dict[str(x)]["command"]).read() + '\033[0m'
raw_input("-Press Enter-\n")
## File Editing Methods
def store_to_script(self, line):
"""
Stores the shell command to the script
"""
self.script.write(line + "\n")
def load_config_json(self):
"""
Configures Shell-Scribe based on the JSON configuration file
"""
with open(self.config_filename, 'r') as f:
json_dict = json.load(f)
#print "Dict from Json:", json_dict
self.TWILIO = (1 == json_dict["twilio"]["TWILIO"])
if self.TWILIO:
self.ACCOUNT_SID = json_dict["twilio"]["ACCOUNT_SID"]
self.AUTH_TOKEN = json_dict["twilio"]["AUTH_TOKEN"]
self.message_recipient = json_dict["twilio"]["TO"]
self.message_sender = json_dict["twilio"]["FROM"]
if json_dict["twilio"]["ALERT_TYPE"].lower() == "call":
self.alert_type = json_dict["twilio"]["ALERT_TYPE"].lower()
self.call_url = json_dict["twilio"]["CALL_URL"]
if json_dict["appearance"]["prompt"].lower() == 'location':
self.location_prompt = True
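# Example config.json matching the keys read above (values are placeholders):
#
#   {
#     "twilio": {
#       "TWILIO": 1,
#       "ACCOUNT_SID": "ACxxxxxxxx",
#       "AUTH_TOKEN": "your-auth-token",
#       "TO": "+15551234567",
#       "FROM": "+15557654321",
#       "ALERT_TYPE": "call",
#       "CALL_URL": "http://example.com/twiml.xml"
#     },
#     "appearance": {"prompt": "location"}
#   }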
def no_config_subroutine(self):
"""
Method that is called when there is no config found
"""
gen_config = raw_input("Generate Default Config File? (Y/n)")
if gen_config == "": gen_config = "Y"
if gen_config.lower() == 'y':
self.generate_config()
self.load_config_json()
else:
print "No Configuration File. Running basic mode"
## Send text via Twilio
def send_text(self, line):
"""
Sends a text message via Twilio
"""
client = TwilioRestClient(self.ACCOUNT_SID, self.AUTH_TOKEN)
client.messages.create(to=self.message_recipient,
from_=self.message_sender,
body="Failed on command: " + line)
def send_call(self):
"""
Sends said call via Twilio
"""
print "Calling"
client = TwilioRestClient(self.ACCOUNT_SID, self.AUTH_TOKEN)
call = client.calls.create(to=self.message_recipient,
from_=self.message_sender,
url=self.call_url,
method="GET",
fallback_method="GET",
status_callback_method="GET",
record="false")
print call.sid
## Explicit Shell-Scribe Commands
def do_cd(self, line):
"""
Runs the cd equivalent
"""
if os.path.isdir(line):
os.chdir(line)
else:
print "Directory ", line, " does not exist"
def do_exit(self, line):
"""
Exits Shell-Scribe
"""
os.system("chmod +x %s" % self.script_filename)
sys.exit()
def do_quit(self, line):
"""
Exits Shell Scribe
"""
os.system("chmod +x %s" % self.script_filename)
sys.exit()
## Misc. Functions
def command_not_blank(self, line):
"""
Checks to make sure the command is not all space characters
"""
print "line:",line
for char in line:
if char != " ":
return True
return False
## CMD Overloads
def do_EOF(self, line):
"""
Method that is called at the end of a batch job.
"""
return True
def precmd(self, line):
"""
Method that is run just before the shell command is run
"""
return line
def emptyline(self):
"""
Controls what happens if the user enters an empty line. This is overridden
because the default behaviour is to rerun the previous command, which is not
what we want here.
"""
return ""
def postcmd(self, stop, line):
"""
Method that is called after each command is run
"""
if self.location_prompt:
self.prompt = os.getcwd() + " >"
if self.return_value == None:
if (line not in self.storage_blacklist) and self.command_not_blank(line):
self.store_to_script(line)
print "Stored!"
def default(self, line):
"""
This is the default handler, called when the entered command is not an
explicit Shell-Scribe command (i.e. it has no do_* method)
"""
cmd_file = os.popen(line)
output = cmd_file.read()
self.return_value = cmd_file.close()
if self.return_value != None:
if self.alert_type == 'text':
self.send_text(line)
if self.alert_type == 'call':
self.send_call()
if self.command_not_blank(line):
print output
def preloop(self):
"""
Method that is called before the CMD loop begins
"""
if self.location_prompt:
self.prompt = os.getcwd() + " >"
if os.path.isfile(self.script_filename):
pass
self.script = open(self.script_filename, 'a')
if __name__ == '__main__':
parser = ap.ArgumentParser(description="Documents Shell-Commands")
parser.add_argument('--location-prompt', action='store_true')
parser.add_argument('-config',
help="The name of the configuration JSON file")
parser.add_argument('-create-lesson',
help="The name of the script that we are building \
a lesson for")
parser.add_argument('-run-lesson',
help="The name of the lesson (JSON file) that we are \
running in shell-scribe")
args = parser.parse_args()
ss = Shell_Scribe()
ss.location_prompt = args.location_prompt
if args.config is not None:
if os.path.isfile(args.config):
print "Using configuration from file ", args.config
ss.config_filename = args.config
ss.load_config_json()
else:
print "Config does not exist"
ss.no_config_subroutine()
elif os.path.isfile("config.json"):
print "Found config.json"
ss.load_config_json()
else:
ss.no_config_subroutine()
if DEBUG: print args
if args.create_lesson != None:
ss.bashinator_9000(args.create_lesson)
print "RUNNING CREATE LESSON BLOCK"
elif args.run_lesson != None:
# Run Lesson Function
ss.bashinator_10000(args.run_lesson)
else:
ss.cmdloop()
|
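# Tail a log stored in etcd: connect to the given server, watch the key tree
# under logpath (default /log), and print each new value as it appears.
# Usage: python <this-script> <etcd-host> [logpath]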
import os
import sys
import etcd
import subprocess
import signal
import time
if len(sys.argv) < 2:
print("Please provide a server argument")
sys.exit(1)
def siginthandler(signum, stackframe):
sys.exit(-1)
signal.signal(signal.SIGINT, siginthandler)
logpath="/log"
if len(sys.argv) > 2:
logpath=sys.argv[2]
while True:
try:
idx = 0
time.sleep(1)
p = 2379
print("Connect to {}:{}".format(sys.argv[1], p))
keyval = etcd.Client(host=sys.argv[1], port=p)
while keyval:
res = keyval.watch(logpath, index=idx, recursive=True)
for e in res.leaves:
if e.key == logpath:
idx = 0
break
print(e.value)
idx = e.createdIndex+1
except Exception as e:
print(e)
|
from mido import MidiFile
from time import sleep
import pibrella
""" fade test
pibrella.light.red.fade(0,100,10)
sleep(11)
pibrella.light.red.fade(100,0,10)
sleep(11)
"""
""" start
pibrella.buzzer.note(-9)
sleep(.9)
pibrella.buzzer.off()
sleep(0.1)
pibrella.buzzer.note(-9)
sleep(0.9)
pibrella.buzzer.off()
sleep(0.1)
pibrella.buzzer.note(-9)
sleep(0.9)
pibrella.buzzer.off()
sleep(0.1)
pibrella.buzzer.note(3)
sleep(0.9)
pibrella.buzzer.off()
"""
""" fail
pibrella.buzzer.note(0)
sleep(1.25)
pibrella.buzzer.note(-7)
sleep(2)
pibrella.buzzer.off()
"""
""" Mike notes for success likely bond theme
and need a calibration mode
push button: yellow goes on, then as you turn, the light can change until the light changes
press red button again to go back to operational state
"""
""" it knows it is a comment """
mid = MidiFile('bond.mid')
for i, track in enumerate(mid.tracks):
print('Track ')
print(track.name)
if track.name == '':
for message in track:
if message.type == 'note_on':
# print('Turn on ')
note = message.note - 69
print(note)
pibrella.buzzer.note(note)
duration = 0.0 + message.time
elif message.type == 'note_off':
print(duration)
duration = message.time - duration
if duration > 0:
sleep(duration/1000.0)
pibrella.buzzer.off()
pibrella.buzzer.off()
|
from model.contact import Contact
import random
def test_delete_some_contact(app, db, check_ui):
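"""Delete a randomly chosen contact and verify the DB list shrinks by one;
when check_ui is set, also compare the result against the list shown in the UI."""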
if len(db.get_contact_list()) == 0:
app.contact.add(Contact(firstname="test"))
old_contacts = db.get_contact_list()
contact = random.choice(old_contacts)
app.contact.delete_contact_by_id(contact.id)
assert len(old_contacts) - 1 == app.contact.count()
new_contacts = db.get_contact_list()
old_contacts.remove(contact)
assert old_contacts == new_contacts
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid as fluid
from utility import get_gpu_num
class NpairsLoss():
def __init__(self,
train_batch_size = 160,
samples_each_class=2,
reg_lambda=0.01):
self.samples_each_class = samples_each_class
assert(self.samples_each_class == 2)
self.train_batch_size = train_batch_size
num_gpus = get_gpu_num()
assert(train_batch_size % num_gpus == 0)
self.cal_loss_batch_size = train_batch_size // num_gpus
assert(self.cal_loss_batch_size % samples_each_class == 0)
self.reg_lambda = reg_lambda
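# loss() below computes the N-pairs objective: split each pair into an anchor
# and a positive, form the anchor-positive similarity matrix, take a softmax
# cross-entropy against labels remapped to "same class or not", and add an
# L2 embedding penalty of 0.5 * reg_lambda * mean(sum(feature**2, axis=1)).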
def loss(self, input, label=None):
reg_lambda = self.reg_lambda
samples_each_class = self.samples_each_class
batch_size = self.cal_loss_batch_size
num_class = batch_size // samples_each_class
fea_dim = input.shape[1]
input = fluid.layers.reshape(input, shape = [-1, fea_dim])
feature = fluid.layers.reshape(input, shape = [-1, samples_each_class, fea_dim])
label = fluid.layers.reshape(label, shape = [-1, samples_each_class])
label = fluid.layers.cast(label, dtype='float32')
if samples_each_class == 2:
anchor_fea, positive_fea = fluid.layers.split(feature, num_or_sections = 2, dim = 1)
anchor_lab, positive_lab = fluid.layers.split(label, num_or_sections = 2, dim = 1)
else:
anchor_fea, positive_fea = fluid.layers.split(feature, num_or_sections = [1, samples_each_class-1], dim = 1)
anchor_lab, positive_lab = fluid.layers.split(label, num_or_sections = [1, samples_each_class-1], dim = 1)
anchor_fea = fluid.layers.reshape(anchor_fea, shape = [-1, fea_dim])
positive_fea = fluid.layers.reshape(positive_fea, shape = [-1, fea_dim])
positive_fea_trans = fluid.layers.transpose(positive_fea, perm = [1, 0])
similarity_matrix = fluid.layers.mul(anchor_fea, positive_fea_trans)
anchor_lab = fluid.layers.expand(x=anchor_lab, expand_times=[1, batch_size-num_class])
positive_lab_tran = fluid.layers.transpose(positive_lab, perm = [1, 0])
positive_lab_tran = fluid.layers.expand(x=positive_lab_tran, expand_times=[num_class, 1])
label_remapped = fluid.layers.equal(anchor_lab, positive_lab_tran)
label_remapped = fluid.layers.cast(label_remapped, dtype='float32') / (samples_each_class-1)
label_remapped.stop_gradient = True
out = fluid.layers.softmax(input=similarity_matrix, use_cudnn=False)
xentloss = fluid.layers.cross_entropy(input=out, label=label_remapped, soft_label=True)
xentloss = fluid.layers.mean(x=xentloss)
reg = fluid.layers.reduce_mean(fluid.layers.reduce_sum(fluid.layers.square(input), dim=1))
l2loss = 0.5 * reg_lambda * reg
return xentloss + l2loss
|
"""Support for INSTEON Modems (PLM and Hub)."""
import asyncio
from contextlib import suppress
import logging
from pyinsteon import async_close, async_connect, devices
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import CONF_PLATFORM, EVENT_HOMEASSISTANT_STOP
from homeassistant.exceptions import ConfigEntryNotReady
from . import api
from .const import (
CONF_CAT,
CONF_DIM_STEPS,
CONF_HOUSECODE,
CONF_OVERRIDE,
CONF_SUBCAT,
CONF_UNITCODE,
CONF_X10,
DOMAIN,
INSTEON_PLATFORMS,
ON_OFF_EVENTS,
)
from .schemas import convert_yaml_to_config_flow
from .utils import (
add_on_off_event_device,
async_register_services,
get_device_platforms,
register_new_device_callback,
)
_LOGGER = logging.getLogger(__name__)
OPTIONS = "options"
async def async_get_device_config(hass, config_entry):
"""Initiate the connection and services."""
# Make a copy of addresses due to edge case where the list of devices could change during status update
# Cannot be done concurrently due to issues with the underlying protocol.
for address in list(devices):
if devices[address].is_battery:
continue
with suppress(AttributeError):
await devices[address].async_status()
await devices.async_load(id_devices=1)
for addr in devices:
device = devices[addr]
flags = True
for name in device.operating_flags:
if not device.operating_flags[name].is_loaded:
flags = False
break
if flags:
for name in device.properties:
if not device.properties[name].is_loaded:
flags = False
break
# Cannot be done concurrently due to issues with the underlying protocol.
if not device.aldb.is_loaded or not flags:
await device.async_read_config()
await devices.async_save(workdir=hass.config.config_dir)
async def close_insteon_connection(*args):
"""Close the Insteon connection."""
await async_close()
async def async_setup(hass, config):
"""Set up the Insteon platform."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
data, options = convert_yaml_to_config_flow(conf)
if options:
hass.data[DOMAIN] = {}
hass.data[DOMAIN][OPTIONS] = options
# Create a config entry with the connection data
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=data
)
)
return True
async def async_setup_entry(hass, entry):
"""Set up an Insteon entry."""
if not devices.modem:
try:
await async_connect(**entry.data)
except ConnectionError as exception:
_LOGGER.error("Could not connect to Insteon modem")
raise ConfigEntryNotReady from exception
entry.async_on_unload(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, close_insteon_connection)
)
await devices.async_load(
workdir=hass.config.config_dir, id_devices=0, load_modem_aldb=0
)
# If options existed in YAML and have not already been saved to the config entry
# add them now
if (
not entry.options
and entry.source == SOURCE_IMPORT
and hass.data.get(DOMAIN)
and hass.data[DOMAIN].get(OPTIONS)
):
hass.config_entries.async_update_entry(
entry=entry,
options=hass.data[DOMAIN][OPTIONS],
)
for device_override in entry.options.get(CONF_OVERRIDE, []):
# Override the device default capabilities for a specific address
address = device_override.get("address")
if not devices.get(address):
cat = device_override[CONF_CAT]
subcat = device_override[CONF_SUBCAT]
devices.set_id(address, cat, subcat, 0)
for device in entry.options.get(CONF_X10, []):
housecode = device.get(CONF_HOUSECODE)
unitcode = device.get(CONF_UNITCODE)
x10_type = "on_off"
steps = device.get(CONF_DIM_STEPS, 22)
if device.get(CONF_PLATFORM) == "light":
x10_type = "dimmable"
elif device.get(CONF_PLATFORM) == "binary_sensor":
x10_type = "sensor"
_LOGGER.debug(
"Adding X10 device to Insteon: %s %d %s", housecode, unitcode, x10_type
)
device = devices.add_x10_device(housecode, unitcode, x10_type, steps)
for platform in INSTEON_PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
for address in devices:
device = devices[address]
platforms = get_device_platforms(device)
if ON_OFF_EVENTS in platforms:
add_on_off_event_device(hass, device)
_LOGGER.debug("Insteon device count: %s", len(devices))
register_new_device_callback(hass)
async_register_services(hass)
device_registry = await hass.helpers.device_registry.async_get_registry()
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers={(DOMAIN, str(devices.modem.address))},
manufacturer="Smart Home",
name=f"{devices.modem.description} {devices.modem.address}",
model=f"{devices.modem.model} ({devices.modem.cat!r}, 0x{devices.modem.subcat:02x})",
sw_version=f"{devices.modem.firmware:02x} Engine Version: {devices.modem.engine_version}",
)
api.async_load_api(hass)
asyncio.create_task(async_get_device_config(hass, entry))
return True
|
"""Config flow for ReCollect Waste integration."""
from __future__ import annotations
from typing import Any
from aiorecollect.client import Client
from aiorecollect.errors import RecollectError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_FRIENDLY_NAME
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers import aiohttp_client
from .const import CONF_PLACE_ID, CONF_SERVICE_ID, DOMAIN, LOGGER
DATA_SCHEMA = vol.Schema(
{vol.Required(CONF_PLACE_ID): str, vol.Required(CONF_SERVICE_ID): str}
)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for ReCollect Waste."""
VERSION = 1
@staticmethod
@callback
def async_get_options_flow(
config_entry: config_entries.ConfigEntry,
) -> config_entries.OptionsFlow:
"""Define the config flow to handle options."""
return RecollectWasteOptionsFlowHandler(config_entry)
async def async_step_import(
self, import_config: dict[str, Any] | None = None
) -> FlowResult:
"""Handle configuration via YAML import."""
return await self.async_step_user(import_config)
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle configuration via the UI."""
if user_input is None:
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors={}
)
unique_id = f"{user_input[CONF_PLACE_ID]}, {user_input[CONF_SERVICE_ID]}"
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
session = aiohttp_client.async_get_clientsession(self.hass)
client = Client(
user_input[CONF_PLACE_ID], user_input[CONF_SERVICE_ID], session=session
)
try:
await client.async_get_next_pickup_event()
except RecollectError as err:
LOGGER.error("Error during setup of integration: %s", err)
return self.async_show_form(
step_id="user",
data_schema=DATA_SCHEMA,
errors={"base": "invalid_place_or_service_id"},
)
return self.async_create_entry(
title=unique_id,
data={
CONF_PLACE_ID: user_input[CONF_PLACE_ID],
CONF_SERVICE_ID: user_input[CONF_SERVICE_ID],
},
)
class RecollectWasteOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a Recollect Waste options flow."""
def __init__(self, entry: config_entries.ConfigEntry) -> None:
"""Initialize."""
self._entry = entry
async def async_step_init(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
CONF_FRIENDLY_NAME,
default=self._entry.options.get(CONF_FRIENDLY_NAME),
): bool
}
),
)
|
import robot_util
def sendSettings(ser, args):
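"""Forward any wheel-speed, delay, or LED settings present in the parsed
command-line args to the robot as serial commands, one sendSerialCommand
call per option that was provided."""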
if args.right_wheel_forward_speed is not None:
robot_util.sendSerialCommand(ser, "rwfs " + str(args.right_wheel_forward_speed))
if args.right_wheel_backward_speed is not None:
robot_util.sendSerialCommand(ser, "rwbs " + str(args.right_wheel_backward_speed))
if args.left_wheel_forward_speed is not None:
robot_util.sendSerialCommand(ser, "lwfs " + str(args.left_wheel_forward_speed))
if args.left_wheel_backward_speed is not None:
robot_util.sendSerialCommand(ser, "lwbs " + str(args.left_wheel_backward_speed))
if args.straight_delay is not None:
robot_util.sendSerialCommand(ser, "straight-distance " + str(int(args.straight_delay * 255)))
if args.turn_delay is not None:
robot_util.sendSerialCommand(ser, "turn-distance " + str(int(args.turn_delay * 255)))
if args.led_max_brightness is not None:
robot_util.sendSerialCommand(ser, "led-max-brightness " + str(args.led_max_brightness))
|
""" Cisco_IOS_XR_infra_objmgr_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR infra\-objmgr package configuration.
This module contains definitions
for the following management objects\:
object\-group\: Object\-group configuration
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class EndPortEnum(Enum):
"""
EndPortEnum
End port
.. data:: echo = 7
Echo (7)
.. data:: discard = 9
Discard (9)
.. data:: daytime = 13
Daytime (13)
.. data:: chargen = 19
Character generator (19)
.. data:: ftp_data = 20
FTP data connections (used infrequently, 20)
.. data:: ftp = 21
File Transfer Protocol (21)
.. data:: ssh = 22
Secure Shell (22)
.. data:: telnet = 23
Telnet (23)
.. data:: smtp = 25
Simple Mail Transport Protocol (25)
.. data:: time = 37
Time (37)
.. data:: nicname = 43
Nicname (43)
.. data:: tacacs = 49
TAC Access Control System (49)
.. data:: domain = 53
Domain Name Service (53)
.. data:: gopher = 70
Gopher (70)
.. data:: finger = 79
Finger (79)
.. data:: www = 80
World Wide Web (HTTP, 80)
.. data:: host_name = 101
NIC hostname server (101)
.. data:: pop2 = 109
Post Office Protocol v2 (109)
.. data:: pop3 = 110
Post Office Protocol v3 (110)
.. data:: sun_rpc = 111
Sun Remote Procedure Call (111)
.. data:: ident = 113
Ident Protocol (113)
.. data:: nntp = 119
Network News Transport Protocol (119)
.. data:: bgp = 179
Border Gateway Protocol (179)
.. data:: irc = 194
Internet Relay Chat (194)
.. data:: pim_auto_rp = 496
PIM Auto-RP (496)
.. data:: exec_ = 512
Exec (rsh, 512)
.. data:: login = 513
Login (rlogin, 513)
.. data:: cmd = 514
Remote commands (rcmd, 514)
.. data:: lpd = 515
Printer service (515)
.. data:: uucp = 540
Unix-to-Unix Copy Program (540)
.. data:: klogin = 543
Kerberos login (543)
.. data:: kshell = 544
Kerberos shell (544)
.. data:: talk = 517
Talk (517)
.. data:: ldp = 646
LDP session connection attempts (MPLS, 646)
"""
echo = 7
discard = 9
daytime = 13
chargen = 19
ftp_data = 20
ftp = 21
ssh = 22
telnet = 23
smtp = 25
time = 37
nicname = 43
tacacs = 49
domain = 53
gopher = 70
finger = 79
www = 80
host_name = 101
pop2 = 109
pop3 = 110
sun_rpc = 111
ident = 113
nntp = 119
bgp = 179
irc = 194
pim_auto_rp = 496
exec_ = 512
login = 513
cmd = 514
lpd = 515
uucp = 540
klogin = 543
kshell = 544
talk = 517
ldp = 646
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['EndPortEnum']
class PortEnum(Enum):
"""
PortEnum
Port
.. data:: echo = 7
Echo (7)
.. data:: discard = 9
Discard (9)
.. data:: daytime = 13
Daytime (13)
.. data:: chargen = 19
Character generator (19)
.. data:: ftp_data = 20
FTP data connections (used infrequently, 20)
.. data:: ftp = 21
File Transfer Protocol (21)
.. data:: ssh = 22
Secure Shell (22)
.. data:: telnet = 23
Telnet (23)
.. data:: smtp = 25
Simple Mail Transport Protocol (25)
.. data:: time = 37
Time (37)
.. data:: nicname = 43
Nicname (43)
.. data:: tacacs = 49
TAC Access Control System (49)
.. data:: domain = 53
Domain Name Service (53)
.. data:: gopher = 70
Gopher (70)
.. data:: finger = 79
Finger (79)
.. data:: www = 80
World Wide Web (HTTP, 80)
.. data:: host_name = 101
NIC hostname server (101)
.. data:: pop2 = 109
Post Office Protocol v2 (109)
.. data:: pop3 = 110
Post Office Protocol v3 (110)
.. data:: sun_rpc = 111
Sun Remote Procedure Call (111)
.. data:: ident = 113
Ident Protocol (113)
.. data:: nntp = 119
Network News Transport Protocol (119)
.. data:: bgp = 179
Border Gateway Protocol (179)
.. data:: irc = 194
Internet Relay Chat (194)
.. data:: pim_auto_rp = 496
PIM Auto-RP (496)
.. data:: exec_ = 512
Exec (rsh, 512)
.. data:: login = 513
Login (rlogin, 513)
.. data:: cmd = 514
Remote commands (rcmd, 514)
.. data:: lpd = 515
Printer service (515)
.. data:: uucp = 540
Unix-to-Unix Copy Program (540)
.. data:: klogin = 543
Kerberos login (543)
.. data:: kshell = 544
Kerberos shell (544)
.. data:: talk = 517
Talk (517)
.. data:: ldp = 646
LDP session connection attempts (MPLS, 646)
"""
echo = 7
discard = 9
daytime = 13
chargen = 19
ftp_data = 20
ftp = 21
ssh = 22
telnet = 23
smtp = 25
time = 37
nicname = 43
tacacs = 49
domain = 53
gopher = 70
finger = 79
www = 80
host_name = 101
pop2 = 109
pop3 = 110
sun_rpc = 111
ident = 113
nntp = 119
bgp = 179
irc = 194
pim_auto_rp = 496
exec_ = 512
login = 513
cmd = 514
lpd = 515
uucp = 540
klogin = 543
kshell = 544
talk = 517
ldp = 646
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['PortEnum']
class PortOperatorEnum(Enum):
"""
PortOperatorEnum
Port operator
.. data:: equal = 0
Match packets on ports equal to entered port
number
.. data:: not_equal = 1
Match packets on ports not equal to entered
port number
.. data:: greater_than = 2
Match packets on ports greater than entered
port number
.. data:: less_than = 3
Match packets on ports less than entered port
number
"""
equal = 0
not_equal = 1
greater_than = 2
less_than = 3
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['PortOperatorEnum']
class StartPortEnum(Enum):
"""
StartPortEnum
Start port
.. data:: echo = 7
Echo (7)
.. data:: discard = 9
Discard (9)
.. data:: daytime = 13
Daytime (13)
.. data:: chargen = 19
Character generator (19)
.. data:: ftp_data = 20
FTP data connections (used infrequently, 20)
.. data:: ftp = 21
File Transfer Protocol (21)
.. data:: ssh = 22
Secure Shell (22)
.. data:: telnet = 23
Telnet (23)
.. data:: smtp = 25
Simple Mail Transport Protocol (25)
.. data:: time = 37
Time (37)
.. data:: nicname = 43
Nicname (43)
.. data:: tacacs = 49
TAC Access Control System (49)
.. data:: domain = 53
Domain Name Service (53)
.. data:: gopher = 70
Gopher (70)
.. data:: finger = 79
Finger (79)
.. data:: www = 80
World Wide Web (HTTP, 80)
.. data:: host_name = 101
NIC hostname server (101)
.. data:: pop2 = 109
Post Office Protocol v2 (109)
.. data:: pop3 = 110
Post Office Protocol v3 (110)
.. data:: sun_rpc = 111
Sun Remote Procedure Call (111)
.. data:: ident = 113
Ident Protocol (113)
.. data:: nntp = 119
Network News Transport Protocol (119)
.. data:: bgp = 179
Border Gateway Protocol (179)
.. data:: irc = 194
Internet Relay Chat (194)
.. data:: pim_auto_rp = 496
PIM Auto-RP (496)
.. data:: exec_ = 512
Exec (rsh, 512)
.. data:: login = 513
Login (rlogin, 513)
.. data:: cmd = 514
Remote commands (rcmd, 514)
.. data:: lpd = 515
Printer service (515)
.. data:: uucp = 540
Unix-to-Unix Copy Program (540)
.. data:: klogin = 543
Kerberos login (543)
.. data:: kshell = 544
Kerberos shell (544)
.. data:: talk = 517
Talk (517)
.. data:: ldp = 646
LDP session connection attempts (MPLS, 646)
"""
echo = 7
discard = 9
daytime = 13
chargen = 19
ftp_data = 20
ftp = 21
ssh = 22
telnet = 23
smtp = 25
time = 37
nicname = 43
tacacs = 49
domain = 53
gopher = 70
finger = 79
www = 80
host_name = 101
pop2 = 109
pop3 = 110
sun_rpc = 111
ident = 113
nntp = 119
bgp = 179
irc = 194
pim_auto_rp = 496
exec_ = 512
login = 513
cmd = 514
lpd = 515
uucp = 540
klogin = 543
kshell = 544
talk = 517
ldp = 646
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['StartPortEnum']
class ObjectGroup(object):
"""
Object\-group configuration
.. attribute:: network
Network object group
**type**\: :py:class:`Network <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network>`
.. attribute:: port
Port object group
**type**\: :py:class:`Port <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Port>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.network = ObjectGroup.Network()
self.network.parent = self
self.port = ObjectGroup.Port()
self.port.parent = self
class Port(object):
"""
Port object group
.. attribute:: udf_objects
Table of port objects groups
**type**\: :py:class:`UdfObjects <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Port.UdfObjects>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.udf_objects = ObjectGroup.Port.UdfObjects()
self.udf_objects.parent = self
class UdfObjects(object):
"""
Table of port objects groups
.. attribute:: udf_object
Port object group
**type**\: list of :py:class:`UdfObject <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Port.UdfObjects.UdfObject>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.udf_object = YList()
self.udf_object.parent = self
self.udf_object.name = 'udf_object'
class UdfObject(object):
"""
Port object group
.. attribute:: object_name <key>
Port object group name \- maximum 64 characters
**type**\: str
**length:** 1..64
.. attribute:: description
Up to 100 characters describing this object
**type**\: str
**length:** 1..100
.. attribute:: nested_groups
Table of nested port object groups
**type**\: :py:class:`NestedGroups <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Port.UdfObjects.UdfObject.NestedGroups>`
.. attribute:: operators
Table of port operators
**type**\: :py:class:`Operators <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Port.UdfObjects.UdfObject.Operators>`
.. attribute:: port_ranges
Table of port range addresses
**type**\: :py:class:`PortRanges <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Port.UdfObjects.UdfObject.PortRanges>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.object_name = None
self.description = None
self.nested_groups = ObjectGroup.Port.UdfObjects.UdfObject.NestedGroups()
self.nested_groups.parent = self
self.operators = ObjectGroup.Port.UdfObjects.UdfObject.Operators()
self.operators.parent = self
self.port_ranges = ObjectGroup.Port.UdfObjects.UdfObject.PortRanges()
self.port_ranges.parent = self
class Operators(object):
"""
Table of port operators
.. attribute:: operator
op class
**type**\: list of :py:class:`Operator <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Port.UdfObjects.UdfObject.Operators.Operator>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.operator = YList()
self.operator.parent = self
self.operator.name = 'operator'
class Operator(object):
"""
op class
.. attribute:: operator_type <key>
operation for ports
**type**\: :py:class:`PortOperatorEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.PortOperatorEnum>`
.. attribute:: port <key>
Port number
**type**\: one of the below types:
**type**\: :py:class:`PortEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.PortEnum>`
----
**type**\: int
**range:** 0..65535
----
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.operator_type = None
self.port = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.operator_type is None:
raise YPYModelError('Key property operator_type is None')
if self.port is None:
raise YPYModelError('Key property port is None')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:operator[Cisco-IOS-XR-infra-objmgr-cfg:operator-type = ' + str(self.operator_type) + '][Cisco-IOS-XR-infra-objmgr-cfg:port = ' + str(self.port) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.operator_type is not None:
return True
if self.port is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Port.UdfObjects.UdfObject.Operators.Operator']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:operators'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.operator is not None:
for child_ref in self.operator:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Port.UdfObjects.UdfObject.Operators']['meta_info']
class NestedGroups(object):
"""
Table of nested port object groups
.. attribute:: nested_group
nested object group
**type**\: list of :py:class:`NestedGroup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Port.UdfObjects.UdfObject.NestedGroups.NestedGroup>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.nested_group = YList()
self.nested_group.parent = self
self.nested_group.name = 'nested_group'
class NestedGroup(object):
"""
nested object group
.. attribute:: nested_group_name <key>
Name of a nested object group
**type**\: str
**length:** 1..64
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.nested_group_name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.nested_group_name is None:
raise YPYModelError('Key property nested_group_name is None')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:nested-group[Cisco-IOS-XR-infra-objmgr-cfg:nested-group-name = ' + str(self.nested_group_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.nested_group_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Port.UdfObjects.UdfObject.NestedGroups.NestedGroup']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:nested-groups'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.nested_group is not None:
for child_ref in self.nested_group:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Port.UdfObjects.UdfObject.NestedGroups']['meta_info']
class PortRanges(object):
"""
Table of port range addresses
.. attribute:: port_range
Match only packets on a given port range
**type**\: list of :py:class:`PortRange <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Port.UdfObjects.UdfObject.PortRanges.PortRange>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.port_range = YList()
self.port_range.parent = self
self.port_range.name = 'port_range'
class PortRange(object):
"""
Match only packets on a given port range
.. attribute:: start_port <key>
Port number
**type**\: one of the below types:
**type**\: :py:class:`StartPortEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.StartPortEnum>`
----
**type**\: int
**range:** 0..65535
----
.. attribute:: end_port <key>
Port number
**type**\: one of the below types:
**type**\: :py:class:`EndPortEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.EndPortEnum>`
----
**type**\: int
**range:** 0..65535
----
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.start_port = None
self.end_port = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.start_port is None:
raise YPYModelError('Key property start_port is None')
if self.end_port is None:
raise YPYModelError('Key property end_port is None')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:port-range[Cisco-IOS-XR-infra-objmgr-cfg:start-port = ' + str(self.start_port) + '][Cisco-IOS-XR-infra-objmgr-cfg:end-port = ' + str(self.end_port) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.start_port is not None:
return True
if self.end_port is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Port.UdfObjects.UdfObject.PortRanges.PortRange']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:port-ranges'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.port_range is not None:
for child_ref in self.port_range:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Port.UdfObjects.UdfObject.PortRanges']['meta_info']
@property
def _common_path(self):
if self.object_name is None:
raise YPYModelError('Key property object_name is None')
return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group/Cisco-IOS-XR-infra-objmgr-cfg:port/Cisco-IOS-XR-infra-objmgr-cfg:udf-objects/Cisco-IOS-XR-infra-objmgr-cfg:udf-object[Cisco-IOS-XR-infra-objmgr-cfg:object-name = ' + str(self.object_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.object_name is not None:
return True
if self.description is not None:
return True
if self.nested_groups is not None and self.nested_groups._has_data():
return True
if self.operators is not None and self.operators._has_data():
return True
if self.port_ranges is not None and self.port_ranges._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Port.UdfObjects.UdfObject']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group/Cisco-IOS-XR-infra-objmgr-cfg:port/Cisco-IOS-XR-infra-objmgr-cfg:udf-objects'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.udf_object is not None:
for child_ref in self.udf_object:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Port.UdfObjects']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group/Cisco-IOS-XR-infra-objmgr-cfg:port'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.udf_objects is not None and self.udf_objects._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Port']['meta_info']
class Network(object):
"""
Network object group
.. attribute:: ipv4
IPv4 object group
**type**\: :py:class:`Ipv4 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4>`
.. attribute:: ipv6
IPv6 object group
**type**\: :py:class:`Ipv6 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.ipv4 = ObjectGroup.Network.Ipv4()
self.ipv4.parent = self
self.ipv6 = ObjectGroup.Network.Ipv6()
self.ipv6.parent = self
class Ipv6(object):
"""
IPv6 object group
.. attribute:: udf_objects
Table of ipv6 object groups
**type**\: :py:class:`UdfObjects <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6.UdfObjects>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.udf_objects = ObjectGroup.Network.Ipv6.UdfObjects()
self.udf_objects.parent = self
class UdfObjects(object):
"""
Table of ipv6 object groups
.. attribute:: udf_object
IPv6 object group
**type**\: list of :py:class:`UdfObject <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6.UdfObjects.UdfObject>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.udf_object = YList()
self.udf_object.parent = self
self.udf_object.name = 'udf_object'
class UdfObject(object):
"""
IPv6 object group
.. attribute:: object_name <key>
IPv6 object group name \- maximum 64 characters
**type**\: str
**length:** 1..64
.. attribute:: address_ranges
Table of ipv6 address ranges
**type**\: :py:class:`AddressRanges <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.AddressRanges>`
.. attribute:: addresses
Table of ipv6 addresses
**type**\: :py:class:`Addresses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.Addresses>`
.. attribute:: description
Up to 100 characters describing this object
**type**\: str
**length:** 1..100
.. attribute:: hosts
Table of ipv6 host addresses
**type**\: :py:class:`Hosts <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.Hosts>`
.. attribute:: nested_groups
Table of nested ipv6 object groups
**type**\: :py:class:`NestedGroups <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.NestedGroups>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.object_name = None
self.address_ranges = ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.AddressRanges()
self.address_ranges.parent = self
self.addresses = ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.Addresses()
self.addresses.parent = self
self.description = None
self.hosts = ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.Hosts()
self.hosts.parent = self
self.nested_groups = ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.NestedGroups()
self.nested_groups.parent = self
class NestedGroups(object):
"""
Table of nested ipv6 object groups
.. attribute:: nested_group
nested object group
**type**\: list of :py:class:`NestedGroup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.NestedGroups.NestedGroup>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.nested_group = YList()
self.nested_group.parent = self
self.nested_group.name = 'nested_group'
class NestedGroup(object):
"""
nested object group
.. attribute:: nested_group_name <key>
Enter the name of a nested object group
**type**\: str
**length:** 1..64
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.nested_group_name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.nested_group_name is None:
raise YPYModelError('Key property nested_group_name is None')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:nested-group[Cisco-IOS-XR-infra-objmgr-cfg:nested-group-name = ' + str(self.nested_group_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.nested_group_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.NestedGroups.NestedGroup']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:nested-groups'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.nested_group is not None:
for child_ref in self.nested_group:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.NestedGroups']['meta_info']
class AddressRanges(object):
"""
Table of ipv6 address ranges
.. attribute:: address_range
Range of host addresses
**type**\: list of :py:class:`AddressRange <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.AddressRanges.AddressRange>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.address_range = YList()
self.address_range.parent = self
self.address_range.name = 'address_range'
class AddressRange(object):
"""
Range of host addresses
.. attribute:: start_address <key>
IPv6 address
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: end_address <key>
IPv6 address
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.start_address = None
self.end_address = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.start_address is None:
raise YPYModelError('Key property start_address is None')
if self.end_address is None:
raise YPYModelError('Key property end_address is None')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:address-range[Cisco-IOS-XR-infra-objmgr-cfg:start-address = ' + str(self.start_address) + '][Cisco-IOS-XR-infra-objmgr-cfg:end-address = ' + str(self.end_address) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.start_address is not None:
return True
if self.end_address is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.AddressRanges.AddressRange']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:address-ranges'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.address_range is not None:
for child_ref in self.address_range:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.AddressRanges']['meta_info']
class Addresses(object):
"""
Table of ipv6 addresses
.. attribute:: address
IPv6 address
**type**\: list of :py:class:`Address <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.Addresses.Address>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.address = YList()
self.address.parent = self
self.address.name = 'address'
class Address(object):
"""
IPv6 address
.. attribute:: prefix <key>
IPv6 prefix x\:x\:\:x/y
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: prefix_length <key>
Prefix of the IP Address
**type**\: int
**range:** 0..128
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.prefix = None
self.prefix_length = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.prefix is None:
raise YPYModelError('Key property prefix is None')
if self.prefix_length is None:
raise YPYModelError('Key property prefix_length is None')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:address[Cisco-IOS-XR-infra-objmgr-cfg:prefix = ' + str(self.prefix) + '][Cisco-IOS-XR-infra-objmgr-cfg:prefix-length = ' + str(self.prefix_length) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.prefix is not None:
return True
if self.prefix_length is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.Addresses.Address']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:addresses'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.address is not None:
for child_ref in self.address:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.Addresses']['meta_info']
class Hosts(object):
"""
Table of ipv6 host addresses
.. attribute:: host
A single host address
**type**\: list of :py:class:`Host <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.Hosts.Host>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.host = YList()
self.host.parent = self
self.host.name = 'host'
class Host(object):
"""
A single host address
.. attribute:: host_address <key>
host ipv6 address
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.host_address = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.host_address is None:
raise YPYModelError('Key property host_address is None')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:host[Cisco-IOS-XR-infra-objmgr-cfg:host-address = ' + str(self.host_address) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.host_address is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.Hosts.Host']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:hosts'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.host is not None:
for child_ref in self.host:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv6.UdfObjects.UdfObject.Hosts']['meta_info']
@property
def _common_path(self):
if self.object_name is None:
raise YPYModelError('Key property object_name is None')
return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group/Cisco-IOS-XR-infra-objmgr-cfg:network/Cisco-IOS-XR-infra-objmgr-cfg:ipv6/Cisco-IOS-XR-infra-objmgr-cfg:udf-objects/Cisco-IOS-XR-infra-objmgr-cfg:udf-object[Cisco-IOS-XR-infra-objmgr-cfg:object-name = ' + str(self.object_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.object_name is not None:
return True
if self.address_ranges is not None and self.address_ranges._has_data():
return True
if self.addresses is not None and self.addresses._has_data():
return True
if self.description is not None:
return True
if self.hosts is not None and self.hosts._has_data():
return True
if self.nested_groups is not None and self.nested_groups._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv6.UdfObjects.UdfObject']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group/Cisco-IOS-XR-infra-objmgr-cfg:network/Cisco-IOS-XR-infra-objmgr-cfg:ipv6/Cisco-IOS-XR-infra-objmgr-cfg:udf-objects'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.udf_object is not None:
for child_ref in self.udf_object:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv6.UdfObjects']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group/Cisco-IOS-XR-infra-objmgr-cfg:network/Cisco-IOS-XR-infra-objmgr-cfg:ipv6'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.udf_objects is not None and self.udf_objects._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv6']['meta_info']
class Ipv4(object):
"""
IPv4 object group
.. attribute:: udf_objects
Table of ipv4 object groups
**type**\: :py:class:`UdfObjects <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4.UdfObjects>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.udf_objects = ObjectGroup.Network.Ipv4.UdfObjects()
self.udf_objects.parent = self
class UdfObjects(object):
"""
Table of ipv4 object groups
.. attribute:: udf_object
IPv4 object group
**type**\: list of :py:class:`UdfObject <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4.UdfObjects.UdfObject>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.udf_object = YList()
self.udf_object.parent = self
self.udf_object.name = 'udf_object'
class UdfObject(object):
"""
IPv4 object group
.. attribute:: object_name <key>
IPv4 object group name \- maximum 64 characters
**type**\: str
**length:** 1..64
.. attribute:: address_ranges
Table of ipv4 host address ranges
**type**\: :py:class:`AddressRanges <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.AddressRanges>`
.. attribute:: addresses
Table of addresses
**type**\: :py:class:`Addresses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.Addresses>`
.. attribute:: description
Up to 100 characters describing this object
**type**\: str
**length:** 1..100
.. attribute:: hosts
Table of host addresses
**type**\: :py:class:`Hosts <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.Hosts>`
.. attribute:: nested_groups
Table of nested ipv4 object groups
**type**\: :py:class:`NestedGroups <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.NestedGroups>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.object_name = None
self.address_ranges = ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.AddressRanges()
self.address_ranges.parent = self
self.addresses = ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.Addresses()
self.addresses.parent = self
self.description = None
self.hosts = ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.Hosts()
self.hosts.parent = self
self.nested_groups = ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.NestedGroups()
self.nested_groups.parent = self
class NestedGroups(object):
"""
Table of nested ipv4 object groups
.. attribute:: nested_group
Nested object group
**type**\: list of :py:class:`NestedGroup <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.NestedGroups.NestedGroup>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.nested_group = YList()
self.nested_group.parent = self
self.nested_group.name = 'nested_group'
class NestedGroup(object):
"""
Nested object group
.. attribute:: nested_group_name <key>
Nested object group
**type**\: str
**length:** 1..64
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.nested_group_name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.nested_group_name is None:
raise YPYModelError('Key property nested_group_name is None')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:nested-group[Cisco-IOS-XR-infra-objmgr-cfg:nested-group-name = ' + str(self.nested_group_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.nested_group_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.NestedGroups.NestedGroup']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:nested-groups'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.nested_group is not None:
for child_ref in self.nested_group:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.NestedGroups']['meta_info']
class AddressRanges(object):
"""
Table of ipv4 host address ranges
.. attribute:: address_range
Range of host addresses
**type**\: list of :py:class:`AddressRange <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.AddressRanges.AddressRange>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.address_range = YList()
self.address_range.parent = self
self.address_range.name = 'address_range'
class AddressRange(object):
"""
Range of host addresses
.. attribute:: start_address <key>
IPv4 address
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: end_address <key>
IPv4 address
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.start_address = None
self.end_address = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.start_address is None:
raise YPYModelError('Key property start_address is None')
if self.end_address is None:
raise YPYModelError('Key property end_address is None')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:address-range[Cisco-IOS-XR-infra-objmgr-cfg:start-address = ' + str(self.start_address) + '][Cisco-IOS-XR-infra-objmgr-cfg:end-address = ' + str(self.end_address) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.start_address is not None:
return True
if self.end_address is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.AddressRanges.AddressRange']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:address-ranges'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.address_range is not None:
for child_ref in self.address_range:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.AddressRanges']['meta_info']
class Addresses(object):
"""
Table of addresses
.. attribute:: address
IPv4 address
**type**\: list of :py:class:`Address <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.Addresses.Address>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.address = YList()
self.address.parent = self
self.address.name = 'address'
class Address(object):
"""
IPv4 address
.. attribute:: prefix <key>
IPv4 address/prefix
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
.. attribute:: prefix_length <key>
Prefix of the IP Address
**type**\: int
**range:** 0..32
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.prefix = None
self.prefix_length = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.prefix is None:
raise YPYModelError('Key property prefix is None')
if self.prefix_length is None:
raise YPYModelError('Key property prefix_length is None')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:address[Cisco-IOS-XR-infra-objmgr-cfg:prefix = ' + str(self.prefix) + '][Cisco-IOS-XR-infra-objmgr-cfg:prefix-length = ' + str(self.prefix_length) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.prefix is not None:
return True
if self.prefix_length is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.Addresses.Address']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:addresses'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.address is not None:
for child_ref in self.address:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.Addresses']['meta_info']
class Hosts(object):
"""
Table of host addresses
.. attribute:: host
A single host address
**type**\: list of :py:class:`Host <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_cfg.ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.Hosts.Host>`
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.host = YList()
self.host.parent = self
self.host.name = 'host'
class Host(object):
"""
A single host address
.. attribute:: host_address <key>
Host ipv4 address
**type**\: one of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
----
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
----
"""
_prefix = 'infra-objmgr-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.host_address = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.host_address is None:
raise YPYModelError('Key property host_address is None')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:host[Cisco-IOS-XR-infra-objmgr-cfg:host-address = ' + str(self.host_address) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.host_address is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.Hosts.Host']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-infra-objmgr-cfg:hosts'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.host is not None:
for child_ref in self.host:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv4.UdfObjects.UdfObject.Hosts']['meta_info']
@property
def _common_path(self):
if self.object_name is None:
raise YPYModelError('Key property object_name is None')
return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group/Cisco-IOS-XR-infra-objmgr-cfg:network/Cisco-IOS-XR-infra-objmgr-cfg:ipv4/Cisco-IOS-XR-infra-objmgr-cfg:udf-objects/Cisco-IOS-XR-infra-objmgr-cfg:udf-object[Cisco-IOS-XR-infra-objmgr-cfg:object-name = ' + str(self.object_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.object_name is not None:
return True
if self.address_ranges is not None and self.address_ranges._has_data():
return True
if self.addresses is not None and self.addresses._has_data():
return True
if self.description is not None:
return True
if self.hosts is not None and self.hosts._has_data():
return True
if self.nested_groups is not None and self.nested_groups._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv4.UdfObjects.UdfObject']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group/Cisco-IOS-XR-infra-objmgr-cfg:network/Cisco-IOS-XR-infra-objmgr-cfg:ipv4/Cisco-IOS-XR-infra-objmgr-cfg:udf-objects'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.udf_object is not None:
for child_ref in self.udf_object:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv4.UdfObjects']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group/Cisco-IOS-XR-infra-objmgr-cfg:network/Cisco-IOS-XR-infra-objmgr-cfg:ipv4'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.udf_objects is not None and self.udf_objects._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network.Ipv4']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group/Cisco-IOS-XR-infra-objmgr-cfg:network'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.ipv4 is not None and self.ipv4._has_data():
return True
if self.ipv6 is not None and self.ipv6._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup.Network']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-infra-objmgr-cfg:object-group'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self.network is not None and self.network._has_data():
return True
if self.port is not None and self.port._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_infra_objmgr_cfg as meta
return meta._meta_table['ObjectGroup']['meta_info']
|
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from model_utils import Choices
from model_utils.models import TimeStampedModel
class History(TimeStampedModel):
RESOLUTIONS = Choices('second', 'minute', 'hour', 'day', 'week', 'month', 'year')
resolution = models.CharField(choices=RESOLUTIONS, default=RESOLUTIONS.day, max_length=6)
tag = models.SlugField()
datetime = models.DateTimeField()
source_type = models.ForeignKey(ContentType)
source_id = models.PositiveIntegerField(blank=True, null=True)
source_object = GenericForeignKey('source_type', 'source_id')
sum = models.IntegerField(default=0)
delta = models.IntegerField(default=0)
class Meta:
get_latest_by = 'datetime'
verbose_name_plural = 'histories'
def __unicode__(self):
return u'%s' % (self.tag)
def save(self, *args, **kwargs):
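# Look up the most recent History row with the same resolution and tag;
# if one exists, store the change in `sum` since that row as `delta`.
# For the first row of a tag/resolution pair, `delta` keeps its default of 0.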
try:
filters = {'resolution': self.resolution, 'tag': self.tag}
previous = self._default_manager.filter(**filters).latest()
except self._meta.model.DoesNotExist:
pass
else:
self.delta = self.sum - previous.sum
super(History, self).save(*args, **kwargs)
|
pass
|
import sys
import re
"""Baby Names exercise
Define the extract_names() function below and change main()
to call it.
For writing regex, it's nice to include a copy of the target
text for inspiration.
Here's what the html looks like in the baby.html files:
...
<h3 align="center">Popularity in 1990</h3>
....
<tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>
<tr align="right"><td>2</td><td>Christopher</td><td>Ashley</td>
<tr align="right"><td>3</td><td>Matthew</td><td>Brittany</td>
...
Suggested milestones for incremental development:
-Extract the year and print it
-Extract the names and rank numbers and just print them
-Get the names data into a dict and print it
-Build the [year, 'name rank', ... ] list and print it
-Fix main() to use the extract_names list
"""
print 'Hey there !!'
def extract_names(filename):
"""
Given a file name for baby.html, returns a list starting with the year string
followed by the name-rank strings in alphabetical order.
['2006', 'Aaliyah 91', 'Aaron 57', 'Abagail 895', ...]
"""
# +++your code here+++
# LAB(begin solution)
# The list [year, name_and_rank, name_and_rank, ...] we'll eventually return.
names = []
# Open and read the file.
f = open(filename, 'rU')
text = f.read()
# Could process the file line-by-line, but regex on the whole text
# at once is even easier.
# Get the year.
year_match = re.search(r'Popularity\sin\s(\d\d\d\d)', text)
if not year_match:
# We didn't find a year, so we'll exit with an error message.
sys.stderr.write('Couldn\'t find the year!\n')
sys.exit(1)
year = year_match.group(1)
names.append(year)
# Extract all the data tuples with a findall()
# each tuple is: (rank, boy-name, girl-name)
tuples = re.findall(r'<td>(\d+)</td><td>(\w+)</td><td>(\w+)</td>', text)
#print tuples
# Store data into a dict using each name as a key and that
# name's rank number as the value.
# (if the name is already in there, don't add it, since
# this new rank will be bigger than the previous rank).
names_to_rank = {}
for rank_tuple in tuples:
(rank, boyname, girlname) = rank_tuple # unpack the tuple into 3 vars
if boyname not in names_to_rank:
names_to_rank[boyname] = rank
if girlname not in names_to_rank:
names_to_rank[girlname] = rank
# You can also write:
# for rank, boyname, girlname in tuples:
# ...
# To unpack the tuples inside a for-loop.
# Get the names, sorted in the right order
sorted_names = sorted(names_to_rank.keys())
# Build up result list, one element per line
for name in sorted_names:
names.append(name + " " + names_to_rank[name])
return names
# LAB(replace solution)
# return
# LAB(end solution)
def main():
# This command-line parsing code is provided.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if not args:
print 'usage: [--summaryfile] file [file ...]'
sys.exit(1)
# Notice the summary flag and remove it from args if it is present.
summary = False
if args[0] == '--summaryfile':
summary = True
del args[0]
# +++your code here+++
# For each filename, get the names, then either print the text output
# or write it to a summary file
# LAB(begin solution)
for filename in args:
names = extract_names(filename)
# Make text out of the whole list
text = '\n'.join(names)
if summary:
outf = open(filename + '.summary', 'w')
outf.write(text + '\n')
outf.close()
else:
print text
# LAB(end solution)
if __name__ == '__main__':
main()
|
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from bs4 import BeautifulSoup
from datetime import datetime
from decimal import *
import sys
phantonPath = "../phantomjs/phantomjs"
contratacionPage = "https://contrataciondelestado.es/wps/portal/!ut/p/b1/lZDLDoIwEEU_aaYParssrwLxAVZQujEsjMH42Bi_30rcGCPq7CZz7pzkgoOWKC6kYBPYgDt3t37fXfvLuTs-die2PFlEUZpRlJbFSKdxXYvMrybwQOsB_DAah3xopdQh0YislqhFVUXK_0HFnvmARbwpmlLY3CDmWRpPaxKgoeI3_4jgxW_sjPhzwkRAkRhLn_mPAvqn_13wJb8GNyBjDQzAWMXjEgrz7HLaQeuxyVY3SaVzxXARLj1WlLNVaShB5LCCNoGTO6Z-VH7g3R2UoLEz/dl4/d5/L2dBISEvZ0FBIS9nQSEh/pw/Z7_AVEQAI930OBRD02JPMTPG21004/act/id=0/p=javax.servlet.include.path_info=QCPjspQCPbusquedaQCPBusquedaVIS_UOE.jsp/299420689304/-/"
class detalleContrato():
""" Clase que devuelve los detalles de un contrato por nº expediente y Órgano de contratación
numExpediente
OrgContratacion
driverType=1 (Firefox, online) / 2(phantomjs)
"""
driver = ""
driverType = 1
estadoLic = ""
procedimiento = ""
enlacelic = ''
codigocpv = ''
resultado = ''
adjudicatario =''
numlicitadores = 0
impadjudicacion = ''
def __init__(self, numExpediente, OrgContratacion, driverType=1):
self.driverType = driverType
self.numExpediente = numExpediente
self.OrgContratacion = OrgContratacion
if driverType == 1:
self.driver = webdriver.Firefox()
elif driverType == 2:
self.driver = webdriver.PhantomJS(phantonPath, service_args=['--ignore-ssl-errors=true'])
self.driver.set_window_size(1120, 550)
self.extraeDetalles()
def cargaPagina(self):
# Load the procurement search page
if self.driverType == 2:
self.driver.implicitly_wait(10)
self.driver.set_page_load_timeout(10)
try:
self.driver.get(contratacionPage)
except TimeoutException as e:
# Page load timed out; report it and carry on
print(e)
def debugPhanton(self):
self.cargaPagina()
# check phantomjs
print(self.driver.page_source)
def extraeDetalles(self):
self.cargaPagina()
# Enter the contract file number
contrato = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:text71ExpMAQ')
contrato.send_keys(self.numExpediente)
# Enter the contracting body
orgcont = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:texoorganoMAQ')
orgcont.send_keys(self.OrgContratacion)
# Click the search button
self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:button1').click()
# Open the link to the contract record
self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:enlaceExpediente_0').click() # only valid for the first search result, which is the case here
# Read the main data fields
self.estadoLic = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_Estado').text
self.procedimiento = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_Procedimiento').text
self.enlacelic = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_EnlaceLicPLACE').text
self.codigocpv = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_CPV').text
# Depending on the contract state, the following elements may or may not exist
try:
self.resultado = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_Resultado').text
self.adjudicatario = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_Adjudicatario').text
importe_text = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_ImporteAdjudicacion').text.replace(".","").replace(",",".")
try:
self.impadjudicacion = Decimal(importe_text.strip(' "'))
except (ValueError, TypeError, DecimalException) as e:
self.impadjudicacion = 0
numlicitadores_text = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:text_NumeroLicitadores').text
try:
self.numlicitadores = int(numlicitadores_text)
except ValueError:
self.numlicitadores =0
print("numlic= ",self.numlicitadores)
except NoSuchElementException:
# Award details are not published for this state; keep the empty defaults
self.resultado = ''
self.adjudicatario = ''
self.numlicitadores = 0
self.impadjudicacion = ''
# Parse the documents published on the page from the rendered HTML
html_page = self.driver.page_source
soup = BeautifulSoup(html_page, "html5lib")
self.Documento={}
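# self.Documento maps document type -> [publication datetime, document URL]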
for row in soup.findAll("tr", {'class': ['rowClass1', 'rowClass2']}):
try:
fechadoc=datetime.strptime(row.find("td", {'class': 'fechaPubLeft'}).text, '%d/%m/%Y %H:%M:%S')
tipodoc=row.find("td", {'class': 'tipoDocumento'}).text
docs = row.find("td", {'class': 'documentosPub'}).findAll('div')
enlacedoc = docs[0].find('a', href=True)['href']
self.Documento[tipodoc]=[fechadoc,enlacedoc]
except: # additional documents use a different row layout
try:
fechadoc = datetime.strptime(row.find(id='viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:TableEx1_Aux:0:textSfecha1PadreGen').text, '%d/%m/%Y %H:%M:%S')
tipodoc = row.find(id='viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:TableEx1_Aux:0:textStipo1PadreGen').text
enlacedoc = row.find(id='viewns_Z7_AVEQAI930OBRD02JPMTPG21006_:form1:TableEx1_Aux:0:linkVerDocPadreGen')['href']
self.Documento[tipodoc]=[fechadoc,enlacedoc]
except:
pass
# Close the driver
self.driver.quit()
def main(nExp,orgCon):
detalles=detalleContrato(numExpediente = nExp, OrgContratacion=orgCon, driverType=2)
print(detalles.estadoLic)
print(detalles.procedimiento)
print(detalles.enlacelic)
print(detalles.codigocpv)
print(detalles.resultado)
print(detalles.adjudicatario)
print(detalles.numlicitadores)
print(detalles.impadjudicacion)
for docs in detalles.Documento.keys():
print(docs,"-",detalles.Documento[docs][0],detalles.Documento[docs][1])
if __name__ == "__main__":
if not len(sys.argv) == 3:
print ('Usage: pce_extrae_detalle_contrato.py numExpediente orgContratacion')
sys.exit(1)
sys.exit(main(sys.argv[1], # TODO check 1 or 2
sys.argv[2], # TODO check between 6 and 20
))
|
from python_kemptech_api import *
loadmaster_ip = ""
username = ""
password = ""
vs_ip_1 = ""
vs_ip_2 = ""
rs_ip_1 = ""
rs_ip_2 = ""
vs_port = ""
rs_port = ""
class RealServerPool(object):
healthcheck_parameters = [
"checktype",
"checkport",
"checkurl",
"checkheaders",
"checkuse1_1",
"checkuseget",
"checkpostdata",
"checkpattern",
"checkcodes",
"matchlen",
"enhancedhealthchecks",
"rsminimum"
]
rs_parameters = [
"enable",
"forward",
"weight",
"limit",
"critical",
"follow"
]
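# rs_parameters hold per-real-server settings that apply() copies onto each
# recreated server; healthcheck_parameters hold virtual-service-level health
# check settings that apply() pushes to the target VS.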
def __init__(self, rs_list=None, vs=None):
if rs_list is not None:
self.rs = []
for rs in rs_list:
if isinstance(rs, RealServer):
self.rs.append(rs)
else:
ip, port = rs.split(":")
mock_lm = {"endpoint": "", "ip_address": "", "vs": ""}
self.rs.append(RealServer(mock_lm, ip, port))
self.checktype = None
self.checkport = None
self.checkurl = None
self.checkheaders = None
self.checkuse1_1 = None
self.checkuseget = None
self.checkpostdata = None
self.checkpattern = None
self.checkcodes = None
self.matchlen = None
self.enhancedhealthchecks = None
self.rsminimum = None
elif vs is not None:
self.rs = vs.servers.values()
self.checktype = vs.checktype
self.checkport = vs.checkport
self.checkurl = vs.checkurl
self.checkheaders = vs.checkheaders
self.checkuse1_1 = vs.checkuse1_1
self.checkuseget = vs.checkuseget
self.checkpostdata = vs.checkpostdata
self.checkpattern = vs.checkpattern
self.checkcodes = vs.checkcodes
self.matchlen = vs.matchlen
self.enhancedhealthchecks = vs.enhancedhealthchecks
self.rsminimum = vs.rsminimum
def apply(self, vs):
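# Replace the target virtual service's real servers with this pool:
# remove its current servers, recreate each pooled server and copy over
# the per-server parameters, then apply the pool's health check settings.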
[rs.delete() for rs in vs.servers.values()]
for rs in self.rs:
new_rs = vs.create_real_server(rs.rs, rs.rsport)
# Apply other settings
new_rs.save()
for attr in self.rs_parameters:
print("attr: {}".format(attr))
if hasattr(rs, attr) and rs.__getattribute__(attr) is not None:
print("set attr: {}={}".format(attr, rs.__getattribute__(attr)))
new_rs.__setattr__(attr, rs.__getattribute__(attr))
new_rs.update()
for attr in self.healthcheck_parameters:
print("attr: {}".format(attr))
if hasattr(self, attr) and self.__getattribute__(attr) is not None:
print("set attr: {}={}".format(attr, self.__getattribute__(attr)))
vs.__setattr__(attr, self.__getattribute__(attr))
vs.update()
lm = LoadMaster(loadmaster_ip, username, password)
[vs.delete() for vs in lm.vs.values()]
vs = lm.create_virtual_service(vs_ip_1, vs_port, "tcp")
vs.save()
vs.checktype = 'HTTPS'
vs.checkport = "8443"
vs.update()
rs1 = vs.create_real_server(rs_ip_1, rs_port)
rs1.save()
rs1.weight = 200
rs1.update()
rs2 = vs.create_real_server(rs_ip_2, rs_port)
rs2.save()
rs2.enable = 'N'
rs2.update()
pool1 = RealServerPool(vs=vs)
vs2 = lm.create_virtual_service(vs_ip_2, vs_port, "tcp")
vs2.save()
pool1.apply(vs2)
rs_list = ["172.22.100.6:88", "172.22.100.7:88", "172.22.100.8:88", "172.22.100.9:88"]
pool2 = RealServerPool(rs_list)
pool2.checktype = "ICMP"
pool2.apply(vs)
pool2.apply(vs2)
|
import mock
from neutron_lib import constants as common_constants
from neutron_lib import context
from neutron_lib.db import constants as db_consts
from neutron_lib.services.qos import constants as qos_consts
from oslo_utils import uuidutils
from neutron.agent.l2.extensions import qos
from neutron.agent.l2.extensions import qos_linux
from neutron.api.rpc.callbacks.consumer import registry
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron import manager
from neutron.objects.qos import policy
from neutron.objects.qos import rule
from neutron.plugins.ml2.drivers.openvswitch.agent import (
ovs_agent_extension_api as ovs_ext_api)
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl import (
ovs_bridge)
from neutron.tests import base
BASE_TEST_POLICY = {'context': None,
'name': 'test1',
'id': uuidutils.generate_uuid()}
TEST_POLICY = policy.QosPolicy(**BASE_TEST_POLICY)
TEST_POLICY_DESCR = policy.QosPolicy(description='fake_descr',
**BASE_TEST_POLICY)
TEST_POLICY2 = policy.QosPolicy(context=None,
name='test2', id=uuidutils.generate_uuid())
TEST_PORT = {'port_id': 'test_port_id',
'qos_policy_id': TEST_POLICY.id}
TEST_PORT2 = {'port_id': 'test_port_id_2',
'qos_policy_id': TEST_POLICY2.id}
FAKE_RULE_ID = uuidutils.generate_uuid()
FAKE_RULE_ID_2 = uuidutils.generate_uuid()
REALLY_FAKE_RULE_ID = uuidutils.generate_uuid()
class FakeDriver(qos_linux.QosLinuxAgentDriver):
SUPPORTED_RULES = {
qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: {
qos_consts.MAX_KBPS: {
'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]},
qos_consts.MAX_BURST: {
'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]},
qos_consts.DIRECTION: {
'type:values': [common_constants.EGRESS_DIRECTION,
common_constants.INGRESS_DIRECTION]}
},
}
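# SUPPORTED_RULES maps each QoS rule type this fake driver supports to the
# constraints of its parameters; the bandwidth-limit entry lists both egress
# and ingress directions, which the direction-related tests below rely on.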
def __init__(self):
super(FakeDriver, self).__init__()
self.create_bandwidth_limit = mock.Mock()
self.update_bandwidth_limit = mock.Mock()
self.delete_bandwidth_limit = mock.Mock()
self.delete_bandwidth_limit_ingress = mock.Mock()
def initialize(self):
pass
class QosFakeRule(rule.QosRule):
rule_type = 'fake_type'
class QosAgentDriverTestCase(base.BaseTestCase):
def setUp(self):
super(QosAgentDriverTestCase, self).setUp()
self.driver = FakeDriver()
self.policy = TEST_POLICY
self.egress_bandwidth_limit_rule = (
rule.QosBandwidthLimitRule(
context=None, id=FAKE_RULE_ID,
qos_policy_id=self.policy.id,
max_kbps=100, max_burst_kbps=200,
direction=common_constants.EGRESS_DIRECTION))
self.ingress_bandwidth_limit_rule = (
rule.QosBandwidthLimitRule(
context=None, id=FAKE_RULE_ID_2,
qos_policy_id=self.policy.id,
max_kbps=100, max_burst_kbps=200,
direction=common_constants.INGRESS_DIRECTION))
self.policy.rules = [self.egress_bandwidth_limit_rule,
self.ingress_bandwidth_limit_rule]
self.port = {'qos_policy_id': None, 'network_qos_policy_id': None,
'device_owner': 'random-device-owner'}
self.fake_rule = QosFakeRule(context=None, id=REALLY_FAKE_RULE_ID,
qos_policy_id=self.policy.id)
def test_create(self):
self.driver.create(self.port, self.policy)
self.driver.create_bandwidth_limit.assert_has_calls([
mock.call(self.port, self.egress_bandwidth_limit_rule),
mock.call(self.port, self.ingress_bandwidth_limit_rule)
])
def test_update(self):
self.driver.update(self.port, self.policy)
self.driver.update_bandwidth_limit.assert_has_calls([
mock.call(self.port, self.egress_bandwidth_limit_rule),
mock.call(self.port, self.ingress_bandwidth_limit_rule)
])
def test_delete(self):
self.driver.delete(self.port, self.policy)
self.driver.delete_bandwidth_limit.assert_called_with(self.port)
self.driver.delete_bandwidth_limit_ingress.assert_called_with(
self.port)
def test_delete_no_policy(self):
self.driver.delete(self.port, qos_policy=None)
self.driver.delete_bandwidth_limit.assert_called_with(self.port)
self.driver.delete_bandwidth_limit_ingress.assert_called_with(
self.port)
def test__iterate_rules_with_unknown_rule_type(self):
self.policy.rules.append(self.fake_rule)
rules = list(self.driver._iterate_rules(self.policy.rules))
self.assertEqual(2, len(rules))
self.assertIsInstance(rules[0], rule.QosBandwidthLimitRule)
self.assertIsInstance(rules[1], rule.QosBandwidthLimitRule)
def test__handle_update_create_rules_checks_should_apply_to_port(self):
self.egress_bandwidth_limit_rule.should_apply_to_port = mock.Mock(
return_value=False)
self.ingress_bandwidth_limit_rule.should_apply_to_port = mock.Mock(
return_value=False)
self.driver.create(self.port, self.policy)
self.assertFalse(self.driver.create_bandwidth_limit.called)
self.egress_bandwidth_limit_rule.should_apply_to_port = mock.Mock(
return_value=True)
self.ingress_bandwidth_limit_rule.should_apply_to_port = mock.Mock(
return_value=True)
self.driver.create(self.port, self.policy)
self.assertTrue(self.driver.create_bandwidth_limit.called)
def test__get_max_burst_value(self):
rule = self.egress_bandwidth_limit_rule
rule.max_burst_kbps = 0
expected_burst = rule.max_kbps * qos_consts.DEFAULT_BURST_RATE
self.assertEqual(
expected_burst, self.driver._get_egress_burst_value(rule)
)
def test__rule_type_has_ingress_direction(self):
self.assertTrue(
self.driver._rule_type_has_ingress_direction(
qos_consts.RULE_TYPE_BANDWIDTH_LIMIT))
# Should return False for rule type other than
# RULE_TYPE_BANDWIDTH_LIMIT
supported_rules = {
qos_consts.RULE_TYPE_DSCP_MARKING: {
qos_consts.DSCP_MARK: {
'type:values': common_constants.VALID_DSCP_MARKS}
}
}
with mock.patch.dict(self.driver.SUPPORTED_RULES, supported_rules):
self.assertFalse(
self.driver._rule_type_has_ingress_direction(
qos_consts.RULE_TYPE_DSCP_MARKING))
# Should return False for rule type RULE_TYPE_BANDWIDTH_LIMIT but
# without INGRESS_DIRECTION in supported values
supported_rules = {
qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: {
'type:values': [common_constants.EGRESS_DIRECTION]
}
}
with mock.patch.dict(self.driver.SUPPORTED_RULES, supported_rules):
self.assertFalse(
self.driver._rule_type_has_ingress_direction(
qos_consts.RULE_TYPE_BANDWIDTH_LIMIT))
def test__rule_is_ingress_direction(self):
self.assertFalse(
self.driver._rule_is_ingress_direction(
self.egress_bandwidth_limit_rule))
self.assertFalse(
self.driver._rule_is_ingress_direction(
self.fake_rule))
self.assertTrue(
self.driver._rule_is_ingress_direction(
self.ingress_bandwidth_limit_rule))
class QosExtensionBaseTestCase(base.BaseTestCase):
def setUp(self):
super(QosExtensionBaseTestCase, self).setUp()
conn_patcher = mock.patch(
'neutron.agent.ovsdb.impl_idl._connection')
conn_patcher.start()
self.addCleanup(conn_patcher.stop)
self.qos_ext = qos.QosAgentExtension()
self.context = context.get_admin_context()
self.connection = mock.Mock()
self.agent_api = ovs_ext_api.OVSAgentExtensionAPI(
ovs_bridge.OVSAgentBridge('br-int'),
ovs_bridge.OVSAgentBridge('br-tun'))
self.qos_ext.consume_api(self.agent_api)
# Don't rely on used driver
mock.patch.object(
manager.NeutronManager, 'load_class_for_provider',
return_value=lambda: mock.Mock(
spec=qos_linux.QosLinuxAgentDriver)).start()
class QosExtensionRpcTestCase(QosExtensionBaseTestCase):
def setUp(self):
super(QosExtensionRpcTestCase, self).setUp()
self.qos_ext.initialize(
self.connection, constants.EXTENSION_DRIVER_TYPE)
self.pull_mock = mock.patch.object(
self.qos_ext.resource_rpc, 'pull',
return_value=TEST_POLICY).start()
def _create_test_port_dict(self, qos_policy_id=None):
return {'port_id': uuidutils.generate_uuid(),
'qos_policy_id': qos_policy_id or TEST_POLICY.id}
def test_handle_port_with_no_policy(self):
port = self._create_test_port_dict()
del port['qos_policy_id']
self.qos_ext._process_reset_port = mock.Mock()
self.qos_ext.handle_port(self.context, port)
self.qos_ext._process_reset_port.assert_called_with(port)
def test_handle_unknown_port(self):
port = self._create_test_port_dict()
qos_policy_id = port['qos_policy_id']
port_id = port['port_id']
self.qos_ext.handle_port(self.context, port)
# we make sure the underlying qos driver is called with the
# right parameters
self.qos_ext.qos_driver.create.assert_called_once_with(
port, TEST_POLICY)
self.assertEqual(port,
self.qos_ext.policy_map.qos_policy_ports[qos_policy_id][port_id])
self.assertIn(port_id, self.qos_ext.policy_map.port_policies)
self.assertEqual(TEST_POLICY,
self.qos_ext.policy_map.known_policies[qos_policy_id])
def test_handle_known_port(self):
port_obj1 = self._create_test_port_dict()
port_obj2 = dict(port_obj1)
self.qos_ext.handle_port(self.context, port_obj1)
self.qos_ext.qos_driver.reset_mock()
self.qos_ext.handle_port(self.context, port_obj2)
self.assertFalse(self.qos_ext.qos_driver.create.called)
def test_handle_known_port_change_policy_id(self):
port = self._create_test_port_dict()
self.qos_ext.handle_port(self.context, port)
self.qos_ext.resource_rpc.pull.reset_mock()
port['qos_policy_id'] = uuidutils.generate_uuid()
self.qos_ext.handle_port(self.context, port)
self.pull_mock.assert_called_once_with(
self.context, resources.QOS_POLICY,
port['qos_policy_id'])
def test_handle_diff_ports_same_policy_id(self):
port_obj1 = self._create_test_port_dict()
port_obj2 = self._create_test_port_dict()
self.qos_ext.handle_port(self.context, port_obj1)
self.pull_mock.assert_called_once_with(
self.context, resources.QOS_POLICY,
port_obj1['qos_policy_id'])
self.assertIsNotNone(
self.qos_ext.policy_map.get_port_policy(port_obj1))
self.assertIsNone(
self.qos_ext.policy_map.get_port_policy(port_obj2))
self.qos_ext.resource_rpc.pull.reset_mock()
self.qos_ext.handle_port(self.context, port_obj2)
self.assertFalse(self.pull_mock.called)
self.assertIsNotNone(
self.qos_ext.policy_map.get_port_policy(port_obj2))
self.assertEqual(
self.qos_ext.policy_map.get_port_policy(port_obj1),
self.qos_ext.policy_map.get_port_policy(port_obj2))
def test_delete_known_port(self):
port = self._create_test_port_dict()
self.qos_ext.handle_port(self.context, port)
self.qos_ext.qos_driver.reset_mock()
self.qos_ext.delete_port(self.context, port)
self.qos_ext.qos_driver.delete.assert_called_with(port)
self.assertIsNone(self.qos_ext.policy_map.get_port_policy(port))
def test_delete_unknown_port(self):
port = self._create_test_port_dict()
self.qos_ext.delete_port(self.context, port)
self.assertTrue(self.qos_ext.qos_driver.delete.called)
self.assertIsNone(self.qos_ext.policy_map.get_port_policy(port))
def test__handle_notification_ignores_all_event_types_except_updated(self):
with mock.patch.object(
self.qos_ext, '_process_update_policy') as update_mock:
for event_type in set(events.VALID) - {events.UPDATED}:
self.qos_ext._handle_notification(mock.Mock(), 'QOS',
object(), event_type)
self.assertFalse(update_mock.called)
def test__handle_notification_passes_update_events(self):
with mock.patch.object(
self.qos_ext, '_process_update_policy') as update_mock:
policy_obj = mock.Mock()
self.qos_ext._handle_notification(mock.Mock(), 'QOS',
[policy_obj], events.UPDATED)
update_mock.assert_called_with(policy_obj)
def test__process_update_policy(self):
port1 = self._create_test_port_dict(qos_policy_id=TEST_POLICY.id)
port2 = self._create_test_port_dict(qos_policy_id=TEST_POLICY2.id)
self.qos_ext.policy_map.set_port_policy(port1, TEST_POLICY)
self.qos_ext.policy_map.set_port_policy(port2, TEST_POLICY2)
self.qos_ext._policy_rules_modified = mock.Mock(return_value=True)
policy_obj = mock.Mock()
policy_obj.id = port1['qos_policy_id']
self.qos_ext._process_update_policy(policy_obj)
self.qos_ext.qos_driver.update.assert_called_with(port1, policy_obj)
self.qos_ext.qos_driver.update.reset_mock()
policy_obj.id = port2['qos_policy_id']
self.qos_ext._process_update_policy(policy_obj)
self.qos_ext.qos_driver.update.assert_called_with(port2, policy_obj)
def test__process_update_policy_descr_not_propagated_into_driver(self):
port = self._create_test_port_dict(qos_policy_id=TEST_POLICY.id)
self.qos_ext.policy_map.set_port_policy(port, TEST_POLICY)
self.qos_ext._policy_rules_modified = mock.Mock(return_value=False)
self.qos_ext._process_update_policy(TEST_POLICY_DESCR)
self.qos_ext._policy_rules_modified.assert_called_with(TEST_POLICY,
TEST_POLICY_DESCR)
self.assertFalse(self.qos_ext.qos_driver.delete.called)
self.assertFalse(self.qos_ext.qos_driver.update.called)
self.assertEqual(TEST_POLICY_DESCR,
self.qos_ext.policy_map.get_policy(TEST_POLICY.id))
def test__process_update_policy_not_known(self):
self.qos_ext._policy_rules_modified = mock.Mock()
self.qos_ext._process_update_policy(TEST_POLICY_DESCR)
self.assertFalse(self.qos_ext._policy_rules_modified.called)
self.assertFalse(self.qos_ext.qos_driver.delete.called)
self.assertFalse(self.qos_ext.qos_driver.update.called)
self.assertIsNone(self.qos_ext.policy_map.get_policy(
TEST_POLICY_DESCR.id))
def test__process_reset_port(self):
port1 = self._create_test_port_dict(qos_policy_id=TEST_POLICY.id)
port2 = self._create_test_port_dict(qos_policy_id=TEST_POLICY2.id)
self.qos_ext.policy_map.set_port_policy(port1, TEST_POLICY)
self.qos_ext.policy_map.set_port_policy(port2, TEST_POLICY2)
self.qos_ext._process_reset_port(port1)
self.qos_ext.qos_driver.delete.assert_called_with(port1)
self.assertIsNone(self.qos_ext.policy_map.get_port_policy(port1))
self.assertIsNotNone(self.qos_ext.policy_map.get_port_policy(port2))
self.qos_ext.qos_driver.delete.reset_mock()
self.qos_ext._process_reset_port(port2)
self.qos_ext.qos_driver.delete.assert_called_with(port2)
self.assertIsNone(self.qos_ext.policy_map.get_port_policy(port2))
class QosExtensionInitializeTestCase(QosExtensionBaseTestCase):
@mock.patch.object(registry, 'register')
@mock.patch.object(resources_rpc, 'ResourcesPushRpcCallback')
def test_initialize_subscribed_to_rpc(self, rpc_mock, subscribe_mock):
self.qos_ext.initialize(
self.connection, constants.EXTENSION_DRIVER_TYPE)
self.connection.create_consumer.assert_has_calls(
[mock.call(
resources_rpc.resource_type_versioned_topic(resource_type),
[rpc_mock()],
fanout=True)
for resource_type in self.qos_ext.SUPPORTED_RESOURCE_TYPES]
)
subscribe_mock.assert_called_with(mock.ANY, resources.QOS_POLICY)
class QosExtensionReflushRulesTestCase(QosExtensionBaseTestCase):
def setUp(self):
super(QosExtensionReflushRulesTestCase, self).setUp()
self.qos_ext.initialize(
self.connection, constants.EXTENSION_DRIVER_TYPE)
self.pull_mock = mock.patch.object(
self.qos_ext.resource_rpc, 'pull',
return_value=TEST_POLICY).start()
self.policy = policy.QosPolicy(**BASE_TEST_POLICY)
self.rule = (
rule.QosBandwidthLimitRule(context=None, id=FAKE_RULE_ID,
qos_policy_id=self.policy.id,
max_kbps=100, max_burst_kbps=10))
self.policy.rules = [self.rule]
self.port = {'port_id': uuidutils.generate_uuid(),
'qos_policy_id': TEST_POLICY.id}
self.new_policy = policy.QosPolicy(description='descr',
**BASE_TEST_POLICY)
def test_is_reflush_required_change_policy_descr(self):
self.qos_ext.policy_map.set_port_policy(self.port, self.policy)
self.new_policy.rules = [self.rule]
self.assertFalse(self.qos_ext._policy_rules_modified(self.policy,
self.new_policy))
def test_is_reflush_required_change_policy_rule(self):
self.qos_ext.policy_map.set_port_policy(self.port, self.policy)
updated_rule = (rule.QosBandwidthLimitRule(context=None,
id=FAKE_RULE_ID,
qos_policy_id=self.policy.id,
max_kbps=200,
max_burst_kbps=20))
self.new_policy.rules = [updated_rule]
self.assertTrue(self.qos_ext._policy_rules_modified(self.policy,
self.new_policy))
def test_is_reflush_required_remove_rules(self):
self.qos_ext.policy_map.set_port_policy(self.port, self.policy)
self.new_policy.rules = []
self.assertTrue(self.qos_ext._policy_rules_modified(self.policy,
self.new_policy))
def test_is_reflush_required_add_rules(self):
self.qos_ext.policy_map.set_port_policy(self.port, self.policy)
self.new_policy.rules = [self.rule]
fake_rule = QosFakeRule(context=None, id=REALLY_FAKE_RULE_ID,
qos_policy_id=self.policy.id)
self.new_policy.rules.append(fake_rule)
self.assertTrue(self.qos_ext._policy_rules_modified(self.policy,
self.new_policy))
class PortPolicyMapTestCase(base.BaseTestCase):
def setUp(self):
super(PortPolicyMapTestCase, self).setUp()
self.policy_map = qos.PortPolicyMap()
def test_update_policy(self):
self.policy_map.update_policy(TEST_POLICY)
self.assertEqual(TEST_POLICY,
self.policy_map.known_policies[TEST_POLICY.id])
def _set_ports(self):
self.policy_map.set_port_policy(TEST_PORT, TEST_POLICY)
self.policy_map.set_port_policy(TEST_PORT2, TEST_POLICY2)
def test_set_port_policy(self):
self._set_ports()
self.assertEqual(TEST_POLICY,
self.policy_map.known_policies[TEST_POLICY.id])
self.assertIn(TEST_PORT['port_id'],
self.policy_map.qos_policy_ports[TEST_POLICY.id])
def test_get_port_policy(self):
self._set_ports()
self.assertEqual(TEST_POLICY,
self.policy_map.get_port_policy(TEST_PORT))
self.assertEqual(TEST_POLICY2,
self.policy_map.get_port_policy(TEST_PORT2))
def test_get_ports(self):
self._set_ports()
self.assertEqual([TEST_PORT],
list(self.policy_map.get_ports(TEST_POLICY)))
self.assertEqual([TEST_PORT2],
list(self.policy_map.get_ports(TEST_POLICY2)))
def test_clean_by_port(self):
self._set_ports()
self.policy_map.clean_by_port(TEST_PORT)
self.assertNotIn(TEST_POLICY.id, self.policy_map.known_policies)
self.assertNotIn(TEST_PORT['port_id'], self.policy_map.port_policies)
self.assertIn(TEST_POLICY2.id, self.policy_map.known_policies)
def test_clean_by_port_for_unknown_port(self):
self.policy_map._clean_policy_info = mock.Mock()
self.policy_map.clean_by_port(TEST_PORT)
self.policy_map._clean_policy_info.assert_not_called()
def test_has_policy_changed(self):
self._set_ports()
self.assertTrue(
self.policy_map.has_policy_changed(TEST_PORT, 'a_new_policy_id'))
self.assertFalse(
self.policy_map.has_policy_changed(TEST_PORT, TEST_POLICY.id))
|
"""Python DB-API (PEP 249) interface to SQL Service.
http://www.python.org/dev/peps/pep-0249/
"""
import collections
import datetime
import exceptions
import os
import time
import types
from google.storage.speckle.proto import client_error_code_pb2
from google.storage.speckle.proto import client_pb2
from google.storage.speckle.proto import jdbc_type
from google.storage.speckle.proto import sql_pb2
from google.storage.speckle.python import api
from google.storage.speckle.python.api import converters
__path__ = api.__path__
OAUTH_CREDENTIALS_PATH = os.path.expanduser('~/.googlesql_oauth2.dat')
apilevel = '2.0'
threadsafety = 1
paramstyle = 'format'
version_info = (1, 2, 2, 'final', 0)
class Warning(StandardError, exceptions.Warning):
pass
class Error(StandardError):
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class DataError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
Blob = converters.Blob
def Date(year, month, day):
return datetime.date(year, month, day)
def Time(hour, minute, second):
return datetime.time(hour, minute, second)
def Timestamp(year, month, day, hour, minute, second):
return datetime.datetime(year, month, day, hour, minute, second)
def DateFromTicks(ticks):
return Date(*time.localtime(ticks)[:3])
def TimeFromTicks(ticks):
return Time(*time.localtime(ticks)[3:6])
def TimestampFromTicks(ticks):
return Timestamp(*time.localtime(ticks)[:6])
def Binary(string):
return Blob(string)
STRING = unicode
BINARY = Blob
NUMBER = float
DATETIME = datetime.datetime
ROWID = int
_PYTHON_TYPE_TO_JDBC_TYPE = {
types.IntType: jdbc_type.INTEGER,
types.LongType: jdbc_type.INTEGER,
types.FloatType: jdbc_type.DOUBLE,
types.BooleanType: jdbc_type.BOOLEAN,
types.StringType: jdbc_type.VARCHAR,
types.UnicodeType: jdbc_type.VARCHAR,
datetime.date: jdbc_type.DATE,
datetime.datetime: jdbc_type.TIMESTAMP,
datetime.time: jdbc_type.TIME,
converters.Blob: jdbc_type.BLOB,
}
def _ConvertFormatToQmark(statement, args):
"""Replaces '%s' with '?'.
The server actually supports '?' for bind parameters, but the
MySQLdb implementation of PEP 249 uses '%s'. Most clients don't
bother checking the paramstyle member and just hardcode '%s' in
their statements. This function converts a format-style statement
into a qmark-style statement.
Args:
statement: A string, a SQL statement.
args: A sequence of arguments matching the statement's bind variables,
if any.
Returns:
The converted string.
"""
if args:
qmarks = tuple('?' * len(args))
return statement % qmarks
return statement
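# A minimal usage sketch added for illustration; it is not part of the
# original module, and the statement and arguments below are hypothetical.
def _ExampleConvertFormatToQmark():
  converted = _ConvertFormatToQmark(
      'SELECT name FROM guestbook WHERE id = %s AND author = %s', (1, 'bob'))
  assert converted == 'SELECT name FROM guestbook WHERE id = ? AND author = ?'
  return converted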
class Cursor(object):
def __init__(self, conn, use_dict_cursor=False):
"""Initializer.
Args:
conn: A Connection object.
use_dict_cursor: Optional boolean to convert each row of results into a
dictionary. Defaults to False.
"""
self._conn = conn
self._description = None
self._rowcount = -1
self.arraysize = 1
self._open = True
self.lastrowid = None
self._use_dict_cursor = use_dict_cursor
@property
def description(self):
return self._description
@property
def rowcount(self):
return self._rowcount
def close(self):
"""Marks the cursor as unusable for further operations."""
self._CheckOpen()
self._open = False
def _GetJdbcTypeForArg(self, arg):
"""Get the JDBC type which corresponds to the given Python object type."""
arg_jdbc_type = _PYTHON_TYPE_TO_JDBC_TYPE.get(type(arg))
if arg_jdbc_type:
return arg_jdbc_type
for python_t, jdbc_t in _PYTHON_TYPE_TO_JDBC_TYPE.items():
if isinstance(arg, python_t):
return jdbc_t
try:
return self._GetJdbcTypeForArg(arg[0])
except TypeError:
raise TypeError('unknown type')
def _EncodeVariable(self, arg):
"""Converts a variable to a type and value.
Args:
arg: Any tuple, string, numeric, or datetime object.
Returns:
A (int, str) tuple, representing a JDBC type and encoded value.
Raises:
TypeError: The argument is not a recognized type.
"""
arg_jdbc_type = self._GetJdbcTypeForArg(arg)
value = self._conn.encoders[type(arg)](arg, self._conn.encoders)
return arg_jdbc_type, value
def _DecodeVariable(self, datatype, value):
"""Converts a type and value to a variable.
Args:
datatype: An integer.
value: A string.
Returns:
An object of some appropriate type.
Raises:
InterfaceError: datatype is not a recognized JDBC type.
ValueError: The value could not be parsed.
"""
converter = self._conn.converter.get(datatype)
if converter is None:
raise InterfaceError('unknown JDBC type %d' % datatype)
return converter(value)
def execute(self, statement, args=None):
"""Prepares and executes a database operation (query or command).
Args:
statement: A string, a SQL statement.
args: A sequence of arguments matching the statement's bind variables,
if any.
Raises:
InterfaceError: Unknown type used as a bind variable.
DatabaseError: A SQL exception occurred.
OperationalError: RPC problem.
"""
self._CheckOpen()
request = sql_pb2.ExecRequest()
request.options.include_generated_keys = True
if args is not None:
if not hasattr(args, '__iter__'):
args = [args]
for i, arg in enumerate(args):
bv = request.bind_variable.add()
bv.position = i + 1
if arg is None:
bv.type = jdbc_type.NULL
else:
try:
bv.type, bv.value = self._EncodeVariable(arg)
except TypeError:
raise InterfaceError('unknown type %s for arg %d' % (type(arg), i))
request.statement = _ConvertFormatToQmark(statement, args)
response = self._conn.MakeRequest('Exec', request)
result = response.result
if result.HasField('sql_exception'):
raise DatabaseError('%d: %s' % (result.sql_exception.code,
result.sql_exception.message))
self._rows = collections.deque()
if result.rows.columns:
self._description = []
for column in result.rows.columns:
self._description.append(
(column.label, column.type, column.display_size, None,
column.precision, column.scale, column.nullable))
else:
self._description = None
if result.rows.tuples:
assert self._description, 'Column descriptions do not exist.'
column_names = [col[0] for col in self._description]
self._rowcount = len(result.rows.tuples)
for tuple_proto in result.rows.tuples:
row = []
nulls = set(tuple_proto.nulls)
value_index = 0
for i, column_descr in enumerate(self._description):
if i in nulls:
row.append(None)
else:
row.append(self._DecodeVariable(column_descr[1],
tuple_proto.values[value_index]))
value_index += 1
if self._use_dict_cursor:
assert len(column_names) == len(row)
row = dict(zip(column_names, row))
else:
row = tuple(row)
self._rows.append(row)
else:
self._rowcount = result.rows_updated
if result.generated_keys:
self.lastrowid = long(result.generated_keys[-1])
def executemany(self, statement, seq_of_args):
"""Calls execute() for each value of seq_of_args.
Args:
statement: A string, a SQL statement.
seq_of_args: A sequence, each entry of which is a sequence of arguments
matching the statement's bind variables, if any.
"""
self._CheckOpen()
rowcount = 0
for args in seq_of_args:
self.execute(statement, args)
rowcount += self.rowcount
self._rowcount = rowcount
def fetchone(self):
"""Fetches the next row of a query result set.
Returns:
A sequence, or None when no more data is available.
Raises:
InternalError: The cursor has been closed, or no statement has been
executed yet.
"""
self._CheckOpen()
if self._rowcount == -1:
raise InternalError('fetchone() called before execute')
try:
return self._rows.popleft()
except IndexError:
return None
def fetchmany(self, size=None):
"""Fetches the next set of rows of a query result.
Args:
size: The maximum number of rows to return; by default, self.arraysize.
Returns:
A sequence of sequences, or an empty sequence when no more data is
available.
Raises:
InternalError: The cursor has been closed, or no statement has been
executed yet.
"""
self._CheckOpen()
if self._rowcount == -1:
raise InternalError('fetchmany() called before execute')
if size is None:
size = self.arraysize
if size >= len(self._rows):
return self.fetchall()
else:
result = []
for _ in xrange(size):
result.append(self._rows.popleft())
return tuple(result)
def fetchall(self):
"""Fetches all remaining rows of a query result.
Returns:
A sequence of sequences, or an empty sequence when no more data is
available.
Raises:
InternalError: The cursor has been closed, or no statement has been
executed yet.
"""
self._CheckOpen()
if self._rowcount == -1:
raise InternalError('fetchall() called before execute')
rows = self._rows
self._rows = collections.deque()
return tuple(rows)
def setinputsizes(self, unused_sizes):
self._CheckOpen()
def setoutputsize(self, unused_size, unused_column=None):
self._CheckOpen()
def _CheckOpen(self):
self._conn.CheckOpen()
if not self._open:
raise InternalError('cursor has been closed')
def __iter__(self):
return iter(self.fetchone, None)
class Connection(object):
def __init__(self, dsn, instance, database=None, user='root', password=None,
deadline_seconds=30.0, conv=None,
query_deadline_seconds=86400.0, retry_interval_seconds=30.0):
"""Creates a new SQL Service connection.
Args:
dsn: A string, the SQL Service job path or host:port.
instance: A string, the SQL Service instance name, often a username.
database: A string, semantics defined by the backend.
user: A string, database user name.
password: A string, database password.
deadline_seconds: A float, request deadline in seconds.
conv: A dict, maps types to a conversion function. See converters.py.
query_deadline_seconds: A float, query deadline in seconds.
retry_interval_seconds: A float, seconds to wait between each retry.
Raises:
OperationalError: Transport failure.
DatabaseError: Error from SQL Service server.
"""
self._dsn = dsn
self._instance = instance
self._database = database
self._user = user
self._password = password
self._deadline_seconds = deadline_seconds
self._connection_id = None
self._idempotent_request_id = 0
if not conv:
conv = converters.conversions
self._query_deadline_seconds = query_deadline_seconds
self._retry_interval_seconds = retry_interval_seconds
self.converter = {}
self.encoders = {}
for key, value in conv.items():
if isinstance(key, int):
self.converter[key] = value
else:
self.encoders[key] = value
self.OpenConnection()
def OpenConnection(self):
"""Opens a connection to SQL Service."""
request = sql_pb2.OpenConnectionRequest()
request.client_type = client_pb2.CLIENT_TYPE_PYTHON_DBAPI
prop = request.property.add()
prop.key = 'autoCommit'
prop.value = 'false'
if self._user:
prop = request.property.add()
prop.key = 'user'
prop.value = self._user
if self._password:
prop = request.property.add()
prop.key = 'password'
prop.value = self._password
if self._database:
prop = request.property.add()
prop.key = 'database'
prop.value = self._database
self.SetupClient()
response = self.MakeRequest('OpenConnection', request)
self._connection_id = response.connection_id
def SetupClient(self):
"""Setup a transport client to communicate with rdbms.
This is a template method to provide subclasses with a hook to perform any
necessary client initialization while opening a connection to rdbms.
"""
pass
def close(self):
"""Makes the connection and all its cursors unusable.
The connection will be unusable from this point forward; an Error
(or subclass) exception will be raised if any operation is attempted
with the connection.
"""
self.CheckOpen()
request = sql_pb2.CloseConnectionRequest()
self.MakeRequest('CloseConnection', request)
self._connection_id = None
def CheckOpen(self):
if self._connection_id is None:
raise InternalError('connection has been closed')
def commit(self):
"""Commits any pending transaction to the database.
Raises:
DatabaseError: A SQL exception occurred.
OperationalError: RPC problem.
"""
self.CheckOpen()
request = sql_pb2.ExecOpRequest()
request.op.type = client_pb2.OpProto.COMMIT
self.MakeRequest('ExecOp', request)
def rollback(self):
"""Rolls back any pending transaction to the database.
Raises:
DatabaseError: A SQL exception occurred.
OperationalError: RPC problem.
"""
self.CheckOpen()
request = sql_pb2.ExecOpRequest()
request.op.type = client_pb2.OpProto.ROLLBACK
self.MakeRequest('ExecOp', request)
def autocommit(self, value):
"""Changes whether there is an implicit commit after each statement.
By default, transactions must be explicitly committed.
Args:
value: A boolean.
Raises:
DatabaseError: A SQL exception occurred.
OperationalError: RPC problem.
"""
self.CheckOpen()
request = sql_pb2.ExecOpRequest()
request.op.type = client_pb2.OpProto.SET_AUTO_COMMIT
request.op.auto_commit = value
self.MakeRequest('ExecOp', request)
def cursor(self, **kwargs):
"""Returns a cursor for the current connection.
Args:
**kwargs: Optional keyword args to pass into cursor.
Returns:
A Cursor object.
"""
return Cursor(self, **kwargs)
def MakeRequest(self, stub_method, request):
"""Makes an ApiProxy request, and possibly raises an appropriate exception.
Args:
stub_method: A string, the name of the method to call.
request: A protobuf; 'instance' and 'connection_id' will be set
when available.
Returns:
A protobuf.
Raises:
DatabaseError: Error from SQL Service server.
"""
if self._instance:
request.instance = self._instance
if self._connection_id is not None:
request.connection_id = self._connection_id
if stub_method in ('Exec', 'ExecOp', 'GetMetadata'):
self._idempotent_request_id += 1
request.request_id = self._idempotent_request_id
response = self._MakeRetriableRequest(stub_method, request)
else:
response = self.MakeRequestImpl(stub_method, request)
if (hasattr(response, 'sql_exception') and
response.HasField('sql_exception')):
raise DatabaseError('%d: %s' % (response.sql_exception.code,
response.sql_exception.message))
return response
def _MakeRetriableRequest(self, stub_method, request):
"""Makes a retriable request.
Args:
stub_method: A string, the name of the method to call.
request: A protobuf.
Returns:
A protobuf.
Raises:
DatabaseError: Error from SQL Service server.
"""
absolute_deadline_seconds = time.clock() + self._query_deadline_seconds
response = self.MakeRequestImpl(stub_method, request)
if not response.HasField('sql_exception'):
return response
sql_exception = response.sql_exception
if (sql_exception.application_error_code !=
client_error_code_pb2.SqlServiceClientError.ERROR_TIMEOUT):
raise DatabaseError('%d: %s' % (sql_exception.code,
sql_exception.message))
if time.clock() >= absolute_deadline_seconds:
raise DatabaseError('%d: %s' % (sql_exception.code,
sql_exception.message))
return self._Retry(stub_method, request.request_id,
absolute_deadline_seconds)
def _Retry(self, stub_method, request_id, absolute_deadline_seconds):
"""Retries request with the given request id.
Continues to retry until either the deadline has expired or the response
has been received.
Args:
stub_method: A string, the name of the original method that triggered the
retry.
request_id: An integer, the request id used in the original request
absolute_deadline_seconds: An integer, absolute deadline in seconds.
Returns:
A protobuf.
Raises:
      DatabaseError: If the ExecOpResponse contains a SqlException that is not
        related to retry.
      InternalError: If the ExecOpResponse is not valid.
"""
request = sql_pb2.ExecOpRequest()
request.op.type = client_pb2.OpProto.RETRY
request.op.request_id = request_id
request.connection_id = self._connection_id
request.instance = self._instance
while True:
seconds_remaining = absolute_deadline_seconds - time.clock()
if seconds_remaining <= 0:
raise InternalError('Request [%d] timed out' % (request_id))
time.sleep(min(self._retry_interval_seconds, seconds_remaining))
self._idempotent_request_id += 1
request.request_id = self._idempotent_request_id
response = self.MakeRequestImpl('ExecOp', request)
if not response.HasField('sql_exception'):
return self._ConvertCachedResponse(stub_method, response)
sql_exception = response.sql_exception
if (sql_exception.application_error_code !=
client_error_code_pb2.SqlServiceClientError.ERROR_RESPONSE_PENDING):
raise DatabaseError('%d: %s' % (response.sql_exception.code,
response.sql_exception.message))
def _ConvertCachedResponse(self, stub_method, exec_op_response):
"""Converts the cached response or RPC error.
Args:
stub_method: A string, the name of the original method that triggered the
retry.
exec_op_response: A protobuf, the retry response that contains either the
RPC error or the cached response.
Returns:
A protobuf, the cached response.
Raises:
DatabaseError: If the cached response contains SqlException.
InternalError: If a cached RpcErrorProto exists.
"""
if exec_op_response.HasField('cached_rpc_error'):
raise InternalError('%d: %s' % (
exec_op_response.cached_rpc_error.error_code,
exec_op_response.cached_rpc_error.error_message))
if not exec_op_response.HasField('cached_payload'):
raise InternalError('Invalid exec op response for retry request')
if stub_method == 'Exec':
response = sql_pb2.ExecResponse()
elif stub_method == 'ExecOp':
response = sql_pb2.ExecOpResponse()
elif stub_method == 'GetMetadata':
response = sql_pb2.MetadataResponse()
else:
raise InternalError('Found unexpected stub_method: %s' % (stub_method))
response.ParseFromString(exec_op_response.cached_payload)
if response.HasField('sql_exception'):
raise DatabaseError('%d: %s' % (response.sql_exception.code,
response.sql_exception.message))
return response
def MakeRequestImpl(self, stub_method, request):
raise InternalError('No transport defined. Try using rdbms_[transport]')
def get_server_info(self):
"""Returns a string that represents the server version number.
Non-standard; Provided for API compatibility with MySQLdb.
Returns:
The server version number string.
"""
self.CheckOpen()
request = sql_pb2.MetadataRequest()
request.metadata = client_pb2.METADATATYPE_DATABASE_METADATA_BASIC
response = self.MakeRequest('GetMetadata', request)
return response.jdbc_database_metadata.database_product_version
def ping(self, reconnect=False):
"""Checks whether or not the connection to the server is working.
If it has gone down, an automatic reconnection is attempted.
This function can be used by clients that remain idle for a long while, to
check whether or not the server has closed the connection and reconnect if
necessary.
Non-standard. You should assume that ping() performs an implicit rollback;
use only when starting a new transaction. You have been warned.
Args:
reconnect: Whether to perform an automatic reconnection.
Raises:
DatabaseError: The connection to the server is not working.
"""
self.CheckOpen()
request = sql_pb2.ExecOpRequest()
request.op.type = client_pb2.OpProto.PING
try:
self.MakeRequest('ExecOp', request)
except DatabaseError:
if not reconnect:
raise
self._connection_id = None
self.OpenConnection()
Warning = Warning
Error = Error
InterfaceError = InterfaceError
DatabaseError = DatabaseError
DataError = DataError
OperationalError = OperationalError
IntegrityError = IntegrityError
InternalError = InternalError
ProgrammingError = ProgrammingError
NotSupportedError = NotSupportedError
connect = Connection
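# Hedged end-to-end usage sketch, added for illustration; it is not part of
# the original module. In practice a concrete transport (an rdbms_[transport]
# module providing MakeRequestImpl) is required, and the dsn, instance and
# query below are placeholders only.
def _ExampleConnectAndQuery():
  conn = connect('sql-service-host:port', 'example-instance', database='mydb')
  cursor = conn.cursor()
  cursor.execute('SELECT id, name FROM guestbook WHERE id = %s', (1,))
  rows = cursor.fetchall()
  conn.close()
  return rows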
|
"""
[START gae_block_comment_tag]
[END gae_block_comment_tag]
"""
|
from future import standard_library
standard_library.install_aliases()
import logging
import sys
from thrift.transport.TTransport import *
from desktop.lib.rest.http_client import HttpClient
from desktop.lib.rest.resource import Resource
if sys.version_info[0] > 2:
from io import BytesIO as buffer_writer
else:
from cStringIO import StringIO as buffer_writer
LOG = logging.getLogger(__name__)
class THttpClient(TTransportBase):
"""
HTTP transport mode for Thrift.
  HTTPS and Kerberos support with Requests.
e.g.
mode = THttpClient('http://hbase-thrift-v1.com:9090')
mode = THttpClient('http://hive-localhost:10001/cliservice')
"""
def __init__(self, base_url):
self._base_url = base_url
self._client = HttpClient(self._base_url, logger=LOG)
self._data = None
self._headers = None
self._wbuf = buffer_writer()
def open(self):
pass
def set_kerberos_auth(self, service="HTTP"):
self._client.set_kerberos_auth(service=service)
def set_basic_auth(self, username, password):
self._client.set_basic_auth(username, password)
def set_verify(self, verify=True):
self._client.set_verify(verify)
def close(self):
self._headers = None
# Close session too?
def isOpen(self):
return self._client is not None
def setTimeout(self, ms):
if not self._headers:
self._headers = {}
self._headers.update(timeout=str(int(ms / 1000)))
def setCustomHeaders(self, headers):
self._headers = headers
def read(self, sz):
return self._data
def write(self, buf):
self._wbuf.write(buf)
def flush(self):
data = self._wbuf.getvalue()
self._wbuf = buffer_writer()
# POST
self._root = Resource(self._client)
self._data = self._root.post('', data=data, headers=self._headers)
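# Hedged usage sketch, added for illustration; it is not part of the original
# module. 'ExampleService' stands in for a Thrift-generated client class.
def _example_thttpclient_usage():
  from thrift.protocol import TBinaryProtocol
  transport = THttpClient('https://hive-localhost:10001/cliservice')
  transport.set_basic_auth('user', 'password')
  protocol = TBinaryProtocol.TBinaryProtocol(transport)
  # client = ExampleService.Client(protocol)  # hypothetical generated client
  return protocol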
|
"""Python wrapper for gcd.sh."""
__author__ = 'eddavisson@google.com (Ed Davisson)'
import logging
import os
import shutil
import socket
import subprocess
import tempfile
import time
import urllib
import zipfile
import httplib2
import portpicker
from googledatastore import connection
_DEFAULT_GCD_OPTIONS = ['--allow_remote_shutdown', '--testing']
class LocalCloudDatastoreFactory(object):
"""A factory for constructing LocalCloudDatastore objects."""
def __init__(self, working_directory, gcd_zip, java=None):
"""Constructs a factory for building local datastore instances.
Args:
working_directory: path to a directory where temporary files will be
stored
gcd_zip: path to the gcd zip file
java: path to a java executable
Raises:
ValueError: if gcd.sh cannot be located in the gcd zip file
"""
self._working_directory = working_directory
self._remote_datastores = {}
# Extract GCD.
zipped_file = zipfile.ZipFile(gcd_zip)
self._gcd_dir = os.path.join(self._working_directory, 'gcd')
os.mkdir(self._gcd_dir)
zipped_file.extractall(self._gcd_dir)
# Locate gcd.sh in the unzipped directory (it may be in a directory which
# contains a version string).
gcd_dirs = [d for d in os.listdir(self._gcd_dir)
if os.path.isdir(os.path.join(self._gcd_dir, d))]
for d in gcd_dirs:
if d.startswith('gcd'):
self._gcd_sh = os.path.join(self._gcd_dir, d, 'gcd.sh')
break
else:
raise ValueError('could not find gcd.sh in zip file')
os.chmod(self._gcd_sh, 0700) # executable
# Make GCD use our copy of Java.
if java:
os.environ['JAVA'] = java
def Get(self, project_id):
"""Returns an existing local datastore instance for the provided project_id.
If a local datastore instance doesn't yet exist, it creates one.
"""
if project_id in self._remote_datastores:
return self._remote_datastores[project_id]
datastore = self.Create(project_id)
self._remote_datastores[project_id] = datastore
return datastore
def Create(self, project_id, start_options=None, deadline=10):
"""Creates a local datastore instance.
This method will wait for up to 'deadline' seconds for the datastore to
start.
Args:
project_id: project ID
start_options: a list of additional command-line options to pass to the
gcd.sh start command
deadline: number of seconds to wait for the datastore to respond
Returns:
a LocalCloudDatastore
Raises:
IOError: if the local datastore could not be started within the deadline
"""
return LocalCloudDatastore(self._gcd_sh, self._working_directory,
project_id, deadline, start_options)
def __del__(self):
# Delete temp files.
shutil.rmtree(self._gcd_dir)
class LocalCloudDatastore(object):
"""A local datastore (based on gcd.sh)."""
def __init__(self, gcd_sh, working_directory, project_id, deadline,
start_options):
"""Constructs a local datastore.
Clients should use LocalCloudDatastoreFactory to construct
LocalCloudDatastore instances.
Args:
gcd_sh: path to gcd.sh
      working_directory: path to a directory where temporary files will be
        stored
project_id: project ID
deadline: number of seconds to wait for the datastore to start
start_options: a list of additional command-line options to pass to the
gcd.sh start command
Raises:
IOError: if the datastore failed to start within the deadline
"""
self._project_id = project_id
self._gcd_sh = gcd_sh
self._http = httplib2.Http()
self.__running = False
self._tmp_dir = tempfile.mkdtemp(dir=working_directory)
self._project_directory = os.path.join(self._tmp_dir, self._project_id)
p = subprocess.Popen([gcd_sh,
'create',
'--project_id=%s' % self._project_id,
self._project_directory])
if p.wait() != 0:
raise IOError('could not create project in directory: %s'
% self._project_directory)
# Start GCD and wait for it to start responding to requests.
port = portpicker.PickUnusedPort()
self._host = 'http://localhost:%d' % port
cmd = [self._gcd_sh, 'start', '--port=%d' % port]
cmd.extend(_DEFAULT_GCD_OPTIONS)
if start_options:
cmd.extend(start_options)
cmd.append(self._project_directory)
subprocess.Popen(cmd)
if not self._WaitForStartup(deadline):
raise IOError('datastore did not respond within %ds' % deadline)
endpoint = '%s/datastore/v1beta3/projects/%s' % (self._host,
self._project_id)
self.__datastore = connection.Datastore(project_endpoint=endpoint)
self.__running = True
  def GetDatastore(self):
    """Returns a googledatastore.Datastore that is connected to the gcd tool."""
return self.__datastore
def _WaitForStartup(self, deadline):
"""Waits for the datastore to start.
Args:
deadline: deadline in seconds
Returns:
True if the instance responds within the deadline, False otherwise.
"""
start = time.time()
sleep = 0.05
def Elapsed():
return time.time() - start
while True:
try:
response, _ = self._http.request(self._host)
if response.status == 200:
logging.info('local server responded after %f seconds', Elapsed())
return True
except socket.error:
pass
if Elapsed() >= deadline:
# Out of time; give up.
return False
else:
time.sleep(sleep)
sleep *= 2
def Clear(self):
"""Clears all data from the local datastore instance.
Returns:
True if the data was successfully cleared, False otherwise.
"""
body = urllib.urlencode({'action': 'Clear Datastore'})
headers = {'Content-type': 'application/x-www-form-urlencoded',
'Content-length': str(len(body))}
response, _ = self._http.request('%s/_ah/admin/datastore' % self._host,
method='POST', headers=headers, body=body)
    if response.status == 200:
      return True
    else:
      logging.warning('failed to clear datastore; response was: %s', response)
      return False
def Stop(self):
if not self.__running:
return
logging.info('shutting down the datastore running at %s', self._host)
# Shut down the datastore.
headers = {'Content-length': '0'}
response, _ = self._http.request('%s/_ah/admin/quit' % self._host,
method='POST', headers=headers)
if response.status != 200:
logging.warning('failed to shut down datastore; response: %s', response)
self.__running = False
# Delete temp files.
shutil.rmtree(self._tmp_dir)
def __del__(self):
# If the user forgets to call Stop()
logging.warning('datastore shutting down due to '
'LocalCloudDatastore object deletion')
self.Stop()
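# Hedged usage sketch, added for illustration; it is not part of the original
# module. The working directory, zip path and project id are placeholders.
def _ExampleLocalDatastoreUsage(working_directory, gcd_zip):
  factory = LocalCloudDatastoreFactory(working_directory, gcd_zip)
  local = factory.Get('example-project')  # starts gcd.sh for this project id
  datastore = local.GetDatastore()  # googledatastore.Datastore connection
  local.Clear()  # wipe all data between test runs
  local.Stop()  # shut the local datastore down
  return datastore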
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from pants.engine.fs import EMPTY_SNAPSHOT
from pants.engine.rules import RootRule, rule
from pants.engine.selectors import Select
from pants.util.objects import datatype
logger = logging.getLogger(__name__)
class ExecuteProcessRequest(datatype('ExecuteProcessRequest', ['argv', 'env', 'input_files_digest', 'digest_length'])):
"""Request for execution with args and snapshots to extract."""
@classmethod
def create_from_snapshot(cls, argv, env, snapshot):
return ExecuteProcessRequest(
argv=argv,
env=env,
input_files_digest=snapshot.fingerprint,
digest_length=snapshot.digest_length,
)
@classmethod
def create_with_empty_snapshot(cls, argv, env):
return cls.create_from_snapshot(argv, env, EMPTY_SNAPSHOT)
def __new__(cls, argv, env, input_files_digest, digest_length):
"""
    :param argv: Arguments to the process being run.
:param env: A tuple of environment variables and values.
"""
if not isinstance(argv, tuple):
raise ValueError('argv must be a tuple.')
if not isinstance(env, tuple):
raise ValueError('env must be a tuple.')
if not isinstance(input_files_digest, str):
raise ValueError('input_files_digest must be a str.')
if not isinstance(digest_length, int):
raise ValueError('digest_length must be an int.')
if digest_length < 0:
raise ValueError('digest_length must be >= 0.')
return super(ExecuteProcessRequest, cls).__new__(cls, argv, env, input_files_digest, digest_length)
class ExecuteProcessResult(datatype('ExecuteProcessResult', ['stdout', 'stderr', 'exit_code'])):
pass
def create_process_rules():
"""Intrinsically replaced on the rust side."""
return [execute_process_noop, RootRule(ExecuteProcessRequest)]
@rule(ExecuteProcessResult, [Select(ExecuteProcessRequest)])
def execute_process_noop(*args):
raise Exception('This task is replaced intrinsically, and should never run.')
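# Hedged usage sketch, added for illustration; it is not part of the original
# module. argv and env must be tuples; the command below is a placeholder.
def _example_execute_process_request():
  return ExecuteProcessRequest.create_with_empty_snapshot(
    argv=('/bin/echo', 'hello'),
    env=(),
  )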
|
import sys
sys.path.insert(0, './getresults')
import datetime
from flightsearch import flightsearch, flightresult
import os
import uuid
import time
from pprint import pprint
def main():
flyfrom = 'YYZ' #input("Enter departure city or airport code, e.g. Toronto or YYZ:\n")
datefrom = '2017-04-26' #input("Enter departure date and time, e.g. 2017-03-31 12:00:\n")
flyto = 'LHR' #input("Enter arrival city or airport code, e.g. London or LHR:\n")
dateto = '2017-05-26' #input("Enter arrival date and time, e.g. 2017-03-31 20:00:\n")
searchuuid = uuid.uuid4()
searchbegintime = time.time()
search = flightsearch(searchuuid = searchuuid, searchbegintime = searchbegintime, flyfrom = flyfrom,
datefrom = datefrom, flyto = flyto, dateto = dateto)
results = aggregatedflights(search)
search.searchendtime = time.time()
for key, value in results.items():
for item in value:
pprint(vars(item))
def aggregatedflights(flightsearch):
getresultsdir = './getresults'
resultdict = {}
for filename in os.listdir(getresultsdir):
if filename.startswith("get") and filename.endswith(".py"):
modulename = filename.split('.')[0]
mod = __import__(modulename)
resultdict[modulename] = mod.getresult(flightsearch)
else:
continue
return sortbyprice(resultdict)
def sortbyprice(flightresult):
## Coming soon
return flightresult
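# Hedged sketch, added for illustration; it is not part of the original
# script. Each getresults/get*.py module is expected to expose a module-level
# getresult(search) function like this stub, returning a list of flightresult
# objects for the given flightsearch.
def _example_getresult(search):
    results = []
    # A real provider would query a flight API here and append flightresult
    # objects built from the search parameters.
    return results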
if __name__ == '__main__':
main()
|
import pytest
import torch
from torch.autograd import Variable
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.data import Vocabulary
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.common.testing import AllenNlpTestCase
class TestBasicTextFieldEmbedder(AllenNlpTestCase):
def setUp(self):
super(TestBasicTextFieldEmbedder, self).setUp()
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("1")
self.vocab.add_token_to_namespace("2")
self.vocab.add_token_to_namespace("3")
self.vocab.add_token_to_namespace("4")
params = Params({
"words1": {
"type": "embedding",
"embedding_dim": 2
},
"words2": {
"type": "embedding",
"embedding_dim": 5
},
"words3": {
"type": "embedding",
"embedding_dim": 3
}
})
self.token_embedder = BasicTextFieldEmbedder.from_params(self.vocab, params)
self.inputs = {
"words1": Variable(torch.LongTensor([[0, 2, 3, 5]])),
"words2": Variable(torch.LongTensor([[1, 4, 3, 2]])),
"words3": Variable(torch.LongTensor([[1, 5, 1, 2]]))
}
def test_get_output_dim_aggregates_dimension_from_each_embedding(self):
assert self.token_embedder.get_output_dim() == 10
def test_forward_asserts_input_field_match(self):
self.inputs['words4'] = self.inputs['words3']
del self.inputs['words3']
with pytest.raises(ConfigurationError):
self.token_embedder(self.inputs)
self.inputs['words3'] = self.inputs['words4']
del self.inputs['words4']
def test_forward_concats_resultant_embeddings(self):
assert self.token_embedder(self.inputs).size() == (1, 4, 10)
|
import inventory.inventory_class as inv
import weapons.weapon_class as wp
class people:
    """This is the people class with attributes: name, health, descript,
    equiped, bag, and hostile."""
    def name(self):
        n = ''
        return n
    def health(self):
        hp = 0
        return hp
    def descript(self):
        d = 'Description of the person or creature'
        return d
    def equiped(self):
        e = inv.inventory()
        e.weapon = wp.weapon()
        e.armor = 0
        return e
    def bag(self):
        b = {}
        return b
    def hostile(self):
        h = 0
        return h
|
import arviz as az  # assumes `b` is an array of samples defined elsewhere
az.plot_dist(b, rug=True, quantiles=[.25, .5, .75], cumulative=True)
|
"""Tests for parser and parser plugin presets."""
from __future__ import unicode_literals
import unittest
from plaso.containers import artifacts
from plaso.parsers import presets
from tests import test_lib as shared_test_lib
class ParserPresetTest(shared_test_lib.BaseTestCase):
"""Tests for the parser and parser plugin preset."""
def testInitialize(self):
"""Tests the __init__ function."""
test_definition = presets.ParserPreset('test', ['parser1', 'parser2'])
self.assertIsNotNone(test_definition)
class ParserPresetsManagerTest(shared_test_lib.BaseTestCase):
"""Tests for the parser and parser plugin presets manager."""
_LINUX_PARSERS = [
'bash_history',
'bencode',
'czip/oxml',
'dockerjson',
'dpkg',
'filestat',
'gdrive_synclog',
'olecf',
'pls_recall',
'popularity_contest',
'selinux',
'sqlite/google_drive',
'sqlite/skype',
'sqlite/zeitgeist',
'syslog',
'systemd_journal',
'utmp',
'vsftpd',
'webhist',
'xchatlog',
'xchatscrollback',
'zsh_extended_history']
_MACOS_PARSERS = [
'asl_log',
'bash_history',
'bencode',
'bsm_log',
'cups_ipp',
'czip/oxml',
'filestat',
'fseventsd',
'gdrive_synclog',
'mac_appfirewall_log',
'mac_keychain',
'mac_securityd',
'macwifi',
'olecf',
'plist',
'sqlite/appusage',
'sqlite/google_drive',
'sqlite/imessage',
'sqlite/ls_quarantine',
'sqlite/mac_document_versions',
'sqlite/mackeeper_cache',
'sqlite/skype',
'syslog',
'utmpx',
'webhist',
'zsh_extended_history']
# TODO add tests for _ReadPresetDefinitionValues
# TODO add tests for _ReadPresetsFromFileObject
def testGetNames(self):
"""Tests the GetNames function."""
test_file_path = self._GetTestFilePath(['presets.yaml'])
self._SkipIfPathNotExists(test_file_path)
test_manager = presets.ParserPresetsManager()
test_manager.ReadFromFile(test_file_path)
test_names = list(test_manager.GetNames())
self.assertEqual(len(test_names), 7)
expected_names = sorted([
'android', 'linux', 'macos', 'webhist', 'win7', 'win_gen', 'winxp'])
self.assertEqual(test_names, expected_names)
def testGetParsersByPreset(self):
"""Tests the GetParsersByPreset function."""
test_file_path = self._GetTestFilePath(['presets.yaml'])
self._SkipIfPathNotExists(test_file_path)
test_manager = presets.ParserPresetsManager()
test_manager.ReadFromFile(test_file_path)
parser_names = test_manager.GetParsersByPreset('linux')
self.assertEqual(parser_names, self._LINUX_PARSERS)
with self.assertRaises(KeyError):
test_manager.GetParsersByPreset('bogus')
def testGetPresetByName(self):
"""Tests the GetPresetByName function."""
test_file_path = self._GetTestFilePath(['presets.yaml'])
self._SkipIfPathNotExists(test_file_path)
test_manager = presets.ParserPresetsManager()
test_manager.ReadFromFile(test_file_path)
test_preset = test_manager.GetPresetByName('linux')
self.assertIsNotNone(test_preset)
self.assertEqual(test_preset.name, 'linux')
self.assertEqual(test_preset.parsers, self._LINUX_PARSERS)
test_preset = test_manager.GetPresetByName('bogus')
self.assertIsNone(test_preset)
def testGetPresetsByOperatingSystem(self):
"""Tests the GetPresetsByOperatingSystem function."""
test_file_path = self._GetTestFilePath(['presets.yaml'])
self._SkipIfPathNotExists(test_file_path)
test_manager = presets.ParserPresetsManager()
test_manager.ReadFromFile(test_file_path)
operating_system = artifacts.OperatingSystemArtifact(family='MacOS')
test_presets = test_manager.GetPresetsByOperatingSystem(operating_system)
self.assertEqual(len(test_presets), 1)
self.assertEqual(test_presets[0].name, 'macos')
self.assertEqual(test_presets[0].parsers, self._MACOS_PARSERS)
operating_system = artifacts.OperatingSystemArtifact(family='bogus')
test_presets = test_manager.GetPresetsByOperatingSystem(operating_system)
self.assertEqual(len(test_presets), 0)
def testGetPresetsInformation(self):
"""Tests the GetPresetsInformation function."""
test_file_path = self._GetTestFilePath(['presets.yaml'])
self._SkipIfPathNotExists(test_file_path)
test_manager = presets.ParserPresetsManager()
test_manager.ReadFromFile(test_file_path)
parser_presets_information = test_manager.GetPresetsInformation()
self.assertGreaterEqual(len(parser_presets_information), 1)
available_parser_names = [name for name, _ in parser_presets_information]
self.assertIn('linux', available_parser_names)
# TODO add tests for ReadFromFile
if __name__ == '__main__':
unittest.main()
|
import csv
import numpy
import matplotlib.pyplot as plt
price, size = numpy.loadtxt('house.csv', delimiter='|', usecols=(1, 2), unpack=True)
print price
print size
plt.figure()
plt.subplot(211)
plt.title("price / 10000 RMB")
plt.hist(price, bins=20)
plt.subplot(212)
plt.xlabel("size / m**2")
plt.hist(size, bins=20)
plt.figure(2)
plt.title("price")
plt.plot(price)
plt.show()
price_mean = numpy.mean(price)
size_mean = numpy.mean(size)
price_var = numpy.var(price)
size_var = numpy.var(size)
print "Variance of price:", price_var
print "Variance of size:", size_var
|
import requests
import yaml
import time
import enum
import sys
import re
import logging
import ssl
from requests.auth import HTTPDigestAuth
from requests.auth import HTTPBasicAuth
from lxml import etree as ET
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.poolmanager import PoolManager
logger = logging.getLogger(__name__)
with open("config/config.yaml") as f:
config = yaml.load(f)
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += 'HIGH:!DH:!aNULL'
try:
requests.packages.urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST += 'HIGH:!DH:!aNULL'
except AttributeError:
# no pyopenssl support used / needed / available
pass
class Ssl3HttpAdapter(HTTPAdapter):
""""Transport adapter" that allows us to use SSLv3."""
def init_poolmanager(self, connections, maxsize, block=False):
self.poolmanager = PoolManager(
num_pools=connections, maxsize=maxsize,
block=block, ssl_version=ssl.PROTOCOL_SSLv3)
session = requests.Session()
session.mount('https://wbgrp-crawl',Ssl3HttpAdapter())
class Crawl_Status():
none = "None"
unbuilt = "Unbuilt"
ready = "Ready"
paused = "Active: PAUSED"
running = "Active: RUNNING"
finished = "Finished: ABORTED"
class Crawl_Actions():
build = "build"
launch = "launch"
unpause = "unpause"
pause = "pause"
checkpoint = "checkpoint"
terminate = "terminate"
teardown = "teardown"
class Crawl_Reports():
summary = "CrawlSummaryReport"
seeds = "SeedsReport"
source = "SourceTagsReport"
hosts = "HostsReport"
mime = "MimetypesReport"
response = "ResponseCodeReport"
processors = "ProcessorsReport"
frontier = "FrontierSummaryReport"
thread = "ToeThreadsReport"
def get_crawl_status(url):
response = session.get(url,auth=HTTPDigestAuth(config["h3_settings"]["username"],config["h3_settings"]["password"]),verify=False, headers= {'accept':'application/xml'})
if (response.status_code & 200) == 200:
root=ET.fromstring(response.text)
if root.find('statusDescription') is not None:
return root.find('statusDescription').text
elif root.find('crawlControllerState') is not None:
return root.find('crawlControllerState').text
def get_available_actions(url):
response = session.get(url,auth=HTTPDigestAuth(config["h3_settings"]["username"],config["h3_settings"]["password"]),verify=False, headers= {'accept':'application/xml'})
actions = []
if (response.status_code & 200) == 200:
root=ET.fromstring(response.text)
for action in root.find('availableActions'):
actions.append(action.text)
return actions
def main():
url = 'https://localhost:6440/engine/job/monthly_test'
test_full_cycle(url)
def get_crawljob_page(url):
response = session.get(url,auth=HTTPDigestAuth(config["h3_settings"]["username"],config["h3_settings"]["password"]),verify=False, headers= {'accept':'application/xml'})
if (response.status_code & 200) == 200:
return response
def get_crawljob_text_page(url):
response = requests.get(url,auth=HTTPDigestAuth(config["h3_settings"]["username"],config["h3_settings"]["password"]),verify=False)
if (response.status_code & 200) == 200:
return response
def get_config_path(url):
response = get_crawljob_page(url)
root=ET.fromstring(response.text)
config_path = root.find('primaryConfig').text
return config_path
def increment_crawl_number(url, source_config_file, dest_config_file):
parser = ET.XMLParser(remove_comments=False)
config_tree = ET.parse(source_config_file,parser=parser)
ns = {'beans': 'http://www.springframework.org/schema/beans'}
properties = config_tree.getroot().findall("./beans:bean[@id='simpleOverrides']/beans:property/beans:value",ns)[0].text
m = re.finditer('(?m)^[^\.]*[wW]arcWriter\.prefix=[^\d]*-(?P<warcid>\d{3})(-.*)?',properties)
for i in m:
warc_id=int(i.group('warcid'))
warc_id=warc_id+1
properties_incremented = re.sub('(?m)^(?P<prefix>[^\.]*[wW]arcWriter\.prefix=[^\d]*-)(?P<warcid>\d{3})(?P<suffix>(-.*)?)','\g<prefix>'+str(warc_id).zfill(3)+'\g<suffix>',properties)
config_tree.getroot().findall("./beans:bean[@id='simpleOverrides']/beans:property/beans:value",ns)[0].text = properties_incremented
config_tree.write(dest_config_file,xml_declaration=True,encoding="utf-8")
def find_replace_xpath(url, source_config_file, dest_config_file, xpath, regex, replacement):
parser = ET.XMLParser(remove_comments=False)
config_tree = ET.parse(source_config_file,parser=parser)
ns = {'beans': 'http://www.springframework.org/schema/beans'}
config_field = config_tree.getroot().findall(xpath,ns)[0].text
#print(config_field)
modified_field = re.sub(re.compile(regex,re.MULTILINE),replacement,config_field)
#print(modified_field)
config_tree.getroot().findall(xpath,ns)[0].text=modified_field
config_tree.write(dest_config_file,xml_declaration=True,encoding="utf-8")
def test_full_cycle(url):
status = get_crawl_status(url)
logger.info("Status: %s" %status)
available_actions = get_available_actions(url)
if status == Crawl_Status.unbuilt and "build" in available_actions:
build(url)
status = get_crawl_status(url)
available_actions = get_available_actions(url)
if status == Crawl_Status.ready and "launch" in available_actions:
launch(url)
status = get_crawl_status(url)
available_actions = get_available_actions(url)
if status == Crawl_Status.paused and "unpause" in available_actions:
unpause(url)
time.sleep(5)
status = get_crawl_status(url)
available_actions = get_available_actions(url)
if status == Crawl_Status.running and "pause" in available_actions:
pause(url)
runScript(url,'rawOut.println("testing")')
runScript(url,'htmlOut.println("testing")')
status = get_crawl_status(url)
available_actions = get_available_actions(url)
if status == Crawl_Status.paused and "checkpoint" in available_actions:
checkpoint(url)
status = get_crawl_status(url)
available_actions = get_available_actions(url)
if status == Crawl_Status.paused and "terminate" in available_actions:
terminate(url)
status = get_crawl_status(url)
available_actions = get_available_actions(url)
if status == Crawl_Status.finished and "teardown" in available_actions:
teardown(url)
def do_crawl_action_until_status(url, action, expected_status):
logger.info("-Doing action: %s" %action)
response = send_command(url,{"action":action})
if (response.status_code & 200) == 200:
retries=0
while get_crawl_status(url) != expected_status:
if retries > config["max_retries"]:
logger.info("Max retries exceeded while waiting for: %s" % expected_status)
sys.exit()
logger.info("...")
time.sleep(config["retry_delay_seconds"])
retries+=1
logger.error("Status: %s" %expected_status)
def build(url):
do_crawl_action_until_status(url, Crawl_Actions.build, Crawl_Status.ready)
def launch(url):
do_crawl_action_until_status(url,Crawl_Actions.launch, Crawl_Status.paused)
def unpause(url):
do_crawl_action_until_status(url,Crawl_Actions.unpause,Crawl_Status.running)
def pause(url):
do_crawl_action_until_status(url, Crawl_Actions.pause, Crawl_Status.paused)
def checkpoint(url):
do_crawl_action_until_status(url, Crawl_Actions.checkpoint, Crawl_Status.paused)
def terminate(url):
do_crawl_action_until_status(url, Crawl_Actions.terminate, Crawl_Status.finished)
def teardown(url):
do_crawl_action_until_status(url, Crawl_Actions.teardown, Crawl_Status.unbuilt)
def runScript(url, script):
response = send_command(url + '/script',{'engine':'groovy','script':script})
if (response.status_code & 200) == 200:
logger.debug(response.text)
root = ET.fromstring(response.text)
return_script = root.find('script')
raw_out = root.find('rawOutput')
html_out = root.find('htmlOutput')
lines_executed = root.find('linesExecuted')
if return_script is not None:
logger.info("Script run: %s" % return_script.text)
if lines_executed is not None:
logger.info("%s lines executed" % lines_executed.text)
if raw_out is not None:
logger.info("Output:\n %s" % raw_out.text)
if html_out is not None:
logger.info("Output:\n %s" % html_out.text)
def send_command(url, data):
response = session.post(url,data=data,auth=HTTPDigestAuth(config["h3_settings"]["username"],config["h3_settings"]["password"]),verify=False, headers= {'accept':'application/xml'})
return response
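# Hedged sketch, added for illustration; it is not part of the original
# script. This is the minimal structure config/config.yaml is expected to
# provide, inferred from the keys read above; values are placeholders.
_EXAMPLE_CONFIG = {
    'h3_settings': {'username': 'admin', 'password': 'secret'},
    'max_retries': 60,
    'retry_delay_seconds': 10,
}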
if __name__ == "__main__":
main()
|
import os
import subprocess
import sys
import pytest
sys.path.append("tests/python")
import testing as tm
import test_demos as td # noqa
@pytest.mark.skipif(**tm.no_cupy())
def test_data_iterator():
script = os.path.join(td.PYTHON_DEMO_DIR, 'quantile_data_iterator.py')
cmd = ['python', script]
subprocess.check_call(cmd)
def test_update_process_demo():
script = os.path.join(td.PYTHON_DEMO_DIR, 'update_process.py')
cmd = ['python', script]
subprocess.check_call(cmd)
def test_categorical_demo():
script = os.path.join(td.PYTHON_DEMO_DIR, 'categorical.py')
cmd = ['python', script]
subprocess.check_call(cmd)
@pytest.mark.skipif(**tm.no_dask())
@pytest.mark.skipif(**tm.no_dask_cuda())
@pytest.mark.skipif(**tm.no_cupy())
@pytest.mark.mgpu
def test_dask_training():
script = os.path.join(tm.PROJECT_ROOT, 'demo', 'dask', 'gpu_training.py')
cmd = ['python', script, '--ddqdm=1']
subprocess.check_call(cmd)
cmd = ['python', script, '--ddqdm=0']
subprocess.check_call(cmd)
|
"""
Tests for zipline.pipeline.loaders.frame.DataFrameLoader.
"""
from unittest import TestCase
from mock import patch
from numpy import arange, ones
from numpy.testing import assert_array_equal
from pandas import (
DataFrame,
DatetimeIndex,
Int64Index,
)
from zipline.lib.adjustment import (
Float64Add,
Float64Multiply,
Float64Overwrite,
)
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.loaders.frame import (
ADD,
DataFrameLoader,
MULTIPLY,
OVERWRITE,
)
from zipline.utils.tradingcalendar import trading_day
class DataFrameLoaderTestCase(TestCase):
def setUp(self):
self.nsids = 5
self.ndates = 20
self.sids = Int64Index(range(self.nsids))
self.dates = DatetimeIndex(
start='2014-01-02',
freq=trading_day,
periods=self.ndates,
)
self.mask = ones((len(self.dates), len(self.sids)), dtype=bool)
def tearDown(self):
pass
def test_bad_input(self):
data = arange(100).reshape(self.ndates, self.nsids)
baseline = DataFrame(data, index=self.dates, columns=self.sids)
loader = DataFrameLoader(
USEquityPricing.close,
baseline,
)
with self.assertRaises(ValueError):
# Wrong column.
loader.load_adjusted_array(
[USEquityPricing.open], self.dates, self.sids, self.mask
)
with self.assertRaises(ValueError):
# Too many columns.
loader.load_adjusted_array(
[USEquityPricing.open, USEquityPricing.close],
self.dates,
self.sids,
self.mask,
)
def test_baseline(self):
data = arange(100).reshape(self.ndates, self.nsids)
baseline = DataFrame(data, index=self.dates, columns=self.sids)
loader = DataFrameLoader(USEquityPricing.close, baseline)
dates_slice = slice(None, 10, None)
sids_slice = slice(1, 3, None)
[adj_array] = loader.load_adjusted_array(
[USEquityPricing.close],
self.dates[dates_slice],
self.sids[sids_slice],
self.mask[dates_slice, sids_slice],
)
for idx, window in enumerate(adj_array.traverse(window_length=3)):
expected = baseline.values[dates_slice, sids_slice][idx:idx + 3]
assert_array_equal(window, expected)
def test_adjustments(self):
data = arange(100).reshape(self.ndates, self.nsids)
baseline = DataFrame(data, index=self.dates, columns=self.sids)
# Use the dates from index 10 on and sids 1-3.
dates_slice = slice(10, None, None)
sids_slice = slice(1, 4, None)
# Adjustments that should actually affect the output.
relevant_adjustments = [
{
'sid': 1,
'start_date': None,
'end_date': self.dates[15],
'apply_date': self.dates[16],
'value': 0.5,
'kind': MULTIPLY,
},
{
'sid': 2,
'start_date': self.dates[5],
'end_date': self.dates[15],
'apply_date': self.dates[16],
'value': 1.0,
'kind': ADD,
},
{
'sid': 2,
'start_date': self.dates[15],
'end_date': self.dates[16],
'apply_date': self.dates[17],
'value': 1.0,
'kind': ADD,
},
{
'sid': 3,
'start_date': self.dates[16],
'end_date': self.dates[17],
'apply_date': self.dates[18],
'value': 99.0,
'kind': OVERWRITE,
},
]
# These adjustments shouldn't affect the output.
irrelevant_adjustments = [
{ # Sid Not Requested
'sid': 0,
'start_date': self.dates[16],
'end_date': self.dates[17],
'apply_date': self.dates[18],
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Sid Unknown
'sid': 9999,
'start_date': self.dates[16],
'end_date': self.dates[17],
'apply_date': self.dates[18],
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Date Not Requested
'sid': 2,
'start_date': self.dates[1],
'end_date': self.dates[2],
'apply_date': self.dates[3],
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Date Before Known Data
'sid': 2,
'start_date': self.dates[0] - (2 * trading_day),
'end_date': self.dates[0] - trading_day,
'apply_date': self.dates[0] - trading_day,
'value': -9999.0,
'kind': OVERWRITE,
},
{ # Date After Known Data
'sid': 2,
'start_date': self.dates[-1] + trading_day,
'end_date': self.dates[-1] + (2 * trading_day),
'apply_date': self.dates[-1] + (3 * trading_day),
'value': -9999.0,
'kind': OVERWRITE,
},
]
adjustments = DataFrame(relevant_adjustments + irrelevant_adjustments)
loader = DataFrameLoader(
USEquityPricing.close,
baseline,
adjustments=adjustments,
)
expected_baseline = baseline.iloc[dates_slice, sids_slice]
formatted_adjustments = loader.format_adjustments(
self.dates[dates_slice],
self.sids[sids_slice],
)
expected_formatted_adjustments = {
6: [
Float64Multiply(
first_row=0,
last_row=5,
first_col=0,
last_col=0,
value=0.5,
),
Float64Add(
first_row=0,
last_row=5,
first_col=1,
last_col=1,
value=1.0,
),
],
7: [
Float64Add(
first_row=5,
last_row=6,
first_col=1,
last_col=1,
value=1.0,
),
],
8: [
Float64Overwrite(
first_row=6,
last_row=7,
first_col=2,
last_col=2,
value=99.0,
)
],
}
self.assertEqual(formatted_adjustments, expected_formatted_adjustments)
mask = self.mask[dates_slice, sids_slice]
with patch('zipline.pipeline.loaders.frame.adjusted_array') as m:
loader.load_adjusted_array(
columns=[USEquityPricing.close],
dates=self.dates[dates_slice],
assets=self.sids[sids_slice],
mask=mask,
)
self.assertEqual(m.call_count, 1)
args, kwargs = m.call_args
assert_array_equal(kwargs['data'], expected_baseline.values)
assert_array_equal(kwargs['mask'], mask)
self.assertEqual(kwargs['adjustments'], expected_formatted_adjustments)
|
'''
Implements the RTS ALUA Target Port Group class.
This file is part of RTSLib.
Copyright (c) 2016 by Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
'''
from .node import CFSNode
from .utils import RTSLibError, RTSLibALUANotSupported, fread, fwrite
import six
alua_rw_params = ['alua_access_state', 'alua_access_status',
'alua_write_metadata', 'alua_access_type', 'preferred',
'nonop_delay_msecs', 'trans_delay_msecs',
'implicit_trans_secs', 'alua_support_offline',
'alua_support_standby', 'alua_support_transitioning',
'alua_support_active_nonoptimized',
'alua_support_unavailable', 'alua_support_active_optimized']
alua_ro_params = ['tg_pt_gp_id', 'members', 'alua_support_lba_dependent']
alua_types = ['None', 'Implicit', 'Explicit', 'Implicit and Explicit']
alua_statuses = ['None', 'Altered by Explicit STPG', 'Altered by Implicit ALUA']
class ALUATargetPortGroup(CFSNode):
"""
ALUA Target Port Group interface
"""
def __repr__(self):
return "<ALUA TPG %s>" % self.name
def __init__(self, storage_object, name, tag=None):
"""
@param storage_object: backstore storage object to create ALUA group for
@param name: name of ALUA group
@param tag: target port group id. If not passed in, try to look
up existing ALUA TPG with the same name
"""
if storage_object.alua_supported is False:
raise RTSLibALUANotSupported("Backend does not support ALUA setup")
# default_tg_pt_gp takes tag 1
if tag is not None and (tag > 65535 or tag < 1):
raise RTSLibError("The TPG Tag must be between 1 and 65535")
super(ALUATargetPortGroup, self).__init__()
self.name = name
self.storage_object = storage_object
self._path = "%s/alua/%s" % (storage_object.path, name)
if tag is not None:
try:
self._create_in_cfs_ine('create')
except OSError as msg:
raise RTSLibError(msg)
try:
fwrite("%s/tg_pt_gp_id" % self._path, tag)
except IOError as msg:
self.delete()
raise RTSLibError("Cannot set id to %d: %s" % (tag, str(msg)))
else:
try:
self._create_in_cfs_ine('lookup')
except OSError as msg:
raise RTSLibError(msg)
# Public
def delete(self):
"""
Delete ALUA TPG and unmap from LUNs
"""
self._check_self()
# default_tg_pt_gp created by the kernel and cannot be deleted
if self.name == "default_tg_pt_gp":
raise RTSLibError("Can not delete default_tg_pt_gp")
# This will reset the ALUA tpg to default_tg_pt_gp
super(ALUATargetPortGroup, self).delete()
def _get_alua_access_state(self):
self._check_self()
path = "%s/alua_access_state" % self.path
return int(fread(path))
def _set_alua_access_state(self, newstate):
self._check_self()
path = "%s/alua_access_state" % self.path
try:
fwrite(path, str(int(newstate)))
except IOError as e:
raise RTSLibError("Cannot change ALUA state: %s" % e)
def _get_alua_access_status(self):
self._check_self()
path = "%s/alua_access_status" % self.path
status = fread(path)
return alua_statuses.index(status)
def _set_alua_access_status(self, newstatus):
self._check_self()
path = "%s/alua_access_status" % self.path
try:
fwrite(path, str(int(newstatus)))
except IOError as e:
raise RTSLibError("Cannot change ALUA status: %s" % e)
def _get_alua_access_type(self):
self._check_self()
path = "%s/alua_access_type" % self.path
alua_type = fread(path)
return alua_types.index(alua_type)
def _set_alua_access_type(self, access_type):
self._check_self()
path = "%s/alua_access_type" % self.path
try:
fwrite(path, str(int(access_type)))
except IOError as e:
raise RTSLibError("Cannot change ALUA access type: %s" % e)
def _get_preferred(self):
self._check_self()
path = "%s/preferred" % self.path
return int(fread(path))
def _set_preferred(self, pref):
self._check_self()
path = "%s/preferred" % self.path
try:
fwrite(path, str(int(pref)))
except IOError as e:
raise RTSLibError("Cannot set preferred: %s" % e)
def _get_alua_write_metadata(self):
self._check_self()
path = "%s/alua_write_metadata" % self.path
return int(fread(path))
def _set_alua_write_metadata(self, pref):
self._check_self()
path = "%s/alua_write_metadata" % self.path
try:
fwrite(path, str(int(pref)))
except IOError as e:
raise RTSLibError("Cannot set alua_write_metadata: %s" % e)
def _get_alua_support_active_nonoptimized(self):
self._check_self()
path = "%s/alua_support_active_nonoptimized" % self.path
return int(fread(path))
def _set_alua_support_active_nonoptimized(self, enabled):
self._check_self()
path = "%s/alua_support_active_nonoptimized" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_active_nonoptimized: %s" % e)
def _get_alua_support_active_optimized(self):
self._check_self()
path = "%s/alua_support_active_optimized" % self.path
return int(fread(path))
def _set_alua_support_active_optimized(self, enabled):
self._check_self()
path = "%s/alua_support_active_optimized" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_active_optimized: %s" % e)
def _get_alua_support_offline(self):
self._check_self()
path = "%s/alua_support_offline" % self.path
return int(fread(path))
def _set_alua_support_offline(self, enabled):
self._check_self()
path = "%s/alua_support_offline" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_offline: %s" % e)
def _get_alua_support_unavailable(self):
self._check_self()
path = "%s/alua_support_unavailable" % self.path
return int(fread(path))
def _set_alua_support_unavailable(self, enabled):
self._check_self()
path = "%s/alua_support_unavailable" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_unavailable: %s" % e)
def _get_alua_support_standby(self):
self._check_self()
path = "%s/alua_support_standby" % self.path
return int(fread(path))
def _set_alua_support_standby(self, enabled):
self._check_self()
path = "%s/alua_support_standby" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_standby: %s" % e)
def _get_alua_support_transitioning(self):
self._check_self()
path = "%s/alua_support_transitioning" % self.path
return int(fread(path))
def _set_alua_support_transitioning(self, enabled):
self._check_self()
path = "%s/alua_support_transitioning" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_transitioning: %s" % e)
def _get_alua_support_lba_dependent(self):
self._check_self()
path = "%s/alua_support_lba_dependent" % self.path
return int(fread(path))
def _get_members(self):
self._check_self()
path = "%s/members" % self.path
member_list = []
for member in fread(path).splitlines():
lun_path = member.split("/")
if len(lun_path) != 4:
continue
member_list.append({ 'driver': lun_path[0], 'target': lun_path[1],
'tpgt': int(lun_path[2].split("_", 1)[1]),
'lun': int(lun_path[3].split("_", 1)[1]) })
return member_list
def _get_tg_pt_gp_id(self):
self._check_self()
path = "%s/tg_pt_gp_id" % self.path
return int(fread(path))
def _get_trans_delay_msecs(self):
self._check_self()
path = "%s/trans_delay_msecs" % self.path
return int(fread(path))
def _set_trans_delay_msecs(self, secs):
self._check_self()
path = "%s/trans_delay_msecs" % self.path
try:
fwrite(path, str(int(secs)))
except IOError as e:
raise RTSLibError("Cannot set trans_delay_msecs: %s" % e)
def _get_implicit_trans_secs(self):
self._check_self()
path = "%s/implicit_trans_secs" % self.path
return int(fread(path))
def _set_implicit_trans_secs(self, secs):
self._check_self()
path = "%s/implicit_trans_secs" % self.path
try:
fwrite(path, str(int(secs)))
except IOError as e:
raise RTSLibError("Cannot set implicit_trans_secs: %s" % e)
def _get_nonop_delay_msecs(self):
self._check_self()
path = "%s/nonop_delay_msecs" % self.path
return int(fread(path))
def _set_nonop_delay_msecs(self, delay):
self._check_self()
path = "%s/nonop_delay_msecs" % self.path
try:
fwrite(path, str(int(delay)))
except IOError as e:
raise RTSLibError("Cannot set nonop_delay_msecs: %s" % e)
def dump(self):
d = super(ALUATargetPortGroup, self).dump()
d['name'] = self.name
d['tg_pt_gp_id'] = self.tg_pt_gp_id
for param in alua_rw_params:
d[param] = getattr(self, param, None)
return d
alua_access_state = property(_get_alua_access_state, _set_alua_access_state,
doc="Get or set ALUA state. "
"0 = Active/optimized, "
"1 = Active/non-optimized, "
"2 = Standby, "
"3 = Unavailable, "
"4 = LBA Dependent, "
"14 = Offline, "
"15 = Transitioning")
alua_access_type = property(_get_alua_access_type, _set_alua_access_type,
doc="Get or set ALUA access type. "
"1 = Implicit, 2 = Explicit, 3 = Both")
alua_access_status = property(_get_alua_access_status,
_set_alua_access_status,
doc="Get or set ALUA access status. "
"0 = None, "
"1 = Altered by Explicit STPG, "
"2 = Altered by Implicit ALUA")
preferred = property(_get_preferred, _set_preferred,
doc="Get or set preferred bit. 1 = Pref, 0 Not-Pre")
alua_write_metadata = property(_get_alua_write_metadata,
_set_alua_write_metadata,
doc="Get or set alua_write_metadata flag. "
"enable (1) or disable (0)")
tg_pt_gp_id = property(_get_tg_pt_gp_id, doc="Get ALUA Target Port Group ID")
members = property(_get_members, doc="Get LUNs in Target Port Group")
alua_support_active_nonoptimized = property(_get_alua_support_active_nonoptimized,
_set_alua_support_active_nonoptimized,
doc="Enable (1) or disable (0) "
"Active/non-optimized support")
alua_support_active_optimized = property(_get_alua_support_active_optimized,
_set_alua_support_active_optimized,
doc="Enable (1) or disable (0) "
"Active/optimized support")
alua_support_offline = property(_get_alua_support_offline,
_set_alua_support_offline,
doc="Enable (1) or disable (0) "
"offline support")
alua_support_unavailable = property(_get_alua_support_unavailable,
_set_alua_support_unavailable,
doc="enable (1) or disable (0) "
"unavailable support")
alua_support_standby = property(_get_alua_support_standby,
_set_alua_support_standby,
doc="enable (1) or disable (0) "
"standby support")
alua_support_lba_dependent = property(_get_alua_support_lba_dependent,
doc="show lba_dependent support "
"enabled (1) or disabled (0)")
alua_support_transitioning = property(_get_alua_support_transitioning,
_set_alua_support_transitioning,
doc="enable (1) or disable (0) "
"transitioning support")
trans_delay_msecs = property(_get_trans_delay_msecs,
_set_trans_delay_msecs,
doc="msecs to delay state transition")
implicit_trans_secs = property(_get_implicit_trans_secs,
_set_implicit_trans_secs,
doc="implicit transition time limit")
nonop_delay_msecs = property(_get_nonop_delay_msecs, _set_nonop_delay_msecs,
doc="msecs to delay IO when non-optimized")
@classmethod
def setup(cls, storage_obj, alua_tpg, err_func):
name = alua_tpg['name']
if name == 'default_tg_pt_gp':
return
alua_tpg_obj = cls(storage_obj, name, alua_tpg['tg_pt_gp_id'])
for param, value in six.iteritems(alua_tpg):
if param != 'name' and param != 'tg_pt_gp_id':
try:
setattr(alua_tpg_obj, param, value)
except Exception:
raise RTSLibError("Could not set attribute '%s' for alua tpg '%s'"
% (param, alua_tpg['name']))
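
# Usage sketch (added for illustration; not part of the original module). Because this
# module uses relative imports, the snippet below belongs in a separate script, run as
# root on a target-enabled kernel with configfs mounted. "disk0" and "/dev/sdb" are
# illustrative names, and importing both classes straight from rtslib_fb is an
# assumption about how the package exposes them.
#
#   from rtslib_fb import BlockStorageObject, ALUATargetPortGroup
#
#   so = BlockStorageObject("disk0", dev="/dev/sdb")
#   tpg = ALUATargetPortGroup(so, "tpg_secondary", tag=2)  # tag 1 is default_tg_pt_gp
#   tpg.alua_access_state = 1   # Active/non-optimized
#   tpg.preferred = 0
#   print(tpg.dump())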
|
from . import submission
from .submission.run_context import RunContext
from .submission.submit import SubmitTarget
from .submission.submit import PathType
from .submission.submit import SubmitConfig
from .submission.submit import submit_run
from .submission.submit import get_path_from_template
from .submission.submit import convert_path
from .submission.submit import make_run_dir_path
from .util import EasyDict
submit_config: SubmitConfig = None # Package level variable for SubmitConfig which is only valid when inside the run function.
|
"""A soccer ball that keeps track of ball-player contacts."""
import os
from dm_control import mjcf
from dm_control.entities import props
import numpy as np
from dm_control.utils import io as resources
_ASSETS_PATH = os.path.join(os.path.dirname(__file__), 'assets', 'soccer_ball')
_REGULATION_RADIUS = 0.117 # Meters.
_REGULATION_MASS = 0.45 # Kilograms.
_DEFAULT_FRICTION = (0.7, 0.05, 0.04) # (slide, spin, roll).
_DEFAULT_DAMP_RATIO = 0.4
def _get_texture(name):
contents = resources.GetResource(
os.path.join(_ASSETS_PATH, '{}.png'.format(name)))
return mjcf.Asset(contents, '.png')
def regulation_soccer_ball():
return SoccerBall(
radius=_REGULATION_RADIUS,
mass=_REGULATION_MASS,
friction=_DEFAULT_FRICTION,
damp_ratio=_DEFAULT_DAMP_RATIO)
class SoccerBall(props.Primitive):
"""A soccer ball that keeps track of entities that come into contact."""
def _build(self,
radius=0.35,
mass=0.045,
friction=(0.7, 0.075, 0.075),
damp_ratio=1.0,
name='soccer_ball'):
"""Builds this soccer ball.
Args:
radius: The radius (in meters) of this target sphere.
mass: Mass (in kilograms) of the ball.
friction: Friction parameters of the ball geom with the three dimensions
corresponding to (slide, spin, roll) frictions.
damp_ratio: A real positive number. Lower implies less dampening upon
contacts.
name: The name of this entity.
"""
super()._build(geom_type='sphere', size=(radius,), name=name)
texture = self._mjcf_root.asset.add(
'texture',
name='soccer_ball',
type='cube',
fileup=_get_texture('up'),
filedown=_get_texture('down'),
filefront=_get_texture('front'),
fileback=_get_texture('back'),
fileleft=_get_texture('left'),
fileright=_get_texture('right'))
material = self._mjcf_root.asset.add(
'material', name='soccer_ball', texture=texture)
if damp_ratio < 0.0:
raise ValueError(
f'Invalid `damp_ratio` parameter ({damp_ratio} is not positive).')
self._geom.set_attributes(
pos=[0, 0, radius],
size=[radius],
condim=6,
priority=1,
mass=mass,
friction=friction,
solref=[0.02, damp_ratio],
material=material)
# Add some tracking cameras for visualization and logging.
self._mjcf_root.worldbody.add(
'camera',
name='ball_cam_near',
pos=[0, -2, 2],
zaxis=[0, -1, 1],
fovy=70,
mode='trackcom')
self._mjcf_root.worldbody.add(
'camera',
name='ball_cam',
pos=[0, -7, 7],
zaxis=[0, -1, 1],
fovy=70,
mode='trackcom')
self._mjcf_root.worldbody.add(
'camera',
name='ball_cam_far',
pos=[0, -10, 10],
zaxis=[0, -1, 1],
fovy=70,
mode='trackcom')
# Keep track of entities to team mapping.
self._players = []
# Initialize tracker attributes.
self.initialize_entity_trackers()
def register_player(self, player):
self._players.append(player)
def initialize_entity_trackers(self):
self._last_hit = None
self._hit = False
self._repossessed = False
self._intercepted = False
# Tracks distance traveled by the ball in between consecutive hits.
self._pos_at_last_step = None
self._dist_since_last_hit = None
self._dist_between_last_hits = None
def initialize_episode(self, physics, unused_random_state):
self._geom_id = physics.model.name2id(self._geom.full_identifier, 'geom')
self._geom_id_to_player = {}
for player in self._players:
geoms = player.walker.mjcf_model.find_all('geom')
for geom in geoms:
geom_id = physics.model.name2id(geom.full_identifier, 'geom')
self._geom_id_to_player[geom_id] = player
self.initialize_entity_trackers()
def after_substep(self, physics, unused_random_state):
"""Resolve contacts and update ball-player contact trackers."""
if self._hit:
# Ball has already registered a valid contact within step (during one of
# previous after_substep calls).
return
# Iterate through all contacts to find the first contact between the ball
# and one of the registered entities.
for contact in physics.data.contact:
# Keep contacts that involve the ball and one of the registered entities.
has_self = False
for geom_id in (contact.geom1, contact.geom2):
if geom_id == self._geom_id:
has_self = True
else:
player = self._geom_id_to_player.get(geom_id)
if has_self and player:
# Detected a contact between the ball and a registered player.
if self._last_hit is not None:
self._intercepted = player.team != self._last_hit.team
else:
self._intercepted = True
# Register repossessed before updating last_hit player.
self._repossessed = player is not self._last_hit
self._last_hit = player
# Register hit event.
self._hit = True
break
def before_step(self, physics, random_state):
super().before_step(physics, random_state)
# Reset per simulation step indicator.
self._hit = False
self._repossessed = False
self._intercepted = False
def after_step(self, physics, random_state):
super().after_step(physics, random_state)
pos = physics.bind(self._geom).xpos
if self._hit:
# SoccerBall is hit on this step. Update dist_between_last_hits
# to dist_since_last_hit before resetting dist_since_last_hit.
self._dist_between_last_hits = self._dist_since_last_hit
self._dist_since_last_hit = 0.
self._pos_at_last_step = pos.copy()
if self._dist_since_last_hit is not None:
# Accumulate distance traveled since last hit event.
self._dist_since_last_hit += np.linalg.norm(pos - self._pos_at_last_step)
self._pos_at_last_step = pos.copy()
@property
def last_hit(self):
"""The player that last came in contact with the ball or `None`."""
return self._last_hit
@property
def hit(self):
"""Indicates if the ball is hit during the last simulation step.
For a timeline shown below:
..., agent.step, simulation, agent.step, ...
Returns:
True: if the ball is hit by a registered player during simulation step.
False: if not.
"""
return self._hit
@property
def repossessed(self):
"""Indicates if the ball has been repossessed by a different player.
For a timeline shown below:
..., agent.step, simulation, agent.step, ...
Returns:
True if the ball is hit by a registered player during simulation step
and that player is different from `last_hit`.
False: if the ball is not hit, or the ball is hit by `last_hit` player.
"""
return self._repossessed
@property
def intercepted(self):
"""Indicates if the ball has been intercepted by a different team.
For a timeline shown below:
..., agent.step, simulation, agent.step, ...
Returns:
True: if the ball is hit for the first time, or repossessed by a player
from a different team.
False: if the ball is not hit, not repossessed, or repossessed by a
teammate to `last_hit`.
"""
return self._intercepted
@property
def dist_between_last_hits(self):
"""Distance between last consecutive hits.
Returns:
Distance between last two consecutive hit events or `None` if there has
not been two consecutive hits on the ball.
"""
return self._dist_between_last_hits
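
# Usage sketch (added for illustration; not part of the original module): build a
# regulation ball as a standalone MJCF model and step the physics once. This only
# assumes dm_control is installed with the soccer-ball texture assets next to this
# file; in an actual task the ball would instead be attached to an arena by composer.
if __name__ == '__main__':
  ball = regulation_soccer_ball()
  physics = mjcf.Physics.from_mjcf_model(ball.mjcf_model)
  physics.step()
  print('stepped standalone soccer ball to t =', physics.time())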
|
"""
Track Control User Modes component originally designed for use with the APC40.
Copyright (C) 2010 Hanz Petrov <hanz.petrov@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import Live
from _Framework.ModeSelectorComponent import ModeSelectorComponent
from _Framework.ButtonElement import ButtonElement
from _Framework.DeviceComponent import DeviceComponent
class EncoderUserModesComponent(ModeSelectorComponent):
' SelectorComponent that assigns encoders to different user functions '
__module__ = __name__
def __init__(self, parent, encoder_modes, param_controls, bank_buttons, mixer, device, encoder_device_modes, encoder_eq_modes): #, mixer, sliders):
assert (len(bank_buttons) == 4)
ModeSelectorComponent.__init__(self)
self._parent = parent
self._encoder_modes = encoder_modes
self._param_controls = param_controls
self._bank_buttons = bank_buttons
self._mixer = mixer
self._device = device
self._encoder_device_modes = encoder_device_modes
self._encoder_eq_modes = encoder_eq_modes
self._mode_index = 0
self._modes_buttons = []
self._user_buttons = []
self._last_mode = 0
def disconnect(self):
ModeSelectorComponent.disconnect(self)
self._parent = None
self._encoder_modes = None
self._param_controls = None
self._bank_buttons = None
self._mixer = None
self._device = None
self._encoder_device_modes = None
self._encoder_eq_modes = None
self._modes_buttons = None
self._user_buttons = None
def on_enabled_changed(self):
pass
def set_mode(self, mode):
assert isinstance(mode, int)
assert (mode in range(self.number_of_modes()))
if (self._mode_index != mode):
self._last_mode = self._mode_index # keep track of previous mode, to allow conditional actions
self._mode_index = mode
self._set_modes()
def set_mode_buttons(self, buttons):
assert isinstance(buttons, (tuple,
type(None)))
for button in self._modes_buttons:
button.remove_value_listener(self._mode_value)
self._modes_buttons = []
if (buttons != None):
for button in buttons:
assert isinstance(button, ButtonElement)
identify_sender = True
button.add_value_listener(self._mode_value, identify_sender)
self._modes_buttons.append(button)
assert (self._mode_index in range(self.number_of_modes()))
def number_of_modes(self):
return 4
def update(self):
pass
def _mode_value(self, value, sender):
assert (len(self._modes_buttons) > 0)
assert isinstance(value, int)
assert isinstance(sender, ButtonElement)
assert (self._modes_buttons.count(sender) == 1)
if ((value != 0) or (not sender.is_momentary())):
self.set_mode(self._modes_buttons.index(sender))
def _set_modes(self):
if self.is_enabled():
assert (self._mode_index in range(self.number_of_modes()))
for index in range(len(self._modes_buttons)):
if (index <= self._mode_index):
self._modes_buttons[index].turn_on()
else:
self._modes_buttons[index].turn_off()
for button in self._modes_buttons:
button.release_parameter()
button.use_default_message()
for control in self._param_controls:
control.release_parameter()
control.use_default_message()
#control.set_needs_takeover(False)
self._encoder_modes.set_enabled(False)
self._encoder_device_modes.set_lock_button(None)
self._encoder_device_modes._alt_device.set_bank_nav_buttons(None, None)
self._encoder_device_modes._alt_device.set_on_off_button(None)
if self._encoder_device_modes._alt_device._parameter_controls != None:
for control in self._encoder_device_modes._alt_device._parameter_controls:
control.release_parameter()
self._encoder_device_modes.set_enabled(False)
self._encoder_eq_modes.set_enabled(False)
self._encoder_eq_modes.set_lock_button(None)
if self._encoder_eq_modes._track_eq != None:
self._encoder_eq_modes._track_eq.set_cut_buttons(None)
if self._encoder_eq_modes._track_eq._gain_controls != None:
for control in self._encoder_eq_modes._track_eq._gain_controls:
control.release_parameter()
if self._encoder_eq_modes._strip != None:
self._encoder_eq_modes._strip.set_send_controls(None)
self._user_buttons = []
if (self._mode_index == 0):
self._encoder_modes.set_enabled(True)
elif (self._mode_index == 1):
self._encoder_device_modes.set_enabled(True)
self._encoder_device_modes.set_controls_and_buttons(self._param_controls, self._modes_buttons)
elif (self._mode_index == 2):
self._encoder_eq_modes.set_enabled(True)
self._encoder_eq_modes.set_controls_and_buttons(self._param_controls, self._modes_buttons)
elif (self._mode_index == 3):
self._encoder_eq_modes._ignore_buttons = True
if self._encoder_eq_modes._track_eq != None:
self._encoder_eq_modes._track_eq._ignore_cut_buttons = True
self._encoder_device_modes._ignore_buttons = True
for button in self._modes_buttons:
self._user_buttons.append(button)
for control in self._param_controls:
control.set_identifier((control.message_identifier() - 9))
control._ring_mode_button.send_value(0)
else:
pass
#self._rebuild_callback()
|
"""Tests for the Jaco arm class."""
import itertools
import unittest
from absl.testing import absltest
from absl.testing import parameterized
from dm_control import composer
from dm_control import mjcf
from dm_control.entities.manipulators import kinova
from dm_control.entities.manipulators.kinova import jaco_arm
from dm_control.mujoco.wrapper import mjbindings
import numpy as np
mjlib = mjbindings.mjlib
class JacoArmTest(parameterized.TestCase):
def test_can_compile_and_step_model(self):
arm = kinova.JacoArm()
physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model)
physics.step()
def test_can_attach_hand(self):
arm = kinova.JacoArm()
hand = kinova.JacoHand()
arm.attach(hand)
physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model)
physics.step()
# TODO(b/159974149): Investigate why the mass does not match the datasheet.
@unittest.expectedFailure
def test_mass(self):
arm = kinova.JacoArm()
physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model)
mass = physics.bind(arm.mjcf_model.worldbody).subtreemass
expected_mass = 4.4
self.assertAlmostEqual(mass, expected_mass)
@parameterized.parameters([
dict(actuator_index=0,
control_input=0,
expected_velocity=0.),
dict(actuator_index=0,
control_input=jaco_arm._LARGE_JOINT_MAX_VELOCITY,
expected_velocity=jaco_arm._LARGE_JOINT_MAX_VELOCITY),
dict(actuator_index=4,
control_input=jaco_arm._SMALL_JOINT_MAX_VELOCITY,
expected_velocity=jaco_arm._SMALL_JOINT_MAX_VELOCITY),
dict(actuator_index=0,
control_input=-jaco_arm._LARGE_JOINT_MAX_VELOCITY,
expected_velocity=-jaco_arm._LARGE_JOINT_MAX_VELOCITY),
dict(actuator_index=0,
control_input=2*jaco_arm._LARGE_JOINT_MAX_VELOCITY, # Test clipping
expected_velocity=jaco_arm._LARGE_JOINT_MAX_VELOCITY),
])
def test_velocity_actuation(
self, actuator_index, control_input, expected_velocity):
arm = kinova.JacoArm()
physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model)
actuator = arm.actuators[actuator_index]
bound_actuator = physics.bind(actuator)
bound_joint = physics.bind(actuator.joint)
acceleration_threshold = 1e-6
with physics.model.disable('contact', 'gravity'):
bound_actuator.ctrl = control_input
# Step until the joint has stopped accelerating.
while abs(bound_joint.qacc) > acceleration_threshold:
physics.step()
self.assertAlmostEqual(bound_joint.qvel[0], expected_velocity, delta=0.01)
@parameterized.parameters([
dict(joint_index=0, min_expected_torque=1.7, max_expected_torque=5.2),
dict(joint_index=5, min_expected_torque=0.8, max_expected_torque=7.0)])
def test_backdriving_torque(
self, joint_index, min_expected_torque, max_expected_torque):
arm = kinova.JacoArm()
physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model)
bound_joint = physics.bind(arm.joints[joint_index])
torque = min_expected_torque * 0.8
velocity_threshold = 0.1*2*np.pi/60. # 0.1 RPM
torque_increment = 0.01
seconds_per_torque_increment = 1.
max_torque = max_expected_torque * 1.1
while torque < max_torque:
# Ensure that no other forces are acting on the arm.
with physics.model.disable('gravity', 'contact', 'actuation'):
# Reset the simulation so that the initial velocity is zero.
physics.reset()
bound_joint.qfrc_applied = torque
while physics.time() < seconds_per_torque_increment:
physics.step()
if bound_joint.qvel[0] >= velocity_threshold:
self.assertBetween(torque, min_expected_torque, max_expected_torque)
return
# If we failed to accelerate the joint to the target velocity within the
# time limit we'll reset the simulation and increase the torque.
torque += torque_increment
self.fail('Torque of {} Nm insufficient to backdrive joint.'.format(torque))
@parameterized.parameters([
dict(joint_pos=0., expected_obs=[0., 1.]),
dict(joint_pos=-0.5*np.pi, expected_obs=[-1., 0.]),
dict(joint_pos=np.pi, expected_obs=[0., -1.]),
dict(joint_pos=10*np.pi, expected_obs=[0., 1.])])
def test_joints_pos_observables(self, joint_pos, expected_obs):
joint_index = 0
arm = kinova.JacoArm()
physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model)
physics.bind(arm.joints).qpos[joint_index] = joint_pos
actual_obs = arm.observables.joints_pos(physics)[joint_index]
np.testing.assert_array_almost_equal(expected_obs, actual_obs)
@parameterized.parameters(
dict(joint_index=idx, applied_torque=t)
for idx, t in itertools.product([0, 2, 4], [0., -6.8, 30.5]))
def test_joints_torque_observables(self, joint_index, applied_torque):
arm = kinova.JacoArm()
joint = arm.joints[joint_index]
physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model)
with physics.model.disable('gravity', 'limit', 'contact', 'actuation'):
# Apply a cartesian torque to the body containing the joint. We use
# `xfrc_applied` rather than `qfrc_applied` because forces in
# `qfrc_applied` are not measured by the torque sensor.
physics.bind(joint.parent).xfrc_applied[3:] = (
applied_torque * physics.bind(joint).xaxis)
observed_torque = arm.observables.joints_torque(physics)[joint_index]
# Note the change in sign, since the sensor measures torques in the
# child->parent direction.
self.assertAlmostEqual(observed_torque, -applied_torque, delta=0.1)
class JacoHandTest(parameterized.TestCase):
def test_can_compile_and_step_model(self):
hand = kinova.JacoHand()
physics = mjcf.Physics.from_mjcf_model(hand.mjcf_model)
physics.step()
# TODO(b/159974149): Investigate why the mass does not match the datasheet.
@unittest.expectedFailure
def test_hand_mass(self):
hand = kinova.JacoHand()
physics = mjcf.Physics.from_mjcf_model(hand.mjcf_model)
mass = physics.bind(hand.mjcf_model.worldbody).subtreemass
expected_mass = 0.727
self.assertAlmostEqual(mass, expected_mass)
def test_grip_force(self):
arena = composer.Arena()
hand = kinova.JacoHand()
arena.attach(hand)
# A sphere with a touch sensor for measuring grip force.
prop_model = mjcf.RootElement(model='grip_target')
prop_model.worldbody.add('geom', type='sphere', size=[0.02])
touch_site = prop_model.worldbody.add('site', type='sphere', size=[0.025])
touch_sensor = prop_model.sensor.add('touch', site=touch_site)
prop = composer.ModelWrapperEntity(prop_model)
# Add some slide joints to allow movement of the target in the XY plane.
# This helps the contact solver to converge more reliably.
prop_frame = arena.attach(prop)
prop_frame.add('joint', name='slide_x', type='slide', axis=(1, 0, 0))
prop_frame.add('joint', name='slide_y', type='slide', axis=(0, 1, 0))
physics = mjcf.Physics.from_mjcf_model(arena.mjcf_model)
bound_pinch_site = physics.bind(hand.pinch_site)
bound_actuators = physics.bind(hand.actuators)
bound_joints = physics.bind(hand.joints)
bound_touch = physics.bind(touch_sensor)
# Position the grip target at the pinch site.
prop.set_pose(physics, position=bound_pinch_site.xpos)
# Close the fingers with as much force as the actuators will allow.
bound_actuators.ctrl = bound_actuators.ctrlrange[:, 1]
# Run the simulation forward until the joints stop moving.
physics.step()
qvel_thresh = 1e-3 # radians / s
while max(abs(bound_joints.qvel)) > qvel_thresh:
physics.step()
expected_min_grip_force = 20.
expected_max_grip_force = 30.
grip_force = bound_touch.sensordata
self.assertBetween(
grip_force, expected_min_grip_force, expected_max_grip_force,
msg='Expected grip force to be between {} and {} N, got {} N.'.format(
expected_min_grip_force, expected_max_grip_force, grip_force))
@parameterized.parameters([dict(opening=True), dict(opening=False)])
def test_finger_travel_time(self, opening):
hand = kinova.JacoHand()
physics = mjcf.Physics.from_mjcf_model(hand.mjcf_model)
bound_actuators = physics.bind(hand.actuators)
bound_joints = physics.bind(hand.joints)
min_ctrl, max_ctrl = bound_actuators.ctrlrange.T
min_qpos, max_qpos = bound_joints.range.T
# Measure the time taken for the finger joints to traverse 99.9% of their
# total range.
qpos_tol = 1e-3 * (max_qpos - min_qpos)
if opening:
hand.set_grasp(physics=physics, close_factors=1.) # Fully closed.
np.testing.assert_array_almost_equal(bound_joints.qpos, max_qpos)
target_pos = min_qpos # Fully open.
ctrl = min_ctrl # Open the fingers as fast as the actuators will allow.
else:
hand.set_grasp(physics=physics, close_factors=0.) # Fully open.
np.testing.assert_array_almost_equal(bound_joints.qpos, min_qpos)
target_pos = max_qpos # Fully closed.
ctrl = max_ctrl # Close the fingers as fast as the actuators will allow.
# Run the simulation until all joints have reached their target positions.
bound_actuators.ctrl = ctrl
while np.any(abs(bound_joints.qpos - target_pos) > qpos_tol):
with physics.model.disable('gravity'):
physics.step()
expected_travel_time = 1.2 # Seconds.
self.assertAlmostEqual(physics.time(), expected_travel_time, delta=0.1)
@parameterized.parameters([
dict(pos=np.r_[0., 0., 0.3], quat=np.r_[0., 1., 0., 1.]),
dict(pos=np.r_[0., -0.1, 0.5], quat=np.r_[1., 1., 0., 0.]),
])
def test_pinch_site_observables(self, pos, quat):
arm = kinova.JacoArm()
hand = kinova.JacoHand()
arena = composer.Arena()
arm.attach(hand)
arena.attach(arm)
physics = mjcf.Physics.from_mjcf_model(arena.mjcf_model)
# Normalize the quaternion.
quat /= np.linalg.norm(quat)
# Drive the arm so that the pinch site is at the desired position and
# orientation.
success = arm.set_site_to_xpos(
physics=physics,
random_state=np.random.RandomState(0),
site=hand.pinch_site,
target_pos=pos,
target_quat=quat)
self.assertTrue(success)
# Check that the observations are as expected.
observed_pos = hand.observables.pinch_site_pos(physics)
np.testing.assert_allclose(observed_pos, pos, atol=1e-3)
observed_rmat = hand.observables.pinch_site_rmat(physics).reshape(3, 3)
expected_rmat = np.empty((3, 3), np.double)
mjlib.mju_quat2Mat(expected_rmat.ravel(), quat)
difference_rmat = observed_rmat.dot(expected_rmat.T)
# `difference_rmat` might not be perfectly orthonormal, which could lead to
# an invalid value being passed to arccos.
u, _, vt = np.linalg.svd(difference_rmat, full_matrices=False)
ortho_difference_rmat = u.dot(vt)
angular_difference = np.arccos((np.trace(ortho_difference_rmat) - 1) / 2)
self.assertLess(angular_difference, 1e-3)
if __name__ == '__main__':
absltest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from ddp.messages.client import MethodMessage
from ddp.messages.client import MethodMessageParser
class MethodMessageParserTestCase(unittest.TestCase):
def setUp(self):
self.parser = MethodMessageParser()
def test_parse(self):
id = 'id'
method = 'method'
params = [True, 1.0]
message = self.parser.parse({'msg': 'method', 'id': id,
'method': method, 'params': params})
self.assertEqual(message, MethodMessage(id, method, params))
|
"""The tests for the Tasmota binary sensor platform."""
import copy
from datetime import timedelta
import json
from unittest.mock import patch
from hatasmota.utils import (
get_topic_stat_result,
get_topic_stat_status,
get_topic_tele_sensor,
get_topic_tele_will,
)
from homeassistant.components import binary_sensor
from homeassistant.components.tasmota.const import DEFAULT_PREFIX
from homeassistant.const import (
ATTR_ASSUMED_STATE,
EVENT_STATE_CHANGED,
STATE_OFF,
STATE_ON,
)
import homeassistant.core as ha
import homeassistant.util.dt as dt_util
from .test_common import (
DEFAULT_CONFIG,
help_test_availability,
help_test_availability_discovery_update,
help_test_availability_poll_state,
help_test_availability_when_connection_lost,
help_test_discovery_device_remove,
help_test_discovery_removal,
help_test_discovery_update_unchanged,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
)
from tests.common import async_fire_mqtt_message, async_fire_time_changed
async def test_controlling_state_via_mqtt(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Test normal state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"ON"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"OFF"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
# Test periodic state update
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Switch1":"ON"}')
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Switch1":"OFF"}')
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
# Test polled state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Switch1":"ON"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Switch1":"OFF"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
# Test force update flag
entity = hass.data["entity_components"]["binary_sensor"].get_entity(
"binary_sensor.tasmota_binary_sensor_1"
)
assert entity.force_update
async def test_controlling_state_via_mqtt_switchname(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Custom Name"
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.custom_name")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Test normal state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Custom Name":{"Action":"ON"}}'
)
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_ON
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Custom Name":{"Action":"OFF"}}'
)
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_OFF
# Test periodic state update
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Custom Name":"ON"}')
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_ON
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Custom Name":"OFF"}')
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_OFF
# Test polled state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Custom Name":"ON"}}'
)
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_ON
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Custom Name":"OFF"}}'
)
state = hass.states.get("binary_sensor.custom_name")
assert state.state == STATE_OFF
async def test_pushon_controlling_state_via_mqtt(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 13
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == "unavailable"
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# Test normal state update
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"ON"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"OFF"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
# Test periodic state update is ignored
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/SENSOR", '{"Switch1":"ON"}')
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
# Test polled state update is ignored
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/STATUS10", '{"StatusSNS":{"Switch1":"ON"}}'
)
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
async def test_friendly_names(hass, mqtt_mock, setup_tasmota):
"""Test state update via MQTT."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swc"][1] = 1
config["swn"][1] = "Beer"
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == "unavailable"
assert state.attributes.get("friendly_name") == "Tasmota binary_sensor 1"
state = hass.states.get("binary_sensor.beer")
assert state.state == "unavailable"
assert state.attributes.get("friendly_name") == "Beer"
async def test_off_delay(hass, mqtt_mock, setup_tasmota):
"""Test off_delay option."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 13 # PUSHON: 1s off_delay
mac = config["mac"]
async_fire_mqtt_message(
hass,
f"{DEFAULT_PREFIX}/{mac}/config",
json.dumps(config),
)
await hass.async_block_till_done()
events = []
@ha.callback
def callback(event):
"""Verify event got called."""
events.append(event.data["new_state"].state)
hass.bus.async_listen(EVENT_STATE_CHANGED, callback)
async_fire_mqtt_message(hass, "tasmota_49A3BC/tele/LWT", "Online")
await hass.async_block_till_done()
assert events == ["off"]
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"ON"}}'
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
assert events == ["off", "on"]
async_fire_mqtt_message(
hass, "tasmota_49A3BC/stat/RESULT", '{"Switch1":{"Action":"ON"}}'
)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_ON
assert events == ["off", "on", "on"]
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=1))
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.tasmota_binary_sensor_1")
assert state.state == STATE_OFF
assert events == ["off", "on", "on", "off"]
async def test_availability_when_connection_lost(
hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
"""Test availability after MQTT disconnection."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
await help_test_availability_when_connection_lost(
hass, mqtt_client_mock, mqtt_mock, binary_sensor.DOMAIN, config
)
async def test_availability(hass, mqtt_mock, setup_tasmota):
"""Test availability."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
await help_test_availability(hass, mqtt_mock, binary_sensor.DOMAIN, config)
async def test_availability_discovery_update(hass, mqtt_mock, setup_tasmota):
"""Test availability discovery update."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
await help_test_availability_discovery_update(
hass, mqtt_mock, binary_sensor.DOMAIN, config
)
async def test_availability_poll_state(
hass, mqtt_client_mock, mqtt_mock, setup_tasmota
):
"""Test polling after MQTT connection (re)established."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
poll_topic = "tasmota_49A3BC/cmnd/STATUS"
await help_test_availability_poll_state(
hass,
mqtt_client_mock,
mqtt_mock,
binary_sensor.DOMAIN,
config,
poll_topic,
"10",
)
async def test_discovery_removal_binary_sensor(hass, mqtt_mock, caplog, setup_tasmota):
"""Test removal of discovered binary_sensor."""
config1 = copy.deepcopy(DEFAULT_CONFIG)
config2 = copy.deepcopy(DEFAULT_CONFIG)
config1["swc"][0] = 1
config2["swc"][0] = 0
config1["swn"][0] = "Test"
config2["swn"][0] = "Test"
await help_test_discovery_removal(
hass, mqtt_mock, caplog, binary_sensor.DOMAIN, config1, config2
)
async def test_discovery_update_unchanged_binary_sensor(
hass, mqtt_mock, caplog, setup_tasmota
):
"""Test update of discovered binary_sensor."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
with patch(
"homeassistant.components.tasmota.binary_sensor.TasmotaBinarySensor.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, binary_sensor.DOMAIN, config, discovery_update
)
async def test_discovery_device_remove(hass, mqtt_mock, setup_tasmota):
"""Test device registry remove."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
unique_id = f"{DEFAULT_CONFIG['mac']}_binary_sensor_switch_0"
await help_test_discovery_device_remove(
hass, mqtt_mock, binary_sensor.DOMAIN, unique_id, config
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock, setup_tasmota):
"""Test MQTT subscriptions are managed when entity_id is updated."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
topics = [
get_topic_stat_result(config),
get_topic_tele_sensor(config),
get_topic_stat_status(config, 10),
get_topic_tele_will(config),
]
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, binary_sensor.DOMAIN, config, topics
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock, setup_tasmota):
"""Test MQTT discovery update when entity_id is updated."""
config = copy.deepcopy(DEFAULT_CONFIG)
config["swc"][0] = 1
config["swn"][0] = "Test"
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, binary_sensor.DOMAIN, config
)
|
"""Code that creates simple startup projects."""
from pathlib import Path
from enum import Enum
import subprocess
import shutil
import sys
import os
import re
from glob import glob
from mesonbuild import mesonlib
from mesonbuild.environment import detect_ninja
from mesonbuild.templates.samplefactory import sameple_generator
import typing as T
if T.TYPE_CHECKING:
import argparse
'''
We currently have only one Meson template.
'''
from mesonbuild.templates.mesontemplates import create_meson_build
FORTRAN_SUFFIXES = {'.f', '.for', '.F', '.f90', '.F90'}
LANG_SUFFIXES = {'.c', '.cc', '.cpp', '.cs', '.cu', '.d', '.m', '.mm', '.rs', '.java', '.vala'} | FORTRAN_SUFFIXES
LANG_SUPPORTED = {'c', 'cpp', 'cs', 'cuda', 'd', 'fortran', 'java', 'rust', 'objc', 'objcpp', 'vala'}
DEFAULT_PROJECT = 'executable'
DEFAULT_VERSION = '0.1'
class DEFAULT_TYPES(Enum):
EXE = 'executable'
LIB = 'library'
INFO_MESSAGE = '''Sample project created. To build it run the
following commands:
meson setup builddir
meson compile -C builddir
'''
def create_sample(options: 'argparse.Namespace') -> None:
'''
Based on the arguments passed, we check for a matching language,
then check the project type and create a new Meson sample project.
'''
sample_gen = sameple_generator(options)
if options.type == DEFAULT_TYPES['EXE'].value:
sample_gen.create_executable()
elif options.type == DEFAULT_TYPES['LIB'].value:
sample_gen.create_library()
else:
raise RuntimeError('Unreachable code')
print(INFO_MESSAGE)
def autodetect_options(options: 'argparse.Namespace', sample: bool = False) -> None:
'''
Here we autodetect options for args not passed in so the user
doesn't have to think about them.
'''
if not options.name:
options.name = Path().resolve().stem
if not re.match('[a-zA-Z_][a-zA-Z0-9]*', options.name) and sample:
raise SystemExit(f'Name of current directory "{options.name}" is not usable as a sample project name.\n'
'Specify a project name with --name.')
print(f'Using "{options.name}" (name of current directory) as project name.')
if not options.executable:
options.executable = options.name
print(f'Using "{options.executable}" (project name) as name of executable to build.')
if sample:
# The rest of the autodetection is not applicable to generating sample projects.
return
if not options.srcfiles:
srcfiles = []
for f in (f for f in Path().iterdir() if f.is_file()):
if f.suffix in LANG_SUFFIXES:
srcfiles.append(f)
if not srcfiles:
raise SystemExit('No recognizable source files found.\n'
'Run meson init in an empty directory to create a sample project.')
options.srcfiles = srcfiles
print("Detected source files: " + ' '.join(map(str, srcfiles)))
options.srcfiles = [Path(f) for f in options.srcfiles]
if not options.language:
for f in options.srcfiles:
if f.suffix == '.c':
options.language = 'c'
break
if f.suffix in ('.cc', '.cpp'):
options.language = 'cpp'
break
if f.suffix == '.cs':
options.language = 'cs'
break
if f.suffix == '.cu':
options.language = 'cuda'
break
if f.suffix == '.d':
options.language = 'd'
break
if f.suffix in FORTRAN_SUFFIXES:
options.language = 'fortran'
break
if f.suffix == '.rs':
options.language = 'rust'
break
if f.suffix == '.m':
options.language = 'objc'
break
if f.suffix == '.mm':
options.language = 'objcpp'
break
if f.suffix == '.java':
options.language = 'java'
break
if f.suffix == '.vala':
options.language = 'vala'
break
if not options.language:
raise SystemExit("Can't autodetect language, please specify it with -l.")
print("Detected language: " + options.language)
def add_arguments(parser: 'argparse.ArgumentParser') -> None:
'''
Here we add the arguments that the user can pass when making a new
Meson project.
'''
parser.add_argument("srcfiles", metavar="sourcefile", nargs="*", help="source files. default: all recognized files in current directory")
parser.add_argument('-C', dest='wd', action=mesonlib.RealPathAction,
help='directory to cd into before running')
parser.add_argument("-n", "--name", help="project name. default: name of current directory")
parser.add_argument("-e", "--executable", help="executable name. default: project name")
parser.add_argument("-d", "--deps", help="dependencies, comma-separated")
parser.add_argument("-l", "--language", choices=sorted(LANG_SUPPORTED), help="project language. default: autodetected based on source files")
parser.add_argument("-b", "--build", action='store_true', help="build after generation")
parser.add_argument("--builddir", default='build', help="directory for build")
parser.add_argument("-f", "--force", action="store_true", help="force overwrite of existing files and directories.")
parser.add_argument('--type', default=DEFAULT_PROJECT, choices=('executable', 'library'), help=f"project type. default: {DEFAULT_PROJECT} based project")
parser.add_argument('--version', default=DEFAULT_VERSION, help=f"project version. default: {DEFAULT_VERSION}")
def run(options: 'argparse.Namespace') -> int:
'''
Here we generate the new Meson sample project.
'''
if not Path(options.wd).exists():
sys.exit('Project source root directory not found. Run this command in source directory root.')
os.chdir(options.wd)
if not glob('*'):
autodetect_options(options, sample=True)
if not options.language:
print('Defaulting to generating a C language project.')
options.language = 'c'
create_sample(options)
else:
autodetect_options(options)
if Path('meson.build').is_file() and not options.force:
raise SystemExit('meson.build already exists. Use --force to overwrite.')
create_meson_build(options)
if options.build:
if Path(options.builddir).is_dir() and options.force:
print('Build directory already exists, deleting it.')
shutil.rmtree(options.builddir)
print('Building...')
cmd = mesonlib.get_meson_command() + [options.builddir]
ret = subprocess.run(cmd)
if ret.returncode:
raise SystemExit
cmd = detect_ninja() + ['-C', options.builddir]
ret = subprocess.run(cmd)
if ret.returncode:
raise SystemExit
return 0
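
# Usage sketch (added for illustration; not part of the original module): wire the
# parser up by hand and generate a sample C executable project, roughly what
# `meson init -n demo -l c` does. Run it from an empty directory with meson installed;
# the project name "demo" is illustrative.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(prog='meson init')
    add_arguments(parser)
    # --type defaults to 'executable' and -C defaults to the current directory.
    opts = parser.parse_args(['--name', 'demo', '--language', 'c'])
    raise SystemExit(run(opts))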
|
from SimpleCV import Camera
cam = Camera()
while True:
# Get Image from camera
img = cam.getImage()
# Make image black and white
img = img.binarize()
# Draw the text "Hello World" on image
img.drawText("Hello World!")
# Show the image
img.show()
|
class RelationHistory(In.entity.Entity):
'''RelationHistory Entity class.
'''
def __init__(self, data = None, items = None, **args):
# default
self.relation_id = 0
self.action = ''
self.actor_entity_type = ''
self.actor_entity_id = 0
self.message = ''
super().__init__(data, items, **args)
@IN.register('RelationHistory', type = 'Entitier')
class RelationHistoryEntitier(In.entity.EntityEntitier):
'''Base RelationHistory Entitier'''
# RelationHistory needs entity insert/update/delete hooks
invoke_entity_hook = False
# load all is very heavy
entity_load_all = False
@IN.register('RelationHistory', type = 'Model')
class RelationHistoryModel(In.entity.EntityModel):
'''RelationHistory Model'''
@IN.hook
def entity_model():
return {
'RelationHistory' : { # entity name
'table' : { # table
'name' : 'relation_history',
'columns' : { # table columns / entity attributes
'id' : {},
'type' : {},
'created' : {},
'status' : {},
'nabar_id' : {},
'relation_id' : {
'type' : 'int', 'unsigned' : True, 'not null' : True,
'description' : 'RelationHistory Id',
},
'actor_entity_type' : {
'type' : 'varchar', 'length' : 32, 'not null' : True,
},
'actor_entity_id' : {
'type' : 'int', 'unsigned' : True, 'not null' : True,
},
'message' : {
'type' : 'varchar', 'length' : 32, 'not null' : True,
},
},
'keys' : {
'primary' : 'id',
},
},
},
}
@IN.register('RelationHistory', type = 'Themer')
class RelationHistoryThemer(In.entity.EntityThemer):
'''RelationHistory themer'''
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'ToIndexStore.basemodel_ptr'
db.delete_column(u'catalog_toindexstore', u'basemodel_ptr_id')
# Adding field 'ToIndexStore.id'
db.execute('ALTER TABLE "catalog_toindexstore" ADD COLUMN "id" SERIAL NOT NULL PRIMARY KEY')
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'ToIndexStore.basemodel_ptr'
raise RuntimeError("Cannot reverse this migration. 'ToIndexStore.basemodel_ptr' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Adding field 'ToIndexStore.basemodel_ptr'
db.add_column(u'catalog_toindexstore', u'basemodel_ptr',
self.gf('django.db.models.fields.related.OneToOneField')(to=orm['catalog.BaseModel'], unique=True, primary_key=True),
keep_default=False)
# Deleting field 'ToIndexStore.id'
db.delete_column(u'catalog_toindexstore', u'id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'catalog.abstractlike': {
'Meta': {'object_name': 'AbstractLike', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'liked_time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.abstracttop': {
'Meta': {'object_name': 'AbstractTop', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'recorded_time': ('django.db.models.fields.DateTimeField', [], {})
},
'catalog.basemodel': {
'Meta': {'object_name': 'BaseModel'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.comment': {
'Meta': {'object_name': 'Comment', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.documentation': {
'Meta': {'object_name': 'Documentation', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'})
},
'catalog.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.likemakey': {
'Meta': {'object_name': 'LikeMakey', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"})
},
'catalog.likeproduct': {
'Meta': {'object_name': 'LikeProduct', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"})
},
'catalog.likeproductdescription': {
'Meta': {'object_name': 'LikeProductDescription', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductDescription']"})
},
'catalog.likeproductimage': {
'Meta': {'object_name': 'LikeProductImage', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductImage']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"})
},
'catalog.likeproducttutorial': {
'Meta': {'object_name': 'LikeProductTutorial', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"})
},
'catalog.likeshop': {
'Meta': {'object_name': 'LikeShop', '_ormbases': ['catalog.AbstractLike']},
u'abstractlike_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractLike']", 'unique': 'True', 'primary_key': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"})
},
'catalog.list': {
'Meta': {'object_name': 'List', '_ormbases': ['catalog.BaseModel']},
'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.ListItem']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.listgroup': {
'Meta': {'object_name': 'ListGroup', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.List']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'catalog.listitem': {
'Meta': {'object_name': 'ListItem', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"})
},
'catalog.location': {
'Meta': {'object_name': 'Location', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'catalog.logidenticalproduct': {
'Meta': {'object_name': 'LogIdenticalProduct', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'product1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product1'", 'to': "orm['catalog.Product']"}),
'product2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product2'", 'to': "orm['catalog.Product']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.makey': {
'Meta': {'object_name': 'Makey', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Comment']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeydocumentations'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Documentation']"}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeynotes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Note']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
'catalog.note': {
'Meta': {'object_name': 'Note', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.product': {
'Meta': {'object_name': 'Product', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']", 'null': 'True', 'blank': 'True'}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'sku': ('django.db.models.fields.IntegerField', [], {}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.Tutorial']", 'symmetrical': 'False', 'blank': 'True'})
},
'catalog.productdescription': {
'Meta': {'object_name': 'ProductDescription', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productdescriptions'", 'to': "orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'blank': 'True'}),
'user_or_shop': ('django.db.models.fields.BooleanField', [], {})
},
'catalog.productimage': {
'Meta': {'object_name': 'ProductImage', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productimages'", 'to': "orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
'catalog.productshopurl': {
'Meta': {'object_name': 'ProductShopUrl', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productshopurls'", 'to': "orm['catalog.Product']"}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.searchlog': {
'Meta': {'object_name': 'SearchLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
'catalog.shop': {
'Meta': {'object_name': 'Shop', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.toindexstore': {
'Meta': {'object_name': 'ToIndexStore'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.topmakeys': {
'Meta': {'object_name': 'TopMakeys', '_ormbases': ['catalog.AbstractTop']},
u'abstracttop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractTop']", 'unique': 'True', 'primary_key': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"})
},
'catalog.topproducts': {
'Meta': {'object_name': 'TopProducts', '_ormbases': ['catalog.AbstractTop']},
u'abstracttop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractTop']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"})
},
'catalog.topshops': {
'Meta': {'object_name': 'TopShops', '_ormbases': ['catalog.AbstractTop']},
u'abstracttop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractTop']", 'unique': 'True', 'primary_key': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"})
},
'catalog.toptutorials': {
'Meta': {'object_name': 'TopTutorials', '_ormbases': ['catalog.AbstractTop']},
u'abstracttop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractTop']", 'unique': 'True', 'primary_key': 'True'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"})
},
'catalog.topusers': {
'Meta': {'object_name': 'TopUsers', '_ormbases': ['catalog.AbstractTop']},
u'abstracttop_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.AbstractTop']", 'unique': 'True', 'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.tutorial': {
'Meta': {'object_name': 'Tutorial', '_ormbases': ['catalog.BaseModel']},
u'basemodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.BaseModel']", 'unique': 'True', 'primary_key': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tutorialimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'django_facebook.facebookcustomuser': {
'Meta': {'object_name': 'FacebookCustomUser'},
'about_me': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'access_token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'blog_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'facebook_open_graph': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'facebook_profile_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'new_token_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['catalog']
|
import copy
import mock
from oslo_serialization import jsonutils
import webob
from nova.api.openstack.compute import image_metadata as image_metadata_v21
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import image_fixtures
IMAGE_FIXTURES = image_fixtures.get_image_fixtures()
CHK_QUOTA_STR = 'nova.api.openstack.common.check_img_metadata_properties_quota'
def get_image_123():
return copy.deepcopy(IMAGE_FIXTURES)[0]
class ImageMetaDataTestV21(test.NoDBTestCase):
controller_class = image_metadata_v21.ImageMetadataController
invalid_request = exception.ValidationError
base_path = '/v2/%s/images/' % fakes.FAKE_PROJECT_ID
def setUp(self):
super(ImageMetaDataTestV21, self).setUp()
self.controller = self.controller_class()
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_index(self, get_all_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata')
res_dict = self.controller.index(req, '123')
expected = {'metadata': {'key1': 'value1'}}
self.assertEqual(res_dict, expected)
get_all_mocked.assert_called_once_with(mock.ANY, '123')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_show(self, get_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
res_dict = self.controller.show(req, '123', 'key1')
self.assertIn('meta', res_dict)
self.assertEqual(len(res_dict['meta']), 1)
self.assertEqual('value1', res_dict['meta']['key1'])
get_mocked.assert_called_once_with(mock.ANY, '123')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_show_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key9')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, '123', 'key9')
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_show_image_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '100/metadata/key1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, '100', 'key9')
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_create(self, get_mocked, update_mocked, quota_mocked):
mock_result = copy.deepcopy(get_image_123())
mock_result['properties']['key7'] = 'value7'
update_mocked.return_value = mock_result
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata')
req.method = 'POST'
body = {"metadata": {"key7": "value7"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = self.controller.create(req, '123', body=body)
get_mocked.assert_called_once_with(mock.ANY, '123')
expected = copy.deepcopy(get_image_123())
expected['properties'] = {
'key1': 'value1', # existing meta
'key7': 'value7' # new meta
}
quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
expected_output = {'metadata': {'key1': 'value1', 'key7': 'value7'}}
self.assertEqual(expected_output, res)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_create_image_not_found(self, _get_mocked, update_mocked,
quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '100/metadata')
req.method = 'POST'
body = {"metadata": {"key7": "value7"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, '100', body=body)
self.assertFalse(quota_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_update_all(self, get_mocked, update_mocked, quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata')
req.method = 'PUT'
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = self.controller.update_all(req, '123', body=body)
get_mocked.assert_called_once_with(mock.ANY, '123')
expected = copy.deepcopy(get_image_123())
expected['properties'] = {
'key9': 'value9' # replace meta
}
quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
expected_output = {'metadata': {'key9': 'value9'}}
self.assertEqual(expected_output, res)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_update_all_image_not_found(self, _get_mocked, quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '100/metadata')
req.method = 'PUT'
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body=body)
self.assertFalse(quota_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_update_item(self, _get_mocked, update_mocked, quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "zz"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
res = self.controller.update(req, '123', 'key1', body=body)
expected = copy.deepcopy(get_image_123())
expected['properties'] = {
'key1': 'zz' # changed meta
}
quota_mocked.assert_called_once_with(mock.ANY, expected["properties"])
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
expected_output = {'meta': {'key1': 'zz'}}
self.assertEqual(res, expected_output)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_update_item_image_not_found(self, _get_mocked, quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '100/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "zz"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, '100', 'key1',
body=body)
self.assertFalse(quota_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get')
def test_update_item_bad_body(self, get_mocked, update_mocked,
quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
req.method = 'PUT'
body = {"key1": "zz"}
req.body = b''
req.headers["content-type"] = "application/json"
self.assertRaises(self.invalid_request,
self.controller.update, req, '123', 'key1',
body=body)
self.assertFalse(get_mocked.called)
self.assertFalse(quota_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR,
side_effect=webob.exc.HTTPBadRequest())
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get')
def test_update_item_too_many_keys(self, get_mocked, update_mocked,
_quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"foo": "bar"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, '123', 'key1',
body=body)
self.assertFalse(get_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_update_item_body_uri_mismatch(self, _get_mocked, update_mocked,
quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, '123', 'bad',
body=body)
self.assertFalse(quota_mocked.called)
self.assertFalse(update_mocked.called)
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_delete(self, _get_mocked, update_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
req.method = 'DELETE'
res = self.controller.delete(req, '123', 'key1')
expected = copy.deepcopy(get_image_123())
expected['properties'] = {}
update_mocked.assert_called_once_with(mock.ANY, '123', expected,
data=None, purge_props=True)
self.assertIsNone(res)
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_delete_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/blah')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, '123', 'blah')
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotFound(image_id='100'))
def test_delete_image_not_found(self, _get_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '100/metadata/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, '100', 'key1')
@mock.patch(CHK_QUOTA_STR,
side_effect=webob.exc.HTTPForbidden(explanation=''))
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_too_many_metadata_items_on_create(self, _get_mocked,
update_mocked, _quota_mocked):
body = {"metadata": {"foo": "bar"}}
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata')
req.method = 'POST'
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, req, '123', body=body)
self.assertFalse(update_mocked.called)
@mock.patch(CHK_QUOTA_STR,
side_effect=webob.exc.HTTPForbidden(explanation=''))
@mock.patch('nova.image.api.API.update')
@mock.patch('nova.image.api.API.get', return_value=get_image_123())
def test_too_many_metadata_items_on_put(self, _get_mocked,
update_mocked, _quota_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/blah')
req.method = 'PUT'
body = {"meta": {"blah": "blah", "blah1": "blah1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.invalid_request,
self.controller.update, req, '123', 'blah',
body=body)
self.assertFalse(update_mocked.called)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_image_not_authorized_update(self, _get_mocked):
req = fakes.HTTPRequest.blank(self.base_path + '123/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.update, req, '123', 'key1',
body=body)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_image_not_authorized_update_all(self, _get_mocked):
image_id = 131
# see nova.tests.unit.api.openstack.fakes:_make_image_fixtures
req = fakes.HTTPRequest.blank(self.base_path +
'%s/metadata/key1' % image_id)
req.method = 'PUT'
body = {"metadata": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.update_all, req, image_id,
body=body)
@mock.patch('nova.image.api.API.get',
side_effect=exception.ImageNotAuthorized(image_id='123'))
def test_image_not_authorized_create(self, _get_mocked):
image_id = 131
# see nova.tests.unit.api.openstack.fakes:_make_image_fixtures
req = fakes.HTTPRequest.blank(self.base_path +
'%s/metadata/key1' % image_id)
req.method = 'POST'
body = {"metadata": {"key1": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.create, req, image_id,
body=body)
class ImageMetadataControllerV239(test.NoDBTestCase):
def setUp(self):
super(ImageMetadataControllerV239, self).setUp()
self.controller = image_metadata_v21.ImageMetadataController()
self.req = fakes.HTTPRequest.blank('', version='2.39')
def test_not_found_for_all_image_metadata_api(self):
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.index, self.req)
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.show, self.req, fakes.FAKE_UUID)
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.create, self.req,
fakes.FAKE_UUID, {'metadata': {}})
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.update, self.req,
fakes.FAKE_UUID, 'id', {'metadata': {}})
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.update_all, self.req,
fakes.FAKE_UUID, {'metadata': {}})
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.delete, self.req, fakes.FAKE_UUID)
|
"""File system module."""
import posixpath
from functools import wraps as implements
import ibis.common.exceptions as com
from ibis.config import options
class HDFSError(com.IbisError):
"""HDFS Error class."""
pass
class HDFS:
"""Interface class to HDFS.
Interface class to HDFS for ibis that abstracts away (and protects
user/developer against) various 3rd party library API differences.
"""
def exists(self, path: str) -> bool:
"""Check if the file exists.
Parameters
----------
path : string
Returns
-------
bool
Raises
------
NotImplementedError
"""
raise NotImplementedError
def status(self, path: str) -> dict:
"""Check if the status of the path.
Parameters
----------
path : string
Returns
-------
status : dict
Raises
------
NotImplementedError
"""
raise NotImplementedError
def chmod(self, hdfs_path: str, permissions: str):
"""Change permissions of a file of directory.
Parameters
----------
hdfs_path : string
Directory or path
permissions : string
Octal permissions string
Raises
------
NotImplementedError
"""
raise NotImplementedError
def chown(self, hdfs_path: str, owner: str = None, group: str = None):
"""Change owner (and/or group) of a file or directory.
Parameters
----------
hdfs_path : string
Directory or path
owner : string, optional
Name of owner
group : string, optional
Name of group
Raises
------
NotImplementedError
"""
raise NotImplementedError
def head(
self, hdfs_path: str, nbytes: int = 1024, offset: int = 0
) -> bytes:
"""Retrieve the requested number of bytes from a file.
Parameters
----------
hdfs_path : string
Absolute HDFS path
nbytes : int, default 1024 (1K)
Number of bytes to retrieve
offset : int, default 0
Number of bytes at beginning of file to skip before retrieving data
Returns
-------
head_data : bytes
Raises
------
NotImplementedError
"""
raise NotImplementedError
def get(
self, hdfs_path: str, local_path: str = '.', overwrite: bool = False
) -> str:
"""
Download remote file or directory to the local filesystem.
Parameters
----------
hdfs_path : string
local_path : string, default '.'
overwrite : bool, default False
Further keyword arguments passed down to any internal API used.
Returns
-------
written_path : string
The path to the written file or directory
Raises
------
NotImplementedError
"""
raise NotImplementedError
def put(
self,
hdfs_path: str,
resource,
overwrite: bool = False,
verbose: bool = None,
**kwargs,
) -> str:
"""
Write file or directory to HDFS.
Parameters
----------
hdfs_path : string
Directory or path
resource : string or buffer-like
Relative or absolute path to local resource, or a file-like object
overwrite : boolean, default False
verbose : boolean, default ibis options.verbose
Further keyword arguments passed down to any internal API used.
Returns
-------
written_path : string
The path to the written file or directory
Raises
------
NotImplementedError
"""
raise NotImplementedError
def put_tarfile(
self,
hdfs_path: str,
local_path: str,
compression: str = 'gzip',
verbose: bool = None,
overwrite: bool = False,
):
"""
Write contents of tar archive to HDFS.
Write contents of tar archive to HDFS directly without having to
decompress it locally first.
Parameters
----------
hdfs_path : string
local_path : string
compression : {'gzip', 'bz2', None}
overwrite : boolean, default False
verbose : boolean, default None (global default)
Raises
------
ValueError
if given compression is none of the following: None, gzip or bz2.
"""
import tarfile
modes = {None: 'r', 'gzip': 'r:gz', 'bz2': 'r:bz2'}
if compression not in modes:
raise ValueError(f'Invalid compression type {compression}')
mode = modes[compression]
tf = tarfile.open(local_path, mode=mode)
for info in tf:
if not info.isfile():
continue
buf = tf.extractfile(info)
abspath = posixpath.join(hdfs_path, info.path)
self.put(abspath, buf, verbose=verbose, overwrite=overwrite)
def put_zipfile(self, hdfs_path: str, local_path: str):
"""Write contents of zipfile archive to HDFS.
Parameters
----------
hdfs_path : string
local_path : string
Raises
------
NotImplementedError
"""
raise NotImplementedError
def write(
self,
hdfs_path: str,
buf,
overwrite: bool = False,
blocksize: int = None,
replication=None,
buffersize: int = None,
):
"""HDFS Write function.
Parameters
----------
hdfs_path : string
buf
        overwrite : bool, default False
blocksize : int
replication
buffersize : int
Raises
------
NotImplementedError
"""
raise NotImplementedError
def mkdir(self, path: str):
"""Create new directory.
Parameters
----------
path : string
"""
pass
def ls(self, hdfs_path: str, status: bool = False) -> list:
"""Return contents of directory.
Parameters
----------
hdfs_path : string
status : bool
Returns
-------
list
Raises
------
NotImplementedError
"""
raise NotImplementedError
def size(self, hdfs_path: str) -> int:
"""Return total size of file or directory.
Parameters
----------
        hdfs_path : string
Returns
-------
size : int
Raises
------
NotImplementedError
"""
raise NotImplementedError
def tail(self, hdfs_path: str, nbytes: int = 1024) -> bytes:
"""Retrieve the requested number of bytes from the end of a file.
Parameters
----------
hdfs_path : string
nbytes : int
Returns
-------
data_tail : bytes
Raises
------
NotImplementedError
"""
raise NotImplementedError
def mv(
self, hdfs_path_src: str, hdfs_path_dest: str, overwrite: bool = True
):
"""Move hdfs_path_src to hdfs_path_dest.
Parameters
----------
hdfs_path_src: string
hdfs_path_dest: string
overwrite : boolean, default True
Overwrite hdfs_path_dest if it exists.
Raises
------
NotImplementedError
"""
raise NotImplementedError
def cp(self, hdfs_path_src: str, hdfs_path_dest: str):
"""Copy hdfs_path_src to hdfs_path_dest.
Parameters
----------
hdfs_path_src : string
hdfs_path_dest : string
Raises
------
NotImplementedError
"""
raise NotImplementedError
def rm(self, path: str):
"""Delete a single file.
Parameters
----------
path : string
"""
return self.delete(path)
def rmdir(self, path: str):
"""Delete a directory and all its contents.
Parameters
----------
path : string
"""
self.client.delete(path, recursive=True)
def _find_any_file(self, hdfs_dir):
contents = self.ls(hdfs_dir, status=True)
def valid_filename(name):
head, tail = posixpath.split(name)
tail = tail.lower()
return (
not tail.endswith('.tmp')
and not tail.endswith('.copying')
and not tail.startswith('_')
and not tail.startswith('.')
)
for filename, meta in contents:
if meta['type'].lower() == 'file' and valid_filename(filename):
return filename
raise com.IbisError('No files found in the passed directory')
class WebHDFS(HDFS):
"""A WebHDFS-based interface to HDFS using the HDFSCli library."""
def __init__(self, client):
self.client = client
@property
def protocol(self) -> str:
"""Return the protocol used by WebHDFS.
Returns
-------
protocol : string
"""
return 'webhdfs'
def status(self, path: str) -> dict:
"""Retrieve HDFS metadata for path.
Parameters
----------
path : str
Returns
-------
status : dict
Client status
"""
return self.client.status(path)
@implements(HDFS.chmod)
def chmod(self, path: str, permissions: str):
"""Change the permissions of a HDFS file.
Parameters
----------
path : string
permissions : string
New octal permissions string of the file.
"""
self.client.set_permission(path, permissions)
@implements(HDFS.chown)
def chown(self, path: str, owner=None, group=None):
"""
Change the owner of a HDFS file.
At least one of `owner` and `group` must be specified.
Parameters
----------
        path : string
            HDFS path
        owner : string, optional
        group : string, optional
"""
self.client.set_owner(path, owner, group)
@implements(HDFS.exists)
    def exists(self, path: str) -> bool:
"""Check if the HDFS file exists.
Parameters
----------
path : string
Returns
-------
bool
"""
        return self.client.status(path, strict=False) is not None
@implements(HDFS.ls)
def ls(self, hdfs_path: str, status: bool = False) -> list:
"""Return contents of directory.
Parameters
----------
hdfs_path : string
status : bool
Returns
-------
list
"""
return self.client.list(hdfs_path, status=status)
@implements(HDFS.mkdir)
def mkdir(self, dir_path: str):
"""Create new directory.
Parameters
----------
        dir_path : string
"""
self.client.makedirs(dir_path)
@implements(HDFS.size)
def size(self, hdfs_path: str) -> int:
"""Return total size of file or directory.
Parameters
----------
hdfs_path : string
Returns
-------
size : int
"""
return self.client.content(hdfs_path)['length']
@implements(HDFS.mv)
def mv(
self, hdfs_path_src: str, hdfs_path_dest: str, overwrite: bool = True
):
"""Move hdfs_path_src to hdfs_path_dest.
Parameters
----------
hdfs_path_src: string
hdfs_path_dest: string
overwrite : boolean, default True
Overwrite hdfs_path_dest if it exists.
"""
if overwrite and self.exists(hdfs_path_dest):
if self.status(hdfs_path_dest)['type'] == 'FILE':
self.rm(hdfs_path_dest)
self.client.rename(hdfs_path_src, hdfs_path_dest)
def delete(self, hdfs_path: str, recursive: bool = False) -> bool:
"""Delete a file located at `hdfs_path`.
Parameters
----------
hdfs_path : string
recursive : bool, default False
Returns
-------
bool
True if the function was successful.
"""
return self.client.delete(hdfs_path, recursive=recursive)
@implements(HDFS.head)
def head(
self, hdfs_path: str, nbytes: int = 1024, offset: int = 0
) -> bytes:
"""Retrieve the requested number of bytes from a file.
Parameters
----------
hdfs_path : string
Absolute HDFS path
nbytes : int, default 1024 (1K)
Number of bytes to retrieve
offset : int, default 0
Number of bytes at beginning of file to skip before retrieving data
Returns
-------
head_data : bytes
"""
_reader = self.client.read(hdfs_path, offset=offset, length=nbytes)
with _reader as reader:
return reader.read()
@implements(HDFS.put)
def put(
self,
hdfs_path: str,
resource,
overwrite: bool = False,
verbose: bool = None,
**kwargs,
):
"""
Write file or directory to HDFS.
Parameters
----------
hdfs_path : string
Directory or path
resource : string or buffer-like
Relative or absolute path to local resource, or a file-like object
overwrite : boolean, default False
verbose : boolean, default ibis options.verbose
Further keyword arguments passed down to any internal API used.
Returns
-------
written_path : string
The path to the written file or directory
"""
verbose = verbose or options.verbose
if isinstance(resource, str):
# `resource` is a path.
return self.client.upload(
hdfs_path, resource, overwrite=overwrite, **kwargs
)
else:
# `resource` is a file-like object.
hdfs_path = self.client.resolve(hdfs_path)
self.client.write(
hdfs_path, data=resource, overwrite=overwrite, **kwargs
)
return hdfs_path
@implements(HDFS.get)
def get(
self,
hdfs_path: str,
local_path: str,
overwrite: bool = False,
verbose: bool = None,
**kwargs,
) -> str:
"""
Download remote file or directory to the local filesystem.
Parameters
----------
hdfs_path : string
local_path : string, default '.'
overwrite : bool, default False
Further keyword arguments passed down to any internal API used.
Returns
-------
written_path : string
The path to the written file or directory
"""
verbose = verbose or options.verbose
return self.client.download(
hdfs_path, local_path, overwrite=overwrite, **kwargs
)
def hdfs_connect(
host='localhost',
port=50070,
protocol='webhdfs',
use_https='default',
auth_mechanism='NOSASL',
verify=True,
session=None,
**kwds,
):
"""Connect to HDFS.
Parameters
----------
host : str
Host name of the HDFS NameNode
port : int
NameNode's WebHDFS port
protocol : str,
The protocol used to communicate with HDFS. The only valid value is
``'webhdfs'``.
use_https : bool
Connect to WebHDFS with HTTPS, otherwise plain HTTP. For secure
authentication, the default for this is True, otherwise False.
auth_mechanism : str
Set to NOSASL or PLAIN for non-secure clusters.
Set to GSSAPI or LDAP for Kerberos-secured clusters.
verify : bool
Set to :data:`False` to turn off verifying SSL certificates.
session : Optional[requests.Session]
A custom :class:`requests.Session` object.
Notes
-----
Other keywords are forwarded to HDFS library classes.
Returns
-------
WebHDFS
"""
import requests
if session is None:
session = requests.Session()
session.verify = verify
if auth_mechanism in ('GSSAPI', 'LDAP'):
from hdfs.ext.kerberos import KerberosClient
if use_https == 'default':
prefix = 'https'
else:
prefix = 'https' if use_https else 'http'
# note SSL
url = f'{prefix}://{host}:{port}'
kwds.setdefault('mutual_auth', 'OPTIONAL')
hdfs_client = KerberosClient(url, session=session, **kwds)
else:
if use_https == 'default':
prefix = 'http'
else:
prefix = 'https' if use_https else 'http'
from hdfs.client import InsecureClient
url = f'{prefix}://{host}:{port}'
hdfs_client = InsecureClient(url, session=session, **kwds)
return WebHDFS(hdfs_client)
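# A minimal usage sketch (added for illustration; the NameNode host, port, and
# paths below are hypothetical and not part of this module):
#
#     hdfs = hdfs_connect(host='namenode.example.com', port=50070)
#     hdfs.mkdir('/user/ibis/tmp')
#     hdfs.put('/user/ibis/tmp/data.csv', 'local_data.csv', overwrite=True)
#     print(hdfs.ls('/user/ibis/tmp'))
#     print(hdfs.head('/user/ibis/tmp/data.csv', nbytes=64))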
|
import common
import ports
import os
import subprocess
import time
await_seconds = 1  # Async invocations must be verified (or used) after a delay.
def setup_container_port(docker_pid=None, port_name=None, port_ip_addr=None):
"""
Push the vport to the container namespace.
"""
# Push the KNI port to namespace.
common.run_local_cmd(
"ip link set {} netns {}".format(port_name, docker_pid))
time.sleep(await_seconds)
common.run_local_cmd(
"ip netns exec {} ifconfig {} up".format(docker_pid, port_name))
time.sleep(await_seconds)
common.run_local_cmd("ip netns exec {} ifconfig {} {}".format(
docker_pid, port_name, port_ip_addr))
time.sleep(await_seconds)
common.run_local_cmd(
"ip netns exec {} ifconfig {} promisc".format(docker_pid, port_name))
time.sleep(await_seconds)
def setup_network_namespace(docker_pid=None, port_name=None):
"""
Setup the network namespace for the docker container.
"""
# Namespace configuration.
proc_filepath = "/proc/{}/ns/net".format(docker_pid)
netns_filepath = "/var/run/netns/{}".format(docker_pid)
    # Proc dir won't exist if the container is not running.
if not os.path.isfile(proc_filepath):
print('proc pid dir does not exist. {}'.format(proc_filepath))
exit()
# Create a symbolic link.
common.run_local_cmd("ln -sf {} {}".format(proc_filepath, netns_filepath))
# Wait for linking to be successful.
time.sleep(await_seconds)
# Check if the netns is correctly setup.
if not os.path.isfile(netns_filepath):
print('netns pid dir does not exist. {}'.format(netns_filepath))
exit()
# Verify that the KNI port (exposed by DPDK) is up.
kni_ports = common.get_kni_ports()
if port_name not in kni_ports:
print('KNI {} not up'.format(port_name))
exit()
# Get port ip and ethernet address.
port_ip_addr = common.run_local_cmd(
common.get_port_ip(port_name), get_output=True)
port_eth_addr = common.run_local_cmd(
common.get_port_ether(port_name), get_output=True)
# Verify port is setup with a valid ip and ethernet address.
if not common.is_ipv4(port_ip_addr):
        print('Port {} does not have an assigned IP addr'.format(port_name))
exit()
if not common.is_mac(port_eth_addr):
        print('Port {} does not have an assigned ether addr'.format(port_name))
exit()
setup_container_port(docker_pid=docker_pid,
port_name=port_name, port_ip_addr=port_ip_addr)
return port_eth_addr, port_ip_addr
def start_container(command=None, port_name=None, name=None):
"""
Start the docker containers. Setup the network namespace.
"""
docker_pid_cmd = "%s %s" % ("docker inspect -f {{.State.Pid}}", name)
print('Docker command: {}'.format(command))
# Start the docker.
common.run_local_cmd(command)
# Wait for container to run.
time.sleep(await_seconds)
docker_pid = common.run_local_cmd(docker_pid_cmd, get_output=True)
# If there is no such container with this 'name', then this will error out.
docker_pid = int(docker_pid)
port_eth_addr, port_ip_addr = setup_network_namespace(
docker_pid=docker_pid, port_name=port_name)
# Get the container IP address and Ethernet address.
container_port_ip_addr = common.run_local_cmd(
common.get_container_port_ip(str(docker_pid), port_name), get_output=True)
container_port_eth_addr = common.run_local_cmd(
common.get_container_port_ether(str(docker_pid), port_name), get_output=True)
# Verify the correctness of the port and ethernet addr.
if container_port_ip_addr != port_ip_addr:
print('Incorrect IP within container: Container {}, Host {}'
.format(container_port_ip_addr, port_ip_addr))
print(common.get_container_port_ip(str(docker_pid), port_name))
exit()
if container_port_eth_addr != port_eth_addr:
print('Incorrect Ether within container: Container {}, Host {}'
.format(container_port_eth_addr, port_eth_addr))
print(common.get_container_port_ether(str(docker_pid), port_name))
exit()
# Store the configuration.
ret = {
'name': name,
'netns': docker_pid,
'pid': docker_pid,
'ip_addr': container_port_ip_addr,
'eth_addr': container_port_eth_addr,
'port_name': port_name,
'command': command
}
return ret
def connect(node_a, node_b):
"""
Setup the IP route and ARP table in the containers.
Inputs:
- node_a : Container 'a' conf.
- node_b : Container 'b' conf.
    node_a and node_b are interchangeable.
"""
route_a2b = ("ip netns exec {} ip route add {} dev {}"
.format(node_a['netns'],
node_b['ip_addr'],
node_a['port_name']))
route_b2a = ("ip netns exec {} ip route add {} dev {}"
.format(node_b['netns'],
node_a['ip_addr'],
node_b['port_name']))
arp_a2b = ("ip netns exec {} arp -s {} {}"
.format(node_a['netns'],
node_b['ip_addr'],
node_b['eth_addr']))
arp_b2a = ("ip netns exec {} arp -s {} {}"
.format(node_b['netns'],
node_a['ip_addr'],
node_a['eth_addr']))
common.run_local_cmd(route_a2b)
common.run_local_cmd(route_b2a)
common.run_local_cmd(arp_a2b)
common.run_local_cmd(arp_b2a)
def dns(node_this, node_other):
"""
Setup the DNS in 'node_this' so that 'node_other'
can be reached by name (e.g., resolved) instead of IP.
Input:
    - node_this: Container whose DNS is to be updated.
    - node_other: Container that should be reachable.
"""
command = ("docker exec -u root -it {} bash -c \"echo \'{} {}\' >> /etc/hosts\""
.format(node_this['name'], node_other['ip_addr'], node_other['name']))
os.popen(command)
time.sleep(await_seconds)
def number_of_running_processes():
"""
Return the count of running containers.
"""
n_docker = common.run_local_cmd('expr $(docker ps -a | wc -l) - 1', get_output=True)
return int(n_docker)
def stop_all_docker_containers():
"""
Stop all containers.
"""
common.run_local_cmd('docker stop $(docker ps -a -q)')
time.sleep(await_seconds)
def remove_all_docker_containers():
"""
Remove all containers.
"""
common.run_local_cmd('docker rm $(docker ps -a -q)')
time.sleep(await_seconds)
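# A minimal orchestration sketch (illustrative only; the docker run commands and
# the KNI port names vEth0/vEth1 are assumptions, not part of this script):
#
#     node_a = start_container(command='docker run -d --name node_a ubuntu sleep infinity',
#                              port_name='vEth0', name='node_a')
#     node_b = start_container(command='docker run -d --name node_b ubuntu sleep infinity',
#                              port_name='vEth1', name='node_b')
#     connect(node_a, node_b)  # static routes + ARP entries between the two namespaces
#     dns(node_a, node_b)      # make node_b resolvable by name inside node_a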
|
from __future__ import print_function
from optparse import OptionParser
import json
import os
import pdb
import pickle
import sys
import h5py
import numpy as np
import pandas as pd
import pysam
import pyBigWig
import tensorflow as tf
if tf.__version__[0] == '1':
tf.compat.v1.enable_eager_execution()
from basenji import bed
from basenji import dna_io
from basenji import seqnn
from basenji import stream
'''
basenji_predict_bed.py
Predict sequences from a BED file.
'''
def main():
usage = 'usage: %prog [options] <model_file> <bed_file>'
parser = OptionParser(usage)
parser.add_option('-b', dest='bigwig_indexes',
default=None, help='Comma-separated list of target indexes to write BigWigs')
parser.add_option('-e', dest='embed_layer',
default=None, type='int',
help='Embed sequences using the specified layer index.')
parser.add_option('-f', dest='genome_fasta',
default=None,
help='Genome FASTA for sequences [Default: %default]')
parser.add_option('-g', dest='genome_file',
default=None,
help='Chromosome length information [Default: %default]')
parser.add_option('-l', dest='site_length',
default=None, type='int',
help='Prediction site length. [Default: model seq_length]')
parser.add_option('-o', dest='out_dir',
default='pred_out',
help='Output directory [Default: %default]')
# parser.add_option('--plots', dest='plots',
# default=False, action='store_true',
# help='Make heatmap plots [Default: %default]')
parser.add_option('-p', dest='processes',
default=None, type='int',
help='Number of processes, passed by multi script')
parser.add_option('--rc', dest='rc',
default=False, action='store_true',
help='Ensemble forward and reverse complement predictions [Default: %default]')
parser.add_option('-s', dest='sum',
default=False, action='store_true',
help='Sum site predictions [Default: %default]')
parser.add_option('--shifts', dest='shifts',
default='0',
help='Ensemble prediction shifts [Default: %default]')
parser.add_option('--species', dest='species',
default='human')
parser.add_option('-t', dest='targets_file',
default=None, type='str',
help='File specifying target indexes and labels in table format')
(options, args) = parser.parse_args()
if len(args) == 2:
model_file = args[0]
bed_file = args[1]
elif len(args) == 4:
# multi worker
options_pkl_file = args[0]
model_file = args[1]
bed_file = args[2]
worker_index = int(args[3])
# load options
options_pkl = open(options_pkl_file, 'rb')
options = pickle.load(options_pkl)
options_pkl.close()
# update output directory
options.out_dir = '%s/job%d' % (options.out_dir, worker_index)
else:
    parser.error('Must provide model file and BED file')
if not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
options.shifts = [int(shift) for shift in options.shifts.split(',')]
if options.bigwig_indexes is not None:
options.bigwig_indexes = [int(bi) for bi in options.bigwig_indexes.split(',')]
else:
options.bigwig_indexes = []
if len(options.bigwig_indexes) > 0:
bigwig_dir = '%s/bigwig' % options.out_dir
if not os.path.isdir(bigwig_dir):
os.mkdir(bigwig_dir)
#################################################################
  # read targets file and collect target information
if options.targets_file is None:
target_slice = None
else:
targets_df = pd.read_table(options.targets_file, index_col=0)
target_slice = targets_df.index
#################################################################
# setup model
seqnn_model = tf.saved_model.load(model_file).model
# query num model targets
seq_length = seqnn_model.predict_on_batch.input_signature[0].shape[1]
null_1hot = np.zeros((1,seq_length,4))
null_preds = seqnn_model.predict_on_batch(null_1hot)
null_preds = null_preds[options.species].numpy()
_, preds_length, preds_depth = null_preds.shape
# hack sizes
preds_window = 128
seq_crop = (seq_length - preds_length*preds_window) // 2
#################################################################
# sequence dataset
if options.site_length is None:
options.site_length = preds_window*preds_length
print('site_length: %d' % options.site_length)
# construct model sequences
model_seqs_dna, model_seqs_coords = bed.make_bed_seqs(
bed_file, options.genome_fasta,
seq_length, stranded=False)
# construct site coordinates
site_seqs_coords = bed.read_bed_coords(bed_file, options.site_length)
  # filter sequences for this worker
if options.processes is not None:
worker_bounds = np.linspace(0, len(model_seqs_dna), options.processes+1, dtype='int')
model_seqs_dna = model_seqs_dna[worker_bounds[worker_index]:worker_bounds[worker_index+1]]
model_seqs_coords = model_seqs_coords[worker_bounds[worker_index]:worker_bounds[worker_index+1]]
site_seqs_coords = site_seqs_coords[worker_bounds[worker_index]:worker_bounds[worker_index+1]]
num_seqs = len(model_seqs_dna)
#################################################################
# setup output
assert(preds_length % 2 == 0)
preds_mid = preds_length // 2
assert(options.site_length % preds_window == 0)
site_preds_length = options.site_length // preds_window
assert(site_preds_length % 2 == 0)
site_preds_start = preds_mid - site_preds_length//2
site_preds_end = site_preds_start + site_preds_length
# initialize HDF5
out_h5_file = '%s/predict.h5' % options.out_dir
if os.path.isfile(out_h5_file):
os.remove(out_h5_file)
out_h5 = h5py.File(out_h5_file, 'w')
# create predictions
if options.sum:
out_h5.create_dataset('preds', shape=(num_seqs, preds_depth), dtype='float16')
else:
out_h5.create_dataset('preds', shape=(num_seqs, site_preds_length, preds_depth), dtype='float16')
# store site coordinates
site_seqs_chr, site_seqs_start, site_seqs_end = zip(*site_seqs_coords)
site_seqs_chr = np.array(site_seqs_chr, dtype='S')
site_seqs_start = np.array(site_seqs_start)
site_seqs_end = np.array(site_seqs_end)
out_h5.create_dataset('chrom', data=site_seqs_chr)
out_h5.create_dataset('start', data=site_seqs_start)
out_h5.create_dataset('end', data=site_seqs_end)
#################################################################
# predict scores, write output
# define sequence generator
def seqs_gen():
for seq_dna in model_seqs_dna:
yield dna_io.dna_1hot(seq_dna)
# initialize predictions stream
preds_stream = stream.PredStreamSonnet(seqnn_model, seqs_gen(),
rc=options.rc, shifts=options.shifts, species=options.species)
for si in range(num_seqs):
preds_seq = preds_stream[si]
# slice site
preds_site = preds_seq[site_preds_start:site_preds_end,:]
# write
if options.sum:
out_h5['preds'][si] = preds_site.sum(axis=0)
else:
out_h5['preds'][si] = preds_site
# write bigwig
for ti in options.bigwig_indexes:
bw_file = '%s/s%d_t%d.bw' % (bigwig_dir, si, ti)
bigwig_write(preds_seq[:,ti], model_seqs_coords[si], bw_file,
options.genome_file, seq_crop)
# close output HDF5
out_h5.close()
def bigwig_open(bw_file, genome_file):
""" Open the bigwig file for writing and write the header. """
bw_out = pyBigWig.open(bw_file, 'w')
chrom_sizes = []
for line in open(genome_file):
a = line.split()
chrom_sizes.append((a[0], int(a[1])))
bw_out.addHeader(chrom_sizes)
return bw_out
def bigwig_write(signal, seq_coords, bw_file, genome_file, seq_crop=0):
""" Write a signal track to a BigWig file over the region
specified by seqs_coords.
Args
signal: Sequences x Length signal array
seq_coords: (chr,start,end)
bw_file: BigWig filename
genome_file: Chromosome lengths file
seq_crop: Sequence length cropped from each side of the sequence.
"""
target_length = len(signal)
# open bigwig
bw_out = bigwig_open(bw_file, genome_file)
# initialize entry arrays
entry_starts = []
entry_ends = []
# set entries
chrm, start, end = seq_coords
preds_pool = (end - start - 2 * seq_crop) // target_length
bw_start = start + seq_crop
for li in range(target_length):
bw_end = bw_start + preds_pool
entry_starts.append(bw_start)
entry_ends.append(bw_end)
bw_start = bw_end
# add
bw_out.addEntries(
[chrm]*target_length,
entry_starts,
ends=entry_ends,
values=[float(s) for s in signal])
bw_out.close()
if __name__ == '__main__':
main()
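# Example invocation (illustrative; the FASTA, chromosome-sizes file, SavedModel
# directory, and BED file paths are assumptions, not shipped with this script):
#
#     python basenji_predict_bed.py -f hg38.fa -g hg38.chrom.sizes -o pred_out \
#         --rc --shifts "0,1" saved_model_dir sites.bed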
|
import signal
import boto.sqs
import ujson
from mobile_push.config import setting
from mobile_push.logger import logger
from mobile_push.message_router import MessageRouter
keep_running = True
def sigterm_handler(signum, _):
global keep_running
    logger.warning('Received SIGTERM')
keep_running = False
def get_queue():
conn = boto.sqs.connect_to_region(setting.get('sqs', 'region'))
return conn.get_queue(setting.get('sqs', 'queue'))
def poll_message(queue):
message = queue.read(wait_time_seconds=20)
if message is None:
return
try:
body = message.get_body()
units = ujson.loads(body)
except ValueError:
logger.error('Cannot parse: %s', body)
units = []
if not isinstance(units, list):
units = [units]
for unit in units:
try:
MessageRouter(unit).get_actor().run(unit)
except MessageRouter.BaseError:
logger.error('Cannot route message: %s', ujson.dumps(unit))
except Exception as e:
logger.exception(e)
queue.delete_message(message)
def main():
signal.signal(signal.SIGTERM, sigterm_handler)
q = get_queue()
while keep_running:
poll_message(q)
if __name__ == '__main__':
main()
|
import os
import subprocess
from ruamel import yaml
import great_expectations as ge
context = ge.get_context()
gcp_project = os.environ.get("GE_TEST_GCP_PROJECT")
if not gcp_project:
raise ValueError(
"Environment Variable GE_TEST_GCP_PROJECT is required to run BigQuery integration tests"
)
great_expectations_yaml_file_path = os.path.join(
context.root_directory, "great_expectations.yml"
)
with open(great_expectations_yaml_file_path) as f:
great_expectations_yaml = yaml.safe_load(f)
stores = great_expectations_yaml["stores"]
pop_stores = ["checkpoint_store", "evaluation_parameter_store", "validations_store"]
for store in pop_stores:
stores.pop(store)
actual_existing_expectations_store = {}
actual_existing_expectations_store["stores"] = stores
actual_existing_expectations_store["expectations_store_name"] = great_expectations_yaml[
"expectations_store_name"
]
expected_existing_expectations_store_yaml = """
stores:
expectations_store:
class_name: ExpectationsStore
store_backend:
class_name: TupleFilesystemStoreBackend
base_directory: expectations/
expectations_store_name: expectations_store
"""
assert actual_existing_expectations_store == yaml.safe_load(
expected_existing_expectations_store_yaml
)
configured_expectations_store_yaml = """
stores:
expectations_GCS_store:
class_name: ExpectationsStore
store_backend:
class_name: TupleGCSStoreBackend
project: <YOUR GCP PROJECT NAME>
bucket: <YOUR GCS BUCKET NAME>
prefix: <YOUR GCS PREFIX NAME>
expectations_store_name: expectations_GCS_store
"""
configured_expectations_store = yaml.safe_load(configured_expectations_store_yaml)
configured_expectations_store["stores"]["expectations_GCS_store"]["store_backend"][
"project"
] = gcp_project
configured_expectations_store["stores"]["expectations_GCS_store"]["store_backend"][
"bucket"
] = "test_metadata_store"
configured_expectations_store["stores"]["expectations_GCS_store"]["store_backend"][
"prefix"
] = "how_to_configure_an_expectation_store_in_gcs/expectations"
try:
# remove this bucket if there was a failure in the script last time
result = subprocess.run(
"gsutil rm -r gs://test_metadata_store/how_to_configure_an_expectation_store_in_gcs/expectations".split(),
check=True,
stderr=subprocess.PIPE,
)
except Exception:
    # ignore errors here; the bucket may simply not exist yet
    pass
context.add_store(
store_name=configured_expectations_store["expectations_store_name"],
store_config=configured_expectations_store["stores"]["expectations_GCS_store"],
)
with open(great_expectations_yaml_file_path) as f:
great_expectations_yaml = yaml.safe_load(f)
great_expectations_yaml["expectations_store_name"] = "expectations_GCS_store"
great_expectations_yaml["stores"]["expectations_GCS_store"]["store_backend"].pop(
"suppress_store_backend_id"
)
with open(great_expectations_yaml_file_path, "w") as f:
yaml.dump(great_expectations_yaml, f, default_flow_style=False)
expectation_suite_name = "my_expectation_suite"
context.create_expectation_suite(expectation_suite_name=expectation_suite_name)
copy_expectation_command = """
gsutil cp expectations/my_expectation_suite.json gs://<YOUR GCS BUCKET NAME>/<YOUR GCS PREFIX NAME>/my_expectation_suite.json
"""
local_expectation_suite_file_path = os.path.join(
context.root_directory, "expectations", f"{expectation_suite_name}.json"
)
copy_expectation_command = copy_expectation_command.replace(
"expectations/my_expectation_suite.json", local_expectation_suite_file_path
)
copy_expectation_command = copy_expectation_command.replace(
"<YOUR GCS BUCKET NAME>",
configured_expectations_store["stores"]["expectations_GCS_store"]["store_backend"][
"bucket"
],
)
copy_expectation_command = copy_expectation_command.replace(
"<YOUR GCS PREFIX NAME>/my_expectation_suite.json",
configured_expectations_store["stores"]["expectations_GCS_store"]["store_backend"][
"prefix"
]
+ f"/{expectation_suite_name}.json",
)
result = subprocess.run(
copy_expectation_command.strip().split(),
check=True,
stderr=subprocess.PIPE,
)
stderr = result.stderr.decode("utf-8")
copy_expectation_output = """
Operation completed over 1 objects
"""
assert copy_expectation_output.strip() in stderr
list_expectation_stores_command = """
great_expectations store list
"""
result = subprocess.run(
list_expectation_stores_command.strip().split(),
check=True,
stdout=subprocess.PIPE,
)
stdout = result.stdout.decode("utf-8")
list_expectation_stores_output = """
- name: expectations_GCS_store
class_name: ExpectationsStore
store_backend:
class_name: TupleGCSStoreBackend
project: <YOUR GCP PROJECT NAME>
bucket: <YOUR GCS BUCKET NAME>
prefix: <YOUR GCS PREFIX NAME>
"""
assert "expectations_GCS_store" in list_expectation_stores_output
assert "expectations_GCS_store" in stdout
assert "TupleGCSStoreBackend" in list_expectation_stores_output
assert "TupleGCSStoreBackend" in stdout
list_expectation_suites_command = """
great_expectations suite list
"""
result = subprocess.run(
list_expectation_suites_command.strip().split(),
check=True,
stdout=subprocess.PIPE,
)
stdout = result.stdout.decode("utf-8")
list_expectation_suites_output = """
1 Expectation Suite found:
- my_expectation_suite
"""
assert "1 Expectation Suite found:" in list_expectation_suites_output
assert "1 Expectation Suite found:" in stdout
assert "my_expectation_suite" in list_expectation_suites_output
assert "my_expectation_suite" in stdout
result = subprocess.run(
"gsutil rm -r gs://test_metadata_store/how_to_configure_an_expectation_store_in_gcs/expectations".split(),
check=True,
stderr=subprocess.PIPE,
)
|
"""Support for KNX/IP climate devices."""
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateDevice
from homeassistant.components.climate.const import (
STATE_DRY, STATE_ECO, STATE_FAN_ONLY, STATE_HEAT, STATE_IDLE, STATE_MANUAL,
SUPPORT_ON_OFF, SUPPORT_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE)
from homeassistant.const import ATTR_TEMPERATURE, CONF_NAME, TEMP_CELSIUS
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from . import ATTR_DISCOVER_DEVICES, DATA_KNX
CONF_SETPOINT_SHIFT_ADDRESS = 'setpoint_shift_address'
CONF_SETPOINT_SHIFT_STATE_ADDRESS = 'setpoint_shift_state_address'
CONF_SETPOINT_SHIFT_STEP = 'setpoint_shift_step'
CONF_SETPOINT_SHIFT_MAX = 'setpoint_shift_max'
CONF_SETPOINT_SHIFT_MIN = 'setpoint_shift_min'
CONF_TEMPERATURE_ADDRESS = 'temperature_address'
CONF_TARGET_TEMPERATURE_ADDRESS = 'target_temperature_address'
CONF_TARGET_TEMPERATURE_STATE_ADDRESS = 'target_temperature_state_address'
CONF_OPERATION_MODE_ADDRESS = 'operation_mode_address'
CONF_OPERATION_MODE_STATE_ADDRESS = 'operation_mode_state_address'
CONF_CONTROLLER_STATUS_ADDRESS = 'controller_status_address'
CONF_CONTROLLER_STATUS_STATE_ADDRESS = 'controller_status_state_address'
CONF_CONTROLLER_MODE_ADDRESS = 'controller_mode_address'
CONF_CONTROLLER_MODE_STATE_ADDRESS = 'controller_mode_state_address'
CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS = \
'operation_mode_frost_protection_address'
CONF_OPERATION_MODE_NIGHT_ADDRESS = 'operation_mode_night_address'
CONF_OPERATION_MODE_COMFORT_ADDRESS = 'operation_mode_comfort_address'
CONF_OPERATION_MODES = 'operation_modes'
CONF_ON_OFF_ADDRESS = 'on_off_address'
CONF_ON_OFF_STATE_ADDRESS = 'on_off_state_address'
CONF_MIN_TEMP = 'min_temp'
CONF_MAX_TEMP = 'max_temp'
DEFAULT_NAME = 'KNX Climate'
DEFAULT_SETPOINT_SHIFT_STEP = 0.5
DEFAULT_SETPOINT_SHIFT_MAX = 6
DEFAULT_SETPOINT_SHIFT_MIN = -6
DEPENDENCIES = ['knx']
OPERATION_MODES = {
# Map DPT 201.100 HVAC operating modes
"Frost Protection": STATE_MANUAL,
"Night": STATE_IDLE,
"Standby": STATE_ECO,
"Comfort": STATE_HEAT,
# Map DPT 201.104 HVAC control modes
"Fan only": STATE_FAN_ONLY,
"Dehumidification": STATE_DRY
}
OPERATION_MODES_INV = {value: key for key, value in OPERATION_MODES.items()}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_TEMPERATURE_ADDRESS): cv.string,
vol.Required(CONF_TARGET_TEMPERATURE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_TARGET_TEMPERATURE_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_STATE_ADDRESS): cv.string,
vol.Optional(CONF_SETPOINT_SHIFT_STEP,
default=DEFAULT_SETPOINT_SHIFT_STEP): vol.All(
float, vol.Range(min=0, max=2)),
vol.Optional(CONF_SETPOINT_SHIFT_MAX, default=DEFAULT_SETPOINT_SHIFT_MAX):
vol.All(int, vol.Range(min=0, max=32)),
vol.Optional(CONF_SETPOINT_SHIFT_MIN, default=DEFAULT_SETPOINT_SHIFT_MIN):
vol.All(int, vol.Range(min=-32, max=0)),
vol.Optional(CONF_OPERATION_MODE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_STATUS_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_STATUS_STATE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_MODE_ADDRESS): cv.string,
vol.Optional(CONF_CONTROLLER_MODE_STATE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_NIGHT_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODE_COMFORT_ADDRESS): cv.string,
vol.Optional(CONF_ON_OFF_ADDRESS): cv.string,
vol.Optional(CONF_ON_OFF_STATE_ADDRESS): cv.string,
vol.Optional(CONF_OPERATION_MODES):
vol.All(cv.ensure_list, [vol.In(OPERATION_MODES)]),
vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
})
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up climate(s) for KNX platform."""
if discovery_info is not None:
async_add_entities_discovery(hass, discovery_info, async_add_entities)
else:
async_add_entities_config(hass, config, async_add_entities)
@callback
def async_add_entities_discovery(hass, discovery_info, async_add_entities):
"""Set up climates for KNX platform configured within platform."""
entities = []
for device_name in discovery_info[ATTR_DISCOVER_DEVICES]:
device = hass.data[DATA_KNX].xknx.devices[device_name]
entities.append(KNXClimate(device))
async_add_entities(entities)
@callback
def async_add_entities_config(hass, config, async_add_entities):
"""Set up climate for KNX platform configured within platform."""
import xknx
climate_mode = xknx.devices.ClimateMode(
hass.data[DATA_KNX].xknx,
name=config.get(CONF_NAME) + " Mode",
group_address_operation_mode=config.get(CONF_OPERATION_MODE_ADDRESS),
group_address_operation_mode_state=config.get(
CONF_OPERATION_MODE_STATE_ADDRESS),
group_address_controller_status=config.get(
CONF_CONTROLLER_STATUS_ADDRESS),
group_address_controller_status_state=config.get(
CONF_CONTROLLER_STATUS_STATE_ADDRESS),
group_address_controller_mode=config.get(
CONF_CONTROLLER_MODE_ADDRESS),
group_address_controller_mode_state=config.get(
CONF_CONTROLLER_MODE_STATE_ADDRESS),
group_address_operation_mode_protection=config.get(
CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS),
group_address_operation_mode_night=config.get(
CONF_OPERATION_MODE_NIGHT_ADDRESS),
group_address_operation_mode_comfort=config.get(
CONF_OPERATION_MODE_COMFORT_ADDRESS),
operation_modes=config.get(
CONF_OPERATION_MODES))
hass.data[DATA_KNX].xknx.devices.add(climate_mode)
climate = xknx.devices.Climate(
hass.data[DATA_KNX].xknx,
name=config.get(CONF_NAME),
group_address_temperature=config[CONF_TEMPERATURE_ADDRESS],
group_address_target_temperature=config.get(
CONF_TARGET_TEMPERATURE_ADDRESS),
group_address_target_temperature_state=config[
CONF_TARGET_TEMPERATURE_STATE_ADDRESS],
group_address_setpoint_shift=config.get(CONF_SETPOINT_SHIFT_ADDRESS),
group_address_setpoint_shift_state=config.get(
CONF_SETPOINT_SHIFT_STATE_ADDRESS),
setpoint_shift_step=config.get(CONF_SETPOINT_SHIFT_STEP),
setpoint_shift_max=config.get(CONF_SETPOINT_SHIFT_MAX),
setpoint_shift_min=config.get(CONF_SETPOINT_SHIFT_MIN),
group_address_on_off=config.get(CONF_ON_OFF_ADDRESS),
group_address_on_off_state=config.get(CONF_ON_OFF_STATE_ADDRESS),
min_temp=config.get(CONF_MIN_TEMP),
max_temp=config.get(CONF_MAX_TEMP),
mode=climate_mode)
hass.data[DATA_KNX].xknx.devices.add(climate)
async_add_entities([KNXClimate(climate)])
class KNXClimate(ClimateDevice):
"""Representation of a KNX climate device."""
def __init__(self, device):
"""Initialize of a KNX climate device."""
self.device = device
self._unit_of_measurement = TEMP_CELSIUS
@property
def supported_features(self):
"""Return the list of supported features."""
support = SUPPORT_TARGET_TEMPERATURE
if self.device.mode.supports_operation_mode:
support |= SUPPORT_OPERATION_MODE
if self.device.supports_on_off:
support |= SUPPORT_ON_OFF
return support
async def async_added_to_hass(self):
"""Register callbacks to update hass after device was changed."""
async def after_update_callback(device):
"""Call after device was updated."""
await self.async_update_ha_state()
self.device.register_device_updated_cb(after_update_callback)
@property
def name(self):
"""Return the name of the KNX device."""
return self.device.name
@property
def available(self):
"""Return True if entity is available."""
return self.hass.data[DATA_KNX].connected
@property
def should_poll(self):
"""No polling needed within KNX."""
return False
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def current_temperature(self):
"""Return the current temperature."""
return self.device.temperature.value
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return self.device.setpoint_shift_step
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self.device.target_temperature.value
@property
def min_temp(self):
"""Return the minimum temperature."""
return self.device.target_temperature_min
@property
def max_temp(self):
"""Return the maximum temperature."""
return self.device.target_temperature_max
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
await self.device.set_target_temperature(temperature)
await self.async_update_ha_state()
@property
def current_operation(self):
"""Return current operation ie. heat, cool, idle."""
if self.device.mode.supports_operation_mode:
return OPERATION_MODES.get(self.device.mode.operation_mode.value)
return None
@property
def operation_list(self):
"""Return the list of available operation modes."""
return [OPERATION_MODES.get(operation_mode.value) for
operation_mode in
self.device.mode.operation_modes]
async def async_set_operation_mode(self, operation_mode):
"""Set operation mode."""
if self.device.mode.supports_operation_mode:
from xknx.knx import HVACOperationMode
knx_operation_mode = HVACOperationMode(
OPERATION_MODES_INV.get(operation_mode))
await self.device.mode.set_operation_mode(knx_operation_mode)
await self.async_update_ha_state()
@property
def is_on(self):
"""Return true if the device is on."""
if self.device.supports_on_off:
return self.device.is_on
return None
async def async_turn_on(self):
"""Turn on."""
await self.device.turn_on()
async def async_turn_off(self):
"""Turn off."""
await self.device.turn_off()
|
import argparse
import logging
import os
import stat
import subprocess
import sys
import time
import yaml
class ConfigurationError(Exception):
pass
def configure_waf_haproxy_cp(logger, run_dir, mgmt_ip, haproxy_cp_ip):
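    """Generate and run an expect script that SSHes into the WAF VM and configures
    Apache as a reverse proxy (with mod_security enabled) in front of the HAProxy
    connection point, then restarts httpd."""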
sh_file = "{}/waf_set_haproxy_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "centos"
set addr {mgmt_ip}
set pw "centos"
set retry 0
set max 20
while {{ $retry < $max }} {{
sleep 5
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@$addr
set timeout 10
expect "yes/no" {{
send "yes\r"
expect "*?assword:" {{ send "$pw\r"; break }}
}} "*?assword:" {{ send "$pw\r"; break }}
set retry [ expr $retry+1 ]
if {{ $retry == $max }} {{
puts "Configuration timed out."
exit 1
}}
}}
expect "]$ "
send "sudo su\r"
expect "]# "
send "echo \"<VirtualHost *:80>\r"
send " AddDefaultCharset UTF-8\r"
send " ProxyPreserveHost On\r"
send " ProxyRequests off\r"
send " ProxyVia Off\r"
send " ProxyPass / http://{haproxy_cp_ip}:5000/\r"
send " ProxyPassReverse / http://{haproxy_cp_ip}:5000/\r"
send " </VirtualHost>\" > /etc/httpd/conf.d/waf_proxy.conf\r"
expect "]# "
send "echo \"<IfModule mod_security2.c>\r"
send " IncludeOptional modsecurity.d/owasp-modsecurity-crs/modsecurity_crs_10_setup.conf\r"
send " IncludeOptional modsecurity.d/owasp-modsecurity-crs/base_rules/*.conf\r\r"
send " SecRuleEngine On\r"
send " SecRequestBodyAccess On\r"
send " SecResponseBodyAccess On\r"
send " SecDebugLog /var/log/httpd/modsec-debug.log\r"
send " SecDebugLogLevel 3\r"
send "</IfModule>\" > /etc/httpd/conf.d/mod_security.conf\r"
expect "]# "
send "systemctl stop httpd\r"
expect "]# "
send "systemctl start httpd\r"
expect "]# "
'''.format(mgmt_ip=mgmt_ip, haproxy_cp_ip=haproxy_cp_ip))
os.chmod(sh_file, stat.S_IRWXU)
rc = subprocess.call(sh_file, shell=True)
if rc != 0:
raise ConfigurationError("HAProxy add waf config failed: {}".format(rc))
def configure_haproxy_add_waf(logger, run_dir, haproxy_mgmt_ip, waf_cp_ip, waf_server_name):
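    """Generate and run an expect script that SSHes into the HAProxy VM, appends the
    WAF server to the "WAF list" backend in haproxy.cfg (if not already present) and
    reloads haproxy."""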
sh_file = "{}/haproxy_add_waf_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "centos"
set addr {mgmt_ip}
set pw "centos"
set retry 0
set max 20
while {{ $retry < $max }} {{
sleep 5
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@$addr
set timeout 10
expect "yes/no" {{
send "yes\r"
expect "*?assword:" {{ send "$pw\r"; break }}
}} "*?assword:" {{ send "$pw\r"; break }}
set retry [ expr $retry+1 ]
if {{ $retry == $max }} {{
puts "Configuration timed out."
exit 1
}}
}}
expect "]$ "
send "sudo su\r"
expect "]# "
send "grep \"server {waf_server_name} {waf_cp_ip}\" /etc/haproxy/haproxy.cfg && echo \"Already configured\" && exit 0\r"
expect {{
"]$ " {{ exit }}
"]# "
}}
send "sed -i \'s/\\(.*WAF list.*\\)/\\1\\n server {waf_server_name} {waf_cp_ip}:80 check/g\' /etc/haproxy/haproxy.cfg\r"
expect "]# "
send "systemctl reload haproxy\r"
expect "]# "
'''.format(mgmt_ip=haproxy_mgmt_ip, waf_cp_ip=waf_cp_ip, waf_server_name=waf_server_name))
os.chmod(sh_file, stat.S_IRWXU)
rc = subprocess.call(sh_file, shell=True)
if rc != 0:
raise ConfigurationError("HAProxy add waf config failed: {}".format(rc))
def configure_haproxy_remove_waf(logger, run_dir, haproxy_mgmt_ip, waf_server_name):
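    """Generate and run an expect script that SSHes into the HAProxy VM, removes the
    WAF server entry from haproxy.cfg and reloads haproxy."""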
sh_file = "{}/haproxy_remove_httpd_config-{}.sh".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logger.debug("Creating script file %s", sh_file)
with open(sh_file, "w") as f:
f.write(r'''#!/usr/bin/expect -f
set login "centos"
set addr {mgmt_ip}
set pw "centos"
set retry 0
set max 20
while {{ $retry < $max }} {{
sleep 5
spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $login@$addr
set timeout 10
expect "yes/no" {{
send "yes\r"
expect "*?assword:" {{ send "$pw\r"; break }}
}} "*?assword:" {{ send "$pw\r"; break }}
set retry [ expr $retry+1 ]
if {{ $retry == $max }} {{
puts "Configuration timed out."
exit 1
}}
}}
expect "]$ "
send "sudo su\r"
expect "]# "
send "sed -i \'/server {waf_server_name}/d\' /etc/haproxy/haproxy.cfg\r"
expect "]# "
send "systemctl reload haproxy\r"
expect "]# "
'''.format(mgmt_ip=haproxy_mgmt_ip, waf_server_name=waf_server_name))
os.chmod(sh_file, stat.S_IRWXU)
rc = subprocess.call(sh_file, shell=True)
if rc != 0:
raise ConfigurationError("HAProxy remove waf config failed: {}".format(rc))
def main(argv=sys.argv[1:]):
try:
parser = argparse.ArgumentParser()
parser.add_argument("yaml_cfg_file", type=argparse.FileType('r'))
parser.add_argument("--dry-run", action="store_true")
parser.add_argument("--quiet", "-q", dest="verbose", action="store_false")
args = parser.parse_args()
run_dir = os.path.join(os.environ['RIFT_INSTALL'], "var/run/rift")
if not os.path.exists(run_dir):
os.makedirs(run_dir)
log_file = "{}/rift_waf_config-{}.log".format(run_dir, time.strftime("%Y%m%d%H%M%S"))
logging.basicConfig(filename=log_file, level=logging.DEBUG)
logger = logging.getLogger()
ch = logging.StreamHandler()
if args.verbose:
ch.setLevel(logging.DEBUG)
else:
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
except Exception as e:
print("Got exception:{}".format(e))
raise
try:
dry_run = args.dry_run
yaml_str = args.yaml_cfg_file.read()
logger.debug("Input YAML file: %s", yaml_str)
        yaml_cfg = yaml.safe_load(yaml_str)
logger.debug("Input YAML cfg: %s", yaml_cfg)
# Check if this is post scale out trigger
def find_cp_ip(vnfr_list, vnfd_name, cp_name):
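            """Return the IP of the named connection point on the first VNFR whose name contains vnfd_name."""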
for vnfr in vnfr_list:
if vnfd_name in vnfr['name']:
for cp in vnfr['connection_points']:
logger.debug("Connection point: %s", format(cp))
if cp_name in cp['name']:
return cp['ip_address']
raise ValueError("Could not find vnfd %s connection point %s", vnfd_name, cp_name)
def find_mgmt_ip(vnfr_list, vnfd_name):
for vnfr in vnfr_list:
if vnfd_name in vnfr['name']:
return vnfr['rw_mgmt_ip']
raise ValueError("Could not find vnfd %s mgmt ip", vnfd_name)
def find_vnfr(vnfr_list, vnfd_name):
for vnfr in vnfr_list:
if vnfd_name in vnfr['name']:
return vnfr
raise ValueError("Could not find vnfd %s", vnfd_name)
haproxy_cp_ip = find_cp_ip(yaml_cfg['vnfrs_others'], "haproxy_vnfd", "cp0")
haproxy_mgmt_ip = find_mgmt_ip(yaml_cfg['vnfrs_others'], "haproxy_vnfd")
waf_cp_ip = find_cp_ip(yaml_cfg['vnfrs_in_group'], "waf_vnfd", "cp0")
waf_mgmt_ip = find_mgmt_ip(yaml_cfg['vnfrs_in_group'], "waf_vnfd")
waf_vnfr = find_vnfr(yaml_cfg['vnfrs_in_group'], "waf_vnfd")
# HAProxy wants to use a name without .'s
waf_server_name = waf_vnfr["name"].replace(".", "__")
if yaml_cfg['trigger'] == 'post_scale_out':
logger.debug("Sleeping for 60 seconds to give VNFD mgmt VM a chance to boot up")
time.sleep(60)
configure_haproxy_add_waf(logger, run_dir, haproxy_mgmt_ip, waf_cp_ip, waf_server_name)
configure_waf_haproxy_cp(logger, run_dir, waf_mgmt_ip, haproxy_cp_ip)
elif yaml_cfg['trigger'] == 'pre_scale_in':
configure_haproxy_remove_waf(logger, run_dir, haproxy_mgmt_ip, waf_server_name)
else:
raise ValueError("Unexpected trigger {}".format(yaml_cfg['trigger']))
except Exception as e:
logger.exception(e)
raise
if __name__ == "__main__":
main()
|
import sys
import time
try:
from unittest import mock
except ImportError:
import mock
import testtools
import threading
import six
from six.moves.queue import Queue, Empty
from swiftclient import multithreading as mt
from swiftclient.exceptions import ClientException
class ThreadTestCase(testtools.TestCase):
def setUp(self):
super(ThreadTestCase, self).setUp()
self.got_args_kwargs = Queue()
self.starting_thread_count = threading.active_count()
def _func(self, q_item, *args, **kwargs):
self.got_items.put(q_item)
self.got_args_kwargs.put((args, kwargs))
if q_item == 'go boom':
raise Exception('I went boom!')
if q_item == 'c boom':
raise ClientException(
'Client Boom', http_scheme='http', http_host='192.168.22.1',
http_port=80, http_path='/booze', http_status=404,
http_reason='to much', http_response_content='no sir!')
return 'best result EVAR!'
def assertQueueContains(self, queue, expected_contents):
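        """Drain the queue and assert its contents equal expected_contents (as a set if a set was given, otherwise as an ordered list)."""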
got_contents = []
try:
while True:
got_contents.append(queue.get(timeout=0.1))
except Empty:
pass
if isinstance(expected_contents, set):
got_contents = set(got_contents)
self.assertEqual(expected_contents, got_contents)
class TestQueueFunctionThread(ThreadTestCase):
def setUp(self):
super(TestQueueFunctionThread, self).setUp()
self.input_queue = Queue()
self.got_items = Queue()
self.stored_results = []
self.qft = mt.QueueFunctionThread(self.input_queue, self._func,
'one_arg', 'two_arg',
red_fish='blue_arg',
store_results=self.stored_results)
self.qft.start()
def tearDown(self):
if self.qft.is_alive():
self.finish_up_thread()
super(TestQueueFunctionThread, self).tearDown()
def finish_up_thread(self):
self.input_queue.put(mt.StopWorkerThreadSignal())
while self.qft.is_alive():
time.sleep(0.05)
def test_plumbing_and_store_results(self):
self.input_queue.put('abc')
self.input_queue.put(123)
self.finish_up_thread()
self.assertQueueContains(self.got_items, ['abc', 123])
self.assertQueueContains(self.got_args_kwargs, [
(('one_arg', 'two_arg'), {'red_fish': 'blue_arg'}),
(('one_arg', 'two_arg'), {'red_fish': 'blue_arg'})])
self.assertEqual(self.stored_results,
['best result EVAR!', 'best result EVAR!'])
def test_exception_handling(self):
self.input_queue.put('go boom')
self.input_queue.put('ok')
self.input_queue.put('go boom')
self.finish_up_thread()
self.assertQueueContains(self.got_items,
['go boom', 'ok', 'go boom'])
self.assertEqual(len(self.qft.exc_infos), 2)
self.assertEqual(Exception, self.qft.exc_infos[0][0])
self.assertEqual(Exception, self.qft.exc_infos[1][0])
self.assertEqual(('I went boom!',), self.qft.exc_infos[0][1].args)
self.assertEqual(('I went boom!',), self.qft.exc_infos[1][1].args)
class TestQueueFunctionManager(ThreadTestCase):
def setUp(self):
super(TestQueueFunctionManager, self).setUp()
self.thread_manager = mock.create_autospec(
mt.MultiThreadingManager, spec_set=True, instance=True)
self.thread_count = 4
self.error_counter = [0]
self.got_items = Queue()
self.stored_results = []
self.qfq = mt.QueueFunctionManager(
self._func, self.thread_count, self.thread_manager,
thread_args=('1arg', '2arg'),
thread_kwargs={'a': 'b', 'store_results': self.stored_results},
error_counter=self.error_counter,
connection_maker=self.connection_maker)
def connection_maker(self):
return 'yup, I made a connection'
def test_context_manager_without_error_counter(self):
self.qfq = mt.QueueFunctionManager(
self._func, self.thread_count, self.thread_manager,
thread_args=('1arg', '2arg'),
thread_kwargs={'a': 'b', 'store_results': self.stored_results},
connection_maker=self.connection_maker)
with self.qfq as input_queue:
self.assertEqual(self.starting_thread_count + self.thread_count,
threading.active_count())
input_queue.put('go boom')
self.assertEqual(self.starting_thread_count, threading.active_count())
error_strs = list(map(str, self.thread_manager.error.call_args_list))
self.assertEqual(1, len(error_strs))
self.assertTrue('Exception: I went boom!' in error_strs[0])
def test_context_manager_without_conn_maker_or_error_counter(self):
self.qfq = mt.QueueFunctionManager(
self._func, self.thread_count, self.thread_manager,
thread_args=('1arg', '2arg'), thread_kwargs={'a': 'b'})
with self.qfq as input_queue:
self.assertEqual(self.starting_thread_count + self.thread_count,
threading.active_count())
for i in range(20):
input_queue.put('slap%d' % i)
self.assertEqual(self.starting_thread_count, threading.active_count())
self.assertEqual([], self.thread_manager.error.call_args_list)
self.assertEqual(0, self.error_counter[0])
self.assertQueueContains(self.got_items,
set(['slap%d' % i for i in range(20)]))
self.assertQueueContains(
self.got_args_kwargs,
[(('1arg', '2arg'), {'a': 'b'})] * 20)
self.assertEqual(self.stored_results, [])
def test_context_manager_with_exceptions(self):
with self.qfq as input_queue:
self.assertEqual(self.starting_thread_count + self.thread_count,
threading.active_count())
for i in range(20):
input_queue.put('item%d' % i if i % 2 == 0 else 'go boom')
self.assertEqual(self.starting_thread_count, threading.active_count())
error_strs = list(map(str, self.thread_manager.error.call_args_list))
self.assertEqual(10, len(error_strs))
self.assertTrue(all(['Exception: I went boom!' in s for s in
error_strs]))
self.assertEqual(10, self.error_counter[0])
expected_items = set(['go boom'] +
['item%d' % i for i in range(20)
if i % 2 == 0])
self.assertQueueContains(self.got_items, expected_items)
self.assertQueueContains(
self.got_args_kwargs,
[(('yup, I made a connection', '1arg', '2arg'), {'a': 'b'})] * 20)
self.assertEqual(self.stored_results, ['best result EVAR!'] * 10)
def test_context_manager_with_client_exceptions(self):
with self.qfq as input_queue:
self.assertEqual(self.starting_thread_count + self.thread_count,
threading.active_count())
for i in range(20):
input_queue.put('item%d' % i if i % 2 == 0 else 'c boom')
self.assertEqual(self.starting_thread_count, threading.active_count())
error_strs = list(map(str, self.thread_manager.error.call_args_list))
self.assertEqual(10, len(error_strs))
stringification = 'Client Boom: ' \
'http://192.168.22.1:80/booze 404 to much no sir!'
self.assertTrue(all([stringification in s for s in error_strs]))
self.assertEqual(10, self.error_counter[0])
expected_items = set(['c boom'] +
['item%d' % i for i in range(20)
if i % 2 == 0])
self.assertQueueContains(self.got_items, expected_items)
self.assertQueueContains(
self.got_args_kwargs,
[(('yup, I made a connection', '1arg', '2arg'), {'a': 'b'})] * 20)
self.assertEqual(self.stored_results, ['best result EVAR!'] * 10)
def test_context_manager_with_connection_maker(self):
with self.qfq as input_queue:
self.assertEqual(self.starting_thread_count + self.thread_count,
threading.active_count())
for i in range(20):
input_queue.put('item%d' % i)
self.assertEqual(self.starting_thread_count, threading.active_count())
self.assertEqual([], self.thread_manager.error.call_args_list)
self.assertEqual(0, self.error_counter[0])
self.assertQueueContains(self.got_items,
set(['item%d' % i for i in range(20)]))
self.assertQueueContains(
self.got_args_kwargs,
[(('yup, I made a connection', '1arg', '2arg'), {'a': 'b'})] * 20)
self.assertEqual(self.stored_results, ['best result EVAR!'] * 20)
class TestMultiThreadingManager(ThreadTestCase):
@mock.patch('swiftclient.multithreading.QueueFunctionManager')
def test_instantiation(self, mock_qfq):
thread_manager = mt.MultiThreadingManager()
self.assertEqual([
mock.call(thread_manager._print, 1, thread_manager),
mock.call(thread_manager._print_error, 1, thread_manager),
], mock_qfq.call_args_list)
# These contexts don't get entered into until the
# MultiThreadingManager's context is entered.
self.assertEqual([], thread_manager.printer.__enter__.call_args_list)
self.assertEqual([],
thread_manager.error_printer.__enter__.call_args_list)
# Test default values for the streams.
self.assertEqual(sys.stdout, thread_manager.print_stream)
self.assertEqual(sys.stderr, thread_manager.error_stream)
@mock.patch('swiftclient.multithreading.QueueFunctionManager')
def test_queue_manager_no_args(self, mock_qfq):
thread_manager = mt.MultiThreadingManager()
mock_qfq.reset_mock()
mock_qfq.return_value = 'slap happy!'
self.assertEqual(
'slap happy!',
thread_manager.queue_manager(self._func, 88))
self.assertEqual([
mock.call(self._func, 88, thread_manager, thread_args=(),
thread_kwargs={}, connection_maker=None,
error_counter=None)
], mock_qfq.call_args_list)
@mock.patch('swiftclient.multithreading.QueueFunctionManager')
def test_queue_manager_with_args(self, mock_qfq):
thread_manager = mt.MultiThreadingManager()
mock_qfq.reset_mock()
mock_qfq.return_value = 'do run run'
self.assertEqual(
'do run run',
thread_manager.queue_manager(self._func, 88, 'fun', times='are',
connection_maker='abc', to='be had',
error_counter='def'))
self.assertEqual([
mock.call(self._func, 88, thread_manager, thread_args=('fun',),
thread_kwargs={'times': 'are', 'to': 'be had'},
connection_maker='abc', error_counter='def')
], mock_qfq.call_args_list)
def test_printers(self):
out_stream = six.StringIO()
err_stream = six.StringIO()
with mt.MultiThreadingManager(
print_stream=out_stream,
error_stream=err_stream) as thread_manager:
# Sanity-checking these gives power to the previous test which
# looked at the default values of thread_manager.print/error_stream
self.assertEqual(out_stream, thread_manager.print_stream)
self.assertEqual(err_stream, thread_manager.error_stream)
self.assertEqual(self.starting_thread_count + 2,
threading.active_count())
thread_manager.print_msg('one-argument')
thread_manager.print_msg('one %s, %d fish', 'fish', 88)
thread_manager.error('I have %d problems, but a %s is not one',
99, u'\u062A\u062A')
thread_manager.print_msg('some\n%s\nover the %r', 'where',
u'\u062A\u062A')
thread_manager.error('one-error-argument')
thread_manager.error('Sometimes\n%.1f%% just\ndoes not\nwork!',
3.14159)
self.assertEqual(self.starting_thread_count, threading.active_count())
out_stream.seek(0)
if six.PY3:
over_the = "over the '\u062a\u062a'\n"
else:
over_the = "over the u'\\u062a\\u062a'\n"
self.assertEqual([
'one-argument\n',
'one fish, 88 fish\n',
'some\n', 'where\n', over_the,
], list(out_stream.readlines()))
err_stream.seek(0)
first_item = u'I have 99 problems, but a \u062A\u062A is not one\n'
if six.PY2:
first_item = first_item.encode('utf8')
self.assertEqual([
first_item,
'one-error-argument\n',
'Sometimes\n', '3.1% just\n', 'does not\n', 'work!\n',
], list(err_stream.readlines()))
self.assertEqual(3, thread_manager.error_count)
if __name__ == '__main__':
testtools.main()
|
import concurrent
from concurrent.futures._base import Future
import json
from threading import Barrier
import time
import unittest
import requests_mock
from rpcclient.client import RpcClient
from rpcclient.deserialize import DictDeserializer
from rpcclient.exceptions import RemoteFailedError
from rpcclient.handlers import RequestHandler
from rpcclient.test.testutils import insert_id, create_mock_rpc_client
UNMAPPED_BEHAVIOUR = DictDeserializer.UnmappedBehaviour
__author__ = 'yoav.luft@ajillionmax.com'
class ClientTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.client = create_mock_rpc_client()
def test_login(self):
self.assertEqual(self.client.token, "yea")
@requests_mock.mock()
def test_get_first_level_method(self, mock):
mock.register_uri('POST', "http://server/api/", status_code=200, json=insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}}),
)
self.client.test(arg1="arg")
request = mock.request_history[-1].json()
self.assertRegex(request['jsonrpc'], '2.0')
self.assertRegex(request['method'], 'test')
self.assertIn('token', request['params'])
self.assertRegex(request['params']['token'], 'yea')
self.assertIn('arg1', request['params'])
self.assertRegex(request['params']['arg1'], 'arg')
@requests_mock.mock()
def test_get_second_level_method(self, mock):
mock.register_uri('POST', "http://server/api/", status_code=200, json=insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}}),
)
self.client.test.level2(arg1="arg")
request = mock.request_history[-1].json()
self.assertRegex(request['jsonrpc'], '2.0')
self.assertRegex(request['method'], 'test.level2')
self.assertIn('token', request['params'])
self.assertRegex(request['params']['token'], 'yea')
self.assertIn('arg1', request['params'])
self.assertRegex(request['params']['arg1'], 'arg')
@requests_mock.mock()
def test_async_request(self, mock):
mock.register_uri('POST', "http://server/api/", [
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report_token": "08d7d7bc608848668b3afa6b528a45d8"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "ready"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
start_time = time.time()
interval_time = 2
response = self.client.test.task(_sleep_interval=interval_time)
self.assertEqual(response, {"report": "success"})
self.assertGreater(time.time() - start_time, interval_time, "Expected request to wait between calls")
last_request = mock.request_history[-1].json()
self.assertIn('method', last_request)
self.assertRegex(last_request['method'], 'report.data.get')
self.assertIn('params', last_request)
self.assertIn('report_token', last_request['params'])
self.assertRegex(last_request['params']['report_token'], "08d7d7bc608848668b3afa6b528a45d8")
@requests_mock.mock()
def test_async_timeout(self, mock):
mock.register_uri('POST', "http://server/api/", [
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report_token": "08d7d7bc608848668b3afa6b528a45d8"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "ready"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
self.assertRaises(TimeoutError, self.client.test.task, _timeout=3, _sleep_interval=2)
@requests_mock.mock()
def test_async_timeout_from_configuration(self, mock):
mock.register_uri('POST', "http://server/api/", [
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report_token": "08d7d7bc608848668b3afa6b528a45d8"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "ready"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
self.client.configuration['timeout'] = 3
self.client.configuration['sleep_interval'] = 2
self.assertRaises(TimeoutError, self.client.test.task)
@requests_mock.mock()
def test_async_handler_ignores_single_failure_for_status(self, mock):
mock.register_uri('POST', "http://server/api/", [
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report_token": "08d7d7bc608848668b3afa6b528a45d8"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "ready"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
interval_time = 1
response = self.client.test.task(_sleep_interval=interval_time)
self.assertEqual(response, {"report": "success"})
def test_override_handlers(self):
called_with_params = {}
class MockHandler(RequestHandler):
def __init__(self, method, url, headers, token, configuration=None, **kwargs):
super().__init__(method, url, headers, token, configuration, **kwargs)
called_with_params['method'] = method
def handle(self, **kwargs):
return 'Mock value'
client = RpcClient(configuration={
'host': 'http://mockhost',
'handlers': [
(lambda *args, **kwargs: True, MockHandler)
],
'login': 'False token',
'username': '',
'password': '',
})
self.assertEqual(client.some.method(arg1='Argument'), 'Mock value')
self.assertEqual(called_with_params['method'], 'some.method')
self.assertEqual(client.token, 'False token')
@requests_mock.mock()
def test_async_can_run_in_different_thread(self, mock):
b = Barrier(2, timeout=5)
def block_response(response_dict):
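            """Build a response callback that waits on the barrier, so the test can observe the polling request still running in another thread."""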
def callback(request, context):
b.wait()
body = request.body
request_json = json.loads(body)
response_dict['id'] = request_json['id']
context.status_code = 200
return response_dict
return callback
mock.register_uri('POST', "http://server/api/", [
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report_token": "08d7d7bc608848668b3afa6b528a45d8"}})},
{'status_code': 200, 'json': block_response(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "processing"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"status": "ready"}})},
{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
response = self.client.test.task(_sleep_interval=0.5, _async=True)
b.wait()
self.assertIsInstance(response, Future)
self.assertTrue(response.running())
done, not_done = concurrent.futures.wait([response], timeout=5)
self.assertGreater(len(done), 0)
self.assertIsInstance(response.result(), dict)
@requests_mock.mock()
def test_return_result(self, mock):
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
response = self.client.test(arg1="arg")
self.assertEqual(response, {"report": "success"})
@requests_mock.mock()
def test_return_list_result(self, mock):
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": [1, 2, 3]})},
])
response = self.client.test(arg1="arg")
self.assertListEqual(response, [1, 2, 3])
@requests_mock.mock()
def test_raises_error_on_none_200(self, mock):
mock.register_uri('POST', "http://server/api/", json=insert_id({
"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}
}, status_code=500))
self.assertRaises(RemoteFailedError, self.client.test, arg1="arg1")
@requests_mock.mock()
def test_raises_error_on_response_error(self, mock):
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id({
"error": 1, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}
})}
])
self.assertRaises(RemoteFailedError, self.client.test, arg1="arg1")
@requests_mock.mock()
def test_raises_error_on_result_error(self, mock):
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id({
"error": None, "jsonrpc": "2.0", "id": {},
"result": {"error": "true"}
})}
])
self.assertRaises(RemoteFailedError, self.client.test, arg1="arg1")
class AutoDeserializationTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.client = create_mock_rpc_client()
@requests_mock.mock()
def test_deserializer_passed_in_method(self, mock):
class Result(object):
def __init__(self, report): self.report = report
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
result_deserializer = DictDeserializer(Result, unmapped_behaviour=UNMAPPED_BEHAVIOUR.TO_KWARGS)
response = self.client.test(_deserializer=result_deserializer)
self.assertIsInstance(response, Result)
self.assertEqual(response.report, "success")
@requests_mock.mock()
def test_deserializer_given_in_dictionary(self, mock):
class Result(object):
def __init__(self, report): self.report = report
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
result_deserializer = DictDeserializer(Result, unmapped_behaviour=UNMAPPED_BEHAVIOUR.TO_KWARGS)
client = RpcClient(configuration={
'host': 'http://server/',
'login': 'False token',
'username': '',
'password': '',
'deserializers': {
'test': result_deserializer,
}
})
response = client.test()
self.assertIsInstance(response, Result)
self.assertEqual(response.report, "success")
@requests_mock.mock()
def test_deserializer_given_in_dictionary_used_just_for_method(self, mock):
class Result(object):
def __init__(self, report): self.report = report
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
result_deserializer = DictDeserializer(Result, unmapped_behaviour=UNMAPPED_BEHAVIOUR.TO_KWARGS)
client = RpcClient(configuration={
'host': 'http://server/',
'login': 'False token',
'username': '',
'password': '',
'deserializers': {
'test': result_deserializer,
}
})
response = client.test2()
self.assertNotIsInstance(response, Result)
self.assertEqual(response, {"report": "success"})
@requests_mock.mock()
def test_deserializer_from_factory(self, mock):
class Result(object):
def __init__(self, report): self.report = report
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
result_deserializer = DictDeserializer(Result, unmapped_behaviour=UNMAPPED_BEHAVIOUR.TO_KWARGS)
client = RpcClient(configuration={
'host': 'http://server/',
'login': 'False token',
'username': '',
'password': '',
'deserializers': lambda method: result_deserializer if method == 'test' else None
})
response = client.test2()
self.assertNotIsInstance(response, Result)
self.assertEqual(response, {"report": "success"})
response = client.test()
self.assertIsInstance(response, Result)
self.assertEqual(response.report, "success")
@requests_mock.mock()
def test_deserializer_global_from_conf(self, mock):
class Result(object):
def __init__(self, report): self.report = report
mock.register_uri('POST', "http://server/api/",
[{'status_code': 200, 'json': insert_id(
{"error": None, "jsonrpc": "2.0", "id": {},
"result": {"report": "success"}})},
])
result_deserializer = DictDeserializer(Result, unmapped_behaviour=UNMAPPED_BEHAVIOUR.TO_KWARGS)
client = RpcClient(configuration={
'host': 'http://server/',
'login': 'False token',
'username': '',
'password': '',
'deserializers': result_deserializer
})
response = client.test()
self.assertIsInstance(response, Result)
self.assertEqual(response.report, "success")
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.check_process_status import check_process_status
from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
from resource_management.libraries.functions.version import compare_versions, format_stack_version
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
FILE_TYPE_XML
from resource_management.core.source import Template
from resource_management.core.logger import Logger
from yarn import yarn
from service import service
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
class HistoryServer(Script):
def get_component_name(self):
return "hadoop-mapreduce-historyserver"
def install(self, env):
self.install_packages(env)
def configure(self, env):
import params
env.set_params(params)
yarn(name="historyserver")
def pre_upgrade_restart(self, env, upgrade_type=None):
Logger.info("Executing Stack Upgrade pre-restart")
import params
env.set_params(params)
if params.version and compare_versions(format_stack_version(params.version), '4.0.0.0') >= 0:
conf_select.select(params.stack_name, "hadoop", params.version)
stack_select.select("hadoop-mapreduce-historyserver", params.version)
#Execute(format("iop-select set hadoop-mapreduce-historyserver {version}"))
#copy_tarballs_to_hdfs('mapreduce', 'hadoop-mapreduce-historyserver', params.mapred_user, params.hdfs_user, params.user_group)
# MC Hammer said, "Can't touch this"
copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, skip=params.host_sys_prepped)
copy_to_hdfs("slider", params.user_group, params.hdfs_user, skip=params.host_sys_prepped)
params.HdfsResource(None, action="execute")
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
self.configure(env) # FOR SECURITY
# MC Hammer said, "Can't touch this"
resource_created = copy_to_hdfs(
"mapreduce",
params.user_group,
params.hdfs_user,
skip=params.host_sys_prepped)
resource_created = copy_to_hdfs(
"slider",
params.user_group,
params.hdfs_user,
skip=params.host_sys_prepped) or resource_created
if resource_created:
params.HdfsResource(None, action="execute")
service('historyserver', action='start', serviceName='mapreduce')
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
service('historyserver', action='stop', serviceName='mapreduce')
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.mapred_historyserver_pid_file)
def security_status(self, env):
import status_params
env.set_params(status_params)
if status_params.security_enabled:
expectations = {}
expectations.update(build_expectations('mapred-site',
None,
[
'mapreduce.jobhistory.keytab',
'mapreduce.jobhistory.principal',
'mapreduce.jobhistory.webapp.spnego-keytab-file',
'mapreduce.jobhistory.webapp.spnego-principal'
],
None))
security_params = get_params_from_filesystem(status_params.hadoop_conf_dir,
{'mapred-site.xml': FILE_TYPE_XML})
result_issues = validate_security_config_properties(security_params, expectations)
if not result_issues: # If all validations passed successfully
try:
# Double check the dict before calling execute
if ( 'mapred-site' not in security_params or
'mapreduce.jobhistory.keytab' not in security_params['mapred-site'] or
'mapreduce.jobhistory.principal' not in security_params['mapred-site'] or
'mapreduce.jobhistory.webapp.spnego-keytab-file' not in security_params['mapred-site'] or
'mapreduce.jobhistory.webapp.spnego-principal' not in security_params['mapred-site']):
self.put_structured_out({"securityState": "UNSECURED"})
self.put_structured_out(
{"securityIssuesFound": "Keytab file or principal not set."})
return
cached_kinit_executor(status_params.kinit_path_local,
status_params.mapred_user,
security_params['mapred-site']['mapreduce.jobhistory.keytab'],
security_params['mapred-site']['mapreduce.jobhistory.principal'],
status_params.hostname,
status_params.tmp_dir)
cached_kinit_executor(status_params.kinit_path_local,
status_params.mapred_user,
security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-keytab-file'],
security_params['mapred-site']['mapreduce.jobhistory.webapp.spnego-principal'],
status_params.hostname,
status_params.tmp_dir)
self.put_structured_out({"securityState": "SECURED_KERBEROS"})
except Exception as e:
self.put_structured_out({"securityState": "ERROR"})
self.put_structured_out({"securityStateErrorInfo": str(e)})
else:
issues = []
for cf in result_issues:
issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
self.put_structured_out({"securityState": "UNSECURED"})
else:
self.put_structured_out({"securityState": "UNSECURED"})
if __name__ == "__main__":
HistoryServer().execute()
|
"""Functional tests for binary coefficient-wise operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
_ADD = lambda x, y: x + y
_SUB = lambda x, y: x - y
_MUL = lambda x, y: x * y
_POW = lambda x, y: x**y
_TRUEDIV = lambda x, y: x / y
_FLOORDIV = lambda x, y: x // y
_MOD = lambda x, y: x % y
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
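  """Zero out entries of x below thresh and return (SparseTensor, kept values)."""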
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), x_values
def _default_tolerance(dtype):
"""Returns a sensible default tolerance for comparing results of a given type.
Args:
dtype: A datatype.
"""
if dtype == np.float16:
return 5e-3
elif dtype in (np.float32, np.complex64):
return 1e-3
elif dtype in (np.float64, np.complex128):
return 1e-5
else:
return None # Fail fast for unexpected types
class BinaryOpTest(test.TestCase):
def _compareCpu(self, x, y, np_func, tf_func, also_compare_variables=False):
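    """Compare tf_func against np_func on CPU, including the Python operator overloads and, optionally, tf.Variable inputs."""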
np_ans = np_func(x, y)
with test_util.force_cpu():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = self.evaluate(out)
# Test that the op takes precedence over numpy operators.
np_left = self.evaluate(tf_func(x, iny))
np_right = self.evaluate(tf_func(inx, y))
if also_compare_variables:
var_x = variables.Variable(x)
var_y = variables.Variable(y)
self.evaluate(variables.global_variables_initializer())
print(type(x), type(y), type(var_x), type(var_y))
print(type(tf_func(x, var_y)), type(tf_func(var_x, y)))
np_var_left = self.evaluate(tf_func(x, var_y))
np_var_right = self.evaluate(tf_func(var_x, y))
if np_ans.dtype != np.object:
self.assertAllClose(np_ans, tf_cpu)
self.assertAllClose(np_ans, np_left)
self.assertAllClose(np_ans, np_right)
if also_compare_variables:
self.assertAllClose(np_ans, np_var_left)
self.assertAllClose(np_ans, np_var_right)
self.assertShapeEqual(np_ans, out)
_GRAD_TOL = {
dtypes_lib.float16: 1e-3,
dtypes_lib.float32: 1e-3,
dtypes_lib.complex64: 1e-2,
dtypes_lib.float64: 1e-5,
dtypes_lib.complex128: 1e-4
}
def _compareGradientX(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
xs = list(x.shape)
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, out, zs, x_init_value=x)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = tf_func(inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inxf, xs, outf, zs, x_init_value=xf, delta=1e-3)
jacob_n = jacob_n.astype(x.dtype)
tol = self._GRAD_TOL[dtypes_lib.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGradientY(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
ys = list(np.shape(y))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, ys, out, zs, x_init_value=y)
if numeric_gradient_type is not None:
xf = x.astype(numeric_gradient_type)
yf = y.astype(numeric_gradient_type)
inxf = ops.convert_to_tensor(xf)
inyf = ops.convert_to_tensor(yf)
outf = tf_func(inxf, inyf)
_, jacob_n = gradient_checker.compute_gradient(
inyf, ys, outf, zs, x_init_value=yf)
jacob_n = jacob_n.astype(x.dtype)
tol = self._GRAD_TOL[dtypes_lib.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with test_util.use_gpu():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = self.evaluate(out)
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
# TODO(zhifengc/ke): make gradient checker work on GPU.
def _compareBoth(self, x, y, np_func, tf_func, also_compare_variables=False):
self._compareCpu(x, y, np_func, tf_func, also_compare_variables)
if x.dtype in (np.float16, np.float32, np.float64, np.complex64,
np.complex128):
if tf_func not in (_FLOORDIV, math_ops.floordiv, math_ops.zeta,
math_ops.polygamma):
self._compareGradientX(x, y, np_func, tf_func)
self._compareGradientY(x, y, np_func, tf_func)
if tf_func in (math_ops.zeta, math_ops.polygamma):
# These methods only support gradients in the second parameter
self._compareGradientY(x, y, np_func, tf_func)
self._compareGpu(x, y, np_func, tf_func)
@test_util.run_deprecated_v1
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(x, y, np.add, math_ops.add, also_compare_variables=True)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.arctan2, math_ops.atan2)
x1 = np.random.randn(5, 6).astype(np.float32)
x2 = np.random.randn(5, 6).astype(np.float32)
# Remove tiny values--atan2 gradients are flaky near the origin.
x1[np.abs(x1) < 0.05] = 0.05 * np.sign(x1[np.abs(x1) < 0.05])
x2[np.abs(x2) < 0.05] = 0.05 * np.sign(x2[np.abs(x2) < 0.05])
self._compareBoth(x1, x2, np.arctan2, math_ops.atan2)
try:
from scipy import special # pylint: disable=g-import-not-at-top
a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(a_pos_small, x_pos_small, special.gammainc,
math_ops.igamma)
self._compareBoth(a_pos_small, x_pos_small, special.gammaincc,
math_ops.igammac)
# Need x > 1
self._compareBoth(x_pos_small + 1, a_pos_small, special.zeta,
math_ops.zeta)
n_small = np.arange(0, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(n_small, x_pos_small, special.polygamma,
math_ops.polygamma)
except ImportError as e:
tf_logging.warn("Cannot test special functions: %s" % str(e))
@test_util.run_deprecated_v1
def testFloatDifferentShapes(self):
x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.float32)
y = np.array([1, 2]).reshape(2, 1).astype(np.float32)
with self.cached_session() as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
s = math_ops.reduce_sum(inx * iny)
gx, gy = sess.run(gradients_impl.gradients(s, [inx, iny]))
# gx is simply the broadcasted y
self.assertAllEqual(gx,
np.array([1, 1, 2, 2]).reshape(2, 2).astype(np.float32))
# gy is x's column summed up
self.assertAllEqual(gy, np.array([3, 7]).reshape(2, 1).astype(np.float32))
def testFloatVariableOverload(self):
x = np.array([1, 2, 3, 4]).reshape(2, 2).astype(np.int32)
y = np.array([1, 2]).reshape(2, 1).astype(np.int32)
var_x = variables.Variable(x)
var_y = variables.Variable(y)
self.evaluate([var_x.initializer, var_y.initializer])
left_result = self.evaluate(var_x * y)
right_result = self.evaluate(x * var_y)
np_result = x * y
self.assertAllEqual(np_result, left_result)
self.assertAllEqual(np_result, right_result)
@test_util.run_deprecated_v1
def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
self._compareBoth(x, y, np.add, math_ops.add)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
self._compareBoth(x, y + 0.1, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.arctan2, math_ops.atan2)
x1 = np.random.randn(7, 4).astype(np.float64)
x2 = np.random.randn(7, 4).astype(np.float64)
# Remove tiny values--atan2 gradients are flaky near the origin.
x1[np.abs(x1) < 0.5] = 0.5 * np.sign(x1[np.abs(x1) < 0.5])
x2[np.abs(x2) < 0.5] = 0.5 * np.sign(x2[np.abs(x2) < 0.5])
self._compareBoth(x1, x2, np.arctan2, math_ops.atan2)
try:
from scipy import special # pylint: disable=g-import-not-at-top
a_pos_small = np.linspace(0.1, 2, 15).reshape(1, 3, 5).astype(np.float32)
x_pos_small = np.linspace(0.1, 10, 15).reshape(1, 3, 5).astype(np.float32)
self._compareBoth(a_pos_small, x_pos_small, special.gammainc,
math_ops.igamma)
self._compareBoth(a_pos_small, x_pos_small, special.gammaincc,
math_ops.igammac)
except ImportError as e:
tf_logging.warn("Cannot test special functions: %s" % str(e))
def testUint8Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.uint8)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.uint8)
self._compareBoth(x, y, np.add, math_ops.add)
def testInt8Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int8)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int8)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y, np.multiply, _MUL)
def testInt16Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int16)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int16)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y, np.multiply, _MUL)
def testUint16Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.uint16)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.uint16)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
def testInt32Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.int32)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int32)
self._compareBoth(x, y, np.add, math_ops.add)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.mod, math_ops.mod)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.mod, _MOD)
# _compareBoth tests on GPU only for floating point types, so test
# _MOD for int32 on GPU by calling _compareGpu
self._compareGpu(x, y, np.mod, _MOD)
def testUint32Basic(self):
x = np.arange(1, 13, 2).reshape(1, 3, 2).astype(np.uint32)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.uint32)
self._compareBoth(x, y, np.add, math_ops.add_v2)
def testInt64Basic(self):
x = np.arange(1 << 40, 13 << 40, 2 << 40).reshape(1, 3, 2).astype(np.int64)
y = np.arange(1, 7, 1).reshape(1, 3, 2).astype(np.int64)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.floor_divide, math_ops.floordiv)
self._compareBoth(x, y, np.mod, math_ops.mod)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y, np.true_divide, _TRUEDIV)
self._compareBoth(x, y, np.floor_divide, _FLOORDIV)
self._compareBoth(x, y, np.mod, _MOD)
@test_util.run_deprecated_v1
def testComplex64Basic(self):
    x = complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
        np.complex64)
    y = complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
        np.complex64)
self._compareBoth(x, y, np.add, math_ops.add)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
@test_util.run_deprecated_v1
def testComplex128Basic(self):
    x = complex(1, 1) * np.linspace(-10, 10, 6).reshape(1, 3, 2).astype(
        np.complex128)
    y = complex(1, 1) * np.linspace(20, -20, 6).reshape(1, 3, 2).astype(
        np.complex128)
self._compareBoth(x, y, np.add, math_ops.add)
self._compareBoth(x, y, np.subtract, math_ops.subtract)
self._compareBoth(x, y, np.multiply, math_ops.multiply)
self._compareBoth(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareBoth(x, y, np.add, _ADD)
self._compareBoth(x, y, np.subtract, _SUB)
self._compareBoth(x, y, np.multiply, _MUL)
self._compareBoth(x, y + 0.1, np.true_divide, _TRUEDIV)
def testStringComparison(self):
x = np.array([["abc", "bh"], ["c", ""]])
y = np.array([["abc", "bh"], ["def", "hi"]])
with test_util.force_cpu():
cmp_eq = math_ops.equal(x, y)
cmp_not_eq = math_ops.not_equal(x, y)
values = self.evaluate([cmp_eq, cmp_not_eq])
self.assertAllEqual([[True, True], [False, False]], values[0])
self.assertAllEqual([[False, False], [True, True]], values[1])
def testString(self):
x = np.array([["x_0_0", "x_0_1", "x_0_2"], ["x_1_0", "x_1_1", "x_1_2"],
["x_2_0", "x_2_1", "x_2_2"]],
dtype=np.object)
y = np.array([["y_0_0", "y_0_1", "y_0_2"], ["y_1_0", "y_1_1", "y_1_2"],
["y_2_0", "y_2_1", "y_2_2"]],
dtype=np.object)
z = np.array([["z_0", "z_1", "z_2"]], dtype=np.object)
w = np.array("w", dtype=np.object)
self._compareCpu(x, y, _ADD, _ADD)
self._compareCpu(x, z, _ADD, _ADD)
self._compareCpu(x, w, _ADD, _ADD)
self._compareCpu(z, w, _ADD, _ADD)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
if dtype in (np.complex64, np.complex128):
x = (1 + np.linspace(0, 2 + 3j, np.prod(xs))).astype(dtype).reshape(xs)
y = (1 + np.linspace(0, 2 - 2j, np.prod(ys))).astype(dtype).reshape(ys)
else:
x = (1 + np.linspace(0, 5, np.prod(xs))).astype(dtype).reshape(xs)
y = (1 + np.linspace(0, 5, np.prod(ys))).astype(dtype).reshape(ys)
self._compareCpu(x, y, np_func, tf_func)
if x.dtype in (np.float16, np.float32, np.float64):
# TODO(aselle): Make the test work for dtypes:
# (np.complex64, np.complex128).
if tf_func not in (_FLOORDIV, math_ops.floordiv):
if x.dtype == np.float16:
# Compare fp16 theoretical gradients to fp32 numerical gradients,
# since fp16 numerical gradients are too imprecise unless great
# care is taken with choosing the inputs and the delta. This is
# a weaker check (in particular, it does not test the op itself,
# only its gradient), but it's much better than nothing.
          self._compareGradientX(x, y, np_func, tf_func, np.float64)
          self._compareGradientY(x, y, np_func, tf_func, np.float64)
else:
self._compareGradientX(x, y, np_func, tf_func)
self._compareGradientY(x, y, np_func, tf_func)
self._compareGpu(x, y, np_func, tf_func)
# TODO(josh11b,vrv): Refactor this to use parameterized tests.
def _testBCastByFunc(self, funcs, xs, ys):
dtypes = [
np.float16,
np.float32,
np.float64,
np.int32,
np.int64,
np.complex64,
np.complex128,
]
for dtype in dtypes:
for (np_func, tf_func) in funcs:
if (dtype in (np.complex64, np.complex128) and
tf_func in (_FLOORDIV, math_ops.floordiv)):
continue # floordiv makes no sense for complex numbers
self._compareBCast(xs, ys, dtype, np_func, tf_func)
self._compareBCast(ys, xs, dtype, np_func, tf_func)
def _testBCastA(self, xs, ys):
funcs = [
(np.add, math_ops.add),
(np.add, _ADD),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastB(self, xs, ys):
funcs = [
(np.subtract, math_ops.subtract),
(np.subtract, _SUB),
(np.power, math_ops.pow),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastC(self, xs, ys):
funcs = [
(np.multiply, math_ops.multiply),
(np.multiply, _MUL),
]
self._testBCastByFunc(funcs, xs, ys)
def _testBCastD(self, xs, ys):
funcs = [
(np.true_divide, math_ops.truediv),
(np.floor_divide, math_ops.floordiv),
(np.true_divide, _TRUEDIV),
(np.floor_divide, _FLOORDIV),
]
self._testBCastByFunc(funcs, xs, ys)
@test_util.run_deprecated_v1
def testBCast_0A(self):
self._testBCastA([1, 3, 2], [1])
@test_util.run_deprecated_v1
def testBCast_0B(self):
self._testBCastB([1, 3, 2], [1])
@test_util.run_deprecated_v1
def testBCast_0C(self):
self._testBCastC([1, 3, 2], [1])
@test_util.run_deprecated_v1
def testBCast_0D(self):
self._testBCastD([1, 3, 2], [1])
@test_util.run_deprecated_v1
def testBCast_1A(self):
self._testBCastA([1, 3, 2], [2])
@test_util.run_deprecated_v1
def testBCast_1B(self):
self._testBCastB([1, 3, 2], [2])
@test_util.run_deprecated_v1
def testBCast_1C(self):
self._testBCastC([1, 3, 2], [2])
@test_util.run_deprecated_v1
def testBCast_1D(self):
self._testBCastD([1, 3, 2], [2])
@test_util.run_deprecated_v1
def testBCast_2A(self):
self._testBCastA([1, 3, 2], [3, 2])
@test_util.run_deprecated_v1
def testBCast_2B(self):
self._testBCastB([1, 3, 2], [3, 2])
@test_util.run_deprecated_v1
def testBCast_2C(self):
self._testBCastC([1, 3, 2], [3, 2])
@test_util.run_deprecated_v1
def testBCast_2D(self):
self._testBCastD([1, 3, 2], [3, 2])
@test_util.run_deprecated_v1
def testBCast_3A(self):
self._testBCastA([1, 3, 2], [3, 1])
@test_util.run_deprecated_v1
def testBCast_3B(self):
self._testBCastB([1, 3, 2], [3, 1])
@test_util.run_deprecated_v1
def testBCast_3C(self):
self._testBCastC([1, 3, 2], [3, 1])
@test_util.run_deprecated_v1
def testBCast_3D(self):
self._testBCastD([1, 3, 2], [3, 1])
@test_util.run_deprecated_v1
def testBCast_4A(self):
self._testBCastA([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_4B(self):
self._testBCastB([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_4C(self):
self._testBCastC([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_4D(self):
self._testBCastD([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_5A(self):
self._testBCastA([1, 3, 2], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_5B(self):
self._testBCastB([1, 3, 2], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_5C(self):
self._testBCastC([1, 3, 2], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_5D(self):
self._testBCastD([1, 3, 2], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_6A(self):
self._testBCastA([1, 3, 2], [2, 1, 1])
@test_util.run_deprecated_v1
def testBCast_6B(self):
self._testBCastB([1, 3, 2], [2, 1, 1])
@test_util.run_deprecated_v1
def testBCast_6C(self):
self._testBCastC([1, 3, 2], [2, 1, 1])
@test_util.run_deprecated_v1
def testBCast_6D(self):
self._testBCastD([1, 3, 2], [2, 1, 1])
@test_util.run_deprecated_v1
def testBCast_7A(self):
self._testBCastA([1, 3, 2], [1, 3, 1])
@test_util.run_deprecated_v1
def testBCast_7B(self):
self._testBCastB([1, 3, 2], [1, 3, 1])
@test_util.run_deprecated_v1
def testBCast_7C(self):
self._testBCastC([1, 3, 2], [1, 3, 1])
@test_util.run_deprecated_v1
def testBCast_7D(self):
self._testBCastD([1, 3, 2], [1, 3, 1])
@test_util.run_deprecated_v1
def testBCast_8A(self):
self._testBCastA([2, 1, 5], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_8B(self):
self._testBCastB([2, 1, 5], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_8C(self):
self._testBCastC([2, 1, 5], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_8D(self):
self._testBCastD([2, 1, 5], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_9A(self):
self._testBCastA([2, 0, 5], [2, 0, 1])
@test_util.run_deprecated_v1
def testBCast_9B(self):
self._testBCastB([2, 0, 5], [2, 0, 1])
@test_util.run_deprecated_v1
def testBCast_9C(self):
self._testBCastC([2, 0, 5], [2, 0, 1])
@test_util.run_deprecated_v1
def testBCast_9D(self):
self._testBCastD([2, 0, 5], [2, 0, 1])
@test_util.run_deprecated_v1
def testBCast_10A(self):
self._testBCastA([2, 3, 0], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_10B(self):
self._testBCastB([2, 3, 0], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_10C(self):
self._testBCastC([2, 3, 0], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_10D(self):
self._testBCastD([2, 3, 0], [2, 3, 1])
@test_util.run_deprecated_v1
def testBCast_11A(self):
self._testBCastA([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_11B(self):
self._testBCastB([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_11C(self):
self._testBCastC([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_11D(self):
self._testBCastD([1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_12A(self):
self._testBCastA([1, 1, 1, 1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_12B(self):
self._testBCastB([1, 1, 1, 1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_12C(self):
self._testBCastC([1, 1, 1, 1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_12D(self):
self._testBCastD([1, 1, 1, 1, 3, 2], [1, 3, 2])
@test_util.run_deprecated_v1
def testBCast_13A(self):
self._testBCastA([1, 3, 2, 1, 1], [1])
@test_util.run_deprecated_v1
def testBCast_13B(self):
self._testBCastB([1, 3, 2, 1, 1], [1])
@test_util.run_deprecated_v1
def testBCast_13C(self):
self._testBCastC([1, 3, 2, 1, 1], [1])
@test_util.run_deprecated_v1
def testBCast_13D(self):
self._testBCastD([1, 3, 2, 1, 1], [1])
@test_util.run_deprecated_v1
def testBCast_14A(self):
self._testBCastA([2, 3, 1, 1, 5], [1])
@test_util.run_deprecated_v1
def testBCast_14B(self):
self._testBCastB([2, 3, 1, 1, 5], [1])
@test_util.run_deprecated_v1
def testBCast_14C(self):
self._testBCastC([2, 3, 1, 1, 5], [1])
@test_util.run_deprecated_v1
def testBCast_14D(self):
self._testBCastD([2, 3, 1, 1, 5], [1])
@test_util.run_deprecated_v1
def testBCast_15A(self):
self._testBCastA([10, 3, 1, 2], [3, 1, 2])
@test_util.run_deprecated_v1
def testBCast_15B(self):
self._testBCastB([10, 3, 1, 2], [3, 1, 2])
@test_util.run_deprecated_v1
def testBCast_15C(self):
self._testBCastC([10, 3, 1, 2], [3, 1, 2])
@test_util.run_deprecated_v1
def testBCast_15D(self):
self._testBCastD([10, 3, 1, 2], [3, 1, 2])
@test_util.run_deprecated_v1
def testMismatchedDimensions(self):
for func in [
math_ops.add, math_ops.subtract, math_ops.multiply, math_ops.div, _ADD,
_SUB, _MUL, _TRUEDIV, _FLOORDIV
]:
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Dimensions must" in str(e)):
func(
ops.convert_to_tensor([10.0, 20.0, 30.0]),
ops.convert_to_tensor([[40.0, 50.0], [60.0, 70.0]]))
@test_util.run_deprecated_v1
def testZeroPowGrad(self):
with self.cached_session():
for dtype in (np.float16, np.float32, np.float64, np.complex64,
np.complex128):
x = constant_op.constant(0.0, dtype=dtype)
y = constant_op.constant(2.0, dtype=dtype)
z = math_ops.pow(x, y)
error = gradient_checker.compute_gradient_error(y, [], z, [])
self.assertEqual(error, 0)
@test_util.run_deprecated_v1
def testComplexPowGrad(self):
with self.cached_session():
for dtype in np.complex64, np.complex128:
for base in 2.0, -2.0:
x = constant_op.constant(base, dtype=dtype)
y = constant_op.constant(2.0, dtype=dtype)
z = math_ops.pow(x, y)
error = gradient_checker.compute_gradient_error(y, [], z, [])
self.assertLess(error, 2e-4)
def testAtan2SpecialValues(self):
x1l, x2l = zip((+0.0, +0.0), (+0.0, -0.0), (-0.0, +0.0), (-0.0, -0.0),
(1.2345, float("inf")), (1.2345, -float("inf")),
(-4.321, float("inf")), (-4.125, -float("inf")),
(float("inf"), float("inf")), (float("inf"), -float("inf")),
(-float("inf"), float("inf")),
(-float("inf"), -float("inf")))
for dtype in np.float32, np.float64:
x1 = np.array(x1l).astype(dtype)
x2 = np.array(x2l).astype(dtype)
self._compareCpu(x1, x2, np.arctan2, math_ops.atan2)
self._compareGpu(x1, x2, np.arctan2, math_ops.atan2)
def testPowNegativeExponent(self):
for dtype in [np.int32, np.int64]:
with test_util.force_cpu():
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
"Integers to negative integer powers are not allowed"):
x = np.array([5, 2]).astype(dtype)
y = np.array([-2, 3]).astype(dtype)
self.evaluate(math_ops.pow(x, y))
with test_util.force_cpu():
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
"Integers to negative integer powers are not allowed"):
x = np.array([5, 2]).astype(dtype)
y = np.array([2, -3]).astype(dtype)
self.evaluate(math_ops.pow(x, y))
with test_util.force_cpu():
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
"Integers to negative integer powers are not allowed"):
x = np.array([5, 2]).astype(dtype)
y = -3
self.evaluate(math_ops.pow(x, y))
class ComparisonOpTest(test.TestCase):
def _compareScalar(self, func, x, y, dtype):
with test_util.use_gpu():
out = func(
ops.convert_to_tensor(np.array([x]).astype(dtype)),
ops.convert_to_tensor(np.array([y]).astype(dtype)))
ret = self.evaluate(out)
return ret[0]
def testScalarCompareScalar(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
data = [-1, 0, 1]
for t in dtypes:
for x in data:
for y in data:
self.assertEqual(self._compareScalar(math_ops.less, x, y, t), x < y)
self.assertEqual(
self._compareScalar(math_ops.less_equal, x, y, t), x <= y)
self.assertEqual(
self._compareScalar(math_ops.greater, x, y, t), x > y)
self.assertEqual(
self._compareScalar(math_ops.greater_equal, x, y, t), x >= y)
self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
self.assertEqual(
self._compareScalar(math_ops.not_equal, x, y, t), x != y)
data = [-1, 0, 1, -1j, 1j, 1 + 1j, 1 - 1j]
for t in [np.complex64, np.complex128]:
for x in data:
for y in data:
self.assertEqual(self._compareScalar(math_ops.equal, x, y, t), x == y)
self.assertEqual(
self._compareScalar(math_ops.not_equal, x, y, t), x != y)
def _compare(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with test_util.use_gpu():
out = tf_func(ops.convert_to_tensor(x), ops.convert_to_tensor(y))
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
def testTensorCompareTensor(self):
x = np.linspace(-15, 15, 6).reshape(1, 3, 2)
y = np.linspace(20, -10, 6).reshape(1, 3, 2)
for t in [np.float16, np.float32, np.float64, np.int32, np.int64]:
xt = x.astype(t)
yt = y.astype(t)
self._compare(xt, yt, np.less, math_ops.less)
self._compare(xt, yt, np.less_equal, math_ops.less_equal)
self._compare(xt, yt, np.greater, math_ops.greater)
self._compare(xt, yt, np.greater_equal, math_ops.greater_equal)
self._compare(xt, yt, np.equal, math_ops.equal)
self._compare(xt, yt, np.not_equal, math_ops.not_equal)
# Complex types do not support ordering but do support equality tests.
for t in [np.complex64, np.complex128]:
xt = x.astype(t)
xt -= 1j * xt
yt = y.astype(t)
yt -= 1j * yt
self._compare(xt, yt, np.equal, math_ops.equal)
self._compare(xt, yt, np.not_equal, math_ops.not_equal)
def _compareBCast(self, xs, ys, dtype, np_func, tf_func):
x = np.linspace(-15, 15, np.prod(xs)).astype(dtype).reshape(xs)
y = np.linspace(20, -10, np.prod(ys)).astype(dtype).reshape(ys)
if dtype in (np.complex64, np.complex128):
x -= 1j * x
y -= 1j * y
self._compare(x, y, np_func, tf_func)
self._compare(y, x, np_func, tf_func)
def _testBCastByFunc(self, np_func, tf_func, include_complex=False):
shapes = [
([1, 3, 2], [1]),
([1, 3, 2], [2]),
([1, 3, 2], [3, 2]),
([1, 3, 2], [3, 1]),
([1, 3, 2], [1, 3, 2]),
([1, 3, 2], [2, 3, 1]),
([1, 3, 2], [2, 1, 1]),
([1, 3, 2], [1, 3, 1]),
([2, 1, 5], [2, 3, 1]),
([2, 0, 5], [2, 0, 1]),
([2, 3, 0], [2, 3, 1]),
]
dtypes = [
np.float16,
np.float32,
np.float64,
np.int32,
np.int64,
]
if include_complex:
dtypes.extend([np.complex64, np.complex128])
for (xs, ys) in shapes:
for dtype in dtypes:
self._compareBCast(xs, ys, dtype, np_func, tf_func)
def testBCastLess(self):
self._testBCastByFunc(np.less, math_ops.less)
def testBCastLessEqual(self):
self._testBCastByFunc(np.less_equal, math_ops.less_equal)
def testBCastGreater(self):
self._testBCastByFunc(np.greater, math_ops.greater)
def testBCastGreaterEqual(self):
self._testBCastByFunc(np.greater_equal, math_ops.greater_equal)
def testBCastEqual(self):
self._testBCastByFunc(np.equal, math_ops.equal, include_complex=True)
def testBCastNotEqual(self):
self._testBCastByFunc(
np.not_equal, math_ops.not_equal, include_complex=True)
def testShapeMismatch(self):
dtypes = [np.float16, np.float32, np.float64, np.int32, np.int64]
funcs = [
math_ops.less, math_ops.less_equal, math_ops.greater,
math_ops.greater_equal, math_ops.equal, math_ops.not_equal
]
x = np.arange(0, 10).reshape([2, 5])
y = np.arange(0, 10).reshape([5, 2])
for t in dtypes:
for f in funcs:
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
"Incompatible shapes|Dimensions must be equal"):
f(x.astype(t), y.astype(t))
def testEqualDType(self):
dtypes = [
np.float16,
np.float32,
np.float64,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
        np.bool_,
]
x = np.asarray([0, 1, 2, 3, 4])
y = np.asarray([0, 1, 2, 3, 4])
for dtype in dtypes:
xt = x.astype(dtype)
yt = y.astype(dtype)
cmp_eq = math_ops.equal(xt, yt)
cmp_ne = math_ops.not_equal(xt, yt)
values = self.evaluate([cmp_eq, cmp_ne])
self.assertAllEqual(
[[True, True, True, True, True], [False, False, False, False, False]],
values)
for dtype in [np.complex64, np.complex128]:
xt = x.astype(dtype)
xt -= 1j * xt
yt = y.astype(dtype)
yt -= 1j * yt
cmp_eq = math_ops.equal(xt, yt)
cmp_ne = math_ops.not_equal(xt, yt)
values = self.evaluate([cmp_eq, cmp_ne])
self.assertAllEqual(
[[True, True, True, True, True], [False, False, False, False, False]],
values)
def testEqualQuantizeDType(self):
dtypes = [
dtypes_lib.qint8,
dtypes_lib.qint16,
dtypes_lib.quint8,
dtypes_lib.quint16,
]
x = np.asarray([0, 1, 2, 3, 4])
y = np.asarray([0, 1, 2, 3, 4])
for dtype in dtypes:
xt = x.astype(dtype.as_numpy_dtype)
yt = y.astype(dtype.as_numpy_dtype)
cmp_eq = math_ops.equal(xt, yt)
cmp_ne = math_ops.not_equal(xt, yt)
values = self.evaluate([cmp_eq, cmp_ne])
self.assertAllEqual(
[[True, True, True, True, True], [False, False, False, False, False]],
values)
if __name__ == "__main__":
test.main()
|
import argparse, sys
from confluent_kafka import avro, KafkaError
from confluent_kafka.admin import AdminClient, NewTopic
from uuid import uuid4
name_schema = """
{
"namespace": "io.confluent.examples.clients.cloud",
"name": "Name",
"type": "record",
"fields": [
{"name": "name", "type": "string"}
]
}
"""
class Name(object):
"""
Name stores the deserialized Avro record for the Kafka key.
"""
# Use __slots__ to explicitly declare all data members.
__slots__ = ["name", "id"]
def __init__(self, name=None):
self.name = name
# Unique id used to track produce request success/failures.
# Do *not* include in the serialized object.
self.id = uuid4()
@staticmethod
def dict_to_name(obj, ctx):
return Name(obj['name'])
@staticmethod
def name_to_dict(name, ctx):
return Name.to_dict(name)
def to_dict(self):
"""
The Avro Python library does not support code generation.
For this reason we must provide a dict representation of our class for serialization.
"""
return dict(name=self.name)
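# Round-trip sketch for the helpers above (illustrative only; the ``ctx``
# argument mirrors the SerializationContext that confluent_kafka serializers
# pass to these callbacks and is unused here):
#
#   n = Name("alice")
#   Name.name_to_dict(n, None)                        # -> {'name': 'alice'}
#   Name.dict_to_name({'name': 'alice'}, None).name   # -> 'alice'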
count_schema = """
{
"namespace": "io.confluent.examples.clients.cloud",
"name": "Count",
"type": "record",
"fields": [
{"name": "count", "type": "int"}
]
}
"""
class Count(object):
"""
Count stores the deserialized Avro record for the Kafka value.
"""
# Use __slots__ to explicitly declare all data members.
__slots__ = ["count", "id"]
def __init__(self, count=None):
self.count = count
# Unique id used to track produce request success/failures.
# Do *not* include in the serialized object.
self.id = uuid4()
@staticmethod
def dict_to_count(obj, ctx):
return Count(obj['count'])
@staticmethod
def count_to_dict(count, ctx):
return Count.to_dict(count)
def to_dict(self):
"""
The Avro Python library does not support code generation.
For this reason we must provide a dict representation of our class for serialization.
"""
return dict(count=self.count)
def parse_args():
"""Parse command line arguments"""
parser = argparse.ArgumentParser(
description="Confluent Python Client example to produce messages \
to Confluent Cloud")
parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
required.add_argument('-f',
dest="config_file",
help="path to Confluent Cloud configuration file",
required=True)
required.add_argument('-t',
dest="topic",
help="topic name",
required=True)
args = parser.parse_args()
return args
def read_ccloud_config(config_file):
"""Read Confluent Cloud configuration for librdkafka clients"""
conf = {}
with open(config_file) as fh:
for line in fh:
line = line.strip()
if len(line) != 0 and line[0] != "#":
parameter, value = line.strip().split('=', 1)
conf[parameter] = value.strip()
#conf['ssl.ca.location'] = certifi.where()
return conf
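# The parser above expects librdkafka-style ``key=value`` lines; blank lines
# and lines starting with '#' are skipped. A hypothetical configuration file
# (placeholder values, not real credentials) might look like:
#
#   bootstrap.servers=<BROKER_ENDPOINT>
#   security.protocol=SASL_SSL
#   sasl.mechanisms=PLAIN
#   sasl.username=<API_KEY>
#   sasl.password=<API_SECRET>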
def pop_schema_registry_params_from_config(conf):
"""Remove potential Schema Registry related configurations from dictionary"""
conf.pop('schema.registry.url', None)
conf.pop('basic.auth.user.info', None)
conf.pop('basic.auth.credentials.source', None)
return conf
def create_topic(conf, topic):
"""
Create a topic if needed
Examples of additional admin API functionality:
https://github.com/confluentinc/confluent-kafka-python/blob/master/examples/adminapi.py
"""
admin_client_conf = pop_schema_registry_params_from_config(conf.copy())
a = AdminClient(admin_client_conf)
fs = a.create_topics([NewTopic(
topic,
num_partitions=1,
replication_factor=3
)])
for topic, f in fs.items():
try:
f.result() # The result itself is None
print("Topic {} created".format(topic))
except Exception as e:
            # Continue if the error is TOPIC_ALREADY_EXISTS, i.e. the topic
            # already exists; otherwise fail fast.
if e.args[0].code() != KafkaError.TOPIC_ALREADY_EXISTS:
print("Failed to create topic {}: {}".format(topic, e))
sys.exit(1)
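# Minimal wiring sketch for the helpers above (illustrative; the full example
# scripts also build Avro serializers and a producer, which is omitted here):
if __name__ == "__main__":
    args = parse_args()
    conf = read_ccloud_config(args.config_file)
    create_topic(conf, args.topic)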
|
from django.shortcuts import redirect
from django.shortcuts import render
from django.views.decorators.cache import never_cache
from django.contrib.auth.decorators import permission_required
from django.contrib.auth.decorators import login_required
from registration.backends import get_backend
def register(request, backend='default', template_name='registration/registration_form.html'):
backend = get_backend(backend)
    # Determine if registration is currently allowed. The ``request`` object
    # is passed, which can be used to selectively disallow registration based
    # on the user-agent.
if not backend.registration_allowed(request):
return redirect(*backend.registration_closed_redirect(request))
form_class = backend.get_registration_form_class(request)
if request.method == 'POST':
form = form_class(request.POST, request.FILES)
if form.is_valid():
user = backend.register(request, form)
return redirect(backend.post_registration_redirect(request, user))
else:
form = form_class()
return render(request, template_name, {'form': form})
@never_cache
def verify(request, backend='default', template_name='registration/registration_verify.html', **kwargs):
backend = get_backend(backend)
profile = backend.get_profile(request, **kwargs)
if profile:
# check to see if moderation for this profile is required and whether or
# not it is a verified account.
if backend.moderation_required(request, profile):
moderation_required = True
backend.verify(request, profile, **kwargs)
else:
moderation_required = False
# attempt to activate this user
backend.activate(request, profile, **kwargs)
else:
moderation_required = None
return render(request, template_name, {
'profile': profile,
'moderation_required': moderation_required,
})
@never_cache
@login_required()
def moderate(request, backend='default', template_name='registration/registration_moderate.html', **kwargs):
backend = get_backend(backend)
profile = backend.get_profile(request, **kwargs)
form_class = backend.get_moderation_form_class(request)
if request.method == 'POST':
form = form_class(request.POST)
if form.is_valid():
backend.moderate(request, form, profile, **kwargs)
return redirect(backend.post_moderation_redirect(request, profile))
else:
form = form_class()
return render(request, template_name, {
'form': form,
'profile': profile,
})
@permission_required('registration.change_registrationprofile')
@login_required()
def moderate_list(request, backend='default', template_name='registration/registration_moderate_list.html'):
backend = get_backend(backend)
profiles = backend.get_unmoderated_profiles(request)
return render(request, template_name, {
'profiles': profiles,
})
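# Illustrative URLconf sketch (not part of this module; the pattern names and
# keyword arguments below are assumptions about how these views could be
# wired up):
#
#   from django.urls import path
#   from registration import views
#
#   urlpatterns = [
#       path("register/", views.register, name="registration_register"),
#       path("verify/<str:verification_key>/", views.verify,
#            name="registration_verify"),
#       path("moderate/", views.moderate_list,
#            name="registration_moderate_list"),
#   ]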
|
import happybase
from StringIO import StringIO
from PIL import Image
def decode_image_PIL(binary_data):
""" Returns PIL image from binary buffer.
"""
f = StringIO(binary_data)
img = Image.open(f)
return img
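# Illustrative use of the helper above (hypothetical file path): decoding raw
# JPEG bytes yields a PIL.Image that can be inspected or re-saved, as the
# __main__ block below does for rows fetched from HBase.
#
#   img = decode_image_PIL(open("example.jpg", "rb").read())
#   print(img.size, img.mode)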
if __name__=="__main__":
tab_image = 'image_cache'
col_image = dict()
col_image['image_cache'] = 'image:binary'
conn = happybase.Connection(host='10.1.94.57')
image_rows = dict()
image_rows['image_cache'] = ['0000007031E3FA80C97940017253BEAB542EA334', '000001EC5DD154E58B72326EFC26A41A4C8E9586',
'0000081A1D6D1A2023DAE07547C242ED3106E7FE']
table = conn.table(tab_image)
for row in table.rows(image_rows[tab_image]):
binary_data = row[1][col_image[tab_image]]
img = decode_image_PIL(binary_data)
print("Saving image to: {}".format(row[0]+'.jpeg'))
img.save(row[0]+'.jpeg',"JPEG")
|
import os
import json
import tempfile
import urllib, urllib2
import requests
from indra.java_vm import autoclass, JavaException
import indra.databases.pmc_client as pmc_client
from processor import ReachProcessor
def process_pmc(pmc_id):
xml_str = pmc_client.get_xml(pmc_id)
with tempfile.NamedTemporaryFile() as fh:
fh.write(xml_str)
fh.flush()
rp = process_nxml(fh.name)
return rp
def process_text(txt, use_tempdir=False, offline=False):
if offline:
nxml_txt = '<article><body><sec><p>%s</p></sec></body></article>' % txt
tmp_file = tempfile.NamedTemporaryFile()
tmp_file.file.write(nxml_txt)
tmp_file.file.flush()
return process_nxml(tmp_file.name)
else:
url = 'http://agathon.sista.arizona.edu:8080/odinweb/api/text'
req = urllib2.Request(url, data=urllib.urlencode({'text': txt}))
res = urllib2.urlopen(req)
json_str = res.read()
json_dict = json.loads(json_str)
events_dict = json_dict['events']
events_json_str = json.dumps(events_dict, indent=1)
with open('reach_output.json', 'wt') as fh:
fh.write(json_str)
return process_json_str(events_json_str)
def process_nxml(file_name, use_tempdir=False, offline=False):
if offline:
base = os.path.basename(file_name)
file_id = os.path.splitext(base)[0]
if use_tempdir:
tmp_dir = tempfile.mkdtemp()
else:
tmp_dir = '.'
try:
paper_reader = autoclass('edu.arizona.sista.reach.ReadPaper')
paper_reader.main([file_name, tmp_dir])
except JavaException:
print 'Could not process file %s.' % file_name
return None
json_file_name = os.path.join(tmp_dir, file_id + '.uaz.events.json')
return process_json_file(json_file_name)
else:
url = 'http://agathon.sista.arizona.edu:8080/odinweb/api/nxml'
txt = open(file_name, 'rt').read()
req = urllib2.Request(url, data=urllib.urlencode({'nxml': txt}))
res = urllib2.urlopen(req)
json_str = res.read()
json_dict = json.loads(json_str)
return process_json_str(json_str, events_only=False)
def process_json_file(file_name):
try:
with open(file_name, 'rt') as fh:
json_str = fh.read()
return process_json_str(json_str)
except IOError:
print 'Could not read file %s.' % file_name
def process_json_str(json_str, events_only=True):
if not events_only:
json_dict = json.loads(json_str)
events_dict = json_dict['events']
events_json_str = json.dumps(events_dict, indent=1)
else:
events_json_str = json_str
events_json_str = events_json_str.replace('frame-id','frame_id')
events_json_str = events_json_str.replace('argument-label','argument_label')
events_json_str = events_json_str.replace('object-meta','object_meta')
events_json_str = events_json_str.replace('doc-id','doc_id')
json_dict = json.loads(events_json_str)
rp = ReachProcessor(json_dict)
rp.get_phosphorylation()
rp.get_complexes()
return rp
if __name__ == '__main__':
rp = process_json_file('PMC0000001.uaz.events.json')
|
import copy
import re
import sys
import tempfile
import unittest
from mock.tests.support import ALWAYS_EQ
from mock.tests.support import is_instance
from mock import (
call, DEFAULT, patch, sentinel,
MagicMock, Mock, NonCallableMock,
NonCallableMagicMock, AsyncMock,
create_autospec, mock
)
from mock.mock import _Call, _CallList
import mock.mock as mock_module
class Iter(object):
def __init__(self):
self.thing = iter(['this', 'is', 'an', 'iter'])
def __iter__(self):
return self
def next(self):
return next(self.thing)
__next__ = next
class Something(object):
def meth(self, a, b, c, d=None): pass
@classmethod
def cmeth(cls, a, b, c, d=None): pass
@staticmethod
def smeth(a, b, c, d=None): pass
def something(a): pass
class MockTest(unittest.TestCase):
def test_all(self):
# if __all__ is badly defined then import * will raise an error
# We have to exec it because you can't import * inside a method
# in Python 3
exec("from mock.mock import *")
def test_constructor(self):
mock = Mock()
self.assertFalse(mock.called, "called not initialised correctly")
self.assertEqual(mock.call_count, 0,
"call_count not initialised correctly")
self.assertTrue(is_instance(mock.return_value, Mock),
"return_value not initialised correctly")
self.assertEqual(mock.call_args, None,
"call_args not initialised correctly")
self.assertEqual(mock.call_args_list, [],
"call_args_list not initialised correctly")
self.assertEqual(mock.method_calls, [],
"method_calls not initialised correctly")
# Can't use hasattr for this test as it always returns True on a mock
self.assertNotIn('_items', mock.__dict__,
"default mock should not have '_items' attribute")
self.assertIsNone(mock._mock_parent,
"parent not initialised correctly")
self.assertIsNone(mock._mock_methods,
"methods not initialised correctly")
self.assertEqual(mock._mock_children, {},
"children not initialised incorrectly")
def test_return_value_in_constructor(self):
mock = Mock(return_value=None)
self.assertIsNone(mock.return_value,
"return value in constructor not honoured")
def test_change_return_value_via_delegate(self):
def f(): pass
mock = create_autospec(f)
mock.mock.return_value = 1
self.assertEqual(mock(), 1)
def test_change_side_effect_via_delegate(self):
def f(): pass
mock = create_autospec(f)
mock.mock.side_effect = TypeError()
with self.assertRaises(TypeError):
mock()
def test_repr(self):
mock = Mock(name='foo')
self.assertIn('foo', repr(mock))
self.assertIn("'%s'" % id(mock), repr(mock))
mocks = [(Mock(), 'mock'), (Mock(name='bar'), 'bar')]
for mock, name in mocks:
self.assertIn('%s.bar' % name, repr(mock.bar))
self.assertIn('%s.foo()' % name, repr(mock.foo()))
self.assertIn('%s.foo().bing' % name, repr(mock.foo().bing))
self.assertIn('%s()' % name, repr(mock()))
self.assertIn('%s()()' % name, repr(mock()()))
self.assertIn('%s()().foo.bar.baz().bing' % name,
repr(mock()().foo.bar.baz().bing))
def test_repr_with_spec(self):
class X(object):
pass
mock = Mock(spec=X)
self.assertIn(" spec='X' ", repr(mock))
mock = Mock(spec=X())
self.assertIn(" spec='X' ", repr(mock))
mock = Mock(spec_set=X)
self.assertIn(" spec_set='X' ", repr(mock))
mock = Mock(spec_set=X())
self.assertIn(" spec_set='X' ", repr(mock))
mock = Mock(spec=X, name='foo')
self.assertIn(" spec='X' ", repr(mock))
self.assertIn(" name='foo' ", repr(mock))
mock = Mock(name='foo')
self.assertNotIn("spec", repr(mock))
mock = Mock()
self.assertNotIn("spec", repr(mock))
mock = Mock(spec=['foo'])
self.assertNotIn("spec", repr(mock))
def test_side_effect(self):
mock = Mock()
def effect(*args, **kwargs):
raise SystemError('kablooie')
mock.side_effect = effect
self.assertRaises(SystemError, mock, 1, 2, fish=3)
mock.assert_called_with(1, 2, fish=3)
results = [1, 2, 3]
def effect():
return results.pop()
mock.side_effect = effect
self.assertEqual([mock(), mock(), mock()], [3, 2, 1],
"side effect not used correctly")
mock = Mock(side_effect=sentinel.SideEffect)
self.assertEqual(mock.side_effect, sentinel.SideEffect,
"side effect in constructor not used")
def side_effect():
return DEFAULT
mock = Mock(side_effect=side_effect, return_value=sentinel.RETURN)
self.assertEqual(mock(), sentinel.RETURN)
def test_autospec_side_effect(self):
# Test for issue17826
results = [1, 2, 3]
def effect():
return results.pop()
def f(): pass
mock = create_autospec(f)
mock.side_effect = [1, 2, 3]
self.assertEqual([mock(), mock(), mock()], [1, 2, 3],
"side effect not used correctly in create_autospec")
# Test where side effect is a callable
results = [1, 2, 3]
mock = create_autospec(f)
mock.side_effect = effect
self.assertEqual([mock(), mock(), mock()], [3, 2, 1],
"callable side effect not used correctly")
def test_autospec_side_effect_exception(self):
# Test for issue 23661
def f(): pass
mock = create_autospec(f)
mock.side_effect = ValueError('Bazinga!')
self.assertRaisesRegex(ValueError, 'Bazinga!', mock)
def test_reset_mock(self):
parent = Mock()
spec = ["something"]
mock = Mock(name="child", parent=parent, spec=spec)
mock(sentinel.Something, something=sentinel.SomethingElse)
something = mock.something
mock.something()
mock.side_effect = sentinel.SideEffect
return_value = mock.return_value
return_value()
mock.reset_mock()
self.assertEqual(mock._mock_name, "child",
"name incorrectly reset")
self.assertEqual(mock._mock_parent, parent,
"parent incorrectly reset")
self.assertEqual(mock._mock_methods, spec,
"methods incorrectly reset")
self.assertFalse(mock.called, "called not reset")
self.assertEqual(mock.call_count, 0, "call_count not reset")
self.assertEqual(mock.call_args, None, "call_args not reset")
self.assertEqual(mock.call_args_list, [], "call_args_list not reset")
self.assertEqual(mock.method_calls, [],
"method_calls not initialised correctly: %r != %r" %
(mock.method_calls, []))
self.assertEqual(mock.mock_calls, [])
self.assertEqual(mock.side_effect, sentinel.SideEffect,
"side_effect incorrectly reset")
self.assertEqual(mock.return_value, return_value,
"return_value incorrectly reset")
self.assertFalse(return_value.called, "return value mock not reset")
self.assertEqual(mock._mock_children, {'something': something},
"children reset incorrectly")
self.assertEqual(mock.something, something,
"children incorrectly cleared")
self.assertFalse(mock.something.called, "child not reset")
def test_reset_mock_recursion(self):
mock = Mock()
mock.return_value = mock
# used to cause recursion
mock.reset_mock()
def test_reset_mock_on_mock_open_issue_18622(self):
a = mock.mock_open()
a.reset_mock()
def test_call(self):
mock = Mock()
self.assertTrue(is_instance(mock.return_value, Mock),
"Default return_value should be a Mock")
result = mock()
self.assertEqual(mock(), result,
"different result from consecutive calls")
mock.reset_mock()
ret_val = mock(sentinel.Arg)
self.assertTrue(mock.called, "called not set")
self.assertEqual(mock.call_count, 1, "call_count incorrect")
self.assertEqual(mock.call_args, ((sentinel.Arg,), {}),
"call_args not set")
self.assertEqual(mock.call_args.args, (sentinel.Arg,),
"call_args not set")
self.assertEqual(mock.call_args.kwargs, {},
"call_args not set")
self.assertEqual(mock.call_args_list, [((sentinel.Arg,), {})],
"call_args_list not initialised correctly")
mock.return_value = sentinel.ReturnValue
ret_val = mock(sentinel.Arg, key=sentinel.KeyArg)
self.assertEqual(ret_val, sentinel.ReturnValue,
"incorrect return value")
self.assertEqual(mock.call_count, 2, "call_count incorrect")
self.assertEqual(mock.call_args,
((sentinel.Arg,), {'key': sentinel.KeyArg}),
"call_args not set")
self.assertEqual(mock.call_args_list, [
((sentinel.Arg,), {}),
((sentinel.Arg,), {'key': sentinel.KeyArg})
],
"call_args_list not set")
def test_call_args_comparison(self):
mock = Mock()
mock()
mock(sentinel.Arg)
mock(kw=sentinel.Kwarg)
mock(sentinel.Arg, kw=sentinel.Kwarg)
self.assertEqual(mock.call_args_list, [
(),
((sentinel.Arg,),),
({"kw": sentinel.Kwarg},),
((sentinel.Arg,), {"kw": sentinel.Kwarg})
])
self.assertEqual(mock.call_args,
((sentinel.Arg,), {"kw": sentinel.Kwarg}))
self.assertEqual(mock.call_args.args, (sentinel.Arg,))
self.assertEqual(mock.call_args.kwargs, {"kw": sentinel.Kwarg})
# Comparing call_args to a long sequence should not raise
# an exception. See issue 24857.
self.assertFalse(mock.call_args == "a long sequence")
def test_calls_equal_with_any(self):
# Check that equality and non-equality is consistent even when
# comparing with mock.ANY
mm = mock.MagicMock()
self.assertTrue(mm == mm)
self.assertFalse(mm != mm)
self.assertFalse(mm == mock.MagicMock())
self.assertTrue(mm != mock.MagicMock())
self.assertTrue(mm == mock.ANY)
self.assertFalse(mm != mock.ANY)
self.assertTrue(mock.ANY == mm)
self.assertFalse(mock.ANY != mm)
self.assertTrue(mm == ALWAYS_EQ)
self.assertFalse(mm != ALWAYS_EQ)
call1 = mock.call(mock.MagicMock())
call2 = mock.call(mock.ANY)
self.assertTrue(call1 == call2)
self.assertFalse(call1 != call2)
self.assertTrue(call2 == call1)
self.assertFalse(call2 != call1)
self.assertTrue(call1 == ALWAYS_EQ)
self.assertFalse(call1 != ALWAYS_EQ)
self.assertFalse(call1 == 1)
self.assertTrue(call1 != 1)
def test_assert_called_with(self):
mock = Mock()
mock()
# Will raise an exception if it fails
mock.assert_called_with()
self.assertRaises(AssertionError, mock.assert_called_with, 1)
mock.reset_mock()
self.assertRaises(AssertionError, mock.assert_called_with)
mock(1, 2, 3, a='fish', b='nothing')
mock.assert_called_with(1, 2, 3, a='fish', b='nothing')
def test_assert_called_with_any(self):
m = MagicMock()
m(MagicMock())
m.assert_called_with(mock.ANY)
def test_assert_called_with_function_spec(self):
def f(a, b, c, d=None): pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock.assert_called_with(1, 2, 3)
mock.assert_called_with(a=1, b=2, c=3)
self.assertRaises(AssertionError, mock.assert_called_with,
1, b=3, c=2)
# Expected call doesn't match the spec's signature
with self.assertRaises(AssertionError) as cm:
mock.assert_called_with(e=8)
self.assertIsInstance(cm.exception.__cause__, TypeError)
def test_assert_called_with_method_spec(self):
def _check(mock):
mock(1, b=2, c=3)
mock.assert_called_with(1, 2, 3)
mock.assert_called_with(a=1, b=2, c=3)
self.assertRaises(AssertionError, mock.assert_called_with,
1, b=3, c=2)
mock = Mock(spec=Something().meth)
_check(mock)
mock = Mock(spec=Something.cmeth)
_check(mock)
mock = Mock(spec=Something().cmeth)
_check(mock)
mock = Mock(spec=Something.smeth)
_check(mock)
mock = Mock(spec=Something().smeth)
_check(mock)
def test_assert_called_exception_message(self):
msg = "Expected '{0}' to have been called"
with self.assertRaisesRegex(AssertionError, msg.format('mock')):
Mock().assert_called()
with self.assertRaisesRegex(AssertionError, msg.format('test_name')):
Mock(name="test_name").assert_called()
def test_assert_called_once_with(self):
mock = Mock()
mock()
# Will raise an exception if it fails
mock.assert_called_once_with()
mock()
self.assertRaises(AssertionError, mock.assert_called_once_with)
mock.reset_mock()
self.assertRaises(AssertionError, mock.assert_called_once_with)
mock('foo', 'bar', baz=2)
mock.assert_called_once_with('foo', 'bar', baz=2)
mock.reset_mock()
mock('foo', 'bar', baz=2)
self.assertRaises(
AssertionError,
lambda: mock.assert_called_once_with('bob', 'bar', baz=2)
)
def test_assert_called_once_with_call_list(self):
m = Mock()
m(1)
m(2)
self.assertRaisesRegex(AssertionError,
re.escape("Calls: [call(1), call(2)]"),
lambda: m.assert_called_once_with(2))
def test_assert_called_once_with_function_spec(self):
def f(a, b, c, d=None): pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock.assert_called_once_with(1, 2, 3)
mock.assert_called_once_with(a=1, b=2, c=3)
self.assertRaises(AssertionError, mock.assert_called_once_with,
1, b=3, c=2)
# Expected call doesn't match the spec's signature
with self.assertRaises(AssertionError) as cm:
mock.assert_called_once_with(e=8)
self.assertIsInstance(cm.exception.__cause__, TypeError)
# Mock called more than once => always fails
mock(4, 5, 6)
self.assertRaises(AssertionError, mock.assert_called_once_with,
1, 2, 3)
self.assertRaises(AssertionError, mock.assert_called_once_with,
4, 5, 6)
def test_attribute_access_returns_mocks(self):
mock = Mock()
something = mock.something
self.assertTrue(is_instance(something, Mock), "attribute isn't a mock")
self.assertEqual(mock.something, something,
"different attributes returned for same name")
# Usage example
mock = Mock()
mock.something.return_value = 3
self.assertEqual(mock.something(), 3, "method returned wrong value")
self.assertTrue(mock.something.called,
"method didn't record being called")
def test_attributes_have_name_and_parent_set(self):
mock = Mock()
something = mock.something
self.assertEqual(something._mock_name, "something",
"attribute name not set correctly")
self.assertEqual(something._mock_parent, mock,
"attribute parent not set correctly")
def test_method_calls_recorded(self):
mock = Mock()
mock.something(3, fish=None)
mock.something_else.something(6, cake=sentinel.Cake)
self.assertEqual(mock.something_else.method_calls,
[("something", (6,), {'cake': sentinel.Cake})],
"method calls not recorded correctly")
self.assertEqual(mock.method_calls, [
("something", (3,), {'fish': None}),
("something_else.something", (6,), {'cake': sentinel.Cake})
],
"method calls not recorded correctly")
def test_method_calls_compare_easily(self):
mock = Mock()
mock.something()
self.assertEqual(mock.method_calls, [('something',)])
self.assertEqual(mock.method_calls, [('something', (), {})])
mock = Mock()
mock.something('different')
self.assertEqual(mock.method_calls, [('something', ('different',))])
self.assertEqual(mock.method_calls,
[('something', ('different',), {})])
mock = Mock()
mock.something(x=1)
self.assertEqual(mock.method_calls, [('something', {'x': 1})])
self.assertEqual(mock.method_calls, [('something', (), {'x': 1})])
mock = Mock()
mock.something('different', some='more')
self.assertEqual(mock.method_calls, [
('something', ('different',), {'some': 'more'})
])
def test_only_allowed_methods_exist(self):
for spec in ['something'], ('something',):
for arg in 'spec', 'spec_set':
mock = Mock(**{arg: spec})
# this should be allowed
mock.something
self.assertRaisesRegex(
AttributeError,
"Mock object has no attribute 'something_else'",
getattr, mock, 'something_else'
)
def test_from_spec(self):
class Something(object):
x = 3
__something__ = None
def y(self): pass
def test_attributes(mock):
# should work
mock.x
mock.y
mock.__something__
self.assertRaisesRegex(
AttributeError,
"Mock object has no attribute 'z'",
getattr, mock, 'z'
)
self.assertRaisesRegex(
AttributeError,
"Mock object has no attribute '__foobar__'",
getattr, mock, '__foobar__'
)
test_attributes(Mock(spec=Something))
test_attributes(Mock(spec=Something()))
def test_wraps_calls(self):
real = Mock()
mock = Mock(wraps=real)
self.assertEqual(mock(), real())
real.reset_mock()
mock(1, 2, fish=3)
real.assert_called_with(1, 2, fish=3)
def test_wraps_prevents_automatic_creation_of_mocks(self):
class Real(object):
pass
real = Real()
mock = Mock(wraps=real)
self.assertRaises(AttributeError, lambda: mock.new_attr())
def test_wraps_call_with_nondefault_return_value(self):
real = Mock()
mock = Mock(wraps=real)
mock.return_value = 3
self.assertEqual(mock(), 3)
self.assertFalse(real.called)
def test_wraps_attributes(self):
class Real(object):
attribute = Mock()
real = Real()
mock = Mock(wraps=real)
self.assertEqual(mock.attribute(), real.attribute())
self.assertRaises(AttributeError, lambda: mock.fish)
self.assertNotEqual(mock.attribute, real.attribute)
result = mock.attribute.frog(1, 2, fish=3)
Real.attribute.frog.assert_called_with(1, 2, fish=3)
self.assertEqual(result, Real.attribute.frog())
def test_customize_wrapped_object_with_side_effect_iterable_with_default(self):
class Real(object):
def method(self):
return sentinel.ORIGINAL_VALUE
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = [sentinel.VALUE1, DEFAULT]
self.assertEqual(mock.method(), sentinel.VALUE1)
self.assertEqual(mock.method(), sentinel.ORIGINAL_VALUE)
self.assertRaises(StopIteration, mock.method)
def test_customize_wrapped_object_with_side_effect_iterable(self):
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = [sentinel.VALUE1, sentinel.VALUE2]
self.assertEqual(mock.method(), sentinel.VALUE1)
self.assertEqual(mock.method(), sentinel.VALUE2)
self.assertRaises(StopIteration, mock.method)
def test_customize_wrapped_object_with_side_effect_exception(self):
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = RuntimeError
self.assertRaises(RuntimeError, mock.method)
def test_customize_wrapped_object_with_side_effect_function(self):
class Real(object):
def method(self): pass
def side_effect():
return sentinel.VALUE
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = side_effect
self.assertEqual(mock.method(), sentinel.VALUE)
def test_customize_wrapped_object_with_return_value(self):
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.return_value = sentinel.VALUE
self.assertEqual(mock.method(), sentinel.VALUE)
def test_customize_wrapped_object_with_return_value_and_side_effect(self):
# side_effect should always take precedence over return_value.
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = [sentinel.VALUE1, sentinel.VALUE2]
mock.method.return_value = sentinel.WRONG_VALUE
self.assertEqual(mock.method(), sentinel.VALUE1)
self.assertEqual(mock.method(), sentinel.VALUE2)
self.assertRaises(StopIteration, mock.method)
def test_customize_wrapped_object_with_return_value_and_side_effect2(self):
# side_effect can return DEFAULT to default to return_value
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = lambda: DEFAULT
mock.method.return_value = sentinel.VALUE
self.assertEqual(mock.method(), sentinel.VALUE)
def test_customize_wrapped_object_with_return_value_and_side_effect_default(self):
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = [sentinel.VALUE1, DEFAULT]
mock.method.return_value = sentinel.RETURN
self.assertEqual(mock.method(), sentinel.VALUE1)
self.assertEqual(mock.method(), sentinel.RETURN)
self.assertRaises(StopIteration, mock.method)
def test_magic_method_wraps_dict(self):
# bpo-25597: MagicMock with wrap doesn't call wrapped object's
# method for magic methods with default values.
data = {'foo': 'bar'}
wrapped_dict = MagicMock(wraps=data)
self.assertEqual(wrapped_dict.get('foo'), 'bar')
# Accessing key gives a MagicMock
self.assertIsInstance(wrapped_dict['foo'], MagicMock)
# __contains__ method has a default value of False
self.assertFalse('foo' in wrapped_dict)
# return_value is non-sentinel and takes precedence over wrapped value.
wrapped_dict.get.return_value = 'return_value'
self.assertEqual(wrapped_dict.get('foo'), 'return_value')
# return_value is sentinel and hence wrapped value is returned.
wrapped_dict.get.return_value = sentinel.DEFAULT
self.assertEqual(wrapped_dict.get('foo'), 'bar')
self.assertEqual(wrapped_dict.get('baz'), None)
self.assertIsInstance(wrapped_dict['baz'], MagicMock)
self.assertFalse('bar' in wrapped_dict)
data['baz'] = 'spam'
self.assertEqual(wrapped_dict.get('baz'), 'spam')
self.assertIsInstance(wrapped_dict['baz'], MagicMock)
self.assertFalse('bar' in wrapped_dict)
del data['baz']
self.assertEqual(wrapped_dict.get('baz'), None)
def test_magic_method_wraps_class(self):
class Foo:
def __getitem__(self, index):
return index
def __custom_method__(self):
return "foo"
klass = MagicMock(wraps=Foo)
obj = klass()
self.assertEqual(obj.__getitem__(2), 2)
self.assertEqual(obj[2], 2)
self.assertEqual(obj.__custom_method__(), "foo")
def test_exceptional_side_effect(self):
mock = Mock(side_effect=AttributeError)
self.assertRaises(AttributeError, mock)
mock = Mock(side_effect=AttributeError('foo'))
self.assertRaises(AttributeError, mock)
def test_baseexceptional_side_effect(self):
mock = Mock(side_effect=KeyboardInterrupt)
self.assertRaises(KeyboardInterrupt, mock)
mock = Mock(side_effect=KeyboardInterrupt('foo'))
self.assertRaises(KeyboardInterrupt, mock)
def test_assert_called_with_message(self):
mock = Mock()
self.assertRaisesRegex(AssertionError, 'not called',
mock.assert_called_with)
def test_assert_called_once_with_message(self):
mock = Mock(name='geoffrey')
self.assertRaisesRegex(AssertionError,
r"Expected 'geoffrey' to be called once\.",
mock.assert_called_once_with)
def test__name__(self):
mock = Mock()
self.assertRaises(AttributeError, lambda: mock.__name__)
mock.__name__ = 'foo'
self.assertEqual(mock.__name__, 'foo')
def test_spec_list_subclass(self):
class Sub(list):
pass
mock = Mock(spec=Sub(['foo']))
mock.append(3)
mock.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock, 'foo')
def test_spec_class(self):
class X(object):
pass
mock = Mock(spec=X)
self.assertIsInstance(mock, X)
mock = Mock(spec=X())
self.assertIsInstance(mock, X)
self.assertIs(mock.__class__, X)
self.assertEqual(Mock().__class__.__name__, 'Mock')
mock = Mock(spec_set=X)
self.assertIsInstance(mock, X)
mock = Mock(spec_set=X())
self.assertIsInstance(mock, X)
def test_spec_class_no_object_base(self):
class X:
pass
mock = Mock(spec=X)
self.assertIsInstance(mock, X)
mock = Mock(spec=X())
self.assertIsInstance(mock, X)
self.assertIs(mock.__class__, X)
self.assertEqual(Mock().__class__.__name__, 'Mock')
mock = Mock(spec_set=X)
self.assertIsInstance(mock, X)
mock = Mock(spec_set=X())
self.assertIsInstance(mock, X)
def test_setting_attribute_with_spec_set(self):
class X(object):
y = 3
mock = Mock(spec=X)
mock.x = 'foo'
mock = Mock(spec_set=X)
def set_attr():
mock.x = 'foo'
mock.y = 'foo'
self.assertRaises(AttributeError, set_attr)
def test_copy(self):
current = sys.getrecursionlimit()
self.addCleanup(sys.setrecursionlimit, current)
# can't use sys.maxint as this doesn't exist in Python 3
sys.setrecursionlimit(int(10e8))
# this segfaults without the fix in place
copy.copy(Mock())
def test_subclass_with_properties(self):
class SubClass(Mock):
def _get(self):
return 3
def _set(self, value):
raise NameError('strange error')
some_attribute = property(_get, _set)
s = SubClass(spec_set=SubClass)
self.assertEqual(s.some_attribute, 3)
def test():
s.some_attribute = 3
self.assertRaises(NameError, test)
def test():
s.foo = 'bar'
self.assertRaises(AttributeError, test)
def test_setting_call(self):
mock = Mock()
def __call__(self, a):
self._increment_mock_call(a)
return self._mock_call(a)
type(mock).__call__ = __call__
mock('one')
mock.assert_called_with('one')
self.assertRaises(TypeError, mock, 'one', 'two')
def test_dir(self):
mock = Mock()
attrs = set(dir(mock))
type_attrs = set([m for m in dir(Mock) if not m.startswith('_')])
# all public attributes from the type are included
self.assertEqual(set(), type_attrs - attrs)
# creates these attributes
mock.a, mock.b
self.assertIn('a', dir(mock))
self.assertIn('b', dir(mock))
# instance attributes
mock.c = mock.d = None
self.assertIn('c', dir(mock))
self.assertIn('d', dir(mock))
# magic methods
mock.__iter__ = lambda s: iter([])
self.assertIn('__iter__', dir(mock))
def test_dir_from_spec(self):
mock = Mock(spec=unittest.TestCase)
testcase_attrs = set(dir(unittest.TestCase))
attrs = set(dir(mock))
# all attributes from the spec are included
self.assertEqual(set(), testcase_attrs - attrs)
        # an attribute assigned on the mock should appear only once in dir()
mock.version = 3
self.assertEqual(dir(mock).count('version'), 1)
def test_filter_dir(self):
patcher = patch.object(mock, 'FILTER_DIR', False)
patcher.start()
try:
attrs = set(dir(Mock()))
type_attrs = set(dir(Mock))
# ALL attributes from the type are included
self.assertEqual(set(), type_attrs - attrs)
finally:
patcher.stop()
def test_dir_does_not_include_deleted_attributes(self):
mock = Mock()
mock.child.return_value = 1
self.assertIn('child', dir(mock))
del mock.child
self.assertNotIn('child', dir(mock))
def test_configure_mock(self):
mock = Mock(foo='bar')
self.assertEqual(mock.foo, 'bar')
mock = MagicMock(foo='bar')
self.assertEqual(mock.foo, 'bar')
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
mock = Mock(**kwargs)
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
mock = Mock()
mock.configure_mock(**kwargs)
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def assertRaisesWithMsg(self, exception, message, func, *args, **kwargs):
# needed because assertRaisesRegex doesn't work easily with newlines
with self.assertRaises(exception) as context:
func(*args, **kwargs)
msg = str(context.exception)
self.assertEqual(msg, message)
def test_assert_called_with_failure_message(self):
mock = NonCallableMock()
actual = 'not called.'
expected = "mock(1, '2', 3, bar='foo')"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
mock.assert_called_with, 1, '2', 3, bar='foo'
)
mock.foo(1, '2', 3, foo='foo')
asserters = [
mock.foo.assert_called_with, mock.foo.assert_called_once_with
]
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo(1, '2', 3, bar='foo')"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
meth, 1, '2', 3, bar='foo'
)
# just kwargs
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo(bar='foo')"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
meth, bar='foo'
)
# just args
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo(1, 2, 3)"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
meth, 1, 2, 3
)
# empty
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo()"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual), meth
)
def test_mock_calls(self):
mock = MagicMock()
# need to do this because MagicMock.mock_calls used to just return
# a MagicMock which also returned a MagicMock when __eq__ was called
self.assertIs(mock.mock_calls == [], True)
mock = MagicMock()
mock()
expected = [('', (), {})]
self.assertEqual(mock.mock_calls, expected)
mock.foo()
expected.append(call.foo())
self.assertEqual(mock.mock_calls, expected)
# intermediate mock_calls work too
self.assertEqual(mock.foo.mock_calls, [('', (), {})])
mock = MagicMock()
mock().foo(1, 2, 3, a=4, b=5)
expected = [
('', (), {}), ('().foo', (1, 2, 3), dict(a=4, b=5))
]
self.assertEqual(mock.mock_calls, expected)
self.assertEqual(mock.return_value.foo.mock_calls,
[('', (1, 2, 3), dict(a=4, b=5))])
self.assertEqual(mock.return_value.mock_calls,
[('foo', (1, 2, 3), dict(a=4, b=5))])
mock = MagicMock()
mock().foo.bar().baz()
expected = [
('', (), {}), ('().foo.bar', (), {}),
('().foo.bar().baz', (), {})
]
self.assertEqual(mock.mock_calls, expected)
self.assertEqual(mock().mock_calls,
call.foo.bar().baz().call_list())
for kwargs in dict(), dict(name='bar'):
mock = MagicMock(**kwargs)
int(mock.foo)
expected = [('foo.__int__', (), {})]
self.assertEqual(mock.mock_calls, expected)
mock = MagicMock(**kwargs)
mock.a()()
expected = [('a', (), {}), ('a()', (), {})]
self.assertEqual(mock.mock_calls, expected)
self.assertEqual(mock.a().mock_calls, [call()])
mock = MagicMock(**kwargs)
mock(1)(2)(3)
self.assertEqual(mock.mock_calls, call(1)(2)(3).call_list())
self.assertEqual(mock().mock_calls, call(2)(3).call_list())
self.assertEqual(mock()().mock_calls, call(3).call_list())
mock = MagicMock(**kwargs)
mock(1)(2)(3).a.b.c(4)
self.assertEqual(mock.mock_calls,
call(1)(2)(3).a.b.c(4).call_list())
self.assertEqual(mock().mock_calls,
call(2)(3).a.b.c(4).call_list())
self.assertEqual(mock()().mock_calls,
call(3).a.b.c(4).call_list())
mock = MagicMock(**kwargs)
int(mock().foo.bar().baz())
last_call = ('().foo.bar().baz().__int__', (), {})
self.assertEqual(mock.mock_calls[-1], last_call)
self.assertEqual(mock().mock_calls,
call.foo.bar().baz().__int__().call_list())
self.assertEqual(mock().foo.bar().mock_calls,
call.baz().__int__().call_list())
self.assertEqual(mock().foo.bar().baz.mock_calls,
call().__int__().call_list())
def test_child_mock_call_equal(self):
m = Mock()
result = m()
result.wibble()
# parent looks like this:
self.assertEqual(m.mock_calls, [call(), call().wibble()])
# but child should look like this:
self.assertEqual(result.mock_calls, [call.wibble()])
def test_mock_call_not_equal_leaf(self):
m = Mock()
m.foo().something()
self.assertNotEqual(m.mock_calls[1], call.foo().different())
self.assertEqual(m.mock_calls[0], call.foo())
def test_mock_call_not_equal_non_leaf(self):
m = Mock()
m.foo().bar()
self.assertNotEqual(m.mock_calls[1], call.baz().bar())
self.assertNotEqual(m.mock_calls[0], call.baz())
def test_mock_call_not_equal_non_leaf_params_different(self):
m = Mock()
m.foo(x=1).bar()
# This isn't ideal, but there's no way to fix it without breaking backwards compatibility:
self.assertEqual(m.mock_calls[1], call.foo(x=2).bar())
def test_mock_call_not_equal_non_leaf_attr(self):
m = Mock()
m.foo.bar()
self.assertNotEqual(m.mock_calls[0], call.baz.bar())
def test_mock_call_not_equal_non_leaf_call_versus_attr(self):
m = Mock()
m.foo.bar()
self.assertNotEqual(m.mock_calls[0], call.foo().bar())
def test_mock_call_repr(self):
m = Mock()
m.foo().bar().baz.bob()
self.assertEqual(repr(m.mock_calls[0]), 'call.foo()')
self.assertEqual(repr(m.mock_calls[1]), 'call.foo().bar()')
self.assertEqual(repr(m.mock_calls[2]), 'call.foo().bar().baz.bob()')
def test_mock_call_repr_loop(self):
m = Mock()
m.foo = m
repr(m.foo())
self.assertRegex(repr(m.foo()), r"<Mock name='mock\(\)' id='\d+'>")
def test_mock_calls_contains(self):
m = Mock()
self.assertFalse([call()] in m.mock_calls)
def test_subclassing(self):
class Subclass(Mock):
pass
mock = Subclass()
self.assertIsInstance(mock.foo, Subclass)
self.assertIsInstance(mock(), Subclass)
class Subclass(Mock):
def _get_child_mock(self, **kwargs):
return Mock(**kwargs)
mock = Subclass()
self.assertNotIsInstance(mock.foo, Subclass)
self.assertNotIsInstance(mock(), Subclass)
def test_arg_lists(self):
mocks = [
Mock(),
MagicMock(),
NonCallableMock(),
NonCallableMagicMock()
]
def assert_attrs(mock):
names = 'call_args_list', 'method_calls', 'mock_calls'
for name in names:
attr = getattr(mock, name)
self.assertIsInstance(attr, _CallList)
self.assertIsInstance(attr, list)
self.assertEqual(attr, [])
for mock in mocks:
assert_attrs(mock)
if callable(mock):
mock()
mock(1, 2)
mock(a=3)
mock.reset_mock()
assert_attrs(mock)
mock.foo()
mock.foo.bar(1, a=3)
mock.foo(1).bar().baz(3)
mock.reset_mock()
assert_attrs(mock)
def test_call_args_two_tuple(self):
mock = Mock()
mock(1, a=3)
mock(2, b=4)
self.assertEqual(len(mock.call_args), 2)
self.assertEqual(mock.call_args.args, (2,))
self.assertEqual(mock.call_args.kwargs, dict(b=4))
expected_list = [((1,), dict(a=3)), ((2,), dict(b=4))]
for expected, call_args in zip(expected_list, mock.call_args_list):
self.assertEqual(len(call_args), 2)
self.assertEqual(expected[0], call_args[0])
self.assertEqual(expected[1], call_args[1])
def test_side_effect_iterator(self):
mock = Mock(side_effect=iter([1, 2, 3]))
self.assertEqual([mock(), mock(), mock()], [1, 2, 3])
self.assertRaises(StopIteration, mock)
mock = MagicMock(side_effect=['a', 'b', 'c'])
self.assertEqual([mock(), mock(), mock()], ['a', 'b', 'c'])
self.assertRaises(StopIteration, mock)
mock = Mock(side_effect='ghi')
self.assertEqual([mock(), mock(), mock()], ['g', 'h', 'i'])
self.assertRaises(StopIteration, mock)
class Foo(object):
pass
mock = MagicMock(side_effect=Foo)
self.assertIsInstance(mock(), Foo)
mock = Mock(side_effect=Iter())
self.assertEqual([mock(), mock(), mock(), mock()],
['this', 'is', 'an', 'iter'])
self.assertRaises(StopIteration, mock)
def test_side_effect_iterator_exceptions(self):
for Klass in Mock, MagicMock:
iterable = (ValueError, 3, KeyError, 6)
m = Klass(side_effect=iterable)
self.assertRaises(ValueError, m)
self.assertEqual(m(), 3)
self.assertRaises(KeyError, m)
self.assertEqual(m(), 6)
def test_side_effect_setting_iterator(self):
mock = Mock()
mock.side_effect = iter([1, 2, 3])
self.assertEqual([mock(), mock(), mock()], [1, 2, 3])
self.assertRaises(StopIteration, mock)
side_effect = mock.side_effect
self.assertIsInstance(side_effect, type(iter([])))
mock.side_effect = ['a', 'b', 'c']
self.assertEqual([mock(), mock(), mock()], ['a', 'b', 'c'])
self.assertRaises(StopIteration, mock)
side_effect = mock.side_effect
self.assertIsInstance(side_effect, type(iter([])))
this_iter = Iter()
mock.side_effect = this_iter
self.assertEqual([mock(), mock(), mock(), mock()],
['this', 'is', 'an', 'iter'])
self.assertRaises(StopIteration, mock)
self.assertIs(mock.side_effect, this_iter)
def test_side_effect_iterator_default(self):
mock = Mock(return_value=2)
mock.side_effect = iter([1, DEFAULT])
self.assertEqual([mock(), mock()], [1, 2])
def test_assert_has_calls_any_order(self):
mock = Mock()
mock(1, 2)
mock(a=3)
mock(3, 4)
mock(b=6)
mock(b=6)
kalls = [
call(1, 2), ({'a': 3},),
((3, 4),), ((), {'a': 3}),
('', (1, 2)), ('', {'a': 3}),
('', (1, 2), {}), ('', (), {'a': 3})
]
for kall in kalls:
mock.assert_has_calls([kall], any_order=True)
for kall in call(1, '2'), call(b=3), call(), 3, None, 'foo':
self.assertRaises(
AssertionError, mock.assert_has_calls,
[kall], any_order=True
)
kall_lists = [
[call(1, 2), call(b=6)],
[call(3, 4), call(1, 2)],
[call(b=6), call(b=6)],
]
for kall_list in kall_lists:
mock.assert_has_calls(kall_list, any_order=True)
kall_lists = [
[call(b=6), call(b=6), call(b=6)],
[call(1, 2), call(1, 2)],
[call(3, 4), call(1, 2), call(5, 7)],
[call(b=6), call(3, 4), call(b=6), call(1, 2), call(b=6)],
]
for kall_list in kall_lists:
self.assertRaises(
AssertionError, mock.assert_has_calls,
kall_list, any_order=True
)
def test_assert_has_calls(self):
kalls1 = [
call(1, 2), ({'a': 3},),
((3, 4),), call(b=6),
('', (1,), {'b': 6}),
]
kalls2 = [call.foo(), call.bar(1)]
kalls2.extend(call.spam().baz(a=3).call_list())
kalls2.extend(call.bam(set(), foo={}).fish([1]).call_list())
mocks = []
for mock in Mock(), MagicMock():
mock(1, 2)
mock(a=3)
mock(3, 4)
mock(b=6)
mock(1, b=6)
mocks.append((mock, kalls1))
mock = Mock()
mock.foo()
mock.bar(1)
mock.spam().baz(a=3)
mock.bam(set(), foo={}).fish([1])
mocks.append((mock, kalls2))
for mock, kalls in mocks:
for i in range(len(kalls)):
for step in 1, 2, 3:
these = kalls[i:i+step]
mock.assert_has_calls(these)
if len(these) > 1:
self.assertRaises(
AssertionError,
mock.assert_has_calls,
list(reversed(these))
)
def test_assert_has_calls_nested_spec(self):
class Something:
def __init__(self): pass
def meth(self, a, b, c, d=None): pass
class Foo:
def __init__(self, a): pass
def meth1(self, a, b): pass
mock_class = create_autospec(Something)
for m in [mock_class, mock_class()]:
m.meth(1, 2, 3, d=1)
m.assert_has_calls([call.meth(1, 2, 3, d=1)])
m.assert_has_calls([call.meth(1, 2, 3, 1)])
mock_class.reset_mock()
for m in [mock_class, mock_class()]:
self.assertRaises(AssertionError, m.assert_has_calls, [call.Foo()])
m.Foo(1).meth1(1, 2)
m.assert_has_calls([call.Foo(1), call.Foo(1).meth1(1, 2)])
m.Foo.assert_has_calls([call(1), call().meth1(1, 2)])
mock_class.reset_mock()
invalid_calls = [call.meth(1),
call.non_existent(1),
call.Foo().non_existent(1),
call.Foo().meth(1, 2, 3, 4)]
for kall in invalid_calls:
self.assertRaises(AssertionError,
mock_class.assert_has_calls,
[kall]
)
def test_assert_has_calls_nested_without_spec(self):
m = MagicMock()
m().foo().bar().baz()
m.one().two().three()
calls = call.one().two().three().call_list()
m.assert_has_calls(calls)
def test_assert_has_calls_with_function_spec(self):
def f(a, b, c, d=None): pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock(4, 5, c=6, d=7)
mock(10, 11, c=12)
calls = [
('', (1, 2, 3), {}),
('', (4, 5, 6), {'d': 7}),
((10, 11, 12), {}),
]
mock.assert_has_calls(calls)
mock.assert_has_calls(calls, any_order=True)
mock.assert_has_calls(calls[1:])
mock.assert_has_calls(calls[1:], any_order=True)
mock.assert_has_calls(calls[:-1])
mock.assert_has_calls(calls[:-1], any_order=True)
# Reversed order
calls = list(reversed(calls))
with self.assertRaises(AssertionError):
mock.assert_has_calls(calls)
mock.assert_has_calls(calls, any_order=True)
with self.assertRaises(AssertionError):
mock.assert_has_calls(calls[1:])
mock.assert_has_calls(calls[1:], any_order=True)
with self.assertRaises(AssertionError):
mock.assert_has_calls(calls[:-1])
mock.assert_has_calls(calls[:-1], any_order=True)
def test_assert_has_calls_not_matching_spec_error(self):
def f(x=None): pass
mock = Mock(spec=f)
mock(1)
with self.assertRaisesRegex(
AssertionError,
'^{}$'.format(
re.escape('Calls not found.\n'
'Expected: [call()]\n'
'Actual: [call(1)]'))) as cm:
mock.assert_has_calls([call()])
self.assertIsNone(cm.exception.__cause__)
with self.assertRaisesRegex(
AssertionError,
'^{}$'.format(
re.escape(
'Error processing expected calls.\n'
"Errors: [None, TypeError('too many positional arguments')]\n"
"Expected: [call(), call(1, 2)]\n"
'Actual: [call(1)]').replace(
"arguments\\'", "arguments\\',?"
))) as cm:
mock.assert_has_calls([call(), call(1, 2)])
self.assertIsInstance(cm.exception.__cause__, TypeError)
def test_assert_any_call(self):
mock = Mock()
mock(1, 2)
mock(a=3)
mock(1, b=6)
mock.assert_any_call(1, 2)
mock.assert_any_call(a=3)
mock.assert_any_call(1, b=6)
self.assertRaises(
AssertionError,
mock.assert_any_call
)
self.assertRaises(
AssertionError,
mock.assert_any_call,
1, 3
)
self.assertRaises(
AssertionError,
mock.assert_any_call,
a=4
)
def test_assert_any_call_with_function_spec(self):
def f(a, b, c, d=None): pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock(4, 5, c=6, d=7)
mock.assert_any_call(1, 2, 3)
mock.assert_any_call(a=1, b=2, c=3)
mock.assert_any_call(4, 5, 6, 7)
mock.assert_any_call(a=4, b=5, c=6, d=7)
self.assertRaises(AssertionError, mock.assert_any_call,
1, b=3, c=2)
# Expected call doesn't match the spec's signature
with self.assertRaises(AssertionError) as cm:
mock.assert_any_call(e=8)
self.assertIsInstance(cm.exception.__cause__, TypeError)
def test_mock_calls_create_autospec(self):
def f(a, b): pass
obj = Iter()
obj.f = f
funcs = [
create_autospec(f),
create_autospec(obj).f
]
for func in funcs:
func(1, 2)
func(3, 4)
self.assertEqual(
func.mock_calls, [call(1, 2), call(3, 4)]
)
#Issue21222
def test_create_autospec_with_name(self):
m = mock.create_autospec(object(), name='sweet_func')
self.assertIn('sweet_func', repr(m))
#Issue23078
def test_create_autospec_classmethod_and_staticmethod(self):
class TestClass:
@classmethod
def class_method(cls): pass
@staticmethod
def static_method(): pass
for method in ('class_method', 'static_method'):
with self.subTest(method=method):
mock_method = mock.create_autospec(getattr(TestClass, method))
mock_method()
mock_method.assert_called_once_with()
self.assertRaises(TypeError, mock_method, 'extra_arg')
#Issue21238
def test_mock_unsafe(self):
m = Mock()
msg = "Attributes cannot start with 'assert' or its misspellings"
with self.assertRaisesRegex(AttributeError, msg):
m.assert_foo_call()
with self.assertRaisesRegex(AttributeError, msg):
m.assret_foo_call()
with self.assertRaisesRegex(AttributeError, msg):
m.asert_foo_call()
with self.assertRaisesRegex(AttributeError, msg):
m.aseert_foo_call()
with self.assertRaisesRegex(AttributeError, msg):
m.assrt_foo_call()
m = Mock(unsafe=True)
m.assert_foo_call()
m.assret_foo_call()
m.asert_foo_call()
m.aseert_foo_call()
m.assrt_foo_call()
#Issue21262
def test_assert_not_called(self):
m = Mock()
m.hello.assert_not_called()
m.hello()
with self.assertRaises(AssertionError):
m.hello.assert_not_called()
def test_assert_not_called_message(self):
m = Mock()
m(1, 2)
self.assertRaisesRegex(AssertionError,
re.escape("Calls: [call(1, 2)]"),
m.assert_not_called)
def test_assert_called(self):
m = Mock()
with self.assertRaises(AssertionError):
m.hello.assert_called()
m.hello()
m.hello.assert_called()
m.hello()
m.hello.assert_called()
def test_assert_called_once(self):
m = Mock()
with self.assertRaises(AssertionError):
m.hello.assert_called_once()
m.hello()
m.hello.assert_called_once()
m.hello()
with self.assertRaises(AssertionError):
m.hello.assert_called_once()
def test_assert_called_once_message(self):
m = Mock()
m(1, 2)
m(3)
self.assertRaisesRegex(AssertionError,
re.escape("Calls: [call(1, 2), call(3)]"),
m.assert_called_once)
def test_assert_called_once_message_not_called(self):
m = Mock()
with self.assertRaises(AssertionError) as e:
m.assert_called_once()
self.assertNotIn("Calls:", str(e.exception))
#Issue37212 printout of keyword args now preserves the original order
def test_ordered_call_signature(self):
m = Mock()
m.hello(name='hello', daddy='hero')
text = "call(name='hello', daddy='hero')"
self.assertEqual(repr(m.hello.call_args), text)
#Issue21270 overrides tuple methods for mock.call objects
def test_override_tuple_methods(self):
c = call.count()
i = call.index(132,'hello')
m = Mock()
m.count()
m.index(132,"hello")
self.assertEqual(m.method_calls[0], c)
self.assertEqual(m.method_calls[1], i)
def test_reset_return_sideeffect(self):
m = Mock(return_value=10, side_effect=[2,3])
m.reset_mock(return_value=True, side_effect=True)
self.assertIsInstance(m.return_value, Mock)
self.assertEqual(m.side_effect, None)
def test_reset_return(self):
m = Mock(return_value=10, side_effect=[2,3])
m.reset_mock(return_value=True)
self.assertIsInstance(m.return_value, Mock)
self.assertNotEqual(m.side_effect, None)
def test_reset_sideeffect(self):
m = Mock(return_value=10, side_effect=[2, 3])
m.reset_mock(side_effect=True)
self.assertEqual(m.return_value, 10)
self.assertEqual(m.side_effect, None)
def test_reset_return_with_children(self):
m = MagicMock(f=MagicMock(return_value=1))
self.assertEqual(m.f(), 1)
m.reset_mock(return_value=True)
self.assertNotEqual(m.f(), 1)
def test_reset_return_with_children_side_effect(self):
m = MagicMock(f=MagicMock(side_effect=[2, 3]))
self.assertNotEqual(m.f.side_effect, None)
m.reset_mock(side_effect=True)
self.assertEqual(m.f.side_effect, None)
def test_mock_add_spec(self):
class _One(object):
one = 1
class _Two(object):
two = 2
class Anything(object):
one = two = three = 'four'
klasses = [
Mock, MagicMock, NonCallableMock, NonCallableMagicMock
]
for Klass in list(klasses):
klasses.append(lambda K=Klass: K(spec=Anything))
klasses.append(lambda K=Klass: K(spec_set=Anything))
for Klass in klasses:
for kwargs in dict(), dict(spec_set=True):
mock = Klass()
#no error
mock.one, mock.two, mock.three
for One, Two in [(_One, _Two), (['one'], ['two'])]:
for kwargs in dict(), dict(spec_set=True):
mock.mock_add_spec(One, **kwargs)
mock.one
self.assertRaises(
AttributeError, getattr, mock, 'two'
)
self.assertRaises(
AttributeError, getattr, mock, 'three'
)
if 'spec_set' in kwargs:
self.assertRaises(
AttributeError, setattr, mock, 'three', None
)
mock.mock_add_spec(Two, **kwargs)
self.assertRaises(
AttributeError, getattr, mock, 'one'
)
mock.two
self.assertRaises(
AttributeError, getattr, mock, 'three'
)
if 'spec_set' in kwargs:
self.assertRaises(
AttributeError, setattr, mock, 'three', None
)
# note that creating a mock, setting an instance attribute, and
# *then* setting a spec doesn't work. Not the intended use case
def test_mock_add_spec_magic_methods(self):
for Klass in MagicMock, NonCallableMagicMock:
mock = Klass()
int(mock)
mock.mock_add_spec(object)
self.assertRaises(TypeError, int, mock)
mock = Klass()
mock['foo']
            mock.__int__.return_value = 4
mock.mock_add_spec(int)
self.assertEqual(int(mock), 4)
self.assertRaises(TypeError, lambda: mock['foo'])
def test_adding_child_mock(self):
for Klass in (NonCallableMock, Mock, MagicMock, NonCallableMagicMock,
AsyncMock):
mock = Klass()
mock.foo = Mock()
mock.foo()
self.assertEqual(mock.method_calls, [call.foo()])
self.assertEqual(mock.mock_calls, [call.foo()])
mock = Klass()
mock.bar = Mock(name='name')
mock.bar()
self.assertEqual(mock.method_calls, [])
self.assertEqual(mock.mock_calls, [])
# mock with an existing _new_parent but no name
mock = Klass()
mock.baz = MagicMock()()
mock.baz()
self.assertEqual(mock.method_calls, [])
self.assertEqual(mock.mock_calls, [])
def test_adding_return_value_mock(self):
for Klass in Mock, MagicMock:
mock = Klass()
mock.return_value = MagicMock()
mock()()
self.assertEqual(mock.mock_calls, [call(), call()()])
def test_manager_mock(self):
class Foo(object):
one = 'one'
two = 'two'
manager = Mock()
p1 = patch.object(Foo, 'one')
p2 = patch.object(Foo, 'two')
mock_one = p1.start()
self.addCleanup(p1.stop)
mock_two = p2.start()
self.addCleanup(p2.stop)
manager.attach_mock(mock_one, 'one')
manager.attach_mock(mock_two, 'two')
Foo.two()
Foo.one()
self.assertEqual(manager.mock_calls, [call.two(), call.one()])
def test_magic_methods_mock_calls(self):
for Klass in Mock, MagicMock:
m = Klass()
m.__int__ = Mock(return_value=3)
m.__float__ = MagicMock(return_value=3.0)
int(m)
float(m)
self.assertEqual(m.mock_calls, [call.__int__(), call.__float__()])
self.assertEqual(m.method_calls, [])
def test_mock_open_reuse_issue_21750(self):
mocked_open = mock.mock_open(read_data='data')
f1 = mocked_open('a-name')
f1_data = f1.read()
f2 = mocked_open('another-name')
f2_data = f2.read()
self.assertEqual(f1_data, f2_data)
def test_mock_open_dunder_iter_issue(self):
# Test dunder_iter method generates the expected result and
# consumes the iterator.
mocked_open = mock.mock_open(read_data='Remarkable\nNorwegian Blue')
f1 = mocked_open('a-name')
lines = [line for line in f1]
self.assertEqual(lines[0], 'Remarkable\n')
self.assertEqual(lines[1], 'Norwegian Blue')
self.assertEqual(list(f1), [])
def test_mock_open_using_next(self):
mocked_open = mock.mock_open(read_data='1st line\n2nd line\n3rd line')
f1 = mocked_open('a-name')
line1 = next(f1)
line2 = f1.__next__()
lines = [line for line in f1]
self.assertEqual(line1, '1st line\n')
self.assertEqual(line2, '2nd line\n')
self.assertEqual(lines[0], '3rd line')
self.assertEqual(list(f1), [])
with self.assertRaises(StopIteration):
next(f1)
def test_mock_open_next_with_readline_with_return_value(self):
mopen = mock.mock_open(read_data='foo\nbarn')
mopen.return_value.readline.return_value = 'abc'
self.assertEqual('abc', next(mopen()))
def test_mock_open_write(self):
# Test exception in file writing write()
mock_namedtemp = mock.mock_open(mock.MagicMock(name='JLV'))
with mock.patch('tempfile.NamedTemporaryFile', mock_namedtemp):
mock_filehandle = mock_namedtemp.return_value
mock_write = mock_filehandle.write
mock_write.side_effect = OSError('Test 2 Error')
def attempt():
tempfile.NamedTemporaryFile().write('asd')
self.assertRaises(OSError, attempt)
def test_mock_open_alter_readline(self):
mopen = mock.mock_open(read_data='foo\nbarn')
mopen.return_value.readline.side_effect = lambda *args:'abc'
first = mopen().readline()
second = mopen().readline()
self.assertEqual('abc', first)
self.assertEqual('abc', second)
def test_mock_open_after_eof(self):
# read, readline and readlines should work after end of file.
_open = mock.mock_open(read_data='foo')
h = _open('bar')
h.read()
self.assertEqual('', h.read())
self.assertEqual('', h.read())
self.assertEqual('', h.readline())
self.assertEqual('', h.readline())
self.assertEqual([], h.readlines())
self.assertEqual([], h.readlines())
def test_mock_parents(self):
for Klass in Mock, MagicMock:
m = Klass()
original_repr = repr(m)
m.return_value = m
self.assertIs(m(), m)
self.assertEqual(repr(m), original_repr)
m.reset_mock()
self.assertIs(m(), m)
self.assertEqual(repr(m), original_repr)
m = Klass()
m.b = m.a
self.assertIn("name='mock.a'", repr(m.b))
self.assertIn("name='mock.a'", repr(m.a))
m.reset_mock()
self.assertIn("name='mock.a'", repr(m.b))
self.assertIn("name='mock.a'", repr(m.a))
m = Klass()
original_repr = repr(m)
m.a = m()
m.a.return_value = m
self.assertEqual(repr(m), original_repr)
self.assertEqual(repr(m.a()), original_repr)
def test_attach_mock(self):
classes = Mock, MagicMock, NonCallableMagicMock, NonCallableMock
for Klass in classes:
for Klass2 in classes:
m = Klass()
m2 = Klass2(name='foo')
m.attach_mock(m2, 'bar')
self.assertIs(m.bar, m2)
self.assertIn("name='mock.bar'", repr(m2))
m.bar.baz(1)
self.assertEqual(m.mock_calls, [call.bar.baz(1)])
self.assertEqual(m.method_calls, [call.bar.baz(1)])
def test_attach_mock_return_value(self):
classes = Mock, MagicMock, NonCallableMagicMock, NonCallableMock
for Klass in Mock, MagicMock:
for Klass2 in classes:
m = Klass()
m2 = Klass2(name='foo')
m.attach_mock(m2, 'return_value')
self.assertIs(m(), m2)
self.assertIn("name='mock()'", repr(m2))
m2.foo()
self.assertEqual(m.mock_calls, call().foo().call_list())
def test_attach_mock_patch_autospec(self):
parent = Mock()
with mock.patch(f'{__name__}.something', autospec=True) as mock_func:
self.assertEqual(mock_func.mock._extract_mock_name(), 'something')
parent.attach_mock(mock_func, 'child')
parent.child(1)
something(2)
mock_func(3)
parent_calls = [call.child(1), call.child(2), call.child(3)]
child_calls = [call(1), call(2), call(3)]
self.assertEqual(parent.mock_calls, parent_calls)
self.assertEqual(parent.child.mock_calls, child_calls)
self.assertEqual(something.mock_calls, child_calls)
self.assertEqual(mock_func.mock_calls, child_calls)
self.assertIn('mock.child', repr(parent.child.mock))
self.assertEqual(mock_func.mock._extract_mock_name(), 'mock.child')
def test_attach_mock_patch_autospec_signature(self):
with mock.patch(f'{__name__}.Something.meth', autospec=True) as mocked:
manager = Mock()
manager.attach_mock(mocked, 'attach_meth')
obj = Something()
obj.meth(1, 2, 3, d=4)
manager.assert_has_calls([call.attach_meth(mock.ANY, 1, 2, 3, d=4)])
obj.meth.assert_has_calls([call(mock.ANY, 1, 2, 3, d=4)])
mocked.assert_has_calls([call(mock.ANY, 1, 2, 3, d=4)])
with mock.patch(f'{__name__}.something', autospec=True) as mocked:
manager = Mock()
manager.attach_mock(mocked, 'attach_func')
something(1)
manager.assert_has_calls([call.attach_func(1)])
something.assert_has_calls([call(1)])
mocked.assert_has_calls([call(1)])
with mock.patch(f'{__name__}.Something', autospec=True) as mocked:
manager = Mock()
manager.attach_mock(mocked, 'attach_obj')
obj = Something()
obj.meth(1, 2, 3, d=4)
manager.assert_has_calls([call.attach_obj(),
call.attach_obj().meth(1, 2, 3, d=4)])
obj.meth.assert_has_calls([call(1, 2, 3, d=4)])
mocked.assert_has_calls([call(), call().meth(1, 2, 3, d=4)])
def test_attribute_deletion(self):
for mock in (Mock(), MagicMock(), NonCallableMagicMock(),
NonCallableMock()):
self.assertTrue(hasattr(mock, 'm'))
del mock.m
self.assertFalse(hasattr(mock, 'm'))
del mock.f
self.assertFalse(hasattr(mock, 'f'))
self.assertRaises(AttributeError, getattr, mock, 'f')
def test_mock_does_not_raise_on_repeated_attribute_deletion(self):
# bpo-20239: Assigning and deleting twice an attribute raises.
for mock in (Mock(), MagicMock(), NonCallableMagicMock(),
NonCallableMock()):
mock.foo = 3
self.assertTrue(hasattr(mock, 'foo'))
self.assertEqual(mock.foo, 3)
del mock.foo
self.assertFalse(hasattr(mock, 'foo'))
mock.foo = 4
self.assertTrue(hasattr(mock, 'foo'))
self.assertEqual(mock.foo, 4)
del mock.foo
self.assertFalse(hasattr(mock, 'foo'))
def test_mock_raises_when_deleting_nonexistent_attribute(self):
for mock in (Mock(), MagicMock(), NonCallableMagicMock(),
NonCallableMock()):
del mock.foo
with self.assertRaises(AttributeError):
del mock.foo
def test_reset_mock_does_not_raise_on_attr_deletion(self):
# bpo-31177: reset_mock should not raise AttributeError when attributes
# were deleted in a mock instance
mock = Mock()
mock.child = True
del mock.child
mock.reset_mock()
self.assertFalse(hasattr(mock, 'child'))
def test_class_assignable(self):
for mock in Mock(), MagicMock():
self.assertNotIsInstance(mock, int)
mock.__class__ = int
self.assertIsInstance(mock, int)
mock.foo
def test_name_attribute_of_call(self):
# bpo-35357: _Call should not disclose any attributes whose names
# may clash with popular ones (such as ".name")
self.assertIsNotNone(call.name)
self.assertEqual(type(call.name), _Call)
self.assertEqual(type(call.name().name), _Call)
def test_parent_attribute_of_call(self):
# bpo-35357: _Call should not disclose any attributes whose names
# may clash with popular ones (such as ".parent")
self.assertIsNotNone(call.parent)
self.assertEqual(type(call.parent), _Call)
self.assertEqual(type(call.parent().parent), _Call)
def test_parent_propagation_with_create_autospec(self):
def foo(a, b): pass
mock = Mock()
mock.child = create_autospec(foo)
mock.child(1, 2)
self.assertRaises(TypeError, mock.child, 1)
self.assertEqual(mock.mock_calls, [call.child(1, 2)])
self.assertIn('mock.child', repr(mock.child.mock))
def test_parent_propagation_with_autospec_attach_mock(self):
def foo(a, b): pass
parent = Mock()
parent.attach_mock(create_autospec(foo, name='bar'), 'child')
parent.child(1, 2)
self.assertRaises(TypeError, parent.child, 1)
self.assertEqual(parent.child.mock_calls, [call.child(1, 2)])
self.assertIn('mock.child', repr(parent.child.mock))
def test_isinstance_under_settrace(self):
# bpo-36593 : __class__ is not set for a class that has __class__
# property defined when it's used with sys.settrace(trace) set.
        # Delete the module to force reimport with the tracing function set;
        # restore the old reference later since there are other tests that are
# dependent on unittest.mock.patch. In testpatch.PatchTest
# test_patch_dict_test_prefix and test_patch_test_prefix not restoring
# causes the objects patched to go out of sync
old_patch = mock_module.patch
# Directly using __setattr__ on unittest.mock causes current imported
# reference to be updated. Use a lambda so that during cleanup the
# re-imported new reference is updated.
self.addCleanup(lambda patch: setattr(mock_module, 'patch', patch),
old_patch)
with patch.dict('sys.modules'):
del sys.modules['mock']
# This trace will stop coverage being measured ;-)
def trace(frame, event, arg): # pragma: no cover
return trace
self.addCleanup(sys.settrace, sys.gettrace())
sys.settrace(trace)
from mock.mock import (
Mock, MagicMock, NonCallableMock, NonCallableMagicMock
)
mocks = [
Mock, MagicMock, NonCallableMock, NonCallableMagicMock, AsyncMock
]
for mock in mocks:
obj = mock(spec=Something)
self.assertIsInstance(obj, Something)
def test_bool_not_called_when_passing_spec_arg(self):
class Something:
def __init__(self):
self.obj_with_bool_func = mock_module.MagicMock()
obj = Something()
with mock_module.patch.object(obj, 'obj_with_bool_func', autospec=True): pass
self.assertEqual(obj.obj_with_bool_func.__bool__.call_count, 0)
if __name__ == '__main__':
unittest.main()
|
import logging
from redash.query_runner import *
from redash.utils import json_dumps
logger = logging.getLogger(__name__)
try:
from influxdb import InfluxDBClusterClient
enabled = True
except ImportError:
enabled = False
def _transform_result(results):
result_columns = []
result_rows = []
for result in results:
for series in result.raw.get('series', []):
for column in series['columns']:
if column not in result_columns:
result_columns.append(column)
tags = series.get('tags', {})
for key in tags.keys():
if key not in result_columns:
result_columns.append(key)
for result in results:
for series in result.raw.get('series', []):
for point in series['values']:
result_row = {}
for column in result_columns:
tags = series.get('tags', {})
if column in tags:
result_row[column] = tags[column]
elif column in series['columns']:
index = series['columns'].index(column)
value = point[index]
result_row[column] = value
result_rows.append(result_row)
return json_dumps({
"columns": [{'name': c} for c in result_columns],
"rows": result_rows
})
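# Hypothetical illustration (not part of the original runner): what
# _transform_result produces for an assumed single-series InfluxDB response.
# _FakeResult only mimics the ``.raw`` attribute that the function reads;
# real result objects come from InfluxDBClusterClient.query().
class _FakeResult(object):
    def __init__(self, raw):
        self.raw = raw


def _example_transform():
    result = _FakeResult({
        'series': [{
            'name': 'cpu',
            'tags': {'host': 'server01'},
            'columns': ['time', 'value'],
            'values': [['2015-01-01T00:00:00Z', 0.55]],
        }]
    })
    # Returns a JSON string with columns [time, value, host] and one row:
    # {"time": "2015-01-01T00:00:00Z", "value": 0.55, "host": "server01"}
    return _transform_result([result])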
class InfluxDB(BaseQueryRunner):
noop_query = "show measurements limit 1"
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'url': {
'type': 'string'
}
},
'required': ['url']
}
@classmethod
def enabled(cls):
return enabled
@classmethod
def annotate_query(cls):
return False
@classmethod
def type(cls):
return "influxdb"
def run_query(self, query, user):
client = InfluxDBClusterClient.from_DSN(self.configuration['url'])
logger.debug("influxdb url: %s", self.configuration['url'])
logger.debug("influxdb got query: %s", query)
try:
results = client.query(query)
if not isinstance(results, list):
results = [results]
json_data = _transform_result(results)
error = None
except Exception as ex:
json_data = None
            error = str(ex)
return json_data, error
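def _example_usage():  # pragma: no cover
    """Hypothetical sketch (assumption, not part of redash): exercise a runner
    built with a configuration matching configuration_schema(). The DSN below
    is a made-up placeholder in whatever form InfluxDBClusterClient.from_DSN
    accepts."""
    runner = InfluxDB({'url': 'influxdb://localhost:8086/mydb'})
    return runner.run_query(InfluxDB.noop_query, user=None)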
register(InfluxDB)
|
import unittest
from pyramid import testing
class ViewTests(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def test_my_view(self):
from formhelpers2.views import comment
request = testing.DummyRequest()
info = comment(request)
self.assertTrue(hasattr(info['forms'], 'comment_form'))
|
"""
Python module presenting an API to an ELM327 serial interface
(C) 2015 Jamie Fraser <fwaggle@fwaggle.org>
http://github.com/fwaggle/pyELM327
Please see License.txt and Readme.md.
"""
__pids ={
0x01: {
# TODO: ignoring fuel system #2 atm
0x03: {
'Name': 'Fuel system status',
'Units': '',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) [A-Z0-9]{2} $',
'Value': lambda m: decode_0x03(int(m.group(1),16))},
0x04: {
'Name': 'Calculated engine load value',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x05: {
'Name': 'Engine coolant temperature',
'Units': '*C',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) - 40 },
0x06: {
'Name': 'Short term fuel % trim - Bank 1',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16)-128) * 100.0 / 128 },
0x07: {
'Name': 'Long term fuel % trim - Bank 1',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16)-128) * 100.0 / 128 },
0x08: {
'Name': 'Short term fuel % trim - Bank 2',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16)-128) * 100.0 / 128 },
0x09: {
'Name': 'Long term fuel % trim - Bank 2',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16)-128) * 100.0 / 128 },
0x0A: {
'Name': 'Fuel pressure',
'Units': 'kPa (gauge)',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 3 },
0x0B: {
'Name': 'Intake manifold absolute pressure',
'Units': 'kPa (absolute)',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) },
0x0C: {
'Name': 'Engine RPM',
'Units': 'RPM',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
'Value': lambda m: ((int(m.group(1),16) * 256) + int(m.group(2), 16))/4.0},
0x0D: {
'Name': 'Vehicle speed',
'Units': 'km/h',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) },
0x0E: {
'Name': 'Timing advance',
'Units': '* rel #1 cylinder',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16) - 128) / 2.0 },
0x0F: {
'Name': 'Intake air temperature',
'Units': '*C',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) - 40 },
0x10: {
'Name': 'MAF Sensor air flow rate',
'Units': 'grams/sec',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
'Value': lambda m: ((int(m.group(1),16) * 256) + int(m.group(2), 16))/100.0},
0x11: {
'Name': 'Throttle position',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16) * 100.0) / 255 },
0x12: {
'Name': 'Commanded secondary air status',
'Units': 'Bit-encoded',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) },
0x13: {
'Name': 'Oxygen sensors present',
'Units': 'Bit-encoded',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) },
# NOTE: We currently throw away the fuel trim readings for these PIDs
0x14: {
'Name': 'Bank 1, Sensor 1: Oxygen sensor voltage',
'Units': 'V',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) [A-Z0-9]{2} $',
'Value': lambda m: ((int(m.group(1),16) / 200))},
0x15: {
'Name': 'Bank 1, Sensor 2: Oxygen sensor voltage',
'Units': 'V',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) [A-Z0-9]{2} $',
'Value': lambda m: ((int(m.group(1),16) / 200))},
0x16: {
'Name': 'Bank 1, Sensor 3: Oxygen sensor voltage',
'Units': 'V',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) [A-Z0-9]{2} $',
'Value': lambda m: ((int(m.group(1),16) / 200))},
0x17: {
'Name': 'Bank 1, Sensor 4 Oxygen sensor voltage',
'Units': 'V',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) [A-Z0-9]{2} $',
'Value': lambda m: ((int(m.group(1),16) / 200))},
0x18: {
'Name': 'Bank 2, Sensor 1: Oxygen sensor voltage',
'Units': 'V',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) [A-Z0-9]{2} $',
'Value': lambda m: ((int(m.group(1),16) / 200))},
0x19: {
'Name': 'Bank 2, Sensor 2: Oxygen sensor voltage',
'Units': 'V',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) [A-Z0-9]{2} $',
'Value': lambda m: ((int(m.group(1),16) / 200))},
0x1A: {
'Name': 'Bank 2, Sensor 3: Oxygen sensor voltage',
'Units': 'V',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) [A-Z0-9]{2} $',
'Value': lambda m: ((int(m.group(1),16) / 200))},
0x1B: {
'Name': 'Bank 2, Sensor 4 Oxygen sensor voltage',
'Units': 'V',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) [A-Z0-9]{2} $',
'Value': lambda m: ((int(m.group(1),16) / 200))},
0x1C: {
'Name': 'OBD standards this vehicle conforms to',
'Units': '',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: decode_0x1c(int(m.group(1),16)) },
0x1F: {
'Name': 'Run time since engine start',
'Units': 's',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16) * 256) + int(m.group(2), 16)},
0x21: {
            'Name': 'Distance traveled with malfunction indicator lamp on',
'Units': 'km',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16) * 256) + int(m.group(2), 16)},
0x22: {
'Name': 'Fuel Rail Pressure (relative to manifold vacuum)',
'Units': 'kPa',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
            'Value': lambda m: ((int(m.group(1),16) * 256) + int(m.group(2), 16)) * 0.079},
0x23: {
'Name': 'Fuel Rail Pressure (diesel, or gasoline direct injection)',
'Units': 'kPa',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
            'Value': lambda m: ((int(m.group(1),16) * 256) + int(m.group(2), 16)) * 10},
0x2C: {
'Name': 'Commanded EGR',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16) * 100.0) / 255 },
0x2D: {
'Name': 'EGR Error',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: ((int(m.group(1),16) - 128) * 100.0) / 128 },
0x2E: {
'Name': 'Commanded evaporative purge',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16) * 100.0) / 255 },
0x2F: {
'Name': 'Fuel level input',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16) * 100.0) / 255 },
0x30: {
'Name': '# of warm-ups since codes cleared',
            'Units': '',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) },
0x31: {
'Name': 'Distance traveled since codes cleared',
'Units': 'km',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16) * 256) + int(m.group(2), 16)},
0x33: {
'Name': 'Barometric pressure',
'Units': 'kPa (absolute)',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) },
0x42: {
'Name': 'Control module voltage',
'Units': 'V',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
            'Value': lambda m: ((int(m.group(1),16) * 256) + int(m.group(2), 16)) / 1000.0},
0x43: {
'Name': 'Absolute load value',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
            'Value': lambda m: ((int(m.group(1),16) * 256) + int(m.group(2), 16)) * 100.0 / 255},
0x44: {
'Name': 'Fuel/Air commanded equivalence ratio',
'Units': '',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
            'Value': lambda m: ((int(m.group(1),16) * 256) + int(m.group(2), 16)) / 32768.0},
0x45: {
'Name': 'Relative throttle position',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x46: {
'Name': 'Ambient air temperature',
'Units': '*C',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) - 40 },
0x47: {
'Name': 'Absolute throttle position B',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x48: {
'Name': 'Absolute throttle position C',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x49: {
'Name': 'Absolute throttle position D',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x4A: {
'Name': 'Absolute throttle position E',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x4B: {
'Name': 'Absolute throttle position F',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x4C: {
'Name': 'Commanded throttle actuator',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x4D: {
'Name': 'Time run with MIL on',
'Units': 'minutes',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16) * 256) + int(m.group(2), 16)},
0x4E: {
'Name': 'Time since codes cleared',
'Units': 'minutes',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
'Value': lambda m: (int(m.group(1),16) * 256) + int(m.group(2), 16)},
0x52: {
'Name': 'Fuel ethanol percentage',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x53: {
'Name': 'Absolute evaporative system vapor pressure',
'Units': 'kPa',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
'Value': lambda m: ((int(m.group(1),16) * 256) + int(m.group(2), 16)) / 200.0},
0x54: {
'Name': 'Relative evaporative system vapor pressure',
'Units': 'kPa',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
'Value': lambda m: ((int(m.group(1),16) * 256) + int(m.group(2), 16)) - 32767},
0x59: {
'Name': 'Absolute fuel rail pressure',
'Units': 'kPa',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) ([A-Z0-9]{2}) $',
'Value': lambda m: ((int(m.group(1),16) * 256) + int(m.group(2), 16)) * 10},
0x5A: {
'Name': 'Relative accelerator pedal position',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x5B: {
'Name': 'Hybrid battery pack remaining life',
'Units': '%',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) * 100.0 / 255 },
0x5C: {
            'Name': 'Engine oil temperature',
'Units': '*C',
'Pattern': '^[A-Z0-9]{2} [A-Z0-9]{2} ([A-Z0-9]{2}) $',
'Value': lambda m: int(m.group(1),16) - 40 },
}
}
def decode_0x03(data):
"""
Decode the bit-encoding of Mode 01, PID 03 and return appropriate string.
This is apparently bit-encoded, but only one bit may be set at any one time.
    If you want the raw numeric value, use int(result[:2]) on the returned string.
"""
if data == 1:
return '01: Open loop due to insufficient engine temperature'
elif data == 2:
return '02: Closed loop, using oxygen sensor feedback to determine fuel mix'
elif data == 4:
return '04: Open loop due to engine load OR fuel cut due to deceleration'
elif data == 8:
return '08: Open loop due to system failure'
elif data == 16:
return '16: Closed loop, using at least one oxygen sensor but there is a fault in the feedback system'
else:
return 'NO DATA'
__standards ={
1: 'OBD-II as defined by the CARB',
2: 'OBD as defined by the EPA',
3: 'OBD and OBD-II',
4: 'OBD-I',
5: 'Not OBD compliant',
6: 'EOBD (Europe)',
7: 'EOBD and OBD-II',
8: 'EOBD and OBD',
9: 'EOBD, OBD and OBD II',
10: 'JOBD (Japan)',
11: 'JOBD and OBD II',
12: 'JOBD and EOBD',
13: 'JOBD, EOBD, and OBD II',
14: 'Reserved',
15: 'Reserved',
16: 'Reserved',
17: 'Engine Manufacturer Diagnostics (EMD)',
18: 'Engine Manufacturer Diagnostics Enhanced (EMD+)',
19: 'Heavy Duty On-Board Diagnostics (Child/Partial) (HD OBD-C)',
20: 'Heavy Duty On-Board Diagnostics (HD OBD)',
21: 'World Wide Harmonized OBD (WWH OBD)',
22: 'Reserved',
23: 'Heavy Duty Euro OBD Stage I without NOx control (HD EOBD-I)',
24: 'Heavy Duty Euro OBD Stage I with NOx control (HD EOBD-I N)',
25: 'Heavy Duty Euro OBD Stage II without NOx control (HD EOBD-II)',
26: 'Heavy Duty Euro OBD Stage II with NOx control (HD EOBD-II N)',
27: 'Reserved',
28: 'Brazil OBD Phase 1 (OBDBr-1)',
29: 'Brazil OBD Phase 2 (OBDBr-2)',
30: 'Korean OBD (KOBD)',
31: 'India OBD I (IOBD I)',
32: 'India OBD II (IOBD II)',
33: 'Heavy Duty Euro OBD Stage VI (HD EOBD-IV)',
}
def decode_0x1c(data):
"""
Decode the bit-encoding of Mode 01, PID 1C.
Returns a string describing the standards adhered to by the ECU.
    If you want the raw numeric value, use int(result[:3]) on the returned string.
"""
if data in __standards:
return '%3d: %s' % (data, __standards[data])
else:
return 'NO DATA'
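# Hypothetical usage sketch (not part of the original module): how one of the
# __pids entries can be applied to a raw ELM327 response line. The reply
# "41 0C 1A F8 " is an assumed example answer to a Mode 01 / PID 0x0C
# (Engine RPM) request; real replies come from the serial interface.
def _decode_example():
    import re
    entry = __pids[0x01][0x0C]
    m = re.match(entry['Pattern'], '41 0C 1A F8 ')
    if m is None:
        return None
    # ((0x1A * 256) + 0xF8) / 4.0 == 1726.0 RPM
    return entry['Name'], entry['Value'](m), entry['Units']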
|
import ibm_circuit_object as ico
class IBMInputWire(ico.IBMCircuitObject):
"""
This class represents a single IBM input wire.
"""
def __init__(self, displayname, circuit):
"""Initializes the wire with the display name and circuit specified."""
ico.IBMCircuitObject.__init__(self, displayname, 0.0, 0, circuit)
|
from __future__ import print_function
import time
import numpy as np
from numba import jit, stencil
@stencil
def jacobi_kernel(A):
return 0.25 * (A[0,1] + A[0,-1] + A[-1,0] + A[1,0])
@jit(parallel=True)
def jacobi_relax_core(A, Anew):
error = 0.0
n = A.shape[0]
m = A.shape[1]
    # write the stencil result back into the caller's array; rebinding the
    # local name alone would leave the Anew passed in unchanged, making the
    # buffer swap in main() a no-op. Interior points only, so the fixed
    # boundary column set up in main() is preserved.
    Anew[1:-1, 1:-1] = jacobi_kernel(A)[1:-1, 1:-1]
    error = np.max(np.abs(Anew - A))
return error
def main():
NN = 3000
NM = 3000
A = np.zeros((NN, NM), dtype=np.float64)
Anew = np.zeros((NN, NM), dtype=np.float64)
n = NN
m = NM
iter_max = 1000
tol = 1.0e-6
error = 1.0
for j in range(n):
A[j, 0] = 1.0
Anew[j, 0] = 1.0
print("Jacobi relaxation Calculation: %d x %d mesh" % (n, m))
timer = time.time()
iter = 0
while error > tol and iter < iter_max:
error = jacobi_relax_core(A, Anew)
# swap A and Anew
tmp = A
A = Anew
Anew = tmp
if iter % 100 == 0:
print("%5d, %0.6f (elapsed: %f s)" % (iter, error, time.time()-timer))
iter += 1
runtime = time.time() - timer
print(" total: %f s" % runtime)
if __name__ == '__main__':
main()
|
"""
Custom module logger
"""
import logging
module_name = 'moflow'
logger = logging.getLogger(module_name)
logger.addHandler(logging.NullHandler())  # library default: emit nothing unless the user configures logging
def use_basic_config(level=logging.INFO, format=logging.BASIC_FORMAT):
"""Add basic configuration and formatting to the logger
By default, the logger should not be configured in any way. However
users and developers may prefer to see the logger messages.
"""
logger.level = level
if module_name not in [_.name for _ in logger.handlers]:
formatter = logging.Formatter(format)
handler = logging.StreamHandler()
handler.name = module_name
handler.setFormatter(formatter)
logger.addHandler(handler)
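if __name__ == '__main__':
    # Hypothetical demo (assumption, not part of the original module): enable
    # console output for the 'moflow' logger and emit one message so the
    # attached StreamHandler is visible.
    use_basic_config(level=logging.DEBUG)
    logger.debug('moflow logger configured: handlers=%r', logger.handlers)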
|
from airy.core.conf import settings
from mongoengine import *
connect(getattr(settings, 'database_name', 'airy'))
|
from .sizedist import *
from .WD01 import make_WD01_DustSpectrum
|