code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import copy
from LspAlgorithms.GeneticAlgorithms.Chromosome import Chromosome
from LspAlgorithms.GeneticAlgorithms.LocalSearch.LocalSearchNode import LocalSearchNode
from LspInputDataReading.LspInputDataInstance import InputDataInstance
import random
class CrossOverNode:
    """A node in the crossover search tree.

    Builds a child chromosome gene-by-gene by walking an (item, position)
    pointer across the parents' dna arrays, copying a parent gene when it
    fits the partially built child and falling back to local-search
    mutations when it does not.
    """

    def __init__(self, parentChromosomes) -> None:
        """Initialize with the parent chromosomes, an empty child chromosome
        and the pointer at the first item/position slot."""
        self.parentChromosomes = parentChromosomes
        self.chromosome = Chromosome()
        # (item, position) of the next gene slot to fill; (None, None) = done.
        self.pointer = (0, 0)

    def children(self):
        """Expand this node into its successor nodes."""
        return self.childrenApproach2()

    def childrenApproach2(self):
        """Generate successors by trying each parent's gene at the current
        slot, or mutations of that gene when direct placement fails."""
        children = []
        # Exhausted pointer: nothing left to place.
        if self.pointer == (None, None):
            return children
        item, position = self.pointer[0], self.pointer[1]
        pointer = self.nextPointer()
        # Candidate genes: the gene occupying this slot in each parent.
        genes = [chromosome.dnaArray[item][position] for chromosome in self.parentChromosomes]
        # Feasible period window for this slot: strictly after the previous
        # gene's period, and no later than the zipped demand deadline.
        lowerLimit = 0 if position == 0 else (self.chromosome.dnaArray[item][position - 1]).period + 1
        upperLimit = InputDataInstance.instance.demandsArrayZipped[item][position] + 1
        for gene in genes:
            searchMutation = False
            # "0" in the string identifier means that period is still free.
            if self.chromosome.stringIdentifier[gene.period] == "0":
                if (lowerLimit <= gene.period and gene.period < upperLimit):
                    node = CrossOverNode(self.parentChromosomes)
                    node.pointer = pointer
                    # Deep copy so sibling nodes do not share dna state.
                    dnaArray = copy.deepcopy(self.chromosome.dnaArray)
                    dnaArray[item][position] = gene
                    node.chromosome.dnaArray = dnaArray
                    # Mark the gene's period with its (1-based) item number.
                    stringIdentifier = self.chromosome.stringIdentifier
                    stringIdentifier = stringIdentifier[:gene.period] + str(gene.item + 1) + stringIdentifier[gene.period + 1:]
                    node.chromosome.stringIdentifier = stringIdentifier
                    children.append(node)
                else:
                    searchMutation = True
            else:
                searchMutation = True
            if searchMutation:
                # Direct placement failed: enumerate feasible mutations of
                # this gene against the partially built chromosome.
                mutations = LocalSearchNode.allGenePossibleMutations(gene, self.chromosome, False, "null")
                for mutation in mutations:
                    node = CrossOverNode(self.parentChromosomes)
                    node.pointer = pointer
                    # mutation[1] is presumably the mutated chromosome — TODO
                    # confirm against LocalSearchNode.allGenePossibleMutations.
                    node.chromosome = mutation[1]
                    children.append(node)
        # Randomize expansion order to diversify the search.
        random.shuffle(children)
        return children

    def nextPointer(self):
        """Return the (item, position) following the current pointer, or
        (None, None) once all items and positions are exhausted."""
        item, position = self.pointer[0], self.pointer[1]
        if position < len(InputDataInstance.instance.demandsArrayZipped[item]) - 1:
            item, position = item, position + 1
        else:
            if item < InputDataInstance.instance.nItems - 1:
                item, position = item + 1, 0
            else:
                item, position = None, None
        return (item, position)

    def __repr__(self):
        return "{}".format(self.chromosome)

    def __lt__(self, node):
        # Order nodes by chromosome cost (e.g. inside priority queues).
        return self.chromosome.cost < node.chromosome.cost

    def __eq__(self, node):
        # Two nodes are equal when they encode the same production string.
        return self.chromosome.stringIdentifier == node.chromosome.stringIdentifier
from LspAlgorithms.GeneticAlgorithms.Chromosome import Chromosome
from LspAlgorithms.GeneticAlgorithms.LocalSearch.LocalSearchNode import LocalSearchNode
from LspInputDataReading.LspInputDataInstance import InputDataInstance
import random
class CrossOverNode:
"""
"""
def __init__(self, parentChromosomes) -> None:
"""
"""
self.parentChromosomes = parentChromosomes
self.chromosome = Chromosome()
self.pointer = (0, 0)
def children(self):
"""
"""
return self.childrenApproach2()
def childrenApproach2(self):
"""
"""
children = []
# print("node ", self.chromosome)
if self.pointer == (None, None):
return children
item, position = self.pointer[0], self.pointer[1]
pointer = self.nextPointer()
# [print('Parent : ', chromosome.dnaArray) for chromosome in self.parentChromosomes]
# print("child : ", self.pointer, self.chromosome.dnaArray)
genes = [chromosome.dnaArray[item][position] for chromosome in self.parentChromosomes]
lowerLimit = 0 if position == 0 else (self.chromosome.dnaArray[item][position - 1]).period + 1
upperLimit = InputDataInstance.instance.demandsArrayZipped[item][position] + 1
# print("Genes List", genes)
for gene in genes:
# print("Gene - ", gene.item + 1, gene.position, gene.period)
searchMutation = False
if self.chromosome.stringIdentifier[gene.period] == "0":
if (lowerLimit <= gene.period and gene.period < upperLimit):
node = CrossOverNode(self.parentChromosomes)
node.pointer = pointer
dnaArray = copy.deepcopy(self.chromosome.dnaArray)
dnaArray[item][position] = gene
node.chromosome.dnaArray = dnaArray
stringIdentifier = self.chromosome.stringIdentifier
stringIdentifier = stringIdentifier[:gene.period] + str(gene.item + 1) + stringIdentifier[gene.period + 1:]
node.chromosome.stringIdentifier = stringIdentifier
children.append(node)
else:
searchMutation = True
else:
searchMutation = True
if searchMutation:
# print("search mutations")
mutations = LocalSearchNode.allGenePossibleMutations(gene, self.chromosome, False, "null")
# print(mutations)
for mutation in mutations:
node = CrossOverNode(self.parentChromosomes)
node.pointer = pointer
node.chromosome = mutation[1]
children.append(node)
random.shuffle(children)
# print("children ", children)
return children
def nextPointer(self):
"""
"""
item, position = self.pointer[0], self.pointer[1]
if position < len(InputDataInstance.instance.demandsArrayZipped[item]) - 1:
item, position = item, position + 1
else:
if item < InputDataInstance.instance.nItems - 1:
item, position = item + 1, 0
else:
item, position = None, None
return (item, position)
def __repr__(self):
return "{}".format(self.chromosome)
def __lt__(self, node):
return self.chromosome.cost < node.chromosome.cost
def __eq__(self, node):
return self.chromosome.stringIdentifier == node.chromosome.stringIdentifier | 0.26971 | 0.461805 |
import logging
import random
from enum import Enum
from typing import (Any, Dict, Iterable, Iterator, List, Optional, Sequence,
Set, Tuple, Union)
import numpy as np
import torch
from torch import device as t_device
from torch import set_num_threads
from torch.utils.data import Dataset
from pos.constants import BATCH_KEYS
from pos.utils import read_tsv, tokens_to_sentences, write_tsv
log = logging.getLogger(__name__)

# Type aliases: a sentence is a tuple of token strings.
Sentence = Tuple[str, ...]
Sentences = Tuple[Sentence, ...]

# Module-global torch device; assigned by set_device().
device = None
def set_device(gpu_flag=False):
    """Set the torch device.

    Chooses CUDA when requested and available, otherwise CPU restricted to a
    single thread, and stores the result in the module-global ``device``.
    """
    if gpu_flag and torch.cuda.is_available():
        device_set = t_device("cuda")  # type: ignore
        # Torch will use the allocated GPUs from environment variable CUDA_VISIBLE_DEVICES
        log.info(f"Using {torch.cuda.device_count()} GPUs {torch.cuda.get_device_name()}")
    else:
        device_set = t_device("cpu")  # type: ignore
        threads = 1
        # Set the number of threads to use for CPU
        set_num_threads(threads)
        log.info(f"Using {threads} CPU threads")
    global device
    device = device_set
def set_seed(seed=42):
    """Seed all RNG sources (Python, NumPy, Torch CPU and CUDA).

    0 (or any falsy value) means no specific seeding.
    """
    if seed:
        random.seed(seed)
        np.random.seed(seed)
        # BUG FIX: previously only the CUDA generator was seeded, so
        # CPU-side torch ops (weight init, dropout, sampling) were not
        # reproducible. torch.manual_seed seeds the CPU generator.
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        # Trade speed for determinism in cuDNN kernels.
        torch.backends.cudnn.deterministic = True
class Dicts(Enum):
    """An enum to name all mappings.

    The values are the identifiers used to refer to each vocabulary
    mapping elsewhere in the project.
    """

    Chars = "c_map"
    Pretrained = "p_map"
    FullTag = "t_map"
    Tokens = "w_map"
    MorphLex = "m_map"
class Vocab(set):
    """A Vocab is an unordered set of symbols."""

    @staticmethod
    def from_symbols(sentences: Iterable[Union[Sentence, str]]):
        """Create a Vocab holding every symbol found in the given sentences."""
        symbols = (symbol for sentence in sentences for symbol in sentence)
        return Vocab(symbols)

    @staticmethod
    def from_file(filepath: str):
        """Create a Vocab from a file of whitespace-separated symbols."""
        vocab = Vocab()
        with open(filepath) as f:
            for line in f:
                vocab.update(line.strip().split())
        return vocab
class VocabMap:
    """A VocabMap stores w2i and i2w for dictionaries."""

    # To pad in batches
    PAD = "<pad>"
    PAD_ID = 0
    # For unkown words in testing
    UNK = "<unk>"
    UNK_ID = 1
    # For EOS and SOS in char BiLSTM
    EOS = "</s>"
    EOS_ID = 2
    SOS = "<s>"
    SOS_ID = 3
    UNK_PAD = [(UNK, UNK_ID), (PAD, PAD_ID)]
    UNK_PAD_EOS_SOS = [
        (UNK, UNK_ID),
        (PAD, PAD_ID),
        (EOS, EOS_ID),
        (SOS, SOS_ID),
    ]

    w2i: Dict[str, int]
    i2w: Dict[int, str]

    def __init__(self, vocab: Set[str], special_tokens: Optional[List[Tuple[str, int]]] = None):
        """Build a vocabulary mapping from the provided vocabulary, needs to start at index=0.

        If special_tokens is given, will add these tokens first and start from the next index of the highest index provided.
        """
        mapping: Dict[str, int] = {}
        start = 0
        # Special tokens claim their fixed indices first; ordinary symbols
        # are numbered from just past the highest special index.
        for symbol, index in special_tokens or []:
            mapping[symbol] = index
            start = max(start, index + 1)
        mapping.update({symbol: index for index, symbol in enumerate(vocab, start=start)})
        self.w2i = mapping
        self.i2w = {index: symbol for symbol, index in mapping.items()}

    def __len__(self):
        """Return the number of mapped symbols."""
        return len(self.w2i)
class Fields:
    """Common fields used.

    NOTE(review): the "Gold" fields appear to hold reference annotations
    while the bare fields hold predictions — confirm against callers.
    """

    Tokens = "tokens"
    Tags = "tags"
    Lemmas = "lemmas"
    GoldTags = "gold_tags"
    GoldLemmas = "gold_lemmas"
class FieldedDataset(Dataset):
    """A generic dataset built from grouped tsv lines.

    Data is stored columnarly: one tuple of sentences per field, with all
    fields aligned (same sentence count and same per-sentence lengths).
    """

    def __init__(self, data: Tuple[Sentences, ...], fields: Tuple[str, ...]):
        """Initialize the dataset."""
        self.data: Tuple[Sentences, ...] = data
        self.fields: Tuple[str, ...] = fields
        # the data is columnar
        assert len(data) == len(
            fields
        ), f"The data {len(data)} and fields {len(fields)} do not contain the same number of elements"
        # All fields must agree on every sentence length.
        lengths = -1
        for field in fields:
            if lengths == -1:
                lengths = self._get_field_length(field)
            else:
                assert lengths == self._get_field_length(field), "All fields should be of the same size"

    def __getitem__(self, idx) -> Tuple[Sentence, ...]:
        """Support itemgetter."""
        return tuple(data_field[idx] for data_field in self.data)

    def __len__(self) -> int:
        """Support len."""
        return len(self.data[0])

    def __iter__(self) -> Iterator[Tuple[Sentence, ...]]:
        """Support iteration."""
        return zip(*self.data)

    def __add__(self, other):
        """Support addition (concatenation of two datasets, field-wise)."""
        new_data = tuple(data + other_data for data, other_data in zip(self.data, other.data))
        return self.__class__(new_data, self.fields)

    def _get_field_length(self, field: str) -> Tuple[int, ...]:
        """Return the per-sentence lengths of the given field."""
        return tuple(len(sentence) for sentence in self.data[self.fields.index(field)])

    def get_lengths(self) -> Tuple[int, ...]:
        """Return the sentence lengths."""
        return self._get_field_length(self.fields[0])

    def _shorten_field_length(self, field, lengths: Tuple[int, ...]) -> Sentences:
        """Shorten the field based on lengths (splitting long sentences)."""
        elements = self.get_field(field)
        adjusted_sentences = [tuple() for _ in range(len(lengths))]
        index = 0
        for element in elements:
            length = lengths[index]
            # Just right
            if len(element) == length:
                adjusted_sentences[index] = element
                index += 1
            # the sentence is too long
            elif len(element) > length:
                partial_element = element
                while len(partial_element) > 0:
                    # shorten it according to the lengths until done
                    length = lengths[index]
                    part, partial_element = (
                        partial_element[:length],
                        partial_element[length:],
                    )
                    adjusted_sentences[index] = part
                    index += 1
            else:
                log.error(f"Shortening but element too short {element}, {len(element)}, {length}")
                raise ValueError("Bad lengths")
        return tuple(adjusted_sentences)

    def _lengthen_field_length(self, field, lengths: Tuple[int, ...]) -> Sentences:
        """Lengthen field length back to original (re-joining split sentences)."""
        elements = self.get_field(field)
        adjusted_sentences = [tuple() for _ in range(len(lengths))]
        index = 0
        elements_it = iter(elements)
        for element in elements_it:
            length = lengths[index]
            # Just right
            if len(element) == length:
                adjusted_sentences[index] = element
                index += 1
            else:
                # Accumulate chunks until the target length is reached.
                while len(adjusted_sentences[index]) != length:
                    if len(adjusted_sentences[index]) == 0:
                        # set it
                        adjusted_sentences[index] = element
                    else:
                        # splice it
                        adjusted_sentences[index] = adjusted_sentences[index] + element
                    if len(adjusted_sentences[index]) != length:
                        element = next(elements_it)
                index += 1
        return tuple(adjusted_sentences)

    def _adjust_field_length(self, field, lengths: Tuple[int, ...], shorten=True) -> Sentences:
        """Dispatch to shortening or lengthening for one field."""
        if shorten:
            return self._shorten_field_length(field, lengths)
        else:
            return self._lengthen_field_length(field, lengths)

    def adjust_lengths(self, lengths: Tuple[int, ...], shorten) -> "FieldedDataset":
        """Adjust the lengths of the dataset according to the given lengths."""
        adjusted_data = []
        for field in self.fields:
            adjusted_data.append(self._adjust_field_length(field, lengths, shorten))
        return FieldedDataset(tuple(adjusted_data), self.fields)

    def adjust_to_maximum_length(self, maximum_length: int) -> "FieldedDataset":
        """Adjust the dataset so that no sequence has longer length than maximum_length."""

        def chunk_length(length: int) -> List[int]:
            """Chunk (divide) the given length into a list of lengths, none more than maximum_length."""
            whole_chunks, remainder = divmod(length, maximum_length)
            # BUG FIX: only append the remainder when non-zero. For exact
            # multiples the old code produced a trailing 0-length chunk,
            # which made _shorten_field_length loop forever (a 0-token
            # slice never consumes the sentence).
            if remainder:
                return [maximum_length] * whole_chunks + [remainder]
            return [maximum_length] * whole_chunks

        adjusted_lengths: List[int] = []
        for length in self.get_lengths():
            if length <= maximum_length:
                adjusted_lengths.append(length)
            else:
                adjusted_lengths.extend(chunk_length(length))
        return self.adjust_lengths(tuple(adjusted_lengths), shorten=True)

    def get_field(self, field=Fields.Tokens) -> Sentences:
        """Get the field."""
        return self.data[self.fields.index(field)]

    def get_vocab(self, field=Fields.Tokens) -> Vocab:
        """Return the Vocabulary in the dataset."""
        return Vocab.from_symbols(self.get_field(field))

    def get_vocab_map(self, special_tokens=None, field=Fields.Tokens) -> VocabMap:
        """Return the VocabularyMapping in the dataset."""
        return VocabMap(self.get_vocab(field), special_tokens=special_tokens)

    def get_char_vocab(self, field=Fields.Tokens) -> Vocab:
        """Return the character Vocabulary in the dataset."""
        return Vocab.from_symbols((tok for sent in self.get_field(field) for tok in sent))

    def get_char_vocab_map(self, special_tokens=None, field=Fields.Tokens) -> VocabMap:
        """Return the character VocabularyMapping in the dataset."""
        return VocabMap(self.get_char_vocab(field), special_tokens=special_tokens)

    def get_tag_vocab_map(self, special_tokens=None, field=Fields.GoldTags) -> VocabMap:
        """Return the VocabularyMapping in the dataset."""
        return VocabMap(self.get_vocab(field), special_tokens=special_tokens)

    def add_field(self, data_field: Sequence[Sentence], field: str):
        """Return a new FieldedDataset which has an added data_field."""
        return FieldedDataset(self.data + (data_field,), self.fields + (field,))

    def _iter_for_tsv(self):
        """Iterate for TSV which includes empty lines between sentences."""
        yield_empty = False
        for field_sentences in self:
            if yield_empty:
                # Yield an empty tuple for empty lines between sentences.
                yield tuple()
            for fields in zip(*field_sentences):
                yield fields
            yield_empty = True

    def to_tsv_file(self, path: str):
        """Write the dataset to a file as TSV."""
        with open(path, mode="w") as f:
            write_tsv(f, self._iter_for_tsv())

    def collate_fn(self, batch: Sequence[Tuple[Sentence, ...]]) -> Dict[str, Any]:
        """Map the inputs to batches keyed by BATCH_KEYS."""
        batch_dict = {}
        for idx, field in enumerate(self.fields):
            if field == Fields.Tokens:
                batch_dict[BATCH_KEYS.TOKENS] = tuple(element[idx] for element in batch)
            elif field == Fields.GoldTags:
                batch_dict[BATCH_KEYS.FULL_TAGS] = tuple(element[idx] for element in batch)
            elif field == Fields.GoldLemmas:
                batch_dict[BATCH_KEYS.LEMMAS] = tuple(element[idx] for element in batch)
        batch_dict[BATCH_KEYS.TOKEN_CHARS_LENS] = tuple(
            len(token) for sent in batch_dict[BATCH_KEYS.TOKENS] for token in sent
        )
        batch_dict[BATCH_KEYS.LENGTHS] = tuple(len(x) for x in batch_dict[BATCH_KEYS.TOKENS])
        return batch_dict

    @staticmethod
    def from_file(filepath: str, fields: Optional[Tuple[str, ...]] = None, sep="\t"):
        """Construct from a TSV file.

        When `fields` is not given, they are guessed from the column count in
        the order: Tokens, then GoldLemmas, then GoldTags.
        """
        with open(filepath) as f:
            sentences = tuple(tokens_to_sentences(read_tsv(f, sep=sep)))
        examples = tuple(zip(*sentences))
        if not fields:
            fields = tuple()
            if len(examples) >= 1:
                fields = fields + (Fields.Tokens,)
            if len(examples) >= 2:
                fields = fields + (Fields.GoldLemmas,)
            if len(examples) >= 3:
                fields = fields + (Fields.GoldTags,)
            if len(examples) >= 4:
                raise ValueError("Unable to guess fields in TSV file. Please set 'fields'")
        return FieldedDataset(examples, fields=fields)
import random
from enum import Enum
from typing import (Any, Dict, Iterable, Iterator, List, Optional, Sequence,
Set, Tuple, Union)
import numpy as np
import torch
from torch import device as t_device
from torch import set_num_threads
from torch.utils.data import Dataset
from pos.constants import BATCH_KEYS
from pos.utils import read_tsv, tokens_to_sentences, write_tsv
log = logging.getLogger(__name__)
Sentence = Tuple[str, ...]
Sentences = Tuple[Sentence, ...]
device = None
def set_device(gpu_flag=False):
"""Set the torch device."""
if gpu_flag and torch.cuda.is_available():
device_set = t_device("cuda") # type: ignore
# Torch will use the allocated GPUs from environment variable CUDA_VISIBLE_DEVICES
log.info(f"Using {torch.cuda.device_count()} GPUs {torch.cuda.get_device_name()}")
else:
device_set = t_device("cpu") # type: ignore
threads = 1
# Set the number of threads to use for CPU
set_num_threads(threads)
log.info(f"Using {threads} CPU threads")
global device
device = device_set
def set_seed(seed=42):
"""Set the seed on all platforms. 0 for no specific seeding."""
if seed:
random.seed(seed)
np.random.seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
class Dicts(Enum):
"""An enum to name all mappings."""
Chars = "c_map"
Pretrained = "p_map"
FullTag = "t_map"
Tokens = "w_map"
MorphLex = "m_map"
class Vocab(set):
"""A Vocab is an unordered set of symbols."""
@staticmethod
def from_symbols(sentences: Iterable[Union[Sentence, str]]):
"""Create a Vocab from a sequence of Symbols."""
return Vocab((tok for sent in sentences for tok in sent))
@staticmethod
def from_file(filepath: str):
"""Create a Vocab from a file with a sequence of Symbols."""
with open(filepath) as f:
return Vocab((symbol for line in f.readlines() for symbol in line.strip().split()))
class VocabMap:
"""A VocabMap stores w2i and i2w for dictionaries."""
# To pad in batches
PAD = "<pad>"
PAD_ID = 0
# For unkown words in testing
UNK = "<unk>"
UNK_ID = 1
# For EOS and SOS in char BiLSTM
EOS = "</s>"
EOS_ID = 2
SOS = "<s>"
SOS_ID = 3
UNK_PAD = [(UNK, UNK_ID), (PAD, PAD_ID)]
UNK_PAD_EOS_SOS = [
(UNK, UNK_ID),
(PAD, PAD_ID),
(EOS, EOS_ID),
(SOS, SOS_ID),
]
w2i: Dict[str, int]
i2w: Dict[int, str]
def __init__(self, vocab: Set[str], special_tokens: Optional[List[Tuple[str, int]]] = None):
"""Build a vocabulary mapping from the provided vocabulary, needs to start at index=0.
If special_tokens is given, will add these tokens first and start from the next index of the highest index provided.
"""
self.w2i = {}
next_idx = 0
if special_tokens:
for symbol, idx in special_tokens:
self.w2i[symbol] = idx
next_idx = max((idx + 1, next_idx))
for idx, symbol in enumerate(vocab, start=next_idx):
self.w2i[symbol] = idx
self.i2w = {i: w for w, i in self.w2i.items()}
def __len__(self):
"""Return the length of the dictionary."""
return len(self.w2i)
class Fields:
"""Common fields used."""
Tokens = "tokens"
Tags = "tags"
Lemmas = "lemmas"
GoldTags = "gold_tags"
GoldLemmas = "gold_lemmas"
class FieldedDataset(Dataset):
"""A generic dataset built from group tsv lines."""
def __init__(self, data: Tuple[Sentences, ...], fields: Tuple[str, ...]):
"""Initialize the dataset."""
self.data: Tuple[Sentences, ...] = data
self.fields: Tuple[str, ...] = fields
# the data is columnar
assert len(data) == len(
fields
), f"The data {len(data)} and fields {len(fields)} do not contain the same number of elements"
# All sequences are of the same length
lengths = -1
for field in fields:
if lengths == -1:
lengths = self._get_field_length(field)
else:
assert lengths == self._get_field_length(field), "All fields should be of the same size"
def __getitem__(self, idx) -> Tuple[Sentence, ...]:
"""Support itemgetter."""
return tuple(data_field[idx] for data_field in self.data)
def __len__(self) -> int:
"""Support len."""
return len(self.data[0])
def __iter__(self) -> Iterator[Tuple[Sentence, ...]]:
"""Support iteration."""
return zip(*self.data)
def __add__(self, other):
"""Support addition."""
new_data = tuple(data + other_data for data, other_data in zip(self.data, other.data))
return self.__class__(new_data, self.fields)
def _get_field_length(self, field: str) -> Tuple[int, ...]:
"""Return the field length."""
return tuple(len(sentence) for sentence in self.data[self.fields.index(field)])
def get_lengths(self) -> Tuple[int, ...]:
"""Return the sentence lengths."""
return self._get_field_length(self.fields[0])
def _shorten_field_length(self, field, lengths: Tuple[int, ...]) -> Sentences:
"""Shorten the field based on lengths."""
elements = self.get_field(field)
# lengths, x
adjusted_sentences = [tuple() for _ in range(len(lengths))]
index = 0
for element in elements:
length = lengths[index]
# Just right
if len(element) == length:
adjusted_sentences[index] = element
index += 1
# the sentence is too long
elif len(element) > length:
partial_element = element
while len(partial_element) > 0:
# shorten it according to the lengths until done
length = lengths[index]
part, partial_element = (
partial_element[:length],
partial_element[length:],
)
adjusted_sentences[index] = part
index += 1
else:
log.error(f"Shortening but element too short {element}, {len(element)}, {length}")
raise ValueError("Bad lengths")
return tuple(adjusted_sentences)
def _lengthen_field_length(self, field, lengths: Tuple[int, ...]) -> Sentences:
"""Lengthen field length back to original."""
elements = self.get_field(field)
# lengths, x
adjusted_sentences = [tuple() for _ in range(len(lengths))]
index = 0
elements_it = iter(elements)
for element in elements_it:
length = lengths[index]
# Just right
if len(element) == length:
adjusted_sentences[index] = element
index += 1
else:
while len(adjusted_sentences[index]) != length:
if len(adjusted_sentences[index]) == 0:
# set it
adjusted_sentences[index] = element
else:
# splice it
adjusted_sentences[index] = adjusted_sentences[index] + element
if len(adjusted_sentences[index]) != length:
element = next(elements_it)
index += 1
return tuple(adjusted_sentences)
def _adjust_field_length(self, field, lengths: Tuple[int, ...], shorten=True) -> Sentences:
if shorten:
return self._shorten_field_length(field, lengths)
else:
return self._lengthen_field_length(field, lengths)
def adjust_lengths(self, lengths: Tuple[int, ...], shorten) -> "FieldedDataset":
"""Adjust the lengths of the dataset according to the given lengths."""
adjusted_data = []
for field in self.fields:
adjusted_data.append(self._adjust_field_length(field, lengths, shorten))
return FieldedDataset(tuple(adjusted_data), self.fields)
def adjust_to_maximum_length(self, maximum_length: int) -> "FieldedDataset":
"""Adjust the dataset so that no sequence has longer length than maximum_length."""
def chunk_length(lenght: int) -> List[int]:
"""Chunk (divide) the given length into a list of lengths, where no length is more than the maximum_length."""
whole_chunks = int(lenght / maximum_length)
remainder = lenght % maximum_length
return [maximum_length] * whole_chunks + [remainder]
adjusted_lengths = []
lengths = self.get_lengths()
for length in lengths:
if length <= maximum_length:
adjusted_lengths.append(length)
else:
adjusted_lengths.extend(chunk_length(length))
return self.adjust_lengths(tuple(adjusted_lengths), shorten=True)
def get_field(self, field=Fields.Tokens) -> Sentences:
"""Get the field."""
return self.data[self.fields.index(field)]
def get_vocab(self, field=Fields.Tokens) -> Vocab:
"""Return the Vocabulary in the dataset."""
return Vocab.from_symbols(self.get_field(field))
def get_vocab_map(self, special_tokens=None, field=Fields.Tokens) -> VocabMap:
"""Return the VocabularyMapping in the dataset."""
return VocabMap(self.get_vocab(field), special_tokens=special_tokens)
def get_char_vocab(self, field=Fields.Tokens) -> Vocab:
"""Return the character Vocabulary in the dataset."""
return Vocab.from_symbols((tok for sent in self.get_field(field) for tok in sent))
def get_char_vocab_map(self, special_tokens=None, field=Fields.Tokens) -> VocabMap:
"""Return the character VocabularyMapping in the dataset."""
return VocabMap(self.get_char_vocab(field), special_tokens=special_tokens)
def get_tag_vocab_map(self, special_tokens=None, field=Fields.GoldTags) -> VocabMap:
"""Return the VocabularyMapping in the dataset."""
return VocabMap(self.get_vocab(field), special_tokens=special_tokens)
def add_field(self, data_field: Sequence[Sentence], field: str):
"""Return a new FieldDataset which has an added data_field."""
return FieldedDataset(self.data + (data_field,), self.fields + (field,))
def _iter_for_tsv(self):
"""Iterate for TSV which includes empty lines between sentences."""
yield_empty = False
for field_sentences in self:
if yield_empty:
# Yield an empty tuple for empty lines between sentences.
yield tuple()
for fields in zip(*field_sentences):
yield fields
yield_empty = True
def to_tsv_file(self, path: str):
"""Write the dataset to a file as TSV."""
with open(path, mode="w") as f:
write_tsv(f, self._iter_for_tsv())
def collate_fn(self, batch: Sequence[Tuple[Sentence, ...]]) -> Dict[str, Any]:
"""Map the inputs to batches."""
batch_dict = {}
for idx, field in enumerate(self.fields):
if field == Fields.Tokens:
batch_dict[BATCH_KEYS.TOKENS] = tuple(element[idx] for element in batch)
elif field == Fields.GoldTags:
batch_dict[BATCH_KEYS.FULL_TAGS] = tuple(element[idx] for element in batch)
elif field == Fields.GoldLemmas:
batch_dict[BATCH_KEYS.LEMMAS] = tuple(element[idx] for element in batch)
batch_dict[BATCH_KEYS.TOKEN_CHARS_LENS] = tuple(
len(token) for sent in batch_dict[BATCH_KEYS.TOKENS] for token in sent
)
batch_dict[BATCH_KEYS.LENGTHS] = tuple(len(x) for x in batch_dict[BATCH_KEYS.TOKENS])
return batch_dict
@staticmethod
def from_file(filepath: str, fields: Tuple[str, ...] = None, sep="\t"):
"""Construct from a file. By default we assume first there are Tokens, GoldTags, GoldLemmas."""
with open(filepath) as f:
sentences = tuple(tokens_to_sentences(read_tsv(f, sep=sep)))
examples = tuple(zip(*sentences))
if not fields:
fields = tuple()
if len(examples) >= 1:
fields = fields + (Fields.Tokens,)
if len(examples) >= 2:
fields = fields + (Fields.GoldLemmas,)
if len(examples) >= 3:
fields = fields + (Fields.GoldTags,)
if len(examples) >= 4:
raise ValueError("Unable to guess fields in TSV file. Please set 'fields'")
return FieldedDataset(examples, fields=fields) | 0.900913 | 0.383699 |
import numpy as np
class ParameterSet(object):
    """A named parameter vector with box constraints, per-parameter value
    transforms (e.g. log10 scaling) and a mask selecting which parameters
    are free ("unmasked", mask entry True) versus frozen.
    """

    # (transform, untransform) pairs mapping external (raw) values to the
    # internal transformed representation used e.g. by optimizers.
    T_POWER = (lambda x: 10.0**float(x), lambda y: np.log10(y))
    T_NEGPOWER = (lambda x: 10.0**float(-x), lambda y: -1.0*np.log10(y))
    T_NANO = (lambda x: x*1e9, lambda y: y*1e-9)
    T_MICRO = (lambda x: x*1e6, lambda y: y*1e-6)
    T_MILLI = (lambda x: x*1e3, lambda y: y*1e-3)
    T_ONE = (lambda x: x, lambda y: y)

    def __init__(self, names, constraints, values):
        """names: parameter names; constraints: (low, high) per parameter;
        values: initial raw (untransformed) values."""
        self._names = names
        self._constraints = constraints
        self._mask = [True] * len(values)  # True = free / unmasked
        self._transform = [self.T_ONE[0]] * len(values)
        self._untransform = [self.T_ONE[1]] * len(values)
        self.updateValues(values)

    @classmethod
    def fromTree(cls, tree, p):
        """Alternate constructor from a tree exposing pNames and constraints."""
        return cls(tree.pNames, tree.constraints, p)

    def updateValues(self, p):
        """Set the raw values and refresh the transformed cache."""
        self._values = p
        self._tvalues = self.applyTransform(p)

    def updateTransformedValues(self, tp):
        """Set the transformed values and refresh the raw cache."""
        self._tvalues = tp
        self._values = self.applyUntransform(tp)

    def updateUnmaskedTransformedValues(self, param):
        """Consume `param` into the unmasked slots (transformed domain),
        keeping masked slots unchanged."""
        tp = []
        for i, unmasked in enumerate(self._mask):
            if unmasked:
                tp.append(param[0])
                param = param[1:]
            else:
                tp.append(self._tvalues[i])
        self.updateTransformedValues(tp)

    def updateUnmaskedValues(self, param):
        """Consume `param` into the unmasked slots (raw domain), keeping
        masked slots unchanged."""
        # BUG FIX: the accumulator `p` was never initialized, so this method
        # raised NameError on first use.
        p = []
        for i, unmasked in enumerate(self._mask):
            if unmasked:
                p.append(param[0])
                param = param[1:]
            else:
                p.append(self._values[i])
        self.updateValues(p)

    def getUnmaskedTransformedValues(self):
        """Return the transformed values of the unmasked parameters."""
        return [tv for tv, unmasked in zip(self._tvalues, self._mask) if unmasked]

    def getUnmaskedValues(self):
        """Return the raw values of the unmasked parameters."""
        return [v for v, unmasked in zip(self._values, self._mask) if unmasked]

    def setParameterMapping(self, pName, functions):
        """Assign a (transform, untransform) pair to the named parameter."""
        for i, name in enumerate(self._names):
            if name == pName:
                self._transform[i] = functions[0]
                self._untransform[i] = functions[1]

    def applyConstraints(self):
        """Clamp every raw value into its (low, high) box and refresh caches."""
        for i, (low, high) in enumerate(self._constraints):
            if self._values[i] < low:
                self._values[i] = low
            if self._values[i] > high:
                self._values[i] = high
        self.updateValues(self._values)

    def applyTransform(self, p):
        """Map raw values into the transformed domain."""
        return [self._transform[i](v) for i, v in enumerate(p)]

    def applyUntransform(self, p):
        """Map transformed values back into the raw domain."""
        return [self._untransform[i](v) for i, v in enumerate(p)]

    def setMask(self, mask):
        """Replace the mask wholesale (list of booleans, True = free)."""
        self._mask = mask

    def maskAll(self):
        """Freeze every parameter."""
        self._mask = [False] * len(self._values)

    def unmaskAll(self):
        """Free every parameter."""
        self.maskAll()
        self.invertMask()

    def maskParameter(self, pName):
        """Freeze the named parameter."""
        for i, name in enumerate(self._names):
            if name == pName:
                self._mask[i] = False

    def invertMask(self):
        """Flip the mask for every parameter."""
        # BUG FIX: Python 3 `map` returns a one-shot iterator; assigning it
        # to self._mask broke later indexing and repeated iteration.
        self._mask = [not m for m in self._mask]

    def __str__(self):
        s = ''
        for i, v in enumerate(self._values):
            s += '{}{}\n'.format(self._names[i].ljust(30), v)
        return s
import numpy as np
class ParameterSet(object):
T_POWER = (lambda x: 10.0**float(x), lambda y: np.log10(y))
T_NEGPOWER = (lambda x: 10.0**float(-x), lambda y: -1.0*np.log10(y))
T_NANO = (lambda x: x*1e9, lambda y: y*1e-9)
T_MICRO = (lambda x: x*1e6, lambda y: y*1e-6)
T_MILLI = (lambda x: x*1e3, lambda y: y*1e-3)
T_ONE = (lambda x: x, lambda y: y)
def __init__(self, names, constraints, values):
self._names = names
self._constraints = constraints
self._mask = [True] * len(values)
self._transform = [self.T_ONE[0]] * len(values)
self._untransform = [self.T_ONE[1]] * len(values)
self.updateValues(values)
@classmethod
def fromTree(cls, tree, p):
return cls(tree.pNames, tree.constraints, p)
def updateValues(self, p):
self._values = p
self._tvalues = self.applyTransform(p)
def updateTransformedValues(self, tp):
self._tvalues = tp
self._values = self.applyUntransform(tp)
def updateUnmaskedTransformedValues(self, param):
tp = []
for i, v in enumerate(self._mask):
if v:
tp.append(param[0])
param = param[1:]
else:
tp.append(self._tvalues[i])
self.updateTransformedValues(tp)
def updateUnmaskedValues(self, param):
for i, v in enumerate(self._mask):
if v:
p.append(param[0])
param = param[1:]
else:
p.append(self._values[i])
self.updateValues(p)
def getUnmaskedTransformedValues(self):
res = []
for i, v in enumerate(self._mask):
if v: res.append(self._tvalues[i])
return res
def getUnmaskedValues(self):
res = []
for i, v in enumerate(self._mask):
if v: res.append(self._values[i])
return res
def setParameterMapping(self, pName, functions):
for i, v in enumerate(self._names):
if v == pName:
self._transform[i] = functions[0]
self._untransform[i] = functions[1]
def applyConstraints(self):
for i, v in enumerate(self._constraints):
if self._values[i] < v[0]: self._values[i] = v[0]
if self._values[i] > v[1]: self._values[i] = v[1]
self.updateValues(self._values)
def applyTransform(self, p):
return [self._transform[i](v) for i, v in enumerate(p)]
def applyUntransform(self, p):
return [self._untransform[i](v) for i, v in enumerate(p)]
def setMask(self, mask):
self._mask = mask
def maskAll(self):
self._mask = [False] * len(self._values)
def unmaskAll(self):
self.maskAll()
self.invertMask()
def maskParameter(self, pName):
for i, v in enumerate(self._names):
if v == pName:
self._mask[i] = False
def invertMask(self):
self._mask = map(lambda x: not x, self._mask)
def __str__(self):
s = ''
for i, v in enumerate(self._values):
s += '{}{}\n'.format(self._names[i].ljust(30), v)
return s | 0.574275 | 0.626038 |
from typing import List, Dict, Any, Callable, Union, Optional
from yarl import URL
# Alias used in the block dicts below so they read like the JSON they
# serialize to.
true = True
class Block:
    """Abstract base for Slack Block Kit block builders.

    Subclasses provide `make_block` to build the block's JSON-serializable
    dict and `value_from_state` to read the user's input back out of the
    interaction state.
    """

    # Builds the Block Kit dict for this block.
    make_block: Callable[..., Dict[str, Any]]
    # Whether this block contributes to view state (buttons do not).
    has_state: bool = True
    # Extracts the submitted value(s) from the block's state payload.
    value_from_state: Callable[..., Union[str, List[str]]]
class ButtonBlock(Block):
    """Builds a Slack `actions` block containing a single button element."""

    def __init__(self):
        # Buttons carry no input state to read back on submission.
        self.has_state = False

    def make_block(
        self,
        *,
        action_id: str,
        text: str,
        url: Optional[URL],
        value: Optional[str],
        style: Optional[str]
    ) -> Dict[str, Any]:
        """Return an `actions` block with one button.

        BUG FIX: in Slack's Block Kit, `url`, `value` and `style` are fields
        of the *button element*, not of the surrounding `actions` block.
        They were previously attached to the outer block dict, where Slack
        ignores them.
        """
        button: Dict[str, Any] = {
            "type": "button",
            "text": {"type": "plain_text", "text": text, "emoji": true},
            "action_id": action_id,
        }
        if url:
            button["url"] = str(url)
        if value:
            button["value"] = value
        if style and style in (
            "default",
            "primary",
            "danger",
        ):
            button["style"] = style
        return {"type": "actions", "elements": [button]}

    def value_from_state(self, block_state: Dict[str, Any]) -> str:
        """Return the clicked button's value from the interaction state."""
        return block_state["value"]
class PlainTextBlock(Block):
def make_block(
self,
*,
action_id: str,
block_id: str,
optional: bool,
label: str,
multiline: bool,
bound_data: str,
placeholder: Optional[str]
) -> Dict[str, Any]:
block: Dict[str, Any] = {
"type": "input",
"optional": optional,
"block_id": block_id,
"element": {
"type": "plain_text_input",
"multiline": multiline,
"action_id": action_id,
"placeholder": {
"type": "plain_text",
"text": placeholder if placeholder is not None else "Enter text",
},
},
"label": {"type": "plain_text", "text": label, "emoji": true},
}
if bound_data:
block["element"]["initial_value"] = bound_data
return block
def value_from_state(self, block_state: Dict[str, Any]) -> str:
return "" if block_state["value"] is None else block_state["value"]
class CheckboxBlock(Block):
def make_block(
self,
*,
action_id: str,
block_id: str,
optional: bool,
label: str,
options: List[str],
bound_data: List[str]
) -> Dict[str, Any]:
block: Dict[str, Any] = {
"type": "input",
"optional": optional,
"block_id": block_id,
"element": {
"type": "checkboxes",
"options": [self._make_option(value, value) for value in options],
"action_id": action_id,
},
"label": {"type": "plain_text", "text": label, "emoji": true},
}
if bound_data:
block["element"]["initial_options"] = [
self._make_initial_options(value, value) for value in bound_data
]
return block
def _make_option(self, value: str, text: str) -> Dict[str, Any]:
return {
"text": {"type": "plain_text", "text": text, "emoji": true},
"value": value,
}
def _make_initial_options(self, value: str, text: str) -> Dict[str, Any]:
return {
"text": {"type": "plain_text", "text": text},
"value": value,
}
def value_from_state(self, block_state: Dict[str, Any]) -> List[str]:
return [option["value"] for option in block_state["selected_options"]]
class RadioButtonBlock(Block):
def make_block(
self,
*,
action_id: str,
block_id: str,
optional: bool,
label: str,
options: List[str],
bound_data: List[str]
) -> Dict[str, Any]:
block: Dict[str, Any] = {
"type": "input",
"optional": optional,
"block_id": block_id,
"element": {
"type": "radio_buttons",
"options": [self._make_option(value, value) for value in options],
"action_id": action_id,
},
"label": {"type": "plain_text", "text": label, "emoji": true},
}
if bound_data:
block["element"]["initial_options"] = [
self._make_initial_options(value, value) for value in bound_data
]
return block
def _make_option(self, value: str, text: str) -> Dict[str, Any]:
return {
"text": {"type": "plain_text", "text": text, "emoji": true},
"value": value,
}
def _make_initial_options(self, value: str, text: str) -> Dict[str, Any]:
return {
"text": {"type": "plain_text", "text": text},
"value": value,
}
def value_from_state(self, block_state: Dict[str, Any]) -> str:
return (
block_state["selected_option"]["value"]
if block_state["selected_option"] is not None
else None
)
class MultiSelectBlock(Block):
def make_block(
self,
*,
action_id: str,
block_id: str,
optional: bool,
label: str,
options: List[str],
bound_data: List[str],
placeholder: Optional[str]
) -> Dict[str, Any]:
block: Dict[str, Any] = {
"type": "input",
"optional": optional,
"block_id": block_id,
"element": {
"type": "multi_static_select",
"placeholder": {
"type": "plain_text",
"text": placeholder if placeholder is not None else "Select items",
"emoji": true,
},
"options": [self._make_option(value, value) for value in options],
"action_id": action_id,
},
"label": {"type": "plain_text", "text": label, "emoji": true},
}
if bound_data:
block["element"]["initial_options"] = [
self._make_initial_options(value, value) for value in bound_data
]
return block
def _make_option(self, value: str, text: str) -> Dict[str, Any]:
return {
"text": {"type": "plain_text", "text": text, "emoji": true},
"value": value,
}
def _make_initial_options(self, value: str, text: str) -> Dict[str, Any]:
return {
"text": {"type": "plain_text", "text": text},
"value": value,
}
def value_from_state(self, block_state: Dict[str, Any]) -> List[str]:
return [option["value"] for option in block_state["selected_options"]] | slack_forms/forms/blocks.py | from typing import List, Dict, Any, Callable, Union, Optional
from yarl import URL
true = True
class Block:
make_block: Callable[..., Dict[str, Any]]
has_state: bool = True
value_from_state: Callable[..., Union[str, List[str]]]
class ButtonBlock(Block):
def __init__(self):
self.has_state = False
def make_block(
self,
*,
action_id: str,
text: str,
url: Optional[URL],
value: Optional[str],
style: Optional[str]
) -> Dict[str, Any]:
block = {
"type": "actions",
"elements": [
{
"type": "button",
"text": {"type": "plain_text", "text": text, "emoji": true},
"action_id": action_id,
}
],
}
if url:
block["url"] = str(url)
if value:
block["value"] = value
if style and style in (
"default",
"primary",
"danger",
):
block["style"] = style
return block
def value_from_state(self, block_state: Dict[str, Any]) -> str:
return block_state["value"]
class PlainTextBlock(Block):
def make_block(
self,
*,
action_id: str,
block_id: str,
optional: bool,
label: str,
multiline: bool,
bound_data: str,
placeholder: Optional[str]
) -> Dict[str, Any]:
block: Dict[str, Any] = {
"type": "input",
"optional": optional,
"block_id": block_id,
"element": {
"type": "plain_text_input",
"multiline": multiline,
"action_id": action_id,
"placeholder": {
"type": "plain_text",
"text": placeholder if placeholder is not None else "Enter text",
},
},
"label": {"type": "plain_text", "text": label, "emoji": true},
}
if bound_data:
block["element"]["initial_value"] = bound_data
return block
def value_from_state(self, block_state: Dict[str, Any]) -> str:
return "" if block_state["value"] is None else block_state["value"]
class CheckboxBlock(Block):
def make_block(
self,
*,
action_id: str,
block_id: str,
optional: bool,
label: str,
options: List[str],
bound_data: List[str]
) -> Dict[str, Any]:
block: Dict[str, Any] = {
"type": "input",
"optional": optional,
"block_id": block_id,
"element": {
"type": "checkboxes",
"options": [self._make_option(value, value) for value in options],
"action_id": action_id,
},
"label": {"type": "plain_text", "text": label, "emoji": true},
}
if bound_data:
block["element"]["initial_options"] = [
self._make_initial_options(value, value) for value in bound_data
]
return block
def _make_option(self, value: str, text: str) -> Dict[str, Any]:
return {
"text": {"type": "plain_text", "text": text, "emoji": true},
"value": value,
}
def _make_initial_options(self, value: str, text: str) -> Dict[str, Any]:
return {
"text": {"type": "plain_text", "text": text},
"value": value,
}
def value_from_state(self, block_state: Dict[str, Any]) -> List[str]:
return [option["value"] for option in block_state["selected_options"]]
class RadioButtonBlock(Block):
def make_block(
self,
*,
action_id: str,
block_id: str,
optional: bool,
label: str,
options: List[str],
bound_data: List[str]
) -> Dict[str, Any]:
block: Dict[str, Any] = {
"type": "input",
"optional": optional,
"block_id": block_id,
"element": {
"type": "radio_buttons",
"options": [self._make_option(value, value) for value in options],
"action_id": action_id,
},
"label": {"type": "plain_text", "text": label, "emoji": true},
}
if bound_data:
block["element"]["initial_options"] = [
self._make_initial_options(value, value) for value in bound_data
]
return block
def _make_option(self, value: str, text: str) -> Dict[str, Any]:
return {
"text": {"type": "plain_text", "text": text, "emoji": true},
"value": value,
}
def _make_initial_options(self, value: str, text: str) -> Dict[str, Any]:
return {
"text": {"type": "plain_text", "text": text},
"value": value,
}
def value_from_state(self, block_state: Dict[str, Any]) -> str:
return (
block_state["selected_option"]["value"]
if block_state["selected_option"] is not None
else None
)
class MultiSelectBlock(Block):
def make_block(
self,
*,
action_id: str,
block_id: str,
optional: bool,
label: str,
options: List[str],
bound_data: List[str],
placeholder: Optional[str]
) -> Dict[str, Any]:
block: Dict[str, Any] = {
"type": "input",
"optional": optional,
"block_id": block_id,
"element": {
"type": "multi_static_select",
"placeholder": {
"type": "plain_text",
"text": placeholder if placeholder is not None else "Select items",
"emoji": true,
},
"options": [self._make_option(value, value) for value in options],
"action_id": action_id,
},
"label": {"type": "plain_text", "text": label, "emoji": true},
}
if bound_data:
block["element"]["initial_options"] = [
self._make_initial_options(value, value) for value in bound_data
]
return block
def _make_option(self, value: str, text: str) -> Dict[str, Any]:
return {
"text": {"type": "plain_text", "text": text, "emoji": true},
"value": value,
}
def _make_initial_options(self, value: str, text: str) -> Dict[str, Any]:
return {
"text": {"type": "plain_text", "text": text},
"value": value,
}
def value_from_state(self, block_state: Dict[str, Any]) -> List[str]:
return [option["value"] for option in block_state["selected_options"]] | 0.882725 | 0.417568 |
import serial
import re
import logging
import argparse
logger = logging.getLogger(__name__)
class BenqSerial(object):
def __init__(self, device):
self._ser = serial.serial_for_url(device, baudrate=115200, timeout=0.5)
def __del__(self):
self._ser.close()
def _get_answer(self, command):
# logger.debug('command is \'%s\'', command)
self._ser.write('\r*%s=?#\r' % command)
answer = self._ser.read(32)
# logger.debug(
# 'answer returned is \'%s\'',
# answer.encode('string_escape'),
# )
answer = re.findall(
'^\>\*%s=\?\#\r\r\n\*(.*)\#\r\n' % command,
answer
)
# logger.debug('real answer is \'%s\'', answer)
if len(answer):
return answer[0]
else:
logger.warn('Command %s returned nothing', command)
return ''
def _get_bool(self, command):
return self._get_answer(command).count('ON') > 0
@property
def power(self):
return self._get_bool('pow')
@property
def source(self):
return self._get_answer('sour')
@property
def audio_mute(self):
return self._get_bool('mute')
@property
def audio_volume(self):
return self._get_answer('vol')
@property
def audio_micvolume(self):
return self._get_answer('micvol')
@property
def lamp_hours(self):
return self._get_answer('ltim')
def run(argv):
parser = argparse.ArgumentParser(description="control BenQ beamers via serial interface")
parser.add_argument('--device', '-d', type=str, default='/dev/ttyUSB0', help='serial device to open')
args = parser.parse_args(argv[1:])
logging.basicConfig(level=logging.DEBUG)
beamer = BenqSerial(args.device)
print('beamer is on? %s' % beamer.power)
print('audio is muted? %s' % beamer.audio_mute)
print('audio volume? speaker: %s microphone: %s' % (
beamer.audio_volume, beamer.audio_micvolume
))
print('selected source? %s' % beamer.source)
print('lamp hours? %s' % beamer.lamp_hours)
if __name__ == '__main__':
import sys
run(sys.argv) | pybenqserial.py |
import serial
import re
import logging
import argparse
logger = logging.getLogger(__name__)
class BenqSerial(object):
def __init__(self, device):
self._ser = serial.serial_for_url(device, baudrate=115200, timeout=0.5)
def __del__(self):
self._ser.close()
def _get_answer(self, command):
# logger.debug('command is \'%s\'', command)
self._ser.write('\r*%s=?#\r' % command)
answer = self._ser.read(32)
# logger.debug(
# 'answer returned is \'%s\'',
# answer.encode('string_escape'),
# )
answer = re.findall(
'^\>\*%s=\?\#\r\r\n\*(.*)\#\r\n' % command,
answer
)
# logger.debug('real answer is \'%s\'', answer)
if len(answer):
return answer[0]
else:
logger.warn('Command %s returned nothing', command)
return ''
def _get_bool(self, command):
return self._get_answer(command).count('ON') > 0
@property
def power(self):
return self._get_bool('pow')
@property
def source(self):
return self._get_answer('sour')
@property
def audio_mute(self):
return self._get_bool('mute')
@property
def audio_volume(self):
return self._get_answer('vol')
@property
def audio_micvolume(self):
return self._get_answer('micvol')
@property
def lamp_hours(self):
return self._get_answer('ltim')
def run(argv):
parser = argparse.ArgumentParser(description="control BenQ beamers via serial interface")
parser.add_argument('--device', '-d', type=str, default='/dev/ttyUSB0', help='serial device to open')
args = parser.parse_args(argv[1:])
logging.basicConfig(level=logging.DEBUG)
beamer = BenqSerial(args.device)
print('beamer is on? %s' % beamer.power)
print('audio is muted? %s' % beamer.audio_mute)
print('audio volume? speaker: %s microphone: %s' % (
beamer.audio_volume, beamer.audio_micvolume
))
print('selected source? %s' % beamer.source)
print('lamp hours? %s' % beamer.lamp_hours)
if __name__ == '__main__':
import sys
run(sys.argv) | 0.501221 | 0.062046 |
app_config: dict = {
'name': 'R2MD',
'log_level': 2,
'description':
"R2MD is a tool that wraps version details from all PODs via\n"
"Rancher API and prettify results using Markdown notation.",
'title':
'\n'
' * * \n'
' ** **\n'
' ******************************** ****************** ***\n'
' ******************************** ****************** ******\n'
' ************************************......************ \n'
' *** ************************************************** \n'
' ************************************** *********** \n'
' ************************************** *********** \n'
' ************************************** .********** \n'
' *********** *********** \n'
' *********** *********** \n'
' *********** *********** \n'
'\n'
f"{'-' * 63}\n"
'\n'
' ██████╗░██████╗░███╗░░░███╗██████╗░\n'
' ██╔══██╗╚════██╗████╗░████║██╔══██╗\n'
' ██████╔╝░░███╔═╝██╔████╔██║██║░░██║\n'
' ██╔══██╗██╔══╝░░██║╚██╔╝██║██║░░██║\n'
' ██║░░██║███████╗██║░╚═╝░██║██████╔╝\n'
' ╚═╝░░╚═╝╚══════╝╚═╝░░░░░╚═╝╚═════╝░',
'rancher': {
'base_url': None, # this is replaced during runtime
'endpoint': None, # this is replaced during runtime
'username': None, # this is replaced during runtime
'password': None # this is replaced during runtime
},
'static': {
'api_keys': '/apikeys',
'clusters': '/clusters'
},
'templates': {
'reports': {
'main': 'templates/reports/main.md',
'project': 'templates/reports/project.md'
},
'tables': {
'clusters': {
'header': 'templates/tables/clusters_header.md',
'entry': 'templates/tables/clusters_entry.md',
'footer': 'templates/tables/clusters_footer.md'
},
'projects': {
'header': 'templates/tables/projects_header.md',
'entry': 'templates/tables/projects_entry.md',
'footer': 'templates/tables/projects_footer.md'
},
'workloads': {
'header': 'templates/tables/workloads_header.md',
'entry': 'templates/tables/workloads_entry.md',
'footer': 'templates/tables/workloads_footer.md'
}
}
},
'internal': {} # this is replaced during runtime
} | config.py | app_config: dict = {
'name': 'R2MD',
'log_level': 2,
'description':
"R2MD is a tool that wraps version details from all PODs via\n"
"Rancher API and prettify results using Markdown notation.",
'title':
'\n'
' * * \n'
' ** **\n'
' ******************************** ****************** ***\n'
' ******************************** ****************** ******\n'
' ************************************......************ \n'
' *** ************************************************** \n'
' ************************************** *********** \n'
' ************************************** *********** \n'
' ************************************** .********** \n'
' *********** *********** \n'
' *********** *********** \n'
' *********** *********** \n'
'\n'
f"{'-' * 63}\n"
'\n'
' ██████╗░██████╗░███╗░░░███╗██████╗░\n'
' ██╔══██╗╚════██╗████╗░████║██╔══██╗\n'
' ██████╔╝░░███╔═╝██╔████╔██║██║░░██║\n'
' ██╔══██╗██╔══╝░░██║╚██╔╝██║██║░░██║\n'
' ██║░░██║███████╗██║░╚═╝░██║██████╔╝\n'
' ╚═╝░░╚═╝╚══════╝╚═╝░░░░░╚═╝╚═════╝░',
'rancher': {
'base_url': None, # this is replaced during runtime
'endpoint': None, # this is replaced during runtime
'username': None, # this is replaced during runtime
'password': None # this is replaced during runtime
},
'static': {
'api_keys': '/apikeys',
'clusters': '/clusters'
},
'templates': {
'reports': {
'main': 'templates/reports/main.md',
'project': 'templates/reports/project.md'
},
'tables': {
'clusters': {
'header': 'templates/tables/clusters_header.md',
'entry': 'templates/tables/clusters_entry.md',
'footer': 'templates/tables/clusters_footer.md'
},
'projects': {
'header': 'templates/tables/projects_header.md',
'entry': 'templates/tables/projects_entry.md',
'footer': 'templates/tables/projects_footer.md'
},
'workloads': {
'header': 'templates/tables/workloads_header.md',
'entry': 'templates/tables/workloads_entry.md',
'footer': 'templates/tables/workloads_footer.md'
}
}
},
'internal': {} # this is replaced during runtime
} | 0.34798 | 0.059537 |
from model.dao.abstractdao import AbstractDAO
from model.dao.daoexception import DAOException
from model.player import Player
R_INSERT = """
INSERT INTO Joueur
VALUES (null, ?, ?, ?, ?)"""
R_INSERT2 = """
INSERT INTO Composition
VALUES (?, ?, ?)"""
R_READ_CH_INDEX = """
SELECT id_championnat
FROM vJoueur
WHERE id_equipe = ?
"""
R_UPDATE = """
UPDATE Joueur
SET
nom_joueur = ?,
prenom_joueur = ?,
datenaissance_joueur = ?,
commentaire_joueur = ?,
WHERE id_joueur = ?
"""
R_DELETE = """
DELETE FROM Joueur
WHERE id_joueur = ?"""
R_READBYNAME = """
SELECT *
FROM vJoueur
WHERE id_championnat = ? and nom_joueur = ?"""
R_READBYID = """
SELECT *
FROM vJoueur
WHERE id_joueur = ?"""
R_READALL = """
SELECT *
FROM vJoueur
WHERE id_championnat = ?"""
R_READBYTEAM = """
SELECT *
FROM vJoueur
WHERE id_equipe = ?
"""
class DaoPlayer(AbstractDAO):
def __init__(self, conn):
super().__init__(conn)
def insert(self, o_player, comment=None):
if isinstance(o_player, Player):
with self._conn as conn:
try:
c = conn.cursor()
param_rq1 = [o_player.lname,
o_player.fname,
o_player.birthdate,
o_player.comment]
c.execute(R_INSERT, param_rq1)
index = c.lastrowid
param_rq2 = [o_player.idteam,
index,
comment]
c.execute(R_INSERT2, param_rq2)
conn.commit()
o_player.id = index
except Exception as ex:
conn.rollback()
DAOException(self, ex)
def update(self, o_player):
if isinstance(o_player, Player):
lparam = [o_player.lname,
o_player.fname,
o_player.birthdate,
o_player.comment,
o_player.id]
rep = AbstractDAO._update(self, R_UPDATE, lparam)
return rep
def delete(self, o_player):
if isinstance(o_player, Player):
lparam = [str(o_player.id)]
rep = AbstractDAO._delete(self, R_DELETE, lparam)
return rep
def getById(self, id_player):
if id_player is not None:
lparam = [str(id_player)]
rep = AbstractDAO._read(self, R_READBYID, lparam)
return self.__fetch_to_object(rep, True)
def getByTeam(self, o_team):
if o_team is not None:
lparam = [str(o_team.id)]
rep = AbstractDAO._read(self, R_READBYTEAM, lparam)
return self.__fetch_to_object(rep)
def getAll(self, o_competition):
if o_competition is not None:
param = [str(o_competition.id)]
rep = AbstractDAO._read(self, R_READALL, param)
return self.__fetch_to_object(rep)
def __fetch_to_object(self, fetchresult, return_one=False):
"""
fetch[0] : id_championnat
fetch[1] : id_equipe
fetch[2] : id_joueur
fetch[3] : nom_joueur
fetch[4] : prenom_joueur
fetch[5] : datenaissance_joueur
fetch[6] : commentaire_joueur
:param fetchresult:
:param return_one:
:return:
"""
liste = []
try:
if len(fetchresult) > 0:
for fetch in fetchresult:
obj = Player(fetch[1],
fetch[3],
fetch[4],
fetch[5],
fetch[6])
obj.id = fetch[2]
liste.append(obj)
return liste if not return_one else liste[0]
else:
return None
except Exception as ex:
DAOException(self, ex) | model/dao/sqlite/dao_player.py | from model.dao.abstractdao import AbstractDAO
from model.dao.daoexception import DAOException
from model.player import Player
R_INSERT = """
INSERT INTO Joueur
VALUES (null, ?, ?, ?, ?)"""
R_INSERT2 = """
INSERT INTO Composition
VALUES (?, ?, ?)"""
R_READ_CH_INDEX = """
SELECT id_championnat
FROM vJoueur
WHERE id_equipe = ?
"""
R_UPDATE = """
UPDATE Joueur
SET
nom_joueur = ?,
prenom_joueur = ?,
datenaissance_joueur = ?,
commentaire_joueur = ?,
WHERE id_joueur = ?
"""
R_DELETE = """
DELETE FROM Joueur
WHERE id_joueur = ?"""
R_READBYNAME = """
SELECT *
FROM vJoueur
WHERE id_championnat = ? and nom_joueur = ?"""
R_READBYID = """
SELECT *
FROM vJoueur
WHERE id_joueur = ?"""
R_READALL = """
SELECT *
FROM vJoueur
WHERE id_championnat = ?"""
R_READBYTEAM = """
SELECT *
FROM vJoueur
WHERE id_equipe = ?
"""
class DaoPlayer(AbstractDAO):
def __init__(self, conn):
super().__init__(conn)
def insert(self, o_player, comment=None):
if isinstance(o_player, Player):
with self._conn as conn:
try:
c = conn.cursor()
param_rq1 = [o_player.lname,
o_player.fname,
o_player.birthdate,
o_player.comment]
c.execute(R_INSERT, param_rq1)
index = c.lastrowid
param_rq2 = [o_player.idteam,
index,
comment]
c.execute(R_INSERT2, param_rq2)
conn.commit()
o_player.id = index
except Exception as ex:
conn.rollback()
DAOException(self, ex)
def update(self, o_player):
if isinstance(o_player, Player):
lparam = [o_player.lname,
o_player.fname,
o_player.birthdate,
o_player.comment,
o_player.id]
rep = AbstractDAO._update(self, R_UPDATE, lparam)
return rep
def delete(self, o_player):
if isinstance(o_player, Player):
lparam = [str(o_player.id)]
rep = AbstractDAO._delete(self, R_DELETE, lparam)
return rep
def getById(self, id_player):
if id_player is not None:
lparam = [str(id_player)]
rep = AbstractDAO._read(self, R_READBYID, lparam)
return self.__fetch_to_object(rep, True)
def getByTeam(self, o_team):
if o_team is not None:
lparam = [str(o_team.id)]
rep = AbstractDAO._read(self, R_READBYTEAM, lparam)
return self.__fetch_to_object(rep)
def getAll(self, o_competition):
if o_competition is not None:
param = [str(o_competition.id)]
rep = AbstractDAO._read(self, R_READALL, param)
return self.__fetch_to_object(rep)
def __fetch_to_object(self, fetchresult, return_one=False):
"""
fetch[0] : id_championnat
fetch[1] : id_equipe
fetch[2] : id_joueur
fetch[3] : nom_joueur
fetch[4] : prenom_joueur
fetch[5] : datenaissance_joueur
fetch[6] : commentaire_joueur
:param fetchresult:
:param return_one:
:return:
"""
liste = []
try:
if len(fetchresult) > 0:
for fetch in fetchresult:
obj = Player(fetch[1],
fetch[3],
fetch[4],
fetch[5],
fetch[6])
obj.id = fetch[2]
liste.append(obj)
return liste if not return_one else liste[0]
else:
return None
except Exception as ex:
DAOException(self, ex) | 0.309963 | 0.116061 |
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from ..foundations import hparams
from ..lottery.desc import LotteryDesc
from ..models import base
from ..pruning import sparse_global
class Model(base.Model):
def __init__(self, plan, initializer, outputs=10):
super(Model, self).__init__()
self.criterion = nn.CrossEntropyLoss()
self.layer1 = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
self.layer2 = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
self.fc = nn.Linear(7 * 7 * 32, 10)
self.apply(initializer)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = out.reshape(out.size(0), -1)
out = self.fc(out)
return out
@property
def output_layer_names(self):
return ['fc.weight', 'fc.bias']
@staticmethod
def is_valid_model_name(model_name):
return (model_name.startswith('fashion_cnn'))
@staticmethod
def get_model_from_name(model_name, initializer, outputs=10):
if not Model.is_valid_model_name(model_name):
raise ValueError('Invalid model name: {}'.format(model_name))
outputs = outputs or 10
plan = None
return Model(plan, initializer, outputs)
@property
def loss_criterion(self):
return self.criterion
@staticmethod
def default_hparams():
model_hparams = hparams.ModelHparams(
model_name='fashion_cnn',
model_init='kaiming_normal',
batchnorm_init='uniform',
)
dataset_hparams = hparams.DatasetHparams(
dataset_name='fasion_mnist',
batch_size=128
)
training_hparams = hparams.TrainingHparams(
optimizer_name='adam',
lr=0.01,
training_steps='40ep',
momentum=0.5
)
pruning_hparams = sparse_global.PruningHparams(
pruning_strategy='sparse_global',
pruning_fraction=0.2,
pruning_layers_to_ignore='fc.weight'
)
return LotteryDesc(model_hparams, dataset_hparams, training_hparams, pruning_hparams) | models/fashion_cnn.py |
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from ..foundations import hparams
from ..lottery.desc import LotteryDesc
from ..models import base
from ..pruning import sparse_global
class Model(base.Model):
def __init__(self, plan, initializer, outputs=10):
super(Model, self).__init__()
self.criterion = nn.CrossEntropyLoss()
self.layer1 = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
self.layer2 = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
self.fc = nn.Linear(7 * 7 * 32, 10)
self.apply(initializer)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = out.reshape(out.size(0), -1)
out = self.fc(out)
return out
@property
def output_layer_names(self):
return ['fc.weight', 'fc.bias']
@staticmethod
def is_valid_model_name(model_name):
return (model_name.startswith('fashion_cnn'))
@staticmethod
def get_model_from_name(model_name, initializer, outputs=10):
if not Model.is_valid_model_name(model_name):
raise ValueError('Invalid model name: {}'.format(model_name))
outputs = outputs or 10
plan = None
return Model(plan, initializer, outputs)
@property
def loss_criterion(self):
return self.criterion
@staticmethod
def default_hparams():
model_hparams = hparams.ModelHparams(
model_name='fashion_cnn',
model_init='kaiming_normal',
batchnorm_init='uniform',
)
dataset_hparams = hparams.DatasetHparams(
dataset_name='fasion_mnist',
batch_size=128
)
training_hparams = hparams.TrainingHparams(
optimizer_name='adam',
lr=0.01,
training_steps='40ep',
momentum=0.5
)
pruning_hparams = sparse_global.PruningHparams(
pruning_strategy='sparse_global',
pruning_fraction=0.2,
pruning_layers_to_ignore='fc.weight'
)
return LotteryDesc(model_hparams, dataset_hparams, training_hparams, pruning_hparams) | 0.925175 | 0.297457 |
from greenflow.dataframe_flow import (ConfSchema, PortsSpecSchema)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
class DiffNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.delayed_process = True
self.infer_meta = False
self.OUTPUT_PORT_NAME = 'out'
self.DIFF_A = 'diff_a'
self.DIFF_B = 'diff_b'
port_type = PortsSpecSchema.port_type
port_inports = {
self.DIFF_A: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
self.DIFF_B: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:diff_a}"
},
}
col_required = {
'sample_id': 'int64',
'portfolio': 'float64',
}
meta_inports = {
self.DIFF_A: col_required,
self.DIFF_B: col_required
}
output_meta = {
'sample_id': 'int64',
'portfolio': 'float64',
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: output_meta
}
}
self.template_ports_setup(in_ports=port_inports,
out_ports=port_outports)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Calculate Sharpe diff",
"type": "object",
"properties": {
},
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
df_a = inputs[self.DIFF_A].set_index('sample_id')
df_b = inputs[self.DIFF_B].set_index('sample_id')
# df = df.drop('datetime', axis=1)
output = {}
diff = df_a - df_b
output.update({self.OUTPUT_PORT_NAME: diff.reset_index()})
return output | gQuant/plugins/hrp_plugin/greenflow_hrp_plugin/diffNode.py | from greenflow.dataframe_flow import (ConfSchema, PortsSpecSchema)
from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin
from greenflow.dataframe_flow import Node
from greenflow.dataframe_flow.metaSpec import MetaDataSchema
class DiffNode(TemplateNodeMixin, Node):
def init(self):
TemplateNodeMixin.init(self)
self.delayed_process = True
self.infer_meta = False
self.OUTPUT_PORT_NAME = 'out'
self.DIFF_A = 'diff_a'
self.DIFF_B = 'diff_b'
port_type = PortsSpecSchema.port_type
port_inports = {
self.DIFF_A: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
},
self.DIFF_B: {
port_type: [
"pandas.DataFrame", "cudf.DataFrame",
"dask_cudf.DataFrame", "dask.dataframe.DataFrame"
]
}
}
port_outports = {
self.OUTPUT_PORT_NAME: {
port_type: "${port:diff_a}"
},
}
col_required = {
'sample_id': 'int64',
'portfolio': 'float64',
}
meta_inports = {
self.DIFF_A: col_required,
self.DIFF_B: col_required
}
output_meta = {
'sample_id': 'int64',
'portfolio': 'float64',
}
meta_outports = {
self.OUTPUT_PORT_NAME: {
MetaDataSchema.META_OP: MetaDataSchema.META_OP_RETENTION,
MetaDataSchema.META_DATA: output_meta
}
}
self.template_ports_setup(in_ports=port_inports,
out_ports=port_outports)
self.template_meta_setup(
in_ports=meta_inports,
out_ports=meta_outports
)
def conf_schema(self):
json = {
"title": "Calculate Sharpe diff",
"type": "object",
"properties": {
},
}
ui = {
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
df_a = inputs[self.DIFF_A].set_index('sample_id')
df_b = inputs[self.DIFF_B].set_index('sample_id')
# df = df.drop('datetime', axis=1)
output = {}
diff = df_a - df_b
output.update({self.OUTPUT_PORT_NAME: diff.reset_index()})
return output | 0.63114 | 0.235108 |
from random import randint
class Person:
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
self.contacts = {}
self.interests = {}
def add_contact(self, contact):
self.contacts[contact.first_name] = contact
return self
def remove_contact(self, contact):
self.contacts.pop(contact)
return self
def add_interest(self, interest):
self.interests[interest.name] = interest
return self
def remove_interest(self, interest):
self.interests.pop(interest)
return self
class Interest:
def __init__(self):
self.name = ""
self.activity = None
moral = 5
amoral = 0
immoral = -5
def wheel_of_morality():
morality = [moral, amoral, immoral]
while True:
values = morality[randint(0, len(morality)-1)]
yield values
class State:
name=''
morality = wheel_of_morality()
class Values:
def __init__(self):
self.loyalties={"state":{'name':'', 'morality':wheel_of_morality()}, "people":[]}
self.love = randint(1,100)
max_love = 100
self.empathy = randint(1, 10)
self.honesty = self.empathy+next(self.loyalties['state']['morality'])
self.respect={'for':{'others':self.empathy+self.love, 'self':self.love+self.honesty}}
class Session:
def __init__(self):
self.list_of_interests = []
self.list_of_people = []
self.page_width = 50
self.filler_character = '='
def main_menu(self):
print("Social Sim".center(self.page_width, self.filler_character))
print("New Game".center(self.page_width, " "))
print("Load Game".center(self.page_width, " "))
print("Options".center(self.page_width, " "))
print("Quit".center(self.page_width, " "))
nav = input('>')
if(nav.lower() == "new game"):
self.newgame()
def newgame(self):
print("You Got to the new game")
new_person = Values()
self.list_of_people.append(new_person)
print(self.list_of_people[0])
'''
begin = Session()
begin.main_menu()
'''
newperson = Values()
print(newperson.honesty) | source.py | from random import randint
class Person:
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
self.contacts = {}
self.interests = {}
def add_contact(self, contact):
self.contacts[contact.first_name] = contact
return self
def remove_contact(self, contact):
self.contacts.pop(contact)
return self
def add_interest(self, interest):
self.interests[interest.name] = interest
return self
def remove_interest(self, interest):
self.interests.pop(interest)
return self
class Interest:
def __init__(self):
self.name = ""
self.activity = None
moral = 5
amoral = 0
immoral = -5
def wheel_of_morality():
morality = [moral, amoral, immoral]
while True:
values = morality[randint(0, len(morality)-1)]
yield values
class State:
name=''
morality = wheel_of_morality()
class Values:
def __init__(self):
self.loyalties={"state":{'name':'', 'morality':wheel_of_morality()}, "people":[]}
self.love = randint(1,100)
max_love = 100
self.empathy = randint(1, 10)
self.honesty = self.empathy+next(self.loyalties['state']['morality'])
self.respect={'for':{'others':self.empathy+self.love, 'self':self.love+self.honesty}}
class Session:
def __init__(self):
self.list_of_interests = []
self.list_of_people = []
self.page_width = 50
self.filler_character = '='
def main_menu(self):
print("Social Sim".center(self.page_width, self.filler_character))
print("New Game".center(self.page_width, " "))
print("Load Game".center(self.page_width, " "))
print("Options".center(self.page_width, " "))
print("Quit".center(self.page_width, " "))
nav = input('>')
if(nav.lower() == "new game"):
self.newgame()
def newgame(self):
print("You Got to the new game")
new_person = Values()
self.list_of_people.append(new_person)
print(self.list_of_people[0])
'''
begin = Session()
begin.main_menu()
'''
newperson = Values()
print(newperson.honesty) | 0.379378 | 0.164953 |
import megengine as mge
import megengine.functional as F
import megengine.module as M
from basecore.config import ConfigDict
__all__ = ["build_loss", "BinaryCrossEntropy", "CrossEntropy"]
def build_loss(cfg: ConfigDict) -> M.Module:
"""The factory function to build loss.
Args:
cfg: config for building loss function.
Returns:
A loss function.
"""
loss_args = cfg.loss.to_dict()
loss_name = loss_args.pop("name", None)
if loss_name is None:
raise ValueError("Loss name is missing")
if callable(loss_name):
return loss_name(**loss_args)
if isinstance(loss_name, str):
loss_funcs = {
"BinaryCrossEntropy": BinaryCrossEntropy,
"CrossEntropy": CrossEntropy,
}
if loss_name in loss_funcs:
return loss_funcs[loss_name](**loss_args)
raise ValueError(f"Loss '{loss_name}' not supported")
class BinaryCrossEntropy(M.Module):
"""The module for binary cross entropy.
See :py:func:`~megengine.functional.loss.binary_cross_entropy` for more details.
"""
def __init__(self, **kwargs):
super().__init__()
def forward(self, x: mge.Tensor, y: mge.Tensor) -> mge.Tensor:
return F.loss.binary_cross_entropy(x, y)
class CrossEntropy(M.Module):
"""The module for cross entropy.
It supports both categorical labels and one-hot labels.
See :py:func:`~megengine.functional.loss.cross_entropy` for more details.
Args:
axis: reduced axis. Default: ``1``
label_smooth: label smooth factor. Default: ``0.0``
"""
def __init__(self, axis: int = 1, label_smooth: float = 0.0):
super().__init__()
self.axis = axis
self.label_smooth = label_smooth
def forward(self, x: mge.Tensor, y: mge.Tensor) -> mge.Tensor:
if x.ndim == y.ndim + 1:
return F.loss.cross_entropy(x, y, axis=self.axis, label_smooth=self.label_smooth)
else:
assert x.ndim == y.ndim
if self.label_smooth != 0:
y = y * (1 - self.label_smooth) + self.label_smooth / y.shape[self.axis]
return (-y * F.logsoftmax(x, axis=self.axis)).sum(self.axis).mean() | basecls/layers/losses.py | import megengine as mge
import megengine.functional as F
import megengine.module as M
from basecore.config import ConfigDict
__all__ = ["build_loss", "BinaryCrossEntropy", "CrossEntropy"]
def build_loss(cfg: ConfigDict) -> M.Module:
"""The factory function to build loss.
Args:
cfg: config for building loss function.
Returns:
A loss function.
"""
loss_args = cfg.loss.to_dict()
loss_name = loss_args.pop("name", None)
if loss_name is None:
raise ValueError("Loss name is missing")
if callable(loss_name):
return loss_name(**loss_args)
if isinstance(loss_name, str):
loss_funcs = {
"BinaryCrossEntropy": BinaryCrossEntropy,
"CrossEntropy": CrossEntropy,
}
if loss_name in loss_funcs:
return loss_funcs[loss_name](**loss_args)
raise ValueError(f"Loss '{loss_name}' not supported")
class BinaryCrossEntropy(M.Module):
"""The module for binary cross entropy.
See :py:func:`~megengine.functional.loss.binary_cross_entropy` for more details.
"""
def __init__(self, **kwargs):
super().__init__()
def forward(self, x: mge.Tensor, y: mge.Tensor) -> mge.Tensor:
return F.loss.binary_cross_entropy(x, y)
class CrossEntropy(M.Module):
"""The module for cross entropy.
It supports both categorical labels and one-hot labels.
See :py:func:`~megengine.functional.loss.cross_entropy` for more details.
Args:
axis: reduced axis. Default: ``1``
label_smooth: label smooth factor. Default: ``0.0``
"""
def __init__(self, axis: int = 1, label_smooth: float = 0.0):
super().__init__()
self.axis = axis
self.label_smooth = label_smooth
def forward(self, x: mge.Tensor, y: mge.Tensor) -> mge.Tensor:
if x.ndim == y.ndim + 1:
return F.loss.cross_entropy(x, y, axis=self.axis, label_smooth=self.label_smooth)
else:
assert x.ndim == y.ndim
if self.label_smooth != 0:
y = y * (1 - self.label_smooth) + self.label_smooth / y.shape[self.axis]
return (-y * F.logsoftmax(x, axis=self.axis)).sum(self.axis).mean() | 0.927133 | 0.512998 |
import requests
rubrik_host = ''
username = ''
api_token = ''
base_url = 'https://{}/api/'.format(rubrik_host)
headers = {
'accept': 'application/json',
'Authorization': 'Bearer {}'.format(api_token),
'Content-Type': 'application/json'
}
# VARIABLES
sql_db = 'AdventureWorks2014'
sql_instance = 'MSSQLSERVER'
sql_host = 'am1-stevtong-w1.rubrikdemo.com'
live_mount_name = 'adcopy'
def get_mssql_db_id(sql_host, sql_instance, sql_db):
"""
Returns the database ID of the given host, instance, and DB.
:sql_host: SQL DB host
:sql_instance: SQL DB instance
:sql_db: SQL DB name
"""
endpoint = 'v1/mssql/db'
filters = '?name={}'.format(sql_db)
url = '{}{}{}'.format(base_url, endpoint, filters)
resp = requests.get(url, headers=headers, verify=False)
for db in resp.json()['data']:
if (db['rootProperties']['rootName'] == sql_host and db['instanceName'] == sql_instance):
sql_id = db['id']
return sql_id
def get_mssql_instance_id(sql_host, sql_instance):
"""
Returns the instance ID of the given host and instance.
:sql_host: SQL DB host
:sql_instance: SQL DB instance
"""
endpoint = 'v1/mssql/instance'
filters = ''
url = '{}{}{}'.format(base_url, endpoint, filters)
resp = requests.get(url, headers=headers, verify=False)
for instance in resp.json()['data']:
if (instance['rootProperties']['rootName'] == sql_host and instance['name'] == sql_instance):
instance_id = instance['id']
return instance_id
def invoke_mssql_live_mount(source_db_id, target_instance_id, recovery_date, live_mount_name):
"""
Invokes a SQL Live Mount and returns the result of the request.
:source_db_id: Source SQL DB ID to Live Mount from
:target_instance_id: Target DB instance to Live Mount to
:recovery_date: Recovery date & time to Live Mount from, use "YYYY-MM-DD HH:MM:SS" format
"""
format_date = '{}T{}.000Z'.format(recovery_date.split()[0], recovery_date.split()[1])
endpoint = 'v1/mssql/db/{}/mount'.format(source_db_id)
filters = ''
url = '{}{}{}'.format(base_url, endpoint, filters)
payload = {
'recoveryPoint': {
# "timestampMs": 1630609433000
'date': format_date
},
'mountedDatabaseName': live_mount_name,
'targetInstanceId': target_instance_id
}
resp = requests.post(url, headers=headers, json=payload, verify=False)
try:
status = {
'http_status_code': resp.status_code,
'status': resp.json()['status'],
'start_time': resp.json()['startTime'],
'href': resp.json()['links'][0]['href'],
'id': resp.json()['id']
}
except:
status = {
'http_status_code': resp.status_code,
'status': resp.json()['message']
}
return status
def get_task_status(href):
"""
Returns the current task status.
:href: Link to the status URL
"""
resp = requests.get(href, headers=headers, verify=False)
return resp.json() | python/hackathon.py | import requests
rubrik_host = ''
username = ''
api_token = ''
base_url = 'https://{}/api/'.format(rubrik_host)
headers = {
'accept': 'application/json',
'Authorization': 'Bearer {}'.format(api_token),
'Content-Type': 'application/json'
}
# VARIABLES
sql_db = 'AdventureWorks2014'
sql_instance = 'MSSQLSERVER'
sql_host = 'am1-stevtong-w1.rubrikdemo.com'
live_mount_name = 'adcopy'
def get_mssql_db_id(sql_host, sql_instance, sql_db):
"""
Returns the database ID of the given host, instance, and DB.
:sql_host: SQL DB host
:sql_instance: SQL DB instance
:sql_db: SQL DB name
"""
endpoint = 'v1/mssql/db'
filters = '?name={}'.format(sql_db)
url = '{}{}{}'.format(base_url, endpoint, filters)
resp = requests.get(url, headers=headers, verify=False)
for db in resp.json()['data']:
if (db['rootProperties']['rootName'] == sql_host and db['instanceName'] == sql_instance):
sql_id = db['id']
return sql_id
def get_mssql_instance_id(sql_host, sql_instance):
"""
Returns the instance ID of the given host and instance.
:sql_host: SQL DB host
:sql_instance: SQL DB instance
"""
endpoint = 'v1/mssql/instance'
filters = ''
url = '{}{}{}'.format(base_url, endpoint, filters)
resp = requests.get(url, headers=headers, verify=False)
for instance in resp.json()['data']:
if (instance['rootProperties']['rootName'] == sql_host and instance['name'] == sql_instance):
instance_id = instance['id']
return instance_id
def invoke_mssql_live_mount(source_db_id, target_instance_id, recovery_date, live_mount_name):
"""
Invokes a SQL Live Mount and returns the result of the request.
:source_db_id: Source SQL DB ID to Live Mount from
:target_instance_id: Target DB instance to Live Mount to
:recovery_date: Recovery date & time to Live Mount from, use "YYYY-MM-DD HH:MM:SS" format
"""
format_date = '{}T{}.000Z'.format(recovery_date.split()[0], recovery_date.split()[1])
endpoint = 'v1/mssql/db/{}/mount'.format(source_db_id)
filters = ''
url = '{}{}{}'.format(base_url, endpoint, filters)
payload = {
'recoveryPoint': {
# "timestampMs": 1630609433000
'date': format_date
},
'mountedDatabaseName': live_mount_name,
'targetInstanceId': target_instance_id
}
resp = requests.post(url, headers=headers, json=payload, verify=False)
try:
status = {
'http_status_code': resp.status_code,
'status': resp.json()['status'],
'start_time': resp.json()['startTime'],
'href': resp.json()['links'][0]['href'],
'id': resp.json()['id']
}
except:
status = {
'http_status_code': resp.status_code,
'status': resp.json()['message']
}
return status
def get_task_status(href):
"""
Returns the current task status.
:href: Link to the status URL
"""
resp = requests.get(href, headers=headers, verify=False)
return resp.json() | 0.480479 | 0.103658 |
from datetime import datetime
from asnake.aspace import ASpace
from asnake.jsonmodel import JSONModelObject
from cartographer_backend import settings
from django.core.exceptions import FieldError
from django.db.models import Sum
from django.shortcuts import get_object_or_404
from django.utils.timezone import make_aware
from rest_framework.decorators import action
from rest_framework.exceptions import ParseError
from rest_framework.generics import ListAPIView
from rest_framework.response import Response
from rest_framework.schemas.openapi import AutoSchema
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet
from .models import (ArrangementMap, ArrangementMapComponent,
DeletedArrangementMap)
from .serializers import (ArrangementMapComponentListSerializer,
ArrangementMapComponentSerializer,
ArrangementMapListSerializer,
ArrangementMapSerializer,
DeletedArrangementMapSerializer)
def process_params(view):
"""Filters querysets based on parameters.
If `last_modified` is present, only returns objects which have been modified
since that timestamp.
If `published` is present, only returns ArrangementMaps which have been
explicitly published or ArrangementMapComponents which belong to a published
map.
"""
modified_since = int(view.request.query_params.get('modified_since', 0))
queryset = view.model.objects.filter(
modified__gte=make_aware(datetime.fromtimestamp(modified_since))).order_by('title')
if 'published' in view.request.query_params:
try:
queryset.exclude(publish=False)
except FieldError:
queryset.exclude(map__publish=False)
return queryset
class ArrangementMapViewset(ModelViewSet):
"""ArrangementMap endpoints.
retrieve:
Returns data about an ArrangementMap object, identified by a primary key.
list:
Returns paginated data about all ArrangementMap objects. Allows for two
URL parameters:
`modified_since` - only returns records modified after this time
(formatted as a UTC timestamp)
`published` - returns only published ArrangementMap objects
"""
model = ArrangementMap
def update(self, request, pk=None, *args, **kwargs):
"""Overrides default update method.
Publishes or unpublishes resource records in ArchivesSpace based on
publish attribute of parent ArrangementMap.
"""
response = super(ArrangementMapViewset, self).update(request, *args, **kwargs)
try:
map = ArrangementMap.objects.get(pk=pk)
aspace = ASpace(baseurl=settings.ASPACE['baseurl'],
username=settings.ASPACE['username'],
password=settings.ASPACE['password'])
for component in ArrangementMapComponent.objects.filter(map=map):
resource = aspace.client.get(component.archivesspace_uri).json()
resource["publish"] = map.publish
updated = aspace.client.post(component.archivesspace_uri, json=resource)
updated.raise_for_status()
return response
except Exception as e:
return Response(f"Error handling publish action in ArchivesSpace: {e}", status=500)
def get_serializer_class(self):
if self.action == 'list':
return ArrangementMapListSerializer
return ArrangementMapSerializer
def get_queryset(self):
return process_params(self)
class ArrangementMapComponentViewset(ModelViewSet):
"""ArrangementMapComponent endpoints.
retrieve:
Returns data about an ArrangementMapComponent object, identified by a primary key.
list:
Returns paginated data about all ArrangementMapComponent objects.
URL parameters:
`modified_since` - only returns records modified after this time
(formatted as a UTC timestamp)
`published` - returns only published ArrangementMap objects
"""
model = ArrangementMapComponent
queryset = ArrangementMapComponent.objects.all().order_by('-modified')
def get_serializer_class(self):
if self.action == 'list':
return ArrangementMapComponentListSerializer
return ArrangementMapComponentSerializer
def get_queryset(self):
return process_params(self)
@action(detail=True)
def objects_before(self, request, pk=None):
"""Returns the total number of objects before the target component."""
obj = get_object_or_404(ArrangementMapComponent, pk=pk)
try:
previous_components = ArrangementMapComponent.objects.filter(
map=obj.map, tree_index__lt=obj.tree_index)
count = len(previous_components)
if count:
count += previous_components.aggregate(Sum("child_count")).get("child_count__sum", 0)
return Response({"count": count}, status=200)
except Exception as e:
return Response(str(e), status=500)
class DeletedArrangementMapView(ListAPIView):
"""Returns deleted ArrangementMap and ArrangementMapComponent objects.
list:
Return paginated data about all Deleted ArrangementMap Objects.
Params:
deleted_since (timestamp): an optional argument which limits return to
objects deleted since.
"""
model = DeletedArrangementMap
serializer_class = DeletedArrangementMapSerializer
def get_queryset(self):
deleted_since = int(self.request.query_params.get('deleted_since', 0))
return DeletedArrangementMap.objects.filter(
deleted__gte=make_aware(datetime.fromtimestamp(deleted_since))).order_by('-deleted')
class ResourceFetcherView(APIView):
"""Fetches a resource from ArchivesSpace which matches a given ID.
Params:
resource_id (int): an ArchivesSpace identifier for a resource record.
"""
def get(self, request, *args, **kwargs):
try:
self.repo = ASpace(baseurl=settings.ASPACE['baseurl'],
username=settings.ASPACE['username'],
password=settings.ASPACE['password']).repositories(settings.ASPACE['repo_id'])
resource = self.repo.resources(kwargs.get('resource_id'))
if isinstance(resource, JSONModelObject):
return Response(resource.json(), status=200)
return Response(resource['error'], status=404)
except Exception as e:
return Response(str(e), status=500)
class FindByURISchema(AutoSchema):
"""Returns a custom operationId."""
def get_operation_id(self, path, method):
return 'findByUri'
class FindByURIView(ListAPIView):
"""Returns all ArrangementMapComponent objects whose `archivesspace_uri`
property matches a submitted URI.
Params:
uri (str): an ArchivesSpace URI
"""
model = ArrangementMapComponent
serializer_class = ArrangementMapComponentSerializer
schema = FindByURISchema()
def get_queryset(self):
try:
uri = self.request.GET["uri"]
return ArrangementMapComponent.objects.filter(archivesspace_uri=uri)
except KeyError:
raise ParseError("Required URL parameter `uri` missing.") | maps/views.py | from datetime import datetime
from asnake.aspace import ASpace
from asnake.jsonmodel import JSONModelObject
from cartographer_backend import settings
from django.core.exceptions import FieldError
from django.db.models import Sum
from django.shortcuts import get_object_or_404
from django.utils.timezone import make_aware
from rest_framework.decorators import action
from rest_framework.exceptions import ParseError
from rest_framework.generics import ListAPIView
from rest_framework.response import Response
from rest_framework.schemas.openapi import AutoSchema
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet
from .models import (ArrangementMap, ArrangementMapComponent,
DeletedArrangementMap)
from .serializers import (ArrangementMapComponentListSerializer,
ArrangementMapComponentSerializer,
ArrangementMapListSerializer,
ArrangementMapSerializer,
DeletedArrangementMapSerializer)
def process_params(view):
"""Filters querysets based on parameters.
If `last_modified` is present, only returns objects which have been modified
since that timestamp.
If `published` is present, only returns ArrangementMaps which have been
explicitly published or ArrangementMapComponents which belong to a published
map.
"""
modified_since = int(view.request.query_params.get('modified_since', 0))
queryset = view.model.objects.filter(
modified__gte=make_aware(datetime.fromtimestamp(modified_since))).order_by('title')
if 'published' in view.request.query_params:
try:
queryset.exclude(publish=False)
except FieldError:
queryset.exclude(map__publish=False)
return queryset
class ArrangementMapViewset(ModelViewSet):
"""ArrangementMap endpoints.
retrieve:
Returns data about an ArrangementMap object, identified by a primary key.
list:
Returns paginated data about all ArrangementMap objects. Allows for two
URL parameters:
`modified_since` - only returns records modified after this time
(formatted as a UTC timestamp)
`published` - returns only published ArrangementMap objects
"""
model = ArrangementMap
def update(self, request, pk=None, *args, **kwargs):
"""Overrides default update method.
Publishes or unpublishes resource records in ArchivesSpace based on
publish attribute of parent ArrangementMap.
"""
response = super(ArrangementMapViewset, self).update(request, *args, **kwargs)
try:
map = ArrangementMap.objects.get(pk=pk)
aspace = ASpace(baseurl=settings.ASPACE['baseurl'],
username=settings.ASPACE['username'],
password=settings.ASPACE['password'])
for component in ArrangementMapComponent.objects.filter(map=map):
resource = aspace.client.get(component.archivesspace_uri).json()
resource["publish"] = map.publish
updated = aspace.client.post(component.archivesspace_uri, json=resource)
updated.raise_for_status()
return response
except Exception as e:
return Response(f"Error handling publish action in ArchivesSpace: {e}", status=500)
def get_serializer_class(self):
if self.action == 'list':
return ArrangementMapListSerializer
return ArrangementMapSerializer
def get_queryset(self):
return process_params(self)
class ArrangementMapComponentViewset(ModelViewSet):
"""ArrangementMapComponent endpoints.
retrieve:
Returns data about an ArrangementMapComponent object, identified by a primary key.
list:
Returns paginated data about all ArrangementMapComponent objects.
URL parameters:
`modified_since` - only returns records modified after this time
(formatted as a UTC timestamp)
`published` - returns only published ArrangementMap objects
"""
model = ArrangementMapComponent
queryset = ArrangementMapComponent.objects.all().order_by('-modified')
def get_serializer_class(self):
if self.action == 'list':
return ArrangementMapComponentListSerializer
return ArrangementMapComponentSerializer
def get_queryset(self):
return process_params(self)
@action(detail=True)
def objects_before(self, request, pk=None):
"""Returns the total number of objects before the target component."""
obj = get_object_or_404(ArrangementMapComponent, pk=pk)
try:
previous_components = ArrangementMapComponent.objects.filter(
map=obj.map, tree_index__lt=obj.tree_index)
count = len(previous_components)
if count:
count += previous_components.aggregate(Sum("child_count")).get("child_count__sum", 0)
return Response({"count": count}, status=200)
except Exception as e:
return Response(str(e), status=500)
class DeletedArrangementMapView(ListAPIView):
"""Returns deleted ArrangementMap and ArrangementMapComponent objects.
list:
Return paginated data about all Deleted ArrangementMap Objects.
Params:
deleted_since (timestamp): an optional argument which limits return to
objects deleted since.
"""
model = DeletedArrangementMap
serializer_class = DeletedArrangementMapSerializer
def get_queryset(self):
deleted_since = int(self.request.query_params.get('deleted_since', 0))
return DeletedArrangementMap.objects.filter(
deleted__gte=make_aware(datetime.fromtimestamp(deleted_since))).order_by('-deleted')
class ResourceFetcherView(APIView):
"""Fetches a resource from ArchivesSpace which matches a given ID.
Params:
resource_id (int): an ArchivesSpace identifier for a resource record.
"""
def get(self, request, *args, **kwargs):
try:
self.repo = ASpace(baseurl=settings.ASPACE['baseurl'],
username=settings.ASPACE['username'],
password=settings.ASPACE['password']).repositories(settings.ASPACE['repo_id'])
resource = self.repo.resources(kwargs.get('resource_id'))
if isinstance(resource, JSONModelObject):
return Response(resource.json(), status=200)
return Response(resource['error'], status=404)
except Exception as e:
return Response(str(e), status=500)
class FindByURISchema(AutoSchema):
"""Returns a custom operationId."""
def get_operation_id(self, path, method):
return 'findByUri'
class FindByURIView(ListAPIView):
"""Returns all ArrangementMapComponent objects whose `archivesspace_uri`
property matches a submitted URI.
Params:
uri (str): an ArchivesSpace URI
"""
model = ArrangementMapComponent
serializer_class = ArrangementMapComponentSerializer
schema = FindByURISchema()
def get_queryset(self):
try:
uri = self.request.GET["uri"]
return ArrangementMapComponent.objects.filter(archivesspace_uri=uri)
except KeyError:
raise ParseError("Required URL parameter `uri` missing.") | 0.822011 | 0.234472 |
from __future__ import print_function
import numpy as np
from lensit.misc.misc_utils import timer
from lensit.ffs_deflect.ffs_deflect import ffs_id_displacement
from lensit.ffs_covs import ffs_specmat as SM
verbose = False  # if True, the timer instances below print checkpoint timings
typs = ['T', 'QU', 'TQU']  # data combinations handled by the estimators in this module
def get_qlms_wl(typ, lib_sky, TQU_Mlik, ResTQU_Mlik, lib_qlm, f=None, lib_sky2=None, subtract_zeromode=False, use_Pool=0, **kwargs):
    """
    Stand alone qlm estimator starting from lib_sky and unlensed Cls.

    Likelihood gradient (from the quadratic part):
        (B^t F^t Cov^-1 d)^a(z) (D dxi_unl/da D^t B^t F^t Cov^-1 d)_a(z)
    Sign is correct for the potential estimate, not the gradient.
    Only the forward displacement is needed.
    'Res lms' here is D^t B^t Cov^-1 data (fed in as ResTQU_Mlik);
    TQU_Mlik are the max.-likelihood sky modes. N0 * output is the normalized
    qest for MV estimates.

    Args:
        typ: data combination, one of 'T', 'QU', 'TQU'; an 'EE'/'EB'/'BE'/'BB'
            pair is handled by zeroing the unwanted E/B leg and recursing on 'QU'.
        lib_sky: sky-alm library used for the residual leg.
        TQU_Mlik: max.-likelihood TQU sky alms (gradient leg).
        ResTQU_Mlik: residual TQU alms (inverse-variance-weighted leg).
        lib_qlm: alm library of the output qlms.
        f: forward lensing displacement; identity displacement if None.
        lib_sky2: optional distinct alm library for the gradient leg
            (defaults to lib_sky).
        subtract_zeromode: if True, removes the zero mode of the two Cartesian
            gradient maps before projecting onto gradient/curl.
        use_Pool: parallelization flag forwarded to the lensing routine.

    Returns:
        np.ndarray with the (gradient, curl) unnormalized qlm arrays.
    """
    # Bugfix: this line previously read '... else lib_sky', which silently
    # discarded any caller-supplied lib_sky2 and made the argument a no-op.
    lib_sky2 = lib_sky if lib_sky2 is None else lib_sky2
    if typ in ['EE', 'EB', 'BE', 'BB']:
        # Zero the unwanted E or B component on each leg (index 0 is E, 1 is B),
        # then recurse with the generic 'QU' estimator.
        TEB_Mlik = lib_sky.QUlms2EBalms(TQU_Mlik)
        TEB_Res = lib_sky.QUlms2EBalms(ResTQU_Mlik)
        TEB_Mlik[{'E': 1, 'B': 0}[typ[0]]] *= 0.
        TEB_Res[{'E': 1, 'B': 0}[typ[1]]] *= 0.
        return get_qlms_wl('QU', lib_sky, lib_sky.EBlms2QUalms(TEB_Mlik), lib_sky2.EBlms2QUalms(TEB_Res), lib_qlm,
                           f=f, use_Pool=use_Pool, lib_sky2=lib_sky2)
    assert len(TQU_Mlik) == len(typ) and len(ResTQU_Mlik) == len(typ)
    t = timer(verbose, prefix=__name__)
    if f is None: f = ffs_id_displacement(lib_sky.shape, lib_sky.lsides)

    def left(id):
        # Residual leg in real space.
        assert id in range(len(typ)), (id, typ)
        return lib_sky.alm2map(ResTQU_Mlik[id])

    def Right(S_id, axis):
        # Lensed derivative of the max.-likelihood leg along 'axis' (1: x, 0: y).
        assert S_id in range(len(typ)), (S_id, typ)
        assert axis in [0, 1]
        kfunc = lib_sky2.get_ikx if axis == 1 else lib_sky2.get_iky
        return f.alm2lenmap(lib_sky2, TQU_Mlik[S_id] * kfunc(), use_Pool=use_Pool)

    retdx = left(0) * Right(0, 1)
    for _i in range(1, len(typ)): retdx += left(_i) * Right(_i, 1)
    t.checkpoint("get_likgrad::Cart. gr. x done. (%s map(s) lensed, %s fft(s)) " % (len(typ), 2 * len(typ) + 1))
    retdy = left(0) * Right(0, 0)
    for _i in range(1, len(typ)): retdy += left(_i) * Right(_i, 0)
    t.checkpoint("get_likgrad::Cart. gr. y done. (%s map(s) lensed, %s fft(s)) " % (len(typ), 2 * len(typ) + 1))
    if subtract_zeromode:
        zro_x = np.sum(retdx)
        zro_y = np.sum(retdy)
        print('zero mode:', zro_x, zro_y)
        retdx[0, 0] -= zro_x
        retdy[0, 0] -= zro_y
    retdx = lib_qlm.map2alm(retdx)
    retdy = lib_qlm.map2alm(retdy)
    # Project the Cartesian gradient onto gradient (potential) and curl modes.
    return np.array([- retdx * lib_qlm.get_ikx() - retdy * lib_qlm.get_iky(),
                     retdx * lib_qlm.get_iky() - retdy * lib_qlm.get_ikx()])  # N0 * output is normalized qest
def _Mlik2ResTQUMlik_diag(field, ninv_filt, TQUMlik, data, f, fi):
    """Return B^t Ni (data - B D Mlik) for one Stokes component.

    This is the residual map fed into the qlm estimator: the max.-likelihood
    prediction is deflected and subtracted from the data, inverse-noise
    weighted, and projected back. The filter's displacement pair is set to
    (f, fi) for the forward step and reset to the identity before the final
    transpose operation.
    """
    assert field in ['T', 'Q', 'U']
    skyalm = ninv_filt.lib_skyalm
    id_displ = ffs_id_displacement(skyalm.shape, skyalm.lsides)
    ninv_filt.set_ffi(f, fi)
    # NOTE(review): apply_map receives the displacement f as its first
    # argument here, unlike apply_R/apply_Rt which take the field key —
    # verify against the ninv_filt implementation.
    residual = data - ninv_filt.apply_R(field, TQUMlik)
    ninv_filt.apply_map(f, residual, inplace=True)
    ninv_filt.set_ffi(id_displ, id_displ)
    return ninv_filt.apply_Rt(field, residual)
def get_response(typ,lib_datalm,cls_len,NlevT_uKamin,NlevP_uKamin,cl_transf,
                 wAl = None,wBl = None,fAl = None,fBl = None,lib_qlm = None):
    """Flat-sky quadratic estimator response for the leg pair 'typ'.

    Args:
        typ: two-character string giving the two estimator legs; only
            'E'/'B' combinations are accepted ('T' legs are rejected below).
        lib_datalm: data-alm library (defines the flat-sky grid / ell matrix).
        cls_len: dict of lensed spectra (keys like 'ee', 'bb'); cross-spectra
            keys 'eb'/'be'/'tb'/'bt' must be absent.
        NlevT_uKamin: temperature noise level in uK-arcmin (default filters).
        NlevP_uKamin: polarization noise level in uK-arcmin (default filters).
        cl_transf: transfer (beam) function cl, indexed by multipole.
        wAl: weight for the first leg (defaults to unity).
        wBl: weight for the second leg (defaults to that leg's lensed spectrum).
        fAl: isotropic filter for the first leg; defaults to
            1 / (cl + (noise/beam)^2), zero where the beam vanishes.
        fBl: isotropic filter for the second leg (same default form).
        lib_qlm: alm library of the returned response (defaults to lib_datalm).

    Returns:
        np.ndarray with the gradient and curl response alms.
    """
    # NOTE(review): this first assert is made redundant by the stricter one
    # just below, which forbids 'T' legs altogether.
    assert typ[0] in ['T','E','B'] and typ[1] in ['T','E','B']
    assert typ[0] in ['E','B'] and typ[1] in ['E','B'], "T not implemented"
    assert 'eb' not in cls_len.keys() and 'be' not in cls_len.keys()
    assert 'tb' not in cls_len.keys() and 'bt' not in cls_len.keys()
    lmax = lib_datalm.ellmax
    # Default weights and isotropic filters (built only up to the beam support).
    if wAl is None: wAl = np.ones(lmax + 1,dtype = float)
    if wBl is None: wBl = cls_len[(typ[1] + typ[1]).lower()][:lmax + 1]
    if fAl is None:
        Nlev = NlevT_uKamin if typ[0] == 'T' else NlevP_uKamin
        ii = np.where(cl_transf[:lmax + 1] > 0.)
        fAl = np.zeros(lmax + 1,dtype = float)
        # Noise converted from uK-arcmin to uK-radian, deconvolved by the beam.
        fAl[ii] = 1./ (cls_len[(typ[0] + typ[0]).lower()][ii] + ( (Nlev / 60. /180. * np.pi)/ cl_transf[ii]) ** 2)
    if fBl is None:
        Nlev = NlevT_uKamin if typ[1] == 'T' else NlevP_uKamin
        ii = np.where(cl_transf[:lmax + 1] > 0.)
        fBl = np.zeros(lmax + 1,dtype = float)
        fBl[ii] = 1./ (cls_len[(typ[1] + typ[1]).lower()][ii] + ( (Nlev / 60. /180. * np.pi)/ cl_transf[ii]) ** 2)
    if lib_qlm is None: lib_qlm = lib_datalm
    def get_pmat(A, i, j, clA):
        """Return entry (i, j) of the Stokes-space projection of spectrum clA
        for field A, on the 2D ell grid.

        Indices 0 / 1 / 2 presumably label T / Q / U — confirm against the
        caller loops below, which only use (1, 2) combinations. The cos/sin
        of 2*phi_ell implement the E/B to Q/U rotation.
        """
        if A == 'T':
            if i == 0 and j == 0:
                return clA[lib_datalm.reduced_ellmat()]
            else:
                assert 0,('zero',i,j)
        elif A == 'E':
            cos, sin = lib_datalm.get_cossin_2iphi()
            if i == 1 and j == 1:
                return clA[lib_datalm.reduced_ellmat()] * cos ** 2
            elif i == 2 and j == 2:
                return clA[lib_datalm.reduced_ellmat()] * sin ** 2
            elif i == 2 and j == 1:
                return clA[lib_datalm.reduced_ellmat()] * cos * sin
            elif i == 1 and j == 2:
                return clA[lib_datalm.reduced_ellmat()] * cos * sin
            else:
                assert 0,('zero',i,j)
        elif A == 'B':
            cos, sin = lib_datalm.get_cossin_2iphi()
            if i == 1 and j == 1:
                return clA[lib_datalm.reduced_ellmat()] * sin ** 2
            elif i == 2 and j == 2:
                return clA[lib_datalm.reduced_ellmat()] * cos ** 2
            elif i == 1 and j == 2:
                return -clA[lib_datalm.reduced_ellmat()] * cos * sin
            elif i == 2 and j == 1:
                return -clA[lib_datalm.reduced_ellmat()] * cos * sin
            else:
                assert 0,('zero',i,j)
        else:
            assert 0,(A,['T','E','B'])
    # Accumulate the four Cartesian second-derivative response maps.
    retxx = np.zeros(lib_datalm.shape,dtype = float)
    retyy = np.zeros(lib_datalm.shape,dtype = float)
    retxy = np.zeros(lib_datalm.shape,dtype = float)
    retyx = np.zeros(lib_datalm.shape,dtype = float)
    _2map = lambda alm : lib_datalm.alm2map(alm)
    ikx = lambda : lib_datalm.get_ikx()
    iky = lambda: lib_datalm.get_iky()
    # First term: both derivatives act on the B leg (weighted by its spectrum).
    clB = wBl * fBl * cls_len[(typ[1] + typ[1]).lower()][:lmax + 1]
    clA = wAl * fAl
    for i, j in [(1, 1),(1, 2),(2, 1),(2, 2)]:
        retxx += _2map(get_pmat(typ[0],i,j, clA)) * _2map(ikx() ** 2 * get_pmat(typ[1],j,i,clB ))
        retyy += _2map(get_pmat(typ[0],i,j, clA)) * _2map(iky() ** 2 * get_pmat(typ[1],j,i,clB ))
        retxy += _2map(get_pmat(typ[0],i,j, clA)) * _2map(ikx() * iky() * get_pmat(typ[1],j,i,clB ))
        retyx += _2map(get_pmat(typ[0],i,j, clA)) * _2map(ikx() * iky() * get_pmat(typ[1],j,i,clB ))
    # Second term: one derivative on each leg (spectrum weight moved to the A leg).
    clB = wBl * fBl
    clA = wAl * fAl * cls_len[(typ[0] + typ[0]).lower()][:lmax + 1]
    for i, j in [(1, 1), (1, 2), (2, 1), (2, 2)]:
        retxx += _2map(ikx() * get_pmat(typ[0], i, j, clA)) * _2map(ikx() * get_pmat(typ[1], j, i, clB))
        retyy += _2map(iky() * get_pmat(typ[0], i, j, clA)) * _2map(iky() * get_pmat(typ[1], j, i, clB))
        retxy += _2map(ikx() * get_pmat(typ[0], i, j, clA)) * _2map(iky() * get_pmat(typ[1], j, i, clB))
        retyx += _2map(iky() * get_pmat(typ[0], i, j, clA)) * _2map(ikx() * get_pmat(typ[1], j, i, clB))
    # Flat-sky normalization, then projection onto gradient and curl modes.
    fac = 1. / np.sqrt(np.prod(lib_datalm.lsides))
    _2alm = lambda _map : lib_qlm.map2alm(_map)
    retxx = _2alm(retxx)
    retyy = _2alm(retyy)
    retxy = _2alm(retxy)
    retyx = _2alm(retyx)
    ikx = lambda : lib_qlm.get_ikx()
    iky = lambda : lib_qlm.get_iky()
    return np.array([fac * (retxx * ikx() ** 2 + retyy * iky() ** 2 + (retxy + retyx) * ikx() * iky()),
                     fac * (retxx * iky() ** 2 + retyy * ikx() ** 2 - (retxy + retyx) * ikx() * iky()) ])
class MFestimator:
    """Mean-field qlm estimator based on Monte-Carlo random phases.

    Combines an inverse-variance filter (ninv_filt), a filtering-operation
    module (opfilt) and a CG solver chain (mchain) to build mean-field
    quadratic-estimator alms.
    """

    def __init__(self, ninv_filt, opfilt, mchain, lib_qlm, pix_pha=None, cmb_pha=None, use_Pool=0):
        """
        Args:
            ninv_filt: inverse-variance filtering instance; must expose
                lib_skyalm / lib_datalm, cl_transf, get_mask, and npix.
            opfilt: filtering-operation module (TEBlen, soltn2TQUMlik, ...).
            mchain: solver chain with a solve(soltn, maps, finiop=...) method.
            lib_qlm: alm library of the output qlms.
            pix_pha: pixel-space random-phase library (required for MFkey 12 and 2).
            cmb_pha: harmonic-space phase library (stored but unused here).
            use_Pool: parallelization flag forwarded to the lensing routines.
        """
        self.ninv_filt = ninv_filt
        self.opfilt = opfilt
        self.mchain = mchain
        self.lib_qlm = lib_qlm
        self.pix_pha = pix_pha
        self.cmb_pha = cmb_pha
        self.use_Pool = use_Pool

    def npix(self):
        """Number of pixels of the filtering instance."""
        return self.ninv_filt.npix

    def get_MFqlms(self, typ, MFkey, idx, soltn=None):
        """Returns the unnormalized mean-field qlms for sim index 'idx'.

        Args:
            typ: data combination ('T', 'QU' or 'TQU').
            MFkey: mean-field estimator variant; only 12 and 2 are implemented.
            idx: simulation index used to fetch the random phases.
            soltn: optional starting point for the CG solver (zeroed if None).

        Returns:
            np.ndarray with the (gradient, curl) unnormalized qlm arrays;
            N0 * output is the normalized qest.
        """
        lib_sky = self.ninv_filt.lib_skyalm
        lib_dat = self.ninv_filt.lib_datalm
        assert lib_sky.lsides == lib_dat.lsides
        self.opfilt.typ = typ
        if hasattr(self.ninv_filt, 'f'):
            print("******* I am using displacement for ninvfilt in MFest")
        else:
            print("******* Using id displacement in MFest")
        f = getattr(self.ninv_filt, 'f', ffs_id_displacement(lib_sky.shape, lib_sky.lsides))
        if MFkey == 12:
            # B^t M^t X (x) (D ika P D^t B^t Covi X )(x). Second term are just
            # the deflected gradients of the reconstructed modes.
            assert self.pix_pha is not None
            if soltn is None:
                soltn = np.zeros((self.opfilt.TEBlen(typ), self.ninv_filt.lib_skyalm.alm_size), dtype=complex)
            phas = self.pix_pha.get_sim(idx)[0:len(typ)]
            # NOTE(review): this branch lowercases the field key for get_mask
            # while the MFkey == 2 branch does not — verify get_mask's expected case.
            for i, _f in enumerate(typ): phas[i] *= self.ninv_filt.get_mask(_f.lower())
            self.mchain.solve(soltn, phas, finiop='MLIK')
            TQUMlik = self.opfilt.soltn2TQUMlik(soltn, self.ninv_filt)
            norm = np.prod(lib_dat.shape) / (np.prod(lib_dat.lsides))

            def Left(id):
                # Beam-convolved, masked phases upgraded to the sky resolution.
                _alm = lib_sky.udgrade(lib_dat, lib_dat.map2alm(phas[id]))
                return lib_sky.alm2map(lib_sky.almxfl(_alm, norm * self.ninv_filt.cl_transf))

            def Right(id, ax):
                # Lensed derivative of the reconstructed modes along 'ax' (1: x, 0: y).
                assert ax in [0, 1], ax
                kfunc = lib_sky.get_ikx if ax == 1 else lib_sky.get_iky
                return f.alm2lenmap(lib_sky, TQUMlik[id] * kfunc(), use_Pool=self.use_Pool)
        elif MFkey == 2:
            # X unit variance random phases dat map shaped
            # X (x) (D ika P D^t B^t Covi B X )(x). Second term are just the
            # deflected gradients of the reconstructed B X given in input.
            norm = np.prod(lib_dat.shape) / (np.prod(lib_sky.lsides))
            phas = self.pix_pha.get_sim(idx)[0:len(typ)]
            if soltn is None:
                soltn = np.zeros((self.opfilt.TEBlen(typ), self.ninv_filt.lib_skyalm.alm_size), dtype=complex)
            inp = np.array([lib_sky.almxfl(lib_sky.map2alm(_p), self.ninv_filt.cl_transf) for _p in phas])
            inp = np.array([lib_dat.alm2map(lib_dat.udgrade(lib_sky, _p)) * self.ninv_filt.get_mask(_f) for _p, _f in
                            zip(inp, typ)])
            self.mchain.solve(soltn, inp, finiop='MLIK')
            soltn = self.opfilt.soltn2TQUMlik(soltn, self.ninv_filt)

            def Left(id):
                return phas[id]

            def Right(id, ax):
                assert ax in [0, 1], ax
                kfunc = lib_sky.get_ikx if ax == 1 else lib_sky.get_iky
                return f.alm2lenmap(lib_sky, norm * soltn[id] * kfunc(), use_Pool=self.use_Pool)
        elif MFkey == 22:
            # D ika b X (x) (B^t Covi B D P 1/b X )(x). TEB phas
            assert 0
        else:
            # Bugfix: this was "assert 'not implemented'", which always passes
            # (a non-empty string is truthy) and then crashed later with a
            # confusing NameError on Left. Fail loudly here instead.
            assert 0, 'MFkey %s not implemented' % MFkey
        retdx = Left(0) * Right(0, 1)
        for i in range(1, len(typ)): retdx += Left(i) * Right(i, 1)
        retdx = self.lib_qlm.map2alm(retdx)
        retdy = Left(0) * Right(0, 0)
        for i in range(1, len(typ)): retdy += Left(i) * Right(i, 0)
        retdy = self.lib_qlm.map2alm(retdy)
        # Project the Cartesian gradient onto gradient (potential) and curl modes.
        return np.array([- retdx * self.lib_qlm.get_ikx() - retdy * self.lib_qlm.get_iky(),
                         retdx * self.lib_qlm.get_iky() - retdy * self.lib_qlm.get_ikx()])  # N0 * output is normalized qest
def get_MFqlms(typ, MFkey, lib_dat, lib_sky, pix_phas, TQUMlik_pha, cl_transf, lib_qlm, f=None, use_Pool=0):
    """Stand-alone mean-field quadratic estimator from precomputed random phases.

    Correlates a 'Left' map with a 'Right' deflected-gradient map, one pair per
    field of *typ*, and projects the products onto the two qlm components.

    :param typ: data combination, one entry per field (e.g. 'T', 'QU', 'TQU').
    :param MFkey: Monte-Carlo trick to use; 12, 2 and 22 are handled below.
    :param lib_dat: data-resolution alm library (must share lsides with lib_sky).
    :param lib_sky: sky-resolution alm library.
    :param pix_phas: pixel-space random-phase maps, one per field.
    :param TQUMlik_pha: alms associated to the phases (shape requirements depend on MFkey).
    :param cl_transf: transfer function cl.
    :param lib_qlm: alm library of the returned modes.
    :param f: forward displacement; identity displacement if None.
    :param use_Pool: parallelization flag forwarded to the lensing routines.
    :return: array of the two qlm components; N0 * output is the unnormalized
        estimate (see trailing comment).
    """
    assert lib_dat.lsides == lib_sky.lsides
    if f is None: f = ffs_id_displacement(lib_sky.shape, lib_sky.lsides)
    if MFkey == 12:
        # X unit variance random phases ! on the unmasked pixels !
        # B^t M^t X (x) (D ika P D^t B^t Covi X )(x). Second term are just the deflected gradients of the recontructed
        # X
        norm = np.prod(lib_dat.shape) / (np.prod(lib_dat.lsides))
        def Left(id):
            # Transfer-function-convolved phase map, upgraded to sky resolution.
            _alm = lib_sky.udgrade(lib_dat, lib_dat.map2alm(pix_phas[id]))
            return lib_sky.alm2map(lib_sky.almxfl(_alm, norm * cl_transf))
        def Right(id, ax):
            # Deflected gradient along x (ax == 1) or y (ax == 0).
            assert ax in [0, 1], ax
            kfunc = lib_sky.get_ikx if ax == 1 else lib_sky.get_iky
            return f.alm2lenmap(lib_sky, TQUMlik_pha[id] * kfunc(), use_Pool=use_Pool)
    elif MFkey == 2:
        # X unit variance random phases dat map shaped
        # X (x) (D ika P D^t B^t Covi B X )(x). Second term are just the deflected gradients of the recontructed
        # B X given in input
        norm = np.prod(lib_dat.shape) / (np.prod(lib_sky.lsides))
        def Left(id):
            return pix_phas[id]
        def Right(id, ax):
            assert ax in [0, 1], ax
            kfunc = lib_sky.get_ikx if ax == 1 else lib_sky.get_iky
            return f.alm2lenmap(lib_sky, norm * TQUMlik_pha[id] * kfunc(), use_Pool=use_Pool)
    elif MFkey == 22:
        # FIXME : need TEB pha
        # X unit variance TEB sky-shaped.
        # D ika b X (x) (B^t Covi B D P 1/b X )(x). Second term given by pix_pha
        # X TEB shap
        # TQU_mlik must be TQUskylm shaped for this.
        # pix_phas is TQU dat-shaped and the solution of B^t Covi B D P 1/b X
        norm = 1.
        assert np.all([_m.shape == lib_dat.shape for _m in pix_phas])
        assert np.all([_m.size == lib_sky.alm_size for _m in TQUMlik_pha])
        def Left(id):
            return pix_phas[id]
        def Right(id, ax):
            assert ax in [0, 1], ax
            kfunc = lib_sky.get_ikx if ax == 1 else lib_sky.get_iky
            return f.alm2lenmap(lib_sky, lib_sky.almxfl(TQUMlik_pha[id] * kfunc(), cl_transf * norm), use_Pool=use_Pool)
    else:
        assert 0, 'not implemented'
    # Accumulate Left * Right over fields and project onto the two qlm components.
    retdx = Left(0) * Right(0, 1)
    for i in range(1, len(typ)): retdx += Left(i) * Right(i, 1)
    retdx = lib_qlm.map2alm(retdx)
    retdy = Left(0) * Right(0, 0)
    for i in range(1, len(typ)): retdy += Left(i) * Right(i, 0)
    retdy = lib_qlm.map2alm(retdy)
    return np.array([- retdx * lib_qlm.get_ikx() - retdy * lib_qlm.get_iky(),
                     retdx * lib_qlm.get_iky() - retdy * lib_qlm.get_ikx()])  # N0 * output is unnormalized qest
def get_qlms(typ, lib_sky, Res_TEBlms, cls_unl, lib_qlm, Res_TEBlms2=None, f=None, use_Pool=0, **kwargs):
    # FIXME : Seems to work but D_f to Reslm is a super small scale map in close to in configuration with little noise.
    # FIXME : The map B^t Covi d has spec 1 / (P + N/B^2) which can peak at a farily small scale.
    # FIXME there is probably a better way.
    """
    Stand alone qlm estimator starting from lib_sky and unlensed Cls
    Likelihood gradient (from the quadratic part).
    (B^t F^t Cov^-1 d)^a(z) (D dxi_unl/da D^t B^t F^t Cov^-1 d)_a(z)
    Only lib_skys enter this.
    Sign is correct for pot. estimate, not gradient.
    This can written as (D_f (Res lms))(z) (D_f P_a Res lms)(z) * |M_f|(z)
    Only forward displacement is needed.
    Res lms is here D^t B^t Cov^-1 data. This can be written in general
    as D^t B^t Ni (data - B D MLIK(data)) in T E B space. For non-singular modes
    this may be written as P_TEB^{-1} MLIK. (but we can't use pseudo inverse
    for singular modes.)
    P_a Res lms are always the max. likelihood modes however.
    N0 * output is normalized qest for MV estimates
    1/2 (VX WY + VY WX)
    1/2 VX WY + VY WX
    1/4 (VVt WW^t + VVt WWt + WV^t VW^t + V W^t WV^t)
    We can get something without having to lens any weird maps through
    ( B^t Ni (data - B D Xmap))(z) (D Xmap)(z)
    """
    # Fall back on the first residual set when no second one is given.
    _Res_TEBlms2 = Res_TEBlms if Res_TEBlms2 is None else Res_TEBlms2
    assert len(Res_TEBlms) == len(typ) and len(_Res_TEBlms2) == len(typ)
    t = timer(verbose, prefix=__name__)
    if f is not None: print(" qlms.py :: consider using get_qlms_wl for qlms with lensing, to avoid lensing noisy maps")
    if f is None: f = ffs_id_displacement(lib_sky.shape, lib_sky.lsides)
    # Apply the unlensed TEB spectral matrix to the residuals, then rotate to TQU space.
    TQUmlik = SM.TEB2TQUlms(typ, lib_sky, SM.apply_TEBmat(typ, lib_sky, cls_unl, _Res_TEBlms2))
    def left(S_id):
        # Lensed map of the residual for field typ[S_id].
        assert S_id in range(len(typ)), (S_id, typ)
        return f.alm2lenmap(lib_sky, SM.get_SlmfromTEBlms(typ, lib_sky, Res_TEBlms, typ[S_id]), use_Pool=use_Pool)
    def Right(S_id, axis):
        # Lensed map of the x (axis == 1) or y (axis == 0) gradient of the filtered residual.
        assert S_id in range(len(typ)), (S_id, typ)
        assert axis in [0, 1]
        kfunc = lib_sky.get_ikx if axis == 1 else lib_sky.get_iky
        return f.alm2lenmap(lib_sky, TQUmlik[S_id] * kfunc(), use_Pool=use_Pool)
    retdx = left(0) * Right(0, 1)
    for _i in range(1, len(typ)): retdx += left(_i) * Right(_i, 1)
    retdx = lib_qlm.map2alm(f.mult_wmagn(retdx))  # weight by the magnification |M_f| (cf. docstring)
    t.checkpoint("get_likgrad::Cart. gr. x done. (%s map(s) lensed, %s fft(s)) " % (2 * len(typ), 2 * len(typ) + 1))
    retdy = left(0) * Right(0, 0)
    for _i in range(1, len(typ)): retdy += left(_i) * Right(_i, 0)
    retdy = lib_qlm.map2alm(f.mult_wmagn(retdy))
    t.checkpoint("get_likgrad::Cart. gr. y done. (%s map(s) lensed, %s fft(s)) " % (2 * len(typ), 2 * len(typ) + 1))
    return np.array([- retdx * lib_qlm.get_ikx() - retdy * lib_qlm.get_iky(),
                     retdx * lib_qlm.get_iky() - retdy * lib_qlm.get_ikx()])  # N0 * output is normalized qest
def get_response_flexible(lib_tlm, lib_elm, lib_blm, cls, cls_transf, cls_noise, lib_qlm, isoN0s = True):
    """
    N0 calc, allowing for arbitrary aniso filtering.
    -(xi,a K) (xi,b K) - (K)ab (xi,a K xi,b) with K = B^t Covi B

    :param lib_tlm, lib_elm, lib_blm: alm libraries for T, E, B (must share the same ell matrix).
    :param cls: dict of fiducial spectra ('tt', 'ee', 'te', 'bb').
    :param cls_transf: dict of transfer functions, keyed by field ('t', 'e', 'b').
    :param cls_noise: dict of noise spectra; 'tt', 'ee' and 'bb' are required.
    :param lib_qlm: alm library used for the output binning.
    :param isoN0s: only the isotropic (ell-binned) output is implemented.
    :return: tuple of the two ell-binned response arrays.
    """
    assert lib_tlm.ell_mat == lib_elm.ell_mat and lib_tlm.ell_mat == lib_blm.ell_mat
    assert 'tt' in cls_noise.keys() and 'ee' in cls_noise.keys() and 'bb' in cls_noise.keys()
    ellmat = lib_tlm.ell_mat
    lmax = max(lib_tlm.ellmax,lib_elm.ellmax,lib_blm.ellmax)
    Ki_cls = {}
    w_cls = {}
    t_cls = {}
    # Build the to-be-inverted covariance (Ki), the spectral weights (w) and the transfers (t).
    for k in ['tt','ee','te','bb']:
        Ki_cls[k] = np.zeros(ellmat.ellmax + 1,dtype = float)
        Ki_cls[k][:lmax + 1] = cls[k][:lmax + 1] * cls_transf[k[0]][:lmax + 1] * cls_transf[k[1]][:lmax + 1]
        if k in cls_noise.keys(): Ki_cls[k][:lmax + 1] += cls_noise[k][:lmax + 1]
        w_cls[k] = np.zeros(ellmat.ellmax + 1,dtype = float)
        w_cls[k][:lmax + 1] = cls[k][:lmax + 1]
    for k in ['t','e','b']:
        t_cls[k] = np.zeros(ellmat.ellmax + 1,dtype = float)
        t_cls[k][:lmax + 1] = np.copy(cls_transf[k][:lmax + 1])
    # Prune multipoles with no modes in the respective alm libraries.
    if lib_tlm.ellmax > 0: t_cls['t'][:lib_tlm.ellmax + 1] *= (lib_tlm.get_Nell() > 0)
    if lib_elm.ellmax > 0: t_cls['e'][:lib_elm.ellmax + 1] *= (lib_elm.get_Nell() > 0)
    if lib_blm.ellmax > 0: t_cls['b'][:lib_blm.ellmax + 1] *= (lib_blm.get_Nell() > 0)
    # Assemble the 3x3 TEB covariance on the 2d mode grid, then pseudo-invert it.
    K_almmap = np.zeros((ellmat.rshape[0],ellmat.rshape[1],3,3),dtype = float)
    K_almmap[:,:,0, 0] = Ki_cls['tt'][ellmat()] * lib_tlm._cond()
    K_almmap[:,:,1, 1] = Ki_cls['ee'][ellmat()] * lib_elm._cond()
    K_almmap[:,:,2, 2] = Ki_cls['bb'][ellmat()] * lib_blm._cond()
    K_almmap[:,:,0, 1] = Ki_cls['te'][ellmat()] * lib_tlm._cond()* lib_elm._cond()
    K_almmap[:,:,1, 0] = Ki_cls['te'][ellmat()] * lib_tlm._cond()* lib_elm._cond()
    # FIX: the original lexicographic test `np.__version__ >= '1.14'` misorders
    # versions such as '1.9' vs '1.14'; compare numeric (major, minor) instead.
    # np.linalg.pinv broadcasts over stacked matrices from numpy 1.14 on.
    if tuple(int(v) for v in np.__version__.split('.')[:2]) >= (1, 14):
        K_almmap = np.linalg.pinv(K_almmap) # B^t Covi B
    else:
        # Older numpy: pseudo-invert each 3x3 block separately.
        for l1 in range(ellmat.rshape[0]):
            for l2 in range(ellmat.rshape[1]):
                K_almmap[l1,l2,:,:] = np.linalg.pinv(K_almmap[l1,l2,:,:])
    # Fold in the (pruned) transfer functions: K -> b K b.
    K_almmap[:, :, 0, 0] *= t_cls['t'][ellmat()] ** 2
    K_almmap[:, :, 1, 1] *= t_cls['e'][ellmat()] ** 2
    K_almmap[:, :, 0, 1] *= t_cls['t'][ellmat()] * t_cls['e'][ellmat()]
    K_almmap[:, :, 1, 0] *= t_cls['e'][ellmat()] * t_cls['t'][ellmat()]
    K_almmap[:, :, 2, 2] *= t_cls['b'][ellmat()] ** 2
    # xi K (spectral matrix times inverse covariance), entrywise in TEB.
    xiK_almmap = np.zeros((ellmat.rshape[0],ellmat.rshape[1],3,3),dtype = float)
    xiK_almmap[:, :, 0, 0] = w_cls['tt'][ellmat()] * K_almmap[:, :, 0, 0] + w_cls['te'][ellmat()] * K_almmap[:, :, 1, 0]
    xiK_almmap[:, :, 1, 1] = w_cls['te'][ellmat()] * K_almmap[:, :, 0, 1] + w_cls['ee'][ellmat()] * K_almmap[:, :, 1, 1]
    xiK_almmap[:, :, 2, 2] = w_cls['bb'][ellmat()] * K_almmap[:, :, 2, 2]
    xiK_almmap[:, :, 0, 1] = w_cls['tt'][ellmat()] * K_almmap[:, :, 0, 1] + w_cls['te'][ellmat()] * K_almmap[:, :, 1, 1]
    xiK_almmap[:, :, 1, 0] = w_cls['te'][ellmat()] * K_almmap[:, :, 0, 0] + w_cls['ee'][ellmat()] * K_almmap[:, :, 1, 0]
    # xi K xi.
    xiKxi_almmap = np.zeros((ellmat.rshape[0],ellmat.rshape[1],3,3),dtype = float)
    xiKxi_almmap[:, :, 0, 0] = w_cls['tt'][ellmat()] * xiK_almmap[:, :, 0, 0] + w_cls['te'][ellmat()] * xiK_almmap[:, :, 0, 1]
    xiKxi_almmap[:, :, 1, 1] = w_cls['te'][ellmat()] * xiK_almmap[:, :, 1, 0] + w_cls['ee'][ellmat()] * xiK_almmap[:, :, 1, 1]
    xiKxi_almmap[:, :, 2, 2] = w_cls['bb'][ellmat()] * xiK_almmap[:, :, 2, 2]
    xiKxi_almmap[:, :, 0, 1] = w_cls['ee'][ellmat()] * xiK_almmap[:, :, 0, 1] + w_cls['te'][ellmat()] * xiK_almmap[:, :, 0, 0]
    xiKxi_almmap[:, :, 1, 0] = w_cls['tt'][ellmat()] * xiK_almmap[:, :, 1, 0] + w_cls['te'][ellmat()] * xiK_almmap[:, :, 1, 1]
    cos,sin = ellmat.get_cossin_2iphi_mat()
    def apply_RSX(almmap,iS,iX):
        """ T Q U = (1 0 0
                     0 c -s
                     0 s c) T E B"""
        if iS == 0:
            return almmap.copy() if iX == 0 else np.zeros_like(almmap)
        elif iS == 1:
            if iX == 1:
                return cos * almmap
            elif iX == 2:
                return -sin * almmap
            else :
                return np.zeros_like(almmap)
        elif iS == 2:
            if iX == 1:
                return sin * almmap
            elif iX == 2:
                return cos * almmap
            else :
                return np.zeros_like(almmap)
        else:
            assert 0
    def TEB2TQU(iS,jS,TEBmat):
        """ R_sx R_ty Y Pxy """
        assert TEBmat.shape == (ellmat.rshape[0],ellmat.rshape[1],3,3)
        ret = np.zeros(ellmat.rshape)
        for iX in range(3):
            for jX in range(3):
                ret += apply_RSX(apply_RSX(TEBmat[:,:,iX,jX],iS,iX),jS,jX)
        return ret
    # turn TEB to TQU:
    xiK = np.zeros_like(xiK_almmap)
    for iS in range(3):
        for jS in range(3):
            xiK[:,:,iS,jS] = TEB2TQU(iS,jS,xiK_almmap)
    del xiK_almmap
    xiKxi = np.zeros_like(xiKxi_almmap)
    for iS in range(3):
        for jS in range(3):
            xiKxi[:, :, iS, jS] = TEB2TQU(iS, jS, xiKxi_almmap)
    del xiKxi_almmap
    K = np.zeros_like(K_almmap)
    for iS in range(3):
        for jS in range(3):
            K[:, :, iS, jS] = TEB2TQU(iS, jS, K_almmap)
    del K_almmap
    Fxx = np.zeros(ellmat.shape, dtype=float)
    Fyy = np.zeros(ellmat.shape, dtype=float)
    Fxy = np.zeros(ellmat.shape, dtype=float)
    Fyx = np.zeros(ellmat.shape, dtype=float)
    _2map = lambda almmap:np.fft.irfft2(almmap.astype(complex))
    ikx = lambda :ellmat.get_ikx_mat()
    iky = lambda: ellmat.get_iky_mat()
    #-(xi, a K)(xi, b K) - (K)(xi, a K xi, b)
    for iS in range(3):
        for jS in range(3):
            Fxx += _2map(xiK[:,:,iS,jS] * ikx()) * _2map(xiK[:,:,jS,iS] * ikx())
            Fyy += _2map(xiK[:,:,iS,jS] * iky()) * _2map(xiK[:,:,jS,iS] * iky())
            Fxy += _2map(xiK[:,:,iS,jS] * ikx()) * _2map(xiK[:,:,jS,iS] * iky())
            Fyx += _2map(xiK[:,:,iS,jS] * iky()) * _2map(xiK[:,:,jS,iS] * ikx())
            Fxx += _2map(K[:, :, iS, jS]) * _2map(xiKxi[:, :, jS, iS] * ikx() * ikx())
            Fyy += _2map(K[:, :, iS, jS]) * _2map(xiKxi[:, :, jS, iS] * iky() * iky())
            Fxy += _2map(K[:, :, iS, jS]) * _2map(xiKxi[:, :, jS, iS] * ikx() * iky())
            Fyx += _2map(K[:, :, iS, jS]) * _2map(xiKxi[:, :, jS, iS] * iky() * ikx())
    fac = 1. / np.sqrt(np.prod(ellmat.lsides)) * lib_tlm.fac_alm2rfft ** 2
    Fxx = lib_qlm.map2alm(Fxx)
    Fyy = lib_qlm.map2alm(Fyy)
    Fxy = lib_qlm.map2alm(Fxy)
    Fyx = lib_qlm.map2alm(Fyx)
    ikx = lambda : lib_qlm.get_ikx()
    iky = lambda : lib_qlm.get_iky()
    assert isoN0s,'implement this (non anisotropic N0 2d cls)' #this affects only the following line:
    # FIX: the original final line carried a fused extraction artifact
    # ('| lensit/ffs_qlms/qlms.py | from __future__ ...') that made it unparseable.
    return (fac*lib_qlm.bin_realpart_inell(Fxx * ikx() ** 2 + Fyy * iky() ** 2 + (Fxy + Fyx) * ikx() * iky()),
            fac * lib_qlm.bin_realpart_inell( (Fxx * iky() ** 2 + Fyy * ikx() ** 2 - (Fxy + Fyx) * ikx() * iky())))
import numpy as np
from lensit.misc.misc_utils import timer
from lensit.ffs_deflect.ffs_deflect import ffs_id_displacement
from lensit.ffs_covs import ffs_specmat as SM
verbose = False  # switch for the timer checkpoint printouts in the estimators below
typs = ['T', 'QU', 'TQU']  # presumably the supported `typ` data combinations -- cf. the estimator arguments below
def get_qlms_wl(typ, lib_sky, TQU_Mlik, ResTQU_Mlik, lib_qlm, f=None,lib_sky2 =None, subtract_zeromode=False, use_Pool=0, **kwargs):
    """
    Stand alone qlm estimator starting from lib_sky and unlensed Cls
    Likelihood gradient (from the quadratic part).
    (B^t F^t Cov^-1 d)^a(z) (D dxi_unl/da D^t B^t F^t Cov^-1 d)_a(z)
    Only lib_skys enter this.
    Sign is correct for pot. estimate, not gradient.
    This can written as (D_f (Res lms))(z) (D_f P_a Res lms)(z) * |M_f|(z)
    Only forward displacement is needed.
    Res lms is here D^t B^t Cov^-1 data. This can be written in general
    as D^t B^t Ni (data - B D MLIK(data)) in T E B space. For non-singular modes
    this may be written as P_TEB^{-1} MLIK. (but we can't use pseudo inverse
    for singular modes.)
    P_a Res lms are always the max. likelihood modes however.
    N0 * output is normalized qest for MV estimates
    1/2 (VX WY + VY WX)
    1/2 VX WY + VY WX
    1/4 (VVt WW^t + VVt WWt + WV^t VW^t + V W^t WV^t)
    We can get something without having to lens any weird maps through
    ( B^t Ni (data - B D Xmap))(z) (D ika Xmap)(z)
    """
    # FIX: the fallback previously read `else lib_sky`, which silently discarded
    # a user-supplied lib_sky2; it must keep the argument when it is given.
    lib_sky2 = lib_sky if lib_sky2 is None else lib_sky2
    if typ in ['EE','EB','BE','BB']:
        # Zero out the unwanted E or B component on each side, then recurse as 'QU'.
        TEB_Mlik = lib_sky.QUlms2EBalms(TQU_Mlik)
        TEB_Res = lib_sky.QUlms2EBalms(ResTQU_Mlik)
        TEB_Mlik[{'E':1,'B':0}[typ[0]]] *= 0.
        TEB_Res[{'E':1,'B':0}[typ[1]]] *= 0.
        return get_qlms_wl('QU',lib_sky,lib_sky.EBlms2QUalms(TEB_Mlik),lib_sky2.EBlms2QUalms(TEB_Res),lib_qlm,
                           f = f,use_Pool=use_Pool,lib_sky2 = lib_sky2)
    assert len(TQU_Mlik) == len(typ) and len(ResTQU_Mlik) == len(typ)
    t = timer(verbose, prefix=__name__)
    if f is None: f = ffs_id_displacement(lib_sky.shape, lib_sky.lsides)
    def left(id):
        # Residual map for field typ[id] (not lensed).
        assert id in range(len(typ)), (id, typ)
        return lib_sky.alm2map(ResTQU_Mlik[id])
    def Right(S_id, axis):
        # Lensed map of the x (axis == 1) or y (axis == 0) gradient of the mlik field.
        assert S_id in range(len(typ)), (S_id, typ)
        assert axis in [0, 1]
        kfunc = lib_sky2.get_ikx if axis == 1 else lib_sky2.get_iky
        return f.alm2lenmap(lib_sky2, TQU_Mlik[S_id] * kfunc(), use_Pool=use_Pool)
    retdx = left(0) * Right(0, 1)
    for _i in range(1, len(typ)): retdx += left(_i) * Right(_i, 1)
    t.checkpoint("get_likgrad::Cart. gr. x done. (%s map(s) lensed, %s fft(s)) " % (len(typ), 2 * len(typ) + 1))
    retdy = left(0) * Right(0, 0)
    for _i in range(1, len(typ)): retdy += left(_i) * Right(_i, 0)
    t.checkpoint("get_likgrad::Cart. gr. y done. (%s map(s) lensed, %s fft(s)) " % (len(typ), 2 * len(typ) + 1))
    if subtract_zeromode:
        # Remove the zero mode of the gradients before projecting onto qlms.
        zro_x = np.sum(retdx)
        zro_y = np.sum(retdy)
        print('zero mode:', zro_x,zro_y)
        retdx[0, 0] -= zro_x
        retdy[0, 0] -= zro_y
    retdx = lib_qlm.map2alm(retdx)
    retdy = lib_qlm.map2alm(retdy)
    return np.array([- retdx * lib_qlm.get_ikx() - retdy * lib_qlm.get_iky(),
                     retdx * lib_qlm.get_iky() - retdy * lib_qlm.get_ikx()])  # N0 * output is normalized qest
def _Mlik2ResTQUMlik_diag(field, ninv_filt, TQUMlik, data, f, fi):
    """
    Produces B^t Ni (data - B D Mlik) in TQU space,
    that is fed into the qlm estimator.
    """
    assert field in ['T', 'Q', 'U']
    # Identity displacement, used to restore the filter state before returning.
    lib = ninv_filt.lib_skyalm
    id_displ = ffs_id_displacement(lib.shape, lib.lsides)
    ninv_filt.set_ffi(f, fi)
    residual = data - ninv_filt.apply_R(field, TQUMlik)
    ninv_filt.apply_map(f, residual, inplace=True)
    ninv_filt.set_ffi(id_displ, id_displ)
    return ninv_filt.apply_Rt(field, residual)
def get_response(typ,lib_datalm,cls_len,NlevT_uKamin,NlevP_uKamin,cl_transf,
                 wAl = None,wBl = None,fAl = None,fBl = None,lib_qlm = None):
    """ Q. estimator response.

    Flat-sky quadratic-estimator response for a field pair typ = 'AB';
    only 'E'/'B' combinations are implemented (see the asserts below).
    Isotropic filtering/weight legs wAl*fAl and wBl*fBl are built from
    cls_len and white-noise levels (apparently in uK.amin, cf. the
    parameter names) when not supplied.
    """
    assert typ[0] in ['T','E','B'] and typ[1] in ['T','E','B']
    assert typ[0] in ['E','B'] and typ[1] in ['E','B'], "T not implemented"
    assert 'eb' not in cls_len.keys() and 'be' not in cls_len.keys()
    assert 'tb' not in cls_len.keys() and 'bt' not in cls_len.keys()
    lmax = lib_datalm.ellmax
    if wAl is None: wAl = np.ones(lmax + 1,dtype = float)
    if wBl is None: wBl = cls_len[(typ[1] + typ[1]).lower()][:lmax + 1]
    if fAl is None:
        # Default isotropic filter for leg A: 1 / (cl + (noise / beam)^2) where the beam is nonzero.
        Nlev = NlevT_uKamin if typ[0] == 'T' else NlevP_uKamin
        ii = np.where(cl_transf[:lmax + 1] > 0.)
        fAl = np.zeros(lmax + 1,dtype = float)
        fAl[ii] = 1./ (cls_len[(typ[0] + typ[0]).lower()][ii] + ( (Nlev / 60. /180. * np.pi)/ cl_transf[ii]) ** 2)
    if fBl is None:
        # Same default filter for leg B.
        Nlev = NlevT_uKamin if typ[1] == 'T' else NlevP_uKamin
        ii = np.where(cl_transf[:lmax + 1] > 0.)
        fBl = np.zeros(lmax + 1,dtype = float)
        fBl[ii] = 1./ (cls_len[(typ[1] + typ[1]).lower()][ii] + ( (Nlev / 60. /180. * np.pi)/ cl_transf[ii]) ** 2)
    if lib_qlm is None: lib_qlm = lib_datalm
    def get_pmat(A, i, j, clA):
        """(i, j) Stokes-like entry of the 2d spectral matrix of field A, weighted by clA."""
        if A == 'T':
            if i == 0 and j == 0:
                return clA[lib_datalm.reduced_ellmat()]
            else:
                assert 0,('zero',i,j)
        elif A == 'E':
            cos, sin = lib_datalm.get_cossin_2iphi()
            if i == 1 and j == 1:
                return clA[lib_datalm.reduced_ellmat()] * cos ** 2
            elif i == 2 and j == 2:
                return clA[lib_datalm.reduced_ellmat()] * sin ** 2
            elif i == 2 and j == 1:
                return clA[lib_datalm.reduced_ellmat()] * cos * sin
            elif i == 1 and j == 2:
                return clA[lib_datalm.reduced_ellmat()] * cos * sin
            else:
                assert 0,('zero',i,j)
        elif A == 'B':
            cos, sin = lib_datalm.get_cossin_2iphi()
            if i == 1 and j == 1:
                return clA[lib_datalm.reduced_ellmat()] * sin ** 2
            elif i == 2 and j == 2:
                return clA[lib_datalm.reduced_ellmat()] * cos ** 2
            elif i == 1 and j == 2:
                return -clA[lib_datalm.reduced_ellmat()] * cos * sin
            elif i == 2 and j == 1:
                return -clA[lib_datalm.reduced_ellmat()] * cos * sin
            else:
                assert 0,('zero',i,j)
        else:
            assert 0,(A,['T','E','B'])
    retxx = np.zeros(lib_datalm.shape,dtype = float)
    retyy = np.zeros(lib_datalm.shape,dtype = float)
    retxy = np.zeros(lib_datalm.shape,dtype = float)
    retyx = np.zeros(lib_datalm.shape,dtype = float)
    _2map = lambda alm : lib_datalm.alm2map(alm)
    ikx = lambda : lib_datalm.get_ikx()
    iky = lambda: lib_datalm.get_iky()
    # First set of terms: both gradient factors on leg B.
    clB = wBl * fBl * cls_len[(typ[1] + typ[1]).lower()][:lmax + 1]
    clA = wAl * fAl
    for i, j in [(1, 1),(1, 2),(2, 1),(2, 2)]:
        retxx += _2map(get_pmat(typ[0],i,j, clA)) * _2map(ikx() ** 2 * get_pmat(typ[1],j,i,clB ))
        retyy += _2map(get_pmat(typ[0],i,j, clA)) * _2map(iky() ** 2 * get_pmat(typ[1],j,i,clB ))
        retxy += _2map(get_pmat(typ[0],i,j, clA)) * _2map(ikx() * iky() * get_pmat(typ[1],j,i,clB ))
        retyx += _2map(get_pmat(typ[0],i,j, clA)) * _2map(ikx() * iky() * get_pmat(typ[1],j,i,clB ))
    # Second set of terms: one gradient factor on each leg.
    clB = wBl * fBl
    clA = wAl * fAl * cls_len[(typ[0] + typ[0]).lower()][:lmax + 1]
    for i, j in [(1, 1), (1, 2), (2, 1), (2, 2)]:
        retxx += _2map(ikx() * get_pmat(typ[0], i, j, clA)) * _2map(ikx() * get_pmat(typ[1], j, i, clB))
        retyy += _2map(iky() * get_pmat(typ[0], i, j, clA)) * _2map(iky() * get_pmat(typ[1], j, i, clB))
        retxy += _2map(ikx() * get_pmat(typ[0], i, j, clA)) * _2map(iky() * get_pmat(typ[1], j, i, clB))
        retyx += _2map(iky() * get_pmat(typ[0], i, j, clA)) * _2map(ikx() * get_pmat(typ[1], j, i, clB))
    fac = 1. / np.sqrt(np.prod(lib_datalm.lsides))
    _2alm = lambda _map : lib_qlm.map2alm(_map)
    retxx = _2alm(retxx)
    retyy = _2alm(retyy)
    retxy = _2alm(retxy)
    retyx = _2alm(retyx)
    ikx = lambda : lib_qlm.get_ikx()
    iky = lambda : lib_qlm.get_iky()
    # Project the four Cartesian blocks onto the two output components.
    return np.array([fac * (retxx * ikx() ** 2 + retyy * iky() ** 2 + (retxy + retyx) * ikx() * iky()),
                     fac * (retxx * iky() ** 2 + retyy * ikx() ** 2 - (retxy + retyx) * ikx() * iky()) ])
class MFestimator:
    """Monte-Carlo mean-field estimator.

    Draws random-phase inputs from *pix_pha*, runs them through the filtering
    chain (*mchain* / *opfilt* / *ninv_filt*) and correlates input with the
    deflected gradients of the filtered output to obtain the mean-field
    contribution to the lensing quadratic estimator.
    """

    def __init__(self, ninv_filt, opfilt, mchain, lib_qlm, pix_pha=None, cmb_pha=None, use_Pool=0):
        self.ninv_filt = ninv_filt  # inverse-noise filtering instance (provides lib_skyalm, lib_datalm, masks)
        self.opfilt = opfilt  # operation-filter module (provides TEBlen and soltn2TQUMlik)
        self.mchain = mchain  # chain used to solve the filtering equation
        self.lib_qlm = lib_qlm  # alm library for the output estimator modes
        self.pix_pha = pix_pha  # pixel-space random-phase library (sims indexed by idx)
        self.cmb_pha = cmb_pha  # CMB-space random-phase library (not used by get_MFqlms below)
        self.use_Pool = use_Pool  # parallelization flag forwarded to the lensing routines

    def npix(self):
        """Number of pixels of the filtering instance."""
        return self.ninv_filt.npix

    def get_MFqlms(self, typ, MFkey, idx, soltn=None):
        """Return the mean-field qlm estimate for random-phase simulation *idx*.

        :param typ: data combination, one entry per field (e.g. 'T', 'QU', 'TQU').
        :param MFkey: Monte-Carlo trick; only 12 and 2 are implemented here.
        :param idx: index of the random-phase simulation to use.
        :param soltn: optional starting point / output buffer for the filtering solution.
        :return: array with the two qlm components (see trailing comment on normalization).
        """
        lib_sky = self.ninv_filt.lib_skyalm
        lib_dat = self.ninv_filt.lib_datalm
        assert lib_sky.lsides == lib_dat.lsides
        self.opfilt.typ = typ
        if hasattr(self.ninv_filt, 'f'):
            print("******* I am using displacement for ninvfilt in MFest")
        else:
            print("******* Using id displacement in MFest")
        # Use the filter's own displacement if present, else the identity displacement.
        f = getattr(self.ninv_filt, 'f', ffs_id_displacement(lib_sky.shape, lib_sky.lsides))
        if MFkey == 12:
            # B^t M^t X (x) (D ika P D^t B^t Covi X )(x). Second term are just the deflected gradients of the recontructed
            assert self.pix_pha is not None
            if soltn is None:
                soltn = np.zeros((self.opfilt.TEBlen(typ), self.ninv_filt.lib_skyalm.alm_size), dtype=complex)
            phas = self.pix_pha.get_sim(idx)[0:len(typ)]
            for i, _f in enumerate(typ): phas[i] *= self.ninv_filt.get_mask(_f.lower())
            self.mchain.solve(soltn, phas, finiop='MLIK')
            TQUMlik = self.opfilt.soltn2TQUMlik(soltn, self.ninv_filt)
            norm = np.prod(lib_dat.shape) / (np.prod(lib_dat.lsides))
            def Left(id):
                # Transfer-function-convolved phase map, upgraded to sky resolution.
                _alm = lib_sky.udgrade(lib_dat, lib_dat.map2alm(phas[id]))
                return lib_sky.alm2map(lib_sky.almxfl(_alm, norm * self.ninv_filt.cl_transf))
            def Right(id, ax):
                # Deflected gradient along x (ax == 1) or y (ax == 0).
                assert ax in [0, 1], ax
                kfunc = lib_sky.get_ikx if ax == 1 else lib_sky.get_iky
                return f.alm2lenmap(lib_sky, TQUMlik[id] * kfunc(), use_Pool=self.use_Pool)
        elif MFkey == 2:
            # X unit variance random phases dat map shaped
            # X (x) (D ika P D^t B^t Covi B X )(x). Second term are just the deflected gradients of the recontructed
            # B X given in input
            norm = np.prod(lib_dat.shape) / (np.prod(lib_sky.lsides))
            phas = self.pix_pha.get_sim(idx)[0:len(typ)]
            if soltn is None:
                soltn = np.zeros((self.opfilt.TEBlen(typ), self.ninv_filt.lib_skyalm.alm_size), dtype=complex)
            inp = np.array([lib_sky.almxfl(lib_sky.map2alm(_p), self.ninv_filt.cl_transf) for _p in phas])
            inp = np.array([lib_dat.alm2map(lib_dat.udgrade(lib_sky, _p)) * self.ninv_filt.get_mask(_f) for _p, _f in
                            zip(inp, typ)])
            self.mchain.solve(soltn, inp, finiop='MLIK')
            soltn = self.opfilt.soltn2TQUMlik(soltn, self.ninv_filt)
            def Left(id):
                return phas[id]
            def Right(id, ax):
                assert ax in [0, 1], ax
                kfunc = lib_sky.get_ikx if ax == 1 else lib_sky.get_iky
                return f.alm2lenmap(lib_sky, norm * soltn[id] * kfunc(), use_Pool=self.use_Pool)
        elif MFkey == 22:
            # D ika b X (x) (B^t Covi B D P 1/b X )(x). TEB phas
            assert 0
        else:
            # FIX: was `assert 'not implemented'` -- a non-empty string is always truthy,
            # so the assert never fired and unsupported keys fell through to a NameError below.
            assert 0, 'not implemented'
        retdx = Left(0) * Right(0, 1)
        for i in range(1, len(typ)): retdx += Left(i) * Right(i, 1)
        retdx = self.lib_qlm.map2alm(retdx)
        retdy = Left(0) * Right(0, 0)
        for i in range(1, len(typ)): retdy += Left(i) * Right(i, 0)
        retdy = self.lib_qlm.map2alm(retdy)
        return np.array([- retdx * self.lib_qlm.get_ikx() - retdy * self.lib_qlm.get_iky(),
                         retdx * self.lib_qlm.get_iky() - retdy * self.lib_qlm.get_ikx()])  # N0 * output is normalized qest
def get_MFqlms(typ, MFkey, lib_dat, lib_sky, pix_phas, TQUMlik_pha, cl_transf, lib_qlm, f=None, use_Pool=0):
    """Stand-alone mean-field quadratic estimator from precomputed random phases.

    Correlates a 'Left' map with a 'Right' deflected-gradient map, one pair per
    field of *typ*, and projects the products onto the two qlm components.

    :param typ: data combination, one entry per field (e.g. 'T', 'QU', 'TQU').
    :param MFkey: Monte-Carlo trick to use; 12, 2 and 22 are handled below.
    :param lib_dat: data-resolution alm library (must share lsides with lib_sky).
    :param lib_sky: sky-resolution alm library.
    :param pix_phas: pixel-space random-phase maps, one per field.
    :param TQUMlik_pha: alms associated to the phases (shape requirements depend on MFkey).
    :param cl_transf: transfer function cl.
    :param lib_qlm: alm library of the returned modes.
    :param f: forward displacement; identity displacement if None.
    :param use_Pool: parallelization flag forwarded to the lensing routines.
    :return: array of the two qlm components; N0 * output is the unnormalized
        estimate (see trailing comment).
    """
    assert lib_dat.lsides == lib_sky.lsides
    if f is None: f = ffs_id_displacement(lib_sky.shape, lib_sky.lsides)
    if MFkey == 12:
        # X unit variance random phases ! on the unmasked pixels !
        # B^t M^t X (x) (D ika P D^t B^t Covi X )(x). Second term are just the deflected gradients of the recontructed
        # X
        norm = np.prod(lib_dat.shape) / (np.prod(lib_dat.lsides))
        def Left(id):
            # Transfer-function-convolved phase map, upgraded to sky resolution.
            _alm = lib_sky.udgrade(lib_dat, lib_dat.map2alm(pix_phas[id]))
            return lib_sky.alm2map(lib_sky.almxfl(_alm, norm * cl_transf))
        def Right(id, ax):
            # Deflected gradient along x (ax == 1) or y (ax == 0).
            assert ax in [0, 1], ax
            kfunc = lib_sky.get_ikx if ax == 1 else lib_sky.get_iky
            return f.alm2lenmap(lib_sky, TQUMlik_pha[id] * kfunc(), use_Pool=use_Pool)
    elif MFkey == 2:
        # X unit variance random phases dat map shaped
        # X (x) (D ika P D^t B^t Covi B X )(x). Second term are just the deflected gradients of the recontructed
        # B X given in input
        norm = np.prod(lib_dat.shape) / (np.prod(lib_sky.lsides))
        def Left(id):
            return pix_phas[id]
        def Right(id, ax):
            assert ax in [0, 1], ax
            kfunc = lib_sky.get_ikx if ax == 1 else lib_sky.get_iky
            return f.alm2lenmap(lib_sky, norm * TQUMlik_pha[id] * kfunc(), use_Pool=use_Pool)
    elif MFkey == 22:
        # FIXME : need TEB pha
        # X unit variance TEB sky-shaped.
        # D ika b X (x) (B^t Covi B D P 1/b X )(x). Second term given by pix_pha
        # X TEB shap
        # TQU_mlik must be TQUskylm shaped for this.
        # pix_phas is TQU dat-shaped and the solution of B^t Covi B D P 1/b X
        norm = 1.
        assert np.all([_m.shape == lib_dat.shape for _m in pix_phas])
        assert np.all([_m.size == lib_sky.alm_size for _m in TQUMlik_pha])
        def Left(id):
            return pix_phas[id]
        def Right(id, ax):
            assert ax in [0, 1], ax
            kfunc = lib_sky.get_ikx if ax == 1 else lib_sky.get_iky
            return f.alm2lenmap(lib_sky, lib_sky.almxfl(TQUMlik_pha[id] * kfunc(), cl_transf * norm), use_Pool=use_Pool)
    else:
        assert 0, 'not implemented'
    # Accumulate Left * Right over fields and project onto the two qlm components.
    retdx = Left(0) * Right(0, 1)
    for i in range(1, len(typ)): retdx += Left(i) * Right(i, 1)
    retdx = lib_qlm.map2alm(retdx)
    retdy = Left(0) * Right(0, 0)
    for i in range(1, len(typ)): retdy += Left(i) * Right(i, 0)
    retdy = lib_qlm.map2alm(retdy)
    return np.array([- retdx * lib_qlm.get_ikx() - retdy * lib_qlm.get_iky(),
                     retdx * lib_qlm.get_iky() - retdy * lib_qlm.get_ikx()])  # N0 * output is unnormalized qest
def get_qlms(typ, lib_sky, Res_TEBlms, cls_unl, lib_qlm, Res_TEBlms2=None, f=None, use_Pool=0, **kwargs):
    # FIXME : Seems to work but D_f to Reslm is a super small scale map in close to in configuration with little noise.
    # FIXME : The map B^t Covi d has spec 1 / (P + N/B^2) which can peak at a farily small scale.
    # FIXME there is probably a better way.
    """
    Stand alone qlm estimator starting from lib_sky and unlensed Cls
    Likelihood gradient (from the quadratic part).
    (B^t F^t Cov^-1 d)^a(z) (D dxi_unl/da D^t B^t F^t Cov^-1 d)_a(z)
    Only lib_skys enter this.
    Sign is correct for pot. estimate, not gradient.
    This can written as (D_f (Res lms))(z) (D_f P_a Res lms)(z) * |M_f|(z)
    Only forward displacement is needed.
    Res lms is here D^t B^t Cov^-1 data. This can be written in general
    as D^t B^t Ni (data - B D MLIK(data)) in T E B space. For non-singular modes
    this may be written as P_TEB^{-1} MLIK. (but we can't use pseudo inverse
    for singular modes.)
    P_a Res lms are always the max. likelihood modes however.
    N0 * output is normalized qest for MV estimates
    1/2 (VX WY + VY WX)
    1/2 VX WY + VY WX
    1/4 (VVt WW^t + VVt WWt + WV^t VW^t + V W^t WV^t)
    We can get something without having to lens any weird maps through
    ( B^t Ni (data - B D Xmap))(z) (D Xmap)(z)
    """
    # Fall back on the first residual set when no second one is given.
    _Res_TEBlms2 = Res_TEBlms if Res_TEBlms2 is None else Res_TEBlms2
    assert len(Res_TEBlms) == len(typ) and len(_Res_TEBlms2) == len(typ)
    t = timer(verbose, prefix=__name__)
    if f is not None: print(" qlms.py :: consider using get_qlms_wl for qlms with lensing, to avoid lensing noisy maps")
    if f is None: f = ffs_id_displacement(lib_sky.shape, lib_sky.lsides)
    # Apply the unlensed TEB spectral matrix to the residuals, then rotate to TQU space.
    TQUmlik = SM.TEB2TQUlms(typ, lib_sky, SM.apply_TEBmat(typ, lib_sky, cls_unl, _Res_TEBlms2))
    def left(S_id):
        # Lensed map of the residual for field typ[S_id].
        assert S_id in range(len(typ)), (S_id, typ)
        return f.alm2lenmap(lib_sky, SM.get_SlmfromTEBlms(typ, lib_sky, Res_TEBlms, typ[S_id]), use_Pool=use_Pool)
    def Right(S_id, axis):
        # Lensed map of the x (axis == 1) or y (axis == 0) gradient of the filtered residual.
        assert S_id in range(len(typ)), (S_id, typ)
        assert axis in [0, 1]
        kfunc = lib_sky.get_ikx if axis == 1 else lib_sky.get_iky
        return f.alm2lenmap(lib_sky, TQUmlik[S_id] * kfunc(), use_Pool=use_Pool)
    retdx = left(0) * Right(0, 1)
    for _i in range(1, len(typ)): retdx += left(_i) * Right(_i, 1)
    retdx = lib_qlm.map2alm(f.mult_wmagn(retdx))  # weight by the magnification |M_f| (cf. docstring)
    t.checkpoint("get_likgrad::Cart. gr. x done. (%s map(s) lensed, %s fft(s)) " % (2 * len(typ), 2 * len(typ) + 1))
    retdy = left(0) * Right(0, 0)
    for _i in range(1, len(typ)): retdy += left(_i) * Right(_i, 0)
    retdy = lib_qlm.map2alm(f.mult_wmagn(retdy))
    t.checkpoint("get_likgrad::Cart. gr. y done. (%s map(s) lensed, %s fft(s)) " % (2 * len(typ), 2 * len(typ) + 1))
    return np.array([- retdx * lib_qlm.get_ikx() - retdy * lib_qlm.get_iky(),
                     retdx * lib_qlm.get_iky() - retdy * lib_qlm.get_ikx()])  # N0 * output is normalized qest
def get_response_flexible(lib_tlm, lib_elm, lib_blm, cls, cls_transf, cls_noise, lib_qlm, isoN0s = True):
"""
N0 calc, allowing for abritrary aniso filtering.
-(xi,a K) (xi,b K) - (K)ab (xi,a K xi,b) with K = B^t Covi B
"""
assert lib_tlm.ell_mat == lib_elm.ell_mat and lib_tlm.ell_mat == lib_blm.ell_mat
assert 'tt' in cls_noise.keys() and 'ee' in cls_noise.keys() and 'bb' in cls_noise.keys()
ellmat = lib_tlm.ell_mat
lmax = max(lib_tlm.ellmax,lib_elm.ellmax,lib_blm.ellmax)
Ki_cls = {}
w_cls = {}
t_cls = {}
for k in ['tt','ee','te','bb']:
Ki_cls[k] = np.zeros(ellmat.ellmax + 1,dtype = float)
Ki_cls[k][:lmax + 1] = cls[k][:lmax + 1] * cls_transf[k[0]][:lmax + 1] * cls_transf[k[1]][:lmax + 1]
if k in cls_noise.keys(): Ki_cls[k][:lmax + 1] += cls_noise[k][:lmax + 1]
w_cls[k] = np.zeros(ellmat.ellmax + 1,dtype = float)
w_cls[k][:lmax + 1] = cls[k][:lmax + 1]
for k in ['t','e','b']:
t_cls[k] = np.zeros(ellmat.ellmax + 1,dtype = float)
t_cls[k][:lmax + 1] = np.copy(cls_transf[k][:lmax + 1])
if lib_tlm.ellmax > 0: t_cls['t'][:lib_tlm.ellmax + 1] *= (lib_tlm.get_Nell() > 0)
if lib_elm.ellmax > 0: t_cls['e'][:lib_elm.ellmax + 1] *= (lib_elm.get_Nell() > 0)
if lib_blm.ellmax > 0: t_cls['b'][:lib_blm.ellmax + 1] *= (lib_blm.get_Nell() > 0)
K_almmap = np.zeros((ellmat.rshape[0],ellmat.rshape[1],3,3),dtype = float)
K_almmap[:,:,0, 0] = Ki_cls['tt'][ellmat()] * lib_tlm._cond()
K_almmap[:,:,1, 1] = Ki_cls['ee'][ellmat()] * lib_elm._cond()
K_almmap[:,:,2, 2] = Ki_cls['bb'][ellmat()] * lib_blm._cond()
K_almmap[:,:,0, 1] = Ki_cls['te'][ellmat()] * lib_tlm._cond()* lib_elm._cond()
K_almmap[:,:,1, 0] = Ki_cls['te'][ellmat()] * lib_tlm._cond()* lib_elm._cond()
if np.__version__ >= '1.14':
K_almmap = np.linalg.pinv(K_almmap) # B^t Covi B
else:
for l1 in range(ellmat.rshape[0]):
for l2 in range(ellmat.rshape[1]):
K_almmap[l1,l2,:,:] = np.linalg.pinv(K_almmap[l1,l2,:,:])
K_almmap[:, :, 0, 0] *= t_cls['t'][ellmat()] ** 2
K_almmap[:, :, 1, 1] *= t_cls['e'][ellmat()] ** 2
K_almmap[:, :, 0, 1] *= t_cls['t'][ellmat()] * t_cls['e'][ellmat()]
K_almmap[:, :, 1, 0] *= t_cls['e'][ellmat()] * t_cls['t'][ellmat()]
K_almmap[:, :, 2, 2] *= t_cls['b'][ellmat()] ** 2
xiK_almmap = np.zeros((ellmat.rshape[0],ellmat.rshape[1],3,3),dtype = float)
xiK_almmap[:, :, 0, 0] = w_cls['tt'][ellmat()] * K_almmap[:, :, 0, 0] + w_cls['te'][ellmat()] * K_almmap[:, :, 1, 0]
xiK_almmap[:, :, 1, 1] = w_cls['te'][ellmat()] * K_almmap[:, :, 0, 1] + w_cls['ee'][ellmat()] * K_almmap[:, :, 1, 1]
xiK_almmap[:, :, 2, 2] = w_cls['bb'][ellmat()] * K_almmap[:, :, 2, 2]
xiK_almmap[:, :, 0, 1] = w_cls['tt'][ellmat()] * K_almmap[:, :, 0, 1] + w_cls['te'][ellmat()] * K_almmap[:, :, 1, 1]
xiK_almmap[:, :, 1, 0] = w_cls['te'][ellmat()] * K_almmap[:, :, 0, 0] + w_cls['ee'][ellmat()] * K_almmap[:, :, 1, 0]
xiKxi_almmap = np.zeros((ellmat.rshape[0],ellmat.rshape[1],3,3),dtype = float)
xiKxi_almmap[:, :, 0, 0] = w_cls['tt'][ellmat()] * xiK_almmap[:, :, 0, 0] + w_cls['te'][ellmat()] * xiK_almmap[:, :, 0, 1]
xiKxi_almmap[:, :, 1, 1] = w_cls['te'][ellmat()] * xiK_almmap[:, :, 1, 0] + w_cls['ee'][ellmat()] * xiK_almmap[:, :, 1, 1]
xiKxi_almmap[:, :, 2, 2] = w_cls['bb'][ellmat()] * xiK_almmap[:, :, 2, 2]
xiKxi_almmap[:, :, 0, 1] = w_cls['ee'][ellmat()] * xiK_almmap[:, :, 0, 1] + w_cls['te'][ellmat()] * xiK_almmap[:, :, 0, 0]
xiKxi_almmap[:, :, 1, 0] = w_cls['tt'][ellmat()] * xiK_almmap[:, :, 1, 0] + w_cls['te'][ellmat()] * xiK_almmap[:, :, 1, 1]
cos,sin = ellmat.get_cossin_2iphi_mat()
def apply_RSX(almmap,iS,iX):
""" T Q U = (1 0 0
0 c -s
0 s c) T E B"""
if iS == 0:
return almmap.copy() if iX == 0 else np.zeros_like(almmap)
elif iS == 1:
if iX == 1:
return cos * almmap
elif iX == 2:
return -sin * almmap
else :
return np.zeros_like(almmap)
elif iS == 2:
if iX == 1:
return sin * almmap
elif iX == 2:
return cos * almmap
else :
return np.zeros_like(almmap)
else:
assert 0
def TEB2TQU(iS,jS,TEBmat):
""" R_sx R_ty Y Pxy """
assert TEBmat.shape == (ellmat.rshape[0],ellmat.rshape[1],3,3)
ret = np.zeros(ellmat.rshape)
for iX in range(3):
for jX in range(3):
ret += apply_RSX(apply_RSX(TEBmat[:,:,iX,jX],iS,iX),jS,jX)
return ret
# turn TEB to TQU:
xiK = np.zeros_like(xiK_almmap)
for iS in range(3):
for jS in range(3):
xiK[:,:,iS,jS] = TEB2TQU(iS,jS,xiK_almmap)
del xiK_almmap
xiKxi = np.zeros_like(xiKxi_almmap)
for iS in range(3):
for jS in range(3):
xiKxi[:, :, iS, jS] = TEB2TQU(iS, jS, xiKxi_almmap)
del xiKxi_almmap
K = np.zeros_like(K_almmap)
for iS in range(3):
for jS in range(3):
K[:, :, iS, jS] = TEB2TQU(iS, jS, K_almmap)
del K_almmap
Fxx = np.zeros(ellmat.shape, dtype=float)
Fyy = np.zeros(ellmat.shape, dtype=float)
Fxy = np.zeros(ellmat.shape, dtype=float)
Fyx = np.zeros(ellmat.shape, dtype=float)
_2map = lambda almmap:np.fft.irfft2(almmap.astype(complex))
ikx = lambda :ellmat.get_ikx_mat()
iky = lambda: ellmat.get_iky_mat()
#-(xi, a K)(xi, b K) - (K)(xi, a K xi, b)
for iS in range(3):
for jS in range(3):
Fxx += _2map(xiK[:,:,iS,jS] * ikx()) * _2map(xiK[:,:,jS,iS] * ikx())
Fyy += _2map(xiK[:,:,iS,jS] * iky()) * _2map(xiK[:,:,jS,iS] * iky())
Fxy += _2map(xiK[:,:,iS,jS] * ikx()) * _2map(xiK[:,:,jS,iS] * iky())
Fyx += _2map(xiK[:,:,iS,jS] * iky()) * _2map(xiK[:,:,jS,iS] * ikx())
Fxx += _2map(K[:, :, iS, jS]) * _2map(xiKxi[:, :, jS, iS] * ikx() * ikx())
Fyy += _2map(K[:, :, iS, jS]) * _2map(xiKxi[:, :, jS, iS] * iky() * iky())
Fxy += _2map(K[:, :, iS, jS]) * _2map(xiKxi[:, :, jS, iS] * ikx() * iky())
Fyx += _2map(K[:, :, iS, jS]) * _2map(xiKxi[:, :, jS, iS] * iky() * ikx())
fac = 1. / np.sqrt(np.prod(ellmat.lsides)) * lib_tlm.fac_alm2rfft ** 2
Fxx = lib_qlm.map2alm(Fxx)
Fyy = lib_qlm.map2alm(Fyy)
Fxy = lib_qlm.map2alm(Fxy)
Fyx = lib_qlm.map2alm(Fyx)
ikx = lambda : lib_qlm.get_ikx()
iky = lambda : lib_qlm.get_iky()
assert isoN0s,'implement this (non anisotropic N0 2d cls)' #this affects only the following line:
return (fac*lib_qlm.bin_realpart_inell(Fxx * ikx() ** 2 + Fyy * iky() ** 2 + (Fxy + Fyx) * ikx() * iky()),
fac * lib_qlm.bin_realpart_inell( (Fxx * iky() ** 2 + Fyy * ikx() ** 2 - (Fxy + Fyx) * ikx() * iky()))) | 0.713531 | 0.550184 |
from unit_tester import test
def share_diagonal(x0, y0, x1, y1):
    """ Is (x0, y0) on a shared diagonal with (x1, y1)?

    Two squares share a diagonal exactly when their horizontal and
    vertical distances are equal.
    """
    return abs(x1 - x0) == abs(y1 - y0)
# Sanity tests for share_diagonal: queen at (5, 2) vs. a second queen.
test(not share_diagonal(5,2,2,0))  # |dx| = 3, |dy| = 2 -> no clash
test(share_diagonal(5,2,3,0))      # |dx| = |dy| = 2 -> clash
test(share_diagonal(5,2,4,3))      # |dx| = |dy| = 1 -> clash
test(share_diagonal(5,2,4,1))      # |dx| = |dy| = 1 -> clash
def col_clashes(bs, c):
    """ Return True if the queen at column c clashes
        with any queen to its left.
    """
    # Board bs maps column index -> row; only diagonals need checking
    # because a permutation already rules out row/column clashes.
    return any(share_diagonal(col, bs[col], c, bs[c]) for col in range(c))
# Solution cases that should not have any clashes.
test(not col_clashes([6,4,2,0,5], 4))
test(not col_clashes([6,4,2,0,5,7,1,3], 7))
# More test cases that should mostly clash.
test(col_clashes([0,1], 1))
test(col_clashes([5,6], 1))
test(col_clashes([6,5], 1))
test(col_clashes([0,6,4,3], 3))
test(col_clashes([5,0,7], 2))
test(not col_clashes([2,0,1,3], 1))
test(col_clashes([2,0,1,3], 2))
def has_clashes(the_board):
    """ Determine whether we have any queens clashing on the diagonals.
        We're assuming here that the_board is a permutation of column
        numbers, so we're not explicitly checking row or column clashes.
    """
    # Column 0 has nothing to its left, so start checking from column 1.
    return any(col_clashes(the_board, col)
               for col in range(1, len(the_board)))
# Full-board checks: known solutions pass, perturbed boards clash.
test(not has_clashes([6,4,2,0,5,7,1,3])) # Solution from above
test(has_clashes([4,6,2,0,5,7,1,3]))     # Swap rows of first two
test(has_clashes([0,1,2,3]))             # Try small 4x4 board
test(not has_clashes([2,0,3,1]))         # Solution to 4x4 case
# Brute-force alternative: shuffle until a clash-free permutation appears.
def main():
    import random
    generator = random.Random()           # Dedicated RNG instance
    board = list(range(8))                # Initial permutation of rows
    solutions_found = 0
    attempts = 0
    while solutions_found < 10:
        generator.shuffle(board)
        attempts += 1
        if not has_clashes(board):
            # Report the solution and how many shuffles it took since
            # the previous one, then reset the attempt counter.
            print("Found solution {0} in {1} tries.".format(board, attempts))
            attempts = 0
            solutions_found += 1
main()
def share_diagonal(x0, y0, x1, y1):
""" Is (x0, y0) on a shared diagonal with (x1, y1)? """
dy = abs(y1 - y0) # Calc the absolute y distance
dx = abs(x1 - x0) # CXalc the absolute x distance
return dx == dy # They clash if dx == dy
test(not share_diagonal(5,2,2,0))
test(share_diagonal(5,2,3,0))
test(share_diagonal(5,2,4,3))
test(share_diagonal(5,2,4,1))
def col_clashes(bs, c):
""" Return True if the queen at column c clashes
with any queen to its left.
"""
for i in range(c): # Look at all columns to the left of c
if share_diagonal(i, bs[i], c, bs[c]):
return True
return False # No clashes - col c has a safe placement.
# Solutions cases that should not have any clashes
test(not col_clashes([6,4,2,0,5], 4))
test(not col_clashes([6,4,2,0,5,7,1,3], 7))
# More test cases that should mostly clash
test(col_clashes([0,1], 1))
test(col_clashes([5,6], 1))
test(col_clashes([6,5], 1))
test(col_clashes([0,6,4,3], 3))
test(col_clashes([5,0,7], 2))
test(not col_clashes([2,0,1,3], 1))
test(col_clashes([2,0,1,3], 2))
def has_clashes(the_board):
""" Determine whether we have any queens clashing on the diagonals.
We're assuming here that the_board is a permutation of column
numbers, so we're not explicitly checking row or column clashes.
"""
for col in range(1,len(the_board)):
if col_clashes(the_board, col):
return True
return False
test(not has_clashes([6,4,2,0,5,7,1,3])) # Solution from above
test(has_clashes([4,6,2,0,5,7,1,3])) # Swap rows of first two
test(has_clashes([0,1,2,3])) # Try small 4x4 board
test(not has_clashes([2,0,3,1])) # Solution to 4x4 case
# brute force alternative
def main():
import random
rng = random.Random() # Instantiate a generator
bd = list(range(8)) # Generate the initial permutation
num_found = 0
tries = 0
while num_found < 10:
rng.shuffle(bd)
tries += 1
if not has_clashes(bd):
print("Found solution {0} in {1} tries.".format(bd, tries))
tries = 0
num_found += 1
main() | 0.749179 | 0.701311 |
import datetime
from flask import Flask, render_template, redirect, url_for, request, flash
from classes import db, app, Teacher, Paper, Version, Fixed_Invigilation, Invigilation, Pool
from sqlalchemy.orm import noload
from generate import generate_largest_pools, generate_department_pools
@app.route('/papers', methods=["GET", "POST"])
def papers():
    """List all exam papers; handle edit/add/delete form submissions."""
    def _parse_date(value):
        # Form dates arrive as dd-mm-YYYY strings.
        return datetime.datetime.strptime(value, '%d-%m-%Y').date()

    def _parse_time(value):
        # Form times arrive on a 12-hour clock, e.g. "02:30 PM".
        return datetime.datetime.strptime(value, '%I:%M %p').time()

    if request.method == "POST":
        if request.form.get('edit'):
            # Fields for an edited paper are keyed "<id>-<field>".
            paper_id = request.form.get('edit')
            paper = Paper.query.options(noload('Invigilators')).get(int(paper_id))
            field = lambda suffix: request.form.get(paper_id + suffix)
            paper.Name = field('-name')
            paper.Subject = field('-subject')
            paper.SubjectCode = field('-subjcode')
            paper.Level = field('-level')
            paper.Department = field('-department')
            paper.Count = int(field('-count'))
            paper.Date = _parse_date(field('-date'))
            paper.StartTime = _parse_time(field('-starttime'))
            paper.EndTime = _parse_time(field('-endtime'))
        if request.form.get('add'):
            # Paper's constructor registers the new row with the session.
            form = request.form
            Paper(form.get('add-subject'), form.get('add-subjcode'),
                  form.get('add-name'), form.get('add-department'),
                  int(form.get('add-count')),
                  _parse_date(form.get('add-date')),
                  _parse_time(form.get('add-starttime')),
                  _parse_time(form.get('add-endtime')),
                  form.get('add-level'))
        if request.form.get('delete'):
            doomed = Paper.query.options(noload('Invigilators')).get(
                int(request.form.get('delete')))
            db.session.delete(doomed)
        db.session.commit()

    papers = Paper.query.options(noload('Invigilators')).all()
    for paper in papers:
        # Pre-format date/time strings for display in the template.
        paper.date_str = paper.Date.strftime('%d-%m-%Y')
        paper.starttime_str = paper.StartTime.strftime('%I:%M %p')
        paper.endtime_str = paper.EndTime.strftime('%I:%M %p')
    return render_template('papers.html', papers=papers)
@app.route('/teachers', methods=["GET", "POST"])
def teachers():
    """Teacher management view; not implemented yet."""
    return None
@app.route('/selection', methods=["GET", "POST"])
def selection():
    """Show and edit each paper's pool of candidate invigilators.

    GET: renders every paper with its current pool (``teachers``) and the
    teachers that could still be added (``not_included``).
    POST: runs a pool generator, removes one pool entry, or adds the
    checked teachers to one paper, then redirects back here.
    """
    papers = Paper.query.options(noload('Invigilators')).all()
    for paper in papers:
        paper.teachers = []
        # Papers overlapping this one in time on the same day: teachers
        # pooled for those cannot also be offered for this paper.
        clashes = []
        for paper_clash in papers:
            if paper != paper_clash and paper_clash.EndTime > paper.StartTime and paper_clash.StartTime < paper.EndTime and paper_clash.Date == paper.Date:
                clashes.append(paper_clash)
        paper.not_included = Teacher.query.options(noload('Invigilations')).all()
        for pool_ob in paper.Pool:
            teacher = Teacher.query.options(noload('Invigilations')).get(pool_ob.TeacherId)
            paper.teachers.append(teacher)
            paper.not_included.remove(teacher)
        for clash in clashes:
            for pool_ob in clash.Pool:
                teacher = Teacher.query.options(noload('Invigilations')).get(pool_ob.TeacherId)
                if teacher in paper.not_included:
                    paper.not_included.remove(teacher)
        # BUG FIX: the original removed entries from the list while
        # iterating it, which skips the element after each removal and
        # leaves some unavailable teachers in the list. Build a new,
        # filtered list instead.
        paper.not_included = [
            teacher for teacher in paper.not_included
            if teacher.check_availability(paper.Date, paper.StartTime,
                                          paper.EndTime) != False
        ]
        paper.teachers.sort(key=lambda teacher: teacher.Name)
        paper.not_included.sort(key=lambda teacher: teacher.Name)
        paper.pool_count = len(paper.teachers)
    if request.method == "POST":
        if request.form.get('largest'):
            generate_largest_pools()
        if request.form.get('department'):
            generate_department_pools()
        if request.form.get('remove'):
            remove = request.form.get('remove').split('-')
            paper_id, teacher_id = int(remove[0]), int(remove[1])
            # BUG FIX: Python's `and` between two SQLAlchemy clauses
            # evaluates to the second clause only, so the original query
            # ignored the paper id. Pass both criteria to filter(), which
            # ANDs them.
            pool_ob = Pool.query.filter(Pool.PaperId == paper_id,
                                        Pool.TeacherId == teacher_id).first()
            db.session.delete(pool_ob)
            db.session.commit()
        if request.form.get('add'):
            # One checkbox per addable teacher, named "add<paper>-<teacher>".
            paper_id = request.form.get('add')
            # BUG FIX: `paper` previously kept its stale value from the loop
            # above when no id matched; look it up explicitly instead.
            paper = None
            for paper_temp in papers:
                if paper_temp.id == int(paper_id):
                    paper = paper_temp
                    break
            if paper is not None:
                for teacher in paper.not_included:
                    if request.form.get('add' + paper_id + '-' + str(teacher.id)):
                        # Pool's constructor registers the association.
                        Pool(teacher, paper)
            db.session.commit()
        # Redirect so a refresh does not resubmit the form.
        return redirect(url_for('selection'))
    return render_template('selection.html', papers=papers)
@app.route('/generate', methods=["GET", "POST"])
def generate():
    """Timetable generation view; not implemented yet."""
    return None
@app.route('/saves', methods=["GET", "POST"])
def saves():
    """Saved-versions view; not implemented yet."""
    return None
# Start Flask's development server only when run directly (not on import).
if __name__ == "__main__":
    app.run(debug=True)
from flask import Flask, render_template, redirect, url_for, request, flash
from classes import db, app, Teacher, Paper, Version, Fixed_Invigilation, Invigilation, Pool
from sqlalchemy.orm import noload
from generate import generate_largest_pools, generate_department_pools
@app.route('/papers', methods=["GET", "POST"])
def papers():
if request.method == "POST":
if request.form.get('edit'):
paper_id = request.form.get('edit')
paper = Paper.query.options(noload('Invigilators')).get(int(paper_id))
paper.Name = request.form.get(paper_id + '-name')
paper.Subject = request.form.get(paper_id + '-subject')
paper.SubjectCode = request.form.get(paper_id + '-subjcode')
paper.Level = request.form.get(paper_id + '-level')
paper.Department = request.form.get(paper_id + '-department')
paper.Count = int(request.form.get(paper_id + '-count'))
paper.Date = datetime.datetime.strptime(request.form.get(paper_id + '-date'), '%d-%m-%Y').date()
paper.StartTime = datetime.datetime.strptime(request.form.get(paper_id + '-starttime'), '%I:%M %p').time()
paper.EndTime = datetime.datetime.strptime(request.form.get(paper_id + '-endtime'), '%I:%M %p').time()
if request.form.get('add'):
name = request.form.get('add-name')
subject = request.form.get('add-subject')
subjcode = request.form.get('add-subjcode')
level = request.form.get('add-level')
department = request.form.get('add-department')
count = int(request.form.get('add-count'))
date = datetime.datetime.strptime(request.form.get('add-date'), '%d-%m-%Y').date()
starttime = datetime.datetime.strptime(request.form.get('add-starttime'), '%I:%M %p').time()
endtime = datetime.datetime.strptime(request.form.get('add-endtime'), '%I:%M %p').time()
Paper(subject, subjcode, name, department, count, date, starttime, endtime, level)
if request.form.get('delete'):
paper_id = request.form.get('delete')
paper = Paper.query.options(noload('Invigilators')).get(int(paper_id))
db.session.delete(paper)
db.session.commit()
papers = Paper.query.options(noload('Invigilators')).all()
for paper in papers:
paper.date_str = paper.Date.strftime('%d-%m-%Y')
paper.starttime_str = paper.StartTime.strftime('%I:%M %p')
paper.endtime_str = paper.EndTime.strftime('%I:%M %p')
return render_template('papers.html', papers=papers)
@app.route('/teachers', methods=["GET", "POST"])
def teachers():
return
@app.route('/selection', methods=["GET", "POST"])
def selection():
papers = Paper.query.options(noload('Invigilators')).all()
for paper in papers:
paper.teachers = []
clashes = []
for paper_clash in papers:
if paper != paper_clash and paper_clash.EndTime > paper.StartTime and paper_clash.StartTime < paper.EndTime and paper_clash.Date == paper.Date:
clashes.append(paper_clash)
paper.not_included = Teacher.query.options(noload('Invigilations')).all()
for pool_ob in paper.Pool:
teacher = Teacher.query.options(noload('Invigilations')).get(pool_ob.TeacherId)
paper.teachers.append(teacher)
paper.not_included.remove(teacher)
for clash in clashes:
for pool_ob in clash.Pool:
teacher = Teacher.query.options(noload('Invigilations')).get(pool_ob.TeacherId)
if teacher in paper.not_included:
paper.not_included.remove(teacher)
for teacher in paper.not_included:
if teacher.check_availability(paper.Date, paper.StartTime, paper.EndTime) == False:
paper.not_included.remove(teacher)
paper.teachers.sort(key=lambda teacher: teacher.Name)
paper.not_included.sort(key=lambda teacher: teacher.Name)
paper.pool_count = len(paper.teachers)
if request.method == "POST":
if request.form.get('largest'):
generate_largest_pools()
if request.form.get('department'):
generate_department_pools()
if request.form.get('remove'):
remove = request.form.get('remove').split('-')
paper_id, teacher_id = int(remove[0]), int(remove[1])
pool_ob = Pool.query.filter(Pool.PaperId == paper_id and Pool.TeacherId == teacher_id).first()
db.session.delete(pool_ob)
db.session.commit()
if request.form.get('add'):
paper_id = request.form.get('add')
for paper_temp in papers:
if paper_temp.id == int(paper_id):
paper = paper_temp
break
for teacher in paper.not_included:
print(teacher.id)
if request.form.get('add' + paper_id + '-' + str(teacher.id)):
Pool(teacher, paper)
db.session.commit()
return redirect(url_for('selection'))
return render_template('selection.html', papers= papers)
@app.route('/generate', methods=["GET", "POST"])
def generate():
return
@app.route('/saves', methods=["GET", "POST"])
def saves():
return
if __name__ == "__main__":
app.run(debug=True) | 0.134463 | 0.069038 |
import collections
import random
import numpy as np
import config
import obj_canvas
class Paddle(object):
  """ The player-controlled breakout paddle. """

  def __init__(self, canvas):
    """
    Args:
      canvas: The Canvas to draw the paddle on. """
    self.__canvas = canvas
    # Recent x positions, kept for the moving-average filter.
    self.__old_positions = collections.deque()

    # The paddle sits centered horizontally, near the bottom of the screen.
    center = (config.SCREEN_WIDTH / 2,
              config.SCREEN_HEIGHT - config.SCREEN_HEIGHT * 0.1)
    size = (config.SCREEN_WIDTH / 4, config.SCREEN_HEIGHT * 0.015)
    color = config.BreakoutColors.PADDLE_COLOR

    # The actual paddle is a rectangle.
    self.__paddle = obj_canvas.Rectangle(self.__canvas, center, size,
                                         fill=color, outline=color)

  def update_position(self, new_x):
    """ Updates the x position of the paddle, smoothing the motion with a
    moving average over the last config.AVERAGE_POINTS samples.

    Args:
      new_x: The new (raw) x position. """
    _, y_pos = self.__paddle.get_pos()

    # Push the new sample through the averaging window.
    self.__old_positions.append(new_x)
    while len(self.__old_positions) > config.AVERAGE_POINTS:
      self.__old_positions.popleft()

    self.__paddle.set_pos(np.mean(self.__old_positions), y_pos)

  def handle_collision(self, ball):
    """ Handles collisions between the paddle and the ball.

    Args:
      ball: The ball that could be colliding. """
    ball.handle_collision(self.__paddle)
class Walls(object):
  """ This class controls the static walls around the playfield. """

  def __init__(self, canvas):
    """
    Args:
      canvas: The Canvas to draw the walls on. """
    self.__canvas = canvas

    width = config.SCREEN_WIDTH
    height = config.SCREEN_HEIGHT
    wall_color = config.BreakoutColors.WALL_COLOR

    # Geometry of each wall as (center_x, center_y, wall_w, wall_h).
    top_geom = (width / 2, height * 0.15, width - height * 0.2, height * 0.1)
    left_geom = (width * 0.05, height / 2, width * 0.1, height * 0.8)
    # The right wall mirrors the left one about the screen's center line.
    right_geom = (width - width * 0.05, height / 2, width * 0.1, height * 0.8)

    def _make_wall(geom):
      # Helper that draws one wall rectangle from its geometry tuple.
      center_x, center_y, wall_w, wall_h = geom
      return obj_canvas.Rectangle(self.__canvas, (center_x, center_y),
                                  (wall_w, wall_h),
                                  fill=wall_color,
                                  outline=wall_color)

    self.__wall_top = _make_wall(top_geom)
    self.__wall_left = _make_wall(left_geom)
    self.__wall_right = _make_wall(right_geom)

  def handle_collision(self, ball):
    """ Handles collisions between the walls and the ball.

    Args:
      ball: The ball that could be colliding. """
    # The ball simply bounces off whichever wall it touches.
    for wall in (self.__wall_top, self.__wall_left, self.__wall_right):
      ball.handle_collision(wall)
class ScoreBox(object):
  """ Shows the user's score and number of turns remaining.

  The score is rendered as three digits and the turn counter as one digit,
  all assembled from canvas rectangles by the nested digit classes. """

  class _Digit(object):
    """ A superclass for digit objects. """

    def __init__(self, canvas, pos, scale):
      """
      Args:
        canvas: The canvas to draw on.
        pos: The center position of the digit.
        scale: A tuple indicating the horizontal and vertical size of the digit.
      """
      self._canvas = canvas
      self._pos_x, self._pos_y = pos
      self._scale_x, self._scale_y = scale

      self._color = config.BreakoutColors.WALL_COLOR
      self._bg_color = config.BreakoutColors.BG_COLOR

      # Shapes that make up the digit.
      self._shapes = []

      # Draw the digit.
      self._draw_digit()

    def _draw_digit(self):
      """ Draws the digit. Must be overidden by subclasses. """
      raise NotImplementedError("_draw_digit must be implemented by subclass.")

    def delete(self):
      """ Deletes every shape belonging to the digit. """
      for shape in self._shapes:
        shape.delete()

  class Zero(_Digit):
    """ A zero digit. """

    def _draw_digit(self):
      """ Draws the zero digit: a filled box with a background hole. """
      # Calculate sizes for rectangular components.
      outer_rect_pos = (self._pos_x, self._pos_y)
      outer_rect_size = (self._scale_x, self._scale_y)

      inner_rect_pos = outer_rect_pos
      inner_rect_size = (self._scale_x * 0.4, self._scale_y * 0.4)

      # Create rectangles.
      outer_rect = obj_canvas.Rectangle(self._canvas, outer_rect_pos,
                                        outer_rect_size,
                                        fill=self._color,
                                        outline=self._color)
      inner_rect = obj_canvas.Rectangle(self._canvas, inner_rect_pos,
                                        inner_rect_size,
                                        fill=self._bg_color,
                                        outline=self._bg_color)

      self._shapes.extend([outer_rect, inner_rect])

  class One(_Digit):
    """ A one digit. """

    def _draw_digit(self):
      """ Draws the one digit: a single vertical bar. """
      # Calculate sizes for rectangular components.
      rect_pos = (self._pos_x, self._pos_y)
      rect_size = (self._scale_x * 0.2, self._scale_y)

      # Create rectangles.
      outer_rect = obj_canvas.Rectangle(self._canvas, rect_pos,
                                        rect_size,
                                        fill=self._color,
                                        outline=self._color)

      self._shapes.append(outer_rect)

  class Two(_Digit):
    """ A two digit. """

    def _draw_digit(self):
      """ Draws the two digit from three horizontal bars and two short
      vertical connectors. """
      # Calculate sizes for rectangular components.
      top_rect_x = self._pos_x
      top_rect_y = self._pos_y - self._scale_y * 0.4
      top_rect_w = self._scale_x
      top_rect_h = self._scale_y * 0.2

      mid_rect_x = top_rect_x
      mid_rect_y = self._pos_y
      mid_rect_w = top_rect_w
      mid_rect_h = top_rect_h

      bot_rect_x = top_rect_x
      bot_rect_y = self._pos_y + self._scale_y * 0.4
      bot_rect_w = top_rect_w
      bot_rect_h = top_rect_h

      # Lower-left connector between the middle and bottom bars.
      left_rect_x = self._pos_x - self._scale_x / 2 + top_rect_h / 2
      left_rect_y = mid_rect_y + (bot_rect_y - mid_rect_y) / 2
      left_rect_w = top_rect_h
      left_rect_h = self._scale_y / 2

      # Upper-right connector between the top and middle bars.
      right_rect_x = self._pos_x + self._scale_x / 2 - top_rect_h / 2
      right_rect_y = top_rect_y + (mid_rect_y - top_rect_y) / 2
      right_rect_w = top_rect_h
      right_rect_h = self._scale_y / 2

      # Create rectangles.
      top_rect = obj_canvas.Rectangle(self._canvas,
                                      (top_rect_x, top_rect_y),
                                      (top_rect_w, top_rect_h),
                                      fill=self._color,
                                      outline=self._color)
      mid_rect = obj_canvas.Rectangle(self._canvas,
                                      (mid_rect_x, mid_rect_y),
                                      (mid_rect_w, mid_rect_h),
                                      fill=self._color,
                                      outline=self._color)
      bot_rect = obj_canvas.Rectangle(self._canvas,
                                      (bot_rect_x, bot_rect_y),
                                      (bot_rect_w, bot_rect_h),
                                      fill=self._color,
                                      outline=self._color)
      left_rect = obj_canvas.Rectangle(self._canvas,
                                       (left_rect_x, left_rect_y),
                                       (left_rect_w, left_rect_h),
                                       fill=self._color,
                                       outline=self._color)
      right_rect = obj_canvas.Rectangle(self._canvas,
                                        (right_rect_x, right_rect_y),
                                        (right_rect_w, right_rect_h),
                                        fill=self._color,
                                        outline=self._color)

      # Save these so we can switch them for fives.
      self._left_rect = left_rect
      self._right_rect = right_rect

      self._shapes.extend([top_rect, mid_rect, bot_rect, left_rect, right_rect])

  class Three(_Digit):
    """ A three digit. """

    def _draw_digit(self):
      """ Draws the three digit: a filled box with two background notches
      cut out of the left side. """
      # Calculate the size for rectangular components.
      back_rect_pos = (self._pos_x, self._pos_y)
      back_rect_size = (self._scale_x, self._scale_y)

      top_rect_x = self._pos_x - self._scale_x * 0.1
      top_rect_y = self._pos_y - self._scale_y * 0.2
      top_rect_w = self._scale_x * 0.8
      top_rect_h = self._scale_y * 0.2

      bot_rect_x = top_rect_x
      bot_rect_y = self._pos_y + self._scale_y * 0.2
      bot_rect_w = top_rect_w
      bot_rect_h = top_rect_h

      # Create rectangles.
      back_rect = obj_canvas.Rectangle(self._canvas,
                                       back_rect_pos,
                                       back_rect_size,
                                       fill=self._color,
                                       outline=self._color)
      top_rect = obj_canvas.Rectangle(self._canvas,
                                      (top_rect_x, top_rect_y),
                                      (top_rect_w, top_rect_h),
                                      fill=self._bg_color,
                                      outline=self._bg_color)
      bot_rect = obj_canvas.Rectangle(self._canvas,
                                      (bot_rect_x, bot_rect_y),
                                      (bot_rect_w, bot_rect_h),
                                      fill=self._bg_color,
                                      outline=self._bg_color)

      self._shapes.extend([back_rect, top_rect, bot_rect])

  class Four(_Digit):
    """ A four digit. """

    def _draw_digit(self):
      """ Draws the four digit from a notched upper block and a vertical
      lower-right bar. """
      # Calculate the size for rectangular components.
      top_rect_x = self._pos_x
      top_rect_y = self._pos_y - self._scale_y * 0.3
      top_rect_w = self._scale_x
      top_rect_h = self._scale_y * 0.4

      inner_rect_x = top_rect_x
      inner_rect_y = self._pos_y - self._scale_y * 0.4
      inner_rect_w = self._scale_x * 0.6
      inner_rect_h = self._scale_y * 0.2

      bot_rect_x = self._pos_x + self._scale_x * 0.4
      bot_rect_y = self._pos_y + self._scale_y * 0.2
      bot_rect_w = self._scale_x * 0.2
      bot_rect_h = self._scale_y * 0.6

      # Create rectangles.
      top_rect = obj_canvas.Rectangle(self._canvas,
                                      (top_rect_x, top_rect_y),
                                      (top_rect_w, top_rect_h),
                                      fill=self._color,
                                      outline=self._color)
      inner_rect = obj_canvas.Rectangle(self._canvas,
                                        (inner_rect_x, inner_rect_y),
                                        (inner_rect_w, inner_rect_h),
                                        fill=self._bg_color,
                                        outline=self._bg_color)
      bot_rect = obj_canvas.Rectangle(self._canvas,
                                      (bot_rect_x, bot_rect_y),
                                      (bot_rect_w, bot_rect_h),
                                      fill=self._color,
                                      outline=self._color)

      self._shapes.extend([top_rect, inner_rect, bot_rect])

  class Five(Two):
    """ A five digit. """

    def _draw_digit(self):
      """ Draws the five digit by mirroring a two's connectors. """
      # The five is very similar to the two. We just have to flip it.
      super(ScoreBox.Five, self)._draw_digit()

      # Switch side rectangles.
      left_pos_x, left_pos_y = self._left_rect.get_pos()
      right_pos_x, right_pos_y = self._right_rect.get_pos()
      self._left_rect.set_pos(right_pos_x, left_pos_y)
      self._right_rect.set_pos(left_pos_x, right_pos_y)

  class Six(_Digit):
    """ A six digit. """

    def _draw_digit(self):
      """ Draws the six digit: a filled box with a lower hole and an
      upper-right notch. """
      # Calculate the size for rectangular components.
      back_rect_pos = (self._pos_x, self._pos_y)
      back_rect_size = (self._scale_x, self._scale_y)

      inner_rect_x = self._pos_x
      inner_rect_y = self._pos_y + self._scale_y * 0.2
      inner_rect_w = self._scale_x * 0.6
      inner_rect_h = self._scale_y * 0.2

      top_rect_x = self._pos_x - self._scale_x * 0.1
      top_rect_y = self._pos_y - self._scale_y * 0.2
      top_rect_w = self._scale_x * 0.8
      top_rect_h = self._scale_y * 0.2

      # Create rectangles.
      back_rect = obj_canvas.Rectangle(self._canvas, back_rect_pos,
                                       back_rect_size,
                                       fill=self._color,
                                       outline=self._color)
      inner_rect = obj_canvas.Rectangle(self._canvas,
                                        (inner_rect_x, inner_rect_y),
                                        (inner_rect_w, inner_rect_h),
                                        fill=self._bg_color,
                                        outline=self._bg_color)
      top_rect = obj_canvas.Rectangle(self._canvas,
                                      (top_rect_x, top_rect_y),
                                      (top_rect_w, top_rect_h),
                                      fill=self._bg_color,
                                      outline=self._bg_color)

      self._shapes.extend([back_rect, inner_rect, top_rect])

  class Seven(_Digit):
    """ A seven digit. """

    def _draw_digit(self):
      """ Draws the seven digit from a top bar and a right-side bar. """
      # Calculate the size for rectangular components.
      side_rect_x = self._pos_x + self._scale_x * 0.4
      side_rect_y = self._pos_y
      side_rect_w = self._scale_x * 0.2
      side_rect_h = self._scale_y

      top_rect_x = self._pos_x + self._scale_x * 0.1
      top_rect_y = self._pos_y - self._scale_y * 0.4
      top_rect_w = self._scale_x * 0.8
      top_rect_h = self._scale_y * 0.2

      # Create rectangles.
      side_rect = obj_canvas.Rectangle(self._canvas,
                                       (side_rect_x, side_rect_y),
                                       (side_rect_w, side_rect_h),
                                       fill=self._color,
                                       outline=self._color)
      top_rect = obj_canvas.Rectangle(self._canvas,
                                      (top_rect_x, top_rect_y),
                                      (top_rect_w, top_rect_h),
                                      fill=self._color,
                                      outline=self._color)

      self._shapes.extend([side_rect, top_rect])

  class Eight(_Digit):
    """ An eight digit. """

    def _draw_digit(self):
      """ Draws the eight digit: a filled box with two centered holes. """
      # Calculate the size for rectangular components.
      back_rect_pos = (self._pos_x, self._pos_y)
      back_rect_size = (self._scale_x, self._scale_y)

      top_rect_x = self._pos_x
      top_rect_y = self._pos_y - self._scale_y * 0.2
      top_rect_w = self._scale_x * 0.6
      top_rect_h = self._scale_y * 0.2

      bot_rect_x = top_rect_x
      bot_rect_y = self._pos_y + self._scale_y * 0.2
      bot_rect_w = top_rect_w
      bot_rect_h = top_rect_h

      # Create rectangles.
      back_rect = obj_canvas.Rectangle(self._canvas, back_rect_pos,
                                       back_rect_size,
                                       fill=self._color,
                                       outline=self._color)
      top_rect = obj_canvas.Rectangle(self._canvas,
                                      (top_rect_x, top_rect_y),
                                      (top_rect_w, top_rect_h),
                                      fill=self._bg_color,
                                      outline=self._bg_color)
      bot_rect = obj_canvas.Rectangle(self._canvas,
                                      (bot_rect_x, bot_rect_y),
                                      (bot_rect_w, bot_rect_h),
                                      fill=self._bg_color,
                                      outline=self._bg_color)

      self._shapes.extend([back_rect, top_rect, bot_rect])

  class Nine(_Digit):
    """ A nine digit. """

    def _draw_digit(self):
      """ Draws the nine digit: a filled box with an upper hole and a
      lower-left notch. """
      # Calculate the size for rectangular components.
      back_rect_pos = (self._pos_x, self._pos_y)
      back_rect_size = (self._scale_x, self._scale_y)

      top_rect_x = self._pos_x
      top_rect_y = self._pos_y - self._scale_y * 0.2
      top_rect_w = self._scale_x * 0.6
      top_rect_h = self._scale_y * 0.2

      bot_rect_x = self._pos_x - self._scale_x * 0.1
      bot_rect_y = self._pos_y + self._scale_y * 0.2
      bot_rect_w = self._scale_x * 0.8
      bot_rect_h = self._scale_y * 0.2

      # Create rectangles.
      back_rect = obj_canvas.Rectangle(self._canvas, back_rect_pos,
                                       back_rect_size,
                                       fill=self._color,
                                       outline=self._color)
      top_rect = obj_canvas.Rectangle(self._canvas,
                                      (top_rect_x, top_rect_y),
                                      (top_rect_w, top_rect_h),
                                      fill=self._bg_color,
                                      outline=self._bg_color)
      bot_rect = obj_canvas.Rectangle(self._canvas,
                                      (bot_rect_x, bot_rect_y),
                                      (bot_rect_w, bot_rect_h),
                                      fill=self._bg_color,
                                      outline=self._bg_color)

      self._shapes.extend([back_rect, top_rect, bot_rect])

  def __init__(self, canvas):
    """
    Args:
      canvas: The Canvas to draw the score box on. """
    self.__canvas = canvas

    # Draw the turn counter. (Its position/size are computed inside
    # __update_turn_count; the duplicate locals that used to live here
    # were unused and have been removed.)
    self.__turn_count = 3
    self.__disp_turns = None
    self.__update_turn_count()

    # Draw the score.
    self.__score = 0
    self.__ones_digit = None
    self.__tens_digit = None
    self.__hundreds_digit = None
    self.__update_score()

  def __get_digit(self, digit):
    """ A helper function that selects the correct digit class for a number.

    Args:
      digit: The number, an integer between 0 and 9.
    Returns:
      The digit class. """
    lut = [ScoreBox.Zero, ScoreBox.One, ScoreBox.Two, ScoreBox.Three,
           ScoreBox.Four, ScoreBox.Five, ScoreBox.Six, ScoreBox.Seven,
           ScoreBox.Eight, ScoreBox.Nine]
    return lut[digit]

  def __update_turn_count(self):
    """ Update the displayed turn counter. """
    if self.__disp_turns:
      # Delete the previous number.
      self.__disp_turns.delete()

    # Calculate position and size for turn counter.
    turn_count_x = config.SCREEN_WIDTH * 0.75
    turn_count_y = config.SCREEN_HEIGHT * 0.05
    turn_count_w = config.SCREEN_WIDTH * 0.05
    turn_count_h = config.SCREEN_HEIGHT * 0.08

    # Draw it.
    digit = self.__get_digit(self.__turn_count)
    self.__disp_turns = digit(self.__canvas,
                              (turn_count_x, turn_count_y),
                              (turn_count_w, turn_count_h))

  def __update_score(self):
    """ Updates the displayed score. """
    # Calculate position and size of score digits.
    score_right_x = config.SCREEN_WIDTH * 0.62
    score_mid_x = config.SCREEN_WIDTH * 0.55
    score_left_x = config.SCREEN_WIDTH * 0.48
    score_y = config.SCREEN_HEIGHT * 0.05
    score_w = config.SCREEN_WIDTH * 0.05
    score_h = config.SCREEN_HEIGHT * 0.08

    # BUG FIX: use floor division so the lookup index stays an int. Under
    # Python 3, true division (/) yields a float and lut[float] raises
    # TypeError; // behaves identically on ints under Python 2.

    # Draw hundreds digit.
    if self.__hundreds_digit:
      self.__hundreds_digit.delete()
    digit = self.__get_digit(self.__score // 100)
    self.__hundreds_digit = digit(self.__canvas,
                                  (score_left_x, score_y),
                                  (score_w, score_h))

    # Draw tens digit.
    if self.__tens_digit:
      self.__tens_digit.delete()
    digit = self.__get_digit((self.__score % 100) // 10)
    self.__tens_digit = digit(self.__canvas,
                              (score_mid_x, score_y),
                              (score_w, score_h))

    # Draw ones digit.
    if self.__ones_digit:
      self.__ones_digit.delete()
    digit = self.__get_digit(self.__score % 10)
    self.__ones_digit = digit(self.__canvas,
                              (score_right_x, score_y),
                              (score_w, score_h))

  def decrement_turns(self):
    """ Decrements the number of turns a user has.

    Returns:
      True if the user had a turn, False if there were none left. """
    if not self.__turn_count:
      # Out of turns.
      return False

    self.__turn_count -= 1
    self.__update_turn_count()

    return True

  def increase_score(self, amount):
    """ Increase the user's score by a given amount.

    Args:
      amount: How many points to add. """
    self.__score += amount
    self.__update_score()
class Brick(object):
  """ Controls a single brick. """

  def __init__(self, canvas, row, col, color):
    """
    Args:
      canvas: The canvas to draw the brick on.
      row: Which row the brick is in, with row 0 being the top.
      col: Which column the brick is in, with col 0 being the left.
      color: The color of the brick. """
    self.__canvas = canvas
    self.__color = color

    # The brick field is 10 columns wide and 8 rows tall.
    cell_w = config.SCREEN_WIDTH * 0.8 / 10
    cell_h = config.SCREEN_HEIGHT * 0.3 / 8

    # Center of the brick at row 0, column 0.
    origin_x = config.SCREEN_WIDTH * 0.1 + cell_w / 2
    origin_y = config.SCREEN_HEIGHT * 0.2 + cell_h / 2

    # Draw the brick at its grid position.
    center = (origin_x + cell_w * col, origin_y + cell_h * row)
    self.__brick = obj_canvas.Rectangle(self.__canvas, center,
                                        (cell_w, cell_h),
                                        fill=self.__color,
                                        outline=self.__color)

  def handle_collision(self, ball):
    """ Detect and handle a collision between the ball and this brick.

    Args:
      ball: The ball we could be colliding with.
    Returns:
      True if there was a collision, False otherwise. """
    coll_x, coll_y = ball.handle_collision(self.__brick)
    if not (coll_x and coll_y):
      return False

    # The ball hit us; remove this brick from the canvas.
    self.__brick.delete()
    return True
class BrickLayer(object):
    """ Controls a layer of bricks. """

    def __init__(self, canvas, row, color):
        """
        Args:
            canvas: The canvas to draw the brick layer on.
            row: Which row the layer is, with row 0 being the top.
            color: The color of the layer. """
        self.__canvas = canvas
        self.__row = row
        # Build the 10 individual bricks that make up this row.
        self.__bricks = {Brick(self.__canvas, row, col, color)
                         for col in range(0, 10)}

    def handle_collision(self, ball):
        """ Detect and handle a collision between the ball and this layer.
        Args:
            ball: The ball we could be colliding with.
        Returns:
            The number of points that should be awarded, or zero if there was no
            collision. """
        # Find every brick the ball destroyed this frame.
        destroyed = [brick for brick in self.__bricks
                     if brick.handle_collision(ball)]
        points = 0
        for brick in destroyed:
            # Any destroyed brick in this layer awards the row's point value.
            points = config.ROW_POINTS[self.__row]
            self.__bricks.remove(brick)
        return points
class Bricks(object):
    """ Creates the entire set of bricks. """

    def __init__(self, canvas):
        """
        Args:
            canvas: The canvas to draw the bricks on. """
        self.__canvas = canvas
        # One layer per row, colored from the configured palette.
        self.__layers = [
            BrickLayer(self.__canvas, row,
                       config.BreakoutColors.LAYER_COLORS[row])
            for row in range(0, 8)
        ]

    def handle_collision(self, ball):
        """ Detect and handle a collision between the ball and all the bricks.
        Args:
            ball: The ball we could be colliding with.
        Returns:
            The number of points that should be awarded, or 0 if there was no
            collision. """
        # Sum the points awarded by every layer.
        return sum(layer.handle_collision(ball) for layer in self.__layers)
class Ball(object):
    """ Creates the ball. """

    def __init__(self, canvas, speed=20):
        """
        Args:
            canvas: The canvas to draw the balls on.
            speed: Base velocity of the ball, in px/s. """
        self.__canvas = canvas
        # Pick the initial direction; the multiplier scales it per step.
        self.__choose_velocity()
        self.__vel_mult = speed
        # Last-known collision state per object, used to pick the bounce axis.
        self.__collisions = {}

        # Starting position and size of the ball (a small square).
        self.__ball_x = config.SCREEN_WIDTH / 2
        self.__ball_y = config.SCREEN_HEIGHT * 0.6
        side = config.SCREEN_HEIGHT * 0.015

        color = config.BreakoutColors.BALL_COLOR
        self.__ball = obj_canvas.Rectangle(self.__canvas,
                                           (self.__ball_x, self.__ball_y),
                                           (side, side),
                                           fill=color,
                                           outline=color)

    def __animate(self):
        """ Advance the ball one step along its velocity vector. """
        self.__ball.move(self.__vel_x * self.__vel_mult,
                         self.__vel_y * self.__vel_mult)

    def __choose_velocity(self):
        """ Chooses a random starting velocity for the ball. """
        # The x component lands in [0.4, 0.6]; y takes the remainder so the
        # two components always sum to exactly 1.
        self.__vel_x = random.randint(400, 600) / 1000.0
        self.__vel_y = 1 - self.__vel_x

    def update(self):
        """ Updates the ball's state. """
        self.__animate()

    def handle_collision(self, canvas_obj):
        """ Check for a collision between the ball and another canvas object. It
        automatically makes the ball bounce.
        Args:
            canvas_obj: The canvas object to check for a collision with.
        Returns:
            A tuple of booleans. The first element indicates whether there is a
            collision in the x direction, the second indicates whether there is
            a collision in the y direction. """
        coll_x, coll_y = \
            obj_canvas.CanvasObject.check_collision(self.__ball, canvas_obj)
        # Previous collision state for this object (defaults to no contact).
        prev_x, prev_y = self.__collisions.get(canvas_obj, (False, False))
        self.__collisions[canvas_obj] = (coll_x, coll_y)
        if coll_x and coll_y:
            # Bounce along whichever axis only just started colliding.
            if not prev_x:
                self.__vel_x = -self.__vel_x
            if not prev_y:
                self.__vel_y = -self.__vel_y
        return (coll_x, coll_y)

    def dropped(self):
        """ Detects whether the ball dropped.
        Returns:
            True if it did, False otherwise. """
        _, y_pos = self.__ball.get_pos()
        # Past the bottom edge means the paddle missed it.
        return y_pos > config.SCREEN_HEIGHT

    def reset(self):
        """ Resets the ball to its starting position. """
        self.__ball.set_pos(self.__ball_x, self.__ball_y)
        # Reset velocity with a fresh random direction.
        self.__choose_velocity()

    def increase_speed(self):
        """ Increases speed of the ball as the game progresses. """
        self.__vel_mult = self.__vel_mult + config.SPEED_INCREASE
class LoadScreen(object):
    """ Shows a loading screen. """
    def __init__(self, canvas):
        """
        Args:
            canvas: The canvas to draw it on. """
        self.__canvas = canvas
        # Calculate positions: three small square dots centered on the screen.
        dot_l_x = config.SCREEN_WIDTH * 0.45
        dot_m_x = config.SCREEN_WIDTH * 0.5
        dot_r_x = config.SCREEN_WIDTH * 0.55
        dot_y = config.SCREEN_HEIGHT * 0.5
        dot_w = config.SCREEN_WIDTH * 0.02
        dot_h = dot_w  # square dots
        color = config.BreakoutColors.WALL_COLOR
        # Draw dots.
        self.__dot_l = obj_canvas.Rectangle(self.__canvas, (dot_l_x, dot_y),
                                            (dot_w, dot_h), fill=color)
        self.__dot_m = obj_canvas.Rectangle(self.__canvas, (dot_m_x, dot_y),
                                            (dot_w, dot_h), fill=color)
        self.__dot_r = obj_canvas.Rectangle(self.__canvas, (dot_r_x, dot_y),
                                            (dot_w, dot_h), fill=color)
        # Make sure it's displayed immediately (explicit canvas flush).
        self.__canvas.update()
    def clear(self):
        """ Clears the loading screen and prepares for the game. """
        self.__dot_l.delete()
        self.__dot_m.delete()
self.__dot_r.delete() | breakout_graphics.py | import collections
import random
import numpy as np
import config
import obj_canvas
class Paddle(object):
    """ Represents the breakout paddle. """

    def __init__(self, canvas):
        """
        Args:
            canvas: The Canvas to draw the paddle on. """
        self.__canvas = canvas
        # Sliding window of recent x measurements used for smoothing.
        self.__old_positions = collections.deque()

        # Paddle geometry, derived from the screen dimensions.
        center = (config.SCREEN_WIDTH / 2,
                  config.SCREEN_HEIGHT - config.SCREEN_HEIGHT * 0.1)
        size = (config.SCREEN_WIDTH / 4, config.SCREEN_HEIGHT * 0.015)
        color = config.BreakoutColors.PADDLE_COLOR

        # The actual paddle is a rectangle.
        self.__paddle = obj_canvas.Rectangle(self.__canvas, center, size,
                                             fill=color,
                                             outline=color)

    def update_position(self, new_x):
        """ Updates the x position of the paddle.
        Args:
            new_x: The new x position. """
        _, y_pos = self.__paddle.get_pos()
        # Average the latest samples to damp measurement jitter.
        self.__old_positions.append(new_x)
        while len(self.__old_positions) > config.AVERAGE_POINTS:
            self.__old_positions.popleft()
        self.__paddle.set_pos(np.mean(self.__old_positions), y_pos)

    def handle_collision(self, ball):
        """ Handles collisions between the paddle and the ball.
        Args:
            ball: The ball that could be colliding. """
        ball.handle_collision(self.__paddle)
class Walls(object):
    """ This class controls the static walls. """

    def __init__(self, canvas):
        """
        Args:
            canvas: The Canvas to draw the walls on. """
        self.__canvas = canvas
        color = config.BreakoutColors.WALL_COLOR

        # Top wall geometry.
        # NOTE(review): the width mixes SCREEN_WIDTH and SCREEN_HEIGHT; this
        # only lines up with the 10%-wide side walls on a square screen --
        # confirm whether SCREEN_WIDTH * 0.8 was intended.
        top_center = (config.SCREEN_WIDTH / 2, config.SCREEN_HEIGHT * 0.15)
        top_size = (config.SCREEN_WIDTH - config.SCREEN_HEIGHT * 0.2,
                    config.SCREEN_HEIGHT * 0.1)

        # Side wall geometry; the right wall mirrors the left one.
        left_x = config.SCREEN_WIDTH * 0.05
        side_y = config.SCREEN_HEIGHT / 2
        side_size = (config.SCREEN_WIDTH * 0.1, config.SCREEN_HEIGHT * 0.8)
        right_x = config.SCREEN_WIDTH - left_x

        self.__wall_top = obj_canvas.Rectangle(self.__canvas, top_center,
                                               top_size,
                                               fill=color, outline=color)
        self.__wall_left = obj_canvas.Rectangle(self.__canvas,
                                                (left_x, side_y), side_size,
                                                fill=color, outline=color)
        self.__wall_right = obj_canvas.Rectangle(self.__canvas,
                                                 (right_x, side_y), side_size,
                                                 fill=color, outline=color)

    def handle_collision(self, ball):
        """ Handles collisions between the walls and the ball.
        Args:
            ball: The ball that could be colliding. """
        # Just let the ball bounce off whichever wall it touches.
        for wall in (self.__wall_top, self.__wall_left, self.__wall_right):
            ball.handle_collision(wall)
class ScoreBox(object):
    """ Shows the user's score and number of turns remaining. """

    class _Digit(object):
        """ A superclass for digit objects. """

        def __init__(self, canvas, pos, scale):
            """
            Args:
                canvas: The canvas to draw on.
                pos: The center positon of the digit.
                scale: A tuple indicating the horizontal and vertical size of the
                    digit. """
            self._canvas = canvas
            self._pos_x, self._pos_y = pos
            self._scale_x, self._scale_y = scale
            self._color = config.BreakoutColors.WALL_COLOR
            self._bg_color = config.BreakoutColors.BG_COLOR
            # Shapes that make up the digit.
            self._shapes = []
            # Draw the digit.
            self._draw_digit()

        def _draw_digit(self):
            """ Draws the digit. Must be overridden by subclasses. """
            raise NotImplementedError("_draw_digit must be implemented by subclass.")

        def delete(self):
            """ Delete the digit by deleting each component shape. """
            for shape in self._shapes:
                shape.delete()

    class Zero(_Digit):
        """ A zero digit. """

        def _draw_digit(self):
            """ Draws the zero digit. """
            # A filled rectangle with its middle "cut out" in the background
            # color.
            outer_rect_pos = (self._pos_x, self._pos_y)
            outer_rect_size = (self._scale_x, self._scale_y)
            inner_rect_pos = outer_rect_pos
            inner_rect_size = (self._scale_x * 0.4, self._scale_y * 0.4)
            outer_rect = obj_canvas.Rectangle(self._canvas, outer_rect_pos,
                                              outer_rect_size,
                                              fill=self._color,
                                              outline=self._color)
            inner_rect = obj_canvas.Rectangle(self._canvas, inner_rect_pos,
                                              inner_rect_size,
                                              fill=self._bg_color,
                                              outline=self._bg_color)
            self._shapes.extend([outer_rect, inner_rect])

    class One(_Digit):
        """ A one digit. """

        def _draw_digit(self):
            """ Draws the one digit: a single vertical bar. """
            rect_pos = (self._pos_x, self._pos_y)
            rect_size = (self._scale_x * 0.2, self._scale_y)
            outer_rect = obj_canvas.Rectangle(self._canvas, rect_pos,
                                              rect_size,
                                              fill=self._color,
                                              outline=self._color)
            self._shapes.append(outer_rect)

    class Two(_Digit):
        """ A two digit. """

        def _draw_digit(self):
            """ Draws the two digit from three bars and two side segments. """
            top_rect_x = self._pos_x
            top_rect_y = self._pos_y - self._scale_y * 0.4
            top_rect_w = self._scale_x
            top_rect_h = self._scale_y * 0.2
            mid_rect_x = top_rect_x
            mid_rect_y = self._pos_y
            mid_rect_w = top_rect_w
            mid_rect_h = top_rect_h
            bot_rect_x = top_rect_x
            bot_rect_y = self._pos_y + self._scale_y * 0.4
            bot_rect_w = top_rect_w
            bot_rect_h = top_rect_h
            # Lower-left and upper-right connector segments.
            left_rect_x = self._pos_x - self._scale_x / 2 + top_rect_h / 2
            left_rect_y = mid_rect_y + (bot_rect_y - mid_rect_y) / 2
            left_rect_w = top_rect_h
            left_rect_h = self._scale_y / 2
            right_rect_x = self._pos_x + self._scale_x / 2 - top_rect_h / 2
            right_rect_y = top_rect_y + (mid_rect_y - top_rect_y) / 2
            right_rect_w = top_rect_h
            right_rect_h = self._scale_y / 2
            top_rect = obj_canvas.Rectangle(self._canvas,
                                            (top_rect_x, top_rect_y),
                                            (top_rect_w, top_rect_h),
                                            fill=self._color,
                                            outline=self._color)
            mid_rect = obj_canvas.Rectangle(self._canvas,
                                            (mid_rect_x, mid_rect_y),
                                            (mid_rect_w, mid_rect_h),
                                            fill=self._color,
                                            outline=self._color)
            bot_rect = obj_canvas.Rectangle(self._canvas,
                                            (bot_rect_x, bot_rect_y),
                                            (bot_rect_w, bot_rect_h),
                                            fill=self._color,
                                            outline=self._color)
            left_rect = obj_canvas.Rectangle(self._canvas,
                                             (left_rect_x, left_rect_y),
                                             (left_rect_w, left_rect_h),
                                             fill=self._color,
                                             outline=self._color)
            right_rect = obj_canvas.Rectangle(self._canvas,
                                              (right_rect_x, right_rect_y),
                                              (right_rect_w, right_rect_h),
                                              fill=self._color,
                                              outline=self._color)
            # Save these so we can switch them for fives.
            self._left_rect = left_rect
            self._right_rect = right_rect
            self._shapes.extend([top_rect, mid_rect, bot_rect, left_rect,
                                 right_rect])

    class Three(_Digit):
        """ A three digit. """

        def _draw_digit(self):
            """ Draws the three digit. """
            # A filled rectangle with two background-colored notches on the
            # left side.
            back_rect_pos = (self._pos_x, self._pos_y)
            back_rect_size = (self._scale_x, self._scale_y)
            top_rect_x = self._pos_x - self._scale_x * 0.1
            top_rect_y = self._pos_y - self._scale_y * 0.2
            top_rect_w = self._scale_x * 0.8
            top_rect_h = self._scale_y * 0.2
            bot_rect_x = top_rect_x
            bot_rect_y = self._pos_y + self._scale_y * 0.2
            bot_rect_w = top_rect_w
            bot_rect_h = top_rect_h
            back_rect = obj_canvas.Rectangle(self._canvas,
                                             back_rect_pos,
                                             back_rect_size,
                                             fill=self._color,
                                             outline=self._color)
            top_rect = obj_canvas.Rectangle(self._canvas,
                                            (top_rect_x, top_rect_y),
                                            (top_rect_w, top_rect_h),
                                            fill=self._bg_color,
                                            outline=self._bg_color)
            bot_rect = obj_canvas.Rectangle(self._canvas,
                                            (bot_rect_x, bot_rect_y),
                                            (bot_rect_w, bot_rect_h),
                                            fill=self._bg_color,
                                            outline=self._bg_color)
            self._shapes.extend([back_rect, top_rect, bot_rect])

    class Four(_Digit):
        """ A four digit. """

        def _draw_digit(self):
            """ Draws the four digit. """
            top_rect_x = self._pos_x
            top_rect_y = self._pos_y - self._scale_y * 0.3
            top_rect_w = self._scale_x
            top_rect_h = self._scale_y * 0.4
            inner_rect_x = top_rect_x
            inner_rect_y = self._pos_y - self._scale_y * 0.4
            inner_rect_w = self._scale_x * 0.6
            inner_rect_h = self._scale_y * 0.2
            bot_rect_x = self._pos_x + self._scale_x * 0.4
            bot_rect_y = self._pos_y + self._scale_y * 0.2
            bot_rect_w = self._scale_x * 0.2
            bot_rect_h = self._scale_y * 0.6
            top_rect = obj_canvas.Rectangle(self._canvas,
                                            (top_rect_x, top_rect_y),
                                            (top_rect_w, top_rect_h),
                                            fill=self._color,
                                            outline=self._color)
            inner_rect = obj_canvas.Rectangle(self._canvas,
                                              (inner_rect_x, inner_rect_y),
                                              (inner_rect_w, inner_rect_h),
                                              fill=self._bg_color,
                                              outline=self._bg_color)
            bot_rect = obj_canvas.Rectangle(self._canvas,
                                            (bot_rect_x, bot_rect_y),
                                            (bot_rect_w, bot_rect_h),
                                            fill=self._color,
                                            outline=self._color)
            self._shapes.extend([top_rect, inner_rect, bot_rect])

    class Five(Two):
        """ A five digit. """

        def _draw_digit(self):
            # The five is very similar to the two. We just have to flip it
            # by swapping the two connector segments horizontally.
            super(ScoreBox.Five, self)._draw_digit()
            left_pos_x, left_pos_y = self._left_rect.get_pos()
            right_pos_x, right_pos_y = self._right_rect.get_pos()
            self._left_rect.set_pos(right_pos_x, left_pos_y)
            self._right_rect.set_pos(left_pos_x, right_pos_y)

    class Six(_Digit):
        """ A six digit. """

        def _draw_digit(self):
            back_rect_pos = (self._pos_x, self._pos_y)
            back_rect_size = (self._scale_x, self._scale_y)
            inner_rect_x = self._pos_x
            inner_rect_y = self._pos_y + self._scale_y * 0.2
            inner_rect_w = self._scale_x * 0.6
            inner_rect_h = self._scale_y * 0.2
            top_rect_x = self._pos_x - self._scale_x * 0.1
            top_rect_y = self._pos_y - self._scale_y * 0.2
            top_rect_w = self._scale_x * 0.8
            top_rect_h = self._scale_y * 0.2
            back_rect = obj_canvas.Rectangle(self._canvas, back_rect_pos,
                                             back_rect_size,
                                             fill=self._color,
                                             outline=self._color)
            inner_rect = obj_canvas.Rectangle(self._canvas,
                                              (inner_rect_x, inner_rect_y),
                                              (inner_rect_w, inner_rect_h),
                                              fill=self._bg_color,
                                              outline=self._bg_color)
            top_rect = obj_canvas.Rectangle(self._canvas,
                                            (top_rect_x, top_rect_y),
                                            (top_rect_w, top_rect_h),
                                            fill=self._bg_color,
                                            outline=self._bg_color)
            self._shapes.extend([back_rect, inner_rect, top_rect])

    class Seven(_Digit):
        """ A seven digit. """

        def _draw_digit(self):
            # A right-hand vertical bar plus a top bar.
            side_rect_x = self._pos_x + self._scale_x * 0.4
            side_rect_y = self._pos_y
            side_rect_w = self._scale_x * 0.2
            side_rect_h = self._scale_y
            top_rect_x = self._pos_x + self._scale_x * 0.1
            top_rect_y = self._pos_y - self._scale_y * 0.4
            top_rect_w = self._scale_x * 0.8
            top_rect_h = self._scale_y * 0.2
            side_rect = obj_canvas.Rectangle(self._canvas,
                                             (side_rect_x, side_rect_y),
                                             (side_rect_w, side_rect_h),
                                             fill=self._color,
                                             outline=self._color)
            top_rect = obj_canvas.Rectangle(self._canvas,
                                            (top_rect_x, top_rect_y),
                                            (top_rect_w, top_rect_h),
                                            fill=self._color,
                                            outline=self._color)
            self._shapes.extend([side_rect, top_rect])

    class Eight(_Digit):
        """ An eight digit. """

        def _draw_digit(self):
            # A filled rectangle with two centered background-colored holes.
            back_rect_pos = (self._pos_x, self._pos_y)
            back_rect_size = (self._scale_x, self._scale_y)
            top_rect_x = self._pos_x
            top_rect_y = self._pos_y - self._scale_y * 0.2
            top_rect_w = self._scale_x * 0.6
            top_rect_h = self._scale_y * 0.2
            bot_rect_x = top_rect_x
            bot_rect_y = self._pos_y + self._scale_y * 0.2
            bot_rect_w = top_rect_w
            bot_rect_h = top_rect_h
            back_rect = obj_canvas.Rectangle(self._canvas, back_rect_pos,
                                             back_rect_size,
                                             fill=self._color,
                                             outline=self._color)
            top_rect = obj_canvas.Rectangle(self._canvas,
                                            (top_rect_x, top_rect_y),
                                            (top_rect_w, top_rect_h),
                                            fill=self._bg_color,
                                            outline=self._bg_color)
            bot_rect = obj_canvas.Rectangle(self._canvas,
                                            (bot_rect_x, bot_rect_y),
                                            (bot_rect_w, bot_rect_h),
                                            fill=self._bg_color,
                                            outline=self._bg_color)
            self._shapes.extend([back_rect, top_rect, bot_rect])

    class Nine(_Digit):
        """ A nine digit. """

        def _draw_digit(self):
            back_rect_pos = (self._pos_x, self._pos_y)
            back_rect_size = (self._scale_x, self._scale_y)
            top_rect_x = self._pos_x
            top_rect_y = self._pos_y - self._scale_y * 0.2
            top_rect_w = self._scale_x * 0.6
            top_rect_h = self._scale_y * 0.2
            bot_rect_x = self._pos_x - self._scale_x * 0.1
            bot_rect_y = self._pos_y + self._scale_y * 0.2
            bot_rect_w = self._scale_x * 0.8
            bot_rect_h = self._scale_y * 0.2
            back_rect = obj_canvas.Rectangle(self._canvas, back_rect_pos,
                                             back_rect_size,
                                             fill=self._color,
                                             outline=self._color)
            top_rect = obj_canvas.Rectangle(self._canvas,
                                            (top_rect_x, top_rect_y),
                                            (top_rect_w, top_rect_h),
                                            fill=self._bg_color,
                                            outline=self._bg_color)
            bot_rect = obj_canvas.Rectangle(self._canvas,
                                            (bot_rect_x, bot_rect_y),
                                            (bot_rect_w, bot_rect_h),
                                            fill=self._bg_color,
                                            outline=self._bg_color)
            self._shapes.extend([back_rect, top_rect, bot_rect])

    def __init__(self, canvas):
        """
        Args:
            canvas: The Canvas to draw the walls on. """
        self.__canvas = canvas
        # Draw the turn counter. (Geometry is computed inside
        # __update_turn_count; the duplicated local copies that used to live
        # here were unused.)
        self.__turn_count = 3
        self.__disp_turns = None
        self.__update_turn_count()
        # Draw the score.
        self.__score = 0
        self.__ones_digit = None
        self.__tens_digit = None
        self.__hundreds_digit = None
        self.__update_score()

    def __get_digit(self, digit):
        """ A helper function that selects the correct digit class for a number.
        Args:
            digit: The number, between 0 and 9. Must be an int (a float index
                would raise TypeError).
        Returns:
            The digit class. """
        lut = [ScoreBox.Zero, ScoreBox.One, ScoreBox.Two, ScoreBox.Three,
               ScoreBox.Four, ScoreBox.Five, ScoreBox.Six, ScoreBox.Seven,
               ScoreBox.Eight, ScoreBox.Nine]
        return lut[digit]

    def __update_turn_count(self):
        """ Update the displayed turn counter. """
        if self.__disp_turns:
            # Delete the previous number.
            self.__disp_turns.delete()
        # Calculate position and size for turn counter.
        turn_count_x = config.SCREEN_WIDTH * 0.75
        turn_count_y = config.SCREEN_HEIGHT * 0.05
        turn_count_w = config.SCREEN_WIDTH * 0.05
        turn_count_h = config.SCREEN_HEIGHT * 0.08
        # Draw it.
        digit = self.__get_digit(self.__turn_count)
        self.__disp_turns = digit(self.__canvas,
                                  (turn_count_x, turn_count_y),
                                  (turn_count_w, turn_count_h))

    def __update_score(self):
        """ Updates the displayed score. """
        # Calculate position and size of score digits.
        score_right_x = config.SCREEN_WIDTH * 0.62
        score_mid_x = config.SCREEN_WIDTH * 0.55
        score_left_x = config.SCREEN_WIDTH * 0.48
        score_y = config.SCREEN_HEIGHT * 0.05
        score_w = config.SCREEN_WIDTH * 0.05
        score_h = config.SCREEN_HEIGHT * 0.08
        # Draw hundreds digit. Floor division keeps the lookup index an int
        # under Python 3 ("/" yields a float and breaks list indexing); the
        # extra "% 10" keeps scores >= 1000 from indexing past the table.
        if self.__hundreds_digit:
            self.__hundreds_digit.delete()
        digit = self.__get_digit((self.__score // 100) % 10)
        self.__hundreds_digit = digit(self.__canvas,
                                      (score_left_x, score_y),
                                      (score_w, score_h))
        # Draw tens digit.
        if self.__tens_digit:
            self.__tens_digit.delete()
        digit = self.__get_digit((self.__score % 100) // 10)
        self.__tens_digit = digit(self.__canvas,
                                  (score_mid_x, score_y),
                                  (score_w, score_h))
        # Draw ones digit.
        if self.__ones_digit:
            self.__ones_digit.delete()
        digit = self.__get_digit(self.__score % 10)
        self.__ones_digit = digit(self.__canvas,
                                  (score_right_x, score_y),
                                  (score_w, score_h))

    def decrement_turns(self):
        """ Decrements the number of turns a user has.
        Returns:
            True if the user had a turn, False if there were none left. """
        if not self.__turn_count:
            # Out of turns.
            return False
        self.__turn_count -= 1
        self.__update_turn_count()
        return True

    def increase_score(self, amount):
        """ Increase the user's score by a given amount. """
        self.__score += amount
        self.__update_score()
class Brick(object):
    """ Controls a single brick. """

    def __init__(self, canvas, row, col, color):
        """
        Args:
            canvas: The canvas to draw the brick on.
            row: Which row the brick is in, with row 0 being the top.
            col: Which column the brick is in, with col 0 being the left.
            color: The color of the brick. """
        self.__canvas = canvas
        self.__color = color

        # The playing field fits 10 bricks across and 8 rows down.
        cell_w = config.SCREEN_WIDTH * 0.8 / 10
        cell_h = config.SCREEN_HEIGHT * 0.3 / 8
        # Center of the top-left brick.
        origin_x = config.SCREEN_WIDTH * 0.1 + cell_w / 2
        origin_y = config.SCREEN_HEIGHT * 0.2 + cell_h / 2

        # Draw the brick at the center of its grid cell.
        center = (origin_x + cell_w * col, origin_y + cell_h * row)
        self.__brick = obj_canvas.Rectangle(self.__canvas, center,
                                            (cell_w, cell_h),
                                            fill=self.__color,
                                            outline=self.__color)

    def handle_collision(self, ball):
        """ Detect and handle a collision between the ball and this brick.
        Args:
            ball: The ball we could be colliding with.
        Returns:
            True if there was a collision, False otherwise. """
        hit_x, hit_y = ball.handle_collision(self.__brick)
        if not (hit_x and hit_y):
            return False
        # Full (both-axis) collision: this brick is destroyed.
        self.__brick.delete()
        return True
class BrickLayer(object):
    """ Controls a layer of bricks. """

    def __init__(self, canvas, row, color):
        """
        Args:
            canvas: The canvas to draw the brick layer on.
            row: Which row the layer is, with row 0 being the top.
            color: The color of the layer. """
        self.__canvas = canvas
        self.__row = row
        # Build the 10 individual bricks that make up this row.
        self.__bricks = {Brick(self.__canvas, row, col, color)
                         for col in range(0, 10)}

    def handle_collision(self, ball):
        """ Detect and handle a collision between the ball and this layer.
        Args:
            ball: The ball we could be colliding with.
        Returns:
            The number of points that should be awarded, or zero if there was no
            collision. """
        # Find every brick the ball destroyed this frame.
        destroyed = [brick for brick in self.__bricks
                     if brick.handle_collision(ball)]
        points = 0
        for brick in destroyed:
            # Any destroyed brick in this layer awards the row's point value.
            points = config.ROW_POINTS[self.__row]
            self.__bricks.remove(brick)
        return points
class Bricks(object):
    """ Creates the entire set of bricks. """

    def __init__(self, canvas):
        """
        Args:
            canvas: The canvas to draw the bricks on. """
        self.__canvas = canvas
        # One layer per row, colored from the configured palette.
        self.__layers = [
            BrickLayer(self.__canvas, row,
                       config.BreakoutColors.LAYER_COLORS[row])
            for row in range(0, 8)
        ]

    def handle_collision(self, ball):
        """ Detect and handle a collision between the ball and all the bricks.
        Args:
            ball: The ball we could be colliding with.
        Returns:
            The number of points that should be awarded, or 0 if there was no
            collision. """
        # Sum the points awarded by every layer.
        return sum(layer.handle_collision(ball) for layer in self.__layers)
class Ball(object):
    """ Creates the ball. """

    def __init__(self, canvas, speed=20):
        """
        Args:
            canvas: The canvas to draw the balls on.
            speed: Base velocity of the ball, in px/s. """
        self.__canvas = canvas
        # Pick the initial direction; the multiplier scales it per step.
        self.__choose_velocity()
        self.__vel_mult = speed
        # Last-known collision state per object, used to pick the bounce axis.
        self.__collisions = {}

        # Starting position and size of the ball (a small square).
        self.__ball_x = config.SCREEN_WIDTH / 2
        self.__ball_y = config.SCREEN_HEIGHT * 0.6
        side = config.SCREEN_HEIGHT * 0.015

        color = config.BreakoutColors.BALL_COLOR
        self.__ball = obj_canvas.Rectangle(self.__canvas,
                                           (self.__ball_x, self.__ball_y),
                                           (side, side),
                                           fill=color,
                                           outline=color)

    def __animate(self):
        """ Advance the ball one step along its velocity vector. """
        self.__ball.move(self.__vel_x * self.__vel_mult,
                         self.__vel_y * self.__vel_mult)

    def __choose_velocity(self):
        """ Chooses a random starting velocity for the ball. """
        # The x component lands in [0.4, 0.6]; y takes the remainder so the
        # two components always sum to exactly 1.
        self.__vel_x = random.randint(400, 600) / 1000.0
        self.__vel_y = 1 - self.__vel_x

    def update(self):
        """ Updates the ball's state. """
        self.__animate()

    def handle_collision(self, canvas_obj):
        """ Check for a collision between the ball and another canvas object. It
        automatically makes the ball bounce.
        Args:
            canvas_obj: The canvas object to check for a collision with.
        Returns:
            A tuple of booleans. The first element indicates whether there is a
            collision in the x direction, the second indicates whether there is
            a collision in the y direction. """
        coll_x, coll_y = \
            obj_canvas.CanvasObject.check_collision(self.__ball, canvas_obj)
        # Previous collision state for this object (defaults to no contact).
        prev_x, prev_y = self.__collisions.get(canvas_obj, (False, False))
        self.__collisions[canvas_obj] = (coll_x, coll_y)
        if coll_x and coll_y:
            # Bounce along whichever axis only just started colliding.
            if not prev_x:
                self.__vel_x = -self.__vel_x
            if not prev_y:
                self.__vel_y = -self.__vel_y
        return (coll_x, coll_y)

    def dropped(self):
        """ Detects whether the ball dropped.
        Returns:
            True if it did, False otherwise. """
        _, y_pos = self.__ball.get_pos()
        # Past the bottom edge means the paddle missed it.
        return y_pos > config.SCREEN_HEIGHT

    def reset(self):
        """ Resets the ball to its starting position. """
        self.__ball.set_pos(self.__ball_x, self.__ball_y)
        # Reset velocity with a fresh random direction.
        self.__choose_velocity()

    def increase_speed(self):
        """ Increases speed of the ball as the game progresses. """
        self.__vel_mult = self.__vel_mult + config.SPEED_INCREASE
class LoadScreen(object):
    """ Shows a loading screen. """
    def __init__(self, canvas):
        """
        Args:
            canvas: The canvas to draw it on. """
        self.__canvas = canvas
        # Calculate positions: three small square dots centered on the screen.
        dot_l_x = config.SCREEN_WIDTH * 0.45
        dot_m_x = config.SCREEN_WIDTH * 0.5
        dot_r_x = config.SCREEN_WIDTH * 0.55
        dot_y = config.SCREEN_HEIGHT * 0.5
        dot_w = config.SCREEN_WIDTH * 0.02
        dot_h = dot_w  # square dots
        color = config.BreakoutColors.WALL_COLOR
        # Draw dots.
        self.__dot_l = obj_canvas.Rectangle(self.__canvas, (dot_l_x, dot_y),
                                            (dot_w, dot_h), fill=color)
        self.__dot_m = obj_canvas.Rectangle(self.__canvas, (dot_m_x, dot_y),
                                            (dot_w, dot_h), fill=color)
        self.__dot_r = obj_canvas.Rectangle(self.__canvas, (dot_r_x, dot_y),
                                            (dot_w, dot_h), fill=color)
        # Make sure it's displayed immediately (explicit canvas flush).
        self.__canvas.update()
    def clear(self):
        """ Clears the loading screen and prepares for the game. """
        self.__dot_l.delete()
        self.__dot_m.delete()
self.__dot_r.delete() | 0.836454 | 0.257563 |
import os
import discord
import requests
from discord import CategoryChannel, Guild, RawReactionActionEvent
from discord.channel import VoiceChannel
from discord.ext import commands
from discord.member import Member, VoiceState
from dotenv import load_dotenv
from Database.bot_db import BotDb
# Pull configuration from the local .env file into the process environment.
load_dotenv()

# Base endpoint for raw Discord REST calls made with `requests`.
BASE_URL = 'https://discord.com/api'
API_TOKEN = os.getenv('API_TOKEN')  # bot token; None if unset -- TODO confirm .env always provides it
TARGET_V_CHANNEL_ID = os.getenv('TARGET_V_CHANNEL_ID')  # trigger voice channel that spawns group rooms
INVITE_CHANNEL_ID = os.getenv('INVITE_CHANNEL_ID')  # text channel where invite messages are posted
INVITE_ROLE_ID = os.getenv('INVITE_ROLE_ID')  # role mentioned in invites and toggled by reactions
ADD_ROLE_CHANNEL_ID = os.getenv('ADD_ROLE_CHANNEL_ID')  # channel watched for role reactions
# NOTE(review): the key has a lowercase "l" ("ROlE"); the .env entry must match it exactly.
ADD_ROlE_EMOJI = os.getenv('ADD_ROlE_EMOJI')
RECOG_STR = os.getenv('RECOG_STR')  # name prefix marking bot-managed categories/channels

# Authorization header for the raw REST calls.
headers = {
    'Authorization': 'Bot ' + API_TOKEN
}

client: commands.Bot = commands.Bot(command_prefix=os.getenv('PREFIX'), intents=discord.Intents.all())
bot_db = BotDb()  # persistence layer mapping bot/channel ids to posted message ids
@client.event
async def on_ready():
    """Log the bot's identity once the Discord connection is ready."""
    print(f'起動:{client.user}')
@client.event
async def on_voice_state_update(user: Member, old: VoiceState, new: VoiceState):
    """Create/tear down per-group channels as members move between voice channels.

    Leaving: when a bot-managed voice channel (name prefixed with RECOG_STR)
    becomes empty, its whole category and the invite message that announced it
    are deleted.
    Joining: when someone enters the trigger channel (TARGET_V_CHANNEL_ID), a
    new numbered category with a text and a voice channel is created, waiting
    members are moved there, and an invite is posted to INVITE_CHANNEL_ID.
    """
    if old.channel is not None:
        old_channel: VoiceChannel = old.channel
        # Tear down the whole category once its managed voice channel is empty.
        if old_channel.name.startswith(RECOG_STR) and not old_channel.members:
            channel_category: CategoryChannel = old_channel.category
            # Delete children before the category itself. Plain loops instead
            # of the original list comprehensions used only for side effects.
            for text_channel in channel_category.text_channels:
                await text_channel.delete()
            for voice_channel in channel_category.voice_channels:
                await voice_channel.delete()
            await channel_category.delete()
            # Remove the invite message that announced this room.
            msg_id = bot_db.get_message_id(client.user.id, old_channel.id)
            requests.delete(
                f'{BASE_URL}/channels/{INVITE_CHANNEL_ID}/messages/{msg_id}',
                headers=headers)

    if new.channel is not None:
        new_v_channel: VoiceChannel = new.channel
        if new_v_channel.id == int(TARGET_V_CHANNEL_ID):
            guild: Guild = new_v_channel.guild
            # Numbers of existing managed categories; the next one gets max+1.
            # (The original annotated this list as `int` by mistake.)
            category_numbers = [int(c.name[len(RECOG_STR):])
                                for c in guild.categories
                                if c.name.startswith(RECOG_STR)]
            next_number = max(category_numbers) + 1 if category_numbers else 1
            category_name = RECOG_STR + str(next_number)
            # Bound method call instead of the original unbound
            # Guild.create_category(guild, ...).
            category: CategoryChannel = await guild.create_category(
                category_name,
                reason=f'{user.display_name}が{TARGET_V_CHANNEL_ID}に入室({str(user.id)})')
            await category.create_text_channel(category_name + '_聞き専')
            buntai_channel: VoiceChannel = await category.create_voice_channel(
                category_name + '_通話')
            # Move everyone waiting in the trigger channel into the new room.
            for member in new_v_channel.members:
                await member.move_to(buntai_channel)
            # Announce the new room with an invite, mentioning the invite role.
            invite: discord.Invite = await buntai_channel.create_invite()
            r = requests.post(
                f'{BASE_URL}/channels/{INVITE_CHANNEL_ID}/messages',
                json={'content': '<@&' + INVITE_ROLE_ID + '>\n' + invite.url},
                headers=headers)
            bot_db.insert_messages(client.user.id, int(r.json()['id']),
                                   int(INVITE_CHANNEL_ID), buntai_channel.id)
# Fired when a reaction is added.
@client.event
async def on_raw_reaction_add(payload: RawReactionActionEvent):
    """Grant the invite role when the watched emoji is added in the role channel."""
    # Same short-circuit order as before: the channel id is only parsed when
    # the emoji matches.
    if payload.emoji.name == ADD_ROlE_EMOJI \
            and payload.channel_id == int(ADD_ROLE_CHANNEL_ID):
        await change_role(payload, True)
# Fired when a reaction is removed.
@client.event
async def on_raw_reaction_remove(payload: RawReactionActionEvent):
    """Revoke the invite role when the watched emoji is removed in the role channel."""
    # Same short-circuit order as before: the channel id is only parsed when
    # the emoji matches.
    if payload.emoji.name == ADD_ROlE_EMOJI \
            and payload.channel_id == int(ADD_ROLE_CHANNEL_ID):
        await change_role(payload, False)
async def change_role(payload: RawReactionActionEvent, role_flg: bool):
    """Add or remove the invite role for the member who reacted.

    Args:
        payload: The raw reaction event carrying the guild and user ids.
        role_flg: True to add the role, False to remove it.
    """
    guild: Guild = client.get_guild(payload.guild_id)
    member: Member = guild.get_member(payload.user_id)
    role = guild.get_role(int(INVITE_ROLE_ID))
    # Plain if/else: the original abused a conditional *expression* purely
    # for its side effects.
    if role_flg:
        await member.add_roles(role)
    else:
        await member.remove_roles(role)
client.run(API_TOKEN) | main.py | import os
import discord
import requests
from discord import CategoryChannel, Guild, RawReactionActionEvent
from discord.channel import VoiceChannel
from discord.ext import commands
from discord.member import Member, VoiceState
from dotenv import load_dotenv
from Database.bot_db import BotDb
# Pull configuration from the local .env file into the process environment.
load_dotenv()

# Base endpoint for raw Discord REST calls made with `requests`.
BASE_URL = 'https://discord.com/api'
API_TOKEN = os.getenv('API_TOKEN')  # bot token; None if unset -- TODO confirm .env always provides it
TARGET_V_CHANNEL_ID = os.getenv('TARGET_V_CHANNEL_ID')  # trigger voice channel that spawns group rooms
INVITE_CHANNEL_ID = os.getenv('INVITE_CHANNEL_ID')  # text channel where invite messages are posted
INVITE_ROLE_ID = os.getenv('INVITE_ROLE_ID')  # role mentioned in invites and toggled by reactions
ADD_ROLE_CHANNEL_ID = os.getenv('ADD_ROLE_CHANNEL_ID')  # channel watched for role reactions
# NOTE(review): the key has a lowercase "l" ("ROlE"); the .env entry must match it exactly.
ADD_ROlE_EMOJI = os.getenv('ADD_ROlE_EMOJI')
RECOG_STR = os.getenv('RECOG_STR')  # name prefix marking bot-managed categories/channels

# Authorization header for the raw REST calls.
headers = {
    'Authorization': 'Bot ' + API_TOKEN
}

client: commands.Bot = commands.Bot(command_prefix=os.getenv('PREFIX'), intents=discord.Intents.all())
bot_db = BotDb()  # persistence layer mapping bot/channel ids to posted message ids
@client.event
async def on_ready():
    """Log the bot account name once the gateway connection is ready."""
    print(f'起動:{client.user}')  # "起動" = "started up"
@client.event
async def on_voice_state_update(user: Member, old: VoiceState, new: VoiceState):
    """Handle voice-channel joins and leaves.

    - When a managed (RECOG_STR-prefixed) voice channel becomes empty, delete
      its whole category (text + voice channels) and the invite message that
      advertised it.
    - When someone joins the trigger channel (TARGET_V_CHANNEL_ID), create a
      numbered category with a text and a voice channel, move the members
      there, and post an invite message.
    """
    if old.channel is not None:
        old_channel: VoiceChannel = old.channel
        # Tear down: once the managed voice channel has no members left,
        # remove the invite message and the whole category.
        if old_channel.name.startswith(RECOG_STR) and not old_channel.members:
            channel_category: CategoryChannel = old_channel.category
            for t in channel_category.text_channels:
                await t.delete()
            for v in channel_category.voice_channels:
                await v.delete()
            await channel_category.delete()
            msg_id = bot_db.get_message_id(client.user.id, old_channel.id)
            requests.delete(f'{BASE_URL}/channels/{INVITE_CHANNEL_ID}/messages/{msg_id}', headers=headers)
    if new.channel is not None:
        new_v_channel: VoiceChannel = new.channel
        # Spawn: joining the trigger channel creates a fresh text/voice pair,
        # moves the members over, and posts an invite.
        if new_v_channel.id == int(TARGET_V_CHANNEL_ID):
            guild: Guild = new_v_channel.guild
            # Numbers already used by existing managed categories
            # (was mis-annotated as `int`; it is a list of ints).
            categories_num = [int(c.name[len(RECOG_STR):]) for c in guild.categories if c.name.startswith(RECOG_STR)]
            next_categorie = max(categories_num) + 1 if categories_num else 1
            next_categorie_name = RECOG_STR + str(next_categorie)
            category: CategoryChannel = await guild.create_category(next_categorie_name, reason=f'{user.display_name}が{TARGET_V_CHANNEL_ID}に入室({str(user.id)})')
            await category.create_text_channel(next_categorie_name + '_聞き専')
            buntai_channel: VoiceChannel = await category.create_voice_channel(next_categorie_name + '_通話')
            for m in new_v_channel.members:
                await m.move_to(buntai_channel)
            # Post the invite through the REST API so we get the message id
            # back for later cleanup.
            invite: discord.Invite = await buntai_channel.create_invite()
            r = requests.post(f'{BASE_URL}/channels/{INVITE_CHANNEL_ID}/messages', json={'content': '<@&' + INVITE_ROLE_ID + '>\n' + invite.url}, headers=headers)
            bot_db.insert_messages(client.user.id, int(r.json()['id']), int(INVITE_CHANNEL_ID), buntai_channel.id)
# リアクション追加時
@client.event
async def on_raw_reaction_add(payload: RawReactionActionEvent):
    """Grant the invite role when the configured emoji is added in the role channel."""
    # Ignore other emojis and other channels.
    # NOTE(review): ADD_ROlE_EMOJI carries a lowercase-'l' typo, but it matches
    # the module-level constant, so it is consistent as written.
    if payload.emoji.name != ADD_ROlE_EMOJI \
            or payload.channel_id != int(ADD_ROLE_CHANNEL_ID):
        return
    await change_role(payload, True)
# リアクション削除時
@client.event
async def on_raw_reaction_remove(payload: RawReactionActionEvent):
    """Revoke the invite role when the configured emoji is removed in the role channel."""
    # Mirror of on_raw_reaction_add: same guard, opposite role flag.
    if payload.emoji.name != ADD_ROlE_EMOJI \
            or payload.channel_id != int(ADD_ROLE_CHANNEL_ID):
        return
    await change_role(payload, False)
async def change_role(payload: RawReactionActionEvent, role_flg: bool):
    """Add the invite role to the reacting member when role_flg is True,
    otherwise remove it."""
    guild: Guild = client.get_guild(payload.guild_id)
    member: Member = guild.get_member(payload.user_id)
    role = guild.get_role(int(INVITE_ROLE_ID))
    # Edit the member's roles.
    if role_flg:
        await member.add_roles(role)
    else:
        await member.remove_roles(role)
client.run(API_TOKEN) | 0.190197 | 0.045884 |
from sys import argv
from kwz import KWZParser, PALETTE
import pygame
import numpy as np
class layerSurface:
def __init__(self, size=(320, 240)):
self.surface = pygame.Surface(size, depth=8)
self.surface.set_colorkey(0)
self.surface.set_palette_at(0, (255, 255, 255))
def set_palette_at(self, index, color):
self.surface.set_palette_at(index, color)
def set_pixels(self, pixels):
pixels = np.swapaxes(pixels, 0, 1)
pygame.pixelcopy.array_to_surface(self.surface, pixels)
def get_surface(self, size=(320, 240)):
if size != (320, 240):
return pygame.transform.scale(self.surface, size)
else:
return self.surface
class frameSurface:
def __init__(self, size=(320, 240)):
self.size = size
self.paper = pygame.Surface(size, depth=8)
self.layer1 = layerSurface()
self.layer2 = layerSurface()
self.layer3 = layerSurface()
def set_layers(self, layers):
self.layer1.set_pixels(layers[0])
self.layer2.set_pixels(layers[1])
self.layer3.set_pixels(layers[2])
def set_colors(self, colors):
self.paper.set_palette_at(0, PALETTE[colors[0]])
self.layer1.set_palette_at(1, PALETTE[colors[1]])
self.layer1.set_palette_at(2, PALETTE[colors[2]])
self.layer2.set_palette_at(1, PALETTE[colors[3]])
self.layer2.set_palette_at(2, PALETTE[colors[4]])
self.layer3.set_palette_at(1, PALETTE[colors[5]])
self.layer3.set_palette_at(2, PALETTE[colors[6]])
def blit_to(self, surface, pos):
surface.blit(self.paper, pos)
surface.blit(self.layer3.get_surface(self.size), pos)
surface.blit(self.layer2.get_surface(self.size), pos)
surface.blit(self.layer1.get_surface(self.size), pos)
if len(argv) != 2:
print("\nUsage: python3 kwzViewer.py <input.kwz>\n")
exit(1)
else:
with open(argv[1], "rb") as kwz:
parser = KWZParser(kwz)
screen = pygame.display.set_mode((320*2, 240*2))
frame = frameSurface((320*2, 240*2))
pygame.init()
pygame.display.set_caption("crappy proof-of-concept kwz player™")
done = False
frame_index = 0
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
frame.set_layers(parser.decode_frame(frame_index))
frame.set_colors(parser.get_frame_palette(frame_index))
# print("Decoded frame:", frameIndex, "flag:", parser.get_frame_flag(frameIndex))
if frame_index == parser.frame_count - 1:
frame_index = 0
else:
frame_index += 1
frame.blit_to(screen, (0, 0))
pygame.display.flip() | kwzViewer.py |
from sys import argv
from kwz import KWZParser, PALETTE
import pygame
import numpy as np
class layerSurface:
    """One 8-bit indexed drawing layer; palette index 0 is transparent."""

    def __init__(self, size=(320, 240)):
        surface = pygame.Surface(size, depth=8)
        # Index 0 acts as the transparent color key.
        surface.set_colorkey(0)
        surface.set_palette_at(0, (255, 255, 255))
        self.surface = surface

    def set_palette_at(self, index, color):
        """Assign an RGB color to the given palette slot."""
        self.surface.set_palette_at(index, color)

    def set_pixels(self, pixels):
        """Copy a pixel-index array onto the layer surface (axes swapped
        to match pygame's (width, height) ordering)."""
        transposed = np.swapaxes(pixels, 0, 1)
        pygame.pixelcopy.array_to_surface(self.surface, transposed)

    def get_surface(self, size=(320, 240)):
        """Return the surface, scaled when a non-native size is requested."""
        if size == (320, 240):
            return self.surface
        return pygame.transform.scale(self.surface, size)
class frameSurface:
    """Composites the paper background plus three layerSurfaces into one frame."""

    def __init__(self, size=(320, 240)):
        self.size = size
        self.paper = pygame.Surface(size, depth=8)
        self.layer1 = layerSurface()
        self.layer2 = layerSurface()
        self.layer3 = layerSurface()

    def set_layers(self, layers):
        """Load pixel data for the three layers, in order."""
        for layer, pixels in zip((self.layer1, self.layer2, self.layer3), layers):
            layer.set_pixels(pixels)

    def set_colors(self, colors):
        """Apply a 7-entry frame palette: paper color, then two colors per layer."""
        self.paper.set_palette_at(0, PALETTE[colors[0]])
        for base, layer in ((1, self.layer1), (3, self.layer2), (5, self.layer3)):
            layer.set_palette_at(1, PALETTE[colors[base]])
            layer.set_palette_at(2, PALETTE[colors[base + 1]])

    def blit_to(self, surface, pos):
        """Draw the frame at pos, back to front so layer1 ends up on top."""
        surface.blit(self.paper, pos)
        for layer in (self.layer3, self.layer2, self.layer1):
            surface.blit(layer.get_surface(self.size), pos)
if len(argv) != 2:
    print("\nUsage: python3 kwzViewer.py <input.kwz>\n")
    exit(1)
else:
    with open(argv[1], "rb") as kwz:
        parser = KWZParser(kwz)
        # Window and frame buffer are 2x the native 320x240 resolution.
        screen = pygame.display.set_mode((320*2, 240*2))
        frame = frameSurface((320*2, 240*2))
        pygame.init()
        pygame.display.set_caption("crappy proof-of-concept kwz player™")
        done = False
        frame_index = 0
        # Main loop: decode, apply palette, blit, flip -- one flipnote frame
        # per pass.
        # NOTE(review): no pygame.time.Clock is used, so playback runs as fast
        # as the CPU allows rather than at the flipnote's frame rate -- confirm
        # this is intended.
        while not done:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    done = True
            frame.set_layers(parser.decode_frame(frame_index))
            frame.set_colors(parser.get_frame_palette(frame_index))
            # print("Decoded frame:", frameIndex, "flag:", parser.get_frame_flag(frameIndex))
            # Wrap back to the first frame after the last one.
            if frame_index == parser.frame_count - 1:
                frame_index = 0
            else:
                frame_index += 1
            frame.blit_to(screen, (0, 0))
            pygame.display.flip()
import base64
import logging
from datetime import timedelta
from django.conf import settings
from django.utils import timezone
from six.moves.urllib.parse import quote_plus, unquote_plus, parse_qsl
from zooniverse_web.models import Verification
logger = logging.getLogger(__name__)
def check_path(path):
"""Check if path ends with a slash ('/'). Else, it adds a slash.
The function also creates the directory if it does not existing.
Parameters
----------
path : str
A path
Returns
-------
path : str
A functional path
"""
from os import makedirs
from os.path import exists
if len(path) > 0 and path[-1] != '/':
path = path + '/'
if not exists(path):
makedirs(path)
return path
def url_quote(raw):
"""Encode the url to base64
Parameters
----------
raw:
raw url
Returns
-------
url:
encoded url
"""
utf8 = quote_plus(raw).encode('utf8')
return base64.b16encode(utf8).decode('utf8')
def url_unquote(enc):
"""Decode the url from base64
Parameters
----------
enc:
encoded url
Returns
-------
url:
decoded url
"""
unquoted = unquote_plus(base64.b16decode(enc).decode('utf8'))
return unquoted
def get_absolute_site_url(request):
"""
:param request:
:return:
"""
site_name = request.get_host()
if request.is_secure():
protocol = 'https'
else:
protocol = settings.HTTP_PROTOCOL
return protocol + '://' + site_name
def get_token(information, validity=None):
"""
Stores the information in the database and generates a corresponding token
:param information: information that needs to be stored and corresponding token to be generated
:param validity: for how long the token will be valid (in seconds)
:return: token to be encoded in the url
"""
if validity:
now = timezone.localtime(timezone.now())
expiry = now + timedelta(seconds=validity)
else:
expiry = None
try:
verification = Verification.objects.create(information=information, expiry=expiry)
return url_quote('id=' + verification.id.__str__())
except:
logger.info("Failure generating Verification token with {}".format(information))
raise
def get_information(token):
"""
Retrieves the information from the database for a particular token
:param token: encoded token from email
:return: the actual information
"""
now = timezone.localtime(timezone.now())
try:
params = dict(parse_qsl(url_unquote(token)))
verification = Verification.objects.get(id=params.get('id'), expiry__gte=now)
if verification.verified:
raise ValueError('Already verified')
else:
verification.verified = True
verification.save()
return verification.information
except Verification.DoesNotExist:
raise ValueError('Invalid or expired verification code')
except Exception as e:
logger.exception(e) # should notify admins via email
raise | zooniverse_web/utility/utils.py | import base64
import logging
from datetime import timedelta
from django.conf import settings
from django.utils import timezone
from six.moves.urllib.parse import quote_plus, unquote_plus, parse_qsl
from zooniverse_web.models import Verification
logger = logging.getLogger(__name__)
def check_path(path):
    """Ensure *path* ends with a slash ('/') and that the directory exists.

    A trailing slash is appended when missing, and the directory (including
    parents) is created if it does not exist yet.

    Parameters
    ----------
    path : str
        A path

    Returns
    -------
    path : str
        A functional path
    """
    from os import makedirs

    if len(path) > 0 and path[-1] != '/':
        path = path + '/'
    # exist_ok avoids the check-then-create race of exists() + makedirs().
    makedirs(path, exist_ok=True)
    return path
def url_quote(raw):
    """Base16-encode the URL-quoted form of *raw*.

    Parameters
    ----------
    raw:
        raw url

    Returns
    -------
    url:
        encoded url
    """
    encoded = base64.b16encode(quote_plus(raw).encode('utf8'))
    return encoded.decode('utf8')
def url_unquote(enc):
    """Reverse url_quote: base16-decode *enc*, then URL-unquote the result.

    Parameters
    ----------
    enc:
        encoded url

    Returns
    -------
    url:
        decoded url
    """
    quoted = base64.b16decode(enc).decode('utf8')
    return unquote_plus(quoted)
def get_absolute_site_url(request):
    """Build the absolute base URL (scheme://host) for *request*.

    Uses 'https' for secure requests, otherwise the protocol configured in
    settings.HTTP_PROTOCOL.
    :param request:
    :return:
    """
    protocol = 'https' if request.is_secure() else settings.HTTP_PROTOCOL
    return '{}://{}'.format(protocol, request.get_host())
def get_token(information, validity=None):
    """
    Stores the information in the database and generates a corresponding token
    :param information: information that needs to be stored and corresponding token to be generated
    :param validity: for how long the token will be valid (in seconds)
    :return: token to be encoded in the url
    """
    if validity:
        now = timezone.localtime(timezone.now())
        expiry = now + timedelta(seconds=validity)
    else:
        # No validity given: the token gets no expiry timestamp.
        expiry = None
    try:
        verification = Verification.objects.create(information=information, expiry=expiry)
        return url_quote('id=' + str(verification.id))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit pass
        # through; logger.exception records the traceback before re-raising.
        logger.exception("Failure generating Verification token with {}".format(information))
        raise
def get_information(token):
    """
    Retrieves the information from the database for a particular token
    :param token: encoded token from email
    :return: the actual information
    """
    now = timezone.localtime(timezone.now())
    try:
        # Decode the token back into querystring params; 'id' is the
        # Verification primary key that get_token() embedded.
        params = dict(parse_qsl(url_unquote(token)))
        # expiry__gte filters out expired tokens, so a stale token surfaces
        # as DoesNotExist below.
        # NOTE(review): rows created with expiry=None never match expiry__gte
        # -- confirm non-expiring tokens are handled elsewhere.
        verification = Verification.objects.get(id=params.get('id'), expiry__gte=now)
        if verification.verified:
            raise ValueError('Already verified')
        else:
            # Mark as consumed so the token is single-use.
            verification.verified = True
            verification.save()
            return verification.information
    except Verification.DoesNotExist:
        raise ValueError('Invalid or expired verification code')
    except Exception as e:
        # NOTE(review): this broad handler also catches the ValueError raised
        # above for already-verified tokens, logging it before re-raising.
        logger.exception(e)  # should notify admins via email
        raise
import array
import logging
from fontTools import ttLib
from fontTools.pens.hashPointPen import HashPointPen
from fontTools.ttLib.tables._g_l_y_f import (
OVERLAP_COMPOUND,
ROUND_XY_TO_GRID,
USE_MY_METRICS,
)
logger = logging.getLogger(__name__)
TRUETYPE_INSTRUCTIONS_KEY = "public.truetype.instructions"
TRUETYPE_ROUND_KEY = "public.truetype.roundOffsetToGrid"
TRUETYPE_METRICS_KEY = "public.truetype.useMyMetrics"
TRUETYPE_OVERLAP_KEY = "public.truetype.overlap"
OBJECT_LIBS_KEY = "public.objectLibs"
class InstructionCompiler(object):
def __init__(self, ufo, ttf):
self.ufo = ufo
self.font = ttf
def _compile_program(self, key, table_tag):
assert table_tag in ("prep", "fpgm")
ttdata = self.ufo.lib.get(TRUETYPE_INSTRUCTIONS_KEY, None)
if ttdata:
formatVersion = ttdata.get("formatVersion", None)
if int(formatVersion) != 1:
logger.error(
f"Unknown formatVersion {formatVersion} "
f"in key '{key}', "
f"table '{table_tag}' will be empty in font."
)
return
asm = ttdata.get(key, None)
if asm is not None:
self.font[table_tag] = table = ttLib.newTable(table_tag)
table.program = ttLib.tables.ttProgram.Program()
table.program.fromAssembly(asm)
# Roundtrip once, or if the font is dumped to XML before having
# been saved, the assembly code if will look awful.
table.program.fromBytecode(table.program.getBytecode())
def compile_cvt(self):
cvts = []
ttdata = self.ufo.lib.get(TRUETYPE_INSTRUCTIONS_KEY, None)
if ttdata:
formatVersion = ttdata.get("formatVersion", None)
if int(formatVersion) != 1:
logger.error(
f"Unknown formatVersion {formatVersion} "
f"in key 'controlValue', "
f"table 'cvt' will be empty in font."
)
return
cvt_list = ttdata.get("controlValue", None)
if cvt_list is not None:
# Convert string keys to int
cvt_dict = {int(v["id"]): v["value"] for v in cvt_list}
# Find the maximum cvt index.
# We can't just use the dict keys because the cvt must be
# filled consecutively.
max_cvt = max(cvt_dict.keys())
# Make value list, filling entries for missing keys with 0
cvts = [cvt_dict.get(i, 0) for i in range(max_cvt + 1)]
if cvts:
# Only write cvt to font if it contains any values
self.font["cvt "] = cvt = ttLib.newTable("cvt ")
cvt.values = array.array("h", cvts)
def compile_fpgm(self):
self._compile_program("fontProgram", "fpgm")
def compile_glyf(self):
for name in sorted(self.ufo.keys()):
glyph = self.ufo[name]
ttdata = glyph.lib.get(TRUETYPE_INSTRUCTIONS_KEY, None)
if name not in self.font["glyf"]:
if ttdata is not None:
logger.warning(
f"Glyph '{name}' not found in font, "
"skipping compilation of TrueType instructions "
"for this glyph."
)
logger.debug(f"UFO keys: {list(self.ufo.keys())}")
logger.debug(f"glyf keys: {list(self.font['glyf'].keys())}")
continue
glyf = self.font["glyf"][name]
if ttdata is not None:
formatVersion = ttdata.get("formatVersion", None)
if int(formatVersion) != 1:
logger.error(
f"Unknown formatVersion {formatVersion} "
"in glyph '{name}', it will have "
"no instructions in font."
)
continue
# Check if glyph hash matches the current outlines
hash_pen = HashPointPen(glyph.width, self.ufo)
glyph.drawPoints(hash_pen)
glyph_id = ttdata.get("id", None)
if glyph_id is None or glyph_id != hash_pen.hash:
logger.error(
f"Glyph hash mismatch, glyph '{name}' will have "
"no instructions in font."
)
continue
# Compile the glyph program
asm = ttdata.get("assembly", None)
if asm is not None:
glyf.program = ttLib.tables.ttProgram.Program()
glyf.program.fromAssembly(asm)
# Roundtrip once, or if the font is dumped to XML before
# having been saved, the assembly code if will look awful.
glyf.program.fromBytecode(glyf.program.getBytecode())
# Handle composites
if glyf.isComposite():
# Remove empty glyph programs from composite glyphs
if hasattr(glyf, "program") and not glyf.program:
delattr(glyf, "program")
# Set component flags
# We need to decide when to set the flags.
# Let's assume if any lib key is not there, or the component
# doesn't have an identifier, we should leave the flags alone.
use_my_metrics_comp = None
for i, c in enumerate(glyf.components):
if i >= len(glyph.components):
logger.error(
"Number of components differ between UFO and TTF "
f"in glyph '{name}' ({len(glyph.components)} vs. "
f"{len(glyf.components)}, not setting flags in "
"additional components."
)
break
ufo_component_id = glyph.components[i].identifier
if (
ufo_component_id is not None
and OBJECT_LIBS_KEY in glyph.lib
and ufo_component_id in glyph.lib[OBJECT_LIBS_KEY]
and (
TRUETYPE_ROUND_KEY
in glyph.lib[OBJECT_LIBS_KEY][ufo_component_id]
or TRUETYPE_METRICS_KEY
in glyph.lib[OBJECT_LIBS_KEY][ufo_component_id]
)
):
component_lib = glyph.lib[OBJECT_LIBS_KEY][ufo_component_id]
c.flags &= ~ROUND_XY_TO_GRID
if component_lib.get(TRUETYPE_ROUND_KEY, False):
c.flags |= ROUND_XY_TO_GRID
c.flags &= ~USE_MY_METRICS
if component_lib.get(TRUETYPE_METRICS_KEY, False):
if use_my_metrics_comp:
logger.warning(
"Ignoring USE_MY_METRICS flag on component "
f"'{ufo_component_id}' because it has been set on "
f"component '{use_my_metrics_comp}' already."
)
else:
c.flags |= USE_MY_METRICS
use_my_metrics_comp = ufo_component_id
# We might automatically set the flags if no data is present,
# but:
# - https://github.com/googlefonts/ufo2ft/pull/425 recommends
# against setting the ROUND_XY_TO_GRID flag
# - USE_MY_METRICS has been set already by
# outlineCompiler.OutlineTTFCompiler.autoUseMyMetrics
if i == 0 and TRUETYPE_OVERLAP_KEY in glyph.lib:
# Set OVERLAP_COMPOUND on the first component only
c.flags &= ~OVERLAP_COMPOUND
if glyph.lib.get(TRUETYPE_OVERLAP_KEY, False):
c.flags |= OVERLAP_COMPOUND
def compile_maxp(self):
maxp = self.font["maxp"]
ttdata = self.ufo.lib.get(TRUETYPE_INSTRUCTIONS_KEY, None)
if ttdata:
for name in (
"maxStorage",
"maxFunctionDefs",
"maxInstructionDefs",
"maxStackElements",
# "maxSizeOfInstructions", # Is recalculated below
"maxZones",
"maxTwilightPoints",
):
value = ttdata.get(name, None)
if value is not None:
setattr(maxp, name, value)
# Recalculate maxp.maxSizeOfInstructions
sizes = [
len(glyph.program.getBytecode())
for glyph in self.font["glyf"].glyphs.values()
if hasattr(glyph, "program")
] + [0]
maxp.maxSizeOfInstructions = max(sizes)
def compile_prep(self):
self._compile_program("controlValueProgram", "prep")
def compile(self):
self.compile_cvt()
self.compile_fpgm()
self.compile_prep()
self.compile_glyf()
# maxp depends on the other programs, to it needs to be last
self.compile_maxp() | Lib/ufo2ft/instructionCompiler.py | import array
import logging
from fontTools import ttLib
from fontTools.pens.hashPointPen import HashPointPen
from fontTools.ttLib.tables._g_l_y_f import (
OVERLAP_COMPOUND,
ROUND_XY_TO_GRID,
USE_MY_METRICS,
)
logger = logging.getLogger(__name__)
TRUETYPE_INSTRUCTIONS_KEY = "public.truetype.instructions"
TRUETYPE_ROUND_KEY = "public.truetype.roundOffsetToGrid"
TRUETYPE_METRICS_KEY = "public.truetype.useMyMetrics"
TRUETYPE_OVERLAP_KEY = "public.truetype.overlap"
OBJECT_LIBS_KEY = "public.objectLibs"
class InstructionCompiler(object):
    """Compile TrueType hinting data stored in UFO lib keys into a TTFont.

    Reads the ``public.truetype.*`` lib keys from the UFO and writes the
    corresponding ``fpgm``, ``prep``, ``cvt ``, per-glyph ``glyf`` programs
    and ``maxp`` values into the already-built TTFont.
    """

    def __init__(self, ufo, ttf):
        # ufo: source UFO object; ttf: target fontTools TTFont.
        self.ufo = ufo
        self.font = ttf

    @staticmethod
    def _is_supported_format(formatVersion):
        """Return True if *formatVersion* denotes the only supported version, 1.

        Tolerates missing (None) or non-numeric values instead of raising
        TypeError/ValueError from int().
        """
        try:
            return int(formatVersion) == 1
        except (TypeError, ValueError):
            return False

    def _compile_program(self, key, table_tag):
        """Compile the assembly stored under *key* into the prep or fpgm table."""
        assert table_tag in ("prep", "fpgm")
        ttdata = self.ufo.lib.get(TRUETYPE_INSTRUCTIONS_KEY, None)
        if ttdata:
            formatVersion = ttdata.get("formatVersion", None)
            if not self._is_supported_format(formatVersion):
                logger.error(
                    f"Unknown formatVersion {formatVersion} "
                    f"in key '{key}', "
                    f"table '{table_tag}' will be empty in font."
                )
                return
            asm = ttdata.get(key, None)
            if asm is not None:
                self.font[table_tag] = table = ttLib.newTable(table_tag)
                table.program = ttLib.tables.ttProgram.Program()
                table.program.fromAssembly(asm)
                # Roundtrip once, or if the font is dumped to XML before having
                # been saved, the assembly code will look awful.
                table.program.fromBytecode(table.program.getBytecode())

    def compile_cvt(self):
        """Build the 'cvt ' (control value) table from the UFO lib data."""
        cvts = []
        ttdata = self.ufo.lib.get(TRUETYPE_INSTRUCTIONS_KEY, None)
        if ttdata:
            formatVersion = ttdata.get("formatVersion", None)
            if not self._is_supported_format(formatVersion):
                logger.error(
                    f"Unknown formatVersion {formatVersion} "
                    f"in key 'controlValue', "
                    f"table 'cvt' will be empty in font."
                )
                return
            cvt_list = ttdata.get("controlValue", None)
            if cvt_list is not None:
                # Convert string keys to int
                cvt_dict = {int(v["id"]): v["value"] for v in cvt_list}
                # Find the maximum cvt index.
                # We can't just use the dict keys because the cvt must be
                # filled consecutively.
                max_cvt = max(cvt_dict.keys())
                # Make value list, filling entries for missing keys with 0
                cvts = [cvt_dict.get(i, 0) for i in range(max_cvt + 1)]
        if cvts:
            # Only write cvt to font if it contains any values
            self.font["cvt "] = cvt = ttLib.newTable("cvt ")
            cvt.values = array.array("h", cvts)

    def compile_fpgm(self):
        """Compile the UFO's font program into the fpgm table."""
        self._compile_program("fontProgram", "fpgm")

    def compile_glyf(self):
        """Compile per-glyph instructions and set composite component flags."""
        for name in sorted(self.ufo.keys()):
            glyph = self.ufo[name]
            ttdata = glyph.lib.get(TRUETYPE_INSTRUCTIONS_KEY, None)
            if name not in self.font["glyf"]:
                if ttdata is not None:
                    logger.warning(
                        f"Glyph '{name}' not found in font, "
                        "skipping compilation of TrueType instructions "
                        "for this glyph."
                    )
                    logger.debug(f"UFO keys: {list(self.ufo.keys())}")
                    logger.debug(f"glyf keys: {list(self.font['glyf'].keys())}")
                continue
            glyf = self.font["glyf"][name]
            if ttdata is not None:
                formatVersion = ttdata.get("formatVersion", None)
                if not self._is_supported_format(formatVersion):
                    # Was missing the f-prefix on the continuation string, so
                    # the glyph name was logged literally as "{name}".
                    logger.error(
                        f"Unknown formatVersion {formatVersion} "
                        f"in glyph '{name}', it will have "
                        "no instructions in font."
                    )
                    continue

                # Check if glyph hash matches the current outlines
                hash_pen = HashPointPen(glyph.width, self.ufo)
                glyph.drawPoints(hash_pen)
                glyph_id = ttdata.get("id", None)
                if glyph_id is None or glyph_id != hash_pen.hash:
                    logger.error(
                        f"Glyph hash mismatch, glyph '{name}' will have "
                        "no instructions in font."
                    )
                    continue

                # Compile the glyph program
                asm = ttdata.get("assembly", None)
                if asm is not None:
                    glyf.program = ttLib.tables.ttProgram.Program()
                    glyf.program.fromAssembly(asm)
                    # Roundtrip once, or if the font is dumped to XML before
                    # having been saved, the assembly code will look awful.
                    glyf.program.fromBytecode(glyf.program.getBytecode())

            # Handle composites
            if glyf.isComposite():
                # Remove empty glyph programs from composite glyphs
                if hasattr(glyf, "program") and not glyf.program:
                    delattr(glyf, "program")

                # Set component flags
                # We need to decide when to set the flags.
                # Let's assume if any lib key is not there, or the component
                # doesn't have an identifier, we should leave the flags alone.

                # Name of the component USE_MY_METRICS was already granted to.
                use_my_metrics_comp = None
                for i, c in enumerate(glyf.components):
                    if i >= len(glyph.components):
                        logger.error(
                            "Number of components differ between UFO and TTF "
                            f"in glyph '{name}' ({len(glyph.components)} vs. "
                            f"{len(glyf.components)}), not setting flags in "
                            "additional components."
                        )
                        break
                    ufo_component_id = glyph.components[i].identifier
                    if (
                        ufo_component_id is not None
                        and OBJECT_LIBS_KEY in glyph.lib
                        and ufo_component_id in glyph.lib[OBJECT_LIBS_KEY]
                        and (
                            TRUETYPE_ROUND_KEY
                            in glyph.lib[OBJECT_LIBS_KEY][ufo_component_id]
                            or TRUETYPE_METRICS_KEY
                            in glyph.lib[OBJECT_LIBS_KEY][ufo_component_id]
                        )
                    ):
                        component_lib = glyph.lib[OBJECT_LIBS_KEY][ufo_component_id]

                        c.flags &= ~ROUND_XY_TO_GRID
                        if component_lib.get(TRUETYPE_ROUND_KEY, False):
                            c.flags |= ROUND_XY_TO_GRID

                        c.flags &= ~USE_MY_METRICS
                        if component_lib.get(TRUETYPE_METRICS_KEY, False):
                            if use_my_metrics_comp:
                                # Only one component may carry USE_MY_METRICS.
                                logger.warning(
                                    "Ignoring USE_MY_METRICS flag on component "
                                    f"'{ufo_component_id}' because it has been set on "
                                    f"component '{use_my_metrics_comp}' already."
                                )
                            else:
                                c.flags |= USE_MY_METRICS
                                use_my_metrics_comp = ufo_component_id

                    # We might automatically set the flags if no data is present,
                    # but:
                    # - https://github.com/googlefonts/ufo2ft/pull/425 recommends
                    #   against setting the ROUND_XY_TO_GRID flag
                    # - USE_MY_METRICS has been set already by
                    #   outlineCompiler.OutlineTTFCompiler.autoUseMyMetrics

                    if i == 0 and TRUETYPE_OVERLAP_KEY in glyph.lib:
                        # Set OVERLAP_COMPOUND on the first component only
                        c.flags &= ~OVERLAP_COMPOUND
                        if glyph.lib.get(TRUETYPE_OVERLAP_KEY, False):
                            c.flags |= OVERLAP_COMPOUND

    def compile_maxp(self):
        """Copy hinting-related maxp limits from the UFO lib and recompute
        maxSizeOfInstructions from the actual glyph programs."""
        maxp = self.font["maxp"]
        ttdata = self.ufo.lib.get(TRUETYPE_INSTRUCTIONS_KEY, None)
        if ttdata:
            for name in (
                "maxStorage",
                "maxFunctionDefs",
                "maxInstructionDefs",
                "maxStackElements",
                # "maxSizeOfInstructions",  # Is recalculated below
                "maxZones",
                "maxTwilightPoints",
            ):
                value = ttdata.get(name, None)
                if value is not None:
                    setattr(maxp, name, value)
        # Recalculate maxp.maxSizeOfInstructions
        sizes = [
            len(glyph.program.getBytecode())
            for glyph in self.font["glyf"].glyphs.values()
            if hasattr(glyph, "program")
        ] + [0]
        maxp.maxSizeOfInstructions = max(sizes)

    def compile_prep(self):
        """Compile the UFO's control value program into the prep table."""
        self._compile_program("controlValueProgram", "prep")

    def compile(self):
        """Compile all supported tables into the target font."""
        self.compile_cvt()
        self.compile_fpgm()
        self.compile_prep()
        self.compile_glyf()
        # maxp depends on the other programs, so it needs to be last
        self.compile_maxp()
from django.conf import settings
HIDE_PROMO = getattr(
settings,
'COMPONENTS_HIDE_PROMO',
False,
)
HIDE_PROMO_ROLLOVER = getattr(
settings,
'COMPONENTS_HIDE_PROMO_ROLLOVER',
True,
)
HIDE_PROMO_VIDEO = getattr(
settings,
'COMPONENTS_HIDE_PROMO_VIDEO',
True,
)
PROMO_LAYOUTS = getattr(
settings,
'COMPONENTS_PROMO_LAYOUTS',
(),
)
HIDE_TWITTER = getattr(
settings,
'COMPONENTS_HIDE_TWITTER',
False,
)
TWITTER_LAYOUTS = getattr(
settings,
'COMPONENTS_TWITTER_LAYOUTS',
(),
)
TWITTER_APP_KEY = getattr(
settings,
'TWITTER_APP_KEY',
False,
)
TWITTER_APP_SECRET = getattr(
settings,
'TWITTER_APP_SECRET',
False,
)
TWITTER_OAUTH_TOKEN = getattr(
settings,
'TWITTER_OAUTH_TOKEN',
False,
)
TWITTER_OAUTH_SECRET = getattr(
settings,
'TWITTER_OAUTH_SECRET',
False,
)
HIDE_COUNTERS = getattr(
settings,
'COMPONENTS_HIDE_COUNTERS',
False,
)
COUNTERS_LAYOUTS = getattr(
settings,
'COMPONENTS_COUNTERS_LAYOUTS',
(),
)
CUSTOM_LAYOUTS = getattr(
settings,
'COMPONENTS_CUSTOM_LAYOUTS',
(),
)
CUSTOM_PLUGINS = getattr(
settings,
'COMPONENTS_CUSTOM_PLUGINS',
{},
)
HIDE_RAWHTML = getattr(
settings,
'COMPONENTS_HIDE_RAWHTML',
False,
)
HIDE_FLOAT = getattr(
settings,
'COMPONENTS_HIDE_FLOAT',
True,
)
HIDE_LIGHTBOX = getattr(
settings,
'COMPONENTS_HIDE_LIGHTBOX',
True,
)
LIGHTBOX_LAYOUTS = getattr(
settings,
'COMPONENTS_LIGHTBOX_LAYOUTS',
(),
)
HIDE_GATED_CONTENT = getattr(
settings,
'COMPONENTS_HIDE_GATED_CONTENT',
True,
)
GATED_CONTENT_LAYOUTS = getattr(
settings,
'COMPONENTS_GATED_CONTENT_LAYOUTS',
(),
)
FLOAT_LAYOUTS = getattr(
settings,
'COMPONENTS_FLOAT_LAYOUTS',
(),
)
TWYTHON_KWARGS = getattr(
settings,
'COMPONENTS_TWYTHON_KWARGS',
{'use_display_url': True},
)
TWITTER_CACHE_TIMEOUT = getattr(
settings,
'COMPONENTS_TWITTER_CACHE_TIMEOUT',
60*60*24,
)
PROMO_CHILD_CLASSES = getattr(
settings,
'COMPONENTS_PROMO_CHILD_CLASSES',
[]
)
ALL_ANIMATIONS = [
# Attention seekers
'bounce',
'flash',
'pulse',
'rubberBand',
'shakeX',
'shakeY',
'headShake',
'swing',
'tada',
'wobble',
'jello',
'heartBeat',
# Back entrances
'backInDown',
'backInLeft',
'backInRight',
'backInUp',
# Back exits
'backOutDown',
'backOutLeft',
'backOutRight',
'backOutUp',
# Bouncing entrances
'bounceIn',
'bounceInDown',
'bounceInLeft',
'bounceInRight',
'bounceInUp',
# Bouncing exits
'bounceOut',
'bounceOutDown',
'bounceOutLeft',
'bounceOutRight',
'bounceOutUp',
# Fading entrances
'fadeIn',
'fadeInDown',
'fadeInDownBig',
'fadeInLeft',
'fadeInLeftBig',
'fadeInRight',
'fadeInRightBig',
'fadeInUp',
'fadeInUpBig',
'fadeInTopLeft',
'fadeInTopRight',
'fadeInBottomLeft',
'fadeInBottomRight',
# Fading exits
'fadeOut',
'fadeOutDown',
'fadeOutDownBig',
'fadeOutLeft',
'fadeOutLeftBig',
'fadeOutRight',
'fadeOutRightBig',
'fadeOutUp',
'fadeOutUpBig',
'fadeOutTopLeft',
'fadeOutTopRight',
'fadeOutBottomRight',
'fadeOutBottomLeft',
# Flippers
'flip',
'flipInX',
'flipInY',
'flipOutX',
'flipOutY',
# Lightspeed
'lightSpeedInRight',
'lightSpeedInLeft',
'lightSpeedOutRight',
'lightSpeedOutLeft',
# Rotating entrances
'rotateIn',
'rotateInDownLeft',
'rotateInDownRight',
'rotateInUpLeft',
'rotateInUpRight',
# Rotating exits
'rotateOut',
'rotateOutDownLeft',
'rotateOutDownRight',
'rotateOutUpLeft',
'rotateOutUpRight',
# Specials
'hinge',
'jackInTheBox',
'rollIn',
'rollOut',
# Zooming entrances
'zoomIn',
'zoomInDown',
'zoomInLeft',
'zoomInRight',
'zoomInUp',
# Zooming exits
'zoomOut',
'zoomOutDown',
'zoomOutLeft',
'zoomOutRight',
'zoomOutUp',
# Sliding entrances
'slideInDown',
'slideInLeft',
'slideInRight',
'slideInUp',
# Sliding exits
'slideOutDown',
'slideOutLeft',
'slideOutRight',
'slideOutUp']
ENABLED_ANIMATIONS = getattr(
settings,
'COMPONENTS_ENABLED_ANIMATIONS',
('*'),
)
DISABLED_ANIMATIONS = getattr(
settings,
'COMPONENTS_DISABLED_ANIMATIONS',
(),
)
import re
enabled = re.findall("(%s)" % "|".join(map(lambda x: x.strip().replace('*', '\S*'), ENABLED_ANIMATIONS)), ' '.join(ALL_ANIMATIONS), re.I)
disabled = re.findall("(%s)" % "|".join(map(lambda x: x.strip().replace('*', '\S*'), DISABLED_ANIMATIONS)), ' '.join(enabled), re.I)
ANIMATIONS = list(set(enabled) - set(disabled))
ANIMATIONS.sort() | js_components/constants.py |
from django.conf import settings
HIDE_PROMO = getattr(
settings,
'COMPONENTS_HIDE_PROMO',
False,
)
HIDE_PROMO_ROLLOVER = getattr(
settings,
'COMPONENTS_HIDE_PROMO_ROLLOVER',
True,
)
HIDE_PROMO_VIDEO = getattr(
settings,
'COMPONENTS_HIDE_PROMO_VIDEO',
True,
)
PROMO_LAYOUTS = getattr(
settings,
'COMPONENTS_PROMO_LAYOUTS',
(),
)
HIDE_TWITTER = getattr(
settings,
'COMPONENTS_HIDE_TWITTER',
False,
)
TWITTER_LAYOUTS = getattr(
settings,
'COMPONENTS_TWITTER_LAYOUTS',
(),
)
TWITTER_APP_KEY = getattr(
settings,
'TWITTER_APP_KEY',
False,
)
TWITTER_APP_SECRET = getattr(
settings,
'TWITTER_APP_SECRET',
False,
)
TWITTER_OAUTH_TOKEN = getattr(
settings,
'TWITTER_OAUTH_TOKEN',
False,
)
TWITTER_OAUTH_SECRET = getattr(
settings,
'TWITTER_OAUTH_SECRET',
False,
)
HIDE_COUNTERS = getattr(
settings,
'COMPONENTS_HIDE_COUNTERS',
False,
)
COUNTERS_LAYOUTS = getattr(
settings,
'COMPONENTS_COUNTERS_LAYOUTS',
(),
)
CUSTOM_LAYOUTS = getattr(
settings,
'COMPONENTS_CUSTOM_LAYOUTS',
(),
)
CUSTOM_PLUGINS = getattr(
settings,
'COMPONENTS_CUSTOM_PLUGINS',
{},
)
HIDE_RAWHTML = getattr(
settings,
'COMPONENTS_HIDE_RAWHTML',
False,
)
HIDE_FLOAT = getattr(
settings,
'COMPONENTS_HIDE_FLOAT',
True,
)
HIDE_LIGHTBOX = getattr(
settings,
'COMPONENTS_HIDE_LIGHTBOX',
True,
)
LIGHTBOX_LAYOUTS = getattr(
settings,
'COMPONENTS_LIGHTBOX_LAYOUTS',
(),
)
HIDE_GATED_CONTENT = getattr(
settings,
'COMPONENTS_HIDE_GATED_CONTENT',
True,
)
GATED_CONTENT_LAYOUTS = getattr(
settings,
'COMPONENTS_GATED_CONTENT_LAYOUTS',
(),
)
FLOAT_LAYOUTS = getattr(
settings,
'COMPONENTS_FLOAT_LAYOUTS',
(),
)
TWYTHON_KWARGS = getattr(
settings,
'COMPONENTS_TWYTHON_KWARGS',
{'use_display_url': True},
)
TWITTER_CACHE_TIMEOUT = getattr(
settings,
'COMPONENTS_TWITTER_CACHE_TIMEOUT',
60*60*24,
)
PROMO_CHILD_CLASSES = getattr(
settings,
'COMPONENTS_PROMO_CHILD_CLASSES',
[]
)
ALL_ANIMATIONS = [
# Attention seekers
'bounce',
'flash',
'pulse',
'rubberBand',
'shakeX',
'shakeY',
'headShake',
'swing',
'tada',
'wobble',
'jello',
'heartBeat',
# Back entrances
'backInDown',
'backInLeft',
'backInRight',
'backInUp',
# Back exits
'backOutDown',
'backOutLeft',
'backOutRight',
'backOutUp',
# Bouncing entrances
'bounceIn',
'bounceInDown',
'bounceInLeft',
'bounceInRight',
'bounceInUp',
# Bouncing exits
'bounceOut',
'bounceOutDown',
'bounceOutLeft',
'bounceOutRight',
'bounceOutUp',
# Fading entrances
'fadeIn',
'fadeInDown',
'fadeInDownBig',
'fadeInLeft',
'fadeInLeftBig',
'fadeInRight',
'fadeInRightBig',
'fadeInUp',
'fadeInUpBig',
'fadeInTopLeft',
'fadeInTopRight',
'fadeInBottomLeft',
'fadeInBottomRight',
# Fading exits
'fadeOut',
'fadeOutDown',
'fadeOutDownBig',
'fadeOutLeft',
'fadeOutLeftBig',
'fadeOutRight',
'fadeOutRightBig',
'fadeOutUp',
'fadeOutUpBig',
'fadeOutTopLeft',
'fadeOutTopRight',
'fadeOutBottomRight',
'fadeOutBottomLeft',
# Flippers
'flip',
'flipInX',
'flipInY',
'flipOutX',
'flipOutY',
# Lightspeed
'lightSpeedInRight',
'lightSpeedInLeft',
'lightSpeedOutRight',
'lightSpeedOutLeft',
# Rotating entrances
'rotateIn',
'rotateInDownLeft',
'rotateInDownRight',
'rotateInUpLeft',
'rotateInUpRight',
# Rotating exits
'rotateOut',
'rotateOutDownLeft',
'rotateOutDownRight',
'rotateOutUpLeft',
'rotateOutUpRight',
# Specials
'hinge',
'jackInTheBox',
'rollIn',
'rollOut',
# Zooming entrances
'zoomIn',
'zoomInDown',
'zoomInLeft',
'zoomInRight',
'zoomInUp',
# Zooming exits
'zoomOut',
'zoomOutDown',
'zoomOutLeft',
'zoomOutRight',
'zoomOutUp',
# Sliding entrances
'slideInDown',
'slideInLeft',
'slideInRight',
'slideInUp',
# Sliding exits
'slideOutDown',
'slideOutLeft',
'slideOutRight',
'slideOutUp']
ENABLED_ANIMATIONS = getattr(
    settings,
    'COMPONENTS_ENABLED_ANIMATIONS',
    # One-element tuple; the original default ('*') was a plain string that
    # only worked by accident because iterating '*' yields '*'.
    ('*',),
)
DISABLED_ANIMATIONS = getattr(
    settings,
    'COMPONENTS_DISABLED_ANIMATIONS',
    (),
)
import re


def _filter_animations(patterns, candidates):
    """Return the candidates matched by any glob-style pattern ('*' wildcard).

    Empty matches produced by patterns like '*' are dropped, so an empty
    string can no longer leak into the final animation list when
    DISABLED_ANIMATIONS is customized.
    """
    if not patterns:
        return []
    # Raw string for the regex fragment (plain '\S*' is an invalid escape).
    regex = "(%s)" % "|".join(p.strip().replace('*', r'\S*') for p in patterns)
    return [m for m in re.findall(regex, ' '.join(candidates), re.I) if m]


enabled = _filter_animations(ENABLED_ANIMATIONS, ALL_ANIMATIONS)
disabled = _filter_animations(DISABLED_ANIMATIONS, enabled)
ANIMATIONS = list(set(enabled) - set(disabled))
ANIMATIONS.sort()
import assessment.builder.validators
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Activity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
('status', models.CharField(choices=[('draft', 'Draft'), ('active', 'Active'), ('retired', 'Retired')], default='active', max_length=8)),
('label', models.CharField(help_text='Unique label for this classification. e.g., Education & Research', max_length=64)),
('slug', models.SlugField(help_text='Unique, short abbreviation e.g., edu-research for Education & Research', max_length=64, unique=True)),
('description', models.TextField(blank=True, help_text='Optional description of the classification.')),
],
options={
'verbose_name': 'Activity',
'verbose_name_plural': '1. Activities',
'ordering': ('order',),
'abstract': False,
},
),
migrations.CreateModel(
name='AssessmentCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(choices=[('draft', 'Draft'), ('active', 'Active'), ('retired', 'Retired')], default='active', max_length=8)),
('label', models.CharField(blank=True, help_text='Unique label for this assessment. e.g., Student Learning Experience', max_length=64)),
('slug', models.SlugField(blank=True, help_text='Unique, short abbreviation for category. e.g. student-experience', max_length=64, unique=True)),
('description', models.TextField(blank=True, help_text='Optional description of this Assessment Category.')),
],
options={
'verbose_name': 'Assessment Category',
'verbose_name_plural': '3. Assessment Categories',
},
managers=[
('active', django.db.models.manager.Manager()),
],
),
migrations.CreateModel(
name='AssessmentMetric',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
('status', models.CharField(choices=[('draft', 'Draft'), ('active', 'Active'), ('retired', 'Retired')], default='active', max_length=8)),
('label', models.CharField(help_text='Label for this metric. e.g., Uses appropriate font size', max_length=64)),
('description', models.TextField(blank=True, help_text='Optional description of how to assess this metric.')),
],
options={
'verbose_name': 'Metric',
'verbose_name_plural': '5. Metrics',
'ordering': ('order',),
},
),
migrations.CreateModel(
name='AssessmentQuestion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
('status', models.CharField(choices=[('draft', 'Draft'), ('active', 'Active'), ('retired', 'Retired')], default='active', max_length=8)),
('label', models.CharField(help_text='Short label for this question. e.g., Use of Visual Aids', max_length=64)),
('description', models.TextField(blank=True, help_text='Complete question text or detailed description of concern to be assessed.')),
],
options={
'verbose_name': 'Question',
'verbose_name_plural': '4. Questions',
'ordering': ('order',),
},
),
migrations.CreateModel(
name='MetricChoicesType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(help_text='Short label for these choices. e.g., Percentage Range', max_length=64)),
('choice_map', models.TextField(help_text='JSON enoded dictionary mapping choices to integer values. \n E.g., { "< 80%" : 0, "80 - 90%" : 1, ">90%" : 2 }', validators=[assessment.builder.validators.validate_JSON_scoring_choices], verbose_name='Choices')),
],
options={
'verbose_name': 'Metric Choices Type',
'verbose_name_plural': 'Metric Choices Types',
},
),
migrations.CreateModel(
name='ReferenceDocument',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
('label', models.CharField(help_text='Short label for this document. e.g., SOP Classroom Safety', max_length=128)),
('description', models.TextField(blank=True, help_text='Optional description of document and/or how it is used in the Assessment.')),
('url', models.URLField(help_text='URL for this document. e.g., https://docs.example.com/sop/classroom-safety.pdf ', max_length=256)),
],
options={
'ordering': ('order',),
'abstract': False,
},
),
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
('status', models.CharField(choices=[('draft', 'Draft'), ('active', 'Active'), ('retired', 'Retired')], default='active', max_length=8)),
('label', models.CharField(help_text='Unique label for this classification. e.g., Education & Research', max_length=64)),
('slug', models.SlugField(help_text='Unique, short abbreviation e.g., edu-research for Education & Research', max_length=64, unique=True)),
('description', models.TextField(blank=True, help_text='Optional description of the classification.')),
],
options={
'verbose_name': 'Topic',
'verbose_name_plural': '2. Topics',
'ordering': ('order',),
'abstract': False,
},
),
migrations.AddConstraint(
model_name='topic',
constraint=models.UniqueConstraint(condition=models.Q(status='active'), fields=('label',), name='topic_unique_active_label'),
),
migrations.AddField(
model_name='referencedocument',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reference_docs', to='builder.AssessmentCategory'),
),
migrations.AddField(
model_name='assessmentquestion',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='question_set', to='builder.AssessmentCategory'),
),
migrations.AddField(
model_name='assessmentmetric',
name='choices',
field=models.ForeignKey(help_text='Define choices used to score this metric.', on_delete=django.db.models.deletion.CASCADE, to='builder.MetricChoicesType'),
),
migrations.AddField(
model_name='assessmentmetric',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='metric_set', to='builder.AssessmentQuestion'),
),
migrations.AddField(
model_name='assessmentcategory',
name='activity',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_set', to='builder.Activity'),
),
migrations.AddField(
model_name='assessmentcategory',
name='topic',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_set', to='builder.Topic'),
),
migrations.AddConstraint(
model_name='activity',
constraint=models.UniqueConstraint(condition=models.Q(status='active'), fields=('label',), name='activity_unique_active_label'),
),
migrations.AddConstraint(
model_name='assessmentcategory',
constraint=models.UniqueConstraint(fields=('activity', 'topic'), name='unique_activity_topic'),
),
] | assessment/builder/migrations/0001_initial.py |
import assessment.builder.validators
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Activity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
('status', models.CharField(choices=[('draft', 'Draft'), ('active', 'Active'), ('retired', 'Retired')], default='active', max_length=8)),
('label', models.CharField(help_text='Unique label for this classification. e.g., Education & Research', max_length=64)),
('slug', models.SlugField(help_text='Unique, short abbreviation e.g., edu-research for Education & Research', max_length=64, unique=True)),
('description', models.TextField(blank=True, help_text='Optional description of the classification.')),
],
options={
'verbose_name': 'Activity',
'verbose_name_plural': '1. Activities',
'ordering': ('order',),
'abstract': False,
},
),
migrations.CreateModel(
name='AssessmentCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(choices=[('draft', 'Draft'), ('active', 'Active'), ('retired', 'Retired')], default='active', max_length=8)),
('label', models.CharField(blank=True, help_text='Unique label for this assessment. e.g., Student Learning Experience', max_length=64)),
('slug', models.SlugField(blank=True, help_text='Unique, short abbreviation for category. e.g. student-experience', max_length=64, unique=True)),
('description', models.TextField(blank=True, help_text='Optional description of this Assessment Category.')),
],
options={
'verbose_name': 'Assessment Category',
'verbose_name_plural': '3. Assessment Categories',
},
managers=[
('active', django.db.models.manager.Manager()),
],
),
migrations.CreateModel(
name='AssessmentMetric',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
('status', models.CharField(choices=[('draft', 'Draft'), ('active', 'Active'), ('retired', 'Retired')], default='active', max_length=8)),
('label', models.CharField(help_text='Label for this metric. e.g., Uses appropriate font size', max_length=64)),
('description', models.TextField(blank=True, help_text='Optional description of how to assess this metric.')),
],
options={
'verbose_name': 'Metric',
'verbose_name_plural': '5. Metrics',
'ordering': ('order',),
},
),
migrations.CreateModel(
name='AssessmentQuestion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
('status', models.CharField(choices=[('draft', 'Draft'), ('active', 'Active'), ('retired', 'Retired')], default='active', max_length=8)),
('label', models.CharField(help_text='Short label for this question. e.g., Use of Visual Aids', max_length=64)),
('description', models.TextField(blank=True, help_text='Complete question text or detailed description of concern to be assessed.')),
],
options={
'verbose_name': 'Question',
'verbose_name_plural': '4. Questions',
'ordering': ('order',),
},
),
migrations.CreateModel(
name='MetricChoicesType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(help_text='Short label for these choices. e.g., Percentage Range', max_length=64)),
('choice_map', models.TextField(help_text='JSON enoded dictionary mapping choices to integer values. \n E.g., { "< 80%" : 0, "80 - 90%" : 1, ">90%" : 2 }', validators=[assessment.builder.validators.validate_JSON_scoring_choices], verbose_name='Choices')),
],
options={
'verbose_name': 'Metric Choices Type',
'verbose_name_plural': 'Metric Choices Types',
},
),
migrations.CreateModel(
name='ReferenceDocument',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
('label', models.CharField(help_text='Short label for this document. e.g., SOP Classroom Safety', max_length=128)),
('description', models.TextField(blank=True, help_text='Optional description of document and/or how it is used in the Assessment.')),
('url', models.URLField(help_text='URL for this document. e.g., https://docs.example.com/sop/classroom-safety.pdf ', max_length=256)),
],
options={
'ordering': ('order',),
'abstract': False,
},
),
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.PositiveIntegerField(db_index=True, editable=False, verbose_name='order')),
('status', models.CharField(choices=[('draft', 'Draft'), ('active', 'Active'), ('retired', 'Retired')], default='active', max_length=8)),
('label', models.CharField(help_text='Unique label for this classification. e.g., Education & Research', max_length=64)),
('slug', models.SlugField(help_text='Unique, short abbreviation e.g., edu-research for Education & Research', max_length=64, unique=True)),
('description', models.TextField(blank=True, help_text='Optional description of the classification.')),
],
options={
'verbose_name': 'Topic',
'verbose_name_plural': '2. Topics',
'ordering': ('order',),
'abstract': False,
},
),
migrations.AddConstraint(
model_name='topic',
constraint=models.UniqueConstraint(condition=models.Q(status='active'), fields=('label',), name='topic_unique_active_label'),
),
migrations.AddField(
model_name='referencedocument',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reference_docs', to='builder.AssessmentCategory'),
),
migrations.AddField(
model_name='assessmentquestion',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='question_set', to='builder.AssessmentCategory'),
),
migrations.AddField(
model_name='assessmentmetric',
name='choices',
field=models.ForeignKey(help_text='Define choices used to score this metric.', on_delete=django.db.models.deletion.CASCADE, to='builder.MetricChoicesType'),
),
migrations.AddField(
model_name='assessmentmetric',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='metric_set', to='builder.AssessmentQuestion'),
),
migrations.AddField(
model_name='assessmentcategory',
name='activity',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_set', to='builder.Activity'),
),
migrations.AddField(
model_name='assessmentcategory',
name='topic',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_set', to='builder.Topic'),
),
migrations.AddConstraint(
model_name='activity',
constraint=models.UniqueConstraint(condition=models.Q(status='active'), fields=('label',), name='activity_unique_active_label'),
),
migrations.AddConstraint(
model_name='assessmentcategory',
constraint=models.UniqueConstraint(fields=('activity', 'topic'), name='unique_activity_topic'),
),
] | 0.688887 | 0.177027 |
from django.db import migrations
def reviewer_to_user(models, reviewer):
if reviewer.user:
user, created = models.User.objects.get_or_create(
internal=reviewer.user,
)
return user
else:
external_user, created = models.ExternalUser.objects.get_or_create(
email=reviewer.email,
)
share, created = models.Share.objects.get_or_create(
external_user=external_user,
page_id=reviewer.review.page_revision.page_id,
defaults={
'shared_by_id': reviewer.review.submitter_id,
'shared_at': reviewer.review.created_at,
'can_comment': True,
}
)
user, created = models.User.objects.get_or_create(
external=external_user,
)
return user
def review_to_review_request(models, review):
request = models.ReviewRequest.objects.create(
page_revision_id=review.page_revision_id,
submitted_by_id=review.submitter_id,
submitted_at=review.created_at,
is_closed=review.status == 'closed',
)
# Reviewers
request.assignees.set([
reviewer_to_user(models, reviewer)
for reviewer in review.reviewers.all()
])
# Annotations
for annotation in models.Annotation.objects.filter(reviewer__review=review).select_related('reviewer'):
annotation_range = annotation.ranges.first()
models.Comment.objects.create(
page_revision_id=review.page_revision_id,
user=reviewer_to_user(models, annotation.reviewer),
quote=annotation.quote,
text=annotation.text,
created_at=annotation.created_at,
updated_at=annotation.updated_at,
resolved_at=annotation.created_at if review.status == 'closed' else None,
content_path='!unknown',
start_xpath=annotation_range.start,
start_offset=annotation_range.start_offset,
end_xpath=annotation_range.end,
end_offset=annotation_range.end_offset,
)
# Responses
for response in models.Response.objects.filter(reviewer__review=review).select_related('reviewer'):
models.ReviewResponse.objects.create(
request=request,
submitted_by=reviewer_to_user(models, response.reviewer),
submitted_at=response.created_at,
status='approved' if response.result == 'approve' else 'needs-changes',
comment=response.comment,
)
return request
class Models:
def __init__(self, apps):
self.ExternalUser = apps.get_model('wagtail_review.ExternalUser')
self.Share = apps.get_model('wagtail_review.Share')
self.User = apps.get_model('wagtail_review.User')
self.Comment = apps.get_model('wagtail_review.Comment')
self.ReviewRequest = apps.get_model('wagtail_review.ReviewRequest')
self.ReviewResponse = apps.get_model('wagtail_review.ReviewResponse')
self.Review = apps.get_model('wagtail_review.Review')
self.Annotation = apps.get_model('wagtail_review.Annotation')
self.Response = apps.get_model('wagtail_review.Response')
def migrate_into_new_models(apps, schema_editor):
models = Models(apps)
for review in models.Review.objects.all().iterator():
review_to_review_request(models, review)
class Migration(migrations.Migration):
dependencies = [
('wagtail_review', '0004_new_models'),
]
operations = [
migrations.RunPython(migrate_into_new_models),
] | wagtail_review/migrations/0005_migrate_to_new_models.py |
from django.db import migrations
def reviewer_to_user(models, reviewer):
if reviewer.user:
user, created = models.User.objects.get_or_create(
internal=reviewer.user,
)
return user
else:
external_user, created = models.ExternalUser.objects.get_or_create(
email=reviewer.email,
)
share, created = models.Share.objects.get_or_create(
external_user=external_user,
page_id=reviewer.review.page_revision.page_id,
defaults={
'shared_by_id': reviewer.review.submitter_id,
'shared_at': reviewer.review.created_at,
'can_comment': True,
}
)
user, created = models.User.objects.get_or_create(
external=external_user,
)
return user
def review_to_review_request(models, review):
request = models.ReviewRequest.objects.create(
page_revision_id=review.page_revision_id,
submitted_by_id=review.submitter_id,
submitted_at=review.created_at,
is_closed=review.status == 'closed',
)
# Reviewers
request.assignees.set([
reviewer_to_user(models, reviewer)
for reviewer in review.reviewers.all()
])
# Annotations
for annotation in models.Annotation.objects.filter(reviewer__review=review).select_related('reviewer'):
annotation_range = annotation.ranges.first()
models.Comment.objects.create(
page_revision_id=review.page_revision_id,
user=reviewer_to_user(models, annotation.reviewer),
quote=annotation.quote,
text=annotation.text,
created_at=annotation.created_at,
updated_at=annotation.updated_at,
resolved_at=annotation.created_at if review.status == 'closed' else None,
content_path='!unknown',
start_xpath=annotation_range.start,
start_offset=annotation_range.start_offset,
end_xpath=annotation_range.end,
end_offset=annotation_range.end_offset,
)
# Responses
for response in models.Response.objects.filter(reviewer__review=review).select_related('reviewer'):
models.ReviewResponse.objects.create(
request=request,
submitted_by=reviewer_to_user(models, response.reviewer),
submitted_at=response.created_at,
status='approved' if response.result == 'approve' else 'needs-changes',
comment=response.comment,
)
return request
class Models:
def __init__(self, apps):
self.ExternalUser = apps.get_model('wagtail_review.ExternalUser')
self.Share = apps.get_model('wagtail_review.Share')
self.User = apps.get_model('wagtail_review.User')
self.Comment = apps.get_model('wagtail_review.Comment')
self.ReviewRequest = apps.get_model('wagtail_review.ReviewRequest')
self.ReviewResponse = apps.get_model('wagtail_review.ReviewResponse')
self.Review = apps.get_model('wagtail_review.Review')
self.Annotation = apps.get_model('wagtail_review.Annotation')
self.Response = apps.get_model('wagtail_review.Response')
def migrate_into_new_models(apps, schema_editor):
models = Models(apps)
for review in models.Review.objects.all().iterator():
review_to_review_request(models, review)
class Migration(migrations.Migration):
dependencies = [
('wagtail_review', '0004_new_models'),
]
operations = [
migrations.RunPython(migrate_into_new_models),
] | 0.621656 | 0.138987 |
#!/usr/bin/env python3
import argparse
import logging
import json
import sys
import requests
import uuid
import time
import boto3
import botocore
from utils.config_loader import Config
from pprint import pprint as pretty
import json
import random
from datetime import datetime
from payloadHandler import payloadHandler
from cmsHandler import ConnectedMobility
from cognitoHandler import Cognito
from awsiot import mqtt_connection_builder
from awscrt import io, mqtt, auth, http
log = logging.getLogger('deploy.cf.create_or_update') # pylint: disable=C0103
def on_connection_interrupted(self, connection, error, **kwargs):
print("Connection interrupted. error: {}".format(error))
# Callback when an interrupted connection is re-established.
def on_connection_resumed(self, connection, return_code, session_present, **kwargs):
print("Connection resumed. return_code: {} session_present: {}".format(return_code, session_present))
if return_code == mqtt.ConnectReturnCode.ACCEPTED and not session_present:
print("Session did not persist. Resubscribing to existing topics...")
resubscribe_future, _ = connection.resubscribe_existing_topics()
# Cannot synchronously wait for resubscribe result because we're on the connection's event-loop thread,
# evaluate result with a callback instead.
resubscribe_future.add_done_callback(self.on_resubscribe_complete)
def main(vin, dtc):
#Set Config path
CONFIG_PATH = 'config.ini'
payloadhandler = payloadHandler(CONFIG_PATH)
#c = Cognito(profile)
#m = ConnectedMobility(profile, stackname)
config = Config(CONFIG_PATH)
config_parameters = config.get_section('SETTINGS')
ENDPOINT = config_parameters['IOT_ENDPOINT']
CLIENT_ID = vin
PATH_TO_CERT = "{}/{}".format(config_parameters['SECURE_CERT_PATH'].format(unique_id=CLIENT_ID), config_parameters['PROD_CERT'])
PATH_TO_KEY = "{}/{}".format(config_parameters['SECURE_CERT_PATH'].format(unique_id=CLIENT_ID), config_parameters['PROD_KEY'])
PATH_TO_ROOT = "{}/{}".format(config_parameters['ROOT_CERT_PATH'], config_parameters['ROOT_CERT'])
event_loop_group = io.EventLoopGroup(1)
host_resolver = io.DefaultHostResolver(event_loop_group)
client_bootstrap = io.ClientBootstrap(event_loop_group, host_resolver)
test_MQTTClient = mqtt_connection_builder.mtls_from_path(
endpoint=ENDPOINT,
cert_filepath=PATH_TO_CERT,
pri_key_filepath=PATH_TO_KEY,
client_bootstrap=client_bootstrap,
ca_filepath=PATH_TO_ROOT,
client_id=CLIENT_ID,
clean_session=False,
on_connection_interrupted=on_connection_interrupted,
on_connection_resumed=on_connection_resumed,
keep_alive_secs=6)
print("Connecting with Prod certs to {} with client ID '{}'...".format(ENDPOINT, CLIENT_ID))
connect_future = test_MQTTClient.connect()
connect_future.result()
print("Connected with production certificates to the endpoint")
payload = payloadhandler.getDTCPayload( dtc, CLIENT_ID)
payloadhandler.publishDTCPayload(test_MQTTClient, payload, CLIENT_ID)
print("Successfully published DTC: {}".format(dtc))
exit()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--VIN", action="store", dest="vin", default=None, help="VIN for vehicle")
parser.add_argument("-d", "--DTC", action="store", dest="dtc", default=None, help="DTC for vehicle")
args = parser.parse_args()
if args.vin and args.dtc:
main(args.vin, args.dtc)
else:
print('[Error] Missing arguments..')
parser.print_help() | generateDTC.py |
#!/usr/bin/env python3
import argparse
import logging
import json
import sys
import requests
import uuid
import time
import boto3
import botocore
from utils.config_loader import Config
from pprint import pprint as pretty
import json
import random
from datetime import datetime
from payloadHandler import payloadHandler
from cmsHandler import ConnectedMobility
from cognitoHandler import Cognito
from awsiot import mqtt_connection_builder
from awscrt import io, mqtt, auth, http
log = logging.getLogger('deploy.cf.create_or_update') # pylint: disable=C0103
def on_connection_interrupted(self, connection, error, **kwargs):
print("Connection interrupted. error: {}".format(error))
# Callback when an interrupted connection is re-established.
def on_connection_resumed(self, connection, return_code, session_present, **kwargs):
print("Connection resumed. return_code: {} session_present: {}".format(return_code, session_present))
if return_code == mqtt.ConnectReturnCode.ACCEPTED and not session_present:
print("Session did not persist. Resubscribing to existing topics...")
resubscribe_future, _ = connection.resubscribe_existing_topics()
# Cannot synchronously wait for resubscribe result because we're on the connection's event-loop thread,
# evaluate result with a callback instead.
resubscribe_future.add_done_callback(self.on_resubscribe_complete)
def main(vin, dtc):
#Set Config path
CONFIG_PATH = 'config.ini'
payloadhandler = payloadHandler(CONFIG_PATH)
#c = Cognito(profile)
#m = ConnectedMobility(profile, stackname)
config = Config(CONFIG_PATH)
config_parameters = config.get_section('SETTINGS')
ENDPOINT = config_parameters['IOT_ENDPOINT']
CLIENT_ID = vin
PATH_TO_CERT = "{}/{}".format(config_parameters['SECURE_CERT_PATH'].format(unique_id=CLIENT_ID), config_parameters['PROD_CERT'])
PATH_TO_KEY = "{}/{}".format(config_parameters['SECURE_CERT_PATH'].format(unique_id=CLIENT_ID), config_parameters['PROD_KEY'])
PATH_TO_ROOT = "{}/{}".format(config_parameters['ROOT_CERT_PATH'], config_parameters['ROOT_CERT'])
event_loop_group = io.EventLoopGroup(1)
host_resolver = io.DefaultHostResolver(event_loop_group)
client_bootstrap = io.ClientBootstrap(event_loop_group, host_resolver)
test_MQTTClient = mqtt_connection_builder.mtls_from_path(
endpoint=ENDPOINT,
cert_filepath=PATH_TO_CERT,
pri_key_filepath=PATH_TO_KEY,
client_bootstrap=client_bootstrap,
ca_filepath=PATH_TO_ROOT,
client_id=CLIENT_ID,
clean_session=False,
on_connection_interrupted=on_connection_interrupted,
on_connection_resumed=on_connection_resumed,
keep_alive_secs=6)
print("Connecting with Prod certs to {} with client ID '{}'...".format(ENDPOINT, CLIENT_ID))
connect_future = test_MQTTClient.connect()
connect_future.result()
print("Connected with production certificates to the endpoint")
payload = payloadhandler.getDTCPayload( dtc, CLIENT_ID)
payloadhandler.publishDTCPayload(test_MQTTClient, payload, CLIENT_ID)
print("Successfully published DTC: {}".format(dtc))
exit()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--VIN", action="store", dest="vin", default=None, help="VIN for vehicle")
parser.add_argument("-d", "--DTC", action="store", dest="dtc", default=None, help="DTC for vehicle")
args = parser.parse_args()
if args.vin and args.dtc:
main(args.vin, args.dtc)
else:
print('[Error] Missing arguments..')
parser.print_help() | 0.250363 | 0.048316 |
import logging
from typing import List, Literal, Optional
from pydantic import Field
from pydantic.class_validators import validator
from pydantic.types import NonNegativeInt
from hydrolib.core.io.ini.models import INIBasedModel, INIGeneral, INIModel
from hydrolib.core.io.ini.util import (
get_split_string_on_delimiter_validator,
make_list_length_root_validator,
make_list_validator,
)
logger = logging.getLogger(__name__)
class OneDFieldGeneral(INIGeneral):
"""The 1D field file's `[General]` section with file meta data."""
class Comments(INIBasedModel.Comments):
fileversion: Optional[str] = Field(
"File version. Do not edit this.", alias="fileVersion"
)
filetype: Optional[str] = Field(
"File type. Should be '1dField'. Do not edit this.",
alias="fileType",
)
comments: Comments = Comments()
_header: Literal["General"] = "General"
fileversion: str = Field("2.00", alias="fileVersion")
filetype: Literal["1dField"] = Field("1dField", alias="fileType")
class OneDFieldGlobal(INIBasedModel):
"""The `[Global]` block with a uniform value for use inside a 1D field file."""
class Comments(INIBasedModel.Comments):
quantity: Optional[str] = Field("The name of the quantity", alias="quantity")
unit: Optional[str] = Field("The unit of the quantity", alias="unit")
value: Optional[str] = Field(
"The global default value for this quantity", alias="value"
)
comments: Comments = Comments()
_header: Literal["Global"] = "Global"
quantity: str = Field(alias="quantity")
unit: str = Field(alias="unit")
value: float = Field(alias="value")
class OneDFieldBranch(INIBasedModel):
"""
A `[Branch]` block for use inside a 1D field file.
Each block can define value(s) on a particular branch.
"""
class Comments(INIBasedModel.Comments):
branchid: Optional[str] = Field("The name of the branch", alias="branchId")
numlocations: Optional[str] = Field(
"Number of locations on branch. The default 0 value implies branch uniform values.",
alias="numLocations",
)
chainage: Optional[str] = Field(
"Space separated list of locations on the branch (m). Locations sorted by increasing chainage. The keyword must be specified if numLocations >0.",
alias="chainage",
)
values: Optional[str] = Field(
"Space separated list of numLocations values; one for each chainage specified. One value required if numLocations =0",
alias="values",
)
comments: Comments = Comments()
_header: Literal["Branch"] = "Branch"
branchid: str = Field(alias="branchId")
numlocations: Optional[NonNegativeInt] = Field(0, alias="numLocations")
chainage: Optional[List[float]] = Field(alias="chainage")
values: List[float] = Field(alias="values")
_split_to_list = get_split_string_on_delimiter_validator(
"chainage",
"values",
delimiter=" ",
)
_check_list_length_values = make_list_length_root_validator(
"chainage",
length_name="numlocations",
list_required_with_length=True,
)
_check_list_length_chainage = make_list_length_root_validator(
"values",
length_name="numlocations",
list_required_with_length=True,
min_length=1,
)
def _get_identifier(self, data: dict) -> Optional[str]:
return data.get("branchid")
class OneDFieldModel(INIModel):
"""
The overall 1D field model that contains the contents of a 1D field file.
This model is typically used when a [FMModel][hydrolib.core.io.mdu.models.FMModel]`.geometry.inifieldfile[..].initial[..].datafiletype==DataFileType.onedfield`.
Attributes:
general (OneDFieldGeneral): `[General]` block with file metadata.
global_ (Optional[OneDFieldGlobal]): Optional `[Global]` block with uniform value.
branch (List[OneDFieldBranch]): Definitions of `[Branch]` field values.
"""
general: OneDFieldGeneral = OneDFieldGeneral()
global_: Optional[OneDFieldGlobal] = Field(
alias="global"
) # to circumvent built-in kw
branch: List[OneDFieldBranch] = []
_split_to_list = make_list_validator(
"branch",
)
@classmethod
def _ext(cls) -> str:
return ".ini"
@classmethod
def _filename(cls) -> str:
return "1dfield" | hydrolib/core/io/onedfield/models.py | import logging
from typing import List, Literal, Optional
from pydantic import Field
from pydantic.class_validators import validator
from pydantic.types import NonNegativeInt
from hydrolib.core.io.ini.models import INIBasedModel, INIGeneral, INIModel
from hydrolib.core.io.ini.util import (
get_split_string_on_delimiter_validator,
make_list_length_root_validator,
make_list_validator,
)
logger = logging.getLogger(__name__)
class OneDFieldGeneral(INIGeneral):
    """The 1D field file's `[General]` section with file meta data."""

    class Comments(INIBasedModel.Comments):
        # Default comment text written next to each [General] key in the file.
        fileversion: Optional[str] = Field(
            "File version. Do not edit this.", alias="fileVersion"
        )
        filetype: Optional[str] = Field(
            "File type. Should be '1dField'. Do not edit this.",
            alias="fileType",
        )

    comments: Comments = Comments()
    # INI section header; fixed to "General" for this block type.
    _header: Literal["General"] = "General"
    # File format metadata; the Literal type pins the only accepted file type.
    fileversion: str = Field("2.00", alias="fileVersion")
    filetype: Literal["1dField"] = Field("1dField", alias="fileType")
class OneDFieldGlobal(INIBasedModel):
    """The `[Global]` block with a uniform value for use inside a 1D field file."""

    class Comments(INIBasedModel.Comments):
        # Default comment text written next to each [Global] key in the file.
        quantity: Optional[str] = Field("The name of the quantity", alias="quantity")
        unit: Optional[str] = Field("The unit of the quantity", alias="unit")
        value: Optional[str] = Field(
            "The global default value for this quantity", alias="value"
        )

    comments: Comments = Comments()
    # INI section header; fixed to "Global" for this block type.
    _header: Literal["Global"] = "Global"
    # All three keys are required: the quantity name, its unit and its
    # single uniform default value.
    quantity: str = Field(alias="quantity")
    unit: str = Field(alias="unit")
    value: float = Field(alias="value")
class OneDFieldBranch(INIBasedModel):
    """
    A `[Branch]` block for use inside a 1D field file.

    Each block can define value(s) on a particular branch.
    """

    class Comments(INIBasedModel.Comments):
        # Default comment text written next to each [Branch] key in the file.
        branchid: Optional[str] = Field("The name of the branch", alias="branchId")
        numlocations: Optional[str] = Field(
            "Number of locations on branch. The default 0 value implies branch uniform values.",
            alias="numLocations",
        )
        chainage: Optional[str] = Field(
            "Space separated list of locations on the branch (m). Locations sorted by increasing chainage. The keyword must be specified if numLocations >0.",
            alias="chainage",
        )
        values: Optional[str] = Field(
            "Space separated list of numLocations values; one for each chainage specified. One value required if numLocations =0",
            alias="values",
        )

    comments: Comments = Comments()
    # INI section header; fixed to "Branch" for this block type.
    _header: Literal["Branch"] = "Branch"
    branchid: str = Field(alias="branchId")
    numlocations: Optional[NonNegativeInt] = Field(0, alias="numLocations")
    chainage: Optional[List[float]] = Field(alias="chainage")
    values: List[float] = Field(alias="values")

    # Accept the file's space-separated scalar strings and split them into lists.
    _split_to_list = get_split_string_on_delimiter_validator(
        "chainage",
        "values",
        delimiter=" ",
    )
    # NOTE(review): the two validator names below look swapped — the one named
    # "...values" checks `chainage` and the one named "...chainage" checks
    # `values`. The checked fields appear to be the intended ones (only
    # `values` gets min_length=1), so only the names are misleading; confirm.
    _check_list_length_values = make_list_length_root_validator(
        "chainage",
        length_name="numlocations",
        list_required_with_length=True,
    )
    _check_list_length_chainage = make_list_length_root_validator(
        "values",
        length_name="numlocations",
        list_required_with_length=True,
        min_length=1,
    )

    def _get_identifier(self, data: dict) -> Optional[str]:
        # The branch name identifies this block (e.g. in error reporting).
        return data.get("branchid")
class OneDFieldModel(INIModel):
    """
    The overall 1D field model that contains the contents of a 1D field file.

    This model is typically used when a [FMModel][hydrolib.core.io.mdu.models.FMModel]`.geometry.inifieldfile[..].initial[..].datafiletype==DataFileType.onedfield`.

    Attributes:
        general (OneDFieldGeneral): `[General]` block with file metadata.
        global_ (Optional[OneDFieldGlobal]): Optional `[Global]` block with uniform value.
        branch (List[OneDFieldBranch]): Definitions of `[Branch]` field values.
    """

    general: OneDFieldGeneral = OneDFieldGeneral()
    global_: Optional[OneDFieldGlobal] = Field(
        alias="global"
    )  # to circumvent built-in kw
    branch: List[OneDFieldBranch] = []

    # Coerce a single [Branch] block into a one-element list.
    _split_to_list = make_list_validator(
        "branch",
    )

    @classmethod
    def _ext(cls) -> str:
        # Default file extension for 1D field files.
        return ".ini"
@classmethod
def _filename(cls) -> str:
return "1dfield" | 0.902542 | 0.385953 |
from aws_cdk.core import (
App,
CfnOutput,
Tags,
Stack
)
from aws_cdk import aws_elasticache as elasticache
from config import config_util as config
from cache.helper import (
log_group,
secret,
user_group,
vpc
)
class ElastiCacheStack(Stack):
# Class for the ReplicationGroup stack
    def __init__(self, scope: App, construct_id: str, **kwargs) -> None:
        """
        Constructor for ReplicationStack class

        Args:
            scope (core.App): the app object, all child constructs are defined within this app object.
            construct_id (str): Id for the construct which is used to uniquely identify it.
        """
        super().__init__(scope, construct_id, **kwargs)
        # All cluster settings come from the external config helper module.
        self.cluster_name = config.get_cluster_name()
        self.transit_encryption = config.get_transit_encryption()
        self.security_group = vpc.get_security_group(self)
        self.subnet_group = vpc.get_subnet_group(self)
        # Only Redis 6.x supports the Redis slow-log delivery
        if config.get_engine_version() != "6.x":
            self.log_delivery_configuration_request = None
        else:
            # One CloudWatch log group per cluster, named with the
            # /aws/elasticache/redis-slowlog/<cluster> convention used here.
            log_group_name = f"/aws/elasticache/redis-slowlog/{self.cluster_name}"
            self.log_group = log_group.get_log_group(self, log_group_name)
            self.log_delivery_configuration_request = [log_group.get_log_delivery_configuration_request(log_group_name)]
        # Build the replication group, then publish the stack outputs.
        self.create_cache()
        self.output_cache()
    def create_cache(self) -> None:
        """
        Create the Replication Group cluster.

        Args: None
        Returns: None
        """
        self.user_secrets = secret.get_user_secrets(self)
        # hasattr is always True here (assigned just above); the truthiness
        # check on the value is what actually gates user group creation.
        if hasattr(self, "user_secrets") and self.user_secrets:
            # NOTE(review): create_user_group presumably sets self.user_group
            # on this stack — the hasattr checks below rely on that; confirm.
            user_group.create_user_group(self, self.user_secrets)
        if hasattr(self, "user_group"):
            # NOTE(review): the id is read from config rather than from the
            # created construct — confirm they always match.
            user_group_ids = [config.get_user_group_id()]
        else:
            user_group_ids = None
        self.cluster = elasticache.CfnReplicationGroup(
            self, "ElastiCacheReplicationGroup",
            multi_az_enabled=config.get_multi_az(),
            auth_token=secret.get_auth_token(self),
            at_rest_encryption_enabled=config.get_at_rest_encryption(),
            transit_encryption_enabled=self.transit_encryption,
            cache_node_type=config.get_node_type(),
            engine="redis",
            engine_version=config.get_engine_version(),
            auto_minor_version_upgrade=True,
            port=config.get_port_number(),
            cache_subnet_group_name=self.subnet_group.ref,
            security_group_ids=[self.security_group.security_group_id],
            log_delivery_configurations=self.log_delivery_configuration_request,
            num_node_groups=config.get_num_node_groups(),
            replicas_per_node_group=config.get_replicas_per_node_group(),
            automatic_failover_enabled=config.get_automatic_failover(),
            replication_group_description=f"Replication group for {self.cluster_name}",
            user_group_ids=user_group_ids,
            snapshot_window=config.get_snapshot_window(),
            # (sic: "retension" is the config helper's spelling)
            snapshot_retention_limit=config.get_snapshot_retension_limit(),
            replication_group_id=self.cluster_name
        )
        Tags.of(self.cluster).add("Name", self.cluster_name)
        # Explicit CloudFormation ordering: the replication group must be
        # created after its subnet group (and user group, when present).
        self.cluster.add_depends_on(self.subnet_group)
        if hasattr(self, "user_group"):
            self.cluster.add_depends_on(self.user_group)
def output_cache(self):
"""
Output the CloudFormation stack items for replication group.
Args: None
Returns: None
"""
self.output_security_group()
self.output_secret()
self.output_cache_cluster()
def output_security_group(self) -> None:
"""
Output security group.
Args: None
Returns: None
"""
separator = ","
CfnOutput(
self, "output-security-group",
value=separator.join(self.cluster.security_group_ids),
description="Redis security group id for the cluster",
export_name=f"{self.cluster_name}-security-group-id"
)
    def output_secret(self) -> None:
        """
        Output user or token secrets if it is configured.

        Args: None
        Returns: None
        """
        if hasattr(self, "user_secrets"):
            for idx, user_secret in enumerate(self.user_secrets):
                # NOTE(review): the construct id is 1-based (idx + 1) while the
                # export name is 0-based (idx) — confirm this off-by-one is
                # intended before aligning them (export names are a public
                # cross-stack contract).
                CfnOutput(
                    self, f"output-user-secret-name-{idx + 1}",
                    value=user_secret.secret.secret_name,
                    description="Replication group configuration user secret name for cluster",
                    export_name=f"{self.cluster_name}-configuration-user-secret-name-{idx}"
                )
        # NOTE(review): token_secret is never assigned in this class;
        # presumably set by the secret helper module — verify.
        if hasattr(self, "token_secret"):
            CfnOutput(
                self, "output-token-secret-name",
                value=self.token_secret.secret.secret_name,
                description="Replication group configuration token secret name for cluster",
                export_name=f"{self.cluster_name}-configuration-token-secret-name"
            )
    def output_cache_cluster(self) -> None:
        """
        Output specific CloudFormation stack items for replication group.
        The items are group id, port number, and configuration endpoint.

        Args: None
        Returns: None
        """
        # attr_configuration_end_point_* are CfnReplicationGroup attributes for
        # the configuration endpoint. NOTE(review): presumably only resolvable
        # for cluster-mode deployments — confirm for num_node_groups == 1.
        CfnOutput(
            self, "output-port-number",
            value=self.cluster.attr_configuration_end_point_port,
            description="Replication group configuration port for cluster",
            export_name=f"{self.cluster_name}-port-number"
        )
        CfnOutput(
            self, "output-endpoint",
            value=self.cluster.attr_configuration_end_point_address,
            description="Replication group configuration endpoint for cluster",
            export_name=f"{self.cluster_name}-endpoint"
        )
CfnOutput(
self, "output-id",
value=self.cluster.ref,
description="Replication group id for cluster",
export_name=f"{self.cluster_name}-id"
) | python/elasticache/cache/elasticache_stack.py | from aws_cdk.core import (
App,
CfnOutput,
Tags,
Stack
)
from aws_cdk import aws_elasticache as elasticache
from config import config_util as config
from cache.helper import (
log_group,
secret,
user_group,
vpc
)
class ElastiCacheStack(Stack):
# Class for the ReplicationGroup stack
def __init__(self, scope: App, construct_id: str, **kwargs) -> None:
"""
Constructor for ReplicationStack class
Args:
scope (core.App): the app object, all child constructs are defined within this app object.
construct_id (str): Id for the construct which is used to uniquely identify it.
"""
super().__init__(scope, construct_id, **kwargs)
self.cluster_name = config.get_cluster_name()
self.transit_encryption = config.get_transit_encryption()
self.security_group = vpc.get_security_group(self)
self.subnet_group = vpc.get_subnet_group(self)
# Only Redis 6.x supports the Redis slow-log delivery
if config.get_engine_version() != "6.x":
self.log_delivery_configuration_request = None
else:
log_group_name = f"/aws/elasticache/redis-slowlog/{self.cluster_name}"
self.log_group = log_group.get_log_group(self, log_group_name)
self.log_delivery_configuration_request = [log_group.get_log_delivery_configuration_request(log_group_name)]
self.create_cache()
self.output_cache()
def create_cache(self) -> None:
"""
Create the Replication Group cluster.
Args: None
Returns: None
"""
self.user_secrets = secret.get_user_secrets(self)
if hasattr(self, "user_secrets") and self.user_secrets:
user_group.create_user_group(self, self.user_secrets)
if hasattr(self, "user_group"):
user_group_ids = [config.get_user_group_id()]
else:
user_group_ids = None
self.cluster = elasticache.CfnReplicationGroup(
self, "ElastiCacheReplicationGroup",
multi_az_enabled=config.get_multi_az(),
auth_token=secret.get_auth_token(self),
at_rest_encryption_enabled=config.get_at_rest_encryption(),
transit_encryption_enabled=self.transit_encryption,
cache_node_type=config.get_node_type(),
engine="redis",
engine_version=config.get_engine_version(),
auto_minor_version_upgrade=True,
port=config.get_port_number(),
cache_subnet_group_name=self.subnet_group.ref,
security_group_ids=[self.security_group.security_group_id],
log_delivery_configurations=self.log_delivery_configuration_request,
num_node_groups=config.get_num_node_groups(),
replicas_per_node_group=config.get_replicas_per_node_group(),
automatic_failover_enabled=config.get_automatic_failover(),
replication_group_description=f"Replication group for {self.cluster_name}",
user_group_ids=user_group_ids,
snapshot_window=config.get_snapshot_window(),
snapshot_retention_limit=config.get_snapshot_retension_limit(),
replication_group_id=self.cluster_name
)
Tags.of(self.cluster).add("Name", self.cluster_name)
self.cluster.add_depends_on(self.subnet_group)
if hasattr(self, "user_group"):
self.cluster.add_depends_on(self.user_group)
def output_cache(self):
"""
Output the CloudFormation stack items for replication group.
Args: None
Returns: None
"""
self.output_security_group()
self.output_secret()
self.output_cache_cluster()
def output_security_group(self) -> None:
"""
Output security group.
Args: None
Returns: None
"""
separator = ","
CfnOutput(
self, "output-security-group",
value=separator.join(self.cluster.security_group_ids),
description="Redis security group id for the cluster",
export_name=f"{self.cluster_name}-security-group-id"
)
def output_secret(self) -> None:
"""
Output user or token secrets if it is configured.
Args: None
Returns: None
"""
if hasattr(self, "user_secrets"):
for idx, user_secret in enumerate(self.user_secrets):
CfnOutput(
self, f"output-user-secret-name-{idx + 1}",
value=user_secret.secret.secret_name,
description="Replication group configuration user secret name for cluster",
export_name=f"{self.cluster_name}-configuration-user-secret-name-{idx}"
)
if hasattr(self, "token_secret"):
CfnOutput(
self, "output-token-secret-name",
value=self.token_secret.secret.secret_name,
description="Replication group configuration token secret name for cluster",
export_name=f"{self.cluster_name}-configuration-token-secret-name"
)
def output_cache_cluster(self) -> None:
"""
Output specific CloudFormation stack items for replication group.
The items are group id, port number, and configuration endpoint.
Args: None
Returns: None
"""
CfnOutput(
self, "output-port-number",
value=self.cluster.attr_configuration_end_point_port,
description="Replication group configuration port for cluster",
export_name=f"{self.cluster_name}-port-number"
)
CfnOutput(
self, "output-endpoint",
value=self.cluster.attr_configuration_end_point_address,
description="Replication group configuration endpoint for cluster",
export_name=f"{self.cluster_name}-endpoint"
)
CfnOutput(
self, "output-id",
value=self.cluster.ref,
description="Replication group id for cluster",
export_name=f"{self.cluster_name}-id"
) | 0.852614 | 0.105763 |
import tblink_rpc
import ctypes
@tblink_rpc.iftype("i2c_bfms.initiator")
class I2cInitiatorBfm(object):
    def __init__(self):
        # Signalled by _ack_cmd each time the BFM completes a command.
        self._ev = tblink_rpc.event()
        # Serializes bus transactions across concurrent callers.
        self._lock = tblink_rpc.lock()
        # Set by _reset when the BFM reports it is out of reset.
        self._is_reset = False
        self._is_reset_ev = tblink_rpc.event()
        # Last data byte and ack flag latched by _ack_cmd.
        self._data = 0
        self._ack = 0
        pass
    async def memwrite(self, dev_a, mem_a, data):
        """Write the bytes in `data` to device `dev_a` starting at address `mem_a`.

        Holds the bus lock for the whole transaction and waits for the BFM
        to come out of reset before starting.
        """
        await self._lock.acquire()
        if not self._is_reset:
            await self._is_reset_ev.wait()
            self._is_reset_ev.clear()
        # Address phase: 7-bit device address + write bit (0), sent with start=1.
        a_data = (((dev_a & 0x7F) << 1) | 0)
        await self._cmd_write(a_data, 1, 0)
        # Each command completes when _ack_cmd sets the event.
        await self._ev.wait()
        self._ev.clear()
        # Register address phase. NOTE(review): mem_a is masked to 7 bits —
        # confirm 8-bit register addresses are not expected here.
        a_data = (mem_a & 0x7F)
        await self._cmd_write(a_data, 0, 0)
        await self._ev.wait()
        self._ev.clear()
        for i, d in enumerate(data):
            # Assert stop only on the final data byte to end the transaction.
            if i+1 == len(data):
                stop = 1
            else:
                stop = 0;
            await self._cmd_write(d, 0, stop)
            await self._ev.wait()
            self._ev.clear()
        self._lock.release()
    async def memread(self, dev_a, mem_a, sz):
        """Read `sz` bytes from device `dev_a` starting at address `mem_a`.

        Returns the bytes as a list. Holds the bus lock for the whole
        transaction and waits for the BFM to come out of reset first.
        """
        await self._lock.acquire()
        if not self._is_reset:
            await self._is_reset_ev.wait()
            self._is_reset_ev.clear()
        ret = []
        # Address phase: device address + write bit, to send the register address.
        a_data = (((dev_a & 0x7F) << 1) | 0)
        await self._cmd_write(a_data, 1, 0)
        # Each command completes when _ack_cmd sets the event.
        await self._ev.wait()
        self._ev.clear()
        # NOTE(review): mem_a is masked to 7 bits — confirm 8-bit register
        # addresses are not expected here.
        a_data = (mem_a & 0x7F)
        await self._cmd_write(a_data, 0, 0)
        await self._ev.wait()
        self._ev.clear()
        # Repeated start with the read bit (1) set to switch to reading.
        a_data = (((dev_a & 0x7F) << 1) | 1)
        await self._cmd_write(a_data, 1, 0)
        await self._ev.wait()
        self._ev.clear()
        for i in range(sz):
            # ack=1 on the final byte — presumably the master NACK that
            # terminates the read; confirm against the BFM. NOTE(review):
            # stop=1 is passed for every byte read — confirm intended.
            if i+1 == sz:
                ack = 1
            else:
                ack = 0
            await self._cmd_read(ack, 1)
            await self._ev.wait()
            self._ev.clear()
            # _ack_cmd latched the received byte into self._data.
            ret.append(self._data)
        self._lock.release()
        return ret
    @tblink_rpc.exptask
    def _set_prescale(self, prescale : ctypes.c_uint16):
        # RPC-exported task stub; presumably implemented by the tblink-rpc
        # peer (the HDL-side BFM) — body intentionally empty.
        pass

    @tblink_rpc.exptask
    def _cmd_read(self, ack : ctypes.c_uint8, stop : ctypes.c_uint8):
        # RPC-exported task stub; completion is signalled back via _ack_cmd.
        pass

    @tblink_rpc.exptask
    def _cmd_write(self, data : ctypes.c_uint8, start : ctypes.c_uint8, stop : ctypes.c_uint8):
        # RPC-exported task stub; completion is signalled back via _ack_cmd.
        pass

    @tblink_rpc.impfunc
    def _ack_cmd(self, data : ctypes.c_uint8, ack : ctypes.c_uint8):
        # Called by the BFM when a command finishes: latch the result and
        # wake the transaction waiting on _ev.
        self._data = data
        self._ack = ack
        self._ev.set()
        pass

    @tblink_rpc.impfunc
    def _reset(self):
        # Called by the BFM once reset completes; releases waiting transactions.
        self._is_reset = True
self._is_reset_ev.set() | frontends/python/i2c_bfms/i2c_initiator_bfm.py | import tblink_rpc
import ctypes
@tblink_rpc.iftype("i2c_bfms.initiator")
class I2cInitiatorBfm(object):
def __init__(self):
self._ev = tblink_rpc.event()
self._lock = tblink_rpc.lock()
self._is_reset = False
self._is_reset_ev = tblink_rpc.event()
self._data = 0
self._ack = 0
pass
async def memwrite(self, dev_a, mem_a, data):
await self._lock.acquire()
if not self._is_reset:
await self._is_reset_ev.wait()
self._is_reset_ev.clear()
a_data = (((dev_a & 0x7F) << 1) | 0)
await self._cmd_write(a_data, 1, 0)
await self._ev.wait()
self._ev.clear()
a_data = (mem_a & 0x7F)
await self._cmd_write(a_data, 0, 0)
await self._ev.wait()
self._ev.clear()
for i, d in enumerate(data):
if i+1 == len(data):
stop = 1
else:
stop = 0;
await self._cmd_write(d, 0, stop)
await self._ev.wait()
self._ev.clear()
self._lock.release()
async def memread(self, dev_a, mem_a, sz):
await self._lock.acquire()
if not self._is_reset:
await self._is_reset_ev.wait()
self._is_reset_ev.clear()
ret = []
a_data = (((dev_a & 0x7F) << 1) | 0)
await self._cmd_write(a_data, 1, 0)
await self._ev.wait()
self._ev.clear()
a_data = (mem_a & 0x7F)
await self._cmd_write(a_data, 0, 0)
await self._ev.wait()
self._ev.clear()
a_data = (((dev_a & 0x7F) << 1) | 1)
await self._cmd_write(a_data, 1, 0)
await self._ev.wait()
self._ev.clear()
for i in range(sz):
if i+1 == sz:
ack = 1
else:
ack = 0
await self._cmd_read(ack, 1)
await self._ev.wait()
self._ev.clear()
ret.append(self._data)
self._lock.release()
return ret
@tblink_rpc.exptask
def _set_prescale(self, prescale : ctypes.c_uint16):
pass
@tblink_rpc.exptask
def _cmd_read(self, ack : ctypes.c_uint8, stop : ctypes.c_uint8):
pass
@tblink_rpc.exptask
def _cmd_write(self, data : ctypes.c_uint8, start : ctypes.c_uint8, stop : ctypes.c_uint8):
pass
@tblink_rpc.impfunc
def _ack_cmd(self, data : ctypes.c_uint8, ack : ctypes.c_uint8):
self._data = data
self._ack = ack
self._ev.set()
pass
@tblink_rpc.impfunc
def _reset(self):
self._is_reset = True
self._is_reset_ev.set() | 0.205217 | 0.080141 |
# Plots the SNR*FA values found by the hyperopt library as a function of
# the exposure time and the EM gain, grouped by horizontal shift speed (hss).
# 22/11/2019. <NAME>.
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import json
from sys import exit

# Each log line is a JSON dict with one operating mode and its figure of merit.
array_dic_modes = []
with open(r'Logs\Parameters\log.txt', 'r') as arq:
    for line in arq.read().splitlines():
        array_dic_modes.append(json.loads(line))

# One bucket per horizontal shift speed (MHz), for each logged quantity.
# (key used in the dicts below, numeric value logged in the file)
HSS_VALUES = [('0.1', 0.1), ('1', 1), ('10', 10), ('20', 20), ('30', 30)]
t_exp = {key: [] for key, _ in HSS_VALUES}
em_mode = {key: [] for key, _ in HSS_VALUES}
em_gain = {key: [] for key, _ in HSS_VALUES}
hss = {key: [] for key, _ in HSS_VALUES}
preamp = {key: [] for key, _ in HSS_VALUES}
binn = {key: [] for key, _ in HSS_VALUES}
loss = {key: [] for key, _ in HSS_VALUES}  # holds the logged 'snr' values

# Distribute every logged mode into the bucket of its shift speed.
# (Replaces five copy-pasted if-blocks; behavior is identical.)
for dic in array_dic_modes:
    for key, hss_value in HSS_VALUES:
        if dic['hss'] == hss_value:
            t_exp[key].append(dic['t_exp'])
            em_mode[key].append(dic['em_mode'])
            em_gain[key].append(dic['em_gain'])
            preamp[key].append(dic['preamp'])
            binn[key].append(dic['binn'])
            loss[key].append(dic['snr'])

fig = plt.figure()
list_fake2Dlines = []
list_labels = []
ax = fig.add_subplot((111), projection='3d')

# Scatter style per shift speed, plotted from the fastest to the slowest.
PLOT_STYLES = [
    ('30', 'blue', 0.5, r'30 MHz'),
    ('20', 'red', 0.5, r'20 MHz'),
    ('10', 'green', 0.5, r'10 MHz'),
    ('1', 'tab:purple', 0.6, r'1 MHz'),
    ('0.1', 'tab:olive', 0.8, r'0,1 MHz'),
]
for key, color, alpha, label in PLOT_STYLES:
    if not t_exp[key]:
        continue
    ax.scatter(t_exp[key], em_gain[key], loss[key], c=color, marker='o', alpha=alpha)
    # Proxy artist: 3D scatter handles cannot be used directly in a legend.
    list_fake2Dlines.append(
        mpl.lines.Line2D([0], [0], linestyle="none", c=color, marker='o'))
    list_labels.append(label)

ax.set_xlabel('Exposure Time (s)')
ax.set_ylabel('EM Gain')
ax.set_zlabel('SNR*FA')
ax.legend(list_fake2Dlines, list_labels, numpoints=1)
plt.show()
'''
for i in x:
line = lines_parameters[i]
binn = 0
if line[5] == str(2): binn = 1
if line[4] == str(1):
t_exp_1[binn].append(float(line[0]))
em_mode_1[binn].append(float(line[1]))
em_gain_1[binn].append(float(line[2]))
hss_1[binn].append(float(line[3]))
preamp_1[binn].append(float(line[4]))
binn_1[binn].append(float(line[5]))
loss_1[binn].append(lines_loss[i])
else:
t_exp_2[binn].append(float(line[0]))
em_mode_2[binn].append(float(line[1]))
em_gain_2[binn].append(float(line[2]))
hss_2[binn].append(float(line[3]))
preamp_2[binn].append(float(line[4]))
binn_2[binn].append(float(line[5]))
loss_2[binn].append(lines_loss[i])
''' | Codigos_python/Graficos/Iteracoes_ruido_parametros/plot_parameters_vs_iteration.py |
# Este codigo plota os valores do ruido de leitura encontrados
#pela biblioteca hyperopt em funcao do numero de iteracao.
#22/11/2019. <NAME>.
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import json
from sys import exit
##arq = open(r'Logs\Parameters\log.txt', 'r')
##lines_parameters = arq.read().splitlines()
##lines_parameters = [i.split('\t') for i in lines_parameters]
##arq.close()
array_dic_modes=[]
with open(r'Logs\Parameters\log.txt', 'r') as arq:
lines = arq.read().splitlines()
for line in lines:
dic = json.loads(line)
array_dic_modes.append(dic)
arq.close()
##arq = open(r'Logs\Loss\log.txt', 'r')
##lines_loss = arq.read().splitlines()
##lines_loss = [float(i) for i in lines_loss[:-1]]
##arq.close()
t_exp = {'0.1':[], '1':[], '10':[], '20':[], '30':[]}
em_mode = {'0.1':[], '1':[], '10':[], '20':[], '30':[]}
em_gain = {'0.1':[], '1':[], '10':[], '20':[], '30':[]}
hss = {'0.1':[], '1':[], '10':[], '20':[], '30':[]}
preamp = {'0.1':[], '1':[], '10':[], '20':[], '30':[]}
binn = {'0.1':[], '1':[], '10':[], '20':[], '30':[]}
loss ={'0.1':[], '1':[], '10':[], '20':[], '30':[]}
for i in range(len(array_dic_modes)):
dic = array_dic_modes[i]
#line = [float(i) for i in line]
if dic['hss'] == 0.1:
t_exp['0.1'].append(dic['t_exp'])
em_mode['0.1'].append(dic['em_mode'])
em_gain['0.1'].append(dic['em_gain'])
preamp['0.1'].append(dic['preamp'])
binn['0.1'].append(dic['binn'])
loss['0.1'].append(dic['snr'])
if dic['hss'] == 1:
t_exp['1'].append(dic['t_exp'])
em_mode['1'].append(dic['em_mode'])
em_gain['1'].append(dic['em_gain'])
preamp['1'].append(dic['preamp'])
binn['1'].append(dic['binn'])
loss['1'].append(dic['snr'])
if dic['hss'] == 10:
t_exp['10'].append(dic['t_exp'])
em_mode['10'].append(dic['em_mode'])
em_gain['10'].append(dic['em_gain'])
preamp['10'].append(dic['preamp'])
binn['10'].append(dic['binn'])
loss['10'].append(dic['snr'])
if dic['hss'] == 20:
t_exp['20'].append(dic['t_exp'])
em_mode['20'].append(dic['em_mode'])
em_gain['20'].append(dic['em_gain'])
preamp['20'].append(dic['preamp'])
binn['20'].append(dic['binn'])
loss['20'].append(dic['snr'])
if dic['hss'] == 30:
t_exp['30'].append(dic['t_exp'])
em_mode['30'].append(dic['em_mode'])
em_gain['30'].append(dic['em_gain'])
preamp['30'].append(dic['preamp'])
binn['30'].append(dic['binn'])
loss['30'].append(dic['snr'])
fig = plt.figure()
list_fake2Dlines = []
list_labels = []
ax = fig.add_subplot((111), projection='3d')
if t_exp['30']:
ax.scatter(t_exp['30'], em_gain['30'], loss['30'], c='blue', marker='o', alpha=0.5)
fake2Dline1 = mpl.lines.Line2D([0],[0], linestyle="none", c='blue', marker = 'o')
list_fake2Dlines.append(fake2Dline1)
list_labels.append(r'30 MHz')
if t_exp['20']:
ax.scatter(t_exp['20'], em_gain['20'], loss['20'], c='red', marker='o', alpha=0.5)
fake2Dline2 = mpl.lines.Line2D([0],[0], linestyle="none", c='red', marker = 'o')
list_fake2Dlines.append(fake2Dline2)
list_labels.append(r'20 MHz')
if t_exp['10']:
ax.scatter(t_exp['10'], em_gain['10'], loss['10'], c='green', marker='o', alpha=0.5)
fake2Dline3 = mpl.lines.Line2D([0],[0], linestyle="none", c='green', marker = 'o')
list_fake2Dlines.append(fake2Dline3)
list_labels.append(r'10 MHz')
if t_exp['1']:
ax.scatter(t_exp['1'], em_gain['1'], loss['1'], c='tab:purple', marker='o', alpha=0.6)
fake2Dline4 = mpl.lines.Line2D([0],[0], linestyle="none", c='tab:purple', marker = 'o')
list_fake2Dlines.append(fake2Dline4)
list_labels.append(r'1 MHz')
if t_exp['0.1']:
ax.scatter(t_exp['0.1'], em_gain['0.1'], loss['0.1'], c='tab:olive', marker='o', alpha=0.8)
fake2Dline5 = mpl.lines.Line2D([0],[0], linestyle="none", c='tab:olive', marker = 'o')
list_fake2Dlines.append(fake2Dline5)
list_labels.append(r'0,1 MHz')
ax.set_xlabel('Exposure Time (s)')
ax.set_ylabel('EM Gain')
ax.set_zlabel('SNR*FA')
ax.legend(list_fake2Dlines, list_labels, numpoints = 1)
plt.show()
'''
for i in x:
line = lines_parameters[i]
binn = 0
if line[5] == str(2): binn = 1
if line[4] == str(1):
t_exp_1[binn].append(float(line[0]))
em_mode_1[binn].append(float(line[1]))
em_gain_1[binn].append(float(line[2]))
hss_1[binn].append(float(line[3]))
preamp_1[binn].append(float(line[4]))
binn_1[binn].append(float(line[5]))
loss_1[binn].append(lines_loss[i])
else:
t_exp_2[binn].append(float(line[0]))
em_mode_2[binn].append(float(line[1]))
em_gain_2[binn].append(float(line[2]))
hss_2[binn].append(float(line[3]))
preamp_2[binn].append(float(line[4]))
binn_2[binn].append(float(line[5]))
loss_2[binn].append(lines_loss[i])
''' | 0.19888 | 0.433262 |
from expfactory.views import preview_experiment, run_battery, run_single
from expfactory.battery import generate, generate_local
from expfactory.experiment import validate, load_experiment
from expfactory.tests import validate_surveys
from glob import glob
import argparse
import sys
import os
def main():
    """Entry point for the expfactory CLI.

    Parses the command line and dispatches to preview, generate, run,
    validate, or test actions; with no action flag it starts the
    expfactory web interface.
    """
    parser = argparse.ArgumentParser(
        description="generate experiments and infrastructure to serve them.")
    parser.add_argument("--folder", dest='folder', help="full path to single experiment folder (for single experiment run with --run) or folder with many experiments (for battery run with --run)", type=str, default=None)
    parser.add_argument("--subid", dest='subid', help="subject id to embed in experiments data in the case of a battery run with --run", type=str, default=None)
    parser.add_argument("--experiments", dest='experiments', help="comma separated list of experiments for a local battery", type=str, default=None)
    parser.add_argument("--port", dest='port', help="port to preview experiment", type=int, default=None)
    parser.add_argument("--battery", dest='battery_folder', help="full path to local battery folder to use as template", type=str, default=None)
    parser.add_argument("--time", dest='time', help="maximum number of minutes for battery to endure, to select experiments", type=int, default=99999)
    parser.add_argument('--preview', help="preview an experiment locally (development function)", dest='preview', default=False, action='store_true')
    parser.add_argument('--run', help="run a single experiment/survey or battery locally", dest='run', default=False, action='store_true')
    parser.add_argument("--survey", dest='survey', help="survey to run for a local assessment", type=str, default=None)
    parser.add_argument("--game", dest='game', help="game to run for a local assessment", type=str, default=None)
    parser.add_argument('--validate', dest='validate', help="validate an experiment folder", default=False, action='store_true')
    parser.add_argument('--psiturk', dest='psiturk', help="to be used with the --generate command, to generate a psiturk battery instead of local folder deployment", default=False, action='store_true')
    parser.add_argument('--generate', dest='generate', help="generate (and don't run) a battery with --experiments to a --folder", default=False, action='store_true')
    parser.add_argument("--output", dest='output', help="output folder for --generate command, if a temporary directory is not desired. Must not exist.", type=str, default=None)
    parser.add_argument('--test', dest='test', help="test an experiment folder with the experiment robot", default=False, action='store_true')
    try:
        args = parser.parse_args()
    # argparse signals both errors and --help via SystemExit; the original
    # bare `except:` is narrowed to exactly that.
    except SystemExit:
        parser.print_help()
        sys.exit(0)

    # Check if the person wants to preview experiment or battery
    if args.preview:
        preview_experiment(folder=args.folder, battery_folder=args.battery_folder, port=args.port)

    # Generate a local battery folder (static)
    elif args.generate:
        if args.experiments is not None:
            experiments = args.experiments.split(",")
            # Deploy a psiturk battery folder
            if args.psiturk:
                outdir = generate(battery_dest=args.output,
                                  battery_repo=args.battery_folder,
                                  experiment_repo=args.folder,
                                  experiments=experiments,
                                  make_config=True,
                                  warning=False)
            # Deploy a regular battery folder
            else:
                outdir = generate_local(battery_dest=args.output,
                                        subject_id="expfactory_battery_result",
                                        battery_repo=args.battery_folder,
                                        experiment_repo=args.folder,
                                        experiments=experiments,
                                        warning=False,
                                        time=args.time)
            print("Battery generation complete: static files are in %s" %(outdir))
        else:
            print("Please specify list of comma separated experiments with --experiments")

    # Run a local battery
    elif args.run:
        # Warn the user about using repos for experiments and battery
        if args.battery_folder is None:
            print("No battery folder specified. Will pull latest battery from expfactory-battery repo")
        if args.folder is None:
            print("No experiments, games, or surveys folder specified. Will pull latest from expfactory-experiments repo")
        if args.survey is not None:
            survey = args.survey.split(",")
            # BUGFIX: warn only when more than one survey was actually given
            # (split() always yields at least one element).
            if len(survey) > 1:
                print("Currently only support running one survey, will run first in list.")
            run_single(exp_id=survey[0],
                       repo_type="surveys",
                       source_repo=args.folder,
                       battery_repo=args.battery_folder,
                       port=args.port,
                       subject_id=args.subid)
        if args.game is not None:
            game = args.game.split(",")
            # BUGFIX: same single-entry warning fix as for --survey.
            if len(game) > 1:
                print("Currently only support running one game, will run first in list.")
            run_single(exp_id=game[0],
                       repo_type="games",
                       source_repo=args.folder,
                       battery_repo=args.battery_folder,
                       port=args.port,
                       subject_id=args.subid)
        if args.experiments is not None:
            experiments = args.experiments.split(",")
            run_battery(experiments=experiments,
                        experiment_folder=args.folder,
                        subject_id=args.subid,
                        battery_folder=args.battery_folder,
                        port=args.port,
                        time=args.time)
        elif args.survey is None and args.game is None:
            # BUGFIX: only nag about --experiments when nothing else ran.
            print("Please specify list of comma separated experiments with --experiments")

    # Validate a config.json
    elif args.validate:
        # BUGFIX: `folder` used to be assigned only when --folder was absent,
        # so passing a folder raised NameError / skipped validation.
        folder = args.folder if args.folder is not None else os.getcwd()
        validate(experiment_folder=folder)
        # If a survey, and if validates, also validate survey.tsv
        experiment = load_experiment(folder)[0]
        if experiment["template"] == "survey":
            print("Validating survey.tsv...")
            survey_repo = os.path.dirname(folder)
            validate_surveys(experiment["exp_id"], survey_repo)

    # Run the experiment robot
    elif args.test:
        from expfactory.tests import test_experiment
        test_experiment(folder=args.folder, battery_folder=args.battery_folder, port=args.port)

    # Otherwise, just open the expfactory interface
    else:
        from expfactory.interface import start
        start(port=args.port)
if __name__ == '__main__':
main() | expfactory/scripts.py | from expfactory.views import preview_experiment, run_battery, run_single
from expfactory.battery import generate, generate_local
from expfactory.experiment import validate, load_experiment
from expfactory.tests import validate_surveys
from glob import glob
import argparse
import sys
import os
def main():
parser = argparse.ArgumentParser(
description="generate experiments and infrastructure to serve them.")
parser.add_argument("--folder", dest='folder', help="full path to single experiment folder (for single experiment run with --run) or folder with many experiments (for battery run with --run)", type=str, default=None)
parser.add_argument("--subid", dest='subid', help="subject id to embed in experiments data in the case of a battery run with --run", type=str, default=None)
parser.add_argument("--experiments", dest='experiments', help="comma separated list of experiments for a local battery", type=str, default=None)
parser.add_argument("--port", dest='port', help="port to preview experiment", type=int, default=None)
parser.add_argument("--battery", dest='battery_folder', help="full path to local battery folder to use as template", type=str, default=None)
parser.add_argument("--time", dest='time', help="maximum number of minutes for battery to endure, to select experiments", type=int, default=99999)
parser.add_argument('--preview', help="preview an experiment locally (development function)", dest='preview', default=False, action='store_true')
parser.add_argument('--run', help="run a single experiment/survey or battery locally", dest='run', default=False, action='store_true')
parser.add_argument("--survey", dest='survey', help="survey to run for a local assessment", type=str, default=None)
parser.add_argument("--game", dest='game', help="game to run for a local assessment", type=str, default=None)
parser.add_argument('--validate', dest='validate', help="validate an experiment folder", default=False, action='store_true')
parser.add_argument('--psiturk', dest='psiturk', help="to be used with the --generate command, to generate a psiturk battery instead of local folder deployment", default=False, action='store_true')
parser.add_argument('--generate', dest='generate', help="generate (and don't run) a battery with --experiments to a --folder", default=False, action='store_true')
parser.add_argument("--output", dest='output', help="output folder for --generate command, if a temporary directory is not desired. Must not exist.", type=str, default=None)
parser.add_argument('--test', dest='test', help="test an experiment folder with the experiment robot", default=False, action='store_true')
try:
args = parser.parse_args()
except:
parser.print_help()
sys.exit(0)
# Check if the person wants to preview experiment or battery
if args.preview == True:
preview_experiment(folder=args.folder,battery_folder=args.battery_folder,port=args.port)
# Generate a local battery folder (static)
elif args.generate == True:
if args.experiments != None:
# Deploy a psiturk battery folder
experiments = args.experiments.split(",")
if args.psiturk == True:
outdir = generate(battery_dest=args.output,
battery_repo=args.battery_folder,
experiment_repo=args.folder,
experiments=experiments,
make_config=True,
warning=False)
# Deploy a regular battery folder
else:
outdir = generate_local(battery_dest=args.output,
subject_id="expfactory_battery_result",
battery_repo=args.battery_folder,
experiment_repo=args.folder,
experiments=experiments,
warning=False,
time=args.time)
print("Battery generation complete: static files are in %s" %(outdir))
else:
print("Please specify list of comma separated experiments with --experiments")
# Run a local battery
elif args.run == True:
# Warn the user about using repos for experiments and battery
if args.battery_folder == None:
print("No battery folder specified. Will pull latest battery from expfactory-battery repo")
if args.folder == None:
print("No experiments, games, or surveys folder specified. Will pull latest from expfactory-experiments repo")
if args.survey != None:
survey = args.survey.split(",")
if len(survey) > 0:
print("Currently only support running one survey, will run first in list.")
survey = survey[0]
run_single(exp_id=survey,
repo_type="surveys",
source_repo=args.folder,
battery_repo=args.battery_folder,
port=args.port,
subject_id=args.subid)
if args.game != None:
game = args.game.split(",")
if len(game) > 0:
print("Currently only support running one game, will run first in list.")
game = game[0]
run_single(exp_id=game,
repo_type="games",
source_repo=args.folder,
battery_repo=args.battery_folder,
port=args.port,
subject_id=args.subid)
if args.experiments != None:
experiments = args.experiments.split(",")
run_battery(experiments=experiments,
experiment_folder=args.folder,
subject_id=args.subid,
battery_folder=args.battery_folder,
port=args.port,
time=args.time)
else:
print("Please specify list of comma separated experiments with --experiments")
# Validate a config.json
elif args.validate == True:
if args.folder == None:
folder = os.getcwd()
validate(experiment_folder=folder)
# If a survey, and if validates, also validate survey.tsv
experiment = load_experiment(folder)[0]
if experiment["template"] == "survey":
print("Validating survey.tsv...")
survey_repo = os.path.dirname(folder)
validate_surveys(experiment["exp_id"],survey_repo)
# Run the experiment robot
elif args.test == True:
from expfactory.tests import test_experiment
test_experiment(folder=args.folder,battery_folder=args.battery_folder,port=args.port)
# Otherwise, just open the expfactory interface
else:
from expfactory.interface import start
start(port=args.port)
if __name__ == '__main__':
main() | 0.440229 | 0.169784 |
import os, json
from uSocket import send
COUNTRY_NAMES = {
"al": "Albania",
"ar": "Argentina",
"au": "Australia",
"at": "Austria",
"be": "Belgium",
"ba": "Bosnia and Herzegovina",
"br": "Brazil",
"bg": "Bulgaria",
"ca": "Canada",
"cl": "Chile",
"cr": "Costa Rica",
"hr": "Croatia",
"cy": "Cyprus",
"cz": "Czechia",
"dk": "Denmark",
"ee": "Estonia",
"fi": "Finland",
"fr": "France",
"ge": "Georgia",
"de": "Germany",
"gr": "Greece",
"hk": "Hong Kong",
"hu": "Hungary",
"is": "Iceland",
"in": "India",
"id": "Indonesia",
"ie": "Ireland",
"il": "Israel",
"it": "Italy",
"jp": "Japan",
"kr": "Korea, Republic of",
"lv": "Latvia",
"lu": "Luxembourg",
"mk": "Macedonia, The Former Yugoslav Republic Of",
"my": "Malaysia",
"mx": "Mexico",
"md": "Moldova, Republic of",
"nl": "Netherlands",
"nz": "New Zealand",
"no": "Norway",
"pl": "Poland",
"pt": "Portugal",
"rs": "Republic of Serbia",
"ro": "Romania",
"sg": "Singapore",
"sk": "Slovakia",
"si": "Slovenia",
"za": "South Africa",
"es": "Spain",
"se": "Sweden",
"ch": "Switzerland",
"tw": "Taiwan, Province of China",
"th": "Thailand",
"tr": "Turkey",
"ua": "Ukraine",
"uk": "United Kingdom",
"us": "United States",
"vn": "Vietnam",
}
PATH_BASE = os.path.split(os.path.abspath(__file__))[0]
PATH_FLAGS = os.path.join(PATH_BASE, "flags")
COUNTER_FILE = os.path.join(os.path.expanduser("~"), ".nvpn", "counter.json")
def get_flag_dict():
"""
() -> {str: str}
Returns all country names that have an img
"""
flag_files = {x.split(".")[0] for x in os.listdir(PATH_FLAGS)}
return {k: v for k, v in COUNTRY_NAMES.items() if k in flag_files}
def _read_count_flag():
"""
() -> {str: int}
Read the counter file that counts the times a server location is selected.
Returns location and count.
"""
if os.path.exists(COUNTER_FILE):
with open(COUNTER_FILE, "r") as r:
data = json.load(r)
return data
else:
with open(COUNTER_FILE, "w") as f:
data = {k: 0 for k in get_flag_dict()}
json.dump(data, f)
return data
def _write_count_flag(flag):
"""
(str) ->
Updates the counter for a flag
"""
data = _read_count_flag()
if flag in data:
data[flag] += 1
with open(COUNTER_FILE, "w") as w:
json.dump(data, w)
def flag_clicked(flag):
"""
(str) ->
Start action to count clicked flag.
Send connect command to unix socket that controls the vpn.
"""
_write_count_flag(flag)
send(f"connect {flag}")
def get_sorted_flag_dict():
"""
() -> {str: str}
Sort function to return most clicked flags.
Used by gui to display most used servers at the top.
"""
temp = sorted(get_flag_dict().items(), key=lambda x: -_read_count_flag()[x[0]])
return {k: v for k, v in temp} | vpncontrol/helperFlaglist.py | import os, json
from uSocket import send
COUNTRY_NAMES = {
"al": "Albania",
"ar": "Argentina",
"au": "Australia",
"at": "Austria",
"be": "Belgium",
"ba": "Bosnia and Herzegovina",
"br": "Brazil",
"bg": "Bulgaria",
"ca": "Canada",
"cl": "Chile",
"cr": "Costa Rica",
"hr": "Croatia",
"cy": "Cyprus",
"cz": "Czechia",
"dk": "Denmark",
"ee": "Estonia",
"fi": "Finland",
"fr": "France",
"ge": "Georgia",
"de": "Germany",
"gr": "Greece",
"hk": "Hong Kong",
"hu": "Hungary",
"is": "Iceland",
"in": "India",
"id": "Indonesia",
"ie": "Ireland",
"il": "Israel",
"it": "Italy",
"jp": "Japan",
"kr": "Korea, Republic of",
"lv": "Latvia",
"lu": "Luxembourg",
"mk": "Macedonia, The Former Yugoslav Republic Of",
"my": "Malaysia",
"mx": "Mexico",
"md": "Moldova, Republic of",
"nl": "Netherlands",
"nz": "New Zealand",
"no": "Norway",
"pl": "Poland",
"pt": "Portugal",
"rs": "Republic of Serbia",
"ro": "Romania",
"sg": "Singapore",
"sk": "Slovakia",
"si": "Slovenia",
"za": "South Africa",
"es": "Spain",
"se": "Sweden",
"ch": "Switzerland",
"tw": "Taiwan, Province of China",
"th": "Thailand",
"tr": "Turkey",
"ua": "Ukraine",
"uk": "United Kingdom",
"us": "United States",
"vn": "Vietnam",
}
PATH_BASE = os.path.split(os.path.abspath(__file__))[0]
PATH_FLAGS = os.path.join(PATH_BASE, "flags")
COUNTER_FILE = os.path.join(os.path.expanduser("~"), ".nvpn", "counter.json")
def get_flag_dict():
"""
() -> {str: str}
Returns all country names that have an img
"""
flag_files = {x.split(".")[0] for x in os.listdir(PATH_FLAGS)}
return {k: v for k, v in COUNTRY_NAMES.items() if k in flag_files}
def _read_count_flag():
"""
() -> {str: int}
Read the counter file that counts the times a server location is selected.
Returns location and count.
"""
if os.path.exists(COUNTER_FILE):
with open(COUNTER_FILE, "r") as r:
data = json.load(r)
return data
else:
with open(COUNTER_FILE, "w") as f:
data = {k: 0 for k in get_flag_dict()}
json.dump(data, f)
return data
def _write_count_flag(flag):
"""
(str) ->
Updates the counter for a flag
"""
data = _read_count_flag()
if flag in data:
data[flag] += 1
with open(COUNTER_FILE, "w") as w:
json.dump(data, w)
def flag_clicked(flag):
"""
(str) ->
Start action to count clicked flag.
Send connect command to unix socket that controls the vpn.
"""
_write_count_flag(flag)
send(f"connect {flag}")
def get_sorted_flag_dict():
"""
() -> {str: str}
Sort function to return most clicked flags.
Used by gui to display most used servers at the top.
"""
temp = sorted(get_flag_dict().items(), key=lambda x: -_read_count_flag()[x[0]])
return {k: v for k, v in temp} | 0.469034 | 0.370624 |
import string
import os
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
import pandas as pd
"""
references
browsy scraper.py
https://github.com/beepscore/browsy
websearcher browser_driver.py
https://github.com/beepscore/websearcher
"""
def malaria_url(country_name_first_letter):
"""
return url
"""
base_url = 'https://www.cdc.gov/malaria/travelers/country_table'
url_string = base_url + '/' + country_name_first_letter + '.html'
return url_string
def malaria_filename(country_name_first_letter):
"""
return filename
"""
return './data/' + country_name_first_letter + '.html'
def get_table_html(country_name_first_letter):
"""
Uses browser to request info.
Waits for javascript to run and return html. Selects by css_id.
:param country_name_first_letter: e.g. 'a'
return table html. return empty string if timeout or error
"""
# browser = webdriver.Firefox()
browser = webdriver.Chrome()
url = malaria_url(country_name_first_letter)
browser.get(url)
table_tag = 'table'
try:
# http://stackoverflow.com/questions/37422832/waiting-for-a-page-to-load-in-selenium-firefox-w-python?lq=1
# http://stackoverflow.com/questions/5868439/wait-for-page-load-in-selenium
WebDriverWait(browser, 6).until(lambda d: d.find_element_by_tag_name(table_tag).is_displayed())
element = browser.find_element_by_tag_name(table_tag)
return element.get_attribute('outerHTML')
except TimeoutException:
print("TimeoutException, returning empty string")
return ""
except AttributeError:
# http://stackoverflow.com/questions/9823936/python-how-do-i-know-what-type-of-exception-occured#9824050
print("AttributeError, returning empty string")
return ""
finally:
browser.quit()
def get_tables_write_files():
for letter in string.ascii_lowercase:
text = get_table_html(letter)
out_filename = malaria_filename(letter)
with open(out_filename, 'w') as out_file:
out_file.write(text)
def trim_country(df):
"""
:param df: dataframe with column 'country' containing country name, possibly followed by () and/or ;
:return: mutated dataframe by trimming country name
"""
# delete ( and following, escape (
df['country'] = df['country'].str.replace(r'\(.*', '')
# delete ; and following
df['country'] = df['country'].str.replace(r';.*', '')
# delete , and following. For example change Bahamas, The to Bahamas
df['country'] = df['country'].str.replace(r',.*', '')
df['country'] = df['country'].str.strip()
return df
def categorize_estimated_risk(df):
"""
:param df: dataframe with column 'estimated_risk' containing risk, possibly followed by digit for footnote
original data may contain string 'None', different from python object None
:return: mutated dataframe by trimming estimated_risk and converting from string to category
"""
# delete digit and following. For example in Afghanistan change Moderate2 to Moderate
df['estimated_risk'] = df['estimated_risk'].str.replace(r'\d.*', '')
df['estimated_risk'] = df['estimated_risk'].str.strip()
# make case consistent e.g. original data may contain 'Very Low' and 'Very low'
df['estimated_risk'] = df['estimated_risk'].str.lower()
df['estimated_risk'] = df['estimated_risk'].astype('category')
return df
def get_dataframe(country_name_first_letter):
# read from local data file
filename = malaria_filename(country_name_first_letter)
if os.path.getsize(filename) == 0:
# file is empty
# avoid ValueError: No text parsed from document: ./data/x.html
return pd.DataFrame()
df = pd.read_html(filename)[0]
df.columns = ['country', 'areas_with_malaria', 'estimated_risk', 'drug_resistance', 'malaria_species', 'rec_prophylaxis', 'info']
df = df.drop(columns=['drug_resistance', 'malaria_species', 'rec_prophylaxis', 'info'])
trim_country(df)
categorize_estimated_risk(df)
return df
def df_by_merging_iso_a3(df):
"""
Adds column with iso three letter country abbreviation.
For a given country, the name wording/spelling may vary
but the iso 3 abbreviation is consistent.
:param df: dataframe with column 'country' containing country name
:return: dataframe by merging df_iso_a3 into df
"""
df_iso_a3 = pd.read_csv('./data/iso_a3.csv')
# print(len(df_iso_a3))
# 177
df = pd.merge(df, df_iso_a3, how='left')
df = df.sort_values('iso_a3')
# print(len(df))
# 241
# move country name to index
df = df.set_index('country')
# rename ivory coast
df = df.rename({'Côte dâIvoire': "Côte d'Ivoire"})
# In column iso_a3 for rows missing value, add value.
# https://en.wikipedia.org/wiki/ISO_3166-1
# Fixed most countries with moderate or high risk.
# TODO: Consider fixing more missing values
df.loc['Andorra', 'iso_a3'] = 'AND'
df.loc['Bahrain', 'iso_a3'] = 'BHR'
df.loc['Burma', 'iso_a3'] = 'MMR'
df.loc['Cape Verde', 'iso_a3'] = 'CPV'
df.loc['Central African Republic', 'iso_a3'] = 'CAF'
df.loc["Côte d'Ivoire", 'iso_a3'] = 'CIV'
df.loc['Democratic Republic of the Congo', 'iso_a3'] = 'COD'
df.loc['Dominican Republic', 'iso_a3'] = 'DOM'
df.loc['Equatorial Guinea', 'iso_a3'] = 'GNQ'
df.loc['French Guiana', 'iso_a3'] = 'GUF'
df.loc['Laos', 'iso_a3'] = 'LAO'
df.loc['Solomon Islands', 'iso_a3'] = 'SLB'
df.loc['South Korea', 'iso_a3'] = 'ROK'
df.loc['South Sudan', 'iso_a3'] = 'SSD'
# virgin islands have 2 entries British VGB and United States VIR
# TODO: assign each row correctly
df.loc['Virgin Islands', 'iso_a3'] = 'VGB'
df.reset_index()
return df
def get_dataframe_all_countries():
df_all_letters = pd.DataFrame()
for letter in string.ascii_lowercase:
df_letter = get_dataframe(letter)
df_all_letters = df_all_letters.append(df_letter)
df_all_letters = df_by_merging_iso_a3(df_all_letters)
return df_all_letters
if __name__ == '__main__':
pd.set_option('display.max_columns', 6)
pd.set_option('display.max_rows', 200)
# only need to write files once
# get_tables_write_files()
df = get_dataframe_all_countries()
# print(len(df))
# 241
print(df) | malaria_scraper.py |
import string
import os
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
import pandas as pd
"""
references
browsy scraper.py
https://github.com/beepscore/browsy
websearcher browser_driver.py
https://github.com/beepscore/websearcher
"""
def malaria_url(country_name_first_letter):
"""
return url
"""
base_url = 'https://www.cdc.gov/malaria/travelers/country_table'
url_string = base_url + '/' + country_name_first_letter + '.html'
return url_string
def malaria_filename(country_name_first_letter):
"""
return filename
"""
return './data/' + country_name_first_letter + '.html'
def get_table_html(country_name_first_letter):
"""
Uses browser to request info.
Waits for javascript to run and return html. Selects by css_id.
:param country_name_first_letter: e.g. 'a'
return table html. return empty string if timeout or error
"""
# browser = webdriver.Firefox()
browser = webdriver.Chrome()
url = malaria_url(country_name_first_letter)
browser.get(url)
table_tag = 'table'
try:
# http://stackoverflow.com/questions/37422832/waiting-for-a-page-to-load-in-selenium-firefox-w-python?lq=1
# http://stackoverflow.com/questions/5868439/wait-for-page-load-in-selenium
WebDriverWait(browser, 6).until(lambda d: d.find_element_by_tag_name(table_tag).is_displayed())
element = browser.find_element_by_tag_name(table_tag)
return element.get_attribute('outerHTML')
except TimeoutException:
print("TimeoutException, returning empty string")
return ""
except AttributeError:
# http://stackoverflow.com/questions/9823936/python-how-do-i-know-what-type-of-exception-occured#9824050
print("AttributeError, returning empty string")
return ""
finally:
browser.quit()
def get_tables_write_files():
for letter in string.ascii_lowercase:
text = get_table_html(letter)
out_filename = malaria_filename(letter)
with open(out_filename, 'w') as out_file:
out_file.write(text)
def trim_country(df):
"""
:param df: dataframe with column 'country' containing country name, possibly followed by () and/or ;
:return: mutated dataframe by trimming country name
"""
# delete ( and following, escape (
df['country'] = df['country'].str.replace(r'\(.*', '')
# delete ; and following
df['country'] = df['country'].str.replace(r';.*', '')
# delete , and following. For example change Bahamas, The to Bahamas
df['country'] = df['country'].str.replace(r',.*', '')
df['country'] = df['country'].str.strip()
return df
def categorize_estimated_risk(df):
"""
:param df: dataframe with column 'estimated_risk' containing risk, possibly followed by digit for footnote
original data may contain string 'None', different from python object None
:return: mutated dataframe by trimming estimated_risk and converting from string to category
"""
# delete digit and following. For example in Afghanistan change Moderate2 to Moderate
df['estimated_risk'] = df['estimated_risk'].str.replace(r'\d.*', '')
df['estimated_risk'] = df['estimated_risk'].str.strip()
# make case consistent e.g. original data may contain 'Very Low' and 'Very low'
df['estimated_risk'] = df['estimated_risk'].str.lower()
df['estimated_risk'] = df['estimated_risk'].astype('category')
return df
def get_dataframe(country_name_first_letter):
# read from local data file
filename = malaria_filename(country_name_first_letter)
if os.path.getsize(filename) == 0:
# file is empty
# avoid ValueError: No text parsed from document: ./data/x.html
return pd.DataFrame()
df = pd.read_html(filename)[0]
df.columns = ['country', 'areas_with_malaria', 'estimated_risk', 'drug_resistance', 'malaria_species', 'rec_prophylaxis', 'info']
df = df.drop(columns=['drug_resistance', 'malaria_species', 'rec_prophylaxis', 'info'])
trim_country(df)
categorize_estimated_risk(df)
return df
def df_by_merging_iso_a3(df):
"""
Adds column with iso three letter country abbreviation.
For a given country, the name wording/spelling may vary
but the iso 3 abbreviation is consistent.
:param df: dataframe with column 'country' containing country name
:return: dataframe by merging df_iso_a3 into df
"""
df_iso_a3 = pd.read_csv('./data/iso_a3.csv')
# print(len(df_iso_a3))
# 177
df = pd.merge(df, df_iso_a3, how='left')
df = df.sort_values('iso_a3')
# print(len(df))
# 241
# move country name to index
df = df.set_index('country')
# rename ivory coast
df = df.rename({'Côte dâIvoire': "Côte d'Ivoire"})
# In column iso_a3 for rows missing value, add value.
# https://en.wikipedia.org/wiki/ISO_3166-1
# Fixed most countries with moderate or high risk.
# TODO: Consider fixing more missing values
df.loc['Andorra', 'iso_a3'] = 'AND'
df.loc['Bahrain', 'iso_a3'] = 'BHR'
df.loc['Burma', 'iso_a3'] = 'MMR'
df.loc['Cape Verde', 'iso_a3'] = 'CPV'
df.loc['Central African Republic', 'iso_a3'] = 'CAF'
df.loc["Côte d'Ivoire", 'iso_a3'] = 'CIV'
df.loc['Democratic Republic of the Congo', 'iso_a3'] = 'COD'
df.loc['Dominican Republic', 'iso_a3'] = 'DOM'
df.loc['Equatorial Guinea', 'iso_a3'] = 'GNQ'
df.loc['French Guiana', 'iso_a3'] = 'GUF'
df.loc['Laos', 'iso_a3'] = 'LAO'
df.loc['Solomon Islands', 'iso_a3'] = 'SLB'
df.loc['South Korea', 'iso_a3'] = 'ROK'
df.loc['South Sudan', 'iso_a3'] = 'SSD'
# virgin islands have 2 entries British VGB and United States VIR
# TODO: assign each row correctly
df.loc['Virgin Islands', 'iso_a3'] = 'VGB'
df.reset_index()
return df
def get_dataframe_all_countries():
df_all_letters = pd.DataFrame()
for letter in string.ascii_lowercase:
df_letter = get_dataframe(letter)
df_all_letters = df_all_letters.append(df_letter)
df_all_letters = df_by_merging_iso_a3(df_all_letters)
return df_all_letters
if __name__ == '__main__':
pd.set_option('display.max_columns', 6)
pd.set_option('display.max_rows', 200)
# only need to write files once
# get_tables_write_files()
df = get_dataframe_all_countries()
# print(len(df))
# 241
print(df) | 0.383988 | 0.210807 |
#pylint: disable=missing-docstring,invalid-name,bare-except,wrong-import-position,import-error
import sys
from os.path import dirname, abspath
from datetime import datetime
import unittest
sys.path.insert(0, dirname(dirname(dirname(abspath(__file__)))))
from linga import app, db, User
from linga.auth import user_query
class TestUser(unittest.TestCase):
def setUp(self):
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///tmp.db'
db.init_app(app)
db.create_all()
def tearDown(self):
db.drop_all()
def test_should_accept_email_on_create(self):
u = User('<EMAIL>')
self.assertEquals(u.email, '<EMAIL>')
def test_should_hash_password_on_create(self):
u = User('<EMAIL>', '<PASSWORD>')
self.assertGreaterEqual(len(u.password), len('<PASSWORD>'))
self.assertNotEquals(u.password, '<PASSWORD>')
def test_should_validate_password_set_on_create(self):
u = User('<EMAIL>', '<PASSWORD>')
self.assertTrue(u.check_password('<PASSWORD>'))
def test_should_set_created_date_on_create(self):
u = User('<EMAIL>', '<PASSWORD>')
cur_time = datetime.now()
self.assertIsNotNone(u.created)
self.assertGreaterEqual(cur_time, u.created)
def test_should_persist_new_user(self):
u = User('<EMAIL>', '<PASSWORD>')
db.session.add(u)
db.session.commit()
users = user_query().filter_by(email='<EMAIL>').all()
self.assertEqual(len(users), 1)
self.assertEqual(users[0].email, '<EMAIL>')
self.assertTrue(users[0].check_password('<PASSWORD>'))
def test_should_persist_changes(self):
u = User('<EMAIL>', '<PASSWORD>')
db.session.add(u)
db.session.commit()
u.email = '<EMAIL>'
db.session.add(u)
db.session.commit()
users = user_query().all()
self.assertEquals(len(users), 1)
self.assertEquals(users[0].email, '<EMAIL>') | tests/python/test_auth.py |
#pylint: disable=missing-docstring,invalid-name,bare-except,wrong-import-position,import-error
import sys
from os.path import dirname, abspath
from datetime import datetime
import unittest
sys.path.insert(0, dirname(dirname(dirname(abspath(__file__)))))
from linga import app, db, User
from linga.auth import user_query
class TestUser(unittest.TestCase):
def setUp(self):
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///tmp.db'
db.init_app(app)
db.create_all()
def tearDown(self):
db.drop_all()
def test_should_accept_email_on_create(self):
u = User('<EMAIL>')
self.assertEquals(u.email, '<EMAIL>')
def test_should_hash_password_on_create(self):
u = User('<EMAIL>', '<PASSWORD>')
self.assertGreaterEqual(len(u.password), len('<PASSWORD>'))
self.assertNotEquals(u.password, '<PASSWORD>')
def test_should_validate_password_set_on_create(self):
u = User('<EMAIL>', '<PASSWORD>')
self.assertTrue(u.check_password('<PASSWORD>'))
def test_should_set_created_date_on_create(self):
u = User('<EMAIL>', '<PASSWORD>')
cur_time = datetime.now()
self.assertIsNotNone(u.created)
self.assertGreaterEqual(cur_time, u.created)
def test_should_persist_new_user(self):
u = User('<EMAIL>', '<PASSWORD>')
db.session.add(u)
db.session.commit()
users = user_query().filter_by(email='<EMAIL>').all()
self.assertEqual(len(users), 1)
self.assertEqual(users[0].email, '<EMAIL>')
self.assertTrue(users[0].check_password('<PASSWORD>'))
def test_should_persist_changes(self):
u = User('<EMAIL>', '<PASSWORD>')
db.session.add(u)
db.session.commit()
u.email = '<EMAIL>'
db.session.add(u)
db.session.commit()
users = user_query().all()
self.assertEquals(len(users), 1)
self.assertEquals(users[0].email, '<EMAIL>') | 0.180648 | 0.140454 |
#문자열
msg = 'Hello, World!!'
#파이썬에서는 자료구조를 의미하는 접미사를
#변수명에 사용하기도 한다
list1_list=[] #빈 리스트
list2_list=[1,2,3,4,5] #숫자
list3_list=['a','b','c'] #문자
list4_list=['a','b','c',1,2,3,True] #혼합
print(list1_list)
#간단한 연산
#요소 존재 여부 파악 : in/not in 연산자
print(1 in list1_list)
print('a' in list1_list)
print(3 in list2_list)
#길이 연산 : len()
print(len(list1_list))
print(len(list2_list))
#연결 연산 : +
print(list2_list + list3_list)
#반복 연산 : *
print(list2_list * 2)
#요소의 특정값 참조 : index 사용
print(msg[5],msg[7])
print(list2_list[2])
print(list3_list[2])
print(list4_list[5])
#요소값 변경 : index, = 사용
list2_list[2] = -3
print(list2_list)
#주민 코드에서 성별 여부 판별
jumin=[1,2,3,4,5,6,1,2,3,4,5,6,7]
if jumin[6] == 1 :
print('남자입니다')
else :
print('여자입니다')
#주민코드에서 생년월일 추출
for i in range(0,6):
print(jumin[i], end=' ') # end =' ' 줄바꿈없이 출력시 종결문자를 지정
#특정범위내 요소들을 추출할때는 슬라이스를 사용 [i:j]
print(jumin[0:6]) # 생년월일
print(jumin[:6]) # 시작 위치부터
print(jumin[6:]) # 6번째 인덱스에서 부터 나머지
print(jumin[:]) # 모두출력
print(jumin[0:6:2]) # 홀수자리만 추출
print(jumin[::-1]) # 역순 출력
#print(jumin[100]) # 인덱스 초과 - 오류
#print(jumin[0:100:2]) # 인덱스 초과 - 오류
#리스트 관련 통계 함수
print(sum(list2_list)) # 합
print(min(list2_list)) # 최소
print(max(list2_list)) # 최대
#리스트가 주어지면
# 이것의 가운데에 있는 요소 값을 출력
#list = [1,2,3,4,5]
list = [1,2,6,8,4,10]
size = len(list)
mid=int(size / 2)
print('가운데 값:',list[mid]) #요소 수가 홀수
print('가운데 값:',list[mid-1:mid+1]) #요소 수가 짝수
def listcenter(list):
size = len(list)
mid = int(size/2)
if size %2 == 0 : #짝수인경우
print(list[mid-1:mid+1])
else:
print(list[mid])
listcenter([1,2,3])
listcenter([1,2,3,4])
# 리스트 조작 함수
# 요소 추가 : append
list = [1,2,3,4,5]
list.append(9)
list.append(8)
print(list)
# 요소 추가 : insert(위치, 값)
list.insert(6, 7)
print(list)
# 요소 제거 : remove(값), 왼쪽부터 검색 후 삭제
list.remove(9)
print(list)
# 요소 제거 : pop(), pop(위치)
list.pop(5)
print(list)
list.pop() # 마지막 요소 제거
print(list)
# 모두 제거 : clear()
list.clear()
print(list) | Python3Lab/List.py |
#문자열
msg = 'Hello, World!!'
#파이썬에서는 자료구조를 의미하는 접미사를
#변수명에 사용하기도 한다
list1_list=[] #빈 리스트
list2_list=[1,2,3,4,5] #숫자
list3_list=['a','b','c'] #문자
list4_list=['a','b','c',1,2,3,True] #혼합
print(list1_list)
#간단한 연산
#요소 존재 여부 파악 : in/not in 연산자
print(1 in list1_list)
print('a' in list1_list)
print(3 in list2_list)
#길이 연산 : len()
print(len(list1_list))
print(len(list2_list))
#연결 연산 : +
print(list2_list + list3_list)
#반복 연산 : *
print(list2_list * 2)
#요소의 특정값 참조 : index 사용
print(msg[5],msg[7])
print(list2_list[2])
print(list3_list[2])
print(list4_list[5])
#요소값 변경 : index, = 사용
list2_list[2] = -3
print(list2_list)
#주민 코드에서 성별 여부 판별
jumin=[1,2,3,4,5,6,1,2,3,4,5,6,7]
if jumin[6] == 1 :
print('남자입니다')
else :
print('여자입니다')
#주민코드에서 생년월일 추출
for i in range(0,6):
print(jumin[i], end=' ') # end =' ' 줄바꿈없이 출력시 종결문자를 지정
#특정범위내 요소들을 추출할때는 슬라이스를 사용 [i:j]
print(jumin[0:6]) # 생년월일
print(jumin[:6]) # 시작 위치부터
print(jumin[6:]) # 6번째 인덱스에서 부터 나머지
print(jumin[:]) # 모두출력
print(jumin[0:6:2]) # 홀수자리만 추출
print(jumin[::-1]) # 역순 출력
#print(jumin[100]) # 인덱스 초과 - 오류
#print(jumin[0:100:2]) # 인덱스 초과 - 오류
#리스트 관련 통계 함수
print(sum(list2_list)) # 합
print(min(list2_list)) # 최소
print(max(list2_list)) # 최대
#리스트가 주어지면
# 이것의 가운데에 있는 요소 값을 출력
#list = [1,2,3,4,5]
list = [1,2,6,8,4,10]
size = len(list)
mid=int(size / 2)
print('가운데 값:',list[mid]) #요소 수가 홀수
print('가운데 값:',list[mid-1:mid+1]) #요소 수가 짝수
def listcenter(list):
size = len(list)
mid = int(size/2)
if size %2 == 0 : #짝수인경우
print(list[mid-1:mid+1])
else:
print(list[mid])
listcenter([1,2,3])
listcenter([1,2,3,4])
# 리스트 조작 함수
# 요소 추가 : append
list = [1,2,3,4,5]
list.append(9)
list.append(8)
print(list)
# 요소 추가 : insert(위치, 값)
list.insert(6, 7)
print(list)
# 요소 제거 : remove(값), 왼쪽부터 검색 후 삭제
list.remove(9)
print(list)
# 요소 제거 : pop(), pop(위치)
list.pop(5)
print(list)
list.pop() # 마지막 요소 제거
print(list)
# 모두 제거 : clear()
list.clear()
print(list) | 0.064403 | 0.331674 |
from __future__ import annotations
import math
from prettyqt import constants, core, custom_models
def size_to_string(size):
if size <= 0:
return "0 b"
decimals = 2
units = ["b", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]
power = int(math.log(size, 1024))
try:
unit = units[power]
except IndexError:
unit = units[-1]
power = len(units) - 1
if power == 0:
decimals = 0
normsize = size / math.pow(1024, power)
#: this should expand to "1.23 GB"
return "%0.*f %s" % (decimals, normsize, unit)
class StorageInfoModel(core.AbstractTableModel):
(
ColumnRootPath,
ColumnName,
ColumnDevice,
ColumnFileSystemName,
ColumnTotal,
ColumnFree,
ColumnAvailable,
ColumnIsReady,
ColumnIsReadOnly,
ColumnIsValid,
ColumnCount,
) = range(11)
columnFuncMap = {
ColumnRootPath: lambda volume: str(volume.get_root_path()),
ColumnName: lambda volume: volume.name(),
ColumnDevice: lambda volume: volume.get_device(),
ColumnFileSystemName: lambda volume: volume.get_file_system_type(),
ColumnTotal: lambda volume: size_to_string(volume.bytesTotal()),
ColumnFree: lambda volume: size_to_string(volume.bytesFree()),
ColumnAvailable: lambda volume: size_to_string(volume.bytesAvailable()),
ColumnIsReady: lambda volume: volume.isReady(),
ColumnIsReadOnly: lambda volume: volume.isReadOnly(),
ColumnIsValid: lambda volume: volume.isValid(),
}
columnNameMap = {
ColumnRootPath: "Root path",
ColumnName: "Volume Name",
ColumnDevice: "Device",
ColumnFileSystemName: "File system",
ColumnTotal: "Total",
ColumnFree: "Free",
ColumnAvailable: "Available",
ColumnIsReady: "Ready",
ColumnIsReadOnly: "Read-only",
ColumnIsValid: "Valid",
}
def __init__(self, parent=None):
super().__init__(parent)
self.volumes = core.StorageInfo.get_mounted_volumes()
def columnCount(self, parent=None):
return self.ColumnCount
def rowCount(self, parent):
if parent.isValid():
return 0
return len(self.volumes)
def data(self, index, role):
if not index.isValid():
return None
if role == constants.DISPLAY_ROLE:
volume = self.volumes[index.row()]
func = self.columnFuncMap.get(index.column())
if func is not None:
return func(volume)
elif role == constants.TOOLTIP_ROLE:
volume = self.volumes[index.row()]
tooltip = []
for column in range(self.ColumnCount):
label = self.columnNameMap.get(column)
value = self.columnFuncMap[column](volume)
tooltip.append(f"{label}: {value}")
return "\n".join(tooltip)
def headerData(self, section, orientation, role):
if orientation != constants.HORIZONTAL:
return None
if role != constants.DISPLAY_ROLE:
return None
return self.columnNameMap.get(section)
column_root_path = custom_models.ColumnItem(
name="Root path",
doc="Root path",
label=lambda volume: str(volume.get_root_path()),
)
column_volume_name = custom_models.ColumnItem(
name="Volume name",
doc="Volume name",
label=lambda volume: volume.name(),
)
column_volume_name = custom_models.ColumnItem(
name="Volume name",
doc="Volume name",
label=lambda volume: volume.name(),
)
column_volume_name = custom_models.ColumnItem(
name="Volume name",
doc="Volume name",
label=lambda volume: volume.name(),
)
column_device = custom_models.ColumnItem(
name="Device",
doc="Device",
label=lambda volume: volume.get_device(),
)
column_file_system_name = custom_models.ColumnItem(
name="File system",
doc="File system",
label=lambda volume: volume.get_file_system_type(),
)
column_total = custom_models.ColumnItem(
name="Total",
doc="Total",
label=lambda volume: size_to_string(volume.bytesTotal()),
)
column_free = custom_models.ColumnItem(
name="Free",
doc="Free",
label=lambda volume: size_to_string(volume.bytesFree()),
)
column_available = custom_models.ColumnItem(
name="Available",
doc="Available",
label=lambda volume: size_to_string(volume.bytesAvailable()),
)
column_ready = custom_models.ColumnItem(
name="Available",
doc="Available",
label=None,
checkstate=lambda volume: volume.isReady(),
)
column_readonly = custom_models.ColumnItem(
name="Read-only",
doc="Read-only",
label=None,
checkstate=lambda volume: volume.isReadOnly(),
)
if __name__ == "__main__":
import sys
from prettyqt import widgets
app = widgets.Application(sys.argv)
view = widgets.TreeView()
view.setModel(StorageInfoModel(view))
view.resize(640, 480)
view.set_selection_behaviour("rows")
for column in range(view.model().columnCount()):
view.resizeColumnToContents(column)
view.show()
app.main_loop() | prettyqt/custom_models/storageinfomodel.py | from __future__ import annotations
import math
from prettyqt import constants, core, custom_models
def size_to_string(size):
if size <= 0:
return "0 b"
decimals = 2
units = ["b", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]
power = int(math.log(size, 1024))
try:
unit = units[power]
except IndexError:
unit = units[-1]
power = len(units) - 1
if power == 0:
decimals = 0
normsize = size / math.pow(1024, power)
#: this should expand to "1.23 GB"
return "%0.*f %s" % (decimals, normsize, unit)
class StorageInfoModel(core.AbstractTableModel):
(
ColumnRootPath,
ColumnName,
ColumnDevice,
ColumnFileSystemName,
ColumnTotal,
ColumnFree,
ColumnAvailable,
ColumnIsReady,
ColumnIsReadOnly,
ColumnIsValid,
ColumnCount,
) = range(11)
columnFuncMap = {
ColumnRootPath: lambda volume: str(volume.get_root_path()),
ColumnName: lambda volume: volume.name(),
ColumnDevice: lambda volume: volume.get_device(),
ColumnFileSystemName: lambda volume: volume.get_file_system_type(),
ColumnTotal: lambda volume: size_to_string(volume.bytesTotal()),
ColumnFree: lambda volume: size_to_string(volume.bytesFree()),
ColumnAvailable: lambda volume: size_to_string(volume.bytesAvailable()),
ColumnIsReady: lambda volume: volume.isReady(),
ColumnIsReadOnly: lambda volume: volume.isReadOnly(),
ColumnIsValid: lambda volume: volume.isValid(),
}
columnNameMap = {
ColumnRootPath: "Root path",
ColumnName: "Volume Name",
ColumnDevice: "Device",
ColumnFileSystemName: "File system",
ColumnTotal: "Total",
ColumnFree: "Free",
ColumnAvailable: "Available",
ColumnIsReady: "Ready",
ColumnIsReadOnly: "Read-only",
ColumnIsValid: "Valid",
}
def __init__(self, parent=None):
super().__init__(parent)
self.volumes = core.StorageInfo.get_mounted_volumes()
def columnCount(self, parent=None):
return self.ColumnCount
def rowCount(self, parent):
if parent.isValid():
return 0
return len(self.volumes)
def data(self, index, role):
if not index.isValid():
return None
if role == constants.DISPLAY_ROLE:
volume = self.volumes[index.row()]
func = self.columnFuncMap.get(index.column())
if func is not None:
return func(volume)
elif role == constants.TOOLTIP_ROLE:
volume = self.volumes[index.row()]
tooltip = []
for column in range(self.ColumnCount):
label = self.columnNameMap.get(column)
value = self.columnFuncMap[column](volume)
tooltip.append(f"{label}: {value}")
return "\n".join(tooltip)
def headerData(self, section, orientation, role):
if orientation != constants.HORIZONTAL:
return None
if role != constants.DISPLAY_ROLE:
return None
return self.columnNameMap.get(section)
column_root_path = custom_models.ColumnItem(
name="Root path",
doc="Root path",
label=lambda volume: str(volume.get_root_path()),
)
column_volume_name = custom_models.ColumnItem(
name="Volume name",
doc="Volume name",
label=lambda volume: volume.name(),
)
column_volume_name = custom_models.ColumnItem(
name="Volume name",
doc="Volume name",
label=lambda volume: volume.name(),
)
column_volume_name = custom_models.ColumnItem(
name="Volume name",
doc="Volume name",
label=lambda volume: volume.name(),
)
column_device = custom_models.ColumnItem(
name="Device",
doc="Device",
label=lambda volume: volume.get_device(),
)
column_file_system_name = custom_models.ColumnItem(
name="File system",
doc="File system",
label=lambda volume: volume.get_file_system_type(),
)
column_total = custom_models.ColumnItem(
name="Total",
doc="Total",
label=lambda volume: size_to_string(volume.bytesTotal()),
)
column_free = custom_models.ColumnItem(
name="Free",
doc="Free",
label=lambda volume: size_to_string(volume.bytesFree()),
)
column_available = custom_models.ColumnItem(
name="Available",
doc="Available",
label=lambda volume: size_to_string(volume.bytesAvailable()),
)
column_ready = custom_models.ColumnItem(
name="Available",
doc="Available",
label=None,
checkstate=lambda volume: volume.isReady(),
)
column_readonly = custom_models.ColumnItem(
name="Read-only",
doc="Read-only",
label=None,
checkstate=lambda volume: volume.isReadOnly(),
)
if __name__ == "__main__":
import sys
from prettyqt import widgets
app = widgets.Application(sys.argv)
view = widgets.TreeView()
view.setModel(StorageInfoModel(view))
view.resize(640, 480)
view.set_selection_behaviour("rows")
for column in range(view.model().columnCount()):
view.resizeColumnToContents(column)
view.show()
app.main_loop() | 0.626353 | 0.394376 |
import click
import re
import sys
class Tester:
def __init__(self):
self.errors = []
def __str__(self):
str = ""
if self.errors:
str += f"{self.title} Errors\n"
for item in self.errors:
str += f"\t{item}\n"
return str
def __bool__(self):
return bool(self.errors)
def truncate_line(self, line):
trail = "..." if len(line) > 40 else ""
self.trunc = f"{line[:40]}{trail}"
def add_error(self, index, msg, origLine=None):
line = ""
if origLine:
trail = "..." if len(origLine) > 40 else ""
line = f": {origLine[:40]}{trail}"
self.errors.append(f"{index:5}: {msg}{line}")
class LineTester(Tester):
def __init__(self):
super().__init__()
self.title = "Base Class Only"
self.in_code_block = False
def test_lines(self, lines):
"""Keeps a state machine of whether or not we're in a code block as
some tests only want to look outside code blocks."""
self.lines = [line.strip() for line in lines]
for index, line in enumerate(lines, start=1):
self.truncate_line(line)
if line.startswith("```"):
self.in_code_block = not self.in_code_block
self.test_line(index, line)
class WordTester(Tester):
def __init__(self):
super().__init__()
self.title = "Base Class Only"
def test_lines(self, lines):
for index, line in enumerate(lines, start=1):
self.truncate_line(line)
line = line.strip()
for word in self.extract(line):
self.test_word(index, word, line)
def extract(self, text):
word_regex = re.compile(
r"([\w\-'’`]+)([.,?!-:;><@#$%^&*()_+=/\]\[])?"
) # noqa W605
previous = None
final_word = None
for match in word_regex.finditer(text):
try:
word = match.group(1)
if not word:
raise Exception("No word matches found. Bad regex?")
if previous:
yield previous
yield previous + " " + word
if match.group(2): # hit punctuation, yield word by itself
yield word
previous = None
else:
previous = word
final_word = previous
except IndexError:
word = match.group(0)
yield word
if final_word:
yield final_word
class TestLineLen(LineTester):
def __init__(self, limit):
super().__init__()
self.title = "Line Length"
# JHA TODO get these in command line args with defaults
self.error_len = limit
def test_line(self, index, line):
url_pat = r"""
\[ # literal opening square bracket
([\w\s]*) # the shown text from the line
\] # literal closing square bracket
\s* # optional whitespace (is this needed?)
\( # literal opening paren
([^\)]*) # group the url
\) # literal closing paren
"""
line = re.sub(url_pat, r"\g<1>", line, flags=re.VERBOSE)
if len(line) > self.error_len:
self.add_error(index, f"Line length: {len(line)}")
class TestBadWords(WordTester):
def __init__(self, use_extra_words):
super().__init__()
self.title = "Bad Word"
self.bad_words = [
"aka",
"etc",
"OK",
"very",
"actually",
"article",
]
if use_extra_words:
self.bad_words.extend(
[
"JHA",
"TODO",
"easy",
"simple",
"obvious",
"trivial",
"complex",
"difficult",
"unsurprising",
]
)
def test_word(self, index, word, line):
if word in self.bad_words:
self.add_error(index, f"Found '{word}' in line")
class TestPhrases(LineTester):
def __init__(self):
super().__init__()
self.title = "Bad Phrase"
self.bad_words = [
"exact same",
"built in",
"those of you",
"some of you",
"as you can imagine",
"our tutorial",
]
self.error_format = "Found '%s' in line"
def test_line(self, index, line):
for word in self.bad_words:
if word in line:
# self.add_error(index, f"Found '{word}' in line")
self.add_error(index, self.error_format % word)
class TestContractions(TestPhrases):
def __init__(self):
super().__init__()
self.error_format = "Found '%s' (should be a contraction) in line"
self.title = "Contraction"
self.bad_words = [
"has not",
"it is",
"that is",
"they are",
"they will",
"you will",
"you are",
]
class TestCodeFormatter(LineTester):
def __init__(self):
super().__init__()
self.title = "Code Formatter"
def test_line(self, index, line):
"""Tracks that all code blocks have formatters. Keeps a state machine
of whether or not we're in a code block as we only want to look for
formatters on starting lines."""
lline = line.lower()
if line.startswith("```") and self.in_code_block:
if len(line) == 3:
self.add_error(index, "Code block has no formatter")
if "c++" in line.lower():
self.add_error(
index,
"Code block has bad formatter (c++ " "instead of cpp)",
)
if "linenums=" in lline and 'linenums="' not in lline:
self.add_error(index, "Poorly formed linenums spec")
class TestLeadingColon(LineTester):
def __init__(self):
super().__init__()
self.title = "Colon"
self.in_code_block = False
def test_line(self, index, line):
"""ensures that line before a code block is blank and two lines before
ends with a colon."""
if line.startswith("```") and self.in_code_block:
"""Because we're using a 1-based index, the actual indices into
the self.lines array are offset by one."""
blank = self.lines[index - 2]
text = self.lines[index - 3]
# sanity check to avoid issues
if index < 3:
self.add_error(index, "code block starts before text!")
# previous line (n-2) must be blank
elif len(blank) > 0:
self.add_error(index, "line preceding code block must be blank")
# line before that (n-3) must have text ending in colon
elif len(text) == 0:
self.add_error(index, "two blank lines before code block")
elif text[-1] != ":":
self.add_error(
index,
"final text preceding code block must end in colon",
)
@click.command(context_settings=dict(help_option_names=["-h", "--help"]))
@click.option("-l", "--line-length", default=500)
@click.option("-j", "--jima", is_flag=True, help="use extra bad word list")
@click.argument("filename", type=str)
def rplint(line_length, jima, filename):
testers = [
TestLineLen(line_length),
TestBadWords(jima),
TestPhrases(),
TestContractions(),
TestCodeFormatter(),
TestLeadingColon(),
]
# testers = [TestLeadingColon(), ]
with open(filename) as infile:
lines = infile.readlines()
for tester in testers:
tester.test_lines(lines)
if tester:
print(tester)
if __name__ == "__main__":
rplint(sys.argv[1]) | rplint/__main__.py | import click
import re
import sys
class Tester:
def __init__(self):
self.errors = []
def __str__(self):
str = ""
if self.errors:
str += f"{self.title} Errors\n"
for item in self.errors:
str += f"\t{item}\n"
return str
def __bool__(self):
return bool(self.errors)
def truncate_line(self, line):
trail = "..." if len(line) > 40 else ""
self.trunc = f"{line[:40]}{trail}"
def add_error(self, index, msg, origLine=None):
line = ""
if origLine:
trail = "..." if len(origLine) > 40 else ""
line = f": {origLine[:40]}{trail}"
self.errors.append(f"{index:5}: {msg}{line}")
class LineTester(Tester):
def __init__(self):
super().__init__()
self.title = "Base Class Only"
self.in_code_block = False
def test_lines(self, lines):
"""Keeps a state machine of whether or not we're in a code block as
some tests only want to look outside code blocks."""
self.lines = [line.strip() for line in lines]
for index, line in enumerate(lines, start=1):
self.truncate_line(line)
if line.startswith("```"):
self.in_code_block = not self.in_code_block
self.test_line(index, line)
class WordTester(Tester):
def __init__(self):
super().__init__()
self.title = "Base Class Only"
def test_lines(self, lines):
for index, line in enumerate(lines, start=1):
self.truncate_line(line)
line = line.strip()
for word in self.extract(line):
self.test_word(index, word, line)
def extract(self, text):
word_regex = re.compile(
r"([\w\-'’`]+)([.,?!-:;><@#$%^&*()_+=/\]\[])?"
) # noqa W605
previous = None
final_word = None
for match in word_regex.finditer(text):
try:
word = match.group(1)
if not word:
raise Exception("No word matches found. Bad regex?")
if previous:
yield previous
yield previous + " " + word
if match.group(2): # hit punctuation, yield word by itself
yield word
previous = None
else:
previous = word
final_word = previous
except IndexError:
word = match.group(0)
yield word
if final_word:
yield final_word
class TestLineLen(LineTester):
def __init__(self, limit):
super().__init__()
self.title = "Line Length"
# JHA TODO get these in command line args with defaults
self.error_len = limit
def test_line(self, index, line):
url_pat = r"""
\[ # literal opening square bracket
([\w\s]*) # the shown text from the line
\] # literal closing square bracket
\s* # optional whitespace (is this needed?)
\( # literal opening paren
([^\)]*) # group the url
\) # literal closing paren
"""
line = re.sub(url_pat, r"\g<1>", line, flags=re.VERBOSE)
if len(line) > self.error_len:
self.add_error(index, f"Line length: {len(line)}")
class TestBadWords(WordTester):
def __init__(self, use_extra_words):
super().__init__()
self.title = "Bad Word"
self.bad_words = [
"aka",
"etc",
"OK",
"very",
"actually",
"article",
]
if use_extra_words:
self.bad_words.extend(
[
"JHA",
"TODO",
"easy",
"simple",
"obvious",
"trivial",
"complex",
"difficult",
"unsurprising",
]
)
def test_word(self, index, word, line):
if word in self.bad_words:
self.add_error(index, f"Found '{word}' in line")
class TestPhrases(LineTester):
def __init__(self):
super().__init__()
self.title = "Bad Phrase"
self.bad_words = [
"exact same",
"built in",
"those of you",
"some of you",
"as you can imagine",
"our tutorial",
]
self.error_format = "Found '%s' in line"
def test_line(self, index, line):
for word in self.bad_words:
if word in line:
# self.add_error(index, f"Found '{word}' in line")
self.add_error(index, self.error_format % word)
class TestContractions(TestPhrases):
def __init__(self):
super().__init__()
self.error_format = "Found '%s' (should be a contraction) in line"
self.title = "Contraction"
self.bad_words = [
"has not",
"it is",
"that is",
"they are",
"they will",
"you will",
"you are",
]
class TestCodeFormatter(LineTester):
def __init__(self):
super().__init__()
self.title = "Code Formatter"
def test_line(self, index, line):
"""Tracks that all code blocks have formatters. Keeps a state machine
of whether or not we're in a code block as we only want to look for
formatters on starting lines."""
lline = line.lower()
if line.startswith("```") and self.in_code_block:
if len(line) == 3:
self.add_error(index, "Code block has no formatter")
if "c++" in line.lower():
self.add_error(
index,
"Code block has bad formatter (c++ " "instead of cpp)",
)
if "linenums=" in lline and 'linenums="' not in lline:
self.add_error(index, "Poorly formed linenums spec")
class TestLeadingColon(LineTester):
def __init__(self):
super().__init__()
self.title = "Colon"
self.in_code_block = False
def test_line(self, index, line):
"""ensures that line before a code block is blank and two lines before
ends with a colon."""
if line.startswith("```") and self.in_code_block:
"""Because we're using a 1-based index, the actual indices into
the self.lines array are offset by one."""
blank = self.lines[index - 2]
text = self.lines[index - 3]
# sanity check to avoid issues
if index < 3:
self.add_error(index, "code block starts before text!")
# previous line (n-2) must be blank
elif len(blank) > 0:
self.add_error(index, "line preceding code block must be blank")
# line before that (n-3) must have text ending in colon
elif len(text) == 0:
self.add_error(index, "two blank lines before code block")
elif text[-1] != ":":
self.add_error(
index,
"final text preceding code block must end in colon",
)
@click.command(context_settings=dict(help_option_names=["-h", "--help"]))
@click.option("-l", "--line-length", default=500)
@click.option("-j", "--jima", is_flag=True, help="use extra bad word list")
@click.argument("filename", type=str)
def rplint(line_length, jima, filename):
testers = [
TestLineLen(line_length),
TestBadWords(jima),
TestPhrases(),
TestContractions(),
TestCodeFormatter(),
TestLeadingColon(),
]
# testers = [TestLeadingColon(), ]
with open(filename) as infile:
lines = infile.readlines()
for tester in testers:
tester.test_lines(lines)
if tester:
print(tester)
if __name__ == "__main__":
rplint(sys.argv[1]) | 0.400632 | 0.431644 |
import os, subprocess, glob, time, shutil
import argparse, uuid, json, csv
import logging
from logging.handlers import RotatingFileHandler
import Selenzy
from flask import Flask, flash, render_template, request, redirect, url_for, send_from_directory, jsonify
from flask_restful import Resource, Api
from flask import session
from werkzeug import secure_filename
import pandas as pd
import numpy as np
# NOTE(review): `global` at module level is a no-op -- it only has effect
# inside a function body.  The `session` used throughout is Flask's proxy
# imported above.
global session
# Flask application plus flask_restful API wrapper.
app = Flask(__name__)
api = Api(app)
app.config['SECRET_KEY'] = str(uuid.uuid4())  # fresh per-process secret for cookie signing
app.config['MARVIN'] = False  # when True, reactions are rendered as inline SVG via Marvin
app.config['KEEPDAYS'] = 10  # age in days before per-session upload folders are purged
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024  # 16 MB request size cap
# Upload file extensions accepted by allowed_file().
ALLOWED_EXTENSIONS = set(['txt', 'rxn', 'smi', 'smarts', 'smirks', 'csv', 'fasta', 'fas', 'fa'])
def arguments():
    """Parse the webserver's command-line options.

    Returns:
        argparse.Namespace with ``uploaddir``, ``datadir``, ``logdir``
        and the debug flag ``d``.
    """
    parser = argparse.ArgumentParser(description='Options for the webserver')
    parser.add_argument('-uploaddir', default='uploads', help='Upload folder')
    parser.add_argument('-datadir', default='data',
                        help='Data directory for required databases files')
    parser.add_argument('-logdir', default='log', help='Logging folder')
    parser.add_argument('-d', action='store_true',
                        help='Run in debug mode (no preload)')
    return parser.parse_args()
def allowed_file(filename):
    """Return True when *filename* carries an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
def file_path(uniqueid, filename):
    """Build the path UPLOAD_FOLDER/<uniqueid>/<filename> for a session file."""
    return os.path.join(app.config['UPLOAD_FOLDER'], uniqueid, filename)
def save_rxn(rxninfo):
    """Persist an uploaded reaction file into the current session folder.

    The upload is stored under UPLOAD_FOLDER/<uniqueid>/, sanitized with
    Selenzy.sanitizeRxn, recorded in the session as 'rxninfo' and returned.
    """
    global session
    filename = secure_filename(rxninfo.filename)
    try:
        uniquename = file_path(session['uniqueid'], filename)
    except KeyError:
        # First request of a new visitor: no session yet. Create one and
        # retry.  (Was a bare `except:`, which could also mask real errors.)
        init_session()
        uniquename = file_path(session['uniqueid'], filename)
    rxninfo.save(uniquename)
    outname = file_path(session['uniqueid'], session['uniqueid'])
    rxninfo = Selenzy.sanitizeRxn(uniquename, outname)
    session['rxninfo'] = rxninfo
    return rxninfo
def init_session():
    """Start a fresh user session.

    Purges stale upload folders, mints a new unique id, creates the
    per-session upload folder and resets the session state (reaction info,
    status, username and scoring weights).
    """
    global session
    maintenance(app.config['KEEPDAYS'])
    reset_session()
    uniqueid = session['uniqueid']
    uniquefolder = os.path.join(app.config['UPLOAD_FOLDER'], uniqueid)
    if not os.path.exists(uniquefolder):
        os.mkdir(uniquefolder)
    session['uniquefolder'] = uniquefolder
    # BUG FIX: was session['rxnifo'] (typo), so a stale 'rxninfo' value from
    # a previous session was never cleared.  'rxninfo' is the key read and
    # written everywhere else in this module.
    session['rxninfo'] = None
    session['status'] = False
    session['username'] = session['uniqueid']
    # Restart the Score for each new session
    session['SCORE'] = Selenzy.seqScore()
def reset_session():
    """Assign a brand-new unique identifier to the current session."""
    global session
    new_id = str(uuid.uuid4())
    app.logger.info( 'New session: %s' % (new_id,) )
    session['uniqueid'] = new_id
def run_session(rxntype, rxninfo, targets, direction, host, fp, noMSA):
    """Run a Selenzy analysis for the current session.

    Args mirror the query form: rxntype ('smarts'/'rxn'), rxninfo (the query
    itself), targets (number of sequences to return), direction, host
    (organism id), fp (fingerprint name), noMSA (skip the MSA step).

    Returns (data, csvfile, uniqueid) on success.  NOTE(review): when
    Selenzy.analyse reports failure this falls off the end and returns None
    implicitly -- callers must handle that.
    """
    global session
    uniqueid = session['uniqueid']
    uniquefolder = session['uniquefolder']
    csvfile = "selenzy_results.csv"
    app.logger.info( 'Run session: %s' % (uniqueid,) )
    # analyse() also refreshes the cached tables (pc) stored in app.config.
    success, app.config['TABLES'] = Selenzy.analyse(['-'+rxntype, rxninfo],
                                                    targets,
                                                    app.config['DATA_FOLDER'],
                                                    uniquefolder,
                                                    csvfile,
                                                    pdir = int(direction),
                                                    host = host,
                                                    fp = fp,
                                                    NoMSA = noMSA,
                                                    pc = app.config['TABLES']
                                                    ) # this creates CSV file in Uploads directory
    if success:
        # Apply the session's current score weights to the freshly written CSV.
        data = Selenzy.updateScore(file_path(uniqueid, csvfile), session['SCORE'])
        return data, csvfile, uniqueid
def retrieve_session(csvinfo):
    """Reload a previously exported results CSV into the current session.

    Saves the uploaded CSV into the session folder and returns
    (data, csvfile, uniqueid), where data is a DataFrame with a 1-based index.
    """
    global session
    uniqueid = session['uniqueid']
    uniquefolder = os.path.join(app.config['UPLOAD_FOLDER'], uniqueid)
    if not os.path.exists(uniquefolder):
        os.mkdir(uniquefolder)
    filename = secure_filename(csvinfo.filename)
    uniquename = file_path(uniqueid, filename)
    csvinfo.save(uniquename)
    data = pd.read_csv(uniquename)
    data.index = data.index + 1
    csvfile = os.path.basename(uniquename)
    # BUG FIX: DataFrame.rename_axis is not in-place (it returns a new
    # object), so the original call here was a silent no-op.
    data = data.rename_axis('Select', axis="columns")
    return data, csvfile, uniqueid
def maintenance(expDay=10):
    """Delete per-session upload folders older than *expDay* days.

    Folders named 'debug*' are always kept, and only folders that actually
    contain .rxn uploads are touched (safety check).  Deletion is
    best-effort: filesystem errors are ignored, not raised.
    """
    secs = expDay*24*60*60
    for folder in glob.glob(os.path.join(app.config['UPLOAD_FOLDER'], '*')):
        name = os.path.basename(folder)
        if name.startswith('debug'):
            continue
        modiftime = os.path.getmtime(folder)
        lapse = time.time() - modiftime
        if lapse > secs:
            # Double check that this an upload folder containing reactions
            if len( glob.glob( os.path.join(folder, '*.rxn') ) ) > 0:
                # Narrowed from bare `except:` -- only filesystem errors
                # (races, permissions) are expected and safe to ignore here.
                try:
                    for x in glob.glob(os.path.join(folder, '*')):
                        os.unlink(x)
                except OSError:
                    pass
                try:
                    os.rmdir(folder)
                    app.logger.info( 'Clean up: %s' % (folder,) )
                except OSError:
                    pass
class RestGate(Resource):
    """REST entry point: reports static service metadata."""
    def get(self):
        """Return the application name, version and author."""
        info = {'app': 'Selenzy', 'version': '1.0', 'author': 'Synbiochem'}
        return info
class RestQuery(Resource):
    """ REST interface to Selenzy, by default it does not run the MSA to be faster.
    We init an independent session for the REST request."""
    def post(self):
        """Handle a JSON query.

        Accepts either a 'smarts' query directly, or a 'db' + 'rxnid' pair
        which is first resolved to a SMARTS via the preloaded tables.
        Optional keys: 'targets', 'direction', 'noMSA', 'host', 'fp', 'score'.
        Responds with the analysis results as JSON ('data' is None on error).
        """
        global session
        args = request.json
        # Each REST call gets its own independent session/upload folder.
        init_session()
        if 'rxnid' in args and 'db' in args and 'smarts' not in args:
            """ Retrieve the SMARTS from the database id """
            db = args['db']
            rxnid = db+':'+args['rxnid']
            if rxnid in app.config['TABLES'].rxnref and app.config['TABLES'].rxnref[rxnid] in app.config['TABLES'].smir:
                mnxrid = app.config['TABLES'].rxnref[rxnid]
                smarts = app.config['TABLES'].smir[ mnxrid ][0]
                # Flip to the reverse SMARTS when the table marks the
                # reaction direction as '-1'.
                if mnxrid in app.config['TABLES'].rxndir:
                    if app.config['TABLES'].rxndir[mnxrid] == '-1':
                        smarts = app.config['TABLES'].smir[ mnxrid ][1]
                # NOTE(review): bare except -- a sanitization failure silently
                # leaves 'smarts' unset, and the request falls through to the
                # error response below.
                try:
                    outname = file_path(session['uniqueid'], session['uniqueid'])
                    rxninfo = Selenzy.sanitizeSmarts(smarts, outname)
                    args['smarts'] = rxninfo
                except:
                    pass
        if 'smarts' in args:
            """ Submit SMARTS query """
            rxntype = 'smarts'
            rxninfo = args['smarts']
            # Defaults below mirror the web form's defaults.
            if 'targets' in args:
                targets = args['targets']
            else:
                targets = '50'
            if 'direction' in args:
                direction = int(args['direction'])
            else:
                direction = 0
            if 'noMSA' in args:
                noMSA = args['noMSA']
            else:
                noMSA = True
            if 'host' in args:
                host = args['host']
            else:
                host = '83333'
            if 'fp' in args:
                fp = args['fp']
            else:
                fp = 'RDK'
            if 'score' in args:
                session['SCORE'] = Selenzy.seqScore(args['score'])
            # NOTE(review): bare except turns any analysis failure into a
            # generic {'data': None} response, hiding the cause.
            try:
                if isinstance(rxninfo, (list, tuple) ):
                    # Batch mode: one analysis per query, results collected
                    # into a single DataFrame.
                    data = []
                    for instance in rxninfo:
                        dat, csvfile, sessionid = run_session(rxntype, instance, targets, direction, host, fp, noMSA)
                        data.append(dat)
                    data = pd.DataFrame(data)
                else:
                    data, csvfile, sessionid = run_session(rxntype, rxninfo, targets, direction, host, fp, noMSA)
                return jsonify({'app': 'Selenzy', 'version': '1.0', 'author': 'Synbiochem', 'data': data.to_json()})
            except:
                return jsonify({'app': 'Selenzy', 'version': '1.0', 'author': 'Synbiochem', 'data': None})
        else:
            return jsonify({'app': 'Selenzy', 'version': '1.0', 'author': 'Synbiochem', 'data': None})
class RestSource(Resource):
    """REST interface: lists the available host organisms."""
    def get(self):
        """Return a mapping of organism name -> organism id from the tables."""
        org_table = app.config['ORG']
        orgs = {org_table[seq][1]: org_table[seq][0] for seq in org_table}
        return jsonify({'app': 'Selenzy', 'version': '1.0', 'author': 'Synbiochem', 'data': orgs})
class RestFinger(Resource):
    """REST interface: lists the available molecular fingerprints."""
    def get(self):
        """Return the fingerprint names known to Selenzy."""
        available = Selenzy.availableFingerprints()
        payload = {'app': 'Selenzy', 'version': '1.0', 'author': 'Synbiochem',
                   'data': list(available)}
        return jsonify(payload)
# Register the REST endpoints on the flask_restful API.
api.add_resource(RestGate, '/REST')
api.add_resource(RestQuery, '/REST/Query')
api.add_resource(RestSource, '/REST/Source')
api.add_resource(RestFinger, '/REST/Fingerprints')
@app.errorhandler(404)
def page_not_found(e):
    """Send any unknown URL back to the main upload form."""
    return redirect(url_for('upload_form'))
@app.route('/')
def upload_form():
    """Main page: render the query form, or bounce to /login when no user."""
    if 'username' not in session:
        return redirect(url_for('login'))
    return render_template("my_form.html", username=session['username'], fingerprints=Selenzy.availableFingerprints())
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Start a session and redirect to the upload form.

    Debug mode logs in as 'debug'; a POST takes the submitted username;
    any other GET creates an anonymous session.
    """
    if app.debug == True:
        session['username'] = 'debug'
        init_session()
        return redirect(url_for('upload_form'))
    if request.method == 'POST':
        session['username'] = request.form['username']
        init_session()
        return redirect(url_for('upload_form'))
    else:
        init_session()
        return redirect(url_for('upload_form'))
    # NOTE(review): dead code -- every branch above returns, so this login
    # form is never rendered.  Either remove it or drop the GET redirect.
    return '''
    <form method="post">
        <p><input type=text name=username>
        <p><input type=submit value=Login>
    </form>
    '''
@app.route('/logout')
def logout():
    """Forget the current user and return to the login screen."""
    target = url_for('login')
    session.pop('username', None)
    return redirect(target)
@app.route('/msa', methods=['POST'])
def post_msa():
    """ Post safely the MSA.

    Returns the alignment and guide tree for the given session id as JSON;
    redirects to the upload form when the files do not exist.
    """
    if request.method == 'POST':
        sessionid = json.loads(request.values['sessionid'])
        msafile = os.path.join(app.config['UPLOAD_FOLDER'], sessionid, 'sequences_aln.fasta')
        treefile = os.path.join(app.config['UPLOAD_FOLDER'], sessionid, 'sequences.dnd')
        if os.path.exists(msafile) and os.path.exists(treefile):
            # NOTE(review): files opened without a context manager and never
            # closed explicitly; relies on refcounting to release handles.
            msa = open(msafile).readlines()
            tree = open(treefile).readlines()
            return json.dumps({'msa': ''.join(msa), 'tree': ' '.join(tree)})
    return redirect ( url_for('upload_form') )
@app.route('/msaview', methods=['GET'])
def display_msa():
    """ Display the MSA.

    Renders the MSA viewer page for the session given via ?id=...;
    redirects to the upload form when no alignment exists.
    """
    if request.method == 'GET':
        if 'id' in request.values:
            sessionid = request.values['id']
            msafile = os.path.join(app.config['UPLOAD_FOLDER'], sessionid, 'sequences_aln.fasta')
            if os.path.exists(msafile):
                return render_template('viewmsa.html', sessionid=sessionid)
    return redirect ( url_for('upload_form') )
def _render_reaction(rxninfo, marvin):
    """Render a sanitized reaction query.

    Returns (data, success, size): data is inline SVG markup when marvin is
    True, otherwise a '/results/...' URL path to the rendered image; success
    is False when rendering produced no output; size is the image size.
    """
    size = (600,400)
    success = True
    if marvin:
        svgstream = Selenzy.display_reaction(rxninfo, outfolder=session['uniquefolder'],
                                             outname = str(uuid.uuid4()), marvin=True)
        data = svgstream.decode('utf-8')
        if len(data) == 0:
            success = False
    else:
        outfile, size = Selenzy.display_reaction(rxninfo, outfolder=session['uniquefolder'],
                                                 outname = str(uuid.uuid4()), marvin=False)
        if len(outfile) == 0:
            success = False
        data = os.path.join('/results', session['uniqueid'], 'files', os.path.basename(outfile))
    return data, success, size


def _store_query(rxninfo):
    """Record the sanitized query in the session and flag it as ready."""
    session['rxninfo'] = rxninfo
    session['rxntype'] = 'smarts'
    session['status'] = True


@app.route('/display', methods=['POST'])
def display_reaction(marvin=app.config['MARVIN']):
    """Validates the query and displays the reaction.

    Accepts one of three query forms -- an uploaded reaction file, a SMARTS
    string, or a reaction database id -- sanitizes it, renders it, and
    returns a JSON payload describing the rendering.
    """
    if request.method == 'POST':
        size = (600,400)
        if 'file' in request.files and len(request.files['file'].filename) > 0:
            fileinfo = request.files['file']
            if fileinfo.filename == '' or not allowed_file(fileinfo.filename):
                flash("No file selected")
                return redirect (request.url)
            rxninfo = save_rxn(fileinfo)
            success = True
            if len(rxninfo) == 0:
                success = False
                data = ''
            else:
                data, success, size = _render_reaction(rxninfo, marvin)
            _store_query(rxninfo)
            # BUG FIX: the original unconditionally reset success to True at
            # this point, masking rendering/sanitization failures; the SMARTS
            # and rxnid branches do not do that.
            return json.dumps( {'data': data, 'status': session['status'], 'success': success, 'svg': marvin, 'size': size, 'smarts': rxninfo} )
        elif len(request.form['smarts']) > 0:
            outname = file_path(session['uniqueid'], session['uniqueid'])
            rxninfo = Selenzy.sanitizeSmarts(request.form['smarts'], outname)
            data, success, size = _render_reaction(rxninfo, marvin)
            _store_query(rxninfo)
            return json.dumps( {'data': data, 'status': session['status'], 'success': success, 'svg': marvin, 'size': size, 'smarts': rxninfo} )
        elif len(request.form['rxnid']) > 0:
            db = request.form['rdb']
            rxnid = db+':'+request.form['rxnid']
            if rxnid in app.config['TABLES'].rxnref and app.config['TABLES'].rxnref[rxnid] in app.config['TABLES'].smir:
                mnxrid = app.config['TABLES'].rxnref[rxnid]
                smarts = app.config['TABLES'].smir[ mnxrid ][0]
                # Use the reverse SMARTS when the direction table says '-1'.
                if mnxrid in app.config['TABLES'].rxndir:
                    if app.config['TABLES'].rxndir[mnxrid] == '-1':
                        smarts = app.config['TABLES'].smir[ mnxrid ][1]
                outname = file_path(session['uniqueid'], session['uniqueid'])
                rxninfo = Selenzy.sanitizeSmarts(smarts, outname)
                data, success, size = _render_reaction(rxninfo, marvin)
                _store_query(rxninfo)
                # This branch reports the raw SMARTS string (not rxninfo),
                # matching the original behaviour.
                return json.dumps( {'data': data, 'status': session['status'], 'success': success, 'svg': marvin, 'size': size, 'smarts':smarts} )
            # Unknown reaction id: fall through (implicit None), as before.
@app.route('/sorter', methods=['POST'])
def sort_table():
    """Re-sort the results table by the selected column ids, refresh the
    MSA ordering, and return the updated table as HTML."""
    if request.method == 'POST':
        jfilter = json.loads(request.values.get('filter'))
        try:
            filt = [int(x) for x in jfilter]
        except (TypeError, ValueError):
            # Malformed filter: nothing to do (was a bare `except:`).
            return
        # Renamed from `session` to avoid shadowing the Flask session proxy.
        sessionid = json.loads(request.values.get('session'))
        csvname = os.path.basename(json.loads(request.values.get('csv')))
        csvfile = os.path.join(app.config['UPLOAD_FOLDER'], sessionid, csvname)
        outdir = os.path.join(app.config['UPLOAD_FOLDER'], sessionid)
        head, rows = Selenzy.read_csv(csvfile)
        sortrows = Selenzy.sort_rows(rows, filt)
        Selenzy.updateMSA(outdir, sortrows)
        Selenzy.write_csv(csvfile, head, sortrows)
        data = pd.read_csv(csvfile)
        data.index = data.index + 1
        # BUG FIX: rename_axis is not in-place; the original discarded its
        # result, so the axis label was never applied.
        data = data.rename_axis('Select', axis="columns")
        return json.dumps( {'data': {'csv': data.to_html(), 'filter': filt}} )
@app.route('/adder', methods=['POST'])
def add_rows():
    """Append user-supplied FASTA sequences to the results table and return
    the refreshed table as HTML."""
    if request.method == 'POST':
        if 'session' in request.values:
            sessionid = request.values['session']
        else:
            flash("Bad request")
            return redirect (request.url)
        if 'fasta' in request.files and len(request.files['fasta'].filename) > 0:
            fileinfo = request.files['fasta']
            if fileinfo.filename == '' or not allowed_file(fileinfo.filename):
                flash("No file selected")
                return redirect (request.url)
            uniquefolder = os.path.join(app.config['UPLOAD_FOLDER'], sessionid)
            fastafile = sessionid+'.fasta'
            uniquename = os.path.join(uniquefolder, fastafile)
            fileinfo.save(uniquename)
            # Only run the MSA when a previous alignment already exists.
            dndFile = os.path.join(uniquefolder, 'sequences.dnd')
            noMSA = not os.path.exists(dndFile)
            csvfile = Selenzy.extend_sequences('sequences.fasta', fastafile, uniquefolder, noMSA)
            # BUG FIX: the original called file_path(uniquefolder, csvfile),
            # which prepends UPLOAD_FOLDER a second time, and then read
            # `csvfile` relative to the CWD.  Build the path inside the
            # session folder once and use it for both operations.
            # NOTE(review): assumes extend_sequences returns a file name
            # relative to uniquefolder -- confirm against Selenzy.
            csvpath = os.path.join(uniquefolder, csvfile)
            Selenzy.updateScore(csvpath, session['SCORE'])
            data = pd.read_csv(csvpath)
            data.index = data.index + 1
            # BUG FIX: rename_axis is not in-place; assign its result.
            data = data.rename_axis('Select', axis="columns")
            # TO DO: update fasta file
            return json.dumps( {'data': {'csv': data.to_html()}} )
@app.route('/remover', methods=['POST'])
def delete_rows():
    """Delete the selected rows from the results table, rewrite the FASTA
    files and, when an MSA exists, recompute the conservation scores.

    POST fields: 'filter' (JSON list of 1-based row numbers), 'session'
    (JSON session id), 'csv' (JSON csv file name).
    """
    if request.method == 'POST':
        selrows = json.loads(request.values.get('filter'))
        # Renamed from 'session' so the flask session import is not shadowed
        sessionid = json.loads(request.values.get('session'))
        csvname = os.path.basename(json.loads(request.values.get('csv')))
        outdir = os.path.join(app.config['UPLOAD_FOLDER'], sessionid)
        csvfile = os.path.join(outdir, csvname)
        head, rows = Selenzy.read_csv(csvfile)
        seqcol = head.index('Seq. ID')
        # 1-based client row numbers -> 0-based indices; a set makes the
        # membership test below O(1) instead of scanning a list per row.
        filt = set()
        for i in selrows:
            try:
                filt.add(int(i) - 1)
            except (TypeError, ValueError):
                continue
        newtargets = []
        newrows = []
        for j, row in enumerate(rows):
            if j not in filt:
                newtargets.append(row[seqcol])
                newrows.append(row)
        fastaFile = os.path.join(outdir, "sequences.fasta")
        Selenzy.write_fasta(fastaFile, newtargets, app.config['TABLES'])
        # Avoid issues with sequence ids
        fastaShortNameFile = os.path.join(outdir, "seqids.fasta")
        Selenzy.write_fasta(fastaShortNameFile, newtargets, app.config['TABLES'], short=True)
        # Recompute MSA if exists
        dndFile = os.path.join(outdir, 'sequences.dnd')
        if os.path.exists(dndFile):
            cons = Selenzy.doMSA(fastaShortNameFile, outdir)
            for row in newrows:
                try:
                    row[head.index('Consv. Score')] = cons[row[seqcol]]
                except (KeyError, ValueError, IndexError):
                    # sequence absent from the new alignment: keep old score
                    pass
        Selenzy.write_csv(csvfile, head, newrows)
        data = pd.read_csv(csvfile)
        data.index = data.index + 1
        # BUG FIX: rename_axis returns a new frame; the original discarded it
        data = data.rename_axis('Select', axis="columns")
        return json.dumps( {'data': {'csv': data.to_html()}} )
@app.route('/scorer', methods=['POST'])
def score_table():
    """Re-score the results table with the weights posted in 'score'."""
    if request.method == 'POST':
        weights = json.loads(request.values.get('score'))
        sessid = json.loads(request.values.get('session'))
        csvname = os.path.basename(json.loads(request.values.get('csv')))
        target = os.path.join(app.config['UPLOAD_FOLDER'], sessid, csvname)
        session['SCORE'] = Selenzy.seqScore(weights)
        table = Selenzy.updateScore(target, session['SCORE'])
        return json.dumps( {'data': {'csv': table.to_html()}} )
@app.route('/debug', methods=['GET'])
def show_table():
    """Render the canned 'debug' session results (debug mode only)."""
    if app.debug:
        csvfile = os.path.join(app.config['UPLOAD_FOLDER'], 'debug', 'selenzy_results.csv')
        data = Selenzy.updateScore(csvfile, session['SCORE'])
        sessionid = 'debug'
        # BUG FIX: rename_axis is not in-place; keep its return value
        data = data.rename_axis('Select', axis="columns")
        return render_template('results.html', tables=data.to_html(), csvfile=csvfile, sessionid=sessionid,
                               flags={'fasta': False, 'msa': False}, score=session['SCORE'])
    else:
        return redirect ( url_for('upload_form') )
@app.route('/results', methods=['GET', 'POST'])
def upload_file():
    """Main results endpoint.

    POST: run (or re-run) the analysis stored in the current session, or
    load an uploaded results CSV.  GET: answer the ajax 'fasta' poll, or
    start an independent session straight from query parameters.
    """
    if request.method == 'POST':
        """ The POST request should come from an already initalised session """
        if 'uniqueid' not in session:
            return redirect ( url_for('upload_form') )
        # check if post request has smarts part
        if 'csv' in request.files and len(request.files['csv'].filename) > 0:
            fileinfo = request.files['csv']
            if fileinfo.filename == '' or not allowed_file(fileinfo.filename):
                flash("No file selected")
                return redirect (request.url)
            data, csvfile, sessionid = retrieve_session(fileinfo)
            return render_template('results.html', tables=data.to_html(), csvfile=csvfile, sessionid=sessionid, flags={'fasta': False, 'msa': False}, score=session['SCORE'])
        else:
            try:
                rxninfo = session['rxninfo']
                rxntype = session['rxntype']
            except:
                # No query stored in the session: force a fresh login
                return redirect(url_for('login'))
            direction = 0
            noMSA = False
            targets = request.form['targets']
            host = request.form['host']
            fp = request.form['finger']
            if request.form.get('direction'):
                direction = 1
            if request.form.get('noMSA'):
                noMSA = True
            try:
                data, csvfile, sessionid = run_session(rxntype, rxninfo, targets, direction, host, fp, noMSA)
                return render_template('results.html', tables=data.to_html(), csvfile=csvfile, sessionid=sessionid, flags={'fasta': True, 'msa': not noMSA}, score=session['SCORE'])
            except:
                # Any analysis failure sends the user back to the form
                return redirect( url_for("upload_form") )
    elif request.method == 'GET':
        if request.args.get('fasta') is not None:
            """ This is a request that is handled by ajax, do not return anything """
            sessionid = request.args.get('session')
            return ('', 204)
        else:
            """ A GET request would require an independently initialised session """
            init_session()
            smarts = request.args.get('smarts')
            if smarts is None:
                return redirect( url_for("upload_form") )
            host = request.args.get('host')
            if host is None:
                host = '83333'
            fp = request.args.get('fp')
            if fp is None:
                fp = 'RDK'
            # Fixed defaults for a direct-link query
            rxntype = 'smarts'
            rxninfo = smarts
            direction = 0
            noMSA = False
            targets = 20
            session['rxninfo'] = rxninfo
            session['rxntype'] = rxntype
            try:
                data, csvfile, sessionid = run_session(rxntype, rxninfo, targets, direction, host, fp, noMSA)
                return render_template('results.html', tables=data.to_html(), csvfile=csvfile, sessionid=sessionid,
                                       flags={'fasta': True, 'msa': not noMSA}, score=session['SCORE'])
            except:
                return redirect( url_for("upload_form") )
    return redirect( url_for("upload_form") )
@app.route('/results/<sessionid>/files/<filename>')
def results_file(sessionid,filename):
    # Serve a generated artefact (image, csv, ...) from the session folder
    return send_from_directory(os.path.join(app.config['UPLOAD_FOLDER'], sessionid), filename)
# Reconfigure for gunicorn
if __name__== "__main__": #only run server if file is called directly
    arg = arguments()
    # Resolve all working directories to absolute paths
    app.config['UPLOAD_FOLDER'] = os.path.abspath(arg.uploaddir)
    app.config['LOG_FOLDER'] = os.path.abspath(arg.logdir)
    app.config['DATA_FOLDER'] = os.path.abspath(arg.datadir)
    # NOTE(review): PRELOAD is True on both branches; '-d' only toggles DEBUG
    if arg.d:
        app.config['DEBUG'] = True
        app.config['PRELOAD'] = True
    else:
        app.config['DEBUG'] = False
        app.config['PRELOAD'] = True
    app.config['ORG'] = Selenzy.seqOrganism(arg.datadir, "seq_org.tsv")
    if app.config['PRELOAD']:
        # Load the Selenzy reference tables once at startup
        app.config['TABLES'] = Selenzy.readData(arg.datadir)
    else:
        app.config['TABLES'] = None
    # Log both werkzeug and application messages to a small rotating file
    handler = RotatingFileHandler(os.path.join(app.config['LOG_FOLDER'], 'selenzy.log'), maxBytes=10000, backupCount=1)
    log = logging.getLogger('werkzeug')
    log.addHandler(handler)
    app.logger.addHandler(handler)
    app.run(host="0.0.0.0",port=5000, debug=app.config['DEBUG'], threaded=True)
# app.run(port=5000, debug=True) | selenzy_wrapper/selenzy/flaskform.py | import os, subprocess, glob, time, shutil
import argparse, uuid, json, csv
import logging
from logging.handlers import RotatingFileHandler
import Selenzy
from flask import Flask, flash, render_template, request, redirect, url_for, send_from_directory, jsonify
from flask_restful import Resource, Api
from flask import session
from werkzeug import secure_filename
import pandas as pd
import numpy as np
global session
app = Flask(__name__)
api = Api(app)
app.config['SECRET_KEY'] = str(uuid.uuid4())
app.config['MARVIN'] = False
app.config['KEEPDAYS'] = 10
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
ALLOWED_EXTENSIONS = set(['txt', 'rxn', 'smi', 'smarts', 'smirks', 'csv', 'fasta', 'fas', 'fa'])
def arguments():
    """Parse the webserver's command-line options and return the namespace."""
    parser = argparse.ArgumentParser(description='Options for the webserver')
    parser.add_argument('-uploaddir', default='uploads', help='Upload folder')
    parser.add_argument('-datadir', default='data',
                        help='Data directory for required databases files')
    parser.add_argument('-logdir', default='log', help='Logging folder')
    parser.add_argument('-d', action='store_true',
                        help='Run in debug mode (no preload)')
    return parser.parse_args()
def allowed_file(filename):
    """Return True if *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
def file_path(uniqueid, filename):
    """Absolute path of *filename* inside the upload folder for *uniqueid*."""
    return os.path.join(app.config['UPLOAD_FOLDER'], uniqueid, filename)
def save_rxn(rxninfo):
    """Save an uploaded reaction file into the session folder and sanitize it.

    Returns the sanitized reaction info from Selenzy, which is also stored
    in the session under 'rxninfo'.
    """
    global session
    filename = secure_filename(rxninfo.filename)
    try:
        uniquename = file_path(session['uniqueid'], filename)
    except:
        # No session yet (e.g. expired cookie): initialise one and retry
        init_session()
        uniquename = file_path(session['uniqueid'], filename)
    rxninfo.save(uniquename)
    outname = file_path(session['uniqueid'], session['uniqueid'])
    rxninfo = Selenzy.sanitizeRxn(uniquename, outname)
    session['rxninfo'] = rxninfo
    return rxninfo
def init_session():
    """Initialise a fresh user session: clean old uploads, assign a new
    unique id, create its upload folder and reset the scoring weights."""
    global session
    maintenance(app.config['KEEPDAYS'])
    reset_session()
    uniqueid = session['uniqueid']
    uniquefolder = os.path.join(app.config['UPLOAD_FOLDER'], uniqueid)
    if not os.path.exists(uniquefolder):
        os.mkdir(uniquefolder)
    session['uniquefolder'] = uniquefolder
    # BUG FIX: the key was misspelt 'rxnifo'; every reader uses 'rxninfo'
    session['rxninfo'] = None
    session['status'] = False
    session['username'] = session['uniqueid']
    # Restart the Score for each new session
    session['SCORE'] = Selenzy.seqScore()
def reset_session():
    """Assign a brand-new unique id to the current session and log it."""
    global session
    fresh_id = str(uuid.uuid4())
    app.logger.info( 'New session: %s' % (fresh_id,) )
    session['uniqueid'] = fresh_id
def run_session(rxntype, rxninfo, targets, direction, host, fp, noMSA):
    """Run a Selenzy analysis for the current session.

    Returns (scored DataFrame, csv file name, session id).

    Raises RuntimeError when the analysis fails.  (The original silently
    fell off the end and returned None, which made every caller blow up
    with an opaque TypeError on tuple unpacking; callers already wrap
    this call in try/except, so raising is backward compatible.)
    """
    global session
    uniqueid = session['uniqueid']
    uniquefolder = session['uniquefolder']
    csvfile = "selenzy_results.csv"
    app.logger.info( 'Run session: %s' % (uniqueid,) )
    success, app.config['TABLES'] = Selenzy.analyse(['-'+rxntype, rxninfo],
                                                    targets,
                                                    app.config['DATA_FOLDER'],
                                                    uniquefolder,
                                                    csvfile,
                                                    pdir = int(direction),
                                                    host = host,
                                                    fp = fp,
                                                    NoMSA = noMSA,
                                                    pc = app.config['TABLES']
    )  # this creates CSV file in Uploads directory
    if success:
        data = Selenzy.updateScore(file_path(uniqueid, csvfile), session['SCORE'])
        return data, csvfile, uniqueid
    raise RuntimeError('Selenzy analysis failed for session %s' % uniqueid)
def retrieve_session(csvinfo):
    """Store an uploaded results CSV in the session folder and load it.

    Returns (DataFrame, csv file name, session id).
    """
    global session
    uniqueid = session['uniqueid']
    uniquefolder = os.path.join(app.config['UPLOAD_FOLDER'], uniqueid)
    if not os.path.exists(uniquefolder):
        os.mkdir(uniquefolder)
    filename = secure_filename(csvinfo.filename)
    uniquename = file_path(uniqueid, filename)
    csvinfo.save(uniquename)
    data = pd.read_csv(uniquename)
    data.index = data.index + 1
    csvfile = os.path.basename(uniquename)
    # BUG FIX: rename_axis returns a new frame; the original discarded it
    data = data.rename_axis('Select', axis="columns")
    return data, csvfile, uniqueid
def maintenance(expDay=10):
    """Best-effort cleanup: delete upload folders older than *expDay* days.

    Folders named 'debug*' are always kept, and only folders that contain
    at least one .rxn file are treated as upload folders and removed.
    """
    secs = expDay*24*60*60
    for folder in glob.glob(os.path.join(app.config['UPLOAD_FOLDER'], '*')):
        name = os.path.basename(folder)
        if name.startswith('debug'):
            continue
        modiftime = os.path.getmtime(folder)
        lapse = time.time() - modiftime
        if lapse > secs:
            # Double check that this an upload folder containing reactions
            if len( glob.glob( os.path.join(folder, '*.rxn') ) ) > 0:
                try:
                    for x in glob.glob(os.path.join(folder, '*')):
                        os.unlink(x)
                except:
                    # best effort: files in use or already gone are ignored
                    pass
                try:
                    os.rmdir(folder)
                    app.logger.info( 'Clean up: %s' % (folder,) )
                except:
                    pass
class RestGate(Resource):
    """ REST interface, returns api info """
    def get(self):
        # Static service descriptor
        info = {'app': 'Selenzy', 'version': '1.0', 'author': 'Synbiochem'}
        return info
class RestQuery(Resource):
    """ REST interface to Selenzy, by default it does not run the MSA to be faster.
    We init an independent session for the REST request."""
    def post(self):
        # The JSON body carries either a 'smarts' query directly, or a
        # ('db', 'rxnid') pair that is resolved to a SMARTS below.
        global session
        args = request.json
        init_session()
        if 'rxnid' in args and 'db' in args and 'smarts' not in args:
            """ Retrieve the SMARTS from the database id """
            db = args['db']
            rxnid = db+':'+args['rxnid']
            if rxnid in app.config['TABLES'].rxnref and app.config['TABLES'].rxnref[rxnid] in app.config['TABLES'].smir:
                mnxrid = app.config['TABLES'].rxnref[rxnid]
                smarts = app.config['TABLES'].smir[ mnxrid ][0]
                # When the stored direction is '-1', use the reverse SMARTS
                if mnxrid in app.config['TABLES'].rxndir:
                    if app.config['TABLES'].rxndir[mnxrid] == '-1':
                        smarts = app.config['TABLES'].smir[ mnxrid ][1]
                try:
                    outname = file_path(session['uniqueid'], session['uniqueid'])
                    rxninfo = Selenzy.sanitizeSmarts(smarts, outname)
                    args['smarts'] = rxninfo
                except:
                    # Sanitization failed: fall through without a 'smarts' key
                    pass
        if 'smarts' in args:
            """ Submit SMARTS query """
            rxntype = 'smarts'
            rxninfo = args['smarts']
            # Optional parameters with their REST defaults (note noMSA
            # defaults to True here, unlike the web form)
            if 'targets' in args:
                targets = args['targets']
            else:
                targets = '50'
            if 'direction' in args:
                direction = int(args['direction'])
            else:
                direction = 0
            if 'noMSA' in args:
                noMSA = args['noMSA']
            else:
                noMSA = True
            if 'host' in args:
                host = args['host']
            else:
                host = '83333'
            if 'fp' in args:
                fp = args['fp']
            else:
                fp = 'RDK'
            if 'score' in args:
                session['SCORE'] = Selenzy.seqScore(args['score'])
            try:
                if isinstance(rxninfo, (list, tuple) ):
                    # Batch query: one run per reaction, results stacked
                    data = []
                    for instance in rxninfo:
                        dat, csvfile, sessionid = run_session(rxntype, instance, targets, direction, host, fp, noMSA)
                        data.append(dat)
                    data = pd.DataFrame(data)
                else:
                    data, csvfile, sessionid = run_session(rxntype, rxninfo, targets, direction, host, fp, noMSA)
                return jsonify({'app': 'Selenzy', 'version': '1.0', 'author': 'Synbiochem', 'data': data.to_json()})
            except:
                # Failures are reported as data: None, not as an error status
                return jsonify({'app': 'Selenzy', 'version': '1.0', 'author': 'Synbiochem', 'data': None})
        else:
            return jsonify({'app': 'Selenzy', 'version': '1.0', 'author': 'Synbiochem', 'data': None})
class RestSource(Resource):
    """ REST interface, returns api info """
    def get(self):
        # Map organism name -> organism id from the preloaded table
        table = app.config['ORG']
        orgs = {table[seq][1]: table[seq][0] for seq in table}
        return jsonify({'app': 'Selenzy', 'version': '1.0', 'author': 'Synbiochem', 'data': orgs})
class RestFinger(Resource):
    """ REST interface, returns api info """
    def get(self):
        # Expose the available fingerprint types as a plain list
        available = list(Selenzy.availableFingerprints())
        return jsonify({'app': 'Selenzy', 'version': '1.0', 'author': 'Synbiochem', 'data': available})
# Wire the REST resources onto the API
api.add_resource(RestGate, '/REST')
api.add_resource(RestQuery, '/REST/Query')
api.add_resource(RestSource, '/REST/Source')
api.add_resource(RestFinger, '/REST/Fingerprints')
@app.errorhandler(404)
def page_not_found(e):
    # Unknown URLs fall back to the query form instead of a 404 page
    return redirect(url_for('upload_form'))
@app.route('/')
def upload_form():
    """Landing page: show the query form, or send the user to login first."""
    if 'username' in session:
        return render_template("my_form.html", username=session['username'],
                               fingerprints=Selenzy.availableFingerprints())
    return redirect(url_for('login'))
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log the user in and initialise a session.

    Every branch initialises a session and redirects to the query form.
    The inline HTML login form the original carried after the if/else was
    unreachable (both branches return first) and has been removed.
    """
    if app.debug == True:
        # Debug mode pins the username so the /debug view works
        session['username'] = 'debug'
        init_session()
        return redirect(url_for('upload_form'))
    if request.method == 'POST':
        session['username'] = request.form['username']
        init_session()
        return redirect(url_for('upload_form'))
    else:
        init_session()
        return redirect(url_for('upload_form'))
@app.route('/logout')
def logout():
    """Forget the username and return to the login page."""
    session.pop('username', None)
    return redirect(url_for('login'))
@app.route('/msa', methods=['POST'])
def post_msa():
    """ Post safely the MSA """
    if request.method == 'POST':
        # basename() blocks path traversal through the client-supplied id
        sessionid = os.path.basename(json.loads(request.values['sessionid']))
        msafile = os.path.join(app.config['UPLOAD_FOLDER'], sessionid, 'sequences_aln.fasta')
        treefile = os.path.join(app.config['UPLOAD_FOLDER'], sessionid, 'sequences.dnd')
        if os.path.exists(msafile) and os.path.exists(treefile):
            # BUG FIX: use context managers; the original leaked both handles
            with open(msafile) as f:
                msa = f.read()
            with open(treefile) as f:
                tree = ' '.join(f.readlines())
            return json.dumps({'msa': msa, 'tree': tree})
    return redirect ( url_for('upload_form') )
@app.route('/msaview', methods=['GET'])
def display_msa():
    """ Display the MSA """
    if request.method == 'GET' and 'id' in request.values:
        sessionid = request.values['id']
        alignment = os.path.join(app.config['UPLOAD_FOLDER'], sessionid,
                                 'sequences_aln.fasta')
        if os.path.exists(alignment):
            return render_template('viewmsa.html', sessionid=sessionid)
    return redirect ( url_for('upload_form') )
@app.route('/display', methods=['POST'])
def display_reaction(marvin=app.config['MARVIN']):
    """ Validates the query and displays the reaction """
    # Three input channels, checked in order: an uploaded reaction file,
    # a raw SMARTS string, or a database reaction id.  Each branch renders
    # the reaction (Marvin SVG or an image file) and stores the sanitized
    # query in the session for the later /results run.
    if request.method == 'POST':
        size = (600,400)
        if 'file' in request.files and len(request.files['file'].filename) > 0:
            fileinfo = request.files['file']
            if fileinfo.filename == '' or not allowed_file(fileinfo.filename):
                flash("No file selected")
                return redirect (request.url)
            rxninfo = save_rxn(fileinfo)
            success = True
            if len(rxninfo) == 0:
                success = False
                data = ''
            else:
                if marvin:
                    svgstream = Selenzy.display_reaction(rxninfo, outfolder=session['uniquefolder'],
                                                         outname = str(uuid.uuid4()), marvin=True)
                    data = svgstream.decode('utf-8')
                    if len(data) == 0:
                        success = False
                else:
                    outfile, size = Selenzy.display_reaction(rxninfo, outfolder=session['uniquefolder'],
                                                             outname = str(uuid.uuid4()), marvin=False)
                    if len(outfile) == 0:
                        success = False
                    data = os.path.join('/results', session['uniqueid'], 'files', os.path.basename(outfile))
            session['rxninfo'] = rxninfo
            session['rxntype'] = 'smarts'
            session['status'] = True
            # NOTE(review): this unconditionally overwrites the failure flag
            # computed above (the SMARTS and rxnid branches do not do this);
            # looks like leftover debugging — confirm before relying on it.
            success = True
            return json.dumps( {'data': data, 'status': session['status'], 'success': success, 'svg': marvin, 'size': size, 'smarts': rxninfo} )
        elif len(request.form['smarts']) > 0:
            # Raw SMARTS string from the form
            outname = file_path(session['uniqueid'], session['uniqueid'])
            rxninfo = Selenzy.sanitizeSmarts(request.form['smarts'], outname)
            success = True
            if marvin:
                svgstream = Selenzy.display_reaction(rxninfo, outfolder=session['uniquefolder'], outname = str(uuid.uuid4()), marvin=True)
                data = svgstream.decode('utf-8')
                if len(data) == 0:
                    success = False
            else:
                outfile, size = Selenzy.display_reaction(rxninfo, outfolder=session['uniquefolder'], outname = str(uuid.uuid4()), marvin=False)
                if len(outfile) == 0:
                    success = False
                data = os.path.join('/results', session['uniqueid'], 'files', os.path.basename(outfile))
            session['rxninfo'] = rxninfo
            session['rxntype'] = 'smarts'
            session['status'] = True
            return json.dumps( {'data': data, 'status': session['status'], 'success': success, 'svg': marvin, 'size': size, 'smarts': rxninfo} )
        elif len(request.form['rxnid']) > 0:
            # Database reaction id: resolve to a SMARTS via the tables
            db = request.form['rdb']
            rxnid = db+':'+request.form['rxnid']
            if rxnid in app.config['TABLES'].rxnref and app.config['TABLES'].rxnref[rxnid] in app.config['TABLES'].smir:
                mnxrid = app.config['TABLES'].rxnref[rxnid]
                smarts = app.config['TABLES'].smir[ mnxrid ][0]
                # When the stored direction is '-1', use the reverse SMARTS
                if mnxrid in app.config['TABLES'].rxndir:
                    if app.config['TABLES'].rxndir[mnxrid] == '-1':
                        smarts = app.config['TABLES'].smir[ mnxrid ][1]
                outname = file_path(session['uniqueid'], session['uniqueid'])
                rxninfo = Selenzy.sanitizeSmarts(smarts, outname)
                success = True
                if marvin:
                    svgstream = Selenzy.display_reaction(rxninfo, outfolder=session['uniquefolder'], outname = str(uuid.uuid4()), marvin=True)
                    data = svgstream.decode('utf-8')
                    if len(data) == 0:
                        success = False
                else:
                    outfile, size = Selenzy.display_reaction(rxninfo, outfolder=session['uniquefolder'], outname = str(uuid.uuid4()), marvin=False)
                    if len(outfile) == 0:
                        success = False
                    data = os.path.join('/results', session['uniqueid'], 'files', os.path.basename(outfile))
                session['rxninfo'] = rxninfo
                session['rxntype'] = 'smarts'
                session['status'] = True
                return json.dumps( {'data': data, 'status': session['status'], 'success': success, 'svg': marvin, 'size': size, 'smarts':smarts} )
@app.route('/sorter', methods=['POST'])
def sort_table():
    """Sort the results table according to the column weights in 'filter'.

    POST fields: 'filter' (JSON list of ints), 'session' (JSON session id),
    'csv' (JSON csv file name).  Returns the re-sorted table as HTML
    wrapped in JSON.
    """
    if request.method == 'POST':
        jfilter = json.loads(request.values.get('filter'))
        try:
            filt = [int(x) for x in jfilter]
        except (TypeError, ValueError):
            # BUG FIX: the original bare "except: return" returned None,
            # which Flask turns into an opaque 500.  Report the bad input.
            return json.dumps({'data': {'error': 'invalid filter'}}), 400
        # Renamed from 'session' so the flask session import is not shadowed
        sessionid = json.loads(request.values.get('session'))
        csvname = os.path.basename(json.loads(request.values.get('csv')))
        csvfile = os.path.join(app.config['UPLOAD_FOLDER'], sessionid, csvname)
        outdir = os.path.join(app.config['UPLOAD_FOLDER'], sessionid)
        head, rows = Selenzy.read_csv(csvfile)
        sortrows = Selenzy.sort_rows(rows, filt)
        Selenzy.updateMSA(outdir, sortrows)
        Selenzy.write_csv(csvfile, head, sortrows)
        data = pd.read_csv(csvfile)
        data.index = data.index + 1
        # BUG FIX: rename_axis returns a new frame; the original discarded it
        data = data.rename_axis('Select', axis="columns")
        return json.dumps( {'data': {'csv': data.to_html(), 'filter': filt}} )
@app.route('/adder', methods=['POST'])
def add_rows():
    """Append sequences from an uploaded FASTA file to the results table."""
    if request.method == 'POST':
        if 'session' in request.values:
            sessionid = request.values['session']
        else:
            flash("Bad request")
            return redirect (request.url)
        if 'fasta' in request.files and len(request.files['fasta'].filename) > 0:
            fileinfo = request.files['fasta']
            if fileinfo.filename == '' or not allowed_file(fileinfo.filename):
                flash("No file selected")
                return redirect (request.url)
            uniquefolder = os.path.join(app.config['UPLOAD_FOLDER'], sessionid)
            fastafile = sessionid+'.fasta'
            uniquename = os.path.join(uniquefolder, fastafile)
            fileinfo.save(uniquename)
            # An existing guide tree means an MSA was computed before and
            # should be extended too.
            dndFile = os.path.join(uniquefolder, 'sequences.dnd')
            noMSA = not os.path.exists(dndFile)
            csvfile = Selenzy.extend_sequences('sequences.fasta', fastafile, uniquefolder, noMSA)
            # BUG FIX: the original recomputed the scores and then threw the
            # result away by re-reading 'csvfile' as a path relative to the
            # CWD.  Use the scored frame and the path in the session folder.
            csvpath = os.path.join(uniquefolder, csvfile)
            data = Selenzy.updateScore(csvpath, session['SCORE'])
            data.index = data.index + 1
            data = data.rename_axis('Select', axis="columns")
            # TO DO: update fasta file
            return json.dumps( {'data': {'csv': data.to_html()}} )
@app.route('/remover', methods=['POST'])
def delete_rows():
    """Delete the selected rows from the results table, rewrite the FASTA
    files and, when an MSA exists, recompute the conservation scores.

    POST fields: 'filter' (JSON list of 1-based row numbers), 'session'
    (JSON session id), 'csv' (JSON csv file name).
    """
    if request.method == 'POST':
        selrows = json.loads(request.values.get('filter'))
        # Renamed from 'session' so the flask session import is not shadowed
        sessionid = json.loads(request.values.get('session'))
        csvname = os.path.basename(json.loads(request.values.get('csv')))
        outdir = os.path.join(app.config['UPLOAD_FOLDER'], sessionid)
        csvfile = os.path.join(outdir, csvname)
        head, rows = Selenzy.read_csv(csvfile)
        seqcol = head.index('Seq. ID')
        # 1-based client row numbers -> 0-based indices; a set makes the
        # membership test below O(1) instead of scanning a list per row.
        filt = set()
        for i in selrows:
            try:
                filt.add(int(i) - 1)
            except (TypeError, ValueError):
                continue
        newtargets = []
        newrows = []
        for j, row in enumerate(rows):
            if j not in filt:
                newtargets.append(row[seqcol])
                newrows.append(row)
        fastaFile = os.path.join(outdir, "sequences.fasta")
        Selenzy.write_fasta(fastaFile, newtargets, app.config['TABLES'])
        # Avoid issues with sequence ids
        fastaShortNameFile = os.path.join(outdir, "seqids.fasta")
        Selenzy.write_fasta(fastaShortNameFile, newtargets, app.config['TABLES'], short=True)
        # Recompute MSA if exists
        dndFile = os.path.join(outdir, 'sequences.dnd')
        if os.path.exists(dndFile):
            cons = Selenzy.doMSA(fastaShortNameFile, outdir)
            for row in newrows:
                try:
                    row[head.index('Consv. Score')] = cons[row[seqcol]]
                except (KeyError, ValueError, IndexError):
                    # sequence absent from the new alignment: keep old score
                    pass
        Selenzy.write_csv(csvfile, head, newrows)
        data = pd.read_csv(csvfile)
        data.index = data.index + 1
        # BUG FIX: rename_axis returns a new frame; the original discarded it
        data = data.rename_axis('Select', axis="columns")
        return json.dumps( {'data': {'csv': data.to_html()}} )
@app.route('/scorer', methods=['POST'])
def score_table():
    """Re-score the results table with the weights posted in 'score'."""
    if request.method == 'POST':
        weights = json.loads(request.values.get('score'))
        sessid = json.loads(request.values.get('session'))
        csvname = os.path.basename(json.loads(request.values.get('csv')))
        target = os.path.join(app.config['UPLOAD_FOLDER'], sessid, csvname)
        session['SCORE'] = Selenzy.seqScore(weights)
        table = Selenzy.updateScore(target, session['SCORE'])
        return json.dumps( {'data': {'csv': table.to_html()}} )
@app.route('/debug', methods=['GET'])
def show_table():
    """Render the canned 'debug' session results (debug mode only)."""
    if app.debug:
        csvfile = os.path.join(app.config['UPLOAD_FOLDER'], 'debug', 'selenzy_results.csv')
        data = Selenzy.updateScore(csvfile, session['SCORE'])
        sessionid = 'debug'
        # BUG FIX: rename_axis is not in-place; keep its return value
        data = data.rename_axis('Select', axis="columns")
        return render_template('results.html', tables=data.to_html(), csvfile=csvfile, sessionid=sessionid,
                               flags={'fasta': False, 'msa': False}, score=session['SCORE'])
    else:
        return redirect ( url_for('upload_form') )
@app.route('/results', methods=['GET', 'POST'])
def upload_file():
    """Main results endpoint.

    POST: run (or re-run) the analysis stored in the current session, or
    load an uploaded results CSV.  GET: answer the ajax 'fasta' poll, or
    start an independent session straight from query parameters.
    """
    if request.method == 'POST':
        """ The POST request should come from an already initalised session """
        if 'uniqueid' not in session:
            return redirect ( url_for('upload_form') )
        # check if post request has smarts part
        if 'csv' in request.files and len(request.files['csv'].filename) > 0:
            fileinfo = request.files['csv']
            if fileinfo.filename == '' or not allowed_file(fileinfo.filename):
                flash("No file selected")
                return redirect (request.url)
            data, csvfile, sessionid = retrieve_session(fileinfo)
            return render_template('results.html', tables=data.to_html(), csvfile=csvfile, sessionid=sessionid, flags={'fasta': False, 'msa': False}, score=session['SCORE'])
        else:
            try:
                rxninfo = session['rxninfo']
                rxntype = session['rxntype']
            except:
                # No query stored in the session: force a fresh login
                return redirect(url_for('login'))
            direction = 0
            noMSA = False
            targets = request.form['targets']
            host = request.form['host']
            fp = request.form['finger']
            if request.form.get('direction'):
                direction = 1
            if request.form.get('noMSA'):
                noMSA = True
            try:
                data, csvfile, sessionid = run_session(rxntype, rxninfo, targets, direction, host, fp, noMSA)
                return render_template('results.html', tables=data.to_html(), csvfile=csvfile, sessionid=sessionid, flags={'fasta': True, 'msa': not noMSA}, score=session['SCORE'])
            except:
                # Any analysis failure sends the user back to the form
                return redirect( url_for("upload_form") )
    elif request.method == 'GET':
        if request.args.get('fasta') is not None:
            """ This is a request that is handled by ajax, do not return anything """
            sessionid = request.args.get('session')
            return ('', 204)
        else:
            """ A GET request would require an independently initialised session """
            init_session()
            smarts = request.args.get('smarts')
            if smarts is None:
                return redirect( url_for("upload_form") )
            host = request.args.get('host')
            if host is None:
                host = '83333'
            fp = request.args.get('fp')
            if fp is None:
                fp = 'RDK'
            # Fixed defaults for a direct-link query
            rxntype = 'smarts'
            rxninfo = smarts
            direction = 0
            noMSA = False
            targets = 20
            session['rxninfo'] = rxninfo
            session['rxntype'] = rxntype
            try:
                data, csvfile, sessionid = run_session(rxntype, rxninfo, targets, direction, host, fp, noMSA)
                return render_template('results.html', tables=data.to_html(), csvfile=csvfile, sessionid=sessionid,
                                       flags={'fasta': True, 'msa': not noMSA}, score=session['SCORE'])
            except:
                return redirect( url_for("upload_form") )
    return redirect( url_for("upload_form") )
@app.route('/results/<sessionid>/files/<filename>')
def results_file(sessionid,filename):
    # Serve a generated artefact (image, csv, ...) from the session folder
    return send_from_directory(os.path.join(app.config['UPLOAD_FOLDER'], sessionid), filename)
# Reconfigure for gunicorn
if __name__== "__main__": #only run server if file is called directly
    arg = arguments()
    # Resolve all working directories to absolute paths
    app.config['UPLOAD_FOLDER'] = os.path.abspath(arg.uploaddir)
    app.config['LOG_FOLDER'] = os.path.abspath(arg.logdir)
    app.config['DATA_FOLDER'] = os.path.abspath(arg.datadir)
    # NOTE(review): PRELOAD is True on both branches; '-d' only toggles DEBUG
    if arg.d:
        app.config['DEBUG'] = True
        app.config['PRELOAD'] = True
    else:
        app.config['DEBUG'] = False
        app.config['PRELOAD'] = True
    app.config['ORG'] = Selenzy.seqOrganism(arg.datadir, "seq_org.tsv")
    if app.config['PRELOAD']:
        # Load the Selenzy reference tables once at startup
        app.config['TABLES'] = Selenzy.readData(arg.datadir)
    else:
        app.config['TABLES'] = None
    # Log both werkzeug and application messages to a small rotating file
    handler = RotatingFileHandler(os.path.join(app.config['LOG_FOLDER'], 'selenzy.log'), maxBytes=10000, backupCount=1)
    log = logging.getLogger('werkzeug')
    log.addHandler(handler)
    app.logger.addHandler(handler)
    app.run(host="0.0.0.0",port=5000, debug=app.config['DEBUG'], threaded=True)
# app.run(port=5000, debug=True) | 0.29696 | 0.042942 |
# Compare manually-tagged ("gold standard") route selections against the
# routes computed by the API, matching records on user, day and route id.
# Ported from Python 2: print statements -> print(), the removed 'sets'
# module -> built-in set, and the removed string.replace() -> str.replace().
import sys
import json

allRoutes = set()
goldstandard = []
computed_results = []

# route user iso_ts responsetime
# 1002 8550102e445ac626 2013-12-19 21:24:04 1444
with open('extract_route_user_ts_responsetime_out.csv', 'r') as f:
    for line in f:
        cols = line.split(";")
        print(cols)
        computed_results.append(cols)

# unix_ts route user iso_ts
# $ sudo su postgres
# $ COPY (SELECT ts, tag, user_id from sensor_tags, trip where sensor_tags.trip_id = trip.trip_id AND tag like '%Manually selected line with%') TO '/tmp/query.csv' (format csv, delimiter ';');
# $ psql -d liveandgov
# mvn exec:java -Dexec.mainClass=eu.liveandgov.wp1.backend.Timestamp2String -Dexec.args="query-ts.csv"
# 2014-01-24 08:05:42;"""Manually selected line with id: 1004direction : 1""";35
# 2014-01-24 08:05:42;"""Manually selected line with id: 1065A""";8550102e445ac626
with open('query.csv', 'r') as f:
    for line in f:
        cols = line.split("\t")
        # Strip the free-text tag down to the bare route id
        cols[1] = cols[1].replace('"""Manually selected line with id: ', "")
        cols[1] = cols[1].replace('"""', "")
        cols[1] = cols[1].replace('direction : 1', "")
        cols[1] = cols[1].replace('direction : 2', "")
        goldstandard.append(cols)
        print(cols)

found = False
total_hits = 0
total = 0
for g in goldstandard:
    found_in_this_loop = 0
    for c in computed_results:
        if g[2].strip() == c[1].strip():                      # same user
            if g[0][0:10].strip() == c[2][0:10].strip():      # same day
                if g[1].strip() == c[0].strip():              # same route
                    found = True
                    found_in_this_loop += 1
                    allRoutes.add(g[1].strip())
                    print(total, "|", g[1], "<->", c[0], "|", g[0], g[2].strip(), c[2], found_in_this_loop)
    if found:  # found a match for this day
        total_hits += 1
        found = False
    total += 1

# Guard against an empty gold standard (the original divided by zero);
# also fixes the "maches" typo in the summary line.
hit_rate = float(total_hits) / float(total) if total else 0.0
print("total API calls: " + str(len(computed_results)) + " matches: " + str(total_hits)
      + " total: " + str(total) + " hit rate: " + str(hit_rate))
print(allRoutes)
# Compare manually-tagged ("gold standard") route selections against the
# routes computed by the API, matching records on user, day and route id.
# Ported from Python 2: print statements -> print(), the removed 'sets'
# module -> built-in set, and the removed string.replace() -> str.replace().
import sys
import json

allRoutes = set()
goldstandard = []
computed_results = []

# route user iso_ts responsetime
# 1002 8550102e445ac626 2013-12-19 21:24:04 1444
with open('extract_route_user_ts_responsetime_out.csv', 'r') as f:
    for line in f:
        cols = line.split(";")
        print(cols)
        computed_results.append(cols)

# unix_ts route user iso_ts
# $ sudo su postgres
# $ COPY (SELECT ts, tag, user_id from sensor_tags, trip where sensor_tags.trip_id = trip.trip_id AND tag like '%Manually selected line with%') TO '/tmp/query.csv' (format csv, delimiter ';');
# $ psql -d liveandgov
# mvn exec:java -Dexec.mainClass=eu.liveandgov.wp1.backend.Timestamp2String -Dexec.args="query-ts.csv"
# 2014-01-24 08:05:42;"""Manually selected line with id: 1004direction : 1""";35
# 2014-01-24 08:05:42;"""Manually selected line with id: 1065A""";8550102e445ac626
with open('query.csv', 'r') as f:
    for line in f:
        cols = line.split("\t")
        # Strip the free-text tag down to the bare route id
        cols[1] = cols[1].replace('"""Manually selected line with id: ', "")
        cols[1] = cols[1].replace('"""', "")
        cols[1] = cols[1].replace('direction : 1', "")
        cols[1] = cols[1].replace('direction : 2', "")
        goldstandard.append(cols)
        print(cols)

found = False
total_hits = 0
total = 0
for g in goldstandard:
    found_in_this_loop = 0
    for c in computed_results:
        if g[2].strip() == c[1].strip():                      # same user
            if g[0][0:10].strip() == c[2][0:10].strip():      # same day
                if g[1].strip() == c[0].strip():              # same route
                    found = True
                    found_in_this_loop += 1
                    allRoutes.add(g[1].strip())
                    print(total, "|", g[1], "<->", c[0], "|", g[0], g[2].strip(), c[2], found_in_this_loop)
    if found:  # found a match for this day
        total_hits += 1
        found = False
    total += 1

# Guard against an empty gold standard (the original divided by zero);
# also fixes the "maches" typo in the summary line.
hit_rate = float(total_hits) / float(total) if total else 0.0
print("total API calls: " + str(len(computed_results)) + " matches: " + str(total_hits)
      + " total: " + str(total) + " hit rate: " + str(hit_rate))
print(allRoutes)
import connexion
from swagger_server.models.inline_response20013 import InlineResponse20013
from swagger_server.models.inline_response202 import InlineResponse202
from swagger_server.models.transition_request import TransitionRequest
from datetime import date, datetime
from typing import List, Dict
from six import iteritems
from ..util import deserialize_date, deserialize_datetime
# business logic imports
from flask import abort
from flask import current_app as app
import json
import uuid
from .driver_config import ConfigReader
from .requests import RequestHandler
def lifecycle_transitions_id_status_get(id):
    """
    get details on transition request status
    Returns information about the specified transition or operation request
    :param id: Unique id for the transition request
    :type id: str
    :rtype: InlineResponse20013
    """
    handler = RequestHandler()
    app.logger.info('getting request status for :' + id)
    # Reject ids that are not well-formed UUIDs before hitting the backend
    try:
        uuid.UUID(id)
    except ValueError:
        app.logger.error('id ' + id + ' is not a valid UUID')
        abort(400, 'not a valid UUID')
    status, message, payload = handler.get_request(id)
    if status == 200:
        app.logger.debug('request ' + id + 'details: ' + json.dumps(payload))
        return payload
    app.logger.error('request ' + id + ': ' + message)
    abort(status, message)
def lifecycle_transitions_post(transitionRequest=None):
    """
    Performs a transition against a Resource.
    Requests this Resource Manager performs a specific
    transition against a resource
    :param transitionRequest:
    :type transitionRequest: dict | bytes
    :rtype: InlineResponse202
    """
    # Deserialize the JSON body into the model type when present
    if connexion.request.is_json:
        transitionRequest = TransitionRequest.from_dict(connexion.request.get_json())
    app.logger.info('working on transition request ')
    handler = RequestHandler()
    rc, resp = handler.start_request(transitionRequest)
    outcome = 'transition started' if rc == 202 else 'transition start failed'
    app.logger.info(outcome)
    return resp, rc
from swagger_server.models.inline_response20013 import InlineResponse20013
from swagger_server.models.inline_response202 import InlineResponse202
from swagger_server.models.transition_request import TransitionRequest
from datetime import date, datetime
from typing import List, Dict
from six import iteritems
from ..util import deserialize_date, deserialize_datetime
# business logic imports
from flask import abort
from flask import current_app as app
import json
import uuid
from .driver_config import ConfigReader
from .requests import RequestHandler
def lifecycle_transitions_id_status_get(id):
"""
get details on transition request status
Returns information about the specified transition or operation request
:param id: Unique id for the transition request
:type id: str
:rtype: InlineResponse20013
"""
rI = RequestHandler()
app.logger.info('getting request status for :' + id)
try:
val = uuid.UUID(id)
except ValueError:
app.logger.error('id ' + id + ' is not a valid UUID')
abort(400, 'not a valid UUID')
rc, rcMsg, resp200 = rI.get_request(id)
if (rc != 200):
app.logger.error('request ' + id + ': ' + rcMsg)
abort(rc, rcMsg)
else:
app.logger.debug('request ' + id + 'details: ' + json.dumps(resp200))
return resp200
def lifecycle_transitions_post(transitionRequest=None):
"""
Performs a transition against a Resource.
Requests this Resource Manager performs a specific
transition against a resource
:param transitionRequest:
:type transitionRequest: dict | bytes
:rtype: InlineResponse202
"""
if connexion.request.is_json:
transitionRequest = TransitionRequest.from_dict(connexion.request.get_json())
# create the request
app.logger.info('working on transition request ')
requestHandler = RequestHandler()
rc, resp = requestHandler.start_request(transitionRequest)
if rc == 202:
app.logger.info('transition started')
else:
app.logger.info('transition start failed')
# app.logger.debug('transition request response: ' + json.dumps(resp))
return resp, rc | 0.686265 | 0.048294 |
import doctest
import errno
import os
import socket
import unittest
import webbrowser
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from mock import Mock, patch
import docutils.utils
from restview.restviewhttp import (MyRequestHandler, RestViewer,
get_host_name, launch_browser, main)
try:
unicode
except NameError:
unicode = str
class PopenStub(object):
    """A canned stand-in for ``subprocess.Popen``.

    The instance is installed in place of the ``Popen`` class; calling it
    (as production code calls ``subprocess.Popen(...)``) hands back the
    instance itself, which then also plays the role of the process object.
    """

    def __init__(self, stdout='', stderr='', retcode=0):
        # Output streams to hand back from communicate(), plus the
        # exit status the caller will inspect.
        self._stdout, self._stderr = stdout, stderr
        self.returncode = retcode

    def __call__(self, *args, **kw):
        # Pretend to spawn a process; every argument is ignored.
        return self

    def communicate(self, stdin=''):
        # Remember what was piped in (tests may inspect it) and return
        # the pre-recorded (stdout, stderr) pair.
        self._stdin = stdin
        return self._stdout, self._stderr
class MyRequestHandlerForTests(MyRequestHandler):
    """MyRequestHandler wired for tests: no socket, responses recorded.

    The real ``BaseHTTPRequestHandler.__init__`` would start handling a
    live request, so it is deliberately not called.  Response status,
    headers, errors and log messages are captured on the instance.
    """

    def __init__(self):
        self.headers = {'Host': 'localhost'}  # request headers
        self._headers = {}                    # response headers
        self.log = []
        self.server = Mock()
        renderer = self.server.renderer
        renderer.command = None
        renderer.watch = None
        renderer.allowed_hosts = ['localhost']

        # Fake renderers that echo their inputs so assertions can check
        # exactly what the handler passed in.
        def fake_rest_to_html(data, mtime=None, filename=None):
            return unicode('HTML for %s with AJAX poller for %s' % (data, mtime))

        def fake_render_exception(title, error, source, mtime=None):
            return unicode('HTML for error %s: %s: %s' % (title, error, source))

        renderer.rest_to_html = fake_rest_to_html
        renderer.render_exception = fake_render_exception

    def send_response(self, status):
        # Record the status code instead of writing an HTTP status line.
        self.status = status

    def send_header(self, header, value):
        self._headers[header] = value

    def end_headers(self):
        # Expose the collected response headers for assertions.
        self.headers = self._headers

    def send_error(self, status, body):
        self.status = status
        self.error_body = body

    def log_error(self, message, *args):
        if args:
            message = message % args
        self.log.append(message)
class TestMyRequestHandler(unittest.TestCase):
    """Unit tests for MyRequestHandler, exercised via the socket-free
    MyRequestHandlerForTests subclass defined above."""

    def _os_walk(self, dirpath):
        # Canned os.walk(): one root containing version-control/build
        # directories that collect_files() is expected to prune.
        dirnames = ['.svn', '.tox', 'subdir', 'mypackage.egg-info']
        filenames = ['a.txt', 'z.rst', 'unrelated.py']
        yield dirpath, dirnames, filenames
        for subdir in dirnames:
            yield os.path.join(dirpath, subdir), [], ['b.txt', 'c.py']

    def _raise_oserror(self, *args, **kw):
        # Stand-in for an os/subprocess call failing with ENOENT.
        raise OSError(errno.ENOENT, "no such file or directory")

    def _raise_socket_error(self, *args):
        # Stand-in for a send_response() interrupted by the client.
        raise socket.error("connection reset by peer")

    def setUp(self):
        # normpath() keeps the expected paths OS-appropriate on Windows.
        self.root = os.path.normpath('/root')
        self.root2 = os.path.normpath('/root2')

    def filepath(self, *names):
        # Path under the primary test root.
        return os.path.join(self.root, *names)

    def filepath2(self, *names):
        # Path under the secondary test root.
        return os.path.join(self.root2, *names)

    def test_do_GET(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/a.txt'
        handler.server.renderer.root = self.filepath('file.txt')
        handler.handle_rest_file = lambda fn, watch=None: 'HTML for %s' % fn
        handler.wfile = StringIO()
        handler.do_GET()
        self.assertEqual(handler.wfile.getvalue(),
                         'HTML for %s' % self.filepath('a.txt'))

    def test_do_HEAD(self):
        # HEAD renders the body (for the headers) but writes nothing.
        handler = MyRequestHandlerForTests()
        handler.path = '/a.txt'
        handler.server.renderer.root = self.filepath('file.txt')
        handler.handle_rest_file = lambda fn, watch=None: 'HTML for %s' % fn
        handler.wfile = StringIO()
        handler.do_HEAD()
        self.assertEqual(handler.wfile.getvalue(), '')

    def test_do_GET_or_HEAD_root_when_file(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/'
        handler.server.renderer.root = self.filepath('file.txt')
        handler.server.renderer.command = None
        handler.handle_rest_file = lambda fn, watch=None: 'HTML for %s' % fn
        with patch('os.path.isdir', lambda dir: False):
            body = handler.do_GET_or_HEAD()
        self.assertEqual(body, 'HTML for %s' % self.filepath('file.txt'))

    def test_do_GET_or_HEAD_root_when_dir(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/'
        handler.server.renderer.root = self.root
        handler.server.renderer.command = None
        handler.handle_dir = lambda fn: 'Files in %s' % fn
        with patch('os.path.isdir', lambda dir: True):
            body = handler.do_GET_or_HEAD()
        self.assertEqual(body, 'Files in %s' % self.root)

    def test_do_GET_or_HEAD_root_when_list(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/'
        handler.server.renderer.root = [self.root, self.root2]
        handler.server.renderer.command = None
        handler.handle_list = lambda roots: 'Files in %s' % ", ".join(roots)
        body = handler.do_GET_or_HEAD()
        self.assertEqual(body, 'Files in %s, %s' % (self.root, self.root2))

    def test_do_GET_or_HEAD_root_when_command(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/'
        handler.server.renderer.root = None
        handler.server.renderer.command = 'cat README.rst'
        handler.handle_command = lambda cmd, watch: 'Output of %s' % cmd
        body = handler.do_GET_or_HEAD()
        self.assertEqual(body, 'Output of cat README.rst')

    def test_do_GET_or_HEAD_polling(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/polling?pathname=a.txt&mtime=12345'
        handler.server.renderer.root = self.root
        handler.handle_polling = lambda fns, mt: 'Got update for %s since %s' % (','.join(fns), mt)
        with patch('os.path.isdir', lambda dir: dir == self.root):
            body = handler.do_GET_or_HEAD()
        expected_fn = self.filepath('a.txt')
        self.assertEqual(body, 'Got update for %s since 12345' % expected_fn)

    def test_do_GET_or_HEAD_polling_of_root(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/polling?pathname=/&mtime=12345'
        handler.server.renderer.root = self.filepath('a.txt')
        handler.handle_polling = lambda fns, mt: 'Got update for %s since %s' % (','.join(fns), mt)
        body = handler.do_GET_or_HEAD()
        expected_fn = self.filepath('a.txt')
        self.assertEqual(body, 'Got update for %s since 12345' % expected_fn)

    def test_do_GET_or_HEAD_polling_of_command_with_watch_files(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/polling?pathname=/&mtime=12345'
        handler.server.renderer.command = 'python setup.py --long-description'
        handler.server.renderer.watch = ['setup.py', 'README.rst']
        handler.handle_polling = lambda fns, mt: 'Got update for %s since %s' % (','.join(fns), mt)
        body = handler.do_GET_or_HEAD()
        self.assertEqual(body, 'Got update for setup.py,README.rst since 12345')

    def test_do_GET_or_HEAD_polling_of_root_with_watch_files(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/polling?pathname=/&mtime=12345'
        handler.server.renderer.root = self.filepath('a.txt')
        handler.server.renderer.watch = ['my.css']
        handler.handle_polling = lambda fns, mt: 'Got update for %s since %s' % (','.join(fns), mt)
        body = handler.do_GET_or_HEAD()
        expected_fn = self.filepath('a.txt')
        self.assertEqual(body, 'Got update for %s,my.css since 12345' % expected_fn)

    def test_do_GET_or_HEAD_prevent_sandbox_climbing_attacks(self):
        # '..' in the requested path must not escape the served root.
        handler = MyRequestHandlerForTests()
        handler.path = '/polling?pathname=../../../etc/passwd&mtime=12345'
        handler.server.renderer.root = self.filepath('a.txt')
        handler.do_GET_or_HEAD()
        self.assertEqual(handler.status, 400)
        self.assertEqual(handler.error_body, "Bad request")

    def test_do_GET_or_HEAD_prevent_dns_rebinding(self):
        # A Host header outside allowed_hosts must be rejected.
        handler = MyRequestHandlerForTests()
        handler.headers['Host'] = 'mallory.example.com:80'
        handler.path = '/'
        handler.server.renderer.root = self.filepath('a.txt')
        handler.do_GET_or_HEAD()
        self.assertEqual(handler.status, 400)
        self.assertEqual(handler.error_body, "Host header not in allowed list")

    def test_do_GET_or_HEAD_images(self):
        for filename, ctype in [('a.png', 'image/png'),
                                ('a.gif', 'image/gif'),
                                ('a.jpg', 'image/jpeg'),
                                ('a.jpeg', 'image/jpeg'),
                                ('a.svg', 'image/svg+xml'),
                                ('favicon.ico', 'image/x-icon')]:
            handler = MyRequestHandlerForTests()
            handler.path = '/' + filename
            handler.server.renderer.root = self.filepath('a.txt')
            handler.server.renderer.favicon_path = self.filepath('favicon.ico')
            handler.handle_image = lambda fn, ct: '%s (%s)' % (fn, ct)
            body = handler.do_GET_or_HEAD()
            self.assertEqual(body, '%s (%s)' % (self.filepath(filename), ctype))

    def test_do_GET_or_HEAD_rst_files(self):
        for filename in ['a.txt', 'a.rst']:
            handler = MyRequestHandlerForTests()
            handler.path = '/' + filename
            handler.server.renderer.root = self.filepath('file.txt')
            handler.handle_rest_file = lambda fn, watch=None: 'HTML for %s' % fn
            body = handler.do_GET_or_HEAD()
            self.assertEqual(body, 'HTML for %s' % self.filepath(filename))

    def test_do_GET_or_HEAD_other_files(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/a.py'
        handler.server.renderer.root = self.filepath('file.txt')
        handler.do_GET_or_HEAD()
        self.assertEqual(handler.status, 501)
        self.assertEqual(handler.error_body, "File type not supported: /a.py")

    def test_handle_polling(self):
        handler = MyRequestHandlerForTests()
        filename = os.path.join(os.path.dirname(__file__), '__init__.py')
        with patch('time.sleep') as sleep:
            # First stat shows no change, second shows a newer mtime.
            stat = {filename: [Mock(st_mtime=123455), Mock(st_mtime=123456)]}
            with patch('os.stat', lambda fn: stat[fn].pop(0)):
                handler.handle_polling([filename], 123455)
        sleep.assert_called_once_with(0.2)
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Cache-Control'],
                         "no-cache, no-store, max-age=0")

    def test_handle_polling_handles_interruptions(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/polling?pathname=__init__.py&mtime=123455'
        handler.send_response = self._raise_socket_error
        filename = os.path.join(os.path.dirname(__file__), '__init__.py')
        stat = {filename: [Mock(st_mtime=123456)]}
        with patch('os.stat', lambda fn: stat[fn].pop(0)):
            handler.handle_polling([filename], 123455)
        self.assertEqual(
            handler.log,
            ['connection reset by peer'
             ' (client closed "%s" before acknowledgement)' % handler.path])

    def test_handle_polling_handles_disappearing_files(self):
        # A transient OSError from os.stat() (editor save-in-progress)
        # must not abort the polling loop.
        handler = MyRequestHandlerForTests()
        filename = os.path.join(os.path.dirname(__file__), '__init__.py')
        with patch('time.sleep'):
            stat = {filename: [lambda: Mock(st_mtime=123455),
                               self._raise_oserror,
                               lambda: Mock(st_mtime=123456)]}
            with patch('os.stat', lambda fn: stat[fn].pop(0)()):
                handler.handle_polling([filename], 123455)
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Cache-Control'],
                         "no-cache, no-store, max-age=0")

    def test_translate_path_when_root_is_a_file(self):
        handler = MyRequestHandlerForTests()
        handler.server.renderer.root = self.filepath('file.txt')
        handler.path = '/a.txt'
        with patch('os.path.isdir', lambda dir: False):
            self.assertEqual(handler.translate_path(), self.filepath('a.txt'))
            self.assertEqual(handler.translate_path('/file.png'),
                             self.filepath('file.png'))

    def test_translate_path_when_root_is_a_directory(self):
        handler = MyRequestHandlerForTests()
        handler.server.renderer.root = self.root
        handler.path = '/a.txt'
        with patch('os.path.isdir', lambda dir: True):
            self.assertEqual(handler.translate_path(), self.filepath('a.txt'))
            self.assertEqual(handler.translate_path('/file.txt'),
                             self.filepath('file.txt'))

    def test_translate_path_when_root_is_a_sequence(self):
        # With multiple roots the first path segment is the root index.
        handler = MyRequestHandlerForTests()
        handler.server.renderer.root = [self.root, self.root2]
        handler.path = '/0/a.txt'
        with patch('os.path.isdir', lambda dir: '.' not in dir):
            self.assertEqual(handler.translate_path(), self.filepath('a.txt'))
            self.assertEqual(handler.translate_path('/1/b.txt'),
                             self.filepath2('b.txt'))

    def test_handle_image(self):
        handler = MyRequestHandlerForTests()
        filename = os.path.join(os.path.dirname(__file__), '__init__.py')
        body = handler.handle_image(filename, 'image/python')  # ha ha
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Content-Type'],
                         "image/python")
        self.assertEqual(handler.headers['Content-Length'],
                         str(len(body)))
        self.assertTrue(isinstance(body, bytes))

    def test_handle_image_error(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/nosuchfile.png'
        handler.handle_image('nosuchfile.png', 'image/png')
        self.assertEqual(handler.status, 404)
        self.assertEqual(handler.error_body,
                         "File not found: /nosuchfile.png")

    def test_handle_rest_file(self):
        handler = MyRequestHandlerForTests()
        filename = os.path.join(os.path.dirname(__file__), '__init__.py')
        mtime = os.stat(filename).st_mtime
        body = handler.handle_rest_file(filename)
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Content-Type'],
                         "text/html; charset=UTF-8")
        self.assertEqual(handler.headers['Content-Length'],
                         str(len(body)))
        self.assertEqual(handler.headers['Cache-Control'],
                         "no-cache, no-store, max-age=0")
        self.assertTrue(body.startswith(b'HTML for'))
        self.assertTrue(body.endswith(('with AJAX poller for %s' % mtime).encode()))

    def test_handle_rest_file_extra_watch(self):
        # The newest mtime among the file and its watch list wins.
        handler = MyRequestHandlerForTests()
        filename = os.path.join(os.path.dirname(__file__), '__init__.py')
        mtime = os.stat(filename).st_mtime
        with patch('os.stat', lambda fn: {filename: Mock(st_mtime=mtime),
                                          'my.css': Mock(st_mtime=mtime + 1)}[fn]):
            body = handler.handle_rest_file(filename, watch=['my.css'])
        self.assertEqual(handler.status, 200)
        self.assertTrue(body.endswith(('with AJAX poller for %s' % (mtime + 1)).encode()))
        with patch('os.stat', lambda fn: {filename: Mock(st_mtime=mtime),
                                          'my.css': Mock(st_mtime=mtime - 1)}[fn]):
            body = handler.handle_rest_file(filename, watch=['my.css'])
        self.assertEqual(handler.status, 200)
        self.assertTrue(body.endswith(('with AJAX poller for %s' % mtime).encode()))

    def test_handle_rest_file_error(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/nosuchfile.txt'
        handler.handle_rest_file('nosuchfile.txt')
        self.assertEqual(handler.status, 404)
        self.assertEqual(handler.error_body,
                         "File not found: /nosuchfile.txt")
        self.assertEqual(handler.log,
                         ["[Errno 2] No such file or directory: 'nosuchfile.txt'"])

    def test_handle_command(self):
        handler = MyRequestHandlerForTests()
        with patch('subprocess.Popen', PopenStub('data from cat README.rst')):
            body = handler.handle_command('cat README.rst')
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Content-Type'],
                         "text/html; charset=UTF-8")
        self.assertEqual(handler.headers['Content-Length'],
                         str(len(body)))
        self.assertEqual(handler.headers['Cache-Control'],
                         "no-cache, no-store, max-age=0")
        self.assertEqual(body,
                         b'HTML for data from cat README.rst'
                         b' with AJAX poller for None')

    def test_handle_command_returns_error(self):
        handler = MyRequestHandlerForTests()
        with patch('subprocess.Popen', PopenStub('', 'cat: README.rst: no such file', 1)):
            body = handler.handle_command('cat README.rst')
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Content-Type'],
                         "text/html; charset=UTF-8")
        self.assertEqual(handler.headers['Content-Length'],
                         str(len(body)))
        self.assertEqual(handler.headers['Cache-Control'],
                         "no-cache, no-store, max-age=0")
        self.assertFalse('X-Restview-Mtime' in handler.headers)
        self.assertTrue(b'cat: README.rst: no such file' in body,
                        body)

    def test_handle_command_with_warnings(self):
        # stderr noise with a zero exit code is suppressed from the page.
        handler = MyRequestHandlerForTests()
        with patch('subprocess.Popen', PopenStub('hello', 'warning: blah blah', 0)):
            body = handler.handle_command('python setup.py --long-description')
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Content-Type'],
                         "text/html; charset=UTF-8")
        self.assertEqual(handler.headers['Content-Length'],
                         str(len(body)))
        self.assertEqual(handler.headers['Cache-Control'],
                         "no-cache, no-store, max-age=0")
        self.assertFalse('X-Restview-Mtime' in handler.headers)
        self.assertTrue(b'hello' in body, body)
        self.assertTrue(b'blah blah' not in body, body)

    def test_handle_command_returns_error_with_watch_files(self):
        handler = MyRequestHandlerForTests()
        with patch('subprocess.Popen', PopenStub('', 'cat: README.rst: no such file', 1)):
            body = handler.handle_command('cat README.rst', watch=['README.rst'])
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Content-Type'],
                         "text/html; charset=UTF-8")
        self.assertEqual(handler.headers['Content-Length'],
                         str(len(body)))
        self.assertEqual(handler.headers['Cache-Control'],
                         "no-cache, no-store, max-age=0")
        self.assertTrue('X-Restview-Mtime' in handler.headers)
        self.assertTrue(b'cat: README.rst: no such file' in body,
                        body)

    def test_handle_command_error(self):
        handler = MyRequestHandlerForTests()
        with patch('subprocess.Popen', self._raise_oserror):
            handler.handle_command('cat README.rst')
        self.assertEqual(handler.status, 500)
        self.assertEqual(handler.error_body,
                         'Command execution failed')
        self.assertEqual(handler.log, ["[Errno 2] no such file or directory"])

    def test_handle_rest_data(self):
        handler = MyRequestHandlerForTests()
        body = handler.handle_rest_data("*Hello*", mtime=1364808683)
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Content-Type'],
                         "text/html; charset=UTF-8")
        self.assertEqual(handler.headers['Content-Length'],
                         str(len(body)))
        self.assertEqual(handler.headers['Cache-Control'],
                         "no-cache, no-store, max-age=0")
        self.assertEqual(body,
                         b'HTML for *Hello* with AJAX poller for 1364808683')

    def test_collect_files(self):
        handler = MyRequestHandlerForTests()
        with patch('os.walk', self._os_walk):
            files = handler.collect_files('/path/to/dir')
        # Hidden/tool directories are skipped; results are sorted.
        self.assertEqual(files,
                         ['a.txt', os.path.join('subdir', 'b.txt'), 'z.rst'])

    def test_handle_dir(self):
        handler = MyRequestHandlerForTests()
        handler.collect_files = lambda dir: ['a.txt', 'b/c.txt']
        handler.render_dir_listing = lambda title, files: \
            unicode("<title>%s</title>\n%s" % (
                title,
                '\n'.join('%s - %s' % (path, fn) for path, fn in files)))
        body = handler.handle_dir('/path/to/dir')
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Content-Type'],
                         "text/html; charset=UTF-8")
        self.assertEqual(handler.headers['Content-Length'],
                         str(len(body)))
        where = os.path.abspath('/path/to/dir').encode()
        self.assertEqual(body,
                         b"<title>RST files in " + where + b"</title>\n"
                         b"a.txt - a.txt\n"
                         b"b/c.txt - b/c.txt")

    def test_handle_list(self):
        handler = MyRequestHandlerForTests()
        handler.collect_files = lambda dir: ['a.txt', os.path.join('b', 'c.txt')]
        handler.render_dir_listing = lambda title, files: \
            unicode("<title>%s</title>\n%s" % (
                title,
                '\n'.join('%s - %s' % (path, fn) for path, fn in files)))
        with patch('os.path.isdir', lambda fn: fn == 'subdir'):
            body = handler.handle_list([os.path.normpath('/path/to/file.txt'),
                                        'subdir'])
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Content-Type'],
                         "text/html; charset=UTF-8")
        self.assertEqual(handler.headers['Content-Length'],
                         str(len(body)))
        self.assertEqual(body,
                         b"<title>RST files</title>\n"
                         b"0/file.txt - #path#to#file.txt\n"
                         b"1/a.txt - subdir#a.txt\n"
                         b"1/b/c.txt - subdir#b#c.txt".replace(b"#", os.path.sep.encode()))
def doctest_MyRequestHandler_render_dir_listing():
    # Collected by doctest.DocTestSuite() in test_suite().
    """Test for MyRequestHandler.render_dir_listing

        >>> handler = MyRequestHandlerForTests()
        >>> print(handler.render_dir_listing('Files in .', [
        ...     ('1/README.rst', 'README.rst'),
        ...     ('2/CHANGES.rst', 'CHANGES.rst'),
        ... ]))
        <!DOCTYPE html>
        <html>
        <head>
        <title>Files in .</title>
        </head>
        <body>
        <h1>Files in .</h1>
        <ul>
        <li><a href="1/README.rst">README.rst</a></li>
        <li><a href="2/CHANGES.rst">CHANGES.rst</a></li>
        </ul>
        </body>
        </html>
        <BLANKLINE>

    """
def doctest_RestViewer_rest_to_html():
    # Collected by doctest.DocTestSuite() in test_suite().
    # NOTE(review): two garbled spans restored from extraction artifacts:
    # the sample email address (anonymized to <EMAIL>) and the escaped
    # '&gt;&gt;&gt; ' prompt inside the expected HTML (entity-decoded).
    """Test for RestViewer.rest_to_html

        >>> viewer = RestViewer('.')

        >>> print(viewer.rest_to_html(b'''
        ... example
        ... -------
        ...
        ... This is a doctest:
        ...
        ...     >>> 2 + 2
        ...
        ... This is a local file reference: README.rst
        ...
        ... This is a reference: `README.rst <http://example.com/README.rst>`_
        ...
        ... This is an email: marius@gedmin.as
        ...
        ... This is a literal block::
        ...
        ...     See CHANGES.rst, mkay?
        ...
        ... This is an inline literal: ``README.txt``.
        ... ''', settings={'cloak_email_addresses': True}).strip())
        ... # doctest: +ELLIPSIS,+REPORT_NDIFF
        <?xml version="1.0" encoding="utf-8" ?>
        <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
        <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
        <head>
        <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
        ...
        <title>example</title>
        <style type="text/css">
        ...
        /*
         * Stylesheet overrides for ReSTview
         */
        ...
        </style>
        </head>
        <body>
        <div class="document" id="example">
        <h1 class="title">example</h1>
        <BLANKLINE>
        <p>This is a doctest:</p>
        <blockquote>
        <pre class="doctest-block">
        <span class="gp">&gt;&gt;&gt; </span><span class="mi">2</span> <span class="o">+</span> <span class="mi">2</span>
        <BLANKLINE>
        </pre>
        </blockquote>
        <p>This is a local file reference: <a href="README.rst">README.rst</a></p>
        <p>This is a reference: <a class="reference external" href="http://example.com/README.rst">README.rst</a></p>
        <p>This is an email: <a class="reference external" href="mailto:marius%40gedmin.as">marius<span>@</span>gedmin<span>.</span>as</a></p>
        <p>This is a literal block:</p>
        <pre class="literal-block">
        See <a href="CHANGES.rst">CHANGES.rst</a>, mkay?
        </pre>
        <p>This is an inline literal: <tt class="docutils literal"><a href="README.txt">README.txt</a></tt>.</p>
        </div>
        </body>
        </html>

    """
def doctest_RestViewer_rest_to_html_css_url():
    # Collected by doctest.DocTestSuite() in test_suite().
    """Test for RestViewer.rest_to_html

    XXX: this shows pygments styles inlined *after* the external css, which
    means it's hard to override them!

        >>> viewer = RestViewer('.')
        >>> viewer.stylesheets = 'http://example.com/my.css'

        >>> print(viewer.rest_to_html(b'''
        ... Some text
        ... ''').strip())
        ... # doctest: +ELLIPSIS,+REPORT_NDIFF
        <?xml version="1.0" encoding="utf-8" ?>
        <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
        <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
        <head>
        <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
        ...
        <title>...</title>
        <link rel="stylesheet" href="http://example.com/my.css" type="text/css" />
        <style type="text/css">
        ...
        </style>
        </head>
        <body>
        <div class="document">
        <BLANKLINE>
        <BLANKLINE>
        <p>Some text</p>
        </div>
        </body>
        </html>

    """
def doctest_RestViewer_rest_to_html_strict_and_error_handling():
    # Collected by doctest.DocTestSuite() in test_suite().
    # NOTE(review): the expected error line is HTML-escaped output, so it
    # must read '&lt;string&gt;' — restored from entity-decoded '<string>'.
    """Test for RestViewer.rest_to_html

        >>> stderr_patcher = patch('sys.stderr', StringIO())
        >>> stderr = stderr_patcher.start()

        >>> viewer = RestViewer('.')
        >>> viewer.stylesheets = None
        >>> viewer.halt_level = 2
        >>> print(viewer.rest_to_html(b'''
        ... Some text with an `error
        ... ''', mtime=1364808683).strip())
        ... # doctest: +ELLIPSIS,+REPORT_NDIFF
        <!DOCTYPE html>
        <html>
        <head>
        <title>SystemMessage</title>
        <style type="text/css">
        pre.error {
        ...
        }
        </style>
        </head>
        <body>
        <h1>SystemMessage</h1>
        <pre class="error">
        &lt;string&gt;:2: (WARNING/2) Inline interpreted text or phrase reference start-string without end-string.
        </pre>
        <pre>
        <BLANKLINE>
        Some text with an `error
        <BLANKLINE>
        </pre>
        <BLANKLINE>
        <script type="text/javascript">
        var mtime = '1364808683';
        ...
        </script>
        </body>
        </html>

        >>> stderr_patcher.stop()

    """
def doctest_RestViewer_rest_to_html_pypi_strict_and_error_handling():
    # Collected by doctest.DocTestSuite() in test_suite().
    # NOTE(review): the .replace() call was garbled by HTML-entity decoding
    # (it read .replace(""", '"'), which even breaks the docstring quoting);
    # restored to .replace('&quot;', '"').  Ditto '&lt;string&gt;' below.
    """Test for RestViewer.rest_to_html in --pypi-strict mode

        >>> stderr_patcher = patch('sys.stderr', StringIO())
        >>> stderr = stderr_patcher.start()

        >>> viewer = RestViewer('.')
        >>> viewer.stylesheets = None
        >>> viewer.pypi_strict = True
        >>> print(viewer.rest_to_html(b'''
        ... Hello
        ... -----
        ...
        ... .. include:: /etc/passwd
        ...
        ... ''').strip().replace('&quot;', '"'))
        ... # doctest: +ELLIPSIS,+REPORT_NDIFF
        <!DOCTYPE html>
        <html>
        <head>
        <title>SystemMessage</title>
        <style type="text/css">
        pre.error {
        ...
        }
        </style>
        </head>
        <body>
        <h1>SystemMessage</h1>
        <pre class="error">
        &lt;string&gt;:5: (WARNING/2) "include" directive disabled.
        </pre>
        <pre>
        <BLANKLINE>
        Hello
        -----
        <BLANKLINE>
        .. include:: /etc/passwd
        <BLANKLINE>
        <BLANKLINE>
        </pre>
        </body>
        </html>

        >>> stderr_patcher.stop()

    """
def doctest_RestViewer_rest_to_html_pypi_strict():
    # Collected by doctest.DocTestSuite() in test_suite().
    # NOTE(review): the .replace() call was garbled by HTML-entity decoding;
    # restored to .replace('&quot;', '"') so the docstring parses again.
    """Test for RestViewer.rest_to_html in --pypi-strict mode

        >>> stderr_patcher = patch('sys.stderr', StringIO())

        >>> viewer = RestViewer('.')
        >>> viewer.stylesheets = None
        >>> viewer.pypi_strict = True
        >>> print(viewer.rest_to_html(b'''
        ... Hello
        ... -----
        ...
        ... `This is fine <http://www.example.com>`__.
        ...
        ... ''').strip().replace('&quot;', '"'))
        ... # doctest: +ELLIPSIS,+REPORT_NDIFF
        <?xml version="1.0" encoding="utf-8" ?>
        <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
        <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
        <head>
        <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
        ...
        <title>Hello</title>
        <style type="text/css">
        ...
        </head>
        <body>
        <div class="document" id="hello">
        <h1 class="title">Hello</h1>
        <BLANKLINE>
        <p><a href="http://www.example.com" rel="nofollow">This is fine</a>.</p>
        </div>
        </body>
        </html>

    """
def doctest_RestViewer_rest_to_html_pypi_strict_clean_failure():
    # Collected by doctest.DocTestSuite() in test_suite().
    """Test for RestViewer.rest_to_html in --pypi-strict mode

        >>> viewer = RestViewer('.')
        >>> viewer.stylesheets = None
        >>> viewer.pypi_strict = True
        >>> print(viewer.rest_to_html(b'''
        ... [http://localhost:3000](http://localhost:3000)
        ... ''').strip())
        ... # doctest: +ELLIPSIS,+REPORT_NDIFF
        <!DOCTYPE html>
        <html>
        <head>
        <title>ValueError</title>
        <style type="text/css">
        pre.error {
        ...
        }
        </style>
        </head>
        <body>
        <h1>ValueError</h1>
        <pre class="error">
        Output cleaning failed
        </pre>
        <pre>
        <BLANKLINE>
        [http://localhost:3000](http://localhost:3000)
        <BLANKLINE>
        </pre>
        </body>
        </html>

    """
def doctest_RestViewer_inject_ajax():
    # Without an mtime, inject_ajax() leaves the HTML untouched.
    """Test for RestViewer.inject_ajax

        >>> viewer = RestViewer('.')
        >>> print(viewer.inject_ajax('''
        ... <html>
        ... <head>
        ... <title>Title</title>
        ... </head>
        ... <body>
        ... <p>Some body text</p>
        ... </body>
        ... </html>
        ... ''').strip())
        <html>
        <head>
        <title>Title</title>
        </head>
        <body>
        <p>Some body text</p>
        </body>
        </html>

    """
def doctest_RestViewer_inject_ajax_adds_ajax():
    # With an mtime, inject_ajax() appends the polling <script> block.
    """Test for RestViewer.inject_ajax

        >>> viewer = RestViewer('.')
        >>> print(viewer.inject_ajax('''
        ... <html>
        ... <head>
        ... <title>Title</title>
        ... </head>
        ... <body>
        ... <p>Some body text</p>
        ... </body>
        ... </html>
        ... ''', mtime=1364808683).strip())
        ... # doctest: +ELLIPSIS,+REPORT_NDIFF
        <html>
        <head>
        <title>Title</title>
        </head>
        <body>
        <p>Some body text</p>
        <BLANKLINE>
        <script type="text/javascript">
        var mtime = '1364808683';
        ...
        </script>
        </body>
        </html>

    """
class TestRestViewer(unittest.TestCase):
    """Unit tests for RestViewer and its docutils error reporting."""

    def test_serve(self):
        viewer = RestViewer('.')
        viewer.server = Mock()
        viewer.serve()
        self.assertEqual(viewer.server.serve_forever.call_count, 1)

    def test_rest_to_html_halt_level(self):
        viewer = RestViewer('.')
        viewer.halt_level = 2
        html = viewer.rest_to_html(b'`Hello')
        self.assertIn('<title>SystemMessage</title>', html)

    def test_rest_to_html_report_level(self):
        viewer = RestViewer('.')
        viewer.report_level = 1
        html = viewer.rest_to_html(b'.. _unused:\n\nEtc.')
        self.assertIn('System Message: INFO/1', html)

    def make_error(self, msg, source='file.rst', line=None,
                   level=docutils.utils.Reporter.ERROR_LEVEL):
        # Build a docutils SystemMessage the way the Reporter would.
        # NOTE(review): uses docutils.nodes, which is only in scope because
        # importing docutils.utils pulls it in — confirm if docutils changes.
        sm = docutils.nodes.system_message(
            msg, level=level, type=docutils.utils.Reporter.levels[level],
            source=source, line=line)
        return docutils.utils.SystemMessage(sm, level)

    def test_docutils_exception_messages(self):
        err = self.make_error('dangling participle', 'file.rst', 42)
        # Let's make sure docutils hasn't changed their error format.
        self.assertEqual(str(err), 'file.rst:42: (ERROR/3) dangling participle')

    def test_docutils_exception_messages_no_source_path(self):
        err = self.make_error('dangling participle', None, 42)
        # Let's make sure docutils hasn't changed their error format.
        self.assertEqual(str(err), 'None:42: (ERROR/3) dangling participle')

    def test_extract_line_info(self):
        eli = RestViewer.extract_line_info
        err = self.make_error('dangling participle', 'file.rst', 42)
        self.assertEqual(eli(err, 'file.rst'), 42)

    def test_extract_line_unknown_file(self):
        eli = RestViewer.extract_line_info
        err = self.make_error('dangling participle', None, 42)
        self.assertEqual(eli(err, None), 42)

    def test_extract_line_info_wrong_file(self):
        # An error from a different file must not be attributed here.
        eli = RestViewer.extract_line_info
        err = self.make_error('dangling participle', 'file.rst', 42)
        self.assertEqual(eli(err, 'unrelated.rst'), None)

    def test_extract_line_info_other_kind_of_exception(self):
        eli = RestViewer.extract_line_info
        err = KeyboardInterrupt()
        self.assertEqual(eli(err, 'file.rst'), None)

    def test_highlight_line_no_line(self):
        hl = RestViewer.highlight_line
        source = 'line <1>\nline <2>\nline <3>\n'
        self.assertEqual(hl(source, None),
                         'line <1>\nline <2>\nline <3>\n')

    def test_highlight_line_beyond_eof(self):
        hl = RestViewer.highlight_line
        source = 'line <1>\nline <2>\nline <3>\n'
        self.assertEqual(hl(source, 42),
                         'line <1>\nline <2>\nline <3>\n')

    def test_highlight_line_before_bof(self):
        hl = RestViewer.highlight_line
        source = 'line <1>\nline <2>\nline <3>\n'
        self.assertEqual(hl(source, 0),
                         'line <1>\nline <2>\nline <3>\n')

    def test_highlight_line_first(self):
        hl = RestViewer.highlight_line
        source = 'line <1>\nline <2>\nline <3>\n'
        self.assertEqual(
            hl(source, 1),
            '<span class="highlight">line <1>\n</span>'
            'line <2>\n'
            'line <3>\n'
        )

    def test_highlight_line_middle(self):
        hl = RestViewer.highlight_line
        source = 'line <1>\nline <2>\nline <3>\n'
        self.assertEqual(
            hl(source, 2),
            'line <1>\n'
            '<span class="highlight">line <2>\n</span>'
            'line <3>\n'
        )

    def test_highlight_line_last(self):
        hl = RestViewer.highlight_line
        source = 'line <1>\nline <2>\nline <3>\n'
        self.assertEqual(
            hl(source, 3),
            'line <1>\n'
            'line <2>\n'
            '<span class="highlight">line <3>\n</span>'
        )
class TestGlobals(unittest.TestCase):
    """Tests for the module-level helpers get_host_name / launch_browser."""

    def test_get_host_name(self):
        # Empty and wildcard listen addresses resolve to the machine's
        # host name; an explicit address is passed through unchanged.
        cases = [
            ('', 'myhostname.local'),
            ('0.0.0.0', 'myhostname.local'),
            ('localhost', 'localhost'),
        ]
        with patch('socket.gethostname', lambda: 'myhostname.local'):
            for listen_address, expected in cases:
                self.assertEqual(get_host_name(listen_address), expected)

    def test_launch_browser(self):
        # The browser must open in a daemon thread so it cannot block
        # interpreter shutdown.
        with patch('threading.Thread') as Thread:
            launch_browser('http://example.com')
        Thread.assert_called_once_with(target=webbrowser.open,
                                       args=('http://example.com',))
        Thread.return_value.setDaemon.assert_called_once_with(True)
        self.assertEqual(Thread.return_value.start.call_count, 1)
class TestMain(unittest.TestCase):
    """Tests for the main() command-line entry point."""

    def _serve(self):
        # Replacement for RestViewer.serve: record that it was reached,
        # then stop main() the same way Ctrl+C would.
        self._serve_called = True
        raise KeyboardInterrupt()

    def run_main(self, *args, **kw):
        # Run main() with the given command-line arguments, with stdout,
        # stderr, the browser launcher and the server all patched out.
        # Returns (stdout, stderr) captured during the run.
        expected_exit_code = kw.pop('rc', 0)
        serve_called = kw.pop('serve_called', False)
        browser_launched = kw.pop('browser_launched', False)
        if kw:  # pragma: nocover
            raise TypeError("unexpected keyword arguments: %s"
                            % ", ".join(sorted(kw)))
        self._serve_called = False
        with patch('sys.argv', ['restview'] + list(args)):
            with patch('sys.stdout', StringIO()) as stdout:
                with patch('sys.stderr', StringIO()) as stderr:
                    with patch('restview.restviewhttp.launch_browser') as launch_browser:
                        with patch.object(RestViewer, 'serve', self._serve):
                            try:
                                main()
                            except SystemExit as e:
                                self.assertEqual(e.args[0], expected_exit_code)
                            else:  # pragma: nocover
                                if not serve_called:
                                    self.fail("main() did not raise SystemExit")
        if serve_called:
            self.assertTrue(self._serve_called)
        if browser_launched:
            self.assertEqual(launch_browser.call_count, 1)
        return stdout.getvalue(), stderr.getvalue()

    def test_help(self):
        stdout, stderr = self.run_main('--help')
        self.assertTrue('restview [options] root' in stdout, stdout)

    def test_error_when_no_arguments(self):
        stdout, stderr = self.run_main(rc=2)
        self.assertEqual(stderr.splitlines()[-1],
                         'restview: error: at least one argument expected')

    def test_error_when_both_command_and_file_specified(self):
        stdout, stderr = self.run_main('-e', 'cat README.rst', 'CHANGES.rst',
                                       rc=2)
        self.assertEqual(
            stderr.splitlines()[-1],
            'restview: error: specify a command (-e) or a file/directory, but not both',
            stderr)

    def test_all_is_well(self):
        self.run_main('.', serve_called=True, browser_launched=True)

    def test_multiple_files(self):
        self.run_main('README.rst', 'CHANGES.rst', serve_called=True,
                      browser_launched=True)

    def test_command(self):
        self.run_main('--long-description',
                      serve_called=True, browser_launched=True)

    def test_specify_listen_address(self):
        # An explicit non-local listen address suppresses the browser.
        with patch.object(RestViewer, 'listen'):
            with patch.object(RestViewer, 'close'):
                self.run_main('-l', '0.0.0.0:8080', '.',
                              serve_called=True, browser_launched=False)

    def test_specify_invalid_listen_address(self):
        stdout, stderr = self.run_main('-l', 'nonsense', '.', rc=2)
        self.assertEqual(stderr.splitlines()[-1],
                         'restview: error: Invalid address: nonsense')

    def test_specify_allowed_hosts(self):
        with patch.object(RestViewer, 'listen'):
            with patch.object(RestViewer, 'close'):
                self.run_main('--allowed-hosts', 'localhost,example.com', '.',
                              serve_called=True, browser_launched=False)

    def test_custom_css_url(self):
        self.run_main('.', '--css', 'http://example.com/my.css',
                      serve_called=True, browser_launched=True)

    def test_custom_css_file(self):
        self.run_main('.', '--css', 'my.css',
                      serve_called=True, browser_launched=True)
def test_suite():
    """Assemble the unit tests plus the doctests from this module and
    from restview.restviewhttp into a single suite."""
    doctest_flags = doctest.ELLIPSIS | doctest.REPORT_NDIFF
    suites = [
        unittest.defaultTestLoader.loadTestsFromName(__name__),
        doctest.DocTestSuite(optionflags=doctest_flags),
        doctest.DocTestSuite('restview.restviewhttp'),
    ]
    return unittest.TestSuite(suites)
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
import doctest
import errno
import os
import socket
import unittest
import webbrowser
# Python 2/3 compatibility: prefer the fast cStringIO implementation on
# Python 2; on Python 3 StringIO lives in the io module.
try:
    from cStringIO import StringIO
except ImportError:
    from io import StringIO
from mock import Mock, patch
import docutils.utils
from restview.restviewhttp import (MyRequestHandler, RestViewer,
get_host_name, launch_browser, main)
# Python 2/3 compatibility: Python 3 has no ``unicode`` builtin, so alias
# it to ``str`` there.
try:
    unicode
except NameError:
    unicode = str
class PopenStub(object):
    """Stand-in for ``subprocess.Popen`` that returns canned output.

    An instance is patched in place of the Popen *class*: calling it with
    any arguments (as if constructing a Popen) hands back the stub itself,
    and ``communicate()`` records the input it was given and returns the
    pre-configured (stdout, stderr) pair.
    """

    def __init__(self, stdout='', stderr='', retcode=0):
        self.returncode = retcode
        self._stdout = stdout
        self._stderr = stderr

    def __call__(self, *args, **kw):
        # Ignore the command line entirely; pretend construction worked.
        return self

    def communicate(self, stdin=''):
        # Remember what the "process" was fed, for later inspection.
        self._stdin = stdin
        return self._stdout, self._stderr
class MyRequestHandlerForTests(MyRequestHandler):
    """Testable MyRequestHandler that skips all real socket machinery.

    Deliberately does NOT call the base ``__init__`` (which would start
    handling an actual request); instead it sets up just enough state and
    records responses/headers/errors in plain attributes for assertions.
    """

    def __init__(self):
        self.headers = {'Host': 'localhost'}  # request headers
        self._headers = {}  # response headers
        self.log = []  # collected log_error() messages
        self.server = Mock()
        self.server.renderer.command = None
        self.server.renderer.watch = None
        self.server.renderer.allowed_hosts = ['localhost']
        # Fake renderers produce predictable strings so tests can assert
        # on exact output without running docutils.
        self.server.renderer.rest_to_html = lambda data, mtime=None, filename=None: \
            unicode('HTML for %s with AJAX poller for %s' % (data, mtime))
        self.server.renderer.render_exception = lambda title, error, source, mtime=None: \
            unicode('HTML for error %s: %s: %s' % (title, error, source))

    def send_response(self, status):
        # Record the HTTP status instead of writing to a socket.
        self.status = status

    def send_header(self, header, value):
        self._headers[header] = value

    def end_headers(self):
        # NOTE: replaces the request headers with the accumulated response
        # headers, so tests read ``handler.headers`` after the response.
        self.headers = self._headers

    def send_error(self, status, body):
        self.status = status
        self.error_body = body

    def log_error(self, message, *args):
        if args:
            message = message % args
        self.log.append(message)
class TestMyRequestHandler(unittest.TestCase):
    """Unit tests for MyRequestHandler's dispatching and handler methods."""

    def _os_walk(self, dirpath):
        # Fake os.walk: one root containing noise directories (VCS, tox,
        # egg-info) that collect_files is expected to skip, plus one real
        # subdirectory with further files.
        dirnames = ['.svn', '.tox', 'subdir', 'mypackage.egg-info']
        filenames = ['a.txt', 'z.rst', 'unrelated.py']
        yield dirpath, dirnames, filenames
        for subdir in dirnames:
            yield os.path.join(dirpath, subdir), [], ['b.txt', 'c.py']

    def _raise_oserror(self, *args, **kw):
        # Stand-in for any call that should fail with ENOENT.
        raise OSError(errno.ENOENT, "no such file or directory")

    def _raise_socket_error(self, *args):
        # Stand-in for a write to a socket the client already closed.
        raise socket.error("connection reset by peer")

    def setUp(self):
        # Platform-normalized root paths used by the helpers below.
        self.root = os.path.normpath('/root')
        self.root2 = os.path.normpath('/root2')

    def filepath(self, *names):
        return os.path.join(self.root, *names)

    def filepath2(self, *names):
        return os.path.join(self.root2, *names)

    def test_do_GET(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/a.txt'
        handler.server.renderer.root = self.filepath('file.txt')
        handler.handle_rest_file = lambda fn, watch=None: 'HTML for %s' % fn
        handler.wfile = StringIO()
        handler.do_GET()
        self.assertEqual(handler.wfile.getvalue(),
                         'HTML for %s' % self.filepath('a.txt'))

    def test_do_HEAD(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/a.txt'
        handler.server.renderer.root = self.filepath('file.txt')
        handler.handle_rest_file = lambda fn, watch=None: 'HTML for %s' % fn
        handler.wfile = StringIO()
        handler.do_HEAD()
        # HEAD must produce headers only -- no body is written.
        self.assertEqual(handler.wfile.getvalue(), '')

    def test_do_GET_or_HEAD_root_when_file(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/'
        handler.server.renderer.root = self.filepath('file.txt')
        handler.server.renderer.command = None
        handler.handle_rest_file = lambda fn, watch=None: 'HTML for %s' % fn
        with patch('os.path.isdir', lambda dir: False):
            body = handler.do_GET_or_HEAD()
        self.assertEqual(body, 'HTML for %s' % self.filepath('file.txt'))

    def test_do_GET_or_HEAD_root_when_dir(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/'
        handler.server.renderer.root = self.root
        handler.server.renderer.command = None
        handler.handle_dir = lambda fn: 'Files in %s' % fn
        with patch('os.path.isdir', lambda dir: True):
            body = handler.do_GET_or_HEAD()
        self.assertEqual(body, 'Files in %s' % self.root)

    def test_do_GET_or_HEAD_root_when_list(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/'
        handler.server.renderer.root = [self.root, self.root2]
        handler.server.renderer.command = None
        handler.handle_list = lambda roots: 'Files in %s' % ", ".join(roots)
        body = handler.do_GET_or_HEAD()
        self.assertEqual(body, 'Files in %s, %s' % (self.root, self.root2))

    def test_do_GET_or_HEAD_root_when_command(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/'
        handler.server.renderer.root = None
        handler.server.renderer.command = 'cat README.rst'
        handler.handle_command = lambda cmd, watch: 'Output of %s' % cmd
        body = handler.do_GET_or_HEAD()
        self.assertEqual(body, 'Output of cat README.rst')

    def test_do_GET_or_HEAD_polling(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/polling?pathname=a.txt&mtime=12345'
        handler.server.renderer.root = self.root
        handler.handle_polling = lambda fns, mt: 'Got update for %s since %s' % (','.join(fns), mt)
        with patch('os.path.isdir', lambda dir: dir == self.root):
            body = handler.do_GET_or_HEAD()
        expected_fn = self.filepath('a.txt')
        self.assertEqual(body, 'Got update for %s since 12345' % expected_fn)

    def test_do_GET_or_HEAD_polling_of_root(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/polling?pathname=/&mtime=12345'
        handler.server.renderer.root = self.filepath('a.txt')
        handler.handle_polling = lambda fns, mt: 'Got update for %s since %s' % (','.join(fns), mt)
        body = handler.do_GET_or_HEAD()
        expected_fn = self.filepath('a.txt')
        self.assertEqual(body, 'Got update for %s since 12345' % expected_fn)

    def test_do_GET_or_HEAD_polling_of_command_with_watch_files(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/polling?pathname=/&mtime=12345'
        handler.server.renderer.command = 'python setup.py --long-description'
        handler.server.renderer.watch = ['setup.py', 'README.rst']
        handler.handle_polling = lambda fns, mt: 'Got update for %s since %s' % (','.join(fns), mt)
        body = handler.do_GET_or_HEAD()
        self.assertEqual(body, 'Got update for setup.py,README.rst since 12345')

    def test_do_GET_or_HEAD_polling_of_root_with_watch_files(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/polling?pathname=/&mtime=12345'
        handler.server.renderer.root = self.filepath('a.txt')
        handler.server.renderer.watch = ['my.css']
        handler.handle_polling = lambda fns, mt: 'Got update for %s since %s' % (','.join(fns), mt)
        body = handler.do_GET_or_HEAD()
        expected_fn = self.filepath('a.txt')
        self.assertEqual(body, 'Got update for %s,my.css since 12345' % expected_fn)

    def test_do_GET_or_HEAD_prevent_sandbox_climbing_attacks(self):
        # Paths escaping the served root must be rejected with 400.
        handler = MyRequestHandlerForTests()
        handler.path = '/polling?pathname=../../../etc/passwd&mtime=12345'
        handler.server.renderer.root = self.filepath('a.txt')
        handler.do_GET_or_HEAD()
        self.assertEqual(handler.status, 400)
        self.assertEqual(handler.error_body, "Bad request")

    def test_do_GET_or_HEAD_prevent_dns_rebinding(self):
        # A Host header outside allowed_hosts must be rejected with 400.
        handler = MyRequestHandlerForTests()
        handler.headers['Host'] = 'mallory.example.com:80'
        handler.path = '/'
        handler.server.renderer.root = self.filepath('a.txt')
        handler.do_GET_or_HEAD()
        self.assertEqual(handler.status, 400)
        self.assertEqual(handler.error_body, "Host header not in allowed list")

    def test_do_GET_or_HEAD_images(self):
        # Each supported image extension maps to its content type.
        for filename, ctype in [('a.png', 'image/png'),
                                ('a.gif', 'image/gif'),
                                ('a.jpg', 'image/jpeg'),
                                ('a.jpeg', 'image/jpeg'),
                                ('a.svg', 'image/svg+xml'),
                                ('favicon.ico', 'image/x-icon')]:
            handler = MyRequestHandlerForTests()
            handler.path = '/' + filename
            handler.server.renderer.root = self.filepath('a.txt')
            handler.server.renderer.favicon_path = self.filepath('favicon.ico')
            handler.handle_image = lambda fn, ct: '%s (%s)' % (fn, ct)
            body = handler.do_GET_or_HEAD()
            self.assertEqual(body, '%s (%s)' % (self.filepath(filename), ctype))

    def test_do_GET_or_HEAD_rst_files(self):
        for filename in ['a.txt', 'a.rst']:
            handler = MyRequestHandlerForTests()
            handler.path = '/' + filename
            handler.server.renderer.root = self.filepath('file.txt')
            handler.handle_rest_file = lambda fn, watch=None: 'HTML for %s' % fn
            body = handler.do_GET_or_HEAD()
            self.assertEqual(body, 'HTML for %s' % self.filepath(filename))

    def test_do_GET_or_HEAD_other_files(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/a.py'
        handler.server.renderer.root = self.filepath('file.txt')
        handler.do_GET_or_HEAD()
        self.assertEqual(handler.status, 501)
        self.assertEqual(handler.error_body, "File type not supported: /a.py")

    def test_handle_polling(self):
        handler = MyRequestHandlerForTests()
        filename = os.path.join(os.path.dirname(__file__), '__init__.py')
        with patch('time.sleep') as sleep:
            # Two stat results: unchanged mtime first (sleep and retry),
            # then a newer mtime that ends the polling loop.
            stat = {filename: [Mock(st_mtime=123455), Mock(st_mtime=123456)]}
            with patch('os.stat', lambda fn: stat[fn].pop(0)):
                handler.handle_polling([filename], 123455)
            sleep.assert_called_once_with(0.2)
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Cache-Control'],
                         "no-cache, no-store, max-age=0")

    def test_handle_polling_handles_interruptions(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/polling?pathname=__init__.py&mtime=123455'
        # Writing the response blows up as if the client disconnected.
        handler.send_response = self._raise_socket_error
        filename = os.path.join(os.path.dirname(__file__), '__init__.py')
        stat = {filename: [Mock(st_mtime=123456)]}
        with patch('os.stat', lambda fn: stat[fn].pop(0)):
            handler.handle_polling([filename], 123455)
        self.assertEqual(
            handler.log,
            ['connection reset by peer'
             ' (client closed "%s" before acknowledgement)' % handler.path])

    def test_handle_polling_handles_disappearing_files(self):
        handler = MyRequestHandlerForTests()
        filename = os.path.join(os.path.dirname(__file__), '__init__.py')
        with patch('time.sleep'):
            # Middle stat raises ENOENT (e.g. editor save replacing the
            # file); polling must survive that and keep going.
            stat = {filename: [lambda: Mock(st_mtime=123455),
                               self._raise_oserror,
                               lambda: Mock(st_mtime=123456)]}
            with patch('os.stat', lambda fn: stat[fn].pop(0)()):
                handler.handle_polling([filename], 123455)
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Cache-Control'],
                         "no-cache, no-store, max-age=0")

    def test_translate_path_when_root_is_a_file(self):
        handler = MyRequestHandlerForTests()
        handler.server.renderer.root = self.filepath('file.txt')
        handler.path = '/a.txt'
        with patch('os.path.isdir', lambda dir: False):
            self.assertEqual(handler.translate_path(), self.filepath('a.txt'))
            self.assertEqual(handler.translate_path('/file.png'),
                             self.filepath('file.png'))

    def test_translate_path_when_root_is_a_directory(self):
        handler = MyRequestHandlerForTests()
        handler.server.renderer.root = self.root
        handler.path = '/a.txt'
        with patch('os.path.isdir', lambda dir: True):
            self.assertEqual(handler.translate_path(), self.filepath('a.txt'))
            self.assertEqual(handler.translate_path('/file.txt'),
                             self.filepath('file.txt'))

    def test_translate_path_when_root_is_a_sequence(self):
        # With multiple roots, URLs are prefixed with the root's index.
        handler = MyRequestHandlerForTests()
        handler.server.renderer.root = [self.root, self.root2]
        handler.path = '/0/a.txt'
        with patch('os.path.isdir', lambda dir: '.' not in dir):
            self.assertEqual(handler.translate_path(), self.filepath('a.txt'))
            self.assertEqual(handler.translate_path('/1/b.txt'),
                             self.filepath2('b.txt'))

    def test_handle_image(self):
        handler = MyRequestHandlerForTests()
        filename = os.path.join(os.path.dirname(__file__), '__init__.py')
        body = handler.handle_image(filename, 'image/python')  # ha ha
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Content-Type'],
                         "image/python")
        self.assertEqual(handler.headers['Content-Length'],
                         str(len(body)))
        self.assertTrue(isinstance(body, bytes))

    def test_handle_image_error(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/nosuchfile.png'
        handler.handle_image('nosuchfile.png', 'image/png')
        self.assertEqual(handler.status, 404)
        self.assertEqual(handler.error_body,
                         "File not found: /nosuchfile.png")

    def test_handle_rest_file(self):
        handler = MyRequestHandlerForTests()
        filename = os.path.join(os.path.dirname(__file__), '__init__.py')
        mtime = os.stat(filename).st_mtime
        body = handler.handle_rest_file(filename)
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Content-Type'],
                         "text/html; charset=UTF-8")
        self.assertEqual(handler.headers['Content-Length'],
                         str(len(body)))
        self.assertEqual(handler.headers['Cache-Control'],
                         "no-cache, no-store, max-age=0")
        self.assertTrue(body.startswith(b'HTML for'))
        self.assertTrue(body.endswith(('with AJAX poller for %s' % mtime).encode()))

    def test_handle_rest_file_extra_watch(self):
        # The AJAX poller mtime is the newest of the file and its watch
        # list, so a newer watched file bumps it and an older one doesn't.
        handler = MyRequestHandlerForTests()
        filename = os.path.join(os.path.dirname(__file__), '__init__.py')
        mtime = os.stat(filename).st_mtime
        with patch('os.stat', lambda fn: {filename: Mock(st_mtime=mtime),
                                          'my.css': Mock(st_mtime=mtime + 1)}[fn]):
            body = handler.handle_rest_file(filename, watch=['my.css'])
        self.assertEqual(handler.status, 200)
        self.assertTrue(body.endswith(('with AJAX poller for %s' % (mtime + 1)).encode()))
        with patch('os.stat', lambda fn: {filename: Mock(st_mtime=mtime),
                                          'my.css': Mock(st_mtime=mtime - 1)}[fn]):
            body = handler.handle_rest_file(filename, watch=['my.css'])
        self.assertEqual(handler.status, 200)
        self.assertTrue(body.endswith(('with AJAX poller for %s' % mtime).encode()))

    def test_handle_rest_file_error(self):
        handler = MyRequestHandlerForTests()
        handler.path = '/nosuchfile.txt'
        handler.handle_rest_file('nosuchfile.txt')
        self.assertEqual(handler.status, 404)
        self.assertEqual(handler.error_body,
                         "File not found: /nosuchfile.txt")
        self.assertEqual(handler.log,
                         ["[Errno 2] No such file or directory: 'nosuchfile.txt'"])

    def test_handle_command(self):
        handler = MyRequestHandlerForTests()
        with patch('subprocess.Popen', PopenStub('data from cat README.rst')):
            body = handler.handle_command('cat README.rst')
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Content-Type'],
                         "text/html; charset=UTF-8")
        self.assertEqual(handler.headers['Content-Length'],
                         str(len(body)))
        self.assertEqual(handler.headers['Cache-Control'],
                         "no-cache, no-store, max-age=0")
        self.assertEqual(body,
                         b'HTML for data from cat README.rst'
                         b' with AJAX poller for None')

    def test_handle_command_returns_error(self):
        # A failing command still renders (status 200) with its stderr
        # shown in the page; no polling mtime header is sent.
        handler = MyRequestHandlerForTests()
        with patch('subprocess.Popen', PopenStub('', 'cat: README.rst: no such file', 1)):
            body = handler.handle_command('cat README.rst')
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Content-Type'],
                         "text/html; charset=UTF-8")
        self.assertEqual(handler.headers['Content-Length'],
                         str(len(body)))
        self.assertEqual(handler.headers['Cache-Control'],
                         "no-cache, no-store, max-age=0")
        self.assertFalse('X-Restview-Mtime' in handler.headers)
        self.assertTrue(b'cat: README.rst: no such file' in body,
                        body)

    def test_handle_command_with_warnings(self):
        # stderr noise from a *successful* command is discarded.
        handler = MyRequestHandlerForTests()
        with patch('subprocess.Popen', PopenStub('hello', 'warning: blah blah', 0)):
            body = handler.handle_command('python setup.py --long-description')
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Content-Type'],
                         "text/html; charset=UTF-8")
        self.assertEqual(handler.headers['Content-Length'],
                         str(len(body)))
        self.assertEqual(handler.headers['Cache-Control'],
                         "no-cache, no-store, max-age=0")
        self.assertFalse('X-Restview-Mtime' in handler.headers)
        self.assertTrue(b'hello' in body, body)
        self.assertTrue(b'blah blah' not in body, body)

    def test_handle_command_returns_error_with_watch_files(self):
        # With a watch list, the mtime header IS sent even on failure so
        # the client keeps polling for a fix.
        handler = MyRequestHandlerForTests()
        with patch('subprocess.Popen', PopenStub('', 'cat: README.rst: no such file', 1)):
            body = handler.handle_command('cat README.rst', watch=['README.rst'])
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Content-Type'],
                         "text/html; charset=UTF-8")
        self.assertEqual(handler.headers['Content-Length'],
                         str(len(body)))
        self.assertEqual(handler.headers['Cache-Control'],
                         "no-cache, no-store, max-age=0")
        self.assertTrue('X-Restview-Mtime' in handler.headers)
        self.assertTrue(b'cat: README.rst: no such file' in body,
                        body)

    def test_handle_command_error(self):
        handler = MyRequestHandlerForTests()
        with patch('subprocess.Popen', self._raise_oserror):
            handler.handle_command('cat README.rst')
        self.assertEqual(handler.status, 500)
        self.assertEqual(handler.error_body,
                         'Command execution failed')
        self.assertEqual(handler.log, ["[Errno 2] no such file or directory"])

    def test_handle_rest_data(self):
        handler = MyRequestHandlerForTests()
        body = handler.handle_rest_data("*Hello*", mtime=1364808683)
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Content-Type'],
                         "text/html; charset=UTF-8")
        self.assertEqual(handler.headers['Content-Length'],
                         str(len(body)))
        self.assertEqual(handler.headers['Cache-Control'],
                         "no-cache, no-store, max-age=0")
        self.assertEqual(body,
                         b'HTML for *Hello* with AJAX poller for 1364808683')

    def test_collect_files(self):
        handler = MyRequestHandlerForTests()
        with patch('os.walk', self._os_walk):
            files = handler.collect_files('/path/to/dir')
        # Sorted, and noise directories / non-ReST files filtered out.
        self.assertEqual(files,
                         ['a.txt', os.path.join('subdir', 'b.txt'), 'z.rst'])

    def test_handle_dir(self):
        handler = MyRequestHandlerForTests()
        handler.collect_files = lambda dir: ['a.txt', 'b/c.txt']
        handler.render_dir_listing = lambda title, files: \
            unicode("<title>%s</title>\n%s" % (
                title,
                '\n'.join('%s - %s' % (path, fn) for path, fn in files)))
        body = handler.handle_dir('/path/to/dir')
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Content-Type'],
                         "text/html; charset=UTF-8")
        self.assertEqual(handler.headers['Content-Length'],
                         str(len(body)))
        where = os.path.abspath('/path/to/dir').encode()
        self.assertEqual(body,
                         b"<title>RST files in " + where + b"</title>\n"
                         b"a.txt - a.txt\n"
                         b"b/c.txt - b/c.txt")

    def test_handle_list(self):
        handler = MyRequestHandlerForTests()
        handler.collect_files = lambda dir: ['a.txt', os.path.join('b', 'c.txt')]
        handler.render_dir_listing = lambda title, files: \
            unicode("<title>%s</title>\n%s" % (
                title,
                '\n'.join('%s - %s' % (path, fn) for path, fn in files)))
        with patch('os.path.isdir', lambda fn: fn == 'subdir'):
            body = handler.handle_list([os.path.normpath('/path/to/file.txt'),
                                        'subdir'])
        self.assertEqual(handler.status, 200)
        self.assertEqual(handler.headers['Content-Type'],
                         "text/html; charset=UTF-8")
        self.assertEqual(handler.headers['Content-Length'],
                         str(len(body)))
        self.assertEqual(body,
                         b"<title>RST files</title>\n"
                         b"0/file.txt - #path#to#file.txt\n"
                         b"1/a.txt - subdir#a.txt\n"
                         b"1/b/c.txt - subdir#b#c.txt".replace(b"#", os.path.sep.encode()))
def doctest_MyRequestHandler_render_dir_listing():
"""Test for MyRequestHandler.render_dir_listing
>>> handler = MyRequestHandlerForTests()
>>> print(handler.render_dir_listing('Files in .', [
... ('1/README.rst', 'README.rst'),
... ('2/CHANGES.rst', 'CHANGES.rst'),
... ]))
<!DOCTYPE html>
<html>
<head>
<title>Files in .</title>
</head>
<body>
<h1>Files in .</h1>
<ul>
<li><a href="1/README.rst">README.rst</a></li>
<li><a href="2/CHANGES.rst">CHANGES.rst</a></li>
</ul>
</body>
</html>
<BLANKLINE>
"""
def doctest_RestViewer_rest_to_html():
"""Test for RestViewer.rest_to_html
>>> viewer = RestViewer('.')
>>> print(viewer.rest_to_html(b'''
... example
... -------
...
... This is a doctest:
...
... >>> 2 + 2
...
... This is a local file reference: README.rst
...
... This is a reference: `README.rst <http://example.com/README.rst>`_
...
... This is an email: <EMAIL>
...
... This is a literal block::
...
... See CHANGES.rst, mkay?
...
... This is an inline literal: ``README.txt``.
... ''', settings={'cloak_email_addresses': True}).strip())
... # doctest: +ELLIPSIS,+REPORT_NDIFF
<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
...
<title>example</title>
<style type="text/css">
...
/*
* Stylesheet overrides for ReSTview
*/
...
</style>
</head>
<body>
<div class="document" id="example">
<h1 class="title">example</h1>
<BLANKLINE>
<p>This is a doctest:</p>
<blockquote>
<pre class="doctest-block">
<span class="gp">>>> </span><span class="mi">2</span> <span class="o">+</span> <span class="mi">2</span>
<BLANKLINE>
</pre>
</blockquote>
<p>This is a local file reference: <a href="README.rst">README.rst</a></p>
<p>This is a reference: <a class="reference external" href="http://example.com/README.rst">README.rst</a></p>
<p>This is an email: <a class="reference external" href="mailto:marius%40gedmin.as">marius<span>@</span>gedmin<span>.</span>as</a></p>
<p>This is a literal block:</p>
<pre class="literal-block">
See <a href="CHANGES.rst">CHANGES.rst</a>, mkay?
</pre>
<p>This is an inline literal: <tt class="docutils literal"><a href="README.txt">README.txt</a></tt>.</p>
</div>
</body>
</html>
"""
def doctest_RestViewer_rest_to_html_css_url():
"""Test for RestViewer.rest_to_html
XXX: this shows pygments styles inlined *after* the external css, which
means it's hard to override them!
>>> viewer = RestViewer('.')
>>> viewer.stylesheets = 'http://example.com/my.css'
>>> print(viewer.rest_to_html(b'''
... Some text
... ''').strip())
... # doctest: +ELLIPSIS,+REPORT_NDIFF
<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
...
<title>...</title>
<link rel="stylesheet" href="http://example.com/my.css" type="text/css" />
<style type="text/css">
...
</style>
</head>
<body>
<div class="document">
<BLANKLINE>
<BLANKLINE>
<p>Some text</p>
</div>
</body>
</html>
"""
def doctest_RestViewer_rest_to_html_strict_and_error_handling():
"""Test for RestViewer.rest_to_html
>>> stderr_patcher = patch('sys.stderr', StringIO())
>>> stderr = stderr_patcher.start()
>>> viewer = RestViewer('.')
>>> viewer.stylesheets = None
>>> viewer.halt_level = 2
>>> print(viewer.rest_to_html(b'''
... Some text with an `error
... ''', mtime=1364808683).strip())
... # doctest: +ELLIPSIS,+REPORT_NDIFF
<!DOCTYPE html>
<html>
<head>
<title>SystemMessage</title>
<style type="text/css">
pre.error {
...
}
</style>
</head>
<body>
<h1>SystemMessage</h1>
<pre class="error">
<string>:2: (WARNING/2) Inline interpreted text or phrase reference start-string without end-string.
</pre>
<pre>
<BLANKLINE>
Some text with an `error
<BLANKLINE>
</pre>
<BLANKLINE>
<script type="text/javascript">
var mtime = '1364808683';
...
</script>
</body>
</html>
>>> stderr_patcher.stop()
"""
def doctest_RestViewer_rest_to_html_pypi_strict_and_error_handling():
"""Test for RestViewer.rest_to_html in --pypi-strict mode
>>> stderr_patcher = patch('sys.stderr', StringIO())
>>> stderr = stderr_patcher.start()
>>> viewer = RestViewer('.')
>>> viewer.stylesheets = None
>>> viewer.pypi_strict = True
>>> print(viewer.rest_to_html(b'''
... Hello
... -----
...
... .. include:: /etc/passwd
...
... ''').strip().replace(""", '"'))
... # doctest: +ELLIPSIS,+REPORT_NDIFF
<!DOCTYPE html>
<html>
<head>
<title>SystemMessage</title>
<style type="text/css">
pre.error {
...
}
</style>
</head>
<body>
<h1>SystemMessage</h1>
<pre class="error">
<string>:5: (WARNING/2) "include" directive disabled.
</pre>
<pre>
<BLANKLINE>
Hello
-----
<BLANKLINE>
.. include:: /etc/passwd
<BLANKLINE>
<BLANKLINE>
</pre>
</body>
</html>
>>> stderr_patcher.stop()
"""
def doctest_RestViewer_rest_to_html_pypi_strict():
"""Test for RestViewer.rest_to_html in --pypi-strict mode
>>> stderr_patcher = patch('sys.stderr', StringIO())
>>> viewer = RestViewer('.')
>>> viewer.stylesheets = None
>>> viewer.pypi_strict = True
>>> print(viewer.rest_to_html(b'''
... Hello
... -----
...
... `This is fine <http://www.example.com>`__.
...
... ''').strip().replace(""", '"'))
... # doctest: +ELLIPSIS,+REPORT_NDIFF
<?xml version="1.0" encoding="utf-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
...
<title>Hello</title>
<style type="text/css">
...
</head>
<body>
<div class="document" id="hello">
<h1 class="title">Hello</h1>
<BLANKLINE>
<p><a href="http://www.example.com" rel="nofollow">This is fine</a>.</p>
</div>
</body>
</html>
"""
def doctest_RestViewer_rest_to_html_pypi_strict_clean_failure():
"""Test for RestViewer.rest_to_html in --pypi-strict mode
>>> viewer = RestViewer('.')
>>> viewer.stylesheets = None
>>> viewer.pypi_strict = True
>>> print(viewer.rest_to_html(b'''
... [http://localhost:3000](http://localhost:3000)
... ''').strip())
... # doctest: +ELLIPSIS,+REPORT_NDIFF
<!DOCTYPE html>
<html>
<head>
<title>ValueError</title>
<style type="text/css">
pre.error {
...
}
</style>
</head>
<body>
<h1>ValueError</h1>
<pre class="error">
Output cleaning failed
</pre>
<pre>
<BLANKLINE>
[http://localhost:3000](http://localhost:3000)
<BLANKLINE>
</pre>
</body>
</html>
"""
def doctest_RestViewer_inject_ajax():
"""Test for RestViewer.inject_ajax
>>> viewer = RestViewer('.')
>>> print(viewer.inject_ajax('''
... <html>
... <head>
... <title>Title</title>
... </head>
... <body>
... <p>Some body text</p>
... </body>
... </html>
... ''').strip())
<html>
<head>
<title>Title</title>
</head>
<body>
<p>Some body text</p>
</body>
</html>
"""
def doctest_RestViewer_inject_ajax_adds_ajax():
"""Test for RestViewer.inject_ajax
>>> viewer = RestViewer('.')
>>> print(viewer.inject_ajax('''
... <html>
... <head>
... <title>Title</title>
... </head>
... <body>
... <p>Some body text</p>
... </body>
... </html>
... ''', mtime=1364808683).strip())
... # doctest: +ELLIPSIS,+REPORT_NDIFF
<html>
<head>
<title>Title</title>
</head>
<body>
<p>Some body text</p>
<BLANKLINE>
<script type="text/javascript">
var mtime = '1364808683';
...
</script>
</body>
</html>
"""
class TestRestViewer(unittest.TestCase):
    """Unit tests for RestViewer's rendering and error-reporting helpers."""

    def test_serve(self):
        viewer = RestViewer('.')
        viewer.server = Mock()
        viewer.serve()
        self.assertEqual(viewer.server.serve_forever.call_count, 1)

    def test_rest_to_html_halt_level(self):
        viewer = RestViewer('.')
        viewer.halt_level = 2
        html = viewer.rest_to_html(b'`Hello')
        self.assertIn('<title>SystemMessage</title>', html)

    def test_rest_to_html_report_level(self):
        viewer = RestViewer('.')
        viewer.report_level = 1
        html = viewer.rest_to_html(b'.. _unused:\n\nEtc.')
        self.assertIn('System Message: INFO/1', html)

    def make_error(self, msg, source='file.rst', line=None,
                   level=docutils.utils.Reporter.ERROR_LEVEL):
        # Helper: build a docutils SystemMessage like the parser would.
        # NOTE(review): relies on ``import docutils.utils`` also making
        # ``docutils.nodes`` importable transitively -- confirm.
        sm = docutils.nodes.system_message(
            msg, level=level, type=docutils.utils.Reporter.levels[level],
            source=source, line=line)
        return docutils.utils.SystemMessage(sm, level)

    def test_docutils_exception_messages(self):
        err = self.make_error('dangling participle', 'file.rst', 42)
        # Let's make sure docutils hasn't changed their error format.
        self.assertEqual(str(err), 'file.rst:42: (ERROR/3) dangling participle')

    def test_docutils_exception_messages_no_source_path(self):
        err = self.make_error('dangling participle', None, 42)
        # Let's make sure docutils hasn't changed their error format.
        self.assertEqual(str(err), 'None:42: (ERROR/3) dangling participle')

    def test_extract_line_info(self):
        eli = RestViewer.extract_line_info
        err = self.make_error('dangling participle', 'file.rst', 42)
        self.assertEqual(eli(err, 'file.rst'), 42)

    def test_extract_line_unknown_file(self):
        eli = RestViewer.extract_line_info
        err = self.make_error('dangling participle', None, 42)
        self.assertEqual(eli(err, None), 42)

    def test_extract_line_info_wrong_file(self):
        # Errors from a different file give no line to highlight.
        eli = RestViewer.extract_line_info
        err = self.make_error('dangling participle', 'file.rst', 42)
        self.assertEqual(eli(err, 'unrelated.rst'), None)

    def test_extract_line_info_other_kind_of_exception(self):
        eli = RestViewer.extract_line_info
        err = KeyboardInterrupt()
        self.assertEqual(eli(err, 'file.rst'), None)

    def test_highlight_line_no_line(self):
        hl = RestViewer.highlight_line
        source = 'line <1>\nline <2>\nline <3>\n'
        self.assertEqual(hl(source, None),
                         'line <1>\nline <2>\nline <3>\n')

    def test_highlight_line_beyond_eof(self):
        hl = RestViewer.highlight_line
        source = 'line <1>\nline <2>\nline <3>\n'
        self.assertEqual(hl(source, 42),
                         'line <1>\nline <2>\nline <3>\n')

    def test_highlight_line_before_bof(self):
        hl = RestViewer.highlight_line
        source = 'line <1>\nline <2>\nline <3>\n'
        self.assertEqual(hl(source, 0),
                         'line <1>\nline <2>\nline <3>\n')

    def test_highlight_line_first(self):
        hl = RestViewer.highlight_line
        source = 'line <1>\nline <2>\nline <3>\n'
        self.assertEqual(
            hl(source, 1),
            '<span class="highlight">line <1>\n</span>'
            'line <2>\n'
            'line <3>\n'
        )

    def test_highlight_line_middle(self):
        hl = RestViewer.highlight_line
        source = 'line <1>\nline <2>\nline <3>\n'
        self.assertEqual(
            hl(source, 2),
            'line <1>\n'
            '<span class="highlight">line <2>\n</span>'
            'line <3>\n'
        )

    def test_highlight_line_last(self):
        hl = RestViewer.highlight_line
        source = 'line <1>\nline <2>\nline <3>\n'
        self.assertEqual(
            hl(source, 3),
            'line <1>\n'
            'line <2>\n'
            '<span class="highlight">line <3>\n</span>'
        )
class TestGlobals(unittest.TestCase):
    """Tests for the module-level helpers in restview.restviewhttp."""

    def test_get_host_name(self):
        with patch('socket.gethostname', lambda: 'myhostname.local'):
            # Empty string and the wildcard address fall back to the
            # machine's real host name; explicit names pass through.
            self.assertEqual(get_host_name(''), 'myhostname.local')
            self.assertEqual(get_host_name('0.0.0.0'), 'myhostname.local')
            self.assertEqual(get_host_name('localhost'), 'localhost')

    def test_launch_browser(self):
        with patch('threading.Thread') as Thread:
            launch_browser('http://example.com')
            # The browser opens in a daemonized background thread so it
            # cannot keep the process alive.
            Thread.assert_called_once_with(target=webbrowser.open,
                                           args=('http://example.com',))
            Thread.return_value.setDaemon.assert_called_once_with(True)
            self.assertEqual(Thread.return_value.start.call_count, 1)
class TestMain(unittest.TestCase):
    """End-to-end tests for the restview command-line entry point."""

    def _serve(self):
        # Stand-in for RestViewer.serve: record the call, then unwind the
        # serve loop via KeyboardInterrupt.
        self._serve_called = True
        raise KeyboardInterrupt()

    def run_main(self, *args, **kw):
        """Run main() with *args as argv; return (stdout, stderr) text.

        Keyword options:
            rc               -- expected SystemExit code (default 0)
            serve_called     -- expect the HTTP server to have been started
            browser_launched -- expect a browser launch attempt
        """
        expected_exit_code = kw.pop('rc', 0)
        serve_called = kw.pop('serve_called', False)
        browser_launched = kw.pop('browser_launched', False)
        if kw:  # pragma: nocover
            raise TypeError("unexpected keyword arguments: %s"
                            % ", ".join(sorted(kw)))
        self._serve_called = False
        with patch('sys.argv', ['restview'] + list(args)):
            with patch('sys.stdout', StringIO()) as stdout:
                with patch('sys.stderr', StringIO()) as stderr:
                    with patch('restview.restviewhttp.launch_browser') as launch_browser:
                        with patch.object(RestViewer, 'serve', self._serve):
                            try:
                                main()
                            except SystemExit as e:
                                self.assertEqual(e.args[0], expected_exit_code)
                            else:  # pragma: nocover
                                if not serve_called:
                                    self.fail("main() did not raise SystemExit")
                            if serve_called:
                                self.assertTrue(self._serve_called)
                            if browser_launched:
                                self.assertEqual(launch_browser.call_count, 1)
                            return stdout.getvalue(), stderr.getvalue()

    def test_help(self):
        stdout, stderr = self.run_main('--help')
        self.assertTrue('restview [options] root' in stdout, stdout)

    def test_error_when_no_arguments(self):
        stdout, stderr = self.run_main(rc=2)
        self.assertEqual(stderr.splitlines()[-1],
                         'restview: error: at least one argument expected')

    def test_error_when_both_command_and_file_specified(self):
        stdout, stderr = self.run_main('-e', 'cat README.rst', 'CHANGES.rst',
                                       rc=2)
        self.assertEqual(
            stderr.splitlines()[-1],
            'restview: error: specify a command (-e) or a file/directory, but not both',
            stderr)

    def test_all_is_well(self):
        self.run_main('.', serve_called=True, browser_launched=True)

    def test_multiple_files(self):
        self.run_main('README.rst', 'CHANGES.rst', serve_called=True,
                      browser_launched=True)

    def test_command(self):
        self.run_main('--long-description',
                      serve_called=True, browser_launched=True)

    def test_specify_listen_address(self):
        # With an explicit listen address, no browser launch is expected.
        with patch.object(RestViewer, 'listen'):
            with patch.object(RestViewer, 'close'):
                self.run_main('-l', '0.0.0.0:8080', '.',
                              serve_called=True, browser_launched=False)

    def test_specify_invalid_listen_address(self):
        stdout, stderr = self.run_main('-l', 'nonsense', '.', rc=2)
        self.assertEqual(stderr.splitlines()[-1],
                         'restview: error: Invalid address: nonsense')

    def test_specify_allowed_hosts(self):
        with patch.object(RestViewer, 'listen'):
            with patch.object(RestViewer, 'close'):
                self.run_main('--allowed-hosts', 'localhost,example.com', '.',
                              serve_called=True, browser_launched=False)

    def test_custom_css_url(self):
        self.run_main('.', '--css', 'http://example.com/my.css',
                      serve_called=True, browser_launched=True)

    def test_custom_css_file(self):
        self.run_main('.', '--css', 'my.css',
                      serve_called=True, browser_launched=True)
def test_suite():
    """Collect this module's unit tests plus doctests from restview.restviewhttp."""
    return unittest.TestSuite([
        unittest.defaultTestLoader.loadTestsFromName(__name__),
        doctest.DocTestSuite(optionflags=doctest.ELLIPSIS | doctest.REPORT_NDIFF),
        doctest.DocTestSuite('restview.restviewhttp'),
    ])
# BUG FIX: the original final line had dataset table-column residue
# ("| 0.394201 | 0.075176 |") fused onto it, making it a syntax error.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
from unittest import TestCase
from unittest.mock import patch, MagicMock, PropertyMock
from bson import ObjectId
from django_mock_queries.query import MockSet
from mlplaygrounds.datasets.tests.mocks.managers import MockDatasetManager
from ..models import User, CustomUserManager
class TestUserModel(TestCase):
    """Tests for User.datasets backed by a mocked Dataset manager."""

    def setUp(self):
        # Seed a fake collection and patch Dataset.objects to use it so no
        # real database is touched.
        mock_manager = MockDatasetManager()
        mock_manager.collection.insert_many([
            {'_id': 1, 'user_id': 'john', 'name': 'jdata', 'data': {}},
            {'_id': 2, 'user_id': 'test', 'name': 'test', 'data': {}}
        ])
        self.manager_patcher = patch(
            'mlplaygrounds.users.models.Dataset.objects',
            new_callable=PropertyMock(return_value=mock_manager)
        )
        self.manager_patcher.start()

    def test_datasets(self):
        # Only the dataset owned by user 'test' should come back.
        expected_dataset = {'uid': 2, 'name': 'test'}
        user = User()
        user.username = 'test'
        user_datasets = [
            {'uid': dataset.uid, 'name': dataset.name}
            for dataset in user.datasets
        ]
        self.assertEqual(user_datasets, [expected_dataset])

    def tearDown(self):
        self.manager_patcher.stop()
# BUG FIX: the original final line carried fused dataset-row residue
# ("| mlplaygrounds/... | from unittest import TestCase"); removed.
class TestUserManager(TestCase):
    """Tests for CustomUserManager's creation methods and credential checks."""

    def setUp(self):
        # Stub User.save so tests never hit a real database.
        self.save_patcher = patch(
            'mlplaygrounds.users.models.User.save',
            return_value=MagicMock()
        )
        self.save_patcher.start()

    def tearDown(self):
        self.save_patcher.stop()

    @patch('mlplaygrounds.users.models.CustomUserManager._credentials_already_in_use')
    def test_create_user(self, mock_credentials_in_use):
        mock_credentials_in_use.return_value = False
        manager = CustomUserManager()
        user = manager.create_user('usr', 'pass', 'email', 'name', 'sname')
        self.assertEqual(user.username, 'usr')

    @patch('mlplaygrounds.users.models.CustomUserManager._credentials_already_in_use')
    def test_create_invalid_user(self, mock_credentials_in_use):
        # Duplicate credentials must make create_user raise ValueError.
        mock_credentials_in_use.return_value = True
        manager = CustomUserManager()
        with self.assertRaises(ValueError):
            manager.create_user('usr', 'pass', 'email', 'name', 'sname')

    @patch('mlplaygrounds.users.models.CustomUserManager._credentials_already_in_use')
    def test_create_superuser(self, mock_credentials_in_use):
        mock_credentials_in_use.return_value = False
        manager = CustomUserManager()
        user = manager.create_superuser('susr', 'pass', 'semail', 'name', 'sname')
        self.assertEqual(user.is_superuser, True)

    @patch('mlplaygrounds.users.models.CustomUserManager._credentials_already_in_use')
    def test_create_invalid_superuser(self, mock_credentials_in_use):
        mock_credentials_in_use.return_value = True
        manager = CustomUserManager()
        with self.assertRaises(ValueError):
            manager.create_superuser('susr', 'pass', 'semail', 'name', 'sname')

    @patch('mlplaygrounds.users.models.User.objects')
    def test_check_credentials_in_use(self, mock_objects):
        # filter() returns the mock itself so exists() can be stubbed below.
        mock_objects.filter.return_value = mock_objects
        mock_objects.exists.return_value = True
        manager = CustomUserManager()
        in_use = manager._credentials_already_in_use('mary', '<EMAIL>')
        self.assertEqual(in_use, True)

    @patch('mlplaygrounds.users.models.User.objects')
    def test_check_credentials_not_in_use(self, mock_objects):
        mock_objects.filter.return_value = mock_objects
        mock_objects.exists.return_value = False
        manager = CustomUserManager()
        in_use = manager._credentials_already_in_use('patrick', '<EMAIL>')
        self.assertEqual(in_use, False)
from unittest.mock import patch, MagicMock, PropertyMock
from bson import ObjectId
from django_mock_queries.query import MockSet
from mlplaygrounds.datasets.tests.mocks.managers import MockDatasetManager
from ..models import User, CustomUserManager
# NOTE(review): verbatim duplicate of TestUserModel earlier in this dump
# (dataset 'parsed_code' column).
class TestUserModel(TestCase):
    """Tests for User.datasets backed by a mocked Dataset manager."""

    def setUp(self):
        # Seed a fake collection and patch Dataset.objects to use it.
        mock_manager = MockDatasetManager()
        mock_manager.collection.insert_many([
            {'_id': 1, 'user_id': 'john', 'name': 'jdata', 'data': {}},
            {'_id': 2, 'user_id': 'test', 'name': 'test', 'data': {}}
        ])
        self.manager_patcher = patch(
            'mlplaygrounds.users.models.Dataset.objects',
            new_callable=PropertyMock(return_value=mock_manager)
        )
        self.manager_patcher.start()

    def test_datasets(self):
        # Only the dataset owned by user 'test' should come back.
        expected_dataset = {'uid': 2, 'name': 'test'}
        user = User()
        user.username = 'test'
        user_datasets = [
            {'uid': dataset.uid, 'name': dataset.name}
            for dataset in user.datasets
        ]
        self.assertEqual(user_datasets, [expected_dataset])

    def tearDown(self):
        self.manager_patcher.stop()
# NOTE(review): verbatim duplicate of TestUserManager earlier in this dump.
class TestUserManager(TestCase):
    """Tests for CustomUserManager's creation methods and credential checks."""

    def setUp(self):
        # Stub User.save so tests never hit a real database.
        self.save_patcher = patch(
            'mlplaygrounds.users.models.User.save',
            return_value=MagicMock()
        )
        self.save_patcher.start()

    def tearDown(self):
        self.save_patcher.stop()

    @patch('mlplaygrounds.users.models.CustomUserManager._credentials_already_in_use')
    def test_create_user(self, mock_credentials_in_use):
        mock_credentials_in_use.return_value = False
        manager = CustomUserManager()
        user = manager.create_user('usr', 'pass', 'email', 'name', 'sname')
        self.assertEqual(user.username, 'usr')

    @patch('mlplaygrounds.users.models.CustomUserManager._credentials_already_in_use')
    def test_create_invalid_user(self, mock_credentials_in_use):
        # Duplicate credentials must make create_user raise ValueError.
        mock_credentials_in_use.return_value = True
        manager = CustomUserManager()
        with self.assertRaises(ValueError):
            manager.create_user('usr', 'pass', 'email', 'name', 'sname')

    @patch('mlplaygrounds.users.models.CustomUserManager._credentials_already_in_use')
    def test_create_superuser(self, mock_credentials_in_use):
        mock_credentials_in_use.return_value = False
        manager = CustomUserManager()
        user = manager.create_superuser('susr', 'pass', 'semail', 'name', 'sname')
        self.assertEqual(user.is_superuser, True)

    @patch('mlplaygrounds.users.models.CustomUserManager._credentials_already_in_use')
    def test_create_invalid_superuser(self, mock_credentials_in_use):
        mock_credentials_in_use.return_value = True
        manager = CustomUserManager()
        with self.assertRaises(ValueError):
            manager.create_superuser('susr', 'pass', 'semail', 'name', 'sname')

    @patch('mlplaygrounds.users.models.User.objects')
    def test_check_credentials_in_use(self, mock_objects):
        # filter() returns the mock itself so exists() can be stubbed below.
        mock_objects.filter.return_value = mock_objects
        mock_objects.exists.return_value = True
        manager = CustomUserManager()
        in_use = manager._credentials_already_in_use('mary', '<EMAIL>')
        self.assertEqual(in_use, True)

    @patch('mlplaygrounds.users.models.User.objects')
    def test_check_credentials_not_in_use(self, mock_objects):
        mock_objects.filter.return_value = mock_objects
        mock_objects.exists.return_value = False
        manager = CustomUserManager()
        in_use = manager._credentials_already_in_use('patrick', '<EMAIL>')
        self.assertEqual(in_use, False)
from app import app
import datetime
import urllib.request,json
from .models import news,sources,entertainment
# Model class shortcuts.
News = news.News
Sources = sources.Sources
Entertainment = entertainment.Entertainment

# Getting api key
api_key = app.config['NEWS_API_KEY']
# Getting the news base url
news_base_url = app.config["NEWS_HIGHLIGHTS_BASE_URL"]
# Getting the sources base url
sources_base_url = app.config["NEWS_SOURCE_BASE_URL"]
# Getting the entertainment news base url
entertainment_base_url = app.config["NEWS_ENTERTAINMENT_BASE_URL"]
def get_news(country):
    '''
    Fetch top headlines for `country` from the news API.

    Returns a list of News objects, or None when the response carries no
    articles.
    '''
    get_news_url = news_base_url.format(country, api_key)
    with urllib.request.urlopen(get_news_url) as url:
        get_news_response = json.loads(url.read())

    news_results = None
    # ROBUSTNESS: .get() guards against error payloads that have no
    # 'articles' key (previously a KeyError).
    if get_news_response.get('articles'):
        news_results = process_newsResults(get_news_response['articles'])
    return news_results
def process_newsResults(news_list):
    '''
    Turn raw article dicts from the API into a list of News objects.

    Articles without an image URL are skipped.
    '''
    news_results = []
    for news_item in news_list:
        img_url = news_item.get('urlToImage')
        # API timestamps look like 2020-01-31T12:00:00Z; keep the date part.
        published = datetime.datetime.strptime(
            news_item.get('publishedAt'), '%Y-%m-%dT%H:%M:%SZ').date()
        if img_url:
            news_results.append(News(
                news_item.get('title'),
                news_item.get('description'),
                published,
                news_item.get('content'),
                news_item.get('url'),
                img_url,
            ))
    return news_results
def get_sources():
    '''
    Fetch the list of news sources from the API.

    Returns a list of Sources objects, or None when the response carries
    no sources.
    '''
    get_sources_url = sources_base_url.format(api_key)
    with urllib.request.urlopen(get_sources_url) as url:
        get_sources_response = json.loads(url.read())

    sources_results = None
    # ROBUSTNESS: .get() guards against error payloads with no 'sources'
    # key (previously a KeyError).
    if get_sources_response.get('sources'):
        sources_results = process_sourcesResults(get_sources_response['sources'])
    return sources_results
def process_sourcesResults(sources_list):
    '''
    Turn raw source dicts from the API into a list of Sources objects.
    '''
    return [
        Sources(item.get('name'), item.get('description'), item.get('url'))
        for item in sources_list
    ]
def getEntertainmentNews(category):
    '''
    Fetch articles for the given category from the API.

    (Docstring fixed: this function FETCHES; processing happens in
    process_EntertainmentNews_Results.)  Returns a list of Entertainment
    objects, or None when the response carries no articles.
    '''
    get_entertainmentNews_url = entertainment_base_url.format(category, api_key)
    with urllib.request.urlopen(get_entertainmentNews_url) as url:
        get_entertainmentNews_response = json.loads(url.read())

    entertainmentNews_results = None
    # ROBUSTNESS: .get() guards against error payloads with no 'articles' key.
    if get_entertainmentNews_response.get('articles'):
        entertainmentNews_results = process_EntertainmentNews_Results(
            get_entertainmentNews_response['articles'])
    return entertainmentNews_results
def process_EntertainmentNews_Results(entertainmentNews_result_list):
    '''
    Turn raw entertainment article dicts into Entertainment objects.

    (Docstring fixed: the original said "sources results".)  Each object
    gets a sequential id starting at 1.
    '''
    entertainmentNews_results = []
    # enumerate(start=1) replaces the manual idNumber counter.
    for idNumber, entertainment_item in enumerate(entertainmentNews_result_list, 1):
        name = entertainment_item.get('name')
        description = entertainment_item.get('description')
        source_url = entertainment_item.get('url')
        publishedAt = datetime.datetime.strptime(
            entertainment_item.get('publishedAt'), '%Y-%m-%dT%H:%M:%SZ').date()
        entertainmentNews_results.append(
            Entertainment(idNumber, name, description, publishedAt, source_url))
    return entertainmentNews_results
import datetime
import urllib.request,json
from .models import news,sources,entertainment
# NOTE(review): duplicate copy of the configuration block earlier in this dump.
News = news.News
Sources = sources.Sources
Entertainment = entertainment.Entertainment

# Getting api key
api_key = app.config['NEWS_API_KEY']
# Getting the news base url
news_base_url = app.config["NEWS_HIGHLIGHTS_BASE_URL"]
# Getting the sources base url
sources_base_url = app.config["NEWS_SOURCE_BASE_URL"]
# Getting the entertainment news base url
entertainment_base_url = app.config["NEWS_ENTERTAINMENT_BASE_URL"]
def get_news(country):
    '''
    Fetch top headlines for `country`; return News objects or None when
    the response carries no articles.
    '''
    get_news_url = news_base_url.format(country,api_key)
    with urllib.request.urlopen(get_news_url) as url:
        get_news_data = url.read()
        get_news_response = json.loads(get_news_data)

    news_results = None
    # NOTE(review): raises KeyError on error payloads lacking 'articles'.
    if get_news_response['articles']:
        news_result_list = get_news_response['articles']
        news_results = process_newsResults(news_result_list)
    return news_results
def process_newsResults(news_list):
    '''
    Turn raw article dicts into News objects; articles without an image
    URL are skipped.
    '''
    news_results = []
    for news_item in news_list:
        title = news_item.get('title')
        description = news_item.get('description')
        publishedAt = news_item.get('publishedAt')
        content = news_item.get('content')
        url = news_item.get('url')
        img_url = news_item.get('urlToImage')
        # Keep only the date part of the API timestamp.
        date_time_obj = datetime.datetime.strptime(publishedAt, '%Y-%m-%dT%H:%M:%SZ')
        publishedAt = date_time_obj.date()
        if img_url:
            news_object = News(title,description,publishedAt,content,url,img_url)
            news_results.append(news_object)
    return news_results
def get_sources():
    '''
    Fetch the list of news sources; return Sources objects or None when
    the response carries none.
    '''
    get_sources_url = sources_base_url.format(api_key)
    with urllib.request.urlopen(get_sources_url) as url:
        get_sources_data = url.read()
        get_sources_response = json.loads(get_sources_data)

    sources_results = None
    # NOTE(review): raises KeyError on error payloads lacking 'sources'.
    if get_sources_response['sources']:
        sources_result_list = get_sources_response['sources']
        sources_results = process_sourcesResults(sources_result_list)
    return sources_results
def process_sourcesResults(sources_list):
    '''
    Turn raw source dicts into a list of Sources objects.
    '''
    sources_results = []
    for source_item in sources_list:
        name = source_item.get('name')
        description = source_item.get('description')
        source_url = source_item.get('url')
        source_object = Sources(name,description,source_url)
        sources_results.append(source_object)
    return sources_results
def getEntertainmentNews(category):
    '''
    Fetch articles for `category`; return Entertainment objects or None
    when the response carries no articles.
    '''
    get_entertainmentNews_url = entertainment_base_url.format(category,api_key)
    with urllib.request.urlopen(get_entertainmentNews_url) as url:
        get_entertainmentNews_data = url.read()
        get_entertainmentNews_response = json.loads(get_entertainmentNews_data)

    entertainmentNews_results = None
    # NOTE(review): raises KeyError on error payloads lacking 'articles'.
    if get_entertainmentNews_response['articles']:
        entertainmentNews_result_list = get_entertainmentNews_response['articles']
        entertainmentNews_results = process_EntertainmentNews_Results(entertainmentNews_result_list)
    return entertainmentNews_results
def process_EntertainmentNews_Results(entertainmentNews_result_list):
    '''
    Turn raw entertainment article dicts into Entertainment objects.

    (Docstring fixed: the original said "sources results"; the original
    final line also carried fused dataset-score residue, removed.)
    Each object gets a sequential id starting at 1.
    '''
    entertainmentNews_results = []
    # enumerate(start=1) replaces the manual idNumber counter.
    for idNumber, entertainment_item in enumerate(entertainmentNews_result_list, 1):
        name = entertainment_item.get('name')
        description = entertainment_item.get('description')
        source_url = entertainment_item.get('url')
        publishedAt = datetime.datetime.strptime(
            entertainment_item.get('publishedAt'), '%Y-%m-%dT%H:%M:%SZ').date()
        entertainmentNews_results.append(
            Entertainment(idNumber, name, description, publishedAt, source_url))
    return entertainmentNews_results
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import datetime
plt.style.use(['science', 'no-latex'])

# Load the monthly/total return table and index it by timestamp.
df = pd.read_excel('data/pf_result_new_with_benchmark.xlsx')
df.columns = ['timestamp', 'Portfolio Monthly Return', 'Benchmark Monthly Return', 'Portfolio Total Return (0 -> t)',
              'Benchmark Total Return (0 -> t)', 'Difference Monthly Return']
df = df.set_index('timestamp')
df = df.iloc[:, 2:]

# add actual first row for at inception [1,1,0]; 1s are the index at 100% so no return at inception (obviously)
# 0 is the not existing difference in returns between the portfolio and the returns
# 30.06.2012
first_date = datetime.datetime(2012, 6, 30)
df.loc[first_date] = [1, 1, 0]
df = df.sort_index()

# Graph drawing begins here
t = df.index
x = df['Portfolio Total Return (0 -> t)'].values
y = df['Benchmark Total Return (0 -> t)'].values
z = df['Difference Monthly Return'].values

mpl.rcParams['font.serif'] = 'Times New Roman'
fig, axs = plt.subplots(2, 1, figsize=(10, 10), dpi=300, gridspec_kw={'height_ratios': [5, 2]})
fig.suptitle('Performance Overview', fontsize=16)
ax1 = axs[0]
ax2 = axs[1]
# BUG FIX: Axes has no method `sub1`; the original `ax1.sub1(...)` calls
# raised AttributeError.  `plot` draws the two cumulative-return lines.
ax1.plot(t, x, label='Portfolio')
ax1.plot(t, y, label='Benchmark')
# plt.axhline(linewidth=2, color='black', y=1)
ax2.bar(t, z, label='\u0394 Return', width=15)
ax1.legend()
ax2.legend()
ax1.axhline(linewidth=0.5, color='black', y=1)
ax2.axhline(linewidth=0.5, color='black', y=0)
# NOTE(review): get_shared_x_axes().join() is deprecated in newer
# matplotlib; consider ax2.sharex(ax1) — confirm against the pinned version.
ax1.get_shared_x_axes().join(ax1, ax2)
ax1.set_xticklabels([])
# ax2.autoscale() ## call autoscale if needed
# reduce white space on left or right side; lib/python3.7/site-packages/matplotlib/figure.py -> class SubplotParams
# plt.subplots_adjust(left=0.1, right=0.9)
# !HINT! : Layout settings here
ax1.grid(linestyle='--', linewidth=0.3)
# todo uncomment to save to respective file_types
# plt.savefig('results/graphs/performance_overview.eps')
# plt.savefig('results/graphs/performance_overview.png')
# plt.savefig('results/graphs/performance_overview.tiff')
plt.savefig('results/graphs/performance_overview.pdf')
plt.show()
import matplotlib as mpl
import matplotlib.pyplot as plt
import datetime
plt.style.use(['science', 'no-latex'])

# Load the monthly/total return table and index it by timestamp.
df = pd.read_excel('data/pf_result_new_with_benchmark.xlsx')
df.columns = ['timestamp', 'Portfolio Monthly Return', 'Benchmark Monthly Return', 'Portfolio Total Return (0 -> t)',
              'Benchmark Total Return (0 -> t)', 'Difference Monthly Return']
df = df.set_index('timestamp')
df = df.iloc[:, 2:]

# add actual first row for at inception [1,1,0]; 1s are the index at 100% so no return at inception (obviously)
# 0 is the not existing difference in returns between the portfolio and the returns
# 30.06.2012
first_date = datetime.datetime(2012, 6, 30)
df.loc[first_date] = [1, 1, 0]
df = df.sort_index()

# Graph drawing begins here
t = df.index
x = df['Portfolio Total Return (0 -> t)'].values
y = df['Benchmark Total Return (0 -> t)'].values
z = df['Difference Monthly Return'].values

mpl.rcParams['font.serif'] = 'Times New Roman'
fig, axs = plt.subplots(2, 1, figsize=(10, 10), dpi=300, gridspec_kw={'height_ratios': [5, 2]})
fig.suptitle('Performance Overview', fontsize=16)
ax1 = axs[0]
ax2 = axs[1]
# BUG FIX: Axes has no method `sub1`; the original `ax1.sub1(...)` calls
# raised AttributeError.  `plot` draws the two cumulative-return lines.
ax1.plot(t, x, label='Portfolio')
ax1.plot(t, y, label='Benchmark')
# plt.axhline(linewidth=2, color='black', y=1)
ax2.bar(t, z, label='\u0394 Return', width=15)
ax1.legend()
ax2.legend()
ax1.axhline(linewidth=0.5, color='black', y=1)
ax2.axhline(linewidth=0.5, color='black', y=0)
ax1.get_shared_x_axes().join(ax1, ax2)
ax1.set_xticklabels([])
# ax2.autoscale() ## call autoscale if needed
# reduce white space on left or right side; lib/python3.7/site-packages/matplotlib/figure.py -> class SubplotParams
# plt.subplots_adjust(left=0.1, right=0.9)
# !HINT! : Layout settings here
ax1.grid(linestyle='--', linewidth=0.3)
# todo uncomment to save to respective file_types
# plt.savefig('results/graphs/performance_overview.eps')
# plt.savefig('results/graphs/performance_overview.png')
# plt.savefig('results/graphs/performance_overview.tiff')
plt.savefig('results/graphs/performance_overview.pdf')
plt.show()
from collections import deque
# 4-neighbour offsets: up, down, left, right.
dy = [-1, 1, 0, 0]
dx = [0, 0, -1, 1]
def solution(land, height):
    """Minimum total ladder cost to connect all regions of `land`.

    Cells whose height difference is <= `height` are walkable without a
    ladder; each maximal walkable region gets a label via BFS, then a
    minimum spanning tree is built over the cheapest ladders between
    adjacent regions.
    """
    n = len(land)
    labels = [[0] * n for _ in range(n)]
    label_number = 1
    for y in range(n):
        for x in range(n):
            if not labels[y][x]:
                bfs(y, x, label_number, labels, land, height)
                label_number += 1
    edges = []
    for y in range(n):
        for x in range(n):
            for i in range(4):
                ny, nx = y + dy[i], x + dx[i]
                if 0 <= ny < n and 0 <= nx < n and labels[y][x] != labels[ny][nx]:
                    edges.append((labels[y][x], labels[ny][nx],
                                  abs(land[ny][nx] - land[y][x])))
    return kruskal(edges, label_number - 1)
def kruskal(edges, nodes):
    """Total weight of a minimum spanning tree over labels 1..nodes."""
    parents = list(range(nodes + 1))
    total = 0
    used = 0
    # Greedily take the cheapest edge that joins two different components.
    for a, b, w in sorted(edges, key=lambda e: e[2]):
        if get_parent(parents, a) != get_parent(parents, b):
            union_parent(parents, a, b)
            total += w
            used += 1
            if used == nodes - 1:
                break
    return total
def get_parent(parents, a):
    """Return the root of `a`'s set, compressing the path as a side effect."""
    root = a
    while parents[root] != root:
        root = parents[root]
    # Path compression: point every node on the walk directly at the root.
    while parents[a] != root:
        parents[a], a = root, parents[a]
    return root
def union_parent(parents, a, b):
    """Merge the sets containing `a` and `b`; the smaller root wins."""
    root_a = get_parent(parents, a)
    root_b = get_parent(parents, b)
    if root_a < root_b:
        parents[root_b] = root_a
    else:
        parents[root_a] = root_b
def bfs(sy, sx, k, labels, land, height):
    """Flood-fill label `k` over every cell reachable from (sy, sx).

    A step to a neighbour is allowed when the height difference is at most
    `height` and the neighbour is still unlabelled.  Mutates `labels`.
    """
    n = len(land)
    labels[sy][sx] = k
    queue = deque([(sy, sx)])
    while queue:
        y, x = queue.popleft()
        for i in range(4):
            ny, nx = y + dy[i], x + dx[i]
            if not (0 <= ny < n and 0 <= nx < n):
                continue
            if abs(land[ny][nx] - land[y][x]) > height or labels[ny][nx]:
                continue
            labels[ny][nx] = k
            queue.append((ny, nx))
if __name__ == '__main__':
    # Example grid from the problem statement.
    land = [[10, 11, 10, 11], [2, 21, 20, 10], [1, 20, 21, 11], [2, 1, 2, 1]]
    height = 1
    print(solution(land, height))
# 4-neighbour offsets: up, down, left, right.  (Duplicate copy.)
dy = [-1, 1, 0, 0]
dx = [0, 0, -1, 1]
def solution(land, height):
    """Minimum ladder cost to connect all height-separated regions of `land`."""
    labels = [[0 for _ in range(len(land))] for _ in range(len(land))]
    label_number = 1
    # Label each walkable region with BFS.
    for y in range(len(land)):
        for x in range(len(land)):
            if labels[y][x] == 0:
                bfs(y, x, label_number, labels, land, height)
                label_number += 1
    # Collect candidate ladder edges between adjacent regions.
    edges = []
    for y in range(len(land)):
        for x in range(len(land)):
            for i in range(4):
                ny = y + dy[i]
                nx = x + dx[i]
                if ny < 0 or ny >= len(land) or nx < 0 or nx >= len(land):
                    continue
                if labels[y][x] != labels[ny][nx]:
                    edges.append((labels[y][x], labels[ny][nx], abs(land[ny][nx] - land[y][x])))
    return kruskal(edges, label_number - 1)
def kruskal(edges, nodes):
    """Total weight of a minimum spanning tree over labels 1..nodes."""
    edge = 0
    s = 0
    parents = [i for i in range(nodes + 1)]
    edges.sort(key=lambda x: x[2])
    for a, b, c in edges:
        if get_parent(parents, a) != get_parent(parents, b):
            union_parent(parents, a, b)
            s += c
            edge += 1
            if edge == nodes - 1:
                break
    return s
def get_parent(parents, a):
    """Return the root of `a`'s set with recursive path compression."""
    if parents[a] == a:
        return a
    parents[a] = get_parent(parents, parents[a])
    return parents[a]
def union_parent(parents, a, b):
    """Merge the sets containing `a` and `b`; the smaller root wins."""
    a = get_parent(parents, a)
    b = get_parent(parents, b)
    if a < b:
        parents[b] = a
    else:
        parents[a] = b
def bfs(sy, sx, k, labels, land, height):
    """Flood-fill label `k` over cells reachable within `height` steps; mutates labels."""
    que = deque()
    labels[sy][sx] = k
    que.append((sy, sx))
    while que:
        y, x = que.popleft()
        for i in range(4):
            ny = y + dy[i]
            nx = x + dx[i]
            if ny < 0 or ny >= len(land) or nx < 0 or nx >= len(land):
                continue
            if abs(land[ny][nx] - land[y][x]) > height:
                continue
            if labels[ny][nx] == 0:
                labels[ny][nx] = k
                que.append((ny, nx))
if __name__ == '__main__':
    # Example grid from the problem statement.  (Duplicate copy.)
    land = [[10, 11, 10, 11], [2, 21, 20, 10], [1, 20, 21, 11], [2, 1, 2, 1]]
    height = 1
    print(solution(land, height))
import numpy as np
import pandas as pd
from io import BytesIO
from zipfile import ZipFile
from urllib.request import urlopen
from database.base import Session, engine, Base
from database.cip_history import CipCode
class CipFile:
    """ Class for a particular year's CIP assignments """

    def __init__(self, year: int):
        """Load the CIP code table for `year` into CipCode ORM rows.

        Years before 2020 are read from the bundled Excel workbook; 2020+
        is downloaded from the NCES website.
        """
        self.year = year
        self.rows = list()
        if self.year < 2020:
            df = pd.read_excel(
                io = 'data/Legacy_CIP.xlsx',
                sheet_name = f'CIP{self.year}',
                index_col = None,
                dtype = object)
        else:
            df = pd.read_csv(
                f'https://nces.ed.gov/ipeds/cipcode/Files/CIPCode{year}.csv',
                dtype = object,
                index_col = None,
                low_memory = False,
                encoding = "iso-8859-1")
        # Normalize column names, then map each vintage's column names onto
        # the common cipcode/family/title schema.
        df.columns = df.columns.str.strip().str.lower()
        df = df.rename(columns = {
            'cip85': 'cipcode',
            'cip90': 'cipcode',
            'ciptitle': 'title',
            'cipcode2k': 'cipcode',
            'ciptext2k': 'title',
            'cipfamily': 'family',
        })
        # The 1985 vintage carries no description column; backfill a stub.
        if self.year == 1985:
            df['description'] = 'None'
        cols = ['cipcode', 'family', 'title']
        df = df[cols]
        df = df.applymap(self.clean_string)
        for row in df.itertuples(index=False):
            self.rows.append(
                CipCode(
                    cipcode = row.cipcode,
                    version = self.year,
                    title = row.title,
                    family = row.family,
                ))

    def row_count(self):
        # Number of CipCode rows loaded for this year.
        return(len(self.rows))

    @property
    def year(self):
        """ return year value """
        return self.__year

    @year.setter
    def year(self, year: int):
        """ set year value """
        self.__year = year

    @staticmethod
    def clean_string(text: str):
        """ strips Excel formula text and trailing periods """
        text = str(text).strip()
        # Unwrap Excel formula-style values such as ="01.0101".
        if text[:1] == '=':
            text = text[2:-1]
        if str(text).endswith('.'):
            text = text[:-1]
        return(text)

    def write(self):
        """Replace this year's rows in the database (delete, then bulk insert)."""
        session = Session()
        if len(self.rows) > 0:
            try:
                # Delete any previously loaded rows for this CIP version first.
                _ = session.query(self.rows[0].__class__).filter(self.rows[0].__class__.version==self.year).delete(synchronize_session=False)
                session.bulk_save_objects(self.rows)
                session.commit()
            except Exception as e:
                print(f'An error occurred:\n{str(e)}.')
                session.rollback()
                print('No changes were made to the database due to error.')
            else:
                print('Rows successfully written to database.')
        else:
            print('No rows were available to insert.')
        session.close()

    def __repr__(self):
        """ class repr method """
        return f'{self.__class__.__name__}(year={self.__year!r})'
if __name__ == '__main__':
    adm = CipFile(2020)
    print(adm.year)
    # BUG FIX: CipFile defines no `url` attribute anywhere, so the original
    # `print(adm.url)` raised AttributeError.  Report the row count instead.
    print(adm.row_count())
    print(adm.rows[0])
import numpy as np
import pandas as pd
from io import BytesIO
from zipfile import ZipFile
from urllib.request import urlopen
from database.base import Session, engine, Base
from database.cip_history import CipCode
# NOTE(review): verbatim duplicate of the CipFile class earlier in this dump.
class CipFile:
    """ Class for a particular year's CIP assignments """

    def __init__(self, year: int):
        """Load the CIP code table for `year` into CipCode ORM rows."""
        self.year = year
        self.rows = list()
        if self.year < 2020:
            df = pd.read_excel(
                io = 'data/Legacy_CIP.xlsx',
                sheet_name = f'CIP{self.year}',
                index_col = None,
                dtype = object)
        else:
            df = pd.read_csv(
                f'https://nces.ed.gov/ipeds/cipcode/Files/CIPCode{year}.csv',
                dtype = object,
                index_col = None,
                low_memory = False,
                encoding = "iso-8859-1")
        # Map each vintage's column names onto cipcode/family/title.
        df.columns = df.columns.str.strip().str.lower()
        df = df.rename(columns = {
            'cip85': 'cipcode',
            'cip90': 'cipcode',
            'ciptitle': 'title',
            'cipcode2k': 'cipcode',
            'ciptext2k': 'title',
            'cipfamily': 'family',
        })
        if self.year == 1985:
            df['description'] = 'None'
        cols = ['cipcode', 'family', 'title']
        df = df[cols]
        df = df.applymap(self.clean_string)
        for row in df.itertuples(index=False):
            self.rows.append(
                CipCode(
                    cipcode = row.cipcode,
                    version = self.year,
                    title = row.title,
                    family = row.family,
                ))

    def row_count(self):
        # Number of CipCode rows loaded for this year.
        return(len(self.rows))

    @property
    def year(self):
        """ return year value """
        return self.__year

    @year.setter
    def year(self, year: int):
        """ set year value """
        self.__year = year

    @staticmethod
    def clean_string(text: str):
        """ strips Excel formula text and trailing periods """
        text = str(text).strip()
        if text[:1] == '=':
            text = text[2:-1]
        if str(text).endswith('.'):
            text = text[:-1]
        return(text)

    def write(self):
        """Replace this year's rows in the database (delete, then bulk insert)."""
        session = Session()
        if len(self.rows) > 0:
            try:
                _ = session.query(self.rows[0].__class__).filter(self.rows[0].__class__.version==self.year).delete(synchronize_session=False)
                session.bulk_save_objects(self.rows)
                session.commit()
            except Exception as e:
                print(f'An error occurred:\n{str(e)}.')
                session.rollback()
                print('No changes were made to the database due to error.')
            else:
                print('Rows successfully written to database.')
        else:
            print('No rows were available to insert.')
        session.close()

    def __repr__(self):
        """ class repr method """
        return f'{self.__class__.__name__}(year={self.__year!r})'
if __name__ == '__main__':
    adm = CipFile(2020)
    print(adm.year)
    # BUG FIX: CipFile defines no `url` attribute, so `print(adm.url)`
    # raised AttributeError.  Report the row count instead.
    print(adm.row_count())
    print(adm.rows[0])
import os, sys;
import h5py, user_config, cppext;
from numpy import *;
from share_fun import val_def;
from functions import generate_Umatrix;
def init_solver(parms, np):
solver_type = parms['SOLVER_TYPE'];
print '%s solver is used...'%solver_type;
input_args = {
'solver_path' : parms.get('SOLVER_EXE_PATH', ''),
'mpirun_path' : parms.get('SOLVER_MPIRUN_PATH', user_config.mpirun),
'np' : np
}
if solver_type == 'CTHYB_Matrix':
input_args['parm2xml'] = val_def(parms, 'PARMS2XML', user_config.parm2xml);
input_args['solver_path'] = user_config.solver_matrix;
solver = HybridizationMatrixSolver(input_args);
elif solver_type == 'CTHYB_Segment':
input_args['solver_path'] = user_config.solver_segment;
solver = HybridizationSegmentSolver(input_args);
elif solver_type == 'TRIQS':
input_args['solver_path'] = user_config.solver_triqs;
solver = TRIQSSolver(input_args);
elif solver_type == 'TRIQSOld':
input_args['solver_path'] = user_config.solver_triqs_old;
solver = TRIQSSolverOld(input_args);
else: print 'Solver %s unknown'%solver_type;
return solver;
class HybridizationMatrixSolver:
    """Wrapper around the ALPS CTHYB matrix solver, driven via XML input.

    (Python 2 module.)
    """
    def __init__(self, input_args):
        self.args = input_args;

    def prepare(self, prefix, in_data):
        """Write <prefix>.hybtau and <prefix>.parms input files for the solver."""
        # prepare hybtau file for CTQMC
        print 'Prepare running solver for ' + prefix;
        self.prefix = prefix;
        parms = in_data['parms'];
        hyb_tau = in_data['hybtau'];
        FLAVORS = int(parms['FLAVORS']);
        # Negate and reverse the tau axis of each flavor's hybridization
        # function (solver input convention), then prepend the tau grid.
        for f in range(FLAVORS): hyb_tau[:, f] = -hyb_tau[::-1, f];
        hyb_tau = c_[linspace(0, float(parms['BETA']), int(parms['N_TAU']) + 1), hyb_tau];
        savetxt(prefix+'.hybtau', hyb_tau);
        # Lattice name is picked from the number of orbitals (FLAVORS/2).
        if FLAVORS/2 == 3: Lattice = '"t2g system"';
        if FLAVORS/2 == 2: Lattice = '"eg system"';
        if FLAVORS/2 == 1: Lattice = '"site"';
        # prepare parms file for CTQMC
        green_only = 1; self.list_obs = None;
        if int(parms['MEASURE']) > 0:
            green_only = 0
            self.list_obs = parms['OBSERVABLES'].split(',')
        QMC_parms = {
            'LATTICE_LIBRARY'          : user_config.LatticeLibrary,
            'LATTICE'                  : Lattice,
            'MODEL_LIBRARY'            : user_config.ModelLibrary,
            'MODEL'                    : user_config.Model,
            'L'                        : FLAVORS/2,
            'SITES'                    : FLAVORS/2,
            'GREEN_ONLY'               : green_only,
            'SEED'                     : random.random_integers(10000),
            'SWEEPS'                   : val_def(parms, 'SWEEPS', 500000),
            'THERMALIZATION'           : val_def(parms, 'THERMALIZATION', 300),
            'N'                        : parms['N_TAU'],
            'N_ORDER'                  : val_def(parms, 'N_ORDER', 50),
            'N_MEAS'                   : val_def(parms, 'N_MEAS', 200),
            'N_SHIFT'                  : val_def(parms, 'N_SHIFT', 0),
            'N_SWAP'                   : val_def(parms, 'N_SWAP', 0),
            'BETA'                     : parms['BETA'],
            'U'                        : parms['U'],
            "U'"                       : float(parms['U']) - 2*float(parms['J']),
            'J'                        : parms['J'],
            'SPINS'                    : 2,
            'CONSERVED_QUANTUMNUMBERS' : '"Nup, Ndown"',
            'F'                        : prefix + '.hybtau'
            };
        # Per-orbital chemical potentials, spin-resolved.
        for f in range(FLAVORS/2):
            QMC_parms['MUUP'+str(f)]   = in_data['MU'][2*f];
            QMC_parms['MUDOWN'+str(f)] = in_data['MU'][2*f+1];
        solver_parms_file = open(prefix + '.parms', 'w');
        for k, v in QMC_parms.iteritems():
            solver_parms_file.write(k + ' = ' + str(v) + ';\n');
        solver_parms_file.write('{}');
        solver_parms_file.close();

    def run(self):
        """Convert the parm file to XML, then launch the solver under MPI."""
        cmd = '%s %s.parms %s 1>&2'%(self.args['parm2xml'], self.prefix,self.prefix);
        print cmd; os.system(cmd);
        cmd = '%s -n %d %s %s.in.xml'%(self.args['mpirun_path'], self.args['np'], self.args['solver_path'], self.prefix);
        print cmd; return os.system(cmd);

    def collect(self):
        """Read G(tau) and any requested observables from the solver output."""
        print 'Collect data from ' + self.prefix;
        measure = 0;
        collect_error = False;
        if self.list_obs is not None:
            print 'also collect data for observables ', self.list_obs;
            # 'error' is a pseudo-observable: it requests the Green's
            # function error bars rather than a solver observable.
            if 'error' in self.list_obs:
                collect_error = True;
                self.list_obs.pop(self.list_obs.index('error'));
            measure = 1;
        Gtau, Gerr, obs = cppext.get_raw_data(self.prefix, measure, self.list_obs);
        if collect_error: obs.update({'GreenError' : mean(Gerr, 0) });
        return Gtau, obs;
class HybridizationSegmentSolver:
def __init__(self, input_args):
self.args = input_args;
def prepare(self, prefix, in_data):
# prepare hybtau file for CTQMC
print 'Prepare running solver for ' + prefix;
self.prefix = prefix;
self.list_obs = None;
self.parms = in_data['parms'];
self.MEASURE_freq = int(val_def(in_data['parms'], 'MEASURE_freq', 1));
parms = in_data['parms'];
FLAVORS = int(parms['FLAVORS']);
# prepare parms file for CTQMC
QMC_parms = {
'SEED' : random.random_integers(10000),
'SWEEPS' : int(val_def(parms, 'SWEEPS', 500000)),
'THERMALIZATION' : int(val_def(parms, 'THERMALIZATION', 300)),
'N_TAU' : int(parms['N_TAU']),
'N_HISTOGRAM_ORDERS' : int(val_def(parms, 'N_ORDER', 50)),
'N_MEAS' : int(val_def(parms, 'N_MEAS', 100)),
'N_CYCLES' : int(val_def(parms, 'N_CYCLES', 30)),
'BETA' : float(parms['BETA']),
'U_MATRIX' : self.prefix+'.Umatrix',
'MU_VECTOR' : self.prefix+'.MUvector',
'BASENAME' : prefix,
'DELTA' : prefix + '.hybtau',
'N_ORBITALS' : FLAVORS,
'MEASURE_freq' : self.MEASURE_freq,
'N_MATSUBARA' : int(parms['N_CUTOFF']),
'MAX_TIME' : val_def(parms, 'MAX_TIME', 80000),
};
self.Norder = QMC_parms['N_HISTOGRAM_ORDERS'];
solver_parms_file = open(prefix + '.parms', 'w');
for k, v in QMC_parms.iteritems(): solver_parms_file.write(k + ' = ' + str(v) + ';\n');
# Umatrix: either Slater-Kanamori form or using Slater integrals
Umatrix = generate_Umatrix(float(parms['U']), float(parms['J']),
FLAVORS/2, val_def(parms, 'INTERACTION_TYPE', 'SlaterKanamori'));
hyb_tau = in_data['hybtau'];
hyb_tau = c_[linspace(0, float(parms['BETA']), int(parms['N_TAU']) + 1), hyb_tau];
savetxt(prefix+'.hybtau', hyb_tau);
savetxt(self.prefix+'.Umatrix', Umatrix);
savetxt(self.prefix+'.MUvector', in_data['MU']);
def run(self):
FLAVORS = int(self.parms['FLAVORS']);
cmd = '%s -n %d %s %s.parms 1>&2'%(self.args['mpirun_path'], self.args['np'], self.args['solver_path'], self.prefix);
print cmd;
retval = os.system(cmd);
gh5 = h5py.File('%s.out.h5'%self.prefix, 'r');
sign = float(gh5['/simulation/results/Sign/mean/value'][...]);
if sign < 0.99: print >> sys.stderr, 'sign = %.4f: Run QMC again for %s!'%(sign, self.prefix); retval = 1;
for i in range(FLAVORS):
norder = float(gh5['/simulation/results/order_%d/mean/value'%i][...]);
if norder > self.Norder:
print sys.stderr >> "mean Norder of flavor %d > Norder = %d"%(norder, self.Norder);
retval = 1;
gh5.close(); del gh5;
return retval;
def collect(self):
print 'Collect data from ' + self.prefix;
FLAVORS = int(self.parms['FLAVORS']);
obs = None;
gh5 = h5py.File('%s.out.h5'%self.prefix, 'r');
Gtau = array([gh5['/G_tau/%d/mean/value'%f][:] for f in range(FLAVORS)]).T;
Serr = None;
if self.MEASURE_freq:
Giwn = array([gh5['/G_omega/%d/mean/value'%f][:, 0] + 1j*gh5['/G_omega/%d/mean/value'%f][:, 1] for f in range(FLAVORS)]).T;
Siwn = array([gh5['/S_omega/%d/mean/value'%f][:, 0] + 1j*gh5['/S_omega/%d/mean/value'%f][:, 1] for f in range(FLAVORS)]).T;
if int(self.parms['MEASURE']) > 0:
if 'error' in self.parms['OBSERVABLES']:
Serr = zeros((len(Siwn), FLAVORS));
for f in range(FLAVORS):
Fval = gh5['simulation/results/fw_re_%d/mean/value'%f][:] + 1j*gh5['simulation/results/fw_im_%d/mean/value'%f][:];
Ferr = gh5['simulation/results/fw_re_%d/mean/error'%f][:] + 1j*gh5['simulation/results/fw_im_%d/mean/error'%f][:];
Gval = gh5['simulation/results/gw_re_%d/mean/value'%f][:] + 1j*gh5['simulation/results/gw_im_%d/mean/value'%f][:];
Gerr = gh5['simulation/results/gw_re_%d/mean/error'%f][:] + 1j*gh5['simulation/results/gw_im_%d/mean/error'%f][:];
Serr[:, f] = abs(Fval/Gval) * sqrt(abs(Ferr/Fval)**2 + abs(Gerr/Gval)**2);
nn = array([]);
nf = -Gtau[-1, :];
for i in range(FLAVORS):
for j in range(i+1):
if i == j: tmp = nf[i];
else: tmp = float(gh5['/simulation/results/nn_%d_%d/mean/value'%(i,j)][...]);
nn = r_[nn, tmp];
gh5.close();
obs = { 'nn' : nn };
if Serr is not None: obs.update({'SelfEnergyError': Serr});
if self.MEASURE_freq: return Gtau, obs, Giwn, Siwn;
else: return Gtau, obs;
class TRIQSSolverOld:
    """Driver for the legacy TRIQS-based impurity solver.

    prepare() writes Matsubara hybridization data and a parameter file;
    run() launches the MPI binary; collect() rebuilds G(tau), Sigma tails,
    occupancies and optional Legendre / time-correlator data from HDF5.
    """
    def __init__(self, input_args):
        # input_args: dict with 'solver_path', 'mpirun_path' and 'np'
        self.args = input_args;
    def prepare(self, prefix, in_data):
        """Write <prefix>.hybmat.* / .Umatrix / .MUvector / .parms files."""
        print 'Prepare running solver for ' + prefix;
        self.prefix = prefix;
        parms = in_data['parms'];
        BETA = float(parms['BETA']);
        # number of correlated orbitals (two spins per orbital)
        NCOR = int(parms['FLAVORS']) / 2;
        self.beta = BETA;
        self.Ntau = int(parms['N_TAU']) + 1;
        self.Ncor = NCOR;
        self.measure = int(parms['MEASURE'])
        hyb_mat = in_data['hybmat'];
        hyb_tail = in_data['hybtail'];
        # fermionic Matsubara frequencies (2n+1)*pi/beta
        wn = (2*arange(size(hyb_mat, 0))+1)*pi/BETA;
        savetxt(prefix+'.hybmat.real', c_[wn, hyb_mat.real]);
        savetxt(prefix+'.hybmat.imag', c_[wn, hyb_mat.imag]);
        savetxt(prefix+'.hybmat.tail', hyb_tail);
        savetxt(prefix+'.MUvector', in_data['MU']);
        Umatrix = generate_Umatrix(float(parms['U']), float(parms['J']),
                NCOR, val_def(parms, 'INTERACTION_TYPE', 'SlaterKanamori'));
        savetxt(prefix+'.Umatrix', Umatrix);
        # prepare parms file for CTQMC
        QMC_parms = {
            'SWEEPS_EACH_NODE' : int(val_def(parms, 'SWEEPS', 500000))/self.args['np'],
            'THERMALIZATION' : val_def(parms, 'THERMALIZATION', 50000),
            'N_MEAS' : val_def(parms, 'N_MEAS', 100),
            'BETA' : parms['BETA'],
            'U_MATRIX' : prefix+'.Umatrix',
            'MU_VECTOR' : prefix + '.MUvector',
            'HYB_MAT' : prefix + '.hybmat',
            'NCOR' : NCOR,
            'HDF5_OUTPUT' : prefix + '.solution.h5',
            'N_LEGENDRE' : val_def(parms, 'TRIQS_N_LEGENDRE', 50),
            'ACCUMULATION' : val_def(parms, 'TRIQS_ACCUMULATION', 'legendre'),
            'SPINFLIP' : val_def(parms, 'TRIQS_SPINFLIP', 1),
            'MEASURE' : self.measure,
            };
        solver_parms_file = open(prefix + '.parms', 'w');
        for k, v in QMC_parms.iteritems(): solver_parms_file.write(k + ' = ' + str(v) + ';\n');
    def run(self):
        """Launch the solver binary; return its exit status."""
        cmd = '%s -n %d %s %s.parms 1>&2'%(self.args['mpirun_path'], self.args['np'], self.args['solver_path'], self.prefix);
        print cmd;
        retval = os.system(cmd);
        return retval;
    def collect(self):
        """Return (Gtau, obs, Giwn, Siwn) assembled from <prefix>.solution.h5."""
        print 'Collect data from ' + self.prefix;
        R = h5py.File(self.prefix+'.solution.h5', 'r');
        BETA = self.beta;
        SPINS = 2; spins = ('up', 'dn');
        NCOR = self.Ncor;
        G = []; S = []; nf = []; Gl = [];
        is_legendre = True if 'G_Legendre' in R else False;
        for f in range(NCOR):
            for sp in spins:
                # data layout: [..., 0]=real part, [..., 1]=imaginary part
                G.append(R['G/%s%d/data'%(sp,f)][:, 0, 0, 0] + 1j*R['G/%s%d/data'%(sp,f)][:, 0, 0, 1]);
                if is_legendre: Gl.append(R['G_Legendre/%s%d/data'%(sp,f)][:, 0, 0]);
                S.append(R['Sigma/%s%d/data'%(sp,f)][:, 0, 0, 0] + 1j*R['Sigma/%s%d/data'%(sp,f)][:, 0, 0, 1]);
                nf.append(float(R['Observables/N_%s%d'%(sp,f)][...]));
        Giwn = array(G).T;
        Siwn = array(S).T;
        nf = array(nf);
        nn = array([]);
        # flattened lower triangle of <n_i n_j>; diagonal entries are the densities
        for i in range(SPINS*NCOR):
            for j in range(i+1):
                if i == j: tmp = nf[i];
                else: tmp = float(R['Observables/nn_%d_%d'%(i,j)][...]);
                nn = r_[nn, tmp];
        obs = { 'nn' : nn };
        Gtau = zeros((self.Ntau, SPINS*NCOR), dtype = float);
        for f in range(SPINS*NCOR):
            Gtau[:, f] = cppext.IFT_mat2tau(Giwn[:, f].copy(), self.Ntau, BETA, 1.0, 0.0);
        # enforce the exact boundary values G(beta^-) = -n and G(0^+) = -(1-n)
        Gtau[-1, :] = -nf;
        Gtau[0, :] = -(1-nf);
        order_min = int(R['Sigma/up0/singularity/omin'][...]);
        order_max = min(R['Sigma/up0/singularity/mask'][:]);
        Stail = zeros((order_max-order_min+1, NCOR*SPINS), dtype = complex);
        for f in range(NCOR):
            for s, sp in enumerate(spins):
                tmp = R['Sigma/%s%d/singularity/data'%(sp,f)][:order_max-order_min+1, 0, 0];
                Stail[:, 2*f+s] = tmp[:,0] + 1j*tmp[:,1];
        # first/last rows carry the tail order range for downstream readers
        Stail = r_[order_min*ones((1, NCOR*SPINS)), Stail, order_max*ones((1, NCOR*SPINS))];
        if is_legendre:
            GLegendre = array(Gl).T;
            obs = { 'SelfEnergyTail' : Stail, 'GLegendre' : GLegendre , 'nn' : nn };
        else: obs = { 'SelfEnergyTail' : Stail, 'nn' : nn };
        if self.measure > 0:
            if 'TimeCorrelators' in R:
                # NOTE(review): eval() on data read from the HDF5 file - safe only
                # if the file is produced by the trusted solver; do not reuse on
                # untrusted input.
                opr_list = eval(str(R['TimeCorrelators/indices'][...]))
                for opr_name in opr_list:
                    obs[opr_name] = R['TimeCorrelators/%s/data'%opr_name][:]
        R.close()
        return Gtau, obs, Giwn, Siwn;
class TRIQSSolver(object):
    """Driver for the current TRIQS cthyb-based impurity solver."""
    def __init__(self, input_args):
        # input_args: dict with 'solver_path', 'mpirun_path' and 'np'
        self.args = input_args;
    def prepare(self, prefix, in_data):
        """Write hybridization files and <prefix>.parms for the TRIQS script."""
        print 'Prepare input for the impurity solver'
        self.mysys = in_data['parms'];
        self.prefix = prefix
        self.measure = int(self.mysys['MEASURE'])
        mysys = self.mysys
        beta = float(mysys['BETA'])
        hyb_mat = in_data['hybmat']
        hyb_tail = in_data['hybtail']
        assert(int(mysys['N_MAX_FREQ']) == len(hyb_mat))
        # fermionic Matsubara frequencies (2n+1)*pi/beta
        wn = (2*arange(int(mysys['N_MAX_FREQ']))+1)*pi/beta
        savetxt('%s.hybmat.real'%prefix, c_[wn, hyb_mat.real]);
        savetxt('%s.hybmat.imag'%prefix, c_[wn, hyb_mat.imag]);
        savetxt('%s.hybmat.tail'%prefix, hyb_tail);
        savetxt('%s.mu_eff'%prefix, in_data['MU']);
        # prepare parms file for TRIQS solver
        triqs_parms = {
            'n_cycles' : int(mysys['SWEEPS'])/self.args['np'],
            'length_cycle' : mysys.get('N_MEAS', 100),
            'n_warmup_cycles' : mysys.get('THERMALIZATION', 10000),
            'max_time' : mysys.get('MAX_TIME', -1),
            'partition_method' : mysys.get('TRIQS_PARTITION_METHOD',
                                           'autopartition'),
            'U' : mysys['U'],
            'J' : mysys['J'],
            'INTERACTION' : mysys.get('INTERACTION', 'Kanamori'),
            'HYB_MAT' : '%s.hybmat'%prefix,
            'MU_VECTOR' : '%s.mu_eff'%prefix,
            'BETA' : mysys['BETA'],
            'NFLAVORS' : int(mysys['FLAVORS'])/2,
            'NSPINS' : 2,
            'N_TAU' : max(10001, 2*len(wn)),
            'N_MAX_FREQ' : len(wn),
            'HDF5_OUTPUT' : '%s.triqs.out.h5'%prefix,
            'PREFIX' : prefix,
            'MEASURE' : self.measure,
            }
        solver_parms_file = open('%s.parms'%prefix, 'w')
        for k, v in triqs_parms.iteritems():
            solver_parms_file.write(k + ' = ' + str(v) + ';\n')
    def run(self):
        """Launch the solver binary; return its exit status."""
        print 'Running the solver %s'%('and measure static observables'
                                       if self.measure else '')
        cmd = '%s -n %d %s %s.parms 1>&2'%(self.args['mpirun_path'],
                                           self.args['np'],
                                           self.args['solver_path'],
                                           self.prefix)
        print cmd
        retval = os.system(cmd)
        return retval
    def collect(self):
        """Return (Gtau, obs, Giwn, Siwn) read from <prefix>.triqs.out.h5."""
        print 'Collect data from ' + self.prefix;
        mysys = self.mysys
        h5tmp = self.prefix+'.triqs.out.h5'
        arch = h5py.File(h5tmp, 'r')
        spin_names = ('up', 'dn')
        nfreqs = int(mysys['N_MAX_FREQ'])
        nflavors = int(mysys['FLAVORS']) / 2
        nspins = 2
        norbs = nflavors*nspins
        ntau = int(mysys['N_TAU'])
        beta = float(mysys['BETA'])
        Giwn = zeros((nfreqs, norbs), dtype=complex)
        Siwn = zeros((nfreqs, norbs), dtype=complex)
        Gtau = zeros((ntau+1, norbs), dtype=float)
        nf = zeros(norbs)
        wn = (2*arange(nfreqs)+1)*pi/beta
        for i in range(nflavors):
            for s in range(nspins):
                # flavor index interleaves spin within orbital: f = 2*i + s
                f = nspins*i+s
                d = arch['Giwn/%s_%d/data'%(spin_names[s], i)][:nfreqs, 0, 0]
                Giwn[:, f] = d[:, 0] + 1j*d[:, 1]
                d = arch['Siwn/%s_%d/data'%(spin_names[s], i)][:nfreqs, 0, 0]
                Siwn[:, f] = d[:, 0] + 1j*d[:, 1]
                Gtau[:, f] = cppext.IFT_mat2tau(Giwn[:, f].copy(), ntau+1,
                                                beta, 1.0, 0.0)
                # density from the Matsubara sum, assuming tail G ~ 1/(iwn)
                nf[f] = self._get_density_from_gmat(Giwn[:, f], [0, 1, 0])
                #nf[f] = arch['Occupancy/%s_%d'%(spin_names[s], i)][...]
        obs = {'sign' : arch['average_sign'][...]}
        if 'Observables' in arch:
            for k, v in arch['Observables'].iteritems(): obs[k] = v
        return Gtau, obs, Giwn, Siwn
    def _get_density_from_gmat(self, giwn, tail):
        """Occupancy from the Matsubara sum of G plus its analytic tail C."""
        beta = float(self.mysys['BETA'])
        # nfreqs = 1.5*int(self.mysys['N_CUTOFF'])
        nfreqs = int(self.mysys['N_MAX_FREQ'])
        wn = (2*arange(nfreqs)+1)*pi/beta
        C = tail
        density = 2./beta*real(sum(giwn[:nfreqs]) \
                               + C[2]*sum(1./wn**2)) + 0.5*C[1] - beta*C[2]/4.
        # NOTE(review): the final 'return density' of this method is garbled
        # into a dataset-artifact line immediately below this block.
        return density
import os, sys;
import h5py, user_config, cppext;
from numpy import *;
from share_fun import val_def;
from functions import generate_Umatrix;
def init_solver(parms, np):
solver_type = parms['SOLVER_TYPE'];
print '%s solver is used...'%solver_type;
input_args = {
'solver_path' : parms.get('SOLVER_EXE_PATH', ''),
'mpirun_path' : parms.get('SOLVER_MPIRUN_PATH', user_config.mpirun),
'np' : np
}
if solver_type == 'CTHYB_Matrix':
input_args['parm2xml'] = val_def(parms, 'PARMS2XML', user_config.parm2xml);
input_args['solver_path'] = user_config.solver_matrix;
solver = HybridizationMatrixSolver(input_args);
elif solver_type == 'CTHYB_Segment':
input_args['solver_path'] = user_config.solver_segment;
solver = HybridizationSegmentSolver(input_args);
elif solver_type == 'TRIQS':
input_args['solver_path'] = user_config.solver_triqs;
solver = TRIQSSolver(input_args);
elif solver_type == 'TRIQSOld':
input_args['solver_path'] = user_config.solver_triqs_old;
solver = TRIQSSolverOld(input_args);
else: print 'Solver %s unknown'%solver_type;
return solver;
class HybridizationMatrixSolver:
    """Driver for the external CT-QMC hybridization-expansion *matrix* solver.

    prepare() writes the hybridization function and parameter file,
    run() converts the parameter file to XML and launches the MPI binary,
    collect() reads the raw Green's function data back via cppext.
    """
    def __init__(self, input_args):
        # input_args: dict with 'solver_path', 'mpirun_path', 'np', 'parm2xml'
        self.args = input_args;
    def prepare(self, prefix, in_data):
        """Write <prefix>.hybtau and <prefix>.parms for the solver."""
        # prepare hybtau file for CTQMC
        print 'Prepare running solver for ' + prefix;
        self.prefix = prefix;
        parms = in_data['parms'];
        hyb_tau = in_data['hybtau'];
        FLAVORS = int(parms['FLAVORS']);
        # solver convention: sign-flipped hybridization on the reversed tau grid
        for f in range(FLAVORS): hyb_tau[:, f] = -hyb_tau[::-1, f];
        # prepend the tau column: N_TAU+1 points on [0, BETA]
        hyb_tau = c_[linspace(0, float(parms['BETA']), int(parms['N_TAU']) + 1), hyb_tau];
        savetxt(prefix+'.hybtau', hyb_tau);
        # lattice name follows the number of correlated orbitals (FLAVORS/2)
        if FLAVORS/2 == 3: Lattice = '"t2g system"';
        if FLAVORS/2 == 2: Lattice = '"eg system"';
        if FLAVORS/2 == 1: Lattice = '"site"';
        # prepare parms file for CTQMC
        green_only = 1; self.list_obs = None;
        if int(parms['MEASURE']) > 0:
            green_only = 0
            self.list_obs = parms['OBSERVABLES'].split(',')
        QMC_parms = {
            'LATTICE_LIBRARY' : user_config.LatticeLibrary,
            'LATTICE' : Lattice,
            'MODEL_LIBRARY' : user_config.ModelLibrary,
            'MODEL' : user_config.Model,
            'L' : FLAVORS/2,
            'SITES' : FLAVORS/2,
            'GREEN_ONLY' : green_only,
            'SEED' : random.random_integers(10000),
            'SWEEPS' : val_def(parms, 'SWEEPS', 500000),
            'THERMALIZATION' : val_def(parms, 'THERMALIZATION', 300),
            'N' : parms['N_TAU'],
            'N_ORDER' : val_def(parms, 'N_ORDER', 50),
            'N_MEAS' : val_def(parms, 'N_MEAS', 200),
            'N_SHIFT' : val_def(parms, 'N_SHIFT', 0),
            'N_SWAP' : val_def(parms, 'N_SWAP', 0),
            'BETA' : parms['BETA'],
            'U' : parms['U'],
            "U'" : float(parms['U']) - 2*float(parms['J']),
            'J' : parms['J'],
            'SPINS' : 2,
            'CONSERVED_QUANTUMNUMBERS': '"Nup, Ndown"',
            'F' : prefix + '.hybtau'
            };
        # chemical potentials per orbital: even MU entries are spin up, odd are down
        for f in range(FLAVORS/2):
            QMC_parms['MUUP'+str(f)] = in_data['MU'][2*f];
            QMC_parms['MUDOWN'+str(f)] = in_data['MU'][2*f+1];
        solver_parms_file = open(prefix + '.parms', 'w');
        for k, v in QMC_parms.iteritems():
            solver_parms_file.write(k + ' = ' + str(v) + ';\n');
        solver_parms_file.write('{}');
        solver_parms_file.close();
    def run(self):
        """Convert parms to XML, then launch the solver; returns exit status."""
        cmd = '%s %s.parms %s 1>&2'%(self.args['parm2xml'], self.prefix,self.prefix);
        print cmd; os.system(cmd);
        cmd = '%s -n %d %s %s.in.xml'%(self.args['mpirun_path'], self.args['np'], self.args['solver_path'], self.prefix);
        print cmd; return os.system(cmd);
    def collect(self):
        """Return (Gtau, obs); adds 'GreenError' when 'error' was requested."""
        print 'Collect data from ' + self.prefix;
        measure = 0;
        collect_error = False;
        if self.list_obs is not None:
            print 'also collect data for observables ', self.list_obs;
            if 'error' in self.list_obs:
                collect_error = True;
                self.list_obs.pop(self.list_obs.index('error'));
            measure = 1;
        Gtau, Gerr, obs = cppext.get_raw_data(self.prefix, measure, self.list_obs);
        if collect_error: obs.update({'GreenError' : mean(Gerr, 0) });
        return Gtau, obs;
class HybridizationSegmentSolver:
def __init__(self, input_args):
self.args = input_args;
def prepare(self, prefix, in_data):
# prepare hybtau file for CTQMC
print 'Prepare running solver for ' + prefix;
self.prefix = prefix;
self.list_obs = None;
self.parms = in_data['parms'];
self.MEASURE_freq = int(val_def(in_data['parms'], 'MEASURE_freq', 1));
parms = in_data['parms'];
FLAVORS = int(parms['FLAVORS']);
# prepare parms file for CTQMC
QMC_parms = {
'SEED' : random.random_integers(10000),
'SWEEPS' : int(val_def(parms, 'SWEEPS', 500000)),
'THERMALIZATION' : int(val_def(parms, 'THERMALIZATION', 300)),
'N_TAU' : int(parms['N_TAU']),
'N_HISTOGRAM_ORDERS' : int(val_def(parms, 'N_ORDER', 50)),
'N_MEAS' : int(val_def(parms, 'N_MEAS', 100)),
'N_CYCLES' : int(val_def(parms, 'N_CYCLES', 30)),
'BETA' : float(parms['BETA']),
'U_MATRIX' : self.prefix+'.Umatrix',
'MU_VECTOR' : self.prefix+'.MUvector',
'BASENAME' : prefix,
'DELTA' : prefix + '.hybtau',
'N_ORBITALS' : FLAVORS,
'MEASURE_freq' : self.MEASURE_freq,
'N_MATSUBARA' : int(parms['N_CUTOFF']),
'MAX_TIME' : val_def(parms, 'MAX_TIME', 80000),
};
self.Norder = QMC_parms['N_HISTOGRAM_ORDERS'];
solver_parms_file = open(prefix + '.parms', 'w');
for k, v in QMC_parms.iteritems(): solver_parms_file.write(k + ' = ' + str(v) + ';\n');
# Umatrix: either Slater-Kanamori form or using Slater integrals
Umatrix = generate_Umatrix(float(parms['U']), float(parms['J']),
FLAVORS/2, val_def(parms, 'INTERACTION_TYPE', 'SlaterKanamori'));
hyb_tau = in_data['hybtau'];
hyb_tau = c_[linspace(0, float(parms['BETA']), int(parms['N_TAU']) + 1), hyb_tau];
savetxt(prefix+'.hybtau', hyb_tau);
savetxt(self.prefix+'.Umatrix', Umatrix);
savetxt(self.prefix+'.MUvector', in_data['MU']);
def run(self):
FLAVORS = int(self.parms['FLAVORS']);
cmd = '%s -n %d %s %s.parms 1>&2'%(self.args['mpirun_path'], self.args['np'], self.args['solver_path'], self.prefix);
print cmd;
retval = os.system(cmd);
gh5 = h5py.File('%s.out.h5'%self.prefix, 'r');
sign = float(gh5['/simulation/results/Sign/mean/value'][...]);
if sign < 0.99: print >> sys.stderr, 'sign = %.4f: Run QMC again for %s!'%(sign, self.prefix); retval = 1;
for i in range(FLAVORS):
norder = float(gh5['/simulation/results/order_%d/mean/value'%i][...]);
if norder > self.Norder:
print sys.stderr >> "mean Norder of flavor %d > Norder = %d"%(norder, self.Norder);
retval = 1;
gh5.close(); del gh5;
return retval;
def collect(self):
print 'Collect data from ' + self.prefix;
FLAVORS = int(self.parms['FLAVORS']);
obs = None;
gh5 = h5py.File('%s.out.h5'%self.prefix, 'r');
Gtau = array([gh5['/G_tau/%d/mean/value'%f][:] for f in range(FLAVORS)]).T;
Serr = None;
if self.MEASURE_freq:
Giwn = array([gh5['/G_omega/%d/mean/value'%f][:, 0] + 1j*gh5['/G_omega/%d/mean/value'%f][:, 1] for f in range(FLAVORS)]).T;
Siwn = array([gh5['/S_omega/%d/mean/value'%f][:, 0] + 1j*gh5['/S_omega/%d/mean/value'%f][:, 1] for f in range(FLAVORS)]).T;
if int(self.parms['MEASURE']) > 0:
if 'error' in self.parms['OBSERVABLES']:
Serr = zeros((len(Siwn), FLAVORS));
for f in range(FLAVORS):
Fval = gh5['simulation/results/fw_re_%d/mean/value'%f][:] + 1j*gh5['simulation/results/fw_im_%d/mean/value'%f][:];
Ferr = gh5['simulation/results/fw_re_%d/mean/error'%f][:] + 1j*gh5['simulation/results/fw_im_%d/mean/error'%f][:];
Gval = gh5['simulation/results/gw_re_%d/mean/value'%f][:] + 1j*gh5['simulation/results/gw_im_%d/mean/value'%f][:];
Gerr = gh5['simulation/results/gw_re_%d/mean/error'%f][:] + 1j*gh5['simulation/results/gw_im_%d/mean/error'%f][:];
Serr[:, f] = abs(Fval/Gval) * sqrt(abs(Ferr/Fval)**2 + abs(Gerr/Gval)**2);
nn = array([]);
nf = -Gtau[-1, :];
for i in range(FLAVORS):
for j in range(i+1):
if i == j: tmp = nf[i];
else: tmp = float(gh5['/simulation/results/nn_%d_%d/mean/value'%(i,j)][...]);
nn = r_[nn, tmp];
gh5.close();
obs = { 'nn' : nn };
if Serr is not None: obs.update({'SelfEnergyError': Serr});
if self.MEASURE_freq: return Gtau, obs, Giwn, Siwn;
else: return Gtau, obs;
class TRIQSSolverOld:
    """Driver for the legacy TRIQS-based impurity solver.

    prepare() writes Matsubara hybridization data and a parameter file;
    run() launches the MPI binary; collect() rebuilds G(tau), Sigma tails,
    occupancies and optional Legendre / time-correlator data from HDF5.
    """
    def __init__(self, input_args):
        # input_args: dict with 'solver_path', 'mpirun_path' and 'np'
        self.args = input_args;
    def prepare(self, prefix, in_data):
        """Write <prefix>.hybmat.* / .Umatrix / .MUvector / .parms files."""
        print 'Prepare running solver for ' + prefix;
        self.prefix = prefix;
        parms = in_data['parms'];
        BETA = float(parms['BETA']);
        # number of correlated orbitals (two spins per orbital)
        NCOR = int(parms['FLAVORS']) / 2;
        self.beta = BETA;
        self.Ntau = int(parms['N_TAU']) + 1;
        self.Ncor = NCOR;
        self.measure = int(parms['MEASURE'])
        hyb_mat = in_data['hybmat'];
        hyb_tail = in_data['hybtail'];
        # fermionic Matsubara frequencies (2n+1)*pi/beta
        wn = (2*arange(size(hyb_mat, 0))+1)*pi/BETA;
        savetxt(prefix+'.hybmat.real', c_[wn, hyb_mat.real]);
        savetxt(prefix+'.hybmat.imag', c_[wn, hyb_mat.imag]);
        savetxt(prefix+'.hybmat.tail', hyb_tail);
        savetxt(prefix+'.MUvector', in_data['MU']);
        Umatrix = generate_Umatrix(float(parms['U']), float(parms['J']),
                NCOR, val_def(parms, 'INTERACTION_TYPE', 'SlaterKanamori'));
        savetxt(prefix+'.Umatrix', Umatrix);
        # prepare parms file for CTQMC
        QMC_parms = {
            'SWEEPS_EACH_NODE' : int(val_def(parms, 'SWEEPS', 500000))/self.args['np'],
            'THERMALIZATION' : val_def(parms, 'THERMALIZATION', 50000),
            'N_MEAS' : val_def(parms, 'N_MEAS', 100),
            'BETA' : parms['BETA'],
            'U_MATRIX' : prefix+'.Umatrix',
            'MU_VECTOR' : prefix + '.MUvector',
            'HYB_MAT' : prefix + '.hybmat',
            'NCOR' : NCOR,
            'HDF5_OUTPUT' : prefix + '.solution.h5',
            'N_LEGENDRE' : val_def(parms, 'TRIQS_N_LEGENDRE', 50),
            'ACCUMULATION' : val_def(parms, 'TRIQS_ACCUMULATION', 'legendre'),
            'SPINFLIP' : val_def(parms, 'TRIQS_SPINFLIP', 1),
            'MEASURE' : self.measure,
            };
        solver_parms_file = open(prefix + '.parms', 'w');
        for k, v in QMC_parms.iteritems(): solver_parms_file.write(k + ' = ' + str(v) + ';\n');
    def run(self):
        """Launch the solver binary; return its exit status."""
        cmd = '%s -n %d %s %s.parms 1>&2'%(self.args['mpirun_path'], self.args['np'], self.args['solver_path'], self.prefix);
        print cmd;
        retval = os.system(cmd);
        return retval;
    def collect(self):
        """Return (Gtau, obs, Giwn, Siwn) assembled from <prefix>.solution.h5."""
        print 'Collect data from ' + self.prefix;
        R = h5py.File(self.prefix+'.solution.h5', 'r');
        BETA = self.beta;
        SPINS = 2; spins = ('up', 'dn');
        NCOR = self.Ncor;
        G = []; S = []; nf = []; Gl = [];
        is_legendre = True if 'G_Legendre' in R else False;
        for f in range(NCOR):
            for sp in spins:
                # data layout: [..., 0]=real part, [..., 1]=imaginary part
                G.append(R['G/%s%d/data'%(sp,f)][:, 0, 0, 0] + 1j*R['G/%s%d/data'%(sp,f)][:, 0, 0, 1]);
                if is_legendre: Gl.append(R['G_Legendre/%s%d/data'%(sp,f)][:, 0, 0]);
                S.append(R['Sigma/%s%d/data'%(sp,f)][:, 0, 0, 0] + 1j*R['Sigma/%s%d/data'%(sp,f)][:, 0, 0, 1]);
                nf.append(float(R['Observables/N_%s%d'%(sp,f)][...]));
        Giwn = array(G).T;
        Siwn = array(S).T;
        nf = array(nf);
        nn = array([]);
        # flattened lower triangle of <n_i n_j>; diagonal entries are the densities
        for i in range(SPINS*NCOR):
            for j in range(i+1):
                if i == j: tmp = nf[i];
                else: tmp = float(R['Observables/nn_%d_%d'%(i,j)][...]);
                nn = r_[nn, tmp];
        obs = { 'nn' : nn };
        Gtau = zeros((self.Ntau, SPINS*NCOR), dtype = float);
        for f in range(SPINS*NCOR):
            Gtau[:, f] = cppext.IFT_mat2tau(Giwn[:, f].copy(), self.Ntau, BETA, 1.0, 0.0);
        # enforce the exact boundary values G(beta^-) = -n and G(0^+) = -(1-n)
        Gtau[-1, :] = -nf;
        Gtau[0, :] = -(1-nf);
        order_min = int(R['Sigma/up0/singularity/omin'][...]);
        order_max = min(R['Sigma/up0/singularity/mask'][:]);
        Stail = zeros((order_max-order_min+1, NCOR*SPINS), dtype = complex);
        for f in range(NCOR):
            for s, sp in enumerate(spins):
                tmp = R['Sigma/%s%d/singularity/data'%(sp,f)][:order_max-order_min+1, 0, 0];
                Stail[:, 2*f+s] = tmp[:,0] + 1j*tmp[:,1];
        # first/last rows carry the tail order range for downstream readers
        Stail = r_[order_min*ones((1, NCOR*SPINS)), Stail, order_max*ones((1, NCOR*SPINS))];
        if is_legendre:
            GLegendre = array(Gl).T;
            obs = { 'SelfEnergyTail' : Stail, 'GLegendre' : GLegendre , 'nn' : nn };
        else: obs = { 'SelfEnergyTail' : Stail, 'nn' : nn };
        if self.measure > 0:
            if 'TimeCorrelators' in R:
                # NOTE(review): eval() on data read from the HDF5 file - safe only
                # if the file is produced by the trusted solver; do not reuse on
                # untrusted input.
                opr_list = eval(str(R['TimeCorrelators/indices'][...]))
                for opr_name in opr_list:
                    obs[opr_name] = R['TimeCorrelators/%s/data'%opr_name][:]
        R.close()
        return Gtau, obs, Giwn, Siwn;
class TRIQSSolver(object):
    """Driver for the current TRIQS cthyb-based impurity solver."""
    def __init__(self, input_args):
        # input_args: dict with 'solver_path', 'mpirun_path' and 'np'
        self.args = input_args;
    def prepare(self, prefix, in_data):
        """Write hybridization files and <prefix>.parms for the TRIQS script."""
        print 'Prepare input for the impurity solver'
        self.mysys = in_data['parms'];
        self.prefix = prefix
        self.measure = int(self.mysys['MEASURE'])
        mysys = self.mysys
        beta = float(mysys['BETA'])
        hyb_mat = in_data['hybmat']
        hyb_tail = in_data['hybtail']
        assert(int(mysys['N_MAX_FREQ']) == len(hyb_mat))
        # fermionic Matsubara frequencies (2n+1)*pi/beta
        wn = (2*arange(int(mysys['N_MAX_FREQ']))+1)*pi/beta
        savetxt('%s.hybmat.real'%prefix, c_[wn, hyb_mat.real]);
        savetxt('%s.hybmat.imag'%prefix, c_[wn, hyb_mat.imag]);
        savetxt('%s.hybmat.tail'%prefix, hyb_tail);
        savetxt('%s.mu_eff'%prefix, in_data['MU']);
        # prepare parms file for TRIQS solver
        triqs_parms = {
            'n_cycles' : int(mysys['SWEEPS'])/self.args['np'],
            'length_cycle' : mysys.get('N_MEAS', 100),
            'n_warmup_cycles' : mysys.get('THERMALIZATION', 10000),
            'max_time' : mysys.get('MAX_TIME', -1),
            'partition_method' : mysys.get('TRIQS_PARTITION_METHOD',
                                           'autopartition'),
            'U' : mysys['U'],
            'J' : mysys['J'],
            'INTERACTION' : mysys.get('INTERACTION', 'Kanamori'),
            'HYB_MAT' : '%s.hybmat'%prefix,
            'MU_VECTOR' : '%s.mu_eff'%prefix,
            'BETA' : mysys['BETA'],
            'NFLAVORS' : int(mysys['FLAVORS'])/2,
            'NSPINS' : 2,
            'N_TAU' : max(10001, 2*len(wn)),
            'N_MAX_FREQ' : len(wn),
            'HDF5_OUTPUT' : '%s.triqs.out.h5'%prefix,
            'PREFIX' : prefix,
            'MEASURE' : self.measure,
            }
        solver_parms_file = open('%s.parms'%prefix, 'w')
        for k, v in triqs_parms.iteritems():
            solver_parms_file.write(k + ' = ' + str(v) + ';\n')
    def run(self):
        """Launch the solver binary; return its exit status."""
        print 'Running the solver %s'%('and measure static observables'
                                       if self.measure else '')
        cmd = '%s -n %d %s %s.parms 1>&2'%(self.args['mpirun_path'],
                                           self.args['np'],
                                           self.args['solver_path'],
                                           self.prefix)
        print cmd
        retval = os.system(cmd)
        return retval
    def collect(self):
        """Return (Gtau, obs, Giwn, Siwn) read from <prefix>.triqs.out.h5."""
        print 'Collect data from ' + self.prefix;
        mysys = self.mysys
        h5tmp = self.prefix+'.triqs.out.h5'
        arch = h5py.File(h5tmp, 'r')
        spin_names = ('up', 'dn')
        nfreqs = int(mysys['N_MAX_FREQ'])
        nflavors = int(mysys['FLAVORS']) / 2
        nspins = 2
        norbs = nflavors*nspins
        ntau = int(mysys['N_TAU'])
        beta = float(mysys['BETA'])
        Giwn = zeros((nfreqs, norbs), dtype=complex)
        Siwn = zeros((nfreqs, norbs), dtype=complex)
        Gtau = zeros((ntau+1, norbs), dtype=float)
        nf = zeros(norbs)
        wn = (2*arange(nfreqs)+1)*pi/beta
        for i in range(nflavors):
            for s in range(nspins):
                # flavor index interleaves spin within orbital: f = 2*i + s
                f = nspins*i+s
                d = arch['Giwn/%s_%d/data'%(spin_names[s], i)][:nfreqs, 0, 0]
                Giwn[:, f] = d[:, 0] + 1j*d[:, 1]
                d = arch['Siwn/%s_%d/data'%(spin_names[s], i)][:nfreqs, 0, 0]
                Siwn[:, f] = d[:, 0] + 1j*d[:, 1]
                Gtau[:, f] = cppext.IFT_mat2tau(Giwn[:, f].copy(), ntau+1,
                                                beta, 1.0, 0.0)
                # density from the Matsubara sum, assuming tail G ~ 1/(iwn)
                nf[f] = self._get_density_from_gmat(Giwn[:, f], [0, 1, 0])
                #nf[f] = arch['Occupancy/%s_%d'%(spin_names[s], i)][...]
        obs = {'sign' : arch['average_sign'][...]}
        if 'Observables' in arch:
            for k, v in arch['Observables'].iteritems(): obs[k] = v
        return Gtau, obs, Giwn, Siwn
    def _get_density_from_gmat(self, giwn, tail):
        """Occupancy from the Matsubara sum of G plus its analytic tail C."""
        beta = float(self.mysys['BETA'])
        # nfreqs = 1.5*int(self.mysys['N_CUTOFF'])
        nfreqs = int(self.mysys['N_MAX_FREQ'])
        wn = (2*arange(nfreqs)+1)*pi/beta
        C = tail
        density = 2./beta*real(sum(giwn[:nfreqs]) \
                               + C[2]*sum(1./wn**2)) + 0.5*C[1] - beta*C[2]/4.
        # NOTE(review): the final 'return density' of this method is garbled
        # into a dataset-artifact line immediately below this block.
        return density
import sys
sys.path.insert(0, '../utils')
import os
import os.path as osp
import numpy as np
import torch
import tqdm
import torch.nn as nn
from sklearn.metrics import confusion_matrix, classification_report
import yaml
from model import Model
from particle_dataset import ParticleDataset
from segment_scan import segment_scan
from config import cfg
from datetime import datetime
from sklearn.externals import joblib
from torch.utils.data import DataLoader
from torch.autograd import Variable
def detection_collate(batch):
    """Collate per-sample voxel dicts into a single batch dict.

    'inputs' and 'labels' are concatenated across samples; each sample's
    'coords' gets an extra leading column holding its batch index so every
    voxel can be traced back to the sample it came from.
    """
    feats = [item['inputs'] for item in batch]
    tags = [item['labels'] for item in batch]
    coords = [
        np.pad(item['coords'], ((0, 0), (1, 0)),
               mode='constant', constant_values=idx)
        for idx, item in enumerate(batch)
    ]
    return {
        'inputs': np.concatenate(feats),
        'coords': np.concatenate(coords),
        'labels': np.concatenate(tags),
    }
# Models
# Names of trained-model run directories (under cfg.LOG_DIR) to evaluate.
input_models = [
    '20190429_152306_test_model'
]
# Evaluation parameters
save_pcl = True        # also dump the evaluated point clouds to disk
eval_title = 'val'     # suffix of the evaluation output directory
test_data = 'val'      # dataset split (directory under cfg.DATASETS_DIR)
now = datetime.now()
for m in input_models:
    # Load model parameters
    with open(osp.join(cfg.LOG_DIR, m, 'config.yaml'), 'r') as f:
        parameters = yaml.load(f,Loader=yaml.SafeLoader)
    eval_dir = osp.join(cfg.LOG_DIR, m, 'evaluations/eval_' + now.strftime('%Y%m%d_%H%M%S') + '_' + eval_title)
    os.makedirs(eval_dir)
    # os.makedirs(osp.join(eval_dir,'scans'))
    criterion = nn.CrossEntropyLoss()  # NOTE(review): defined but unused below
    # Choose device for computation
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")
    model = Model(features_in=parameters["features_size"])
    # Load the best model saved
    model_state = osp.join(cfg.LOG_DIR, m, 'best.pth.tar')
    model_state = torch.load(model_state)
    model.load_state_dict(model_state['model_state_dict'])
    model.eval()
    model.to(device)
    # ParticleDataset
    # feature scaler fitted at training time; must match the training run
    scaler = joblib.load(osp.join(cfg.LOG_DIR,m,'scaler.pkl'))
    # Load dataset parameters
    with open(osp.join(cfg.DATASETS_DIR, test_data, 'config.yaml'), 'r') as f:
        ds_parameters = yaml.load(f,Loader=yaml.SafeLoader)
    nb_scans = ds_parameters['nb_scans']
    y_target = []
    y_pred = []
    val_loss = 0
    eval_pcl = []
    test_loader = DataLoader(ParticleDataset(dataset_dir=osp.join(cfg.DATASETS_DIR, test_data), scaler=scaler), batch_size=1, shuffle=False, num_workers=4, collate_fn=detection_collate)
    for s, sample in tqdm.tqdm(enumerate(test_loader), total=len(test_loader),
                               desc='Evaluating scan', ncols=80,
                               leave=False):
        # raw point cloud of scan s; relies on the loader preserving order
        pcl = np.load(osp.join(cfg.DATASETS_DIR, test_data, 'scan_pcls', str(s) + '.npy'))
        inputs = Variable(torch.FloatTensor(sample['inputs']))
        coords = Variable(torch.LongTensor(sample['coords']))
        labels = Variable(torch.LongTensor(sample['labels']))
        inputs, coords, labels = inputs.to(device), coords.to(device), labels.to(device)
        # forward
        with torch.no_grad():
            pred = model(inputs, coords)
        [proba, index] = pred.max(dim=1)
        pred = index.cpu().data.numpy()
        # convert predicted voxels back into pointcloud
        raw_pred_points = segment_scan(pcl, pred)
        # np.save(osp.join(eval_dir, 'scans', str(s)), raw_pred_points[:, :7])
        # Removes any scan with ground truth = 1 for whole scan
        # (column 5 = ground-truth label, column 6 = prediction - presumably;
        # verify against segment_scan)
        if np.sum((raw_pred_points[:, 5] == 1).astype(np.int8)) / float(raw_pred_points.shape[0]) < 0.5:
            if save_pcl:
                eval_pcl.append(raw_pred_points[:, :7])
            y_pred.append(raw_pred_points[:, 6])
            y_target.append(raw_pred_points[:, 5])
    if save_pcl:
        np.save(osp.join(eval_dir,'scans'),eval_pcl)
    y_pred = np.concatenate(y_pred)
    y_target = np.concatenate(y_target)
    with open(osp.join(eval_dir, 'eval_results.txt'), 'w') as f:
        f.write('Evaluation parameters:\n')
        f.write('ParticleDataset: %s\n' % test_data)
        f.write('nb_scans: %s\n' % ds_parameters['nb_scans'])
        f.write('dataset_size: %s\n' % ds_parameters['dataset_size'])
        f.write('\n\nEvaluation results:\n')
        # Compute performance scores
        print('\n')
        print("Evaluation results for model: %s" % m)
        print('Confusion Matrix')
        f.write("Confusion Matrix\n")
        cnf_matrix = confusion_matrix(y_target, y_pred).astype(np.float32) # Compute confusion matrix
        cnf_matrix /= cnf_matrix.sum(1, keepdims=True) # put into ratio
        print(cnf_matrix)
        f.write(str(cnf_matrix))
        f.write('\n')
        # Can only use this if both classes are at least predicted once
        if len(np.unique(y_pred)) > 1:
            print('Classification Report')
            f.write('Classification Report\n')
            cr = classification_report(y_target, y_pred)
            print(cr)
            f.write(cr)
        f.write('\n')
import sys
sys.path.insert(0, '../utils')
import os
import os.path as osp
import numpy as np
import torch
import tqdm
import torch.nn as nn
from sklearn.metrics import confusion_matrix, classification_report
import yaml
from model import Model
from particle_dataset import ParticleDataset
from segment_scan import segment_scan
from config import cfg
from datetime import datetime
from sklearn.externals import joblib
from torch.utils.data import DataLoader
from torch.autograd import Variable
def detection_collate(batch):
    """Collate per-scan samples into a single batch dictionary.

    Voxel features and labels are concatenated as-is; each sample's
    coordinate array gets its batch index prepended as an extra first
    column (via np.pad) so voxels can be attributed back to their scan
    after merging.
    """
    features = [sample['inputs'] for sample in batch]
    labels = [sample['labels'] for sample in batch]
    coords = [
        np.pad(sample['coords'], ((0, 0), (1, 0)),
               mode='constant', constant_values=idx)
        for idx, sample in enumerate(batch)
    ]
    return {
        'inputs': np.concatenate(features),
        'coords': np.concatenate(coords),
        'labels': np.concatenate(labels),
    }
# Models
# Log-directory names (under cfg.LOG_DIR) of the trained models to evaluate.
input_models = [
    '20190429_152306_test_model'
]
# Evaluation parameters
save_pcl = True  # also dump the evaluated point clouds to <eval_dir>/scans.npy
eval_title = 'val'  # suffix appended to the timestamped evaluation directory name
test_data = 'val'  # dataset (under cfg.DATASETS_DIR) to evaluate against
now = datetime.now()  # one shared timestamp for all models in this run
# Evaluate every model in input_models on the test_data dataset and write a
# confusion matrix / classification report into a fresh evaluation directory.
for m in input_models:
    # Load model parameters
    with open(osp.join(cfg.LOG_DIR, m, 'config.yaml'), 'r') as f:
        parameters = yaml.load(f,Loader=yaml.SafeLoader)
    # One timestamped evaluation directory per model per run.
    eval_dir = osp.join(cfg.LOG_DIR, m, 'evaluations/eval_' + now.strftime('%Y%m%d_%H%M%S') + '_' + eval_title)
    os.makedirs(eval_dir)
    # os.makedirs(osp.join(eval_dir,'scans'))
    criterion = nn.CrossEntropyLoss()  # NOTE(review): defined but never used below
    # Choose device for computation
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")
    model = Model(features_in=parameters["features_size"])
    # Load the best model saved
    model_state = osp.join(cfg.LOG_DIR, m, 'best.pth.tar')
    model_state = torch.load(model_state)
    model.load_state_dict(model_state['model_state_dict'])
    model.eval()
    model.to(device)
    # ParticleDataset
    # Reuse the feature scaler that was fitted at training time.
    scaler = joblib.load(osp.join(cfg.LOG_DIR,m,'scaler.pkl'))
    # Load dataset parameters
    with open(osp.join(cfg.DATASETS_DIR, test_data, 'config.yaml'), 'r') as f:
        ds_parameters = yaml.load(f,Loader=yaml.SafeLoader)
    nb_scans = ds_parameters['nb_scans']
    y_target = []
    y_pred = []
    val_loss = 0
    eval_pcl = []
    # batch_size=1: one scan per step so the loader index s maps to a scan file.
    test_loader = DataLoader(ParticleDataset(dataset_dir=osp.join(cfg.DATASETS_DIR, test_data), scaler=scaler), batch_size=1, shuffle=False, num_workers=4, collate_fn=detection_collate)
    for s, sample in tqdm.tqdm(enumerate(test_loader), total=len(test_loader),
                               desc='Evaluating scan', ncols=80,
                               leave=False):
        pcl = np.load(osp.join(cfg.DATASETS_DIR, test_data, 'scan_pcls', str(s) + '.npy'))
        inputs = Variable(torch.FloatTensor(sample['inputs']))
        coords = Variable(torch.LongTensor(sample['coords']))
        labels = Variable(torch.LongTensor(sample['labels']))
        inputs, coords, labels = inputs.to(device), coords.to(device), labels.to(device)
        # forward
        with torch.no_grad():
            pred = model(inputs, coords)
            [proba, index] = pred.max(dim=1)
            pred = index.cpu().data.numpy()
        # convert predicted voxels back into pointcloud
        raw_pred_points = segment_scan(pcl, pred)
        # np.save(osp.join(eval_dir, 'scans', str(s)), raw_pred_points[:, :7])
        # Removes any scan with ground truth = 1 for whole scan
        # (skip scans where >=50% of points carry ground-truth label 1;
        # column 5 = ground truth, column 6 = prediction)
        if np.sum((raw_pred_points[:, 5] == 1).astype(np.int8)) / float(raw_pred_points.shape[0]) < 0.5:
            if save_pcl:
                eval_pcl.append(raw_pred_points[:, :7])
            y_pred.append(raw_pred_points[:, 6])
            y_target.append(raw_pred_points[:, 5])
    if save_pcl:
        np.save(osp.join(eval_dir,'scans'),eval_pcl)
    y_pred = np.concatenate(y_pred)
    y_target = np.concatenate(y_target)
    with open(osp.join(eval_dir, 'eval_results.txt'), 'w') as f:
        f.write('Evaluation parameters:\n')
        f.write('ParticleDataset: %s\n' % test_data)
        f.write('nb_scans: %s\n' % ds_parameters['nb_scans'])
        f.write('dataset_size: %s\n' % ds_parameters['dataset_size'])
        f.write('\n\nEvaluation results:\n')
        # Compute performance scores
        print('\n')
        print("Evaluation results for model: %s" % m)
        print('Confusion Matrix')
        f.write("Confusion Matrix\n")
        cnf_matrix = confusion_matrix(y_target, y_pred).astype(np.float32) # Compute confusion matrix
        cnf_matrix /= cnf_matrix.sum(1, keepdims=True) # put into ratio
        print(cnf_matrix)
        f.write(str(cnf_matrix))
        f.write('\n')
        # Can only use this if both classes are at least predicted once
        if len(np.unique(y_pred)) > 1:
            print('Classification Report')
            f.write('Classification Report\n')
            cr = classification_report(y_target, y_pred)
            print(cr)
            f.write(cr)
f.write('\n') | 0.663996 | 0.27903 |
import subprocess
import argparse
import shutil
import sys
import time
from datetime import datetime
import re
import os.path
import pandas as pd
import sys
from Bio.Seq import Seq
# Output file: one CSV row per (possibly split) variant that passes the
# filters applied below.  NOTE(review): opened at import time and never closed.
fixed_file = open("filtered_variants.txt", "w+")
def translate(seq):
    """Translate a DNA sequence into a one-letter amino-acid string.

    Stop codons (TAA/TAG/TGA) are rendered as 'X'.  A sequence whose
    length is not a multiple of three yields an empty string; a codon
    containing characters other than A/C/G/T raises KeyError (the same
    contract as the original implementation).
    """
    codon_table = {
        'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
        'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
        'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
        'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
        'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
        'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
        'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
        'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
        'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
        'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
        'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
        'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
        'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
        'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
        'TAC':'Y', 'TAT':'Y', 'TAA':'X', 'TAG':'X',
        'TGC':'C', 'TGT':'C', 'TGA':'X', 'TGG':'W',
    }
    if len(seq) % 3 != 0:
        # Incomplete trailing codon: keep the original silent behaviour.
        return ""
    codons = (seq[pos:pos + 3] for pos in range(0, len(seq), 3))
    return "".join(codon_table[codon] for codon in codons)
# Filter bcftools/ANNOVAR-annotated variants and rewrite them as CSV rows,
# splitting multi-codon indels into one row per affected codon.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-name', help="Provide sample name.")
    try:
        args = parser.parse_args()
    except:
        # argparse failures raise SystemExit; show usage and stop quietly.
        parser.print_help()
        sys.exit(0)
    sample_name = args.name
    # Each line of variants.txt is tab-separated and carries DP4=, AD= and
    # optionally IMF= key/value fields in its INFO blob.
    for line in open("variants.txt"):
        dp4=line.split("DP4=")[1].split(";")[0]
        ad = line.split("AD=")[1].split(";")[0]
        #allele_ref = int(ad.split(",")[0]) + int(ad.split(",")[1])
        #allele_alt = int(ad.split(",")[2]) + int(ad.split(",")[3])
        allele_ref = int(ad.split(",")[0])
        # DP4 fields 3 and 4 count alt-supporting reads; when both are zero
        # the alt allele is treated as absent regardless of AD.
        if (int(dp4.split(",")[2]) + int(dp4.split(",")[3])) == 0:
            allele_alt = 0
        else:
            allele_alt = int(ad.split(",")[1])
        fixed_depth = int(dp4.split(",")[0]) + int(dp4.split(",")[1]) + int(dp4.split(",")[2]) + int(dp4.split(",")[3])
        if (allele_ref + allele_alt) > 0 and allele_alt > 0:
            # Prefer the caller-reported indel fraction (IMF) when present.
            if("IMF" in line):
                af = float(line.split("IMF=")[1].split(";")[0])
            else:
                af = allele_alt / fixed_depth
            type = line.split("\t")[1]  # NOTE(review): shadows the builtin `type`
            # Keep variants with >=1% AF, non-synonymous, a short ref allele,
            # and a usable (non-wholegene) gene annotation.
            if(af >= 0.01 and type!="synonymous SNV" and len(line.split("\t")[6])<400 and "wholegene" not in line.split("\t")[2]):
                line_parts = line.split("\t")
                nuc_ref = (line_parts[6])
                nuc_alt = (line_parts[7])
                fixed_aa_change = line_parts[2].split(":p.")[1].split(",")[0]
                fixed_protein = line_parts[2].split(":")[1]
                #fixed_depth = int(allele_ref + allele_alt)
                fixed_nuc_change = line_parts[2].split(":c.")[1].split(":")[0]
                # if(line_parts[12].rstrip()=="-"):
                # mat_peptide = ""
                # mat_peptide_nuc_change = ""
                # mat_peptide_aa_change = ""
                # else:
                # mat_peptide = line_parts[12].split(":")[0]
                # mat_peptide_nuc_change = line_parts[12].split(":")[1].rstrip().split(";")[0].strip()
                # mat_peptide_aa_change = line_parts[12].split(":")[1].rstrip().split(";")[1].strip()
                #visualization.write("Sample,Position,Protein,AAChange,NucleotideChange,AlleleFreq,Depth,Type,MatPeptide,MatPeptideAAChange,MatPeptideNucChange\n"
                # Extract the leading nucleotide position from the c. change string.
                nuc = fixed_nuc_change
                if '_' in nuc:
                    nuc_num = nuc.split("_")[0]
                elif 'del' in nuc or 'dup' in nuc:
                    nuc_num = int(nuc[0:-4])
                elif type == "frameshift insertion" or "ins" in nuc:
                    nuc_num = int(nuc.split("_")[0])
                else:
                    nuc_num = int(nuc[1:-1])
                nuc_num=str(nuc_num)
                # Time to break up some indels into multiple lines
                if(type=="nonframeshift deletion"):
                    # Emit one CSV row per deleted codon.
                    translated_ref = translate(nuc_ref)
                    aa_start_pos = int(fixed_aa_change.split("_")[0])  # assumes a numeric prefix before '_' -- TODO confirm annotation format
                    nuc_pos = int(line_parts[4])
                    for codon in range(3,len(nuc_ref) + 1, 3):
                        split_nuc_ref = (nuc_ref[codon-3:codon])
                        split_amino_ref = (translated_ref[int(codon/3)-1])
                        nuc_change = split_nuc_ref + str(nuc_pos) + "del"
                        # SAMPLE_ID GENE GENPOS AAPOS AAREF AASUB NUCCHANGE AAFREQ DEPTH TYPE
                        fixed_file.write(sample_name + "," + str(fixed_protein) + "," + str(nuc_pos) + "," + str(aa_start_pos) + "," + split_amino_ref + "," + "-" + "," + nuc_change + "," + str(af) + "," + str(fixed_depth) + "," + str(allele_ref) + "," + str(allele_alt) + "," + line_parts[1] + "," + str(nuc_num) + "\n")#+ "," + mat_peptide + "," + mat_peptide_nuc_change + "," + mat_peptide_aa_change + "\n")
                        aa_start_pos += 1
                        nuc_pos +=3
                        nuc_num = int(nuc_num) + 3
                elif(type=="nonframeshift insertion"):
                    translated_alt = translate(nuc_alt)
                    aa_start_pos = fixed_aa_change.split("delins")[0][1:]
                    split_amino_ref = fixed_aa_change[0]
                    split_amino_alt = fixed_aa_change.split("delins")[1]
                    nuc_change = line_parts[4] + "ins" + fixed_nuc_change.split("ins")[1]
                    # SAMPLE_ID GENE GENPOS AAPOS AAREF AASUB NUCCHANGE AAFREQ DEPTH TYPE
                    fixed_file.write(sample_name + "," + str(fixed_protein) + "," + line_parts[4] + "," + str(aa_start_pos) + "," + split_amino_ref + "," + split_amino_alt + "," + nuc_change + "," + str(af) + "," + str(fixed_depth) + "," + str(allele_ref) + "," + str(allele_alt) + "," + line_parts[1] + "," + nuc_num + "\n")#+ "," + mat_peptide + "," + mat_peptide_nuc_change + "," + mat_peptide_aa_change + "\n")
                elif(type == "frameshift insertion"):
                    split_amino_ref = fixed_aa_change[0]
                    aa_start_pos = fixed_aa_change.split("fs")[0][1:]
                    split_amino_alt = "fs"
                    # Insertions annotated as duplications use the "dup" notation.
                    if("dup" in nuc):
                        nuc_change = line_parts[4] + "dup" + fixed_nuc_change.split("dup")[1]
                    else:
                        nuc_change = line_parts[4] + "ins" + fixed_nuc_change.split("ins")[1]
                    # SAMPLE_ID GENE GENPOS AAPOS AAREF AASUB NUCCHANGE AAFREQ DEPTH TYPE
                    fixed_file.write(sample_name + "," + str(fixed_protein) + "," + line_parts[4] + "," + str(aa_start_pos) + "," + split_amino_ref + "," + split_amino_alt + "," + nuc_change + "," + str(af) + "," + str(fixed_depth) + "," + str(allele_ref) + "," + str(allele_alt) + "," + line_parts[1] + "," + nuc_num + "\n")#+ "," + mat_peptide + "," + mat_peptide_nuc_change + "," + mat_peptide_aa_change + "\n")
                elif(type == "frameshift deletion"):
                    split_amino_ref = fixed_aa_change[0]
                    aa_start_pos = fixed_aa_change.split("fs")[0][1:]
                    split_amino_alt = "fs"
                    nuc_change = nuc_ref + line_parts[4] + "del"
                    # SAMPLE_ID GENE GENPOS AAPOS AAREF AASUB NUCCHANGE AAFREQ DEPTH TYPE
                    fixed_file.write(sample_name + "," + str(fixed_protein) + "," + line_parts[4] + "," + str(aa_start_pos) + "," + split_amino_ref + "," + split_amino_alt + "," + nuc_change + "," + str(af) + "," + str(fixed_depth) + "," + str(allele_ref) + "," + str(allele_alt) + "," + line_parts[1] + "," + nuc_num + "\n")#+ "," + mat_peptide + "," + mat_peptide_nuc_change + "," + mat_peptide_aa_change + "\n")
                else:
                    # Plain substitution (relabelled "stoploss" when the
                    # reference amino acid is a stop, i.e. 'X').
                    split_amino_ref = fixed_aa_change[0]
                    split_amino_alt = fixed_aa_change[-1]
                    aa_start_pos = fixed_aa_change[1:-1]
                    nuc_change = nuc_ref + line_parts[4] + nuc_alt
                    if(split_amino_ref=="X"):
                        type = "stoploss"
                    else:
                        type = line_parts[1]
                    print(sample_name)
                    print(fixed_protein)
                    print(line_parts[4])
                    # SAMPLE_ID GENE GENPOS AAPOS AAREF AASUB NUCCHANGE AAFREQ DEPTH TYPE
fixed_file.write(sample_name + "," + str(fixed_protein) + "," + line_parts[4] + "," + str(aa_start_pos) + "," + split_amino_ref + "," + split_amino_alt + "," + nuc_change + "," + str(af) + "," + str(fixed_depth) + "," + str(allele_ref) + "," + str(allele_alt) + "," + type + "," + nuc_num + "\n")#+ "," + mat_peptide + "," + mat_peptide_nuc_change + "," + mat_peptide_aa_change + "\n") | annotation/correct_AF_bcftools.py | import subprocess
import argparse
import shutil
import sys
import time
from datetime import datetime
import re
import os.path
import pandas as pd
import sys
from Bio.Seq import Seq
# Output CSV of filtered/split variants (second, verbatim copy of the script
# in this dump).  NOTE(review): opened at import time and never closed.
fixed_file = open("filtered_variants.txt", "w+")
def translate(seq):
    """Translate a DNA sequence to amino acids (stop codons -> 'X').

    Returns an empty string when len(seq) is not a multiple of three;
    raises KeyError for codons containing non-ACGT characters.
    """
    # Standard DNA codon table; TAA/TAG/TGA are encoded as 'X'.
    table = {
        'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
        'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
        'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
        'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
        'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
        'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
        'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
        'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
        'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
        'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
        'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
        'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
        'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
        'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
        'TAC':'Y', 'TAT':'Y', 'TAA':'X', 'TAG':'X',
        'TGC':'C', 'TGT':'C', 'TGA':'X', 'TGG':'W',
    }
    protein =""
    if len(seq)%3 == 0:
        for i in range(0, len(seq), 3):
            codon = seq[i:i + 3]
            protein+= table[codon]
    return protein
# Filter bcftools/ANNOVAR-annotated variants and rewrite them as CSV rows,
# splitting multi-codon indels into one row per affected codon.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-name', help="Provide sample name.")
    try:
        args = parser.parse_args()
    except:
        # argparse failures raise SystemExit; show usage and stop quietly.
        parser.print_help()
        sys.exit(0)
    sample_name = args.name
    # Each line of variants.txt is tab-separated and carries DP4=, AD= and
    # optionally IMF= key/value fields in its INFO blob.
    for line in open("variants.txt"):
        dp4=line.split("DP4=")[1].split(";")[0]
        ad = line.split("AD=")[1].split(";")[0]
        #allele_ref = int(ad.split(",")[0]) + int(ad.split(",")[1])
        #allele_alt = int(ad.split(",")[2]) + int(ad.split(",")[3])
        allele_ref = int(ad.split(",")[0])
        # DP4 fields 3 and 4 count alt-supporting reads; when both are zero
        # the alt allele is treated as absent regardless of AD.
        if (int(dp4.split(",")[2]) + int(dp4.split(",")[3])) == 0:
            allele_alt = 0
        else:
            allele_alt = int(ad.split(",")[1])
        fixed_depth = int(dp4.split(",")[0]) + int(dp4.split(",")[1]) + int(dp4.split(",")[2]) + int(dp4.split(",")[3])
        if (allele_ref + allele_alt) > 0 and allele_alt > 0:
            # Prefer the caller-reported indel fraction (IMF) when present.
            if("IMF" in line):
                af = float(line.split("IMF=")[1].split(";")[0])
            else:
                af = allele_alt / fixed_depth
            type = line.split("\t")[1]  # NOTE(review): shadows the builtin `type`
            # Keep variants with >=1% AF, non-synonymous, a short ref allele,
            # and a usable (non-wholegene) gene annotation.
            if(af >= 0.01 and type!="synonymous SNV" and len(line.split("\t")[6])<400 and "wholegene" not in line.split("\t")[2]):
                line_parts = line.split("\t")
                nuc_ref = (line_parts[6])
                nuc_alt = (line_parts[7])
                fixed_aa_change = line_parts[2].split(":p.")[1].split(",")[0]
                fixed_protein = line_parts[2].split(":")[1]
                #fixed_depth = int(allele_ref + allele_alt)
                fixed_nuc_change = line_parts[2].split(":c.")[1].split(":")[0]
                # if(line_parts[12].rstrip()=="-"):
                # mat_peptide = ""
                # mat_peptide_nuc_change = ""
                # mat_peptide_aa_change = ""
                # else:
                # mat_peptide = line_parts[12].split(":")[0]
                # mat_peptide_nuc_change = line_parts[12].split(":")[1].rstrip().split(";")[0].strip()
                # mat_peptide_aa_change = line_parts[12].split(":")[1].rstrip().split(";")[1].strip()
                #visualization.write("Sample,Position,Protein,AAChange,NucleotideChange,AlleleFreq,Depth,Type,MatPeptide,MatPeptideAAChange,MatPeptideNucChange\n"
                # Extract the leading nucleotide position from the c. change string.
                nuc = fixed_nuc_change
                if '_' in nuc:
                    nuc_num = nuc.split("_")[0]
                elif 'del' in nuc or 'dup' in nuc:
                    nuc_num = int(nuc[0:-4])
                elif type == "frameshift insertion" or "ins" in nuc:
                    nuc_num = int(nuc.split("_")[0])
                else:
                    nuc_num = int(nuc[1:-1])
                nuc_num=str(nuc_num)
                # Time to break up some indels into multiple lines
                if(type=="nonframeshift deletion"):
                    # Emit one CSV row per deleted codon.
                    translated_ref = translate(nuc_ref)
                    aa_start_pos = int(fixed_aa_change.split("_")[0])  # assumes a numeric prefix before '_' -- TODO confirm annotation format
                    nuc_pos = int(line_parts[4])
                    for codon in range(3,len(nuc_ref) + 1, 3):
                        split_nuc_ref = (nuc_ref[codon-3:codon])
                        split_amino_ref = (translated_ref[int(codon/3)-1])
                        nuc_change = split_nuc_ref + str(nuc_pos) + "del"
                        # SAMPLE_ID GENE GENPOS AAPOS AAREF AASUB NUCCHANGE AAFREQ DEPTH TYPE
                        fixed_file.write(sample_name + "," + str(fixed_protein) + "," + str(nuc_pos) + "," + str(aa_start_pos) + "," + split_amino_ref + "," + "-" + "," + nuc_change + "," + str(af) + "," + str(fixed_depth) + "," + str(allele_ref) + "," + str(allele_alt) + "," + line_parts[1] + "," + str(nuc_num) + "\n")#+ "," + mat_peptide + "," + mat_peptide_nuc_change + "," + mat_peptide_aa_change + "\n")
                        aa_start_pos += 1
                        nuc_pos +=3
                        nuc_num = int(nuc_num) + 3
                elif(type=="nonframeshift insertion"):
                    translated_alt = translate(nuc_alt)
                    aa_start_pos = fixed_aa_change.split("delins")[0][1:]
                    split_amino_ref = fixed_aa_change[0]
                    split_amino_alt = fixed_aa_change.split("delins")[1]
                    nuc_change = line_parts[4] + "ins" + fixed_nuc_change.split("ins")[1]
                    # SAMPLE_ID GENE GENPOS AAPOS AAREF AASUB NUCCHANGE AAFREQ DEPTH TYPE
                    fixed_file.write(sample_name + "," + str(fixed_protein) + "," + line_parts[4] + "," + str(aa_start_pos) + "," + split_amino_ref + "," + split_amino_alt + "," + nuc_change + "," + str(af) + "," + str(fixed_depth) + "," + str(allele_ref) + "," + str(allele_alt) + "," + line_parts[1] + "," + nuc_num + "\n")#+ "," + mat_peptide + "," + mat_peptide_nuc_change + "," + mat_peptide_aa_change + "\n")
                elif(type == "frameshift insertion"):
                    split_amino_ref = fixed_aa_change[0]
                    aa_start_pos = fixed_aa_change.split("fs")[0][1:]
                    split_amino_alt = "fs"
                    # Insertions annotated as duplications use the "dup" notation.
                    if("dup" in nuc):
                        nuc_change = line_parts[4] + "dup" + fixed_nuc_change.split("dup")[1]
                    else:
                        nuc_change = line_parts[4] + "ins" + fixed_nuc_change.split("ins")[1]
                    # SAMPLE_ID GENE GENPOS AAPOS AAREF AASUB NUCCHANGE AAFREQ DEPTH TYPE
                    fixed_file.write(sample_name + "," + str(fixed_protein) + "," + line_parts[4] + "," + str(aa_start_pos) + "," + split_amino_ref + "," + split_amino_alt + "," + nuc_change + "," + str(af) + "," + str(fixed_depth) + "," + str(allele_ref) + "," + str(allele_alt) + "," + line_parts[1] + "," + nuc_num + "\n")#+ "," + mat_peptide + "," + mat_peptide_nuc_change + "," + mat_peptide_aa_change + "\n")
                elif(type == "frameshift deletion"):
                    split_amino_ref = fixed_aa_change[0]
                    aa_start_pos = fixed_aa_change.split("fs")[0][1:]
                    split_amino_alt = "fs"
                    nuc_change = nuc_ref + line_parts[4] + "del"
                    # SAMPLE_ID GENE GENPOS AAPOS AAREF AASUB NUCCHANGE AAFREQ DEPTH TYPE
                    fixed_file.write(sample_name + "," + str(fixed_protein) + "," + line_parts[4] + "," + str(aa_start_pos) + "," + split_amino_ref + "," + split_amino_alt + "," + nuc_change + "," + str(af) + "," + str(fixed_depth) + "," + str(allele_ref) + "," + str(allele_alt) + "," + line_parts[1] + "," + nuc_num + "\n")#+ "," + mat_peptide + "," + mat_peptide_nuc_change + "," + mat_peptide_aa_change + "\n")
                else:
                    # Plain substitution (relabelled "stoploss" when the
                    # reference amino acid is a stop, i.e. 'X').
                    split_amino_ref = fixed_aa_change[0]
                    split_amino_alt = fixed_aa_change[-1]
                    aa_start_pos = fixed_aa_change[1:-1]
                    nuc_change = nuc_ref + line_parts[4] + nuc_alt
                    if(split_amino_ref=="X"):
                        type = "stoploss"
                    else:
                        type = line_parts[1]
                    print(sample_name)
                    print(fixed_protein)
                    print(line_parts[4])
                    # SAMPLE_ID GENE GENPOS AAPOS AAREF AASUB NUCCHANGE AAFREQ DEPTH TYPE
fixed_file.write(sample_name + "," + str(fixed_protein) + "," + line_parts[4] + "," + str(aa_start_pos) + "," + split_amino_ref + "," + split_amino_alt + "," + nuc_change + "," + str(af) + "," + str(fixed_depth) + "," + str(allele_ref) + "," + str(allele_alt) + "," + type + "," + nuc_num + "\n")#+ "," + mat_peptide + "," + mat_peptide_nuc_change + "," + mat_peptide_aa_change + "\n") | 0.079523 | 0.218544 |
import numpy as np
from numpy.testing import assert_almost_equal, assert_allclose
import pytest
from km3pipe.testing import TestCase
from km3pipe import Table
from km3pipe.math import (
angle_between,
dist,
pld3,
com,
zenith,
azimuth,
Polygon,
IrregularPrism,
rotation_matrix,
spherecutmask,
spherecut,
SparseCone,
space_angle,
hsin,
phi,
theta,
unit_vector,
log_b,
qeuler,
qrot,
qrot_yaw,
intersect_3d,
)
__author__ = ["<NAME>", "<NAME>"]
__copyright__ = "Copyright 2016, KM3Pipe devs and the KM3NeT collaboration."
__credits__ = ["<NAME>"]
__license__ = "MIT"
__maintainer__ = ["<NAME>", "<NAME>"]
__email__ = "<EMAIL>"
__status__ = "Development"
class TestMath(TestCase):
    """Unit tests for the scalar/vector angle helpers in km3pipe.math."""

    def setUp(self):
        # self.vecs = np.array([[0., 1., 5.],
        # [1., 1., 4.],
        # [2., 1., 3.],
        # [3., 1., 2.],
        # [4., 1., 1.]])
        # self.v = (1, 2, 3)
        # One unit vector and a batch of unit vectors reused by most tests.
        self.v = np.array([0.26726124, 0.53452248, 0.80178373])
        self.vecs = np.array(
            [
                [0.0, 0.19611614, 0.98058068],
                [0.23570226, 0.23570226, 0.94280904],
                [0.53452248, 0.26726124, 0.80178373],
                [0.80178373, 0.26726124, 0.53452248],
                [0.94280904, 0.23570226, 0.23570226],
            ]
        )

    def test_phi(self):
        print(phi((1, 0, 0)))
        assert_almost_equal(0, phi((1, 0, 0)))
        assert_almost_equal(np.pi, phi((-1, 0, 0)))
        assert_almost_equal(np.pi / 2, phi((0, 1, 0)))
        assert_almost_equal(np.pi / 2 * 3, phi((0, -1, 0)))
        assert_almost_equal(np.pi / 2 * 3, phi((0, -1, 0)))
        assert_almost_equal(0, phi((0, 0, 0)))
        assert_almost_equal(phi(self.v), 1.10714872)
        assert_almost_equal(
            phi(self.vecs),
            np.array([1.57079633, 0.78539816, 0.46364761, 0.32175055, 0.24497866]),
        )

    def test_zenith(self):
        assert_allclose(np.pi, zenith((0, 0, 1)))
        assert_allclose(0, zenith((0, 0, -1)))
        assert_allclose(np.pi / 2, zenith((0, 1, 0)))
        assert_allclose(np.pi / 2, zenith((0, -1, 0)))
        assert_allclose(np.pi / 4 * 3, zenith((0, 1, 1)))
        assert_allclose(np.pi / 4 * 3, zenith((0, -1, 1)))
        assert_almost_equal(zenith(self.v), 2.5010703409103687)
        assert_allclose(
            zenith(self.vecs),
            np.array([2.94419709, 2.80175574, 2.50107034, 2.13473897, 1.80873745]),
        )

    def test_azimuth(self):
        self.assertTrue(np.allclose(np.pi, azimuth((1, 0, 0))))
        self.assertTrue(np.allclose(0, azimuth((-1, 0, 0))))
        print(azimuth((0, 1, 0)))
        print(azimuth((0, -1, 0)))
        print(azimuth((0, 0, 0)))
        print(azimuth(self.v))
        print(azimuth(self.vecs))
        self.assertTrue(np.allclose(np.pi / 2 * 3, azimuth((0, 1, 0))))
        self.assertTrue(np.allclose(np.pi / 2, azimuth((0, -1, 0))))
        self.assertTrue(np.allclose(np.pi, azimuth((0, 0, 0))))
        self.assertTrue(np.allclose(azimuth(self.v), 4.24874137138))
        self.assertTrue(
            np.allclose(
                azimuth(self.vecs),
                np.array([4.71238898, 3.92699082, 3.60524026, 3.46334321, 3.38657132]),
            )
        )

    def test_theta(self):
        print(theta((0, 0, -1)))
        print(theta((0, 0, 1)))
        print(theta((0, 1, 0)))
        print(theta((0, -1, 0)))
        print(theta((0, 1, 1)))
        print(theta((0, -1, 1)))
        print(theta(self.v))
        print(theta(self.vecs))
        self.assertTrue(np.allclose(0, theta((0, 0, 1))))
        self.assertTrue(np.allclose(np.pi, theta((0, 0, -1))))
        self.assertTrue(np.allclose(np.pi / 2, theta((0, 1, 0))))
        self.assertTrue(np.allclose(np.pi / 2, theta((0, -1, 0))))
        self.assertTrue(np.allclose(0, theta((0, 1, 1))))
        self.assertTrue(np.allclose(0, theta((0, -1, 1))))
        self.assertTrue(np.allclose(theta(self.v), 0.64052231))
        self.assertTrue(
            np.allclose(
                theta(self.vecs),
                np.array([0.19739554, 0.33983691, 0.64052231, 1.00685369, 1.3328552]),
            )
        )

    def test_unit_vector(self):
        v1 = (1, 0, 0)
        v2 = (1, 1, 0)
        v3 = (-1, 2, 0)
        assert np.allclose(v1, unit_vector(v1))
        assert np.allclose(np.array(v2) / np.sqrt(2), unit_vector(v2))
        assert np.allclose(np.array(v3) / np.sqrt(5), unit_vector(v3))

    def test_angle_between(self):
        v1 = (1, 0, 0)
        v2 = (0, 1, 0)
        v3 = (-1, 0, 0)
        self.assertAlmostEqual(0, angle_between(v1, v1))
        self.assertAlmostEqual(np.pi / 2, angle_between(v1, v2))
        self.assertAlmostEqual(np.pi, angle_between(v1, v3))
        self.assertAlmostEqual(angle_between(self.v, v1), 1.3002465638163236)
        self.assertAlmostEqual(angle_between(self.v, v2), 1.0068536854342678)
        self.assertAlmostEqual(angle_between(self.v, v3), 1.8413460897734695)
        # Batched (axis=1) variants must agree with the pairwise results.
        assert np.allclose(
            [0.0, 0.0, 0.0]
            - angle_between(np.array([v1, v2, v3]), np.array([v1, v2, v3]), axis=1),
            0,
        )
        assert np.allclose(
            [np.pi / 2, np.pi]
            - angle_between(np.array([v1, v1]), np.array([v2, v3]), axis=1),
            0,
        )
        self.assertTrue(
            np.allclose(
                angle_between(self.vecs, v1),
                np.array([1.57079633, 1.3328552, 1.0068537, 0.64052231, 0.33983691]),
            )
        )
        self.assertTrue(
            np.allclose(
                angle_between(self.vecs, v2),
                np.array([1.37340077, 1.3328552, 1.3002466, 1.30024656, 1.3328552]),
            )
        )
        self.assertTrue(
            np.allclose(
                angle_between(self.vecs, v3),
                np.array([1.57079633, 1.80873745, 2.13473897, 2.50107034, 2.80175574]),
            )
        )

    def test_angle_between_returns_nan_for_zero_length_vectors(self):
        v1 = (0, 0, 0)
        v2 = (1, 0, 0)
        # Normalising a zero-length vector emits a RuntimeWarning and yields NaN.
        with pytest.warns(RuntimeWarning):
            self.assertTrue(np.isnan(angle_between(v1, v2)))

    def test_space_angle(self):
        p1 = (np.pi / 2, np.pi)
        p2 = (np.pi, 0)
        self.assertAlmostEqual(
            space_angle(p1[0], p2[0], p1[1], p2[1]), 1.57079632679489
        )
        p3 = (0, np.pi)
        p4 = (np.pi / 2, 0)
        self.assertAlmostEqual(
            space_angle(p3[0], p4[0], p3[1], p4[1]), 1.57079632679489
        )

    def test_hsin(self):
        assert np.all(hsin((np.pi, 0)) == (1, 0))
        self.assertAlmostEqual(hsin(np.pi / 2), 0.5)

    def test_pld3(self):
        # Point-line distance in 3D: pld3(point, line_point, line_direction).
        p1 = np.array((0, 0, 0))
        p2 = np.array((0, 0, 1))
        d2 = np.array((0, 1, 0))
        self.assertAlmostEqual(1, pld3(p1, p2, d2))
        p1 = np.array((0, 0, 0))
        p2 = np.array((0, 0, 2))
        d2 = np.array((0, 1, 0))
        self.assertAlmostEqual(2, pld3(p1, p2, d2))
        p1 = np.array((0, 0, 0))
        p2 = np.array((0, 0, 0))
        d2 = np.array((0, 1, 0))
        self.assertAlmostEqual(0, pld3(p1, p2, d2))
        p1 = np.array((1, 2, 3))
        p2 = np.array((4, 5, 6))
        d2 = np.array((7, 8, 9))
        self.assertAlmostEqual(0.5275893, pld3(p1, p2, d2))
        p1 = np.array((0, 0, 2))
        p2 = np.array((-100, 0, -100))
        d2 = np.array((1, 0, 1))
        self.assertAlmostEqual(1.4142136, pld3(p1, p2, d2))
        p1 = np.array([183.0, -311.0, 351.96083871])
        p2 = np.array([40.256, -639.888, 921.93])
        d2 = np.array([0.185998, 0.476123, -0.859483])
        self.assertAlmostEqual(21.25456308, pld3(p1, p2, d2))

    def test_com(self):
        # Centre of mass, optionally weighted by `masses`.
        center_of_mass = com(((1, 2, 3), (4, 5, 6), (7, 8, 9), (10, 11, 12)))
        self.assertEqual((5.5, 6.5, 7.5), tuple(center_of_mass))
        center_of_mass = com(((1, 2, 3), (4, 5, 6), (7, 8, 9)), masses=(1, 0, 0))
        self.assertEqual((1, 2, 3), tuple(center_of_mass))
        center_of_mass = com(((1, 1, 1), (0, 0, 0)))
        self.assertEqual((0.5, 0.5, 0.5), tuple(center_of_mass))
class TestShapes(TestCase):
    """Containment tests for Polygon and IrregularPrism."""

    def setUp(self):
        # Octagonal footprint shared by all containment tests below.
        self.poly = [
            (-60, 120),
            (80, 120),
            (110, 60),
            (110, -30),
            (70, -110),
            (-70, -110),
            (-90, -70),
            (-90, 60),
        ]

    def test_poly_containment(self):
        polygon = Polygon(self.poly)
        point_in = (-40, -40)
        point_out = (-140, -140)
        points = [
            (-40, -40),
            (-140, -140),
            (40, -140),
        ]
        assert np.all(polygon.contains(point_in))
        assert not np.any(polygon.contains(point_out))
        assert np.all(polygon.contains(points) == [True, False, False])

    def test_poly_xy(self):
        polygon = Polygon(self.poly)
        x = (-40, -140, 40)
        y = (-40, -140, -140)
        assert np.all(polygon.contains_xy(x, y) == [True, False, False])

    def test_prism_contained(self):
        # Prism = polygon footprint extruded between two z planes.
        z = (-90, 90)
        prism = IrregularPrism(self.poly, z[0], z[1])
        points = [
            (0, 1, 2),
            (-100, 20, 10),
            (10, 90, 10),
        ]
        assert np.all(prism.contains(points) == [True, False, True])

    def test_prism_contained_xyz(self):
        z = (-90, 90)
        prism = IrregularPrism(self.poly, z[0], z[1])
        x = (0, -100, 10)
        y = (1, 20, 90)
        z = (2, 10, 10)
        assert np.all(prism.contains_xyz(x, y, z) == [True, False, True])
class TestRotation(TestCase):
    """Tests for rotation_matrix and SparseCone sampling."""

    def test_rotmat(self):
        # Rotate v by 1.2 rad around the given axis and check the result.
        v = [3, 5, 0]
        axis = [4, 4, 1]
        theta = 1.2
        newvec = np.dot(rotation_matrix(axis, theta), v)
        self.assertTrue(
            np.allclose(newvec, np.array([2.74911638, 4.77180932, 1.91629719]))
        )

    def test_cone(self):
        # sample() should return the circle samples plus the two axis points.
        spike = [1, 1, 0]
        bottom = [0, 2, 0]
        angle = np.pi / 4
        n_angles = 20
        cone = SparseCone(spike, bottom, angle)
        circ_samp = cone.sample_circle(n_angles=n_angles)
        axis_samp = cone.sample_axis
        samp = cone.sample(n_angles)
        assert len(circ_samp) == n_angles
        assert len(axis_samp) == 2
        assert len(samp) == len(circ_samp) + 2
class TestSphereCut(TestCase):
    """spherecut/spherecutmask: select points inside a spherical shell [rmin, rmax]."""

    def test_spherecut_mask(self):
        center = (0.0, 0.0, 0.0)
        items = Table(
            {
                "pos_x": [0, 10, 0, 20, 0],
                "pos_y": [10, 0, 0, 0, 30],
                "pos_z": [0, 0, 10, 0, 0],
            }
        )
        rmin = 0.0
        rmax = 10.0
        self.assertListEqual(
            list(spherecutmask(center, rmin, rmax, items)),
            [True, True, True, False, False],
        )

    def test_with_table(self):
        # spherecut on a Table must match boolean indexing with spherecutmask.
        center = (0.0, 0.0, 0.0)
        items = Table(
            {
                "pos_x": [0, 10, 0, 20, 0],
                "pos_y": [10, 0, 0, 0, 30],
                "pos_z": [0, 0, 10, 0, 0],
            }
        )
        rmin = 0.0
        rmax = 10.0
        selected_items = spherecut(center, rmin, rmax, items)
        assert len(selected_items) == 3
        self.assertListEqual(
            list(items[spherecutmask(center, rmin, rmax, items)]), list(selected_items)
        )

    def test_with_array(self):
        # Also works on a plain (n, 3) ndarray of positions.
        center = (0.0, 0.0, 0.0)
        items = np.array([[0, 10, 0], [10, 0, 0], [0, 0, 10], [20, 0, 0], [0, 30, 0]])
        rmin = 0.0
        rmax = 10.0
        selected_items = [list(e) for e in spherecut(center, rmin, rmax, items)]
        assert len(selected_items) == 3
        assert [0, 10, 0] in selected_items
        assert [10, 0, 0] in selected_items
        assert [0, 0, 10] in selected_items

    def test_center(self):
        # Off-origin center shifts which points fall inside the sphere.
        center = (0.0, 10.0, 0.0)
        items = Table(
            {
                "pos_x": [0, 10, 0, 20, 0],
                "pos_y": [10, 0, 0, 0, 30],
                "pos_z": [0, 0, 10, 0, 0],
            }
        )
        rmin = 0.0
        rmax = 15.0
        selected_items = spherecut(center, rmin, rmax, items)
        assert len(selected_items) == 3
        self.assertListEqual(
            list(items[spherecutmask(center, rmin, rmax, items)]), list(selected_items)
        )

    def test_rmin(self):
        # Nonzero rmin excludes points closer than the inner radius.
        center = (0.0, 0.0, 0.0)
        items = np.array([[0, 10, 0], [10, 0, 0], [0, 0, 10], [20, 0, 0], [0, 30, 0]])
        rmin = 20.0
        rmax = 40.0
        selected_items = [list(e) for e in spherecut(center, rmin, rmax, items)]
        assert len(selected_items) == 2
        assert [20, 0, 0] in selected_items
        assert [0, 30, 0] in selected_items
class TestLog(TestCase):
    """log_b must reduce to the numpy reference logarithms."""

    def test_val(self):
        # Table-driven form of the three base checks (2, 10 and e).
        for base, ref_log in ((2, np.log2), (10, np.log10), (np.e, np.log)):
            assert_allclose(log_b(5, base), ref_log(5))
class TestQeuler(TestCase):
    """qeuler: Euler angles in degrees -> quaternion [w, x, y, z]."""

    def test_conversion_of_yaw(self):
        assert np.allclose([1, 0, 0, 0], qeuler(0, 0, 0))
        assert np.allclose([0.7071, 0, 0, 0.7071], qeuler(90, 0, 0))
        assert np.allclose([0, 0, 0, 1], qeuler(180, 0, 0))
        assert np.allclose([-0.7071, 0, 0, 0.7071], qeuler(270, 0, 0))
        assert np.allclose([-1, 0, 0, 0], qeuler(360, 0, 0))

    def test_conversion_of_pitch(self):
        assert np.allclose([0.92388, 0, 0.38268, 0], qeuler(0, 45, 0))
        assert np.allclose([0.92388, 0, -0.38268, 0], qeuler(0, -45, 0))
        assert np.allclose([0.7071, 0, 0.7071, 0], qeuler(0, 90, 0))
        assert np.allclose([0.8660254, 0, 0.5, 0], qeuler(0, 60, 0))
        assert np.allclose([-0.96592583, 0, -0.25881905, 0], qeuler(0, 390, 0))

    def test_conversion_of_roll(self):
        assert np.allclose([0.92388, 0.38268, 0, 0], qeuler(0, 0, 45))
        assert np.allclose([0.92388, -0.38268, 0, 0], qeuler(0, 0, -45))
        assert np.allclose([0.70710, 0.70710, 0, 0], qeuler(0, 0, 90))
        assert np.allclose([0.86602, 0.5, 0, 0], qeuler(0, 0, 60))
        assert np.allclose([-0.96592583, -0.25881905, 0, 0], qeuler(0, 0, 390))

    def test_mixed_conversion(self):
        # Combined yaw/pitch/roll, including out-of-range angles.
        assert np.allclose(
            [0.999471, 0.02601972, 0.01767416, 0.00826538], qeuler(1, 2, 3)
        )
        assert np.allclose(
            [0.94371436, 0.26853582, -0.14487813, 0.12767944], qeuler(10, -20, 30)
        )
        assert np.allclose(
            [-0.16575384, -0.69624819, 0.05479592, -0.69624819], qeuler(-999, 999, -999)
        )
class TestQrot(TestCase):
    """qrot: rotate a vector by a quaternion produced with qeuler."""

    def test_rotation_of_x_vector(self):
        assert np.allclose([0, 1, 0], qrot([1, 0, 0], qeuler(90, 0, 0)))
        assert np.allclose([-1, 0, 0], qrot([1, 0, 0], qeuler(180, 0, 0)))
        assert np.allclose([-1, 0, 0], qrot([1, 0, 0], qeuler(180, 0, -45)))
        assert np.allclose([0, 0, -1], qrot([1, 0, 0], qeuler(180, 90, 45)))

    def test_rotation_of_y_vector(self):
        assert np.allclose([-1, 0, 0], qrot([0, 1, 0], qeuler(90, 0, 0)))
        assert np.allclose([0, -1, 0], qrot([0, 1, 0], qeuler(180, 0, 0)))
        assert np.allclose(
            [0, -0.70710, -0.70710], qrot([0, 1, 0], qeuler(180, 0, -45))
        )
        assert np.allclose(
            [-0.70710, -0.70710, 0], qrot([0, 1, 0], qeuler(180, 90, 45))
        )

    def test_rotation_of_z_vector(self):
        assert np.allclose([0, 0, 1], qrot([0, 0, 1], qeuler(90, 0, 0)))
        assert np.allclose([0, 0, 1], qrot([0, 0, 1], qeuler(180, 0, 0)))
        assert np.allclose([0, -0.70710, 0.70710], qrot([0, 0, 1], qeuler(180, 0, -45)))
        assert np.allclose([-0.70710, 0.70710, 0], qrot([0, 0, 1], qeuler(180, 90, 45)))

    def test_mixed_rotation(self):
        assert np.allclose([1, 2, 3], qrot([1, 2, 3], qeuler(0, 0, 0)))
        assert np.allclose([0, -1.414213, 0], qrot([0, 1, -1], qeuler(180, 90, 45)))
        assert np.allclose([-1.41421356, 0, -1], qrot([1, 1, 1], qeuler(180, 90, 45)))
        assert np.allclose(
            [-14.1421356, 0, -10], qrot([10, 10, 10], qeuler(180, 90, 45))
        )
class TestQrotYaw(TestCase):
    """qrot_yaw: yaw-only (degrees) rotation helper."""

    def test_call_with_list(self):
        # Smoke test: accepts a plain list; no assertion on the result.
        qrot_yaw([1, 2, 3], 1)

    def test_no_rotation(self):
        vec = (1, 0, 0)
        vec_rot = qrot_yaw(vec, 0)
        assert np.allclose([1, 0, 0], vec_rot)

    def test_a_rotation_of_90(self):
        vec = (1, 0, 0)
        vec_rot = qrot_yaw(vec, 90)
        assert np.allclose([0, 1, 0], vec_rot)

    def test_a_rotation_of_180(self):
        vec = (1, 0, 0)
        vec_rot = qrot_yaw(vec, 180)
        assert np.allclose([-1, 0, 0], vec_rot)

    def test_a_full_rotation(self):
        vec = (1, 0, 0)
        vec_rot = qrot_yaw(vec, 360)
        assert np.allclose([1, 0, 0], vec_rot)

    def test_a_rotation_of_45(self):
        vec = (1, 0, 0)
        vec_rot = qrot_yaw(vec, 45)
        assert np.allclose([0.7071, 0.7071, 0], vec_rot)
class TestIntersect3D(TestCase):
    """intersect_3d: best-fit intersection of 3D lines given as point pairs (p1, p2)."""

    def test_intersection_at_zero(self):
        p1 = np.array([(1, 0, 0), (0, 0, 1)])
        p2 = -p1
        intersection = intersect_3d(p1, p2)
        assert np.allclose([0, 0, 0], intersection)

    def test_intersection_of_multiple_lines_with_same_endpoints(self):
        p1 = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)])
        p2 = np.array([(4, 4, 4), (4, 4, 4), (4, 4, 4)])
        intersection = intersect_3d(p1, p2)
        assert np.allclose([4, 4, 4], intersection)

    def test_intersection_of_multiple_lines_with_target(self):
        # Construct lines that all pass through a known target point.
        p1 = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)])
        target = np.array([23, 5, 42])
        p2 = 2 * target - p1
        intersection = intersect_3d(p1, p2)
        assert np.allclose(target, intersection)

    def test_another_intersection(self):
        p1 = np.array([(1, 10, 0), (0, 10, 1)])
        p2 = np.array([(-1, 10, 0), (0, 10, -1)])
        intersection = intersect_3d(p1, p2)
        assert np.allclose([0, 10, 0], intersection)
class TestDist(TestCase):
    """Tests for dist: Euclidean distance between points (scalar and vectorised)."""

    def test_dist_between_two_2D_points(self):
        self.assertAlmostEqual(1, dist(np.array([0, 0]), np.array([1, 0])))
        self.assertAlmostEqual(np.sqrt(2), dist(np.array([0, 1]), np.array([1, 0])))
        self.assertAlmostEqual(2 * np.sqrt(2), dist(np.array([1, 2]), np.array([3, 4])))

    def test_dist_between_two_3D_points(self):
        self.assertAlmostEqual(1, dist(np.array([0, 0, 0]), np.array([1, 0, 0])))
        self.assertAlmostEqual(
            np.sqrt(2), dist(np.array([0, 1, 0]), np.array([1, 0, 0]))
        )
        self.assertAlmostEqual(2, dist(np.array([0, 0, 2]), np.array([0, 0, 0])))
        self.assertAlmostEqual(
            5.1961524, dist(np.array([1, 2, 3]), np.array([4, 5, 6]))
        )

    def test_dist_to_many_points(self):
        # axis=1 computes one distance per row of the second argument.
        assert np.allclose(
            [1, 1, 0, 1.73205081],
            dist(
                np.array([0, 0, 0]),
                np.array([[0, 0, 1], [0, 0, 1], [0, 0, 0], [1, 1, 1]]),
                axis=1,
            ),
        )
import numpy as np
from numpy.testing import assert_almost_equal, assert_allclose
import pytest
from km3pipe.testing import TestCase
from km3pipe import Table
from km3pipe.math import (
angle_between,
dist,
pld3,
com,
zenith,
azimuth,
Polygon,
IrregularPrism,
rotation_matrix,
spherecutmask,
spherecut,
SparseCone,
space_angle,
hsin,
phi,
theta,
unit_vector,
log_b,
qeuler,
qrot,
qrot_yaw,
intersect_3d,
)
# Module metadata (values kept exactly as shipped).
__author__ = ["<NAME>", "<NAME>"]
__copyright__ = "Copyright 2016, KM3Pipe devs and the KM3NeT collaboration."
__credits__ = ["<NAME>"]
__license__ = "MIT"
__maintainer__ = ["<NAME>", "<NAME>"]
__email__ = "<EMAIL>"
__status__ = "Development"


class TestMath(TestCase):
    """Tests for the scalar/vectorised angle helpers (phi, zenith, azimuth, theta, ...)."""

    def setUp(self):
        # self.vecs = np.array([[0., 1., 5.],
        #                       [1., 1., 4.],
        #                       [2., 1., 3.],
        #                       [3., 1., 2.],
        #                       [4., 1., 1.]])
        # self.v = (1, 2, 3)
        self.v = np.array([0.26726124, 0.53452248, 0.80178373])
        self.vecs = np.array(
            [
                [0.0, 0.19611614, 0.98058068],
                [0.23570226, 0.23570226, 0.94280904],
                [0.53452248, 0.26726124, 0.80178373],
                [0.80178373, 0.26726124, 0.53452248],
                [0.94280904, 0.23570226, 0.23570226],
            ]
        )

    def test_phi(self):
        print(phi((1, 0, 0)))
        assert_almost_equal(0, phi((1, 0, 0)))
        assert_almost_equal(np.pi, phi((-1, 0, 0)))
        assert_almost_equal(np.pi / 2, phi((0, 1, 0)))
        assert_almost_equal(np.pi / 2 * 3, phi((0, -1, 0)))
        assert_almost_equal(np.pi / 2 * 3, phi((0, -1, 0)))
        assert_almost_equal(0, phi((0, 0, 0)))
        assert_almost_equal(phi(self.v), 1.10714872)
        assert_almost_equal(
            phi(self.vecs),
            np.array([1.57079633, 0.78539816, 0.46364761, 0.32175055, 0.24497866]),
        )

    def test_zenith(self):
        assert_allclose(np.pi, zenith((0, 0, 1)))
        assert_allclose(0, zenith((0, 0, -1)))
        assert_allclose(np.pi / 2, zenith((0, 1, 0)))
        assert_allclose(np.pi / 2, zenith((0, -1, 0)))
        assert_allclose(np.pi / 4 * 3, zenith((0, 1, 1)))
        assert_allclose(np.pi / 4 * 3, zenith((0, -1, 1)))
        assert_almost_equal(zenith(self.v), 2.5010703409103687)
        assert_allclose(
            zenith(self.vecs),
            np.array([2.94419709, 2.80175574, 2.50107034, 2.13473897, 1.80873745]),
        )

    def test_azimuth(self):
        self.assertTrue(np.allclose(np.pi, azimuth((1, 0, 0))))
        self.assertTrue(np.allclose(0, azimuth((-1, 0, 0))))
        print(azimuth((0, 1, 0)))
        print(azimuth((0, -1, 0)))
        print(azimuth((0, 0, 0)))
        print(azimuth(self.v))
        print(azimuth(self.vecs))
        self.assertTrue(np.allclose(np.pi / 2 * 3, azimuth((0, 1, 0))))
        self.assertTrue(np.allclose(np.pi / 2, azimuth((0, -1, 0))))
        self.assertTrue(np.allclose(np.pi, azimuth((0, 0, 0))))
        self.assertTrue(np.allclose(azimuth(self.v), 4.24874137138))
        self.assertTrue(
            np.allclose(
                azimuth(self.vecs),
                np.array([4.71238898, 3.92699082, 3.60524026, 3.46334321, 3.38657132]),
            )
        )

    def test_theta(self):
        print(theta((0, 0, -1)))
        print(theta((0, 0, 1)))
        print(theta((0, 1, 0)))
        print(theta((0, -1, 0)))
        print(theta((0, 1, 1)))
        print(theta((0, -1, 1)))
        print(theta(self.v))
        print(theta(self.vecs))
        self.assertTrue(np.allclose(0, theta((0, 0, 1))))
        self.assertTrue(np.allclose(np.pi, theta((0, 0, -1))))
        self.assertTrue(np.allclose(np.pi / 2, theta((0, 1, 0))))
        self.assertTrue(np.allclose(np.pi / 2, theta((0, -1, 0))))
        self.assertTrue(np.allclose(0, theta((0, 1, 1))))
        self.assertTrue(np.allclose(0, theta((0, -1, 1))))
        self.assertTrue(np.allclose(theta(self.v), 0.64052231))
        self.assertTrue(
            np.allclose(
                theta(self.vecs),
                np.array([0.19739554, 0.33983691, 0.64052231, 1.00685369, 1.3328552]),
            )
        )

    def test_unit_vector(self):
        v1 = (1, 0, 0)
        v2 = (1, 1, 0)
        v3 = (-1, 2, 0)
        assert np.allclose(v1, unit_vector(v1))
        assert np.allclose(np.array(v2) / np.sqrt(2), unit_vector(v2))
        assert np.allclose(np.array(v3) / np.sqrt(5), unit_vector(v3))

    def test_angle_between(self):
        v1 = (1, 0, 0)
        v2 = (0, 1, 0)
        v3 = (-1, 0, 0)
        self.assertAlmostEqual(0, angle_between(v1, v1))
        self.assertAlmostEqual(np.pi / 2, angle_between(v1, v2))
        self.assertAlmostEqual(np.pi, angle_between(v1, v3))
        self.assertAlmostEqual(angle_between(self.v, v1), 1.3002465638163236)
        self.assertAlmostEqual(angle_between(self.v, v2), 1.0068536854342678)
        self.assertAlmostEqual(angle_between(self.v, v3), 1.8413460897734695)
        assert np.allclose(
            [0.0, 0.0, 0.0]
            - angle_between(np.array([v1, v2, v3]), np.array([v1, v2, v3]), axis=1),
            0,
        )
        assert np.allclose(
            [np.pi / 2, np.pi]
            - angle_between(np.array([v1, v1]), np.array([v2, v3]), axis=1),
            0,
        )
        self.assertTrue(
            np.allclose(
                angle_between(self.vecs, v1),
                np.array([1.57079633, 1.3328552, 1.0068537, 0.64052231, 0.33983691]),
            )
        )
        self.assertTrue(
            np.allclose(
                angle_between(self.vecs, v2),
                np.array([1.37340077, 1.3328552, 1.3002466, 1.30024656, 1.3328552]),
            )
        )
        self.assertTrue(
            np.allclose(
                angle_between(self.vecs, v3),
                np.array([1.57079633, 1.80873745, 2.13473897, 2.50107034, 2.80175574]),
            )
        )

    def test_angle_between_returns_nan_for_zero_length_vectors(self):
        v1 = (0, 0, 0)
        v2 = (1, 0, 0)
        # Normalising a zero vector emits a RuntimeWarning and yields NaN.
        with pytest.warns(RuntimeWarning):
            self.assertTrue(np.isnan(angle_between(v1, v2)))

    def test_space_angle(self):
        p1 = (np.pi / 2, np.pi)
        p2 = (np.pi, 0)
        self.assertAlmostEqual(
            space_angle(p1[0], p2[0], p1[1], p2[1]), 1.57079632679489
        )
        p3 = (0, np.pi)
        p4 = (np.pi / 2, 0)
        self.assertAlmostEqual(
            space_angle(p3[0], p4[0], p3[1], p4[1]), 1.57079632679489
        )

    def test_hsin(self):
        assert np.all(hsin((np.pi, 0)) == (1, 0))
        self.assertAlmostEqual(hsin(np.pi / 2), 0.5)

    def test_pld3(self):
        # Point-line distance in 3D: pld3(point, line_point, line_direction).
        p1 = np.array((0, 0, 0))
        p2 = np.array((0, 0, 1))
        d2 = np.array((0, 1, 0))
        self.assertAlmostEqual(1, pld3(p1, p2, d2))
        p1 = np.array((0, 0, 0))
        p2 = np.array((0, 0, 2))
        d2 = np.array((0, 1, 0))
        self.assertAlmostEqual(2, pld3(p1, p2, d2))
        p1 = np.array((0, 0, 0))
        p2 = np.array((0, 0, 0))
        d2 = np.array((0, 1, 0))
        self.assertAlmostEqual(0, pld3(p1, p2, d2))
        p1 = np.array((1, 2, 3))
        p2 = np.array((4, 5, 6))
        d2 = np.array((7, 8, 9))
        self.assertAlmostEqual(0.5275893, pld3(p1, p2, d2))
        p1 = np.array((0, 0, 2))
        p2 = np.array((-100, 0, -100))
        d2 = np.array((1, 0, 1))
        self.assertAlmostEqual(1.4142136, pld3(p1, p2, d2))
        p1 = np.array([183.0, -311.0, 351.96083871])
        p2 = np.array([40.256, -639.888, 921.93])
        d2 = np.array([0.185998, 0.476123, -0.859483])
        self.assertAlmostEqual(21.25456308, pld3(p1, p2, d2))

    def test_com(self):
        center_of_mass = com(((1, 2, 3), (4, 5, 6), (7, 8, 9), (10, 11, 12)))
        self.assertEqual((5.5, 6.5, 7.5), tuple(center_of_mass))
        center_of_mass = com(((1, 2, 3), (4, 5, 6), (7, 8, 9)), masses=(1, 0, 0))
        self.assertEqual((1, 2, 3), tuple(center_of_mass))
        center_of_mass = com(((1, 1, 1), (0, 0, 0)))
        self.assertEqual((0.5, 0.5, 0.5), tuple(center_of_mass))


class TestShapes(TestCase):
    """Tests for Polygon and IrregularPrism containment checks."""

    def setUp(self):
        self.poly = [
            (-60, 120),
            (80, 120),
            (110, 60),
            (110, -30),
            (70, -110),
            (-70, -110),
            (-90, -70),
            (-90, 60),
        ]

    def test_poly_containment(self):
        polygon = Polygon(self.poly)
        point_in = (-40, -40)
        point_out = (-140, -140)
        points = [
            (-40, -40),
            (-140, -140),
            (40, -140),
        ]
        assert np.all(polygon.contains(point_in))
        assert not np.any(polygon.contains(point_out))
        assert np.all(polygon.contains(points) == [True, False, False])

    def test_poly_xy(self):
        polygon = Polygon(self.poly)
        x = (-40, -140, 40)
        y = (-40, -140, -140)
        assert np.all(polygon.contains_xy(x, y) == [True, False, False])

    def test_prism_contained(self):
        z = (-90, 90)
        prism = IrregularPrism(self.poly, z[0], z[1])
        points = [
            (0, 1, 2),
            (-100, 20, 10),
            (10, 90, 10),
        ]
        assert np.all(prism.contains(points) == [True, False, True])

    def test_prism_contained_xyz(self):
        z = (-90, 90)
        prism = IrregularPrism(self.poly, z[0], z[1])
        x = (0, -100, 10)
        y = (1, 20, 90)
        z = (2, 10, 10)
        assert np.all(prism.contains_xyz(x, y, z) == [True, False, True])


class TestRotation(TestCase):
    """Tests for rotation_matrix and SparseCone sampling."""

    def test_rotmat(self):
        v = [3, 5, 0]
        axis = [4, 4, 1]
        theta = 1.2
        newvec = np.dot(rotation_matrix(axis, theta), v)
        self.assertTrue(
            np.allclose(newvec, np.array([2.74911638, 4.77180932, 1.91629719]))
        )

    def test_cone(self):
        spike = [1, 1, 0]
        bottom = [0, 2, 0]
        angle = np.pi / 4
        n_angles = 20
        cone = SparseCone(spike, bottom, angle)
        circ_samp = cone.sample_circle(n_angles=n_angles)
        axis_samp = cone.sample_axis
        samp = cone.sample(n_angles)
        assert len(circ_samp) == n_angles
        assert len(axis_samp) == 2
        # Full sample = circle samples plus the two axis points.
        assert len(samp) == len(circ_samp) + 2


class TestSphereCut(TestCase):
    """Tests for spherecut/spherecutmask: radial selection of positions."""

    def test_spherecut_mask(self):
        center = (0.0, 0.0, 0.0)
        items = Table(
            {
                "pos_x": [0, 10, 0, 20, 0],
                "pos_y": [10, 0, 0, 0, 30],
                "pos_z": [0, 0, 10, 0, 0],
            }
        )
        rmin = 0.0
        rmax = 10.0
        self.assertListEqual(
            list(spherecutmask(center, rmin, rmax, items)),
            [True, True, True, False, False],
        )

    def test_with_table(self):
        center = (0.0, 0.0, 0.0)
        items = Table(
            {
                "pos_x": [0, 10, 0, 20, 0],
                "pos_y": [10, 0, 0, 0, 30],
                "pos_z": [0, 0, 10, 0, 0],
            }
        )
        rmin = 0.0
        rmax = 10.0
        selected_items = spherecut(center, rmin, rmax, items)
        assert len(selected_items) == 3
        self.assertListEqual(
            list(items[spherecutmask(center, rmin, rmax, items)]), list(selected_items)
        )

    def test_with_array(self):
        center = (0.0, 0.0, 0.0)
        items = np.array([[0, 10, 0], [10, 0, 0], [0, 0, 10], [20, 0, 0], [0, 30, 0]])
        rmin = 0.0
        rmax = 10.0
        selected_items = [list(e) for e in spherecut(center, rmin, rmax, items)]
        assert len(selected_items) == 3
        assert [0, 10, 0] in selected_items
        assert [10, 0, 0] in selected_items
        assert [0, 0, 10] in selected_items

    def test_center(self):
        center = (0.0, 10.0, 0.0)
        items = Table(
            {
                "pos_x": [0, 10, 0, 20, 0],
                "pos_y": [10, 0, 0, 0, 30],
                "pos_z": [0, 0, 10, 0, 0],
            }
        )
        rmin = 0.0
        rmax = 15.0
        selected_items = spherecut(center, rmin, rmax, items)
        assert len(selected_items) == 3
        self.assertListEqual(
            list(items[spherecutmask(center, rmin, rmax, items)]), list(selected_items)
        )

    def test_rmin(self):
        center = (0.0, 0.0, 0.0)
        items = np.array([[0, 10, 0], [10, 0, 0], [0, 0, 10], [20, 0, 0], [0, 30, 0]])
        rmin = 20.0
        rmax = 40.0
        selected_items = [list(e) for e in spherecut(center, rmin, rmax, items)]
        assert len(selected_items) == 2
        assert [20, 0, 0] in selected_items
        assert [0, 30, 0] in selected_items


class TestLog(TestCase):
    """Tests for log_b: logarithm to an arbitrary base."""

    def test_val(self):
        assert_allclose(log_b(5, 2), np.log2(5))
        assert_allclose(log_b(5, 10), np.log10(5))
        assert_allclose(log_b(5, np.e), np.log(5))


class TestQeuler(TestCase):
    """Tests for qeuler: Euler angles (yaw, pitch, roll) -> quaternion."""

    def test_conversion_of_yaw(self):
        assert np.allclose([1, 0, 0, 0], qeuler(0, 0, 0))
        assert np.allclose([0.7071, 0, 0, 0.7071], qeuler(90, 0, 0))
        assert np.allclose([0, 0, 0, 1], qeuler(180, 0, 0))
        assert np.allclose([-0.7071, 0, 0, 0.7071], qeuler(270, 0, 0))
        assert np.allclose([-1, 0, 0, 0], qeuler(360, 0, 0))

    def test_conversion_of_pitch(self):
        assert np.allclose([0.92388, 0, 0.38268, 0], qeuler(0, 45, 0))
        assert np.allclose([0.92388, 0, -0.38268, 0], qeuler(0, -45, 0))
        assert np.allclose([0.7071, 0, 0.7071, 0], qeuler(0, 90, 0))
        assert np.allclose([0.8660254, 0, 0.5, 0], qeuler(0, 60, 0))
        assert np.allclose([-0.96592583, 0, -0.25881905, 0], qeuler(0, 390, 0))

    def test_conversion_of_roll(self):
        assert np.allclose([0.92388, 0.38268, 0, 0], qeuler(0, 0, 45))
        assert np.allclose([0.92388, -0.38268, 0, 0], qeuler(0, 0, -45))
        assert np.allclose([0.70710, 0.70710, 0, 0], qeuler(0, 0, 90))
        assert np.allclose([0.86602, 0.5, 0, 0], qeuler(0, 0, 60))
        assert np.allclose([-0.96592583, -0.25881905, 0, 0], qeuler(0, 0, 390))

    def test_mixed_conversion(self):
        assert np.allclose(
            [0.999471, 0.02601972, 0.01767416, 0.00826538], qeuler(1, 2, 3)
        )
        assert np.allclose(
            [0.94371436, 0.26853582, -0.14487813, 0.12767944], qeuler(10, -20, 30)
        )
        assert np.allclose(
            [-0.16575384, -0.69624819, 0.05479592, -0.69624819], qeuler(-999, 999, -999)
        )


class TestQrot(TestCase):
    """Tests for qrot: rotate a vector by a quaternion."""

    def test_rotation_of_x_vector(self):
        assert np.allclose([0, 1, 0], qrot([1, 0, 0], qeuler(90, 0, 0)))
        assert np.allclose([-1, 0, 0], qrot([1, 0, 0], qeuler(180, 0, 0)))
        assert np.allclose([-1, 0, 0], qrot([1, 0, 0], qeuler(180, 0, -45)))
        assert np.allclose([0, 0, -1], qrot([1, 0, 0], qeuler(180, 90, 45)))

    def test_rotation_of_y_vector(self):
        assert np.allclose([-1, 0, 0], qrot([0, 1, 0], qeuler(90, 0, 0)))
        assert np.allclose([0, -1, 0], qrot([0, 1, 0], qeuler(180, 0, 0)))
        assert np.allclose(
            [0, -0.70710, -0.70710], qrot([0, 1, 0], qeuler(180, 0, -45))
        )
        assert np.allclose(
            [-0.70710, -0.70710, 0], qrot([0, 1, 0], qeuler(180, 90, 45))
        )

    def test_rotation_of_z_vector(self):
        assert np.allclose([0, 0, 1], qrot([0, 0, 1], qeuler(90, 0, 0)))
        assert np.allclose([0, 0, 1], qrot([0, 0, 1], qeuler(180, 0, 0)))
        assert np.allclose([0, -0.70710, 0.70710], qrot([0, 0, 1], qeuler(180, 0, -45)))
        assert np.allclose([-0.70710, 0.70710, 0], qrot([0, 0, 1], qeuler(180, 90, 45)))

    def test_mixed_rotation(self):
        assert np.allclose([1, 2, 3], qrot([1, 2, 3], qeuler(0, 0, 0)))
        assert np.allclose([0, -1.414213, 0], qrot([0, 1, -1], qeuler(180, 90, 45)))
        assert np.allclose([-1.41421356, 0, -1], qrot([1, 1, 1], qeuler(180, 90, 45)))
        assert np.allclose(
            [-14.1421356, 0, -10], qrot([10, 10, 10], qeuler(180, 90, 45))
        )


class TestQrotYaw(TestCase):
    """Tests for qrot_yaw: rotation of a vector around the z axis by a yaw angle."""

    def test_call_with_list(self):
        qrot_yaw([1, 2, 3], 1)

    def test_no_rotation(self):
        vec = (1, 0, 0)
        vec_rot = qrot_yaw(vec, 0)
        assert np.allclose([1, 0, 0], vec_rot)

    def test_a_rotation_of_90(self):
        vec = (1, 0, 0)
        vec_rot = qrot_yaw(vec, 90)
        assert np.allclose([0, 1, 0], vec_rot)

    def test_a_rotation_of_180(self):
        vec = (1, 0, 0)
        vec_rot = qrot_yaw(vec, 180)
        assert np.allclose([-1, 0, 0], vec_rot)

    def test_a_full_rotation(self):
        vec = (1, 0, 0)
        vec_rot = qrot_yaw(vec, 360)
        assert np.allclose([1, 0, 0], vec_rot)

    def test_a_rotation_of_45(self):
        vec = (1, 0, 0)
        vec_rot = qrot_yaw(vec, 45)
        assert np.allclose([0.7071, 0.7071, 0], vec_rot)


class TestIntersect3D(TestCase):
    """Tests for intersect_3d: least-squares intersection of 3D line bundles."""

    def test_intersection_at_zero(self):
        p1 = np.array([(1, 0, 0), (0, 0, 1)])
        p2 = -p1
        intersection = intersect_3d(p1, p2)
        assert np.allclose([0, 0, 0], intersection)

    def test_intersection_of_multiple_lines_with_same_endpoints(self):
        p1 = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)])
        p2 = np.array([(4, 4, 4), (4, 4, 4), (4, 4, 4)])
        intersection = intersect_3d(p1, p2)
        assert np.allclose([4, 4, 4], intersection)

    def test_intersection_of_multiple_lines_with_target(self):
        p1 = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)])
        target = np.array([23, 5, 42])
        p2 = 2 * target - p1
        intersection = intersect_3d(p1, p2)
        assert np.allclose(target, intersection)

    def test_another_intersection(self):
        p1 = np.array([(1, 10, 0), (0, 10, 1)])
        p2 = np.array([(-1, 10, 0), (0, 10, -1)])
        intersection = intersect_3d(p1, p2)
        assert np.allclose([0, 10, 0], intersection)


class TestDist(TestCase):
    """Tests for dist: Euclidean distance between points (scalar and vectorised)."""

    def test_dist_between_two_2D_points(self):
        self.assertAlmostEqual(1, dist(np.array([0, 0]), np.array([1, 0])))
        self.assertAlmostEqual(np.sqrt(2), dist(np.array([0, 1]), np.array([1, 0])))
        self.assertAlmostEqual(2 * np.sqrt(2), dist(np.array([1, 2]), np.array([3, 4])))

    def test_dist_between_two_3D_points(self):
        self.assertAlmostEqual(1, dist(np.array([0, 0, 0]), np.array([1, 0, 0])))
        self.assertAlmostEqual(
            np.sqrt(2), dist(np.array([0, 1, 0]), np.array([1, 0, 0]))
        )
        self.assertAlmostEqual(2, dist(np.array([0, 0, 2]), np.array([0, 0, 0])))
        self.assertAlmostEqual(
            5.1961524, dist(np.array([1, 2, 3]), np.array([4, 5, 6]))
        )

    def test_dist_to_many_points(self):
        assert np.allclose(
            [1, 1, 0, 1.73205081],
            dist(
                np.array([0, 0, 0]),
                np.array([[0, 0, 1], [0, 0, 1], [0, 0, 0], [1, 1, 1]]),
                axis=1,
            ),
        )
"""
Definition of views.
"""
from datetime import datetime
from django.shortcuts import render
from django.http import HttpRequest
from app.euro_spider import euro_start_scraper
from app.me_spider import me_start_scraper
from app.mm_spider import mm_start_scraper
from app.models import clean_json_files, json_reader, output_lists, advise_checker
import time
import os
import app.utils as utils
from django.http import HttpResponseRedirect
from django.urls import resolve
import ast
import json
def home(request):
    """Render the landing page ('Strona główna')."""
    assert isinstance(request, HttpRequest)
    return render(
        request,
        'app/index.html',
        {
            'title': 'Strona główna',
            'year': datetime.now().year,
        }
    )
def contact(request):
    """Render the contact page ('Kontakt')."""
    assert isinstance(request, HttpRequest)
    return render(
        request,
        'app/contact.html',
        {
            'title': 'Kontakt',
            'message': '',
            'year': datetime.now().year,
        }
    )
def refresh_page(request, model):
    """Drop the cached JSON results for *model* and redirect to re-scrape."""
    print("Model in refresh page")
    print(model)
    clean_json_files(model)
    return HttpResponseRedirect(f"/result/{model}")
def render_page(request, model):
    """Start the three shop scrapers for *model*, wait for output, then redirect.

    If a cached JSON result already exists the scrapers are skipped entirely.
    """
    # print(utils.lookup_file(model))
    print("Model in render page is:")
    print(model)
    if os.path.exists(os.path.join("app", utils.lookup_file(model))):
        print("JSON already exists")
        return HttpResponseRedirect(f"/result/{model}/results")
    # Map the internal model name to the slug used in the shop URLs.
    if model == "rtx3060ti":
        site_model = "3060-ti"
    else:
        site_model = model[3:]
    print(site_model)
    # static url for this specific card model
    euro_url = f"https://www.euro.com.pl/karty-graficzne,typ-chipsetu!geforce-rtx-{site_model}.bhtml"
    me_url = f"https://www.mediaexpert.pl/komputery-i-tablety/podzespoly-komputerowe/karty-graficzne/geforce-rtx_tak/model_geforce-rtx-{site_model}"
    mm_url = f"https://mediamarkt.pl/komputery-i-tablety/akcesoria-komputerowe/czesci-komputerowe/karty-graficzne./uklad-graficzny=geforce-rtx-{site_model}"
    print("Media mark url to be checked")
    print(mm_url)
    mm_start_scraper(mm_url, model)
    me_start_scraper(me_url, model)
    euro_start_scraper(euro_url, model)
    # Poll until the Media Expert spider has written its JSON file.
    # NOTE(review): this blocks the request thread; presumably the spiders
    # run asynchronously -- confirm before changing the wait strategy.
    while True:
        time.sleep(1)
        if os.path.exists(os.path.join("app", utils.lookup_file(model, shop="me"))):
            break
    # time.sleep(1)
    output = output_lists(model)
    # print(output)
    # output.sort(key=lambda x: x['price'], reverse=False)
    return HttpResponseRedirect(f"/result/{model}/results")
def result_page(request, model):
    """Render scraped offers together with historical price statistics."""
    global_path = os.path.join("app", utils.lookup_file(model))
    if os.path.exists(global_path):
        print("JSON already exists RESULTS PAGE")
        # Bug fix: the original leaked the file handle
        # (json.loads(open(global_path).readlines()[0])).  The cache file
        # stores one JSON document on its first line; read it inside a
        # context manager so the handle is always closed.
        with open(global_path) as cache_file:
            output = json.loads(cache_file.readline())
    else:
        output = output_lists(model)
    # print(output)
    # output.sort(key=lambda x: x['price'], reverse=False)
    historical_data = utils.get_model_history(model)
    historical_data["prices"].sort(key=lambda x: x['date'], reverse=False)
    # Ratio > 1 means the current average is below the historical average.
    profitability = historical_data["average_price"] / output["average"]
    profitability = str(round(profitability, 2))
    advise = advise_checker(profitability)
    output["average"] = str(round(output["average"], 2))
    historical_data["average_price"] = str(round(historical_data["average_price"], 2))
    return render(
        request,
        'app/result_template.html',
        {
            'average': output["average"],
            'title': "Wyniki wyszukiwania",
            'item_list': output["results"],
            'history_list': historical_data["prices"],
            'average_history': historical_data["average_price"],
            'profitability': profitability,
            'advise': advise,
            'year': datetime.now().year,
        }
    )
def about(request):
    """Renders the about page ('O nas')."""
    assert isinstance(request, HttpRequest)
    return render(
        request,
        'app/about.html',
        {
            'title': 'O nas',
            'message': 'Opis strony',
            'year': datetime.now().year,
        }
    )
"""
Definition of views.
"""
from datetime import datetime
from django.shortcuts import render
from django.http import HttpRequest
from app.euro_spider import euro_start_scraper
from app.me_spider import me_start_scraper
from app.mm_spider import mm_start_scraper
from app.models import clean_json_files, json_reader, output_lists, advise_checker
import time
import os
import app.utils as utils
from django.http import HttpResponseRedirect
from django.urls import resolve
import ast
import json


def home(request):
    """Render the landing page ('Strona główna')."""
    assert isinstance(request, HttpRequest)
    return render(
        request,
        'app/index.html',
        {
            'title': 'Strona główna',
            'year': datetime.now().year,
        }
    )


def contact(request):
    """Render the contact page ('Kontakt')."""
    assert isinstance(request, HttpRequest)
    return render(
        request,
        'app/contact.html',
        {
            'title': 'Kontakt',
            'message': '',
            'year': datetime.now().year,
        }
    )


def refresh_page(request, model):
    """Drop the cached JSON results for *model* and redirect to re-scrape."""
    print("Model in refresh page")
    print(model)
    clean_json_files(model)
    return HttpResponseRedirect(f"/result/{model}")


def render_page(request, model):
    """Start the three shop scrapers for *model*, wait for output, then redirect."""
    # print(utils.lookup_file(model))
    print("Model in render page is:")
    print(model)
    if os.path.exists(os.path.join("app", utils.lookup_file(model))):
        print("JSON already exists")
        return HttpResponseRedirect(f"/result/{model}/results")
    # Map the internal model name to the slug used in the shop URLs.
    if model == "rtx3060ti":
        site_model = "3060-ti"
    else:
        site_model = model[3:]
    print(site_model)
    # static url for this specific card model
    euro_url = f"https://www.euro.com.pl/karty-graficzne,typ-chipsetu!geforce-rtx-{site_model}.bhtml"
    me_url = f"https://www.mediaexpert.pl/komputery-i-tablety/podzespoly-komputerowe/karty-graficzne/geforce-rtx_tak/model_geforce-rtx-{site_model}"
    mm_url = f"https://mediamarkt.pl/komputery-i-tablety/akcesoria-komputerowe/czesci-komputerowe/karty-graficzne./uklad-graficzny=geforce-rtx-{site_model}"
    print("Media mark url to be checked")
    print(mm_url)
    mm_start_scraper(mm_url, model)
    me_start_scraper(me_url, model)
    euro_start_scraper(euro_url, model)
    # Poll until the Media Expert spider has written its JSON file.
    while True:
        time.sleep(1)
        if os.path.exists(os.path.join("app", utils.lookup_file(model, shop="me"))):
            break
    # time.sleep(1)
    output = output_lists(model)
    # print(output)
    # output.sort(key=lambda x: x['price'], reverse=False)
    return HttpResponseRedirect(f"/result/{model}/results")


def result_page(request, model):
    """Render scraped offers together with historical price statistics."""
    global_path = os.path.join("app", utils.lookup_file(model))
    if os.path.exists(global_path):
        print("JSON already exists RESULTS PAGE")
        # Bug fix: read the single-line JSON cache inside a context manager
        # instead of leaking the handle via open(...).readlines()[0].
        with open(global_path) as cache_file:
            output = json.loads(cache_file.readline())
    else:
        output = output_lists(model)
    # print(output)
    # output.sort(key=lambda x: x['price'], reverse=False)
    historical_data = utils.get_model_history(model)
    historical_data["prices"].sort(key=lambda x: x['date'], reverse=False)
    profitability = historical_data["average_price"] / output["average"]
    profitability = str(round(profitability, 2))
    advise = advise_checker(profitability)
    output["average"] = str(round(output["average"], 2))
    historical_data["average_price"] = str(round(historical_data["average_price"], 2))
    return render(
        request,
        'app/result_template.html',
        {
            'average': output["average"],
            'title': "Wyniki wyszukiwania",
            'item_list': output["results"],
            'history_list': historical_data["prices"],
            'average_history': historical_data["average_price"],
            'profitability': profitability,
            'advise': advise,
            'year': datetime.now().year,
        }
    )


def about(request):
    """Renders the about page ('O nas')."""
    assert isinstance(request, HttpRequest)
    return render(
        request,
        'app/about.html',
        {
            'title': 'O nas',
            'message': 'Opis strony',
            'year': datetime.now().year,
        }
    )
import os
import sys
import prody as pr
#You can either add the python package path.
#sys.path.append(r'/mnt/e/GitHub_Design/Metalprot')
from metalprot import generate_sse as gss
from metalprot import extract_vdm
def generate_c4s(outdir, target_path, metal_sel, pre_flag='target_c4_', x=20, y=10, delta=3):
    """Generate C4-symmetric helix placements for one target structure.

    Scans x rotations in [-x*delta, x*delta] and y rotations in
    [-y*delta, y*delta] (step *delta* degrees) and delegates the actual
    construction to gss._generate_cn_symmetry_helix with n=4.
    """
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    target = pr.parsePDB(target_path)
    x_rotations = []
    y_rotations = []
    for i in range(-x, x + 1):
        x_rotations.append(i * delta)
    for j in range(-y, y + 1):
        # Each y entry is a 4-element rotation spec, one slot per symmetry copy.
        y_rotations.append([j * delta, 0, j * delta, 0])
    gss._generate_cn_symmetry_helix(outdir, target, name=pre_flag, metal_sel=metal_sel, n=4, x_rotations=x_rotations, y_rotations=y_rotations)
'''
### Generate for a special one
workdir = '/mnt/e/DesignData/ligands/CoiledCoil/C4/'
outdir = workdir + 'output_delta1/'
target_path = workdir + 'm3-3_cluster_1_mem_20_centroid_3fms_NI_1_HIS_2.pdb'
name = 'C4_'+ str(count) + '_' + target_path.split('/')[-1].split('centroid_')[-1].split('.')[0]
generate_c4s(outdir, target_path, metal_sel, name, x = 60, y = 30, delta = 1)
'''

### Extract querys
query_dir = '/mnt/e/DesignData/ligands/CU_NI/'
workdir = '/mnt/e/DesignData/ligands/CoiledCoil/C4_all_CYS/'
print(query_dir)
querys = extract_vdm.extract_all_centroid(query_dir, summary_name='_summary.txt', file_name_includes=['cluster', 'M3-3_CSY'], score_cut=0, clu_num_cut=3)
for q in querys:
    phi_180, psi_180, seq = gss.cal_phipsi(q.query)
    dssps = gss.cal_dssp(phi_180, psi_180, seq)
    # Skip query structures that are not predominantly helical ('A' states).
    if len(dssps) > 1 and len([x for x in dssps if x == 'A']) <= 2:
        print(q.query.getTitle() + ' is not helix.')
        continue
    pr.writePDB(workdir + q.query.getTitle(), q.query)

###
workdir = '/mnt/e/DesignData/ligands/CoiledCoil/C4_all_CYS/'
outdir = workdir + 'output_x40_y20/'
metal_sel = 'name NI CU'
if not os.path.exists(outdir):
    os.mkdir(outdir)

### Generate c4s
pdb_paths = []
for pdb in os.listdir(workdir):
    if not pdb.endswith(".pdb"):
        continue
    pdb_paths.append(workdir + pdb)

count = 0
for target_path in pdb_paths:
    name = 'C4_' + str(count) + '_' + target_path.split('/')[-1].split('centroid_')[-1].split('.')[0]
    generate_c4s(outdir, target_path, metal_sel, name)
    count += 1

### write XYZ for CCCP-fitting
pdbs = []
for pdb in os.listdir(outdir):
    if not pdb.endswith(".pdb"):
        continue
    pdbs.append(pr.parsePDB(outdir + pdb))
gss.write_XYZs(outdir, pdbs)
import os
import sys
import prody as pr
# You can either add the python package path.
# sys.path.append(r'/mnt/e/GitHub_Design/Metalprot')
from metalprot import generate_sse as gss
from metalprot import extract_vdm


def generate_c4s(outdir, target_path, metal_sel, pre_flag='target_c4_', x=20, y=10, delta=3):
    """Generate C4-symmetric helix placements for one target structure."""
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    target = pr.parsePDB(target_path)
    x_rotations = []
    y_rotations = []
    for i in range(-x, x + 1):
        x_rotations.append(i * delta)
    for j in range(-y, y + 1):
        y_rotations.append([j * delta, 0, j * delta, 0])
    gss._generate_cn_symmetry_helix(outdir, target, name=pre_flag, metal_sel=metal_sel, n=4, x_rotations=x_rotations, y_rotations=y_rotations)


'''
### Generate for a special one
workdir = '/mnt/e/DesignData/ligands/CoiledCoil/C4/'
outdir = workdir + 'output_delta1/'
target_path = workdir + 'm3-3_cluster_1_mem_20_centroid_3fms_NI_1_HIS_2.pdb'
name = 'C4_'+ str(count) + '_' + target_path.split('/')[-1].split('centroid_')[-1].split('.')[0]
generate_c4s(outdir, target_path, metal_sel, name, x = 60, y = 30, delta = 1)
'''

### Extract querys
query_dir = '/mnt/e/DesignData/ligands/CU_NI/'
workdir = '/mnt/e/DesignData/ligands/CoiledCoil/C4_all_CYS/'
print(query_dir)
querys = extract_vdm.extract_all_centroid(query_dir, summary_name='_summary.txt', file_name_includes=['cluster', 'M3-3_CSY'], score_cut=0, clu_num_cut=3)
for q in querys:
    phi_180, psi_180, seq = gss.cal_phipsi(q.query)
    dssps = gss.cal_dssp(phi_180, psi_180, seq)
    # Skip query structures that are not predominantly helical ('A' states).
    if len(dssps) > 1 and len([x for x in dssps if x == 'A']) <= 2:
        print(q.query.getTitle() + ' is not helix.')
        continue
    pr.writePDB(workdir + q.query.getTitle(), q.query)

###
workdir = '/mnt/e/DesignData/ligands/CoiledCoil/C4_all_CYS/'
outdir = workdir + 'output_x40_y20/'
metal_sel = 'name NI CU'
if not os.path.exists(outdir):
    os.mkdir(outdir)

### Generate c4s
pdb_paths = []
for pdb in os.listdir(workdir):
    if not pdb.endswith(".pdb"):
        continue
    pdb_paths.append(workdir + pdb)

count = 0
for target_path in pdb_paths:
    name = 'C4_' + str(count) + '_' + target_path.split('/')[-1].split('centroid_')[-1].split('.')[0]
    generate_c4s(outdir, target_path, metal_sel, name)
    count += 1

### write XYZ for CCCP-fitting
pdbs = []
for pdb in os.listdir(outdir):
    if not pdb.endswith(".pdb"):
        continue
    pdbs.append(pr.parsePDB(outdir + pdb))
gss.write_XYZs(outdir, pdbs)
import typing
import PIL.Image
from enum import Enum
import preppipe.commontypes
class VNContext:
    """Presentation context shared by a block of elements.

    Holds the background music and background image in effect; both start
    unset (None).
    """

    def __init__(self) -> None:
        self.background_music = None
        self.background_image = None
class VNElement:
    """VNElement

    Representing an actionable data element inside the VN model.
    Base class for all concrete element types; carries no state itself.
    """

    def __init__(self) -> None:
        pass
class VNElementBlock:
    """VNElementBlock

    Basically a list of VNElements under the same VNContext.
    All elements are guaranteed to execute from the first to last
    (similar to a basic block).
    """

    # NOTE(review): class-level defaults are immediately shadowed by
    # __init__; the mutable list default is shared at class level and is an
    # antipattern -- kept to preserve the original public surface.
    ctx : VNContext = None
    element_list : typing.List[VNElement] = []

    def __init__(self, context : VNContext) -> None:
        self.ctx = context
        self.element_list = []

    def addElement(self, element : VNElement) -> None:
        """Append *element* to the end of this block."""
        self.element_list.append(element)
class VisualNovelModel:
    """Top-level container of contexts and element blocks for one VN script."""

    # NOTE(review): class-level mutable defaults are shadowed by __init__;
    # kept to preserve the original public surface.
    context_list : typing.List[VNContext] = []
    block_list : typing.List[VNElementBlock] = []
    empty_context : VNContext = None

    def __init__(self) -> None:
        self.context_list = []
        self.block_list = []
        # create an empty context so that referencing code can query the object
        self.empty_context = VNContext()

    def addBlock(self, block : VNElementBlock):
        """Append an element block to the model."""
        self.block_list.append(block)

    def addContext(self, ctx : VNContext):
        """Register a context with the model."""
        self.context_list.append(ctx)

    def getEmptyContext(self) -> VNContext:
        """Return the shared, always-available empty context."""
        return self.empty_context
class VNClearElement(VNElement):
    """This element clears all temporary data (similar to \\r)."""

    def __init__(self) -> None:
        pass
class VNTextAttribute(Enum):
    """Attributes that can decorate a piece of spoken text."""

    Bold = 0
    Italic = 1
    Size = 2
    TextColor = 3
    BackgroundColor = 4
    RubyText = 5
    HoverContent = 6  # unimplemented for now
    ClickAction = 7  # unimplemented for now
class VNSayTextElement(VNElement):
    """This element represents a piece of spoken text."""

    # NOTE(review): the original also declared class-level mutable defaults
    # (attributes = {}), which were shared across the class; annotations only.
    attributes : typing.Dict[VNTextAttribute, typing.Any]
    text : str

    def __init__(self, text : str = "", attributes : typing.Dict[VNTextAttribute, typing.Any] = None) -> None:
        super().__init__()
        self.text = text
        # Bug fix: the original used a mutable default argument ({}) and
        # stored it directly, so every instance built without an explicit
        # `attributes` shared (and could mutate) the same dict.
        self.attributes = {} if attributes is None else attributes

    def bold(self) -> bool:
        return VNTextAttribute.Bold in self.attributes

    def italic(self) -> bool:
        return VNTextAttribute.Italic in self.attributes

    def has_nonzero_sizelevel(self) -> bool:
        return VNTextAttribute.Size in self.attributes

    def size_level(self) -> int:
        return self.attributes.get(VNTextAttribute.Size, 0)

    def has_text_color(self) -> bool:
        return VNTextAttribute.TextColor in self.attributes

    def text_color(self) -> preppipe.commontypes.Color:
        return self.attributes.get(VNTextAttribute.TextColor, preppipe.commontypes.Color())

    def has_background_color(self) -> bool:
        return VNTextAttribute.BackgroundColor in self.attributes

    def background_color(self) -> preppipe.commontypes.Color:
        return self.attributes.get(VNTextAttribute.BackgroundColor, preppipe.commontypes.Color())

    def has_ruby_text(self) -> bool:
        return VNTextAttribute.RubyText in self.attributes

    def ruby_text(self) -> str:
        return self.attributes.get(VNTextAttribute.RubyText, "")
import typing
import PIL.Image
from enum import Enum
import preppipe.commontypes
class VNContext:
def __init__(self) -> None:
self.background_music = None
self.background_image = None
class VNElement:
"""VNElement
Representing an actionable data element inside the VN model
"""
def __init__(self) -> None:
pass
class VNElementBlock:
"""VNElementBlock
Basically a list of VNElements under the same VNContext
All elements are guaranteed to execute from the first to last (similar to a basic block)
"""
ctx : VNContext = None
element_list : typing.List[VNElement] = []
def __init__(self, context : VNContext) -> None:
self.ctx = context
self.element_list = []
def addElement(self, element : VNElement) -> None:
self.element_list.append(element)
class VisualNovelModel:
    """Top-level container for a visual novel: contexts plus element blocks."""

    def __init__(self) -> None:
        # Per-instance containers. The previous class-level
        # `context_list = []` / `block_list = []` declarations were mutable
        # objects shared across the class — removed to avoid accidental
        # cross-instance state.
        self.context_list = []
        self.block_list = []
        # create an empty context so that referencing code can query the object
        self.empty_context = VNContext()

    def addBlock(self, block: "VNElementBlock") -> None:
        """Register a block of elements with the model."""
        self.block_list.append(block)

    def addContext(self, ctx: "VNContext") -> None:
        """Register a context with the model."""
        self.context_list.append(ctx)

    def getEmptyContext(self) -> "VNContext":
        """Return the shared placeholder context created at construction."""
        return self.empty_context
class VNClearElement(VNElement):
    """Element that clears all temporary data (similar to \\r)."""

    def __init__(self) -> None:
        # Stateless marker element; nothing to initialize.
        pass
class VNTextAttribute(Enum):
    """Keys for styling attributes attached to a VNSayTextElement.

    The value type stored under each key varies by key: Size maps to an
    int level, TextColor/BackgroundColor to a Color, RubyText to a str
    (see the accessor methods on VNSayTextElement).
    """
    Bold = 0
    Italic = 1
    Size = 2
    TextColor = 3
    BackgroundColor = 4
    RubyText = 5
    HoverContent = 6  # unimplemented for now
    ClickAction = 7  # unimplemented for now
class VNSayTextElement(VNElement):
    """Element representing a piece of spoken text, with optional styling
    attributes keyed by VNTextAttribute."""

    def __init__(self, text: str = "", attributes: typing.Dict[VNTextAttribute, typing.Any] = None) -> None:
        """Create a say-text element.

        `attributes` defaults to None instead of the original `{}`: a
        mutable default dict is created once and shared by every element
        constructed without explicit attributes, so mutating one element's
        attributes would silently leak into all of them.
        """
        super().__init__()
        self.text = text
        self.attributes = {} if attributes is None else attributes

    def bold(self) -> bool:
        """True when the Bold attribute is present."""
        return VNTextAttribute.Bold in self.attributes

    def italic(self) -> bool:
        """True when the Italic attribute is present."""
        return VNTextAttribute.Italic in self.attributes

    def has_nonzero_sizelevel(self) -> bool:
        """True when an explicit size level is set."""
        return VNTextAttribute.Size in self.attributes

    def size_level(self) -> int:
        """Size level, defaulting to 0 when unset."""
        return self.attributes.get(VNTextAttribute.Size, 0)

    def has_text_color(self) -> bool:
        """True when an explicit text color is set."""
        return VNTextAttribute.TextColor in self.attributes

    def text_color(self) -> preppipe.commontypes.Color:
        """Text color, or a default-constructed Color when unset."""
        return self.attributes.get(VNTextAttribute.TextColor, preppipe.commontypes.Color())

    def has_background_color(self) -> bool:
        """True when an explicit background color is set."""
        return VNTextAttribute.BackgroundColor in self.attributes

    def background_color(self) -> preppipe.commontypes.Color:
        """Background color, or a default-constructed Color when unset."""
        return self.attributes.get(VNTextAttribute.BackgroundColor, preppipe.commontypes.Color())

    def has_ruby_text(self) -> bool:
        """True when ruby (furigana) text is set."""
        return VNTextAttribute.RubyText in self.attributes

    def ruby_text(self) -> str:
        """Ruby text, or the empty string when unset."""
return self.attributes.get(VNTextAttribute.RubyText, "") | 0.68215 | 0.240607 |
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from gazebo_msgs.srv import SetEntityState
from gazebo_msgs.msg import EntityState
from geometry_msgs.msg import Pose, Twist
from ament_index_python import get_package_share_directory
from rclpy.qos import QoSPresetProfiles
from cv_bridge import CvBridge
from tf_transformations import quaternion_from_euler
import cv2 as cv
import numpy as np
import json, glob, os
from random import random
from numpy import pi
class Calibrator(Node):
    """ROS 2 node that calibrates the simulated camera.

    Periodically teleports the 'chessboard' model to a random pose via the
    gazebo /set_entity_state service, detects chessboard corners in the
    camera stream, and once enough views are collected runs OpenCV camera
    calibration and writes the result to a JSON file.
    """

    def __init__(self):
        super().__init__("sim_camera_calibrator")
        self.declare_parameter('num_images', 15)
        self.declare_parameter('chessboard_h', 7)
        self.declare_parameter('chessboard_w', 7)

        # Subscribe to the camera topic. self.flag marks "a fresh frame is
        # available and not yet consumed by calibration_step".
        self.flag = False
        self.create_subscription(
            Image,
            '/sim_camera/image_raw',
            self.callback,
            QoSPresetProfiles.get_from_short_key('sensor_data')
        )
        self.img = np.zeros((1, 1, 3))

        # Create a client for the gazebo set_entity_state service
        self.cli = self.create_client(SetEntityState, '/set_entity_state')
        while not self.cli.wait_for_service(timeout_sec=1.0):
            self.get_logger().info('Waiting for /set_entity_state service to be available...')
        else:
            self.get_logger().info('Service found')
        self.req = SetEntityState.Request()
        self.send_pose_change()

        # Timers for pose shuffling and calibration attempts. Kept under
        # distinct names: the original assigned both to self.timer, losing
        # the reference to the first timer handle.
        self.pose_timer = self.create_timer(0.5, self.send_pose_change)
        self.step_timer = self.create_timer(1, self.calibration_step)

        # Set calibration parameters
        self.cb = (
            self.get_parameter('chessboard_h').get_parameter_value().integer_value,
            self.get_parameter('chessboard_w').get_parameter_value().integer_value
        )
        self.num_images = self.get_parameter('num_images').get_parameter_value().integer_value
        self.get_logger().info(
            f"Starting calibration for a {self.cb[0]}x{self.cb[1]} chessboard over {self.num_images} images!")

        # Create CVBridge instance
        self.bridge = CvBridge()
        # Termination criteria for the sub-pixel corner refinement
        self.criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        # Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
        self.objp = np.zeros((self.cb[0] * self.cb[1], 3), np.float32)
        self.objp[:, :2] = np.mgrid[0:self.cb[0], 0:self.cb[1]].T.reshape(-1, 2)
        # Arrays to store object points and image points from all the images.
        self.objpoints = []  # 3d point in real world space
        self.imgpoints = []  # 2d points in image plane.

    def calibration_step(self):
        """Try to detect the chessboard in the most recent camera frame."""
        self.get_logger().info("Attempting...")
        if not self.flag or self.num_images <= 0:
            return
        gray = cv.cvtColor(self.img, cv.COLOR_BGR2GRAY)
        self.gray = gray
        # Find the chess board corners
        ret, corners = cv.findChessboardCorners(gray, (self.cb[0], self.cb[1]), None)
        # If found, add object points, image points (after refining them)
        if ret:
            self.objpoints.append(self.objp)
            self.imgpoints.append(corners)
            self.num_images -= 1
            self.get_logger().info(f"{self.num_images} images remaining")
            # Draw and display the corners
            corners2 = cv.cornerSubPix(gray, corners, (11, 11), (-1, -1), self.criteria)
            cv.drawChessboardCorners(self.img, (self.cb[0], self.cb[1]), corners2, ret)
        # Mark the frame consumed and show the (possibly annotated) image.
        self.flag = False
        cv.imshow('img', self.img)
        cv.waitKey(1)
        if self.num_images <= 0:
            self.finalize()

    def send_pose_change(self):
        """Teleport the chessboard model to a fresh random pose in gazebo."""
        self.req.state = EntityState()
        self.req.state.name = 'chessboard'
        self.req.state.pose = Pose()
        # Random position inside the camera's working volume.
        self.req.state.pose.position.x = 1.5 + 3.5 * random()
        self.req.state.pose.position.y = -1.25 + 2.25 * random()
        self.req.state.pose.position.z = 0.5 + 1.0 * random()
        # Random tilt of +/- pi/8 around each axis, with a pi/2 yaw offset.
        q = quaternion_from_euler(
            -pi/8 + pi/4 * random() + 0.0,
            -pi/8 + pi/4 * random() + 0.0,
            -pi/8 + pi/4 * random() + pi/2
        )
        self.req.state.pose.orientation.x = q[0]
        self.req.state.pose.orientation.y = q[1]
        self.req.state.pose.orientation.z = q[2]
        self.req.state.pose.orientation.w = q[3]
        self.req.state.twist = Twist()
        self.req.state.reference_frame = 'world'
        self.resp = self.cli.call_async(self.req)

    def callback(self, msg):
        """Camera subscription callback: store the frame and mark it fresh."""
        self.img = self.bridge.imgmsg_to_cv2(msg, desired_encoding='passthrough')
        self.flag = True

    def finalize(self):
        """Run OpenCV calibration over the collected views and save to JSON."""
        path = os.path.join(get_package_share_directory('pose_estimation'), 'sim_calibration.json')
        cv.destroyAllWindows()
        ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(self.objpoints, self.imgpoints, self.gray.shape[::-1], None, None)
        h, w = self.img.shape[:2]
        new_mtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
        with open(path, 'w') as f:
            json.dump({
                'mtx': np.asarray(mtx).tolist(),
                'dist': np.asarray(dist).tolist(),
                'rvecs': np.asarray(rvecs).tolist(),
                'tvecs': np.asarray(tvecs).tolist(),
                'new_mtx': np.asarray(new_mtx).tolist(),
                'roi': np.asarray(roi).tolist(),
                'h': h,
                'w': w
            }, f)
        self.get_logger().info(f"Finalizing... Saved to {path}")
        return mtx, dist, rvecs, tvecs, new_mtx, roi
def main(args=None):
    """Initialize ROS, spin the calibrator node until shutdown, then clean up."""
    rclpy.init(args=args)
    node = Calibrator()
    rclpy.spin(node)
    node.destroy_node()
    rclpy.shutdown()
# Standard script entry point guard: run the node only when executed directly.
if __name__ == '__main__':
main() | src/pose_estimation/pose_estimation/calibrate_sim.py | import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from gazebo_msgs.srv import SetEntityState
from gazebo_msgs.msg import EntityState
from geometry_msgs.msg import Pose, Twist
from ament_index_python import get_package_share_directory
from rclpy.qos import QoSPresetProfiles
from cv_bridge import CvBridge
from tf_transformations import quaternion_from_euler
import cv2 as cv
import numpy as np
import json, glob, os
from random import random
from numpy import pi
class Calibrator(Node):
def __init__(self):
super().__init__("sim_camera_calibrator")
self.declare_parameter('num_images', 15)
self.declare_parameter('chessboard_h', 7)
self.declare_parameter('chessboard_w', 7)
# Subscribe to the camera topic
self.flag = False
self.create_subscription(
Image,
'/sim_camera/image_raw',
self.callback,
QoSPresetProfiles.get_from_short_key('sensor_data')
)
self.img = np.zeros((1, 1, 3))
# Create a client for the gazebo set_entity_state service
self.cli = self.create_client(SetEntityState, '/set_entity_state')
while not self.cli.wait_for_service(timeout_sec=1.0):
self.get_logger().info('Waiting for /set_entity_state service to be available...')
else:
self.get_logger().info('Service found')
self.req = SetEntityState.Request()
self.send_pose_change()
# Create a timer for calibration steps
self.timer = self.create_timer(0.5, self.send_pose_change)
self.timer = self.create_timer(1, self.calibration_step)
# Set calibration parameters
self.cb = (
self.get_parameter('chessboard_h').get_parameter_value().integer_value,
self.get_parameter('chessboard_w').get_parameter_value().integer_value
)
self.num_images = self.get_parameter('num_images').get_parameter_value().integer_value
self.get_logger().info(f"Starting calibration for a %dx%d chessboard over %d images!" % (self.cb[0], self.cb[1], self.num_images))
# Create CVBridge instance
self.bridge = CvBridge()
# termination self.criteria
self.criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
self.objp = np.zeros((self.cb[0]*self.cb[1], 3), np.float32)
self.objp[:,:2] = np.mgrid[0:self.cb[0],0:self.cb[1]].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
self.objpoints = [] # 3d point in real world space
self.imgpoints = [] # 2d points in image plane.
def calibration_step(self):
self.get_logger().info("Attempting...")
if not self.flag or self.num_images <= 0: return
gray = cv.cvtColor(self.img, cv.COLOR_BGR2GRAY)
self.gray = gray
# Find the chess board corners
ret, corners = cv.findChessboardCorners(gray, (self.cb[0], self.cb[1]), None)
# If found, add object points, image points (after refining them)
if ret:
self.objpoints.append(self.objp)
self.imgpoints.append(corners)
self.num_images -= 1
self.get_logger().info(f"%d images remaining" % self.num_images)
# Draw and display the corners
corners2 = cv.cornerSubPix(gray,corners, (11,11), (-1,-1), self.criteria)
cv.drawChessboardCorners(self.img, (self.cb[0], self.cb[1]), corners2, ret)
self.flag = False
cv.imshow('img', self.img)
cv.waitKey(1)
if self.num_images <= 0: self.finalize()
def send_pose_change(self):
self.req.state = EntityState()
self.req.state.name = 'chessboard'
self.req.state.pose = Pose()
self.req.state.pose.position.x = 1.5 + 3.5 * random()
self.req.state.pose.position.y = -1.25 + 2.25 * random()
self.req.state.pose.position.z = 0.5 + 1.0 * random()
q = quaternion_from_euler(
-pi/8 + pi/4* random() + 0.0,
-pi/8 + pi/4* random() + 0.0,
-pi/8 + pi/4* random() + pi/2
)
self.req.state.pose.orientation.x = q[0]
self.req.state.pose.orientation.y = q[1]
self.req.state.pose.orientation.z = q[2]
self.req.state.pose.orientation.w = q[3]
self.req.state.twist = Twist()
self.req.state.reference_frame = 'world'
self.resp = self.cli.call_async(self.req)
def callback(self, msg):
self.img = self.bridge.imgmsg_to_cv2(msg, desired_encoding='passthrough')
self.flag = True
def finalize(self):
path = os.path.join(get_package_share_directory('pose_estimation'), 'sim_calibration.json')
cv.destroyAllWindows()
ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(self.objpoints, self.imgpoints, self.gray.shape[::-1], None, None)
h, w = self.img.shape[:2]
new_mtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
with open(path, 'w') as f:
json.dump({
'mtx': np.asarray(mtx).tolist(),
'dist': np.asarray(dist).tolist(),
'rvecs': np.asarray(rvecs).tolist(),
'tvecs': np.asarray(tvecs).tolist(),
'new_mtx':np.asarray(new_mtx).tolist(),
'roi': np.asarray(roi).tolist(),
'h': h,
'w': w
}, f)
self.get_logger().info(f"Finilizing... Saved to %s" % path)
return mtx, dist, rvecs, tvecs, new_mtx, roi
def main(args=None):
rclpy.init(args=args)
viewer = Calibrator()
rclpy.spin(viewer)
viewer.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main() | 0.587707 | 0.175945 |
__copyright__ = "Copyright (C) 2016-2021 Flexiv Ltd. All Rights Reserved."
__author__ = "Flexiv"
import time
import argparse
# Import Flexiv RDK Python library
# fmt: off
import sys
sys.path.insert(0, "../lib/")
import flexivrdk
# fmt: on
def main():
    """Interactive plan-execution demo for a Flexiv robot.

    Connects to the robot server, enables the robot, switches to
    plan-execution mode, then loops on a small menu: list plans, execute
    a plan by index, or stop the running plan.
    """
    # Parse Arguments
    # =========================================================================
    argparser = argparse.ArgumentParser()
    argparser.add_argument('robot_ip', help='IP address of the robot server')
    argparser.add_argument('local_ip', help='IP address of the workstation PC')
    args = argparser.parse_args()

    # RDK Initialization
    # =========================================================================
    # Some alias
    robot = flexivrdk.Robot()
    mode = flexivrdk.Mode

    # Initialize connection with robot server
    robot.init(args.robot_ip, args.local_ip)

    # Wait for the connection to be established
    while not robot.isConnected():
        time.sleep(1)

    # Enable the robot, make sure the E-stop is released before enabling
    if robot.enable():
        print("Enabling robot ...")

    # Wait for the robot to become operational
    while not robot.isOperational():
        time.sleep(1)
    print("Robot is now operational")

    # Set mode after robot is operational
    robot.setMode(mode.MODE_PLAN_EXECUTION)

    # Wait for the mode to be switched
    while robot.getMode() != mode.MODE_PLAN_EXECUTION:
        time.sleep(1)

    # Application-specific Code
    # =========================================================================
    while True:
        # Get user input. Invalid entries re-prompt instead of crashing:
        # the original validated with `assert`, which is stripped when
        # Python runs with -O, and let int() raise on non-numeric input.
        try:
            input_case = int(input(
                "Choose an action:\n"
                "    [1] Get plan list\n"
                "    [2] Execute plan by index\n"
                "    [3] Stop the current plan execution\n"
            ))
        except ValueError:
            print("Invalid input")
            continue
        if not 1 <= input_case <= 3:
            print("Invalid input")
            continue

        if input_case == 1:
            # Get plan list
            plan_list = robot.getPlanNameList()
            time.sleep(1)
            for i, plan_name in enumerate(plan_list):
                print("[" + str(i) + "]", plan_name)
        elif input_case == 2:
            # Execute plan by index
            try:
                index = int(input("Enter plan index to execute:\n"))
            except ValueError:
                print("Invalid input")
                continue
            robot.executePlanByIndex(index)
        elif input_case == 3:
            # Stop the current plan execution
            robot.stop()
# Run the demo only when executed as a script (not on import).
if __name__ == "__main__":
main() | example_py/plan_execution.py | __copyright__ = "Copyright (C) 2016-2021 Flexiv Ltd. All Rights Reserved."
__author__ = "Flexiv"
import time
import argparse
# Import Flexiv RDK Python library
# fmt: off
import sys
sys.path.insert(0, "../lib/")
import flexivrdk
# fmt: on
def main():
# Parse Arguments
# =============================================================================
argparser = argparse.ArgumentParser()
argparser.add_argument('robot_ip', help='IP address of the robot server')
argparser.add_argument('local_ip', help='IP address of the workstation PC')
args = argparser.parse_args()
# RDK Initialization
# =============================================================================
# Some alias
robot = flexivrdk.Robot()
mode = flexivrdk.Mode
system_status = flexivrdk.SystemStatus()
# Initialize connection with robot server
robot.init(args.robot_ip, args.local_ip)
# Wait for the connection to be established
while not robot.isConnected():
time.sleep(1)
# Enable the robot, make sure the E-stop is released before enabling
if robot.enable():
print("Enabling robot ...")
# Wait for the robot to become operational
while not robot.isOperational():
time.sleep(1)
print("Robot is now operational")
# Set mode after robot is operational
robot.setMode(mode.MODE_PLAN_EXECUTION)
# Wait for the mode to be switched
while (robot.getMode() != mode.MODE_PLAN_EXECUTION):
time.sleep(1)
# Application-specific Code
# =============================================================================
while True:
# Get user input
input_case = int(input(
"Choose an action:\n \
[1] Get plan list \n \
[2] Execute plan by index \n \
[3] Stop the current plan execution \n"
)
)
# Check if user input is valid
assert (input_case >= 1 and input_case <= 3), "Invalid input"
# Get plan list
if input_case == 1:
plan_list = robot.getPlanNameList()
time.sleep(1)
for i in range(len(plan_list)):
print("[" + str(i) + "]", plan_list[i])
# Execute plan by index
elif input_case == 2:
index = int(input("Enter plan index to execute:\n"))
robot.executePlanByIndex(index)
# Stop the current plan execution
elif input_case == 3:
robot.stop()
if __name__ == "__main__":
main() | 0.530236 | 0.170957 |
from pathlib import Path
import sanic
import sanic.response
import twitter
from sanic.log import logger
from sanic_ext.extensions.http.extension import HTTPExtension
from sanic_ext.extensions.openapi.extension import OpenAPIExtension
from .config import load_json_config
from .link_cache import initialize_link_cache
from .sanic_jinja import configure_jinja
from .stats_module import initialize_stats
from .storage_module import initialize_storage
from .twitfix_app import twitfix_app
from .twitfix_debug import debug
from .twitfix_stats import stats
from .twitfix_toys import toy
@stats.middleware
async def lock_stats(request):
    """Blueprint-wide middleware: short-circuit every request to the stats
    blueprint. Stats are disabled, so log the rejection and answer 401
    with an empty body before any stats route handler runs."""
    logger.info(" ➤ [ X ] Stats have been disabled.")
    return sanic.response.empty(status=401)
# Create the Sanic application. env_prefix means TWITFIX_-prefixed
# environment variables populate app.config (Sanic behavior).
app = sanic.Sanic(
    "twitfix",
    env_prefix="TWITFIX_",
    configure_logging=False,
)

# Register the route groups (main app, stats, debug, toys).
app.blueprint(twitfix_app)
app.blueprint(stats)
app.blueprint(debug)
app.blueprint(toy)

# Only the HTTP and OpenAPI extensions are wanted; skip other built-ins.
app.extend(built_in_extensions=False, extensions=[HTTPExtension, OpenAPIExtension])

# If method is set to API or Hybrid, attempt to auth with the Twitter API
if app.config.DOWNLOAD_METHOD in ("api", "hybrid"):
    auth = twitter.oauth.OAuth(
        app.config.TWITTER_ACCESS_TOKEN,
        app.config.TWITTER_ACCESS_SECRET,
        app.config.TWITTER_API_KEY,
        app.config.TWITTER_API_SECRET,
    )
    twitter_api = twitter.Twitter(auth=auth)
    app.config.update({"TWITTER": twitter_api})

# Instantiate the pluggable backends selected by configuration.
link_cache_system = app.config.LINK_CACHE
storage_module_type = app.config.STORAGE_MODULE
STAT_MODULE = initialize_stats(link_cache_system, app.config)
LINKS_MODULE = initialize_link_cache(link_cache_system, app.config)
STORAGE_MODULE = initialize_storage(storage_module_type, app.config)

base_url = app.config.BASE_URL

# Template/static setup plus the JSON config overlay.
static_folder = Path("static").resolve()
template_folder = Path("templates").resolve()
configure_jinja(app, template_folder)
load_json_config(app)
app.static("/static", static_folder)

# Expose the initialized modules to request handlers via app.config.
app.config.update(
    {
        "STAT_MODULE": STAT_MODULE,
        "LINKS_MODULE": LINKS_MODULE,
        "STORAGE_MODULE": STORAGE_MODULE,
        "BASE_URL": base_url,
    }
) | src/twitfix/routes.py | from pathlib import Path
import sanic
import sanic.response
import twitter
from sanic.log import logger
from sanic_ext.extensions.http.extension import HTTPExtension
from sanic_ext.extensions.openapi.extension import OpenAPIExtension
from .config import load_json_config
from .link_cache import initialize_link_cache
from .sanic_jinja import configure_jinja
from .stats_module import initialize_stats
from .storage_module import initialize_storage
from .twitfix_app import twitfix_app
from .twitfix_debug import debug
from .twitfix_stats import stats
from .twitfix_toys import toy
@stats.middleware
async def lock_stats(request):
logger.info(" ➤ [ X ] Stats have been disabled.")
return sanic.response.empty(status=401)
app = sanic.Sanic(
"twitfix",
env_prefix="TWITFIX_",
configure_logging=False,
)
app.blueprint(twitfix_app)
app.blueprint(stats)
app.blueprint(debug)
app.blueprint(toy)
app.extend(built_in_extensions=False, extensions=[HTTPExtension, OpenAPIExtension])
# If method is set to API or Hybrid, attempt to auth with the Twitter API
if app.config.DOWNLOAD_METHOD in ("api", "hybrid"):
auth = twitter.oauth.OAuth(
app.config.TWITTER_ACCESS_TOKEN,
app.config.TWITTER_ACCESS_SECRET,
app.config.TWITTER_API_KEY,
app.config.TWITTER_API_SECRET,
)
twitter_api = twitter.Twitter(auth=auth)
app.config.update({"TWITTER": twitter_api})
link_cache_system = app.config.LINK_CACHE
storage_module_type = app.config.STORAGE_MODULE
STAT_MODULE = initialize_stats(link_cache_system, app.config)
LINKS_MODULE = initialize_link_cache(link_cache_system, app.config)
STORAGE_MODULE = initialize_storage(storage_module_type, app.config)
base_url = app.config.BASE_URL
static_folder = Path("static").resolve()
template_folder = Path("templates").resolve()
configure_jinja(app, template_folder)
load_json_config(app)
app.static("/static", static_folder)
app.config.update(
{
"STAT_MODULE": STAT_MODULE,
"LINKS_MODULE": LINKS_MODULE,
"STORAGE_MODULE": STORAGE_MODULE,
"BASE_URL": base_url,
}
) | 0.388502 | 0.060863 |
import optapy
import optapy.score
import optapy.config
def test_pinning_filter():
    """Entities rejected by a pinning filter must keep their initial value.

    The point with value 2 is pinned, so only the two free points can be
    optimized down to 0; the best reachable score is -(0 + 0 + 2) = -2.
    """
    def is_entity_pinned(_, entity):
        # Pinning filter; the unused first argument is presumably the
        # solution (per optapy convention — confirm against optapy docs).
        return entity.is_pinned()

    @optapy.planning_entity(pinning_filter=is_entity_pinned)
    class Point:
        def __init__(self, value, is_pinned=False):
            self.value = value
            self.pinned = is_pinned

        def is_pinned(self):
            return self.pinned

        # Planning variable: the solver may assign any value drawn from
        # the 'value_range' provider on the solution.
        @optapy.planning_variable(int, value_range_provider_refs=['value_range'])
        def get_value(self):
            return self.value

        def set_value(self, value):
            self.value = value

    @optapy.planning_solution
    class Solution:
        def __init__(self, values, points):
            self.values = values
            self.points = points
            self.score = None

        @optapy.problem_fact_collection_property(int)
        @optapy.value_range_provider('value_range')
        def get_value_range(self):
            return self.values

        @optapy.planning_entity_collection_property(Point)
        def get_points(self):
            return self.points

        @optapy.planning_score(optapy.score.SimpleScore)
        def get_score(self) -> optapy.score.SimpleScore:
            return self.score

        def set_score(self, score):
            self.score = score

    @optapy.constraint_provider
    def my_constraints(constraint_factory):
        # Penalize each point by its assigned value, driving unpinned
        # values toward the minimum of the range.
        return [
            constraint_factory.forEach(optapy.get_class(Point))
            .penalize("Minimize Value", optapy.score.SimpleScore.ONE, lambda point: point.value)
        ]

    # Stop after 1 second without score improvement.
    termination_config = optapy.config.solver.termination.TerminationConfig()
    termination_config.setUnimprovedSecondsSpentLimit(1)
    solver_config = optapy.config.solver.SolverConfig() \
        .withSolutionClass(optapy.get_class(Solution)) \
        .withEntityClasses(optapy.get_class(Point)) \
        .withConstraintProviderClass(optapy.get_class(my_constraints)) \
        .withTerminationConfig(termination_config)
    problem: Solution = Solution([0, 1, 2],
                                 [
                                     Point(0),
                                     Point(1),
                                     Point(2, is_pinned=True)
                                 ])
    solver = optapy.solver_factory_create(solver_config).buildSolver()
    solution = solver.solve(problem)
    # Two free points at 0 plus the pinned point stuck at 2 -> score -2.
    assert solution.get_score().getScore() == -2
def test_planning_pin():
    """Same scenario as test_pinning_filter, but using @optapy.planning_pin
    on the entity instead of a pinning filter. The pinned point (value 2)
    must keep its value, so the best score is again -2."""
    @optapy.planning_entity
    class Point:
        def __init__(self, value, is_pinned=False):
            self.value = value
            self.pinned = is_pinned

        # Declarative pin: when this returns True the solver leaves the
        # entity's planning variables untouched.
        @optapy.planning_pin
        def is_pinned(self):
            return self.pinned

        @optapy.planning_variable(int, value_range_provider_refs=['value_range'])
        def get_value(self):
            return self.value

        def set_value(self, value):
            self.value = value

    @optapy.planning_solution
    class Solution:
        def __init__(self, values, points):
            self.values = values
            self.points = points
            self.score = None

        @optapy.problem_fact_collection_property(int)
        @optapy.value_range_provider('value_range')
        def get_value_range(self):
            return self.values

        @optapy.planning_entity_collection_property(Point)
        def get_points(self):
            return self.points

        @optapy.planning_score(optapy.score.SimpleScore)
        def get_score(self) -> optapy.score.SimpleScore:
            return self.score

        def set_score(self, score):
            self.score = score

    @optapy.constraint_provider
    def my_constraints(constraint_factory):
        # Penalize each point by its assigned value.
        return [
            constraint_factory.forEach(optapy.get_class(Point))
            .penalize("Minimize Value", optapy.score.SimpleScore.ONE, lambda point: point.value)
        ]

    # Stop after 1 second without score improvement.
    termination_config = optapy.config.solver.termination.TerminationConfig()
    termination_config.setUnimprovedSecondsSpentLimit(1)
    solver_config = optapy.config.solver.SolverConfig() \
        .withSolutionClass(optapy.get_class(Solution)) \
        .withEntityClasses(optapy.get_class(Point)) \
        .withConstraintProviderClass(optapy.get_class(my_constraints)) \
        .withTerminationConfig(termination_config)
    problem: Solution = Solution([0, 1, 2],
                                 [
                                     Point(0),
                                     Point(1),
                                     Point(2, is_pinned=True)
                                 ])
    solver = optapy.solver_factory_create(solver_config).buildSolver()
    solution = solver.solve(problem)
assert solution.get_score().getScore() == -2 | optapy-core/tests/test_pinning.py | import optapy
import optapy.score
import optapy.config
def test_pinning_filter():
def is_entity_pinned(_, entity):
return entity.is_pinned()
@optapy.planning_entity(pinning_filter=is_entity_pinned)
class Point:
def __init__(self, value, is_pinned=False):
self.value = value
self.pinned = is_pinned
def is_pinned(self):
return self.pinned
@optapy.planning_variable(int, value_range_provider_refs=['value_range'])
def get_value(self):
return self.value
def set_value(self, value):
self.value = value
@optapy.planning_solution
class Solution:
def __init__(self, values, points):
self.values = values
self.points = points
self.score = None
@optapy.problem_fact_collection_property(int)
@optapy.value_range_provider('value_range')
def get_value_range(self):
return self.values
@optapy.planning_entity_collection_property(Point)
def get_points(self):
return self.points
@optapy.planning_score(optapy.score.SimpleScore)
def get_score(self) -> optapy.score.SimpleScore:
return self.score
def set_score(self, score):
self.score = score
@optapy.constraint_provider
def my_constraints(constraint_factory):
return [
constraint_factory.forEach(optapy.get_class(Point))
.penalize("Minimize Value", optapy.score.SimpleScore.ONE, lambda point: point.value)
]
termination_config = optapy.config.solver.termination.TerminationConfig()
termination_config.setUnimprovedSecondsSpentLimit(1)
solver_config = optapy.config.solver.SolverConfig() \
.withSolutionClass(optapy.get_class(Solution)) \
.withEntityClasses(optapy.get_class(Point)) \
.withConstraintProviderClass(optapy.get_class(my_constraints)) \
.withTerminationConfig(termination_config)
problem: Solution = Solution([0, 1, 2],
[
Point(0),
Point(1),
Point(2, is_pinned=True)
])
solver = optapy.solver_factory_create(solver_config).buildSolver()
solution = solver.solve(problem)
assert solution.get_score().getScore() == -2
def test_planning_pin():
@optapy.planning_entity
class Point:
def __init__(self, value, is_pinned=False):
self.value = value
self.pinned = is_pinned
@optapy.planning_pin
def is_pinned(self):
return self.pinned
@optapy.planning_variable(int, value_range_provider_refs=['value_range'])
def get_value(self):
return self.value
def set_value(self, value):
self.value = value
@optapy.planning_solution
class Solution:
def __init__(self, values, points):
self.values = values
self.points = points
self.score = None
@optapy.problem_fact_collection_property(int)
@optapy.value_range_provider('value_range')
def get_value_range(self):
return self.values
@optapy.planning_entity_collection_property(Point)
def get_points(self):
return self.points
@optapy.planning_score(optapy.score.SimpleScore)
def get_score(self) -> optapy.score.SimpleScore:
return self.score
def set_score(self, score):
self.score = score
@optapy.constraint_provider
def my_constraints(constraint_factory):
return [
constraint_factory.forEach(optapy.get_class(Point))
.penalize("Minimize Value", optapy.score.SimpleScore.ONE, lambda point: point.value)
]
termination_config = optapy.config.solver.termination.TerminationConfig()
termination_config.setUnimprovedSecondsSpentLimit(1)
solver_config = optapy.config.solver.SolverConfig() \
.withSolutionClass(optapy.get_class(Solution)) \
.withEntityClasses(optapy.get_class(Point)) \
.withConstraintProviderClass(optapy.get_class(my_constraints)) \
.withTerminationConfig(termination_config)
problem: Solution = Solution([0, 1, 2],
[
Point(0),
Point(1),
Point(2, is_pinned=True)
])
solver = optapy.solver_factory_create(solver_config).buildSolver()
solution = solver.solve(problem)
assert solution.get_score().getScore() == -2 | 0.729616 | 0.417064 |
from email.parser import BytesParser
from lxml import html
from lxml.etree import ParserError, Comment
import modules.port
import codecs
class HttpPort(modules.port.Port):
    """Collected HTTP probe results for a single scanned port."""

    def __init__(self, port):
        super().__init__("http", port)
        self.data = {}

    def add_data(self, row):
        """Fold one probe row into the per-port data dict.

        Rows whose type is a known probe are parsed as raw HTTP responses;
        '*_time' rows carry 'start end' response timestamps; anything
        else is ignored.
        """
        kind = row["type"]
        if kind in probe_types:
            self.data.update(process_probe(row))
            return
        if kind.endswith("_time"):
            stamps = row["data"].split(b" ")
            self.data["{}:response_start".format(kind)] = float(stamps[0])
            self.data["{}:response_end".format(kind)] = float(stamps[1])

    def get_property(self, name):
        """Return the recorded property, or None when absent."""
        return self.data.get(name)

    def get_properties(self):
        """Return an items view over every recorded property."""
        return self.data.items()

    def has_property(self, name):
        """True when the named property has been recorded."""
        return name in self.data
# Probe identifiers whose raw responses are parsed by process_probe();
# any other row type is expected to carry timing data ("*_time" rows,
# see HttpPort.add_data).
probe_types = [
    "get_root",
    "head_root",
    "very_simple_get",
    "not_exist",
    "invalid_version",
    "invalid_protocol",
    "long_path",
    "get_favicon",
    "get_robots",
    "delete_root"
]
def merge_chunks(chunks):
    """Reassemble an HTTP chunked-transfer body into plain bytes.

    Each chunk is "<hex size>\\r\\n<payload>"; a size of 0 terminates the
    stream. When a size line cannot be parsed (malformed server output),
    the remaining bytes are taken verbatim.

    Bug fix: the original advanced with `chunks = chunk[chunk_size:]`,
    slicing the already-truncated payload — always empty — so only the
    first chunk was ever merged. The cursor must advance over the
    untruncated remainder instead.
    """
    content = b""
    while chunks:
        try:
            size_line, rest = chunks.split(b"\r\n", 1)
            chunk_size = int(size_line, 16)
        except ValueError:
            # HTTP is fucked up: no parsable size line, take the rest as-is.
            chunk_size = len(chunks)
            rest = chunks
        content += rest[:chunk_size]
        chunks = rest[chunk_size:]
        if chunks.startswith(b"\r\n"):
            chunks = chunks[2:]
    return content
def tag_recursive(element, depth=-1):
    """Serialize the element's tag structure (tags only; no text/attrs).

    Comments render as "<!---->" with no closing tag. `depth` is accepted
    for backward compatibility but unused.

    Bug fix: the original checked `if tag is not Comment` AFTER rebinding
    `tag` to the string "!----", so the check was always true and comments
    wrongly received a closing tag. Also builds via join instead of
    repeated string concatenation.
    """
    is_comment = element.tag is Comment
    tag = "!----" if is_comment else element.tag
    parts = ["<%s>" % tag]
    parts.extend(tag_recursive(child) for child in element)
    if not is_comment:
        parts.append("</%s>" % tag)
    return "".join(parts)
def get_type(rows, probe_type):
    """Return the first row whose "type" equals probe_type, or None."""
    matches = (row for row in rows if row["type"] == probe_type)
    return next(matches, None)
def process_probe(row):
    """Parse one raw HTTP probe response into a flat {key: value} dict.

    Keys are namespaced by the probe type, e.g. "get_root:status_code",
    "get_root:header:Server", "get_root:dom_tree". Returns {} when the
    payload does not start with "HTTP/" or the header block cannot be
    split off.
    """
    if not row["data"].startswith(b"HTTP/"):
        return {}  # TODO: do some kind of content analysis
    #print(row["data"], "\n")
    # Normalize the first header/body separator so one split finds the
    # boundary even with CRLF line endings.
    response = row["data"].replace(b"\r\n\r\n", b"\n\n", 1)
    try:
        # split in headers and content
        raw_headers, content = response.split(b"\n\n", 1)
        request_line, headers_alone = raw_headers.split(b"\r\n", 1)
    except ValueError as e:
        return {}
    # parse first line (status line); on failure the None defaults survive
    # and the status fields below fall back accordingly
    try:
        protocol, status_code, status_text, version = None, None, None, None
        protocol, status_code, status_text = request_line.split(b" ", 2)
        protocol, version = protocol.split(b"/", 1)
    except ValueError as e:
        pass
    # get headers
    headers = BytesParser().parsebytes(headers_alone)
    server = headers.get("Server", "")  # read but currently unused
    date = headers.get("Date", "")      # read but currently unused
    content_type = headers.get("Content-Type", "")
    transfer_encoding = list(map(lambda s: s.strip(), headers.get("Transfer-Encoding", "").split(",")))
    # Work out a usable charset, falling back to utf-8 for unknown values.
    # NOTE(review): 'charset' is computed but never applied below — confirm
    # whether decoding 'content' with it was intended.
    charset = "utf-8"
    if "charset=" in content_type:
        charset = content_type[content_type.find("charset=")+len("charset="):]
    if charset == "undef":
        charset = "utf-8"
    try:
        codecs.lookup(charset)
    except LookupError:
        charset = "utf-8"
    if "chunked" in transfer_encoding:
        # the content is chunked and needs to be merged
        content = merge_chunks(content)
    # parse html into a tags-only structural fingerprint
    tag_tree = ""
    try:
        tree = html.fromstring(content)
        tag_tree = tag_recursive(tree)
    except ParserError as e:
        pass
    data = {}
    probe_type = row["type"]
    try:
        # TODO: IIS server is dick and may return decimals in status_code :shrug:
        try:
            data["{}:status_code".format(probe_type)] = float(status_code)
        except ValueError:
            data["{}:status_code".format(probe_type)] = -1
    except TypeError:
        # status line never parsed: status_code is still None
        data["{}:status_code".format(probe_type)] = None
    try:
        data["{}:status_text".format(probe_type)] = status_text
    except AttributeError:
        data["{}:status_text".format(probe_type)] = None
    try:
        data["{}:header_keys".format(probe_type)] = headers.keys()
    except TypeError:
        data["{}:header_keys".format(probe_type)] = None
    for header in headers:
        data["{}:header:{}".format(probe_type, header)] = headers[header]
    data["{}:dom_tree".format(probe_type)] = tag_tree
return data | modules/http.py | from email.parser import BytesParser
from lxml import html
from lxml.etree import ParserError, Comment
import modules.port
import codecs
class HttpPort(modules.port.Port):
def __init__(self, port):
super().__init__("http", port)
self.data = {}
def add_data(self, row):
if row["type"] not in probe_types:
if row["type"].endswith("_time"):
response_time = row["data"].split(b" ")
self.data["{}:response_start".format(row["type"])] = float(response_time[0])
self.data["{}:response_end".format(row["type"])] = float(response_time[1])
return
self.data.update(process_probe(row))
def get_property(self, name):
return self.data.get(name)
def get_properties(self):
return self.data.items()
def has_property(self, name):
return name in self.data
probe_types = [
"get_root",
"head_root",
"very_simple_get",
"not_exist",
"invalid_version",
"invalid_protocol",
"long_path",
"get_favicon",
"get_robots",
"delete_root"
]
def merge_chunks(chunks):
content = b""
while len(chunks) > 0:
try:
chunk_size, chunk = chunks.split(b"\r\n", 1)
chunk_size = int(chunk_size, 16)
except ValueError:
# HTTP is fucked up
chunk_size = len(chunks)
chunk = chunks
chunk = chunk[0:chunk_size]
content += chunk
chunks = chunk[chunk_size:]
if chunks.startswith(b"\r\n"):
chunks = chunks[2:]
return content
def tag_recursive(element, depth=-1):
    """Render the bare tag skeleton of *element* and its descendants,
    e.g. ``<html><body><p></p></body></html>``; comments render as
    ``<!----></!---->``.  *depth* is accepted but currently unused.
    """
    name = "!----" if element.tag is Comment else element.tag
    parts = ["<%s>" % name]
    for child in element:
        parts.append(tag_recursive(child))
    # NOTE(review): the original guarded this append with `tag is not
    # Comment`, but `tag` had already been rebound to "!----" by then, so
    # the closing tag is ALWAYS emitted -- behaviour preserved here.
    parts.append("</%s>" % name)
    return "".join(parts)
def get_type(rows, probe_type):
    """Return the first row whose "type" field equals *probe_type*, else None."""
    matches = (candidate for candidate in rows if candidate["type"] == probe_type)
    return next(matches, None)
def process_probe(row):
    """Parse one raw HTTP probe response into a flat ``{key: value}`` dict.

    Keys are namespaced by the probe type (e.g. ``get_root:status_code``).
    Returns an empty dict when *row* does not look like an HTTP response
    or the header/body split fails.
    """
    if not row["data"].startswith(b"HTTP/"):
        return {} # TODO: do some kind of content analysis
    #print(row["data"], "\n")
    # Normalise the header/body separator so it can be split on b"\n\n".
    response = row["data"].replace(b"\r\n\r\n", b"\n\n", 1)
    try:
        # split in headers and content
        raw_headers, content = response.split(b"\n\n", 1)
        request_line, headers_alone = raw_headers.split(b"\r\n", 1)
    except ValueError as e:
        return {}
    # parse first line; on failure all four fields stay None.
    try:
        protocol, status_code, status_text, version = None, None, None, None
        protocol, status_code, status_text = request_line.split(b" ", 2)
        protocol, version = protocol.split(b"/", 1)
    except ValueError as e:
        pass
    # get headers (bytes -> email.message.Message mapping)
    headers = BytesParser().parsebytes(headers_alone)
    server = headers.get("Server", "")
    date = headers.get("Date", "")
    content_type = headers.get("Content-Type", "")
    transfer_encoding = list(map(lambda s: s.strip(), headers.get("Transfer-Encoding", "").split(",")))
    # Charset defaults to utf-8 and falls back to it whenever the declared
    # charset is "undef" or unknown to the codecs registry.
    charset = "utf-8"
    if "charset=" in content_type:
        charset = content_type[content_type.find("charset=")+len("charset="):]
    if charset == "undef":
        charset = "utf-8"
    try:
        codecs.lookup(charset)
    except LookupError:
        charset = "utf-8"
    if "chunked" in transfer_encoding:
        # the content is chunked and needs to be merged
        content = merge_chunks(content)
    # parse html into a bare tag-skeleton fingerprint ("" if unparsable)
    tag_tree = ""
    try:
        tree = html.fromstring(content)
        tag_tree = tag_recursive(tree)
    except ParserError as e:
        pass
    data = {}
    probe_type = row["type"]
    try:
        # TODO: IIS server is dick and may return decimals in status_code :shrug:
        # TypeError happens when status_code is still None (first line
        # failed to parse); ValueError when it is non-numeric bytes.
        try:
            data["{}:status_code".format(probe_type)] = float(status_code)
        except ValueError:
            data["{}:status_code".format(probe_type)] = -1
    except TypeError:
        data["{}:status_code".format(probe_type)] = None
    try:
        data["{}:status_text".format(probe_type)] = status_text
    except AttributeError:
        data["{}:status_text".format(probe_type)] = None
    try:
        # NOTE(review): defensive guard -- unclear what raises TypeError
        # here for a parsed Message; confirm before removing.
        data["{}:header_keys".format(probe_type)] = headers.keys()
    except TypeError:
        data["{}:header_keys".format(probe_type)] = None
    for header in headers:
        data["{}:header:{}".format(probe_type, header)] = headers[header]
    data["{}:dom_tree".format(probe_type)] = tag_tree
return data | 0.176636 | 0.164953 |
from __future__ import unicode_literals
from nose.plugins.attrib import attr
from nose.tools import nottest
from mogwai.tests import BaseMogwaiTestCase
from mogwai._compat import PY2
from .base_tests import GraphPropertyBaseClassTestCase
from mogwai.properties.properties import DateTime, GraphProperty
from mogwai.models import Vertex
from mogwai.exceptions import ValidationError
from mogwai._compat import print_
import datetime
from pytz import utc
@attr('unit', 'property', 'property_datetime_utc')
class DateTimePropertyTestCase(GraphPropertyBaseClassTestCase):
    """Validation and (de)serialization tests for the DateTime property."""

    klass = DateTime
    # Only timezone-aware datetimes (or None) should validate.
    good_cases = (datetime.datetime.now(tz=utc), None)
    if PY2:
        # `long` only exists on Python 2, hence the version branch.
        bad_cases = ('val', [], (), {}, 0, long(1), False, 1.1, datetime.datetime.now())
    else:
        # Note: the naive (tzinfo-less) datetime is deliberately a bad case.
        bad_cases = ('val', [], (), {}, 0, False, 1.1, datetime.datetime.now())

    def test_to_database_method(self):
        """to_database: None passes through, numbers become floats, and a
        non-convertible value raises ValidationError."""
        d = self.klass(strict=False)
        self.assertIsNone(d.to_database(None))
        self.assertIsInstance(d.to_database(100000), float)
        with self.assertRaises(ValidationError):
            d.to_database(lambda x: x)

    def test_input_output_equality(self):
        """A datetime must survive a to_database/to_python round trip."""
        d = datetime.datetime(2014, 1, 1, tzinfo=utc)
        prop = self.klass()
        result = prop.to_python(prop.to_database(d))
        print_("Input: %s, Output: %s" % (d, result))
        self.assertEqual(d, result)
class DateTimeTestVertex(Vertex):
    """Minimal vertex model used to exercise DateTime persistence."""
    element_type = 'test_datetime_vertex'
    test_val = DateTime()
# (key, value) pairs for the choice-constrained DateTime property below.
CHOICES = (
    (datetime.datetime(2014, 1, 1, tzinfo=utc), 'A'),
    (datetime.datetime(2014, 2, 1, tzinfo=utc), 'B')
)
class DateTimeTestChoicesVertex(Vertex):
    """Vertex model whose DateTime value is restricted to CHOICES."""
    element_type = 'test_datetime_choices_vertex'
    test_val = DateTime(choices=CHOICES)
@attr('unit', 'property', 'property_datetime_utc')
class DateTimeVertexTestCase(GraphPropertyBaseClassTestCase):
    """Round-trip tests: persist a DateTime vertex property and read it back."""

    def test_datetime_io(self):
        """Stored value must equal the input after a create/get cycle
        (checked against both the in-memory vertex and a fresh literal)."""
        print_("creating vertex")
        dt = DateTimeTestVertex.create(test_val=datetime.datetime(2014, 1, 1, tzinfo=utc))
        print_("getting vertex from vertex: %s" % dt)
        dt2 = DateTimeTestVertex.get(dt._id)
        print_("got vertex: %s\n" % dt2)
        self.assertEqual(dt2.test_val, dt.test_val)
        print_("deleting vertex")
        dt2.delete()
        dt = DateTimeTestVertex.create(test_val=datetime.datetime(2014, 1, 1, tzinfo=utc))
        print_("\ncreated vertex: %s with time: %s" % (dt, dt.test_val))
        dt2 = DateTimeTestVertex.get(dt._id)
        print_("Got vertex: %s" % dt2)
        self.assertEqual(dt2.test_val, datetime.datetime(2014, 1, 1, tzinfo=utc))
        print_("deleting vertex")
        dt2.delete()
@attr('unit', 'property', 'property_datetime_utc')
class TestVertexChoicesTestCase(BaseMogwaiTestCase):
    """Tests for the choices= constraint on the DateTime property."""

    def test_good_choices_key_io(self):
        """A datetime matching a CHOICES key is accepted and stored as-is."""
        print_("creating vertex")
        dt = DateTimeTestChoicesVertex.create(test_val=datetime.datetime(2014, 1, 1, tzinfo=utc))
        print_("validating input")
        self.assertEqual(dt.test_val, datetime.datetime(2014, 1, 1, tzinfo=utc))
        print_("deleting vertex")
        dt.delete()

    @nottest
    def test_good_choices_value_io(self):
        """Disabled: creating by choice *value* ('B') should resolve to its
        datetime key, but currently does not (see comment below)."""
        # Known to be a bug, all keys and choices must be int | long | datetime
        print_("creating vertex")
        dt = DateTimeTestChoicesVertex.create(test_val='B')
        print_("validating input")
        self.assertEqual(dt.test_val, datetime.datetime(2014, 2, 1, tzinfo=utc))
        print_("deleting vertex")
        dt.delete()

    def test_bad_choices_io(self):
        """A datetime outside CHOICES must raise ValidationError on create."""
        with self.assertRaises(ValidationError):
            print_("creating vertex")
            dt = DateTimeTestChoicesVertex.create(test_val=datetime.datetime(2014, 3, 1, tzinfo=utc))
            print_("validating input")
            self.assertEqual(dt.test_val, 'C')
            print_("deleting vertex")
dt.delete() | mogwai/tests/properties_tests/properties_tests/datetime_tests.py | from __future__ import unicode_literals
from nose.plugins.attrib import attr
from nose.tools import nottest
from mogwai.tests import BaseMogwaiTestCase
from mogwai._compat import PY2
from .base_tests import GraphPropertyBaseClassTestCase
from mogwai.properties.properties import DateTime, GraphProperty
from mogwai.models import Vertex
from mogwai.exceptions import ValidationError
from mogwai._compat import print_
import datetime
from pytz import utc
@attr('unit', 'property', 'property_datetime_utc')
class DateTimePropertyTestCase(GraphPropertyBaseClassTestCase):
    """Validation and (de)serialization tests for the DateTime property."""

    klass = DateTime
    # Only timezone-aware datetimes (or None) should validate.
    good_cases = (datetime.datetime.now(tz=utc), None)
    if PY2:
        # `long` only exists on Python 2, hence the version branch.
        bad_cases = ('val', [], (), {}, 0, long(1), False, 1.1, datetime.datetime.now())
    else:
        # Note: the naive (tzinfo-less) datetime is deliberately a bad case.
        bad_cases = ('val', [], (), {}, 0, False, 1.1, datetime.datetime.now())

    def test_to_database_method(self):
        """to_database: None passes through, numbers become floats, and a
        non-convertible value raises ValidationError."""
        d = self.klass(strict=False)
        self.assertIsNone(d.to_database(None))
        self.assertIsInstance(d.to_database(100000), float)
        with self.assertRaises(ValidationError):
            d.to_database(lambda x: x)

    def test_input_output_equality(self):
        """A datetime must survive a to_database/to_python round trip."""
        d = datetime.datetime(2014, 1, 1, tzinfo=utc)
        prop = self.klass()
        result = prop.to_python(prop.to_database(d))
        print_("Input: %s, Output: %s" % (d, result))
        self.assertEqual(d, result)
class DateTimeTestVertex(Vertex):
    """Minimal vertex model used to exercise DateTime persistence."""
    element_type = 'test_datetime_vertex'
    test_val = DateTime()
# (key, value) pairs for the choice-constrained DateTime property below.
CHOICES = (
    (datetime.datetime(2014, 1, 1, tzinfo=utc), 'A'),
    (datetime.datetime(2014, 2, 1, tzinfo=utc), 'B')
)
class DateTimeTestChoicesVertex(Vertex):
    """Vertex model whose DateTime value is restricted to CHOICES."""
    element_type = 'test_datetime_choices_vertex'
    test_val = DateTime(choices=CHOICES)
@attr('unit', 'property', 'property_datetime_utc')
class DateTimeVertexTestCase(GraphPropertyBaseClassTestCase):
    """Round-trip tests: persist a DateTime vertex property and read it back."""

    def test_datetime_io(self):
        """Stored value must equal the input after a create/get cycle
        (checked against both the in-memory vertex and a fresh literal)."""
        print_("creating vertex")
        dt = DateTimeTestVertex.create(test_val=datetime.datetime(2014, 1, 1, tzinfo=utc))
        print_("getting vertex from vertex: %s" % dt)
        dt2 = DateTimeTestVertex.get(dt._id)
        print_("got vertex: %s\n" % dt2)
        self.assertEqual(dt2.test_val, dt.test_val)
        print_("deleting vertex")
        dt2.delete()
        dt = DateTimeTestVertex.create(test_val=datetime.datetime(2014, 1, 1, tzinfo=utc))
        print_("\ncreated vertex: %s with time: %s" % (dt, dt.test_val))
        dt2 = DateTimeTestVertex.get(dt._id)
        print_("Got vertex: %s" % dt2)
        self.assertEqual(dt2.test_val, datetime.datetime(2014, 1, 1, tzinfo=utc))
        print_("deleting vertex")
        dt2.delete()
@attr('unit', 'property', 'property_datetime_utc')
class TestVertexChoicesTestCase(BaseMogwaiTestCase):
    """Tests for the choices= constraint on the DateTime property."""

    def test_good_choices_key_io(self):
        """A datetime matching a CHOICES key is accepted and stored as-is."""
        print_("creating vertex")
        dt = DateTimeTestChoicesVertex.create(test_val=datetime.datetime(2014, 1, 1, tzinfo=utc))
        print_("validating input")
        self.assertEqual(dt.test_val, datetime.datetime(2014, 1, 1, tzinfo=utc))
        print_("deleting vertex")
        dt.delete()

    @nottest
    def test_good_choices_value_io(self):
        """Disabled: creating by choice *value* ('B') should resolve to its
        datetime key, but currently does not (see comment below)."""
        # Known to be a bug, all keys and choices must be int | long | datetime
        print_("creating vertex")
        dt = DateTimeTestChoicesVertex.create(test_val='B')
        print_("validating input")
        self.assertEqual(dt.test_val, datetime.datetime(2014, 2, 1, tzinfo=utc))
        print_("deleting vertex")
        dt.delete()

    def test_bad_choices_io(self):
        """A datetime outside CHOICES must raise ValidationError on create."""
        with self.assertRaises(ValidationError):
            print_("creating vertex")
            dt = DateTimeTestChoicesVertex.create(test_val=datetime.datetime(2014, 3, 1, tzinfo=utc))
            print_("validating input")
            self.assertEqual(dt.test_val, 'C')
            print_("deleting vertex")
dt.delete() | 0.701815 | 0.488222 |
import ast
import functools
import re
from copy import deepcopy
from inspect import stack
from pathlib import Path
import executing
import mat2py.config
from mat2py.common.backends import numpy as np
from mat2py.common.logger import logger
from mat2py.common.utils import Singleton
from .array import M, mp_detect_vector
@functools.lru_cache(maxsize=10)
def mp_last_arg_as_kwarg(key: str, value_map: (tuple, dict)):
    """Decorator factory translating a trailing positional string flag into
    the keyword argument *key*.

    A dict value_map maps the flag to its value; a 1-tuple maps it to True;
    a longer tuple passes the flag string through unchanged.
    NOTE: because of the lru_cache, value_map must be hashable in practice.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if args and isinstance(args[-1], str) and args[-1] in value_map:
                flag = args[-1]
                if isinstance(value_map, dict):
                    mapped = value_map[flag]
                elif isinstance(value_map, tuple):
                    mapped = True if len(value_map) == 1 else flag
                merged = dict(kwargs)
                merged[key] = mapped
                kwargs = merged
                args = args[:-1]
            return func(*args, **kwargs)
        return wrapper
    return decorator
@functools.lru_cache(maxsize=10)
def mp_match_vector_direction(match_arg_position=0, target_arg_position: tuple = None):
    """Decorator factory: make vector results follow the row/column
    orientation of the argument at *match_arg_position*.

    target_arg_position restricts which elements of a tuple result are
    adjusted; by default every result element is considered.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            res = func(*args, **kwargs)
            if len(args) <= match_arg_position:
                # Reference argument absent -> nothing to match against.
                return res
            vec_type = mp_detect_vector(args[match_arg_position])
            if vec_type == 0:
                # Reference argument is not a vector; leave result untouched.
                return res
            # Normalise to a list so single results and tuples share a path.
            new_res = (
                list(res)
                if isinstance(res, tuple)
                else [
                    res,
                ]
            )
            for i in (
                range(len(new_res))
                if target_arg_position is None
                else target_arg_position
            ):
                res_vec_type = mp_detect_vector(new_res[i])
                if res_vec_type != 0 and res_vec_type != vec_type:
                    # vec_type 1 appears to denote a row vector (reshaped to
                    # (1, -1)), anything else a column -- TODO confirm
                    # against mp_detect_vector's encoding.
                    new_res[i] = new_res[i].reshape(
                        (1, -1) if vec_type == 1 else (-1, 1)
                    )
            return tuple(new_res) if isinstance(res, tuple) else new_res[0]
        return wrapper
    return decorator
@functools.lru_cache(maxsize=10)
def mp_argout_wrapper_decorators(nargout: int = 1):
    """Decorator factory wrapping return values into mat2py ``M`` arrays.

    With nargout == 1 the single result is wrapped directly; otherwise the
    result must be a tuple and each element is wrapped individually.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            raw = func(*args, **kwargs)
            if nargout == 1:
                return M[raw]
            assert isinstance(raw, tuple)
            return tuple(M[item] for item in raw)
        return wrapper
    return decorator
def mp_special_variables(value: float, name: str = "") -> float:
    """Return *value* unchanged; *name* is accepted but currently unused
    (presumably a hook for identifying MATLAB special variables such as
    pi/eps by name later -- TODO confirm)."""
    return value
@functools.lru_cache(maxsize=10)
def mp_pass_values_decorators(args_position: tuple = None):
    """Decorator factory emulating MATLAB pass-by-value semantics.

    Arguments at the given positions (all of them when args_position is
    None) are deep-copied before the call so the callee cannot mutate the
    caller's objects.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # TODO: can we detect temporary right values and avoid the
            # deepcopy for throughput? e.g. sys.getrefcount()
            if args_position is None:
                return func(*deepcopy(list(args)), **kwargs)
            copied = list(args)
            for idx in args_position:
                if idx < len(copied):
                    copied[idx] = deepcopy(copied[idx])
            return func(*copied, **kwargs)
        return wrapper
    return decorator
@functools.lru_cache(maxsize=10)
def mp_inference_nargout_decorators(caller_level: int = 2):
    """Decorator factory injecting MATLAB-style ``nargout``.

    If the caller does not pass nargout explicitly, it is inferred from the
    caller's source via mp_nargout_from_stack.  The wrapped function must
    return a tuple; one element is unwrapped for nargout == 1.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, nargout=None, **kwargs):
            count = (
                nargout
                if nargout is not None
                else mp_nargout_from_stack(caller_level, func)
            )
            res = func(*args, **kwargs, nargout=count)
            if not isinstance(res, tuple):
                # TODO: we should be smarter
                raise SyntaxWarning(
                    "mp_inference_nargout_decorators can only be used once"
                )
            return res[0] if count == 1 else res[:count]
        return wrapper
    return decorator
@functools.lru_cache(maxsize=10)
def mp_inference_nargin_decorators():
    """Decorator factory injecting MATLAB-style ``nargin`` (the number of
    positional arguments actually supplied) as a keyword argument."""
    def decorator(func):
        @functools.wraps(func)
        def _inject(*call_args):
            return func(*call_args, nargin=len(call_args))
        return _inject
    return decorator
class CodeContext(metaclass=Singleton):
    """Process-wide (Singleton) cache of the most recently seen source
    snippet, with its first statement's AST parsed lazily."""

    def __init__(self):
        self.code = None      # last non-empty snippet registered
        self.__ast__ = None   # cached AST of self.code (lazy)

    def reset(self):
        """Forget the cached snippet and its AST."""
        self.code = None
        self.__ast__ = None

    @property
    def ast(self):
        """First statement of the cached snippet, parsed on first access."""
        if self.__ast__ is None:
            self.__ast__ = ast.parse(self.code).body[0]
        return self.__ast__

    def __call__(self, code: str):
        """Register *code* (stripped); invalidate the AST only on change."""
        stripped = code.strip()
        if stripped and stripped != self.code:
            self.code = stripped
            self.__ast__ = None
        return self
def mp_nargout_from_ast(ast_tree, func_name: str):
    """Count assignment targets of ``a, b = func_name(...)``.

    Returns the number of tuple-unpacked targets, or 1 for any other
    statement shape (``a, = f()`` is not valid MATLAB, so only a genuine
    Tuple target counts).
    """
    if not isinstance(ast_tree, ast.Assign):
        return 1
    call = ast_tree.value
    if not isinstance(call, ast.Call) or call.func.id != func_name:
        return 1
    target = ast_tree.targets[0]
    if isinstance(target, ast.Tuple):
        return len(target.elts)
    return 1
def mp_nargout_from_stack(
    caller_level: int = 2,
    func=None,
    default_if_exception: int = 1
    if mat2py.config.ignore_nargout_inference_exception
    else None,
):
    """Infer MATLAB-style ``nargout`` by inspecting the caller's source.

    Walks *caller_level* frames up the stack, locates the call expression
    for *func* (or the current function) and counts its assignment targets.
    Falls back to *default_if_exception* -- or raises SyntaxWarning when
    that is None.  NOTE: the default is frozen at import time from
    mat2py.config.
    """
    current, *_, caller = stack()[1 : (caller_level + 1)]
    function = func.__name__ if func is not None else current.function
    try:
        try:
            frame_meta = executing.Source.executing(caller.frame)
            if frame_meta.node is not None:
                # `executing` pinpointed the exact call AST node.
                call_node = frame_meta.node
                assert isinstance(call_node, ast.Call) and call_node.func.id == function
                return mp_nargout_from_ast(call_node.parent, function)
            else:
                if len(frame_meta.statements) == 1:
                    (ast_tree,) = frame_meta.statements
                    # TODO: how to handle multiple call with same function?
                    return mp_nargout_from_ast(ast_tree, function)
                elif frame_meta.statements:
                    raise NotImplementedError(
                        "only one statement supported in one line for now"
                    )
                elif caller.filename in ("<stdin>", "<console>"):
                    raise ValueError("can not identify source code, seems to be IDLE")
                # No statements and not a recognised REPL: bail to outer handler.
                raise SystemError
        except ValueError:
            # REPL case: fall back to the globally cached last source line.
            return mp_nargout_from_ast(CodeContext().ast, function)
    except Exception as err:
        if default_if_exception is None:
            raise SyntaxWarning(
                "failed to inference nargout from call stack, pass the information explicitly"
            )
        else:
            logger.warning(
                f"failed to inference nargout from call stack, set it to be {default_if_exception}: {err}"
            )
return default_if_exception | mat2py/core/_internal/helper.py | import ast
import functools
import re
from copy import deepcopy
from inspect import stack
from pathlib import Path
import executing
import mat2py.config
from mat2py.common.backends import numpy as np
from mat2py.common.logger import logger
from mat2py.common.utils import Singleton
from .array import M, mp_detect_vector
@functools.lru_cache(maxsize=10)
def mp_last_arg_as_kwarg(key: str, value_map: (tuple, dict)):
    """Decorator factory translating a trailing positional string flag into
    the keyword argument *key*.

    A dict value_map maps the flag to its value; a 1-tuple maps it to True;
    a longer tuple passes the flag string through unchanged.
    NOTE: because of the lru_cache, value_map must be hashable in practice.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if args and isinstance(args[-1], str) and args[-1] in value_map:
                flag = args[-1]
                if isinstance(value_map, dict):
                    mapped = value_map[flag]
                elif isinstance(value_map, tuple):
                    mapped = True if len(value_map) == 1 else flag
                merged = dict(kwargs)
                merged[key] = mapped
                kwargs = merged
                args = args[:-1]
            return func(*args, **kwargs)
        return wrapper
    return decorator
@functools.lru_cache(maxsize=10)
def mp_match_vector_direction(match_arg_position=0, target_arg_position: tuple = None):
    """Decorator factory: make vector results follow the row/column
    orientation of the argument at *match_arg_position*.

    target_arg_position restricts which elements of a tuple result are
    adjusted; by default every result element is considered.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            res = func(*args, **kwargs)
            if len(args) <= match_arg_position:
                # Reference argument absent -> nothing to match against.
                return res
            vec_type = mp_detect_vector(args[match_arg_position])
            if vec_type == 0:
                # Reference argument is not a vector; leave result untouched.
                return res
            # Normalise to a list so single results and tuples share a path.
            new_res = (
                list(res)
                if isinstance(res, tuple)
                else [
                    res,
                ]
            )
            for i in (
                range(len(new_res))
                if target_arg_position is None
                else target_arg_position
            ):
                res_vec_type = mp_detect_vector(new_res[i])
                if res_vec_type != 0 and res_vec_type != vec_type:
                    # vec_type 1 appears to denote a row vector (reshaped to
                    # (1, -1)), anything else a column -- TODO confirm
                    # against mp_detect_vector's encoding.
                    new_res[i] = new_res[i].reshape(
                        (1, -1) if vec_type == 1 else (-1, 1)
                    )
            return tuple(new_res) if isinstance(res, tuple) else new_res[0]
        return wrapper
    return decorator
@functools.lru_cache(maxsize=10)
def mp_argout_wrapper_decorators(nargout: int = 1):
    """Decorator factory wrapping return values into mat2py ``M`` arrays.

    With nargout == 1 the single result is wrapped directly; otherwise the
    result must be a tuple and each element is wrapped individually.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            raw = func(*args, **kwargs)
            if nargout == 1:
                return M[raw]
            assert isinstance(raw, tuple)
            return tuple(M[item] for item in raw)
        return wrapper
    return decorator
def mp_special_variables(value: float, name: str = "") -> float:
    """Return *value* unchanged; *name* is accepted but currently unused
    (presumably a hook for identifying MATLAB special variables such as
    pi/eps by name later -- TODO confirm)."""
    return value
@functools.lru_cache(maxsize=10)
def mp_pass_values_decorators(args_position: tuple = None):
    """Decorator factory emulating MATLAB pass-by-value semantics.

    Arguments at the given positions (all of them when args_position is
    None) are deep-copied before the call so the callee cannot mutate the
    caller's objects.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # TODO: can we detect temporary right values and avoid the
            # deepcopy for throughput? e.g. sys.getrefcount()
            if args_position is None:
                return func(*deepcopy(list(args)), **kwargs)
            copied = list(args)
            for idx in args_position:
                if idx < len(copied):
                    copied[idx] = deepcopy(copied[idx])
            return func(*copied, **kwargs)
        return wrapper
    return decorator
@functools.lru_cache(maxsize=10)
def mp_inference_nargout_decorators(caller_level: int = 2):
    """Decorator factory injecting MATLAB-style ``nargout``.

    If the caller does not pass nargout explicitly, it is inferred from the
    caller's source via mp_nargout_from_stack.  The wrapped function must
    return a tuple; one element is unwrapped for nargout == 1.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, nargout=None, **kwargs):
            count = (
                nargout
                if nargout is not None
                else mp_nargout_from_stack(caller_level, func)
            )
            res = func(*args, **kwargs, nargout=count)
            if not isinstance(res, tuple):
                # TODO: we should be smarter
                raise SyntaxWarning(
                    "mp_inference_nargout_decorators can only be used once"
                )
            return res[0] if count == 1 else res[:count]
        return wrapper
    return decorator
@functools.lru_cache(maxsize=10)
def mp_inference_nargin_decorators():
    """Decorator factory injecting MATLAB-style ``nargin`` (the number of
    positional arguments actually supplied) as a keyword argument."""
    def decorator(func):
        @functools.wraps(func)
        def _inject(*call_args):
            return func(*call_args, nargin=len(call_args))
        return _inject
    return decorator
class CodeContext(metaclass=Singleton):
    """Process-wide (Singleton) cache of the most recently seen source
    snippet, with its first statement's AST parsed lazily."""

    def __init__(self):
        self.code = None      # last non-empty snippet registered
        self.__ast__ = None   # cached AST of self.code (lazy)

    def reset(self):
        """Forget the cached snippet and its AST."""
        self.code = None
        self.__ast__ = None

    @property
    def ast(self):
        """First statement of the cached snippet, parsed on first access."""
        if self.__ast__ is None:
            self.__ast__ = ast.parse(self.code).body[0]
        return self.__ast__

    def __call__(self, code: str):
        """Register *code* (stripped); invalidate the AST only on change."""
        stripped = code.strip()
        if stripped and stripped != self.code:
            self.code = stripped
            self.__ast__ = None
        return self
def mp_nargout_from_ast(ast_tree, func_name: str):
    """Count assignment targets of ``a, b = func_name(...)``.

    Returns the number of tuple-unpacked targets, or 1 for any other
    statement shape (``a, = f()`` is not valid MATLAB, so only a genuine
    Tuple target counts).
    """
    if not isinstance(ast_tree, ast.Assign):
        return 1
    call = ast_tree.value
    if not isinstance(call, ast.Call) or call.func.id != func_name:
        return 1
    target = ast_tree.targets[0]
    if isinstance(target, ast.Tuple):
        return len(target.elts)
    return 1
def mp_nargout_from_stack(
    caller_level: int = 2,
    func=None,
    default_if_exception: int = 1
    if mat2py.config.ignore_nargout_inference_exception
    else None,
):
    """Infer MATLAB-style ``nargout`` by inspecting the caller's source.

    Walks *caller_level* frames up the stack, locates the call expression
    for *func* (or the current function) and counts its assignment targets.
    Falls back to *default_if_exception* -- or raises SyntaxWarning when
    that is None.  NOTE: the default is frozen at import time from
    mat2py.config.
    """
    current, *_, caller = stack()[1 : (caller_level + 1)]
    function = func.__name__ if func is not None else current.function
    try:
        try:
            frame_meta = executing.Source.executing(caller.frame)
            if frame_meta.node is not None:
                # `executing` pinpointed the exact call AST node.
                call_node = frame_meta.node
                assert isinstance(call_node, ast.Call) and call_node.func.id == function
                return mp_nargout_from_ast(call_node.parent, function)
            else:
                if len(frame_meta.statements) == 1:
                    (ast_tree,) = frame_meta.statements
                    # TODO: how to handle multiple call with same function?
                    return mp_nargout_from_ast(ast_tree, function)
                elif frame_meta.statements:
                    raise NotImplementedError(
                        "only one statement supported in one line for now"
                    )
                elif caller.filename in ("<stdin>", "<console>"):
                    raise ValueError("can not identify source code, seems to be IDLE")
                # No statements and not a recognised REPL: bail to outer handler.
                raise SystemError
        except ValueError:
            # REPL case: fall back to the globally cached last source line.
            return mp_nargout_from_ast(CodeContext().ast, function)
    except Exception as err:
        if default_if_exception is None:
            raise SyntaxWarning(
                "failed to inference nargout from call stack, pass the information explicitly"
            )
        else:
            logger.warning(
                f"failed to inference nargout from call stack, set it to be {default_if_exception}: {err}"
            )
return default_if_exception | 0.38943 | 0.208219 |
from __future__ import division
import Tkinter as tk
class Main_window:
    """Tk GUI for building wall-line input for drift calculations.

    The user enters line segments (start/end coordinates in ft), a wall
    height Hc and a location flag ('e' or 'i'); each added line is appended
    to comma-separated output strings that can be written to a text file.
    """

    def __init__(self, master):
        self.master = master

        ## Main Frame
        self.main_frame = tk.Frame(master, bd=2, relief='sunken', padx=10, pady=20)
        self.main_frame.pack(anchor='c', padx=10, pady=20)

        ## Entry Frame -- one labelled Entry per input field
        self.frame_entry = tk.Frame(self.main_frame, padx=5, pady=5)
        self.enter_label = tk.Label(self.frame_entry, text="Enter Lines in counterclock wise order")
        self.enter_label.grid(row=0, column=0, pady=5)

        self.start_x = tk.StringVar()
        self.start_x_label = tk.Label(self.frame_entry, text="Start x (ft) : ")
        self.start_x_label.grid(row=1, column=0, pady=5)
        self.start_x_entry = tk.Entry(self.frame_entry, textvariable=self.start_x)
        self.start_x_entry.grid(row=1, column=1, pady=5)

        self.start_y = tk.StringVar()
        self.start_y_label = tk.Label(self.frame_entry, text="Start y (ft) : ")
        self.start_y_label.grid(row=2, column=0, pady=5)
        self.start_y_entry = tk.Entry(self.frame_entry, textvariable=self.start_y)
        self.start_y_entry.grid(row=2, column=1, pady=5)

        self.end_x = tk.StringVar()
        self.end_x_label = tk.Label(self.frame_entry, text="End x (ft) : ")
        self.end_x_label.grid(row=3, column=0, pady=5)
        self.end_x_entry = tk.Entry(self.frame_entry, textvariable=self.end_x)
        self.end_x_entry.grid(row=3, column=1, pady=5)

        self.end_y = tk.StringVar()
        self.end_y_label = tk.Label(self.frame_entry, text="End y (ft) : ")
        self.end_y_label.grid(row=4, column=0, pady=5)
        self.end_y_entry = tk.Entry(self.frame_entry, textvariable=self.end_y)
        self.end_y_entry.grid(row=4, column=1, pady=5)

        self.hc_in = tk.StringVar()
        self.hc_in_label = tk.Label(self.frame_entry, text="Hc (ft) : ")
        self.hc_in_label.grid(row=5, column=0, pady=5)
        self.hc_in_entry = tk.Entry(self.frame_entry, textvariable=self.hc_in)
        self.hc_in_entry.grid(row=5, column=1, pady=5)

        # 'e' / 'i' location selector (presumably exterior/interior --
        # TODO confirm against the drift calculation's expectations).
        self.loc = tk.StringVar()
        self.loc.set('e')
        self.loc_label = tk.Label(self.frame_entry, text="e or i : ")
        self.loc_label.grid(row=6, column=0, pady=5)
        choice = ['e', 'i']
        self.loc_entry = tk.OptionMenu(self.frame_entry, self.loc, *choice)
        self.loc_entry.grid(row=6, column=1, pady=5)

        self.b_prev = tk.Button(self.frame_entry, text="Start = Last End", command=self.prev_point)
        self.b_prev.grid(row=1, column=2, padx=5, pady=5)
        self.b_add = tk.Button(self.frame_entry, text="Add Line", command=self.add_line)
        self.b_add.grid(row=7, column=1, pady=5)

        # Accumulated output strings (shown in read-only style entries).
        self.x = tk.StringVar()
        self.x_label = tk.Label(self.frame_entry, text="x_out: ")
        self.x_label.grid(row=1, column=3, pady=5)
        self.x_entry = tk.Entry(self.frame_entry, textvariable=self.x, width=100)
        self.x_entry.grid(row=1, column=4, pady=5)

        self.y = tk.StringVar()
        self.y_label = tk.Label(self.frame_entry, text="y_out: ")
        self.y_label.grid(row=2, column=3, pady=5)
        self.y_entry = tk.Entry(self.frame_entry, textvariable=self.y, width=100)
        self.y_entry.grid(row=2, column=4, pady=5)

        self.hc_out = tk.StringVar()
        self.hc_out_label = tk.Label(self.frame_entry, text="hc_out : ")
        self.hc_out_label.grid(row=3, column=3, pady=5)
        self.hc_out_entry = tk.Entry(self.frame_entry, textvariable=self.hc_out, width=100)
        self.hc_out_entry.grid(row=3, column=4, pady=5)

        self.loc_out = tk.StringVar()
        self.loc_out_label = tk.Label(self.frame_entry, text="loc_out : ")
        self.loc_out_label.grid(row=4, column=3, pady=5)
        self.loc_out_entry = tk.Entry(self.frame_entry, textvariable=self.loc_out, width=100)
        self.loc_out_entry.grid(row=4, column=4, pady=5)

        self.frame_entry.pack(side=tk.LEFT)

        ## Results Frame (currently empty placeholder)
        self.results_frame = tk.Frame(self.main_frame, padx=5, pady=5)
        self.results_frame.pack(side=tk.LEFT)

        ## Outside Main Frame
        self.b1 = tk.Button(master, text="Close", command=self.quit_app)
        self.b1.pack(side=tk.RIGHT, padx=5, pady=5)
        self.b2 = tk.Button(master, text="Write Output to txt File", command=self.write_output)
        self.b2.pack(side=tk.RIGHT, padx=5, pady=5)

        # End point of the most recently added line ("Start = Last End").
        self.prev_x = 0
        self.prev_y = 0

    def quit_app(self):
        """Destroy the window and end the Tk main loop."""
        self.master.destroy()
        self.master.quit()

    def prev_point(self):
        """Copy the last added line's end point into the start entries."""
        self.start_x.set(self.prev_x)
        self.start_y.set(self.prev_y)

    def add_line(self):
        """Append the entered line to the output strings.

        Zero-length lines (start == end, as entered text) are ignored.
        """
        x1 = self.start_x.get()
        x2 = self.end_x.get()
        y1 = self.start_y.get()
        y2 = self.end_y.get()
        if [x1, y1] == [x2, y2]:
            # Degenerate (zero-length) line -- nothing to add.
            return
        self.prev_x = x2
        self.prev_y = y2
        hc = self.hc_in.get()
        loc = self.loc.get()
        self.x.set(self.x.get() + '{0},{1},'.format(x1, x2))
        self.y.set(self.y.get() + '{0},{1},'.format(y1, y2))
        self.hc_out.set(self.hc_out.get() + '{0},'.format(hc))
        self.loc_out.set(self.loc_out.get() + "'{0}',".format(loc))

    def write_output(self):
        """Write the four accumulated output strings to
        Line_Helper_output.txt, one per line."""
        # `with` guarantees the handle is closed even on error; the previous
        # implementation also shadowed the builtin `file`.
        with open('Line_Helper_output.txt', 'w') as out:
            for line in (self.x.get(), self.y.get(),
                         self.hc_out.get(), self.loc_out.get()):
                out.write(line)
                out.write('\n')
def main():
    """Build the Tk root window, attach the GUI and run the event loop."""
    root = tk.Tk()
    root.title("Wall Line Helper For Drift")
    root.minsize(800, 600)
    app = Main_window(root)
    root.mainloop()
if __name__ == '__main__':
main() | Code/line_helper_gui.py | from __future__ import division
import Tkinter as tk
class Main_window:
    """Tk GUI for building wall-line input for drift calculations.

    The user enters line segments (start/end coordinates in ft), a wall
    height Hc and a location flag ('e' or 'i'); each added line is appended
    to comma-separated output strings that can be written to a text file.
    """

    def __init__(self, master):
        self.master = master

        ## Main Frame
        self.main_frame = tk.Frame(master, bd=2, relief='sunken', padx=10, pady=20)
        self.main_frame.pack(anchor='c', padx=10, pady=20)

        ## Entry Frame -- one labelled Entry per input field
        self.frame_entry = tk.Frame(self.main_frame, padx=5, pady=5)
        self.enter_label = tk.Label(self.frame_entry, text="Enter Lines in counterclock wise order")
        self.enter_label.grid(row=0, column=0, pady=5)

        self.start_x = tk.StringVar()
        self.start_x_label = tk.Label(self.frame_entry, text="Start x (ft) : ")
        self.start_x_label.grid(row=1, column=0, pady=5)
        self.start_x_entry = tk.Entry(self.frame_entry, textvariable=self.start_x)
        self.start_x_entry.grid(row=1, column=1, pady=5)

        self.start_y = tk.StringVar()
        self.start_y_label = tk.Label(self.frame_entry, text="Start y (ft) : ")
        self.start_y_label.grid(row=2, column=0, pady=5)
        self.start_y_entry = tk.Entry(self.frame_entry, textvariable=self.start_y)
        self.start_y_entry.grid(row=2, column=1, pady=5)

        self.end_x = tk.StringVar()
        self.end_x_label = tk.Label(self.frame_entry, text="End x (ft) : ")
        self.end_x_label.grid(row=3, column=0, pady=5)
        self.end_x_entry = tk.Entry(self.frame_entry, textvariable=self.end_x)
        self.end_x_entry.grid(row=3, column=1, pady=5)

        self.end_y = tk.StringVar()
        self.end_y_label = tk.Label(self.frame_entry, text="End y (ft) : ")
        self.end_y_label.grid(row=4, column=0, pady=5)
        self.end_y_entry = tk.Entry(self.frame_entry, textvariable=self.end_y)
        self.end_y_entry.grid(row=4, column=1, pady=5)

        self.hc_in = tk.StringVar()
        self.hc_in_label = tk.Label(self.frame_entry, text="Hc (ft) : ")
        self.hc_in_label.grid(row=5, column=0, pady=5)
        self.hc_in_entry = tk.Entry(self.frame_entry, textvariable=self.hc_in)
        self.hc_in_entry.grid(row=5, column=1, pady=5)

        # 'e' / 'i' location selector (presumably exterior/interior --
        # TODO confirm against the drift calculation's expectations).
        self.loc = tk.StringVar()
        self.loc.set('e')
        self.loc_label = tk.Label(self.frame_entry, text="e or i : ")
        self.loc_label.grid(row=6, column=0, pady=5)
        choice = ['e', 'i']
        self.loc_entry = tk.OptionMenu(self.frame_entry, self.loc, *choice)
        self.loc_entry.grid(row=6, column=1, pady=5)

        self.b_prev = tk.Button(self.frame_entry, text="Start = Last End", command=self.prev_point)
        self.b_prev.grid(row=1, column=2, padx=5, pady=5)
        self.b_add = tk.Button(self.frame_entry, text="Add Line", command=self.add_line)
        self.b_add.grid(row=7, column=1, pady=5)

        # Accumulated output strings (shown in read-only style entries).
        self.x = tk.StringVar()
        self.x_label = tk.Label(self.frame_entry, text="x_out: ")
        self.x_label.grid(row=1, column=3, pady=5)
        self.x_entry = tk.Entry(self.frame_entry, textvariable=self.x, width=100)
        self.x_entry.grid(row=1, column=4, pady=5)

        self.y = tk.StringVar()
        self.y_label = tk.Label(self.frame_entry, text="y_out: ")
        self.y_label.grid(row=2, column=3, pady=5)
        self.y_entry = tk.Entry(self.frame_entry, textvariable=self.y, width=100)
        self.y_entry.grid(row=2, column=4, pady=5)

        self.hc_out = tk.StringVar()
        self.hc_out_label = tk.Label(self.frame_entry, text="hc_out : ")
        self.hc_out_label.grid(row=3, column=3, pady=5)
        self.hc_out_entry = tk.Entry(self.frame_entry, textvariable=self.hc_out, width=100)
        self.hc_out_entry.grid(row=3, column=4, pady=5)

        self.loc_out = tk.StringVar()
        self.loc_out_label = tk.Label(self.frame_entry, text="loc_out : ")
        self.loc_out_label.grid(row=4, column=3, pady=5)
        self.loc_out_entry = tk.Entry(self.frame_entry, textvariable=self.loc_out, width=100)
        self.loc_out_entry.grid(row=4, column=4, pady=5)

        self.frame_entry.pack(side=tk.LEFT)

        ## Results Frame (currently empty placeholder)
        self.results_frame = tk.Frame(self.main_frame, padx=5, pady=5)
        self.results_frame.pack(side=tk.LEFT)

        ## Outside Main Frame
        self.b1 = tk.Button(master, text="Close", command=self.quit_app)
        self.b1.pack(side=tk.RIGHT, padx=5, pady=5)
        self.b2 = tk.Button(master, text="Write Output to txt File", command=self.write_output)
        self.b2.pack(side=tk.RIGHT, padx=5, pady=5)

        # End point of the most recently added line ("Start = Last End").
        self.prev_x = 0
        self.prev_y = 0

    def quit_app(self):
        """Destroy the window and end the Tk main loop."""
        self.master.destroy()
        self.master.quit()

    def prev_point(self):
        """Copy the last added line's end point into the start entries."""
        self.start_x.set(self.prev_x)
        self.start_y.set(self.prev_y)

    def add_line(self):
        """Append the entered line to the output strings.

        Zero-length lines (start == end, as entered text) are ignored.
        """
        x1 = self.start_x.get()
        x2 = self.end_x.get()
        y1 = self.start_y.get()
        y2 = self.end_y.get()
        if [x1, y1] == [x2, y2]:
            # Degenerate (zero-length) line -- nothing to add.
            return
        self.prev_x = x2
        self.prev_y = y2
        hc = self.hc_in.get()
        loc = self.loc.get()
        self.x.set(self.x.get() + '{0},{1},'.format(x1, x2))
        self.y.set(self.y.get() + '{0},{1},'.format(y1, y2))
        self.hc_out.set(self.hc_out.get() + '{0},'.format(hc))
        self.loc_out.set(self.loc_out.get() + "'{0}',".format(loc))

    def write_output(self):
        """Write the four accumulated output strings to
        Line_Helper_output.txt, one per line."""
        # `with` guarantees the handle is closed even on error; the previous
        # implementation also shadowed the builtin `file`.
        with open('Line_Helper_output.txt', 'w') as out:
            for line in (self.x.get(), self.y.get(),
                         self.hc_out.get(), self.loc_out.get()):
                out.write(line)
                out.write('\n')
def main():
    """Build the Tk root window, attach the GUI and run the event loop."""
    root = tk.Tk()
    root.title("Wall Line Helper For Drift")
    root.minsize(800, 600)
    app = Main_window(root)
    root.mainloop()
if __name__ == '__main__':
main() | 0.510496 | 0.081046 |
from django.db import transaction
from cms.models import CMSPlugin
from cms.utils.plugins import reorder_plugins
from .utils import get_plugin_class
@transaction.atomic
def import_plugins(plugins, placeholder, language, root_plugin_id=None):
    """Restore archived *plugins* into *placeholder* for *language*.

    *plugins* must be ordered parent-before-child (children look their
    restored parent up in source_map).  New top-level plugins are appended
    after the existing siblings.  Runs in a single DB transaction.
    """
    # Maps archived plugin pk -> freshly restored CMSPlugin so children can
    # attach to their restored parents.
    source_map = {}
    new_plugins = []
    if root_plugin_id:
        root_plugin = CMSPlugin.objects.get(pk=root_plugin_id)
        source_map[root_plugin_id] = root_plugin
    else:
        root_plugin = None
    # Existing sibling order at the target level; restored top-level
    # plugins are appended to it.
    tree_order = placeholder.get_plugin_tree_order(language, parent_id=root_plugin_id)
    for archived_plugin in plugins:
        if archived_plugin.parent_id:
            parent = source_map[archived_plugin.parent_id]
        else:
            parent = root_plugin
        if parent and parent.__class__ != CMSPlugin:
            # Downcast subclass instances to the base CMSPlugin row.
            parent = parent.cmsplugin_ptr
        plugin = archived_plugin.restore(
            placeholder=placeholder,
            language=language,
            parent=parent,
        )
        source_map[archived_plugin.pk] = plugin
        if parent == root_plugin:
            tree_order.append(plugin.pk)
        new_plugins.append(plugin)
    # Second pass: fire post-copy hooks once the whole tree exists.
    for new_plugin in new_plugins:
        plugin_class = get_plugin_class(new_plugin.plugin_type)
        if getattr(plugin_class, '_has_do_post_copy', False):
            # getattr is used for django CMS 3.4 compatibility
            # apps on 3.4 wishing to leverage this callback will need
            # to manually set the _has_do_post_copy attribute.
            plugin_class.do_post_copy(new_plugin, source_map)
    reorder_plugins(
        placeholder,
        parent_id=root_plugin_id,
        language=language,
        order=tree_order,
    )
    placeholder.mark_as_dirty(language, clear_cache=False)
@transaction.atomic
def import_plugins_to_page(placeholders, page, language):
    """Restore archived *placeholders* into the matching slots of *page*.

    Slots that no longer exist on the page, and archives without plugins,
    are skipped silently.  Runs in a single DB transaction.
    """
    page_placeholders = page.rescan_placeholders()
    for archived_placeholder in placeholders:
        plugins = archived_placeholder.plugins
        placeholder = page_placeholders.get(archived_placeholder.slot)
        if placeholder and plugins:
import_plugins(plugins, placeholder, language) | djangocms_transfer/importer.py | from django.db import transaction
from cms.models import CMSPlugin
from cms.utils.plugins import reorder_plugins
from .utils import get_plugin_class
@transaction.atomic
def import_plugins(plugins, placeholder, language, root_plugin_id=None):
    """Restore archived *plugins* into *placeholder* for *language*.

    *plugins* must be ordered parent-before-child (children look their
    restored parent up in source_map).  New top-level plugins are appended
    after the existing siblings.  Runs in a single DB transaction.
    """
    # Maps archived plugin pk -> freshly restored CMSPlugin so children can
    # attach to their restored parents.
    source_map = {}
    new_plugins = []
    if root_plugin_id:
        root_plugin = CMSPlugin.objects.get(pk=root_plugin_id)
        source_map[root_plugin_id] = root_plugin
    else:
        root_plugin = None
    # Existing sibling order at the target level; restored top-level
    # plugins are appended to it.
    tree_order = placeholder.get_plugin_tree_order(language, parent_id=root_plugin_id)
    for archived_plugin in plugins:
        if archived_plugin.parent_id:
            parent = source_map[archived_plugin.parent_id]
        else:
            parent = root_plugin
        if parent and parent.__class__ != CMSPlugin:
            # Downcast subclass instances to the base CMSPlugin row.
            parent = parent.cmsplugin_ptr
        plugin = archived_plugin.restore(
            placeholder=placeholder,
            language=language,
            parent=parent,
        )
        source_map[archived_plugin.pk] = plugin
        if parent == root_plugin:
            tree_order.append(plugin.pk)
        new_plugins.append(plugin)
    # Second pass: fire post-copy hooks once the whole tree exists.
    for new_plugin in new_plugins:
        plugin_class = get_plugin_class(new_plugin.plugin_type)
        if getattr(plugin_class, '_has_do_post_copy', False):
            # getattr is used for django CMS 3.4 compatibility
            # apps on 3.4 wishing to leverage this callback will need
            # to manually set the _has_do_post_copy attribute.
            plugin_class.do_post_copy(new_plugin, source_map)
    reorder_plugins(
        placeholder,
        parent_id=root_plugin_id,
        language=language,
        order=tree_order,
    )
    placeholder.mark_as_dirty(language, clear_cache=False)
@transaction.atomic
def import_plugins_to_page(placeholders, page, language):
    """Import every archived placeholder's plugins into *page*.

    Slots that do not exist on the page, and archived placeholders
    without plugins, are silently skipped.
    """
    slot_map = page.rescan_placeholders()
    for source in placeholders:
        archived = source.plugins
        target = slot_map.get(source.slot)
        if not (target and archived):
            continue
        import_plugins(archived, target, language)
from rest_framework import serializers, exceptions as drf_exceptions
from .....models import (
WorkflowCollectionSubscription,
WorkflowCollectionSubscriptionSchedule,
WorkflowCollection,
)
class WorkflowCollectionSubscriptionScheduleSummarySerializer(
    serializers.ModelSerializer
):
    """Model Serializer for Workflow Collection Subscription Schedule Objects."""

    class Meta:
        model = WorkflowCollectionSubscriptionSchedule
        # Only the timing fields are exposed; the owning subscription is
        # supplied by the parent serializer.
        fields = ["time_of_day", "day_of_week", "weekly_interval"]
class WorkflowCollectionSubscriptionSummarySerializer(serializers.ModelSerializer):
    """ModelSerializer for Workflow Collection Subscription Objects."""

    detail = serializers.HyperlinkedIdentityField(
        view_name="user-workflow-collection-subscription", lookup_field="id"
    )
    workflow_collection = serializers.HyperlinkedRelatedField(
        queryset=WorkflowCollection.objects.all(),
        view_name="workflow-collection",
        lookup_field="id",
    )
    # The subscription owner is always the requesting user and is never
    # taken from the payload.
    user = serializers.HiddenField(default=serializers.CurrentUserDefault())
    workflowcollectionsubscriptionschedule_set = (
        WorkflowCollectionSubscriptionScheduleSummarySerializer(many=True)
    )

    class Meta:
        model = WorkflowCollectionSubscription
        fields = [
            "detail",
            "workflow_collection",
            "user",
            "active",
            "workflowcollectionsubscriptionschedule_set",
        ]

    def create(self, validated_data):
        """Create a subscription plus its nested schedule objects.

        Parameters:
            validated_data (dict): Parsed/validated data from the incoming
                HTTP request payload, including the nested
                ``workflowcollectionsubscriptionschedule_set`` list.

        Returns:
            WorkflowCollectionSubscription: The newly created subscription.
        """
        schedules_data = validated_data.pop(
            "workflowcollectionsubscriptionschedule_set"
        )
        workflow_collection_subscription = (
            WorkflowCollectionSubscription.objects.create(
                user=validated_data["user"],
                # BUGFIX: the previous expression
                # ``validated_data["active"] if validated_data.get("active") else True``
                # coerced an explicit ``active=False`` to ``True``. Default to
                # True only when the field is absent from the payload.
                active=validated_data.get("active", True),
                workflow_collection=validated_data["workflow_collection"],
            )
        )
        # TODO: This doesn't handle a user submitting multiple schedules for
        # the same day. Should raise an appropriate validation error.
        for schedule_data in schedules_data:
            schedule_data[
                "workflow_collection_subscription"
            ] = workflow_collection_subscription
            WorkflowCollectionSubscriptionSchedule.objects.create(**schedule_data)
        return workflow_collection_subscription

    def update(self, instance, validated_data):
        """Update a subscription's active flag and replace its schedules.

        Notes:
            We only allow the user to update the "active" property of an
            existing WorkflowCollectionSubscription object or the related
            WorkflowCollectionSubscriptionSchedule objects. They cannot modify
            other aspects of the original WorkflowCollectionSubscription object.

        Parameters:
            instance (WorkflowCollectionSubscription): The object retrieved by
                DRF based on the URL route being accessed by the user.
            validated_data (dict): Parsed/validated data from the incoming
                HTTP request payload.

        Returns:
            WorkflowCollectionSubscription: The modified subscription.
        """
        # Schedules are replaced wholesale rather than merged.
        instance.workflowcollectionsubscriptionschedule_set.all().delete()
        instance.active = validated_data["active"]
        schedules_data = validated_data.pop(
            "workflowcollectionsubscriptionschedule_set"
        )
        # TODO: This doesn't handle a user submitting multiple schedules for
        # the same day. Should raise an appropriate validation error.
        for schedule_data in schedules_data:
            schedule_data["workflow_collection_subscription"] = instance
            WorkflowCollectionSubscriptionSchedule.objects.create(**schedule_data)
        instance.save()
        return instance
from .....models import (
WorkflowCollectionSubscription,
WorkflowCollectionSubscriptionSchedule,
WorkflowCollection,
)
class WorkflowCollectionSubscriptionScheduleSummarySerializer(
    serializers.ModelSerializer
):
    """Model Serializer for Workflow Collection Subscription Schedule Objects."""

    class Meta:
        model = WorkflowCollectionSubscriptionSchedule
        # Only the timing fields are exposed; the owning subscription is
        # supplied by the parent serializer.
        fields = ["time_of_day", "day_of_week", "weekly_interval"]
class WorkflowCollectionSubscriptionSummarySerializer(serializers.ModelSerializer):
    """ModelSerializer for Workflow Collection Subscription Objects."""

    detail = serializers.HyperlinkedIdentityField(
        view_name="user-workflow-collection-subscription", lookup_field="id"
    )
    workflow_collection = serializers.HyperlinkedRelatedField(
        queryset=WorkflowCollection.objects.all(),
        view_name="workflow-collection",
        lookup_field="id",
    )
    # The subscription owner is always the requesting user and is never
    # taken from the payload.
    user = serializers.HiddenField(default=serializers.CurrentUserDefault())
    workflowcollectionsubscriptionschedule_set = (
        WorkflowCollectionSubscriptionScheduleSummarySerializer(many=True)
    )

    class Meta:
        model = WorkflowCollectionSubscription
        fields = [
            "detail",
            "workflow_collection",
            "user",
            "active",
            "workflowcollectionsubscriptionschedule_set",
        ]

    def create(self, validated_data):
        """Create a subscription plus its nested schedule objects.

        Parameters:
            validated_data (dict): Parsed/validated data from the incoming
                HTTP request payload, including the nested
                ``workflowcollectionsubscriptionschedule_set`` list.

        Returns:
            WorkflowCollectionSubscription: The newly created subscription.
        """
        schedules_data = validated_data.pop(
            "workflowcollectionsubscriptionschedule_set"
        )
        workflow_collection_subscription = (
            WorkflowCollectionSubscription.objects.create(
                user=validated_data["user"],
                # BUGFIX: the previous expression
                # ``validated_data["active"] if validated_data.get("active") else True``
                # coerced an explicit ``active=False`` to ``True``. Default to
                # True only when the field is absent from the payload.
                active=validated_data.get("active", True),
                workflow_collection=validated_data["workflow_collection"],
            )
        )
        # TODO: This doesn't handle a user submitting multiple schedules for
        # the same day. Should raise an appropriate validation error.
        for schedule_data in schedules_data:
            schedule_data[
                "workflow_collection_subscription"
            ] = workflow_collection_subscription
            WorkflowCollectionSubscriptionSchedule.objects.create(**schedule_data)
        return workflow_collection_subscription

    def update(self, instance, validated_data):
        """Update a subscription's active flag and replace its schedules.

        Notes:
            We only allow the user to update the "active" property of an
            existing WorkflowCollectionSubscription object or the related
            WorkflowCollectionSubscriptionSchedule objects. They cannot modify
            other aspects of the original WorkflowCollectionSubscription object.

        Parameters:
            instance (WorkflowCollectionSubscription): The object retrieved by
                DRF based on the URL route being accessed by the user.
            validated_data (dict): Parsed/validated data from the incoming
                HTTP request payload.

        Returns:
            WorkflowCollectionSubscription: The modified subscription.
        """
        # Schedules are replaced wholesale rather than merged.
        instance.workflowcollectionsubscriptionschedule_set.all().delete()
        instance.active = validated_data["active"]
        schedules_data = validated_data.pop(
            "workflowcollectionsubscriptionschedule_set"
        )
        # TODO: This doesn't handle a user submitting multiple schedules for
        # the same day. Should raise an appropriate validation error.
        for schedule_data in schedules_data:
            schedule_data["workflow_collection_subscription"] = instance
            WorkflowCollectionSubscriptionSchedule.objects.create(**schedule_data)
        instance.save()
        return instance
import sys
sys.path.append(
"./src/")
import unittest
from undefined.API import trace
from undefined.Calculator import *
class TestAPI(unittest.TestCase):
    """Unit tests for ``undefined.API.trace`` in forward and reverse mode.

    ``np``, ``math`` and ``sqrt`` come from the star-import of
    ``undefined.Calculator`` at module level.
    """

    def setUp(self):
        # Shared scalar / vector fixtures and small test functions of one
        # and two variables.
        self.a = 2
        self.b = - 1.11
        self.c = 99999.99
        self.d = -99999.99
        self.e = np.array([[self.a, self.b]])
        self.f = np.array([[self.a, self.b, self.c, self.d]])
        self.g = math.pi / 2
        self.h = math.e
        self.j = np.array([[self.a, self.b, self.g, self.h]])
        self.s = "str"
        self.f0 = lambda x: sqrt(x)
        self.f1 = lambda x: x + 2
        self.f2 = lambda x, y: x + y - 1
        self.f3 = lambda x, y: 2*x + sqrt(y)
        self.f12 = lambda x, y: sqrt(x) + sqrt(y)

    def test_trace_forward(self):
        """Forward-mode values/derivatives, seeded calls, and seed type errors."""
        # Stop using string comparing!!!!!!!
        self.assertEqual(trace(self.f1, x=2), (4, 1))
        self.assertEqual(trace(self.f1, mode='forward', x=2), (4, 1))
        self.assertEqual(trace(self.f2, x=2, y=4), (5, [[1], [1]]))
        self.assertEqual(trace(self.f2, y=4, x=2), (5, [[1], [1]]))
        self.assertEqual(trace(self.f3, x=2, y=4), (6.0, [[2.], [0.25]]))
        self.assertEqual(trace(self.f3, x=2, y=4, seeds=np.array([[2, 2], [1, 1]])), (6.0, [[4.25], [4.25]]))
        with self.assertRaises(TypeError):
            trace(self.f1, seeds="1", x=np.array([[999]]))
        with self.assertRaises(TypeError):
            trace(self.f2, seeds="1", x=np.array([[999]]), y=np.array([[99]]))
        with self.assertRaises(TypeError):
            trace(self.f2, seeds=np.array([1, 2]), x=np.array([[999]]), y=np.array([[99]]))
        self.assertEqual(trace(self.f12, seeds=np.array([[1, 2], [0, 1]]), x=1, y=2)[1], [[0.5], [1.354]])
        self.assertEqual(trace(self.f0, seeds=1, x=np.array([[10, 1]]))[1], [[0.158, 0.5]])

    def test_trace_with_incompatible_inputs(self):
        """Mismatched vector lengths, unknown modes and extra kwargs raise."""
        with self.assertRaises(AttributeError):
            trace(self.f2, mode='undefined', x=np.array([[1, 2, 3]]), y=np.array([[1, 2]]))
        with self.assertRaises(AttributeError):
            trace(self.f2, x=np.array([[1, 2, 3]]), y=np.array([[1, 2]]))
        with self.assertRaises(AttributeError):
            trace(self.f2, x=1, y=2, z=3)

    def test_trace_with_incompatible_seeds(self):
        """A seed matrix whose shape does not match the inputs raises."""
        with self.assertRaises(AttributeError):
            trace(self.f2, seeds=np.array([[1, 3, 2], [1, 3, 2], [2, 3, 3]]), x=np.array([[2, 3]]), y=np.array([[1, 2]]))

    def test_trace_with_incompatible_seeds_reverse(self):
        """Same seed-shape validation applies in reverse mode."""
        with self.assertRaises(AttributeError):
            trace(self.f2, seeds=np.array([[1, 3, 2], [1, 3, 2], [2, 3, 3]]), mode="reverse", x=np.array([[2, 3]]), y=np.array([[1, 2]]))

    def test_trace_with_scalar_inputs_seeds(self):
        """Scalar inputs with scalar/matrix seeds, plus invalid seed types."""
        with self.assertRaises(TypeError):
            trace(self.f1, seeds=np.array([[1, 0]]), x=1)
        self.assertEqual(trace(self.f1, seeds=2, x=1), (3, 2))
        self.assertEqual(trace(self.f2, seeds=np.array([[1, 0], [0, 1]]), x=2, y=999)[1], [[1], [1]])
        with self.assertRaises(TypeError):
            trace(self.f2, seeds=1, x=2, y=999)
        with self.assertRaises(TypeError):
            trace(self.f2, seeds="seed", x=2, y=999)
        with self.assertRaises(TypeError):
            trace(self.f2, seeds=np.array([[1, 0], [0, 1]]), x="2", y=999)
        with self.assertRaises(AttributeError):
            trace(self.f2, seeds=np.array([[1], [0]]), x=2, y=999)

    def test_trace_with_scalar_inputs_seeds_reverse(self):
        """Mirror of the scalar-seed cases in reverse mode."""
        with self.assertRaises(TypeError):
            trace(self.f1, seeds=np.array([[1, 0]]), mode='reverse', x=1)
        self.assertEqual(trace(self.f1, seeds=2, mode='reverse', x=1), (3, 2))
        self.assertEqual(trace(self.f2, seeds=np.array([[1, 0], [0, 1]]), mode='reverse', x=2, y=999)[1], [[1], [1]])
        with self.assertRaises(TypeError):
            trace(self.f2, seeds=1, mode='reverse', x=2, y=999)
        with self.assertRaises(TypeError):
            trace(self.f2, seeds="seed", mode='reverse', x=2, y=999)
        with self.assertRaises(TypeError):
            trace(self.f2, seeds=np.array([[1, 0], [0, 1]]), mode='reverse', x="2", y=999)
        with self.assertRaises(AttributeError):
            trace(self.f2, seeds=np.array([[1], [0]]), mode='reverse', x=2, y=999)

    def test_trace_with_vector_inputs_seeds(self):
        """Vector inputs with an identity seed matrix (forward mode)."""
        self.assertEqual(trace(self.f2, seeds=np.array([[1, 0], [0, 1]]), x=np.array([[2, 3]]), y=np.array([[1, 2]]))[1], [[1, 1], [1, 1]])

    def test_trace_with_vector_inputs_seeds_reverse(self):
        """Vector inputs with an identity seed matrix (reverse mode)."""
        self.assertEqual(trace(self.f2, seeds=np.array([[1, 0], [0, 1]]), mode='reverse', x=np.array([[2, 3]]), y=np.array([[1, 2]]))[1], [[1, 1], [1, 1]])

    def test_trace_with_different_moded(self):
        """Default, 'forward' and 'reverse' agree; unknown modes raise."""
        self.assertEqual(trace(self.f1, x=2), (4, 1))
        self.assertEqual(trace(self.f1, mode='forward', x=2), (4, 1))
        self.assertEqual(trace(self.f1, mode='reverse', x=2), (4, 1))
        with self.assertRaises(AttributeError):
            trace(self.f1, mode='undefined', x=2)

    def test_trace_reverse(self):
        """Reverse-mode values/derivatives plus bad-input errors."""
        self.assertEqual(trace(self.f1, mode='reverse', x=2), (4, 1))
        self.assertEqual(trace(self.f1, mode='reverse', x=2), (4, 1))
        with self.assertRaises(AttributeError):
            trace(self.f1, mode='undefined', x=2)
        self.assertEqual(trace(self.f2, mode='reverse', x=2, y=4), (5, [[1], [1]]))
        self.assertEqual(trace(self.f2, mode='reverse', y=4, x=2), (5, [[1], [1]]))
        self.assertEqual(trace(self.f3, mode='reverse', x=2, y=4), (6.0, [[2.], [0.25]]))
        self.assertEqual(str(trace(self.f1, x=2)), '(4, 1)')
        self.assertEqual(str(trace(self.f1, mode='forward', x=2)), '(4, 1)')
        with self.assertRaises(AttributeError) as context:
            trace(self.f1, mode='undefined', x=2)
        with self.assertRaises(TypeError) as context:
            (trace(self.f1, mode='reverse', x=np.array([2, 1])))
        with self.assertRaises(TypeError) as context:
            (trace(self.f1, mode='reverse', x="1"))

    def test_trace_multiple_vector_inputs(self):
        """Two vector inputs produce element-wise derivative columns."""
        self.assertEqual(trace(self.f3, x=np.array([[2, 2]]), y=np.array([[4, 4]]))[1], [[2., 2.], [0.25, 0.25]])
        self.assertEqual(trace(self.f3, mode='reverse', x=np.array([[2, 2]]), y=np.array([[4, 4]]))[1], [[2., 2.], [0.25, 0.25]])

    def test_trace_single_vector_input(self):
        """A single vector input works; an empty array is rejected."""
        self.assertEqual(trace(self.f1, x=np.array([[2, 2]]))[1], [[1, 1]])
        with self.assertRaises(TypeError):
            trace(self.f1, x=np.array([]))

    def test_trace_non_lambda_function(self):
        """A non-callable first argument raises TypeError."""
        with self.assertRaises(TypeError):
            trace("Function", x=1)

    def test_trace_vector_functions(self):
        """A list of functions yields a derivative block per function."""
        self.assertEqual(trace([self.f2, self.f3], x=2, y=4)[1].tolist(), [[[1.0], [1.0]], [[2.0], [0.25]]])

    def test_mixed_inputs(self):
        """Scalar and vector inputs may be mixed; scalars are broadcast."""
        self.assertEqual(trace(self.f3, x=np.array([[2, 2]]), y=4)[0].tolist(), [[6, 6]])
        self.assertEqual(trace(self.f3, mode='reverse', x=np.array([[2, 2]]), y=4)[0].tolist(), [[6, 6]])
        self.assertEqual(trace(self.f3, x=np.array([[2, 2]]), y=4)[1], [[2., 2.], [0.25, 0.25]])
        self.assertEqual(trace(self.f3, mode='reverse', x=np.array([[2, 2]]), y=4)[1], [[2., 2.], [0.25, 0.25]])
        self.assertEqual(trace(self.f3, mode='reverse', x=np.array([[2, 2]]), y=np.array([[4, 4]]))[1], [[2., 2.], [0.25, 0.25]])
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
sys.path.append(
"./src/")
import unittest
from undefined.API import trace
from undefined.Calculator import *
class TestAPI(unittest.TestCase):
    """Unit tests for ``undefined.API.trace`` in forward and reverse mode.

    ``np``, ``math`` and ``sqrt`` come from the star-import of
    ``undefined.Calculator`` at module level.
    """

    def setUp(self):
        # Shared scalar / vector fixtures and small test functions of one
        # and two variables.
        self.a = 2
        self.b = - 1.11
        self.c = 99999.99
        self.d = -99999.99
        self.e = np.array([[self.a, self.b]])
        self.f = np.array([[self.a, self.b, self.c, self.d]])
        self.g = math.pi / 2
        self.h = math.e
        self.j = np.array([[self.a, self.b, self.g, self.h]])
        self.s = "str"
        self.f0 = lambda x: sqrt(x)
        self.f1 = lambda x: x + 2
        self.f2 = lambda x, y: x + y - 1
        self.f3 = lambda x, y: 2*x + sqrt(y)
        self.f12 = lambda x, y: sqrt(x) + sqrt(y)

    def test_trace_forward(self):
        """Forward-mode values/derivatives, seeded calls, and seed type errors."""
        # Stop using string comparing!!!!!!!
        self.assertEqual(trace(self.f1, x=2), (4, 1))
        self.assertEqual(trace(self.f1, mode='forward', x=2), (4, 1))
        self.assertEqual(trace(self.f2, x=2, y=4), (5, [[1], [1]]))
        self.assertEqual(trace(self.f2, y=4, x=2), (5, [[1], [1]]))
        self.assertEqual(trace(self.f3, x=2, y=4), (6.0, [[2.], [0.25]]))
        self.assertEqual(trace(self.f3, x=2, y=4, seeds=np.array([[2, 2], [1, 1]])), (6.0, [[4.25], [4.25]]))
        with self.assertRaises(TypeError):
            trace(self.f1, seeds="1", x=np.array([[999]]))
        with self.assertRaises(TypeError):
            trace(self.f2, seeds="1", x=np.array([[999]]), y=np.array([[99]]))
        with self.assertRaises(TypeError):
            trace(self.f2, seeds=np.array([1, 2]), x=np.array([[999]]), y=np.array([[99]]))
        self.assertEqual(trace(self.f12, seeds=np.array([[1, 2], [0, 1]]), x=1, y=2)[1], [[0.5], [1.354]])
        self.assertEqual(trace(self.f0, seeds=1, x=np.array([[10, 1]]))[1], [[0.158, 0.5]])

    def test_trace_with_incompatible_inputs(self):
        """Mismatched vector lengths, unknown modes and extra kwargs raise."""
        with self.assertRaises(AttributeError):
            trace(self.f2, mode='undefined', x=np.array([[1, 2, 3]]), y=np.array([[1, 2]]))
        with self.assertRaises(AttributeError):
            trace(self.f2, x=np.array([[1, 2, 3]]), y=np.array([[1, 2]]))
        with self.assertRaises(AttributeError):
            trace(self.f2, x=1, y=2, z=3)

    def test_trace_with_incompatible_seeds(self):
        """A seed matrix whose shape does not match the inputs raises."""
        with self.assertRaises(AttributeError):
            trace(self.f2, seeds=np.array([[1, 3, 2], [1, 3, 2], [2, 3, 3]]), x=np.array([[2, 3]]), y=np.array([[1, 2]]))

    def test_trace_with_incompatible_seeds_reverse(self):
        """Same seed-shape validation applies in reverse mode."""
        with self.assertRaises(AttributeError):
            trace(self.f2, seeds=np.array([[1, 3, 2], [1, 3, 2], [2, 3, 3]]), mode="reverse", x=np.array([[2, 3]]), y=np.array([[1, 2]]))

    def test_trace_with_scalar_inputs_seeds(self):
        """Scalar inputs with scalar/matrix seeds, plus invalid seed types."""
        with self.assertRaises(TypeError):
            trace(self.f1, seeds=np.array([[1, 0]]), x=1)
        self.assertEqual(trace(self.f1, seeds=2, x=1), (3, 2))
        self.assertEqual(trace(self.f2, seeds=np.array([[1, 0], [0, 1]]), x=2, y=999)[1], [[1], [1]])
        with self.assertRaises(TypeError):
            trace(self.f2, seeds=1, x=2, y=999)
        with self.assertRaises(TypeError):
            trace(self.f2, seeds="seed", x=2, y=999)
        with self.assertRaises(TypeError):
            trace(self.f2, seeds=np.array([[1, 0], [0, 1]]), x="2", y=999)
        with self.assertRaises(AttributeError):
            trace(self.f2, seeds=np.array([[1], [0]]), x=2, y=999)

    def test_trace_with_scalar_inputs_seeds_reverse(self):
        """Mirror of the scalar-seed cases in reverse mode."""
        with self.assertRaises(TypeError):
            trace(self.f1, seeds=np.array([[1, 0]]), mode='reverse', x=1)
        self.assertEqual(trace(self.f1, seeds=2, mode='reverse', x=1), (3, 2))
        self.assertEqual(trace(self.f2, seeds=np.array([[1, 0], [0, 1]]), mode='reverse', x=2, y=999)[1], [[1], [1]])
        with self.assertRaises(TypeError):
            trace(self.f2, seeds=1, mode='reverse', x=2, y=999)
        with self.assertRaises(TypeError):
            trace(self.f2, seeds="seed", mode='reverse', x=2, y=999)
        with self.assertRaises(TypeError):
            trace(self.f2, seeds=np.array([[1, 0], [0, 1]]), mode='reverse', x="2", y=999)
        with self.assertRaises(AttributeError):
            trace(self.f2, seeds=np.array([[1], [0]]), mode='reverse', x=2, y=999)

    def test_trace_with_vector_inputs_seeds(self):
        """Vector inputs with an identity seed matrix (forward mode)."""
        self.assertEqual(trace(self.f2, seeds=np.array([[1, 0], [0, 1]]), x=np.array([[2, 3]]), y=np.array([[1, 2]]))[1], [[1, 1], [1, 1]])

    def test_trace_with_vector_inputs_seeds_reverse(self):
        """Vector inputs with an identity seed matrix (reverse mode)."""
        self.assertEqual(trace(self.f2, seeds=np.array([[1, 0], [0, 1]]), mode='reverse', x=np.array([[2, 3]]), y=np.array([[1, 2]]))[1], [[1, 1], [1, 1]])

    def test_trace_with_different_moded(self):
        """Default, 'forward' and 'reverse' agree; unknown modes raise."""
        self.assertEqual(trace(self.f1, x=2), (4, 1))
        self.assertEqual(trace(self.f1, mode='forward', x=2), (4, 1))
        self.assertEqual(trace(self.f1, mode='reverse', x=2), (4, 1))
        with self.assertRaises(AttributeError):
            trace(self.f1, mode='undefined', x=2)

    def test_trace_reverse(self):
        """Reverse-mode values/derivatives plus bad-input errors."""
        self.assertEqual(trace(self.f1, mode='reverse', x=2), (4, 1))
        self.assertEqual(trace(self.f1, mode='reverse', x=2), (4, 1))
        with self.assertRaises(AttributeError):
            trace(self.f1, mode='undefined', x=2)
        self.assertEqual(trace(self.f2, mode='reverse', x=2, y=4), (5, [[1], [1]]))
        self.assertEqual(trace(self.f2, mode='reverse', y=4, x=2), (5, [[1], [1]]))
        self.assertEqual(trace(self.f3, mode='reverse', x=2, y=4), (6.0, [[2.], [0.25]]))
        self.assertEqual(str(trace(self.f1, x=2)), '(4, 1)')
        self.assertEqual(str(trace(self.f1, mode='forward', x=2)), '(4, 1)')
        with self.assertRaises(AttributeError) as context:
            trace(self.f1, mode='undefined', x=2)
        with self.assertRaises(TypeError) as context:
            (trace(self.f1, mode='reverse', x=np.array([2, 1])))
        with self.assertRaises(TypeError) as context:
            (trace(self.f1, mode='reverse', x="1"))

    def test_trace_multiple_vector_inputs(self):
        """Two vector inputs produce element-wise derivative columns."""
        self.assertEqual(trace(self.f3, x=np.array([[2, 2]]), y=np.array([[4, 4]]))[1], [[2., 2.], [0.25, 0.25]])
        self.assertEqual(trace(self.f3, mode='reverse', x=np.array([[2, 2]]), y=np.array([[4, 4]]))[1], [[2., 2.], [0.25, 0.25]])

    def test_trace_single_vector_input(self):
        """A single vector input works; an empty array is rejected."""
        self.assertEqual(trace(self.f1, x=np.array([[2, 2]]))[1], [[1, 1]])
        with self.assertRaises(TypeError):
            trace(self.f1, x=np.array([]))

    def test_trace_non_lambda_function(self):
        """A non-callable first argument raises TypeError."""
        with self.assertRaises(TypeError):
            trace("Function", x=1)

    def test_trace_vector_functions(self):
        """A list of functions yields a derivative block per function."""
        self.assertEqual(trace([self.f2, self.f3], x=2, y=4)[1].tolist(), [[[1.0], [1.0]], [[2.0], [0.25]]])

    def test_mixed_inputs(self):
        """Scalar and vector inputs may be mixed; scalars are broadcast."""
        self.assertEqual(trace(self.f3, x=np.array([[2, 2]]), y=4)[0].tolist(), [[6, 6]])
        self.assertEqual(trace(self.f3, mode='reverse', x=np.array([[2, 2]]), y=4)[0].tolist(), [[6, 6]])
        self.assertEqual(trace(self.f3, x=np.array([[2, 2]]), y=4)[1], [[2., 2.], [0.25, 0.25]])
        self.assertEqual(trace(self.f3, mode='reverse', x=np.array([[2, 2]]), y=4)[1], [[2., 2.], [0.25, 0.25]])
        self.assertEqual(trace(self.f3, mode='reverse', x=np.array([[2, 2]]), y=np.array([[4, 4]]))[1], [[2., 2.], [0.25, 0.25]])
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
import os
import sys
import re
import sqlite3 as sqlite
try:
if sys.version_info < (2, 3):
raise ImportError
import unittest2
unittest = unittest2
except ImportError:
import unittest
unittest2 = None
import xls2db
def do_one(xls_filename, dbname, do_drop=False):
    """Run one xls -> sqlite conversion.

    ``do_drop`` is forwarded to ``xls2db`` only when truthy, so the
    library's own default applies otherwise.
    """
    if not do_drop:
        xls2db.xls2db(xls_filename, dbname)
    else:
        xls2db.xls2db(xls_filename, dbname, do_drop=do_drop)
class AllTests(unittest.TestCase):
    """End-to-end tests: convert .xls fixtures with xls2db, inspect sqlite."""

    def test_stackhaus(self):
        """Column definitions of every converted table match the fixture."""
        xls_filename, dbname = 'stackhaus.xls', 'stackhaus.db'
        # Drop any database left over from a previous run. Only a missing
        # file is expected here, so catch OSError instead of the previous
        # bare ``except`` that silently hid unrelated errors.
        try:
            os.remove(dbname)
        except OSError:
            pass
        do_one(xls_filename, dbname)
        stackhaus = sqlite.connect(dbname)
        try:
            tests = {
                "locations": [
                    "id string primary key",
                    "short_descr string",
                    "long_descr string",
                    "special string"
                ],
                "links": [
                    "src string",
                    "dst string",
                    "dir string"
                ],
                "items": [
                    "id string primary key",
                    "location string",
                    "short_descr string",
                    "long_descr string",
                    "get_descr string",
                    "get_pts integer",
                    "use_desc string",
                    "use_pts integer"
                ]
            }
            # Tuple unpacking instead of indexing t[0]/t[1].
            for table, headers in tests.items():
                row = stackhaus.execute(
                    "SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = 'table'", (table,)
                ).fetchone()
                for header in headers:
                    msg = u'header ' + header + u' in ' + table
                    self.assertTrue(re.search(header, row[0]), 'x ' + msg)
        finally:
            # Previously leaked; close the connection like the other tests do.
            stackhaus.close()

    def test_comma(self):
        """Column names containing commas survive the conversion."""
        xls_filename, dbname = 'comma_test.xls', ':memory:'
        db = sqlite.connect(dbname)
        try:
            c = db.cursor()
            do_one(xls_filename, db)
            c.execute("SELECT tbl_name FROM sqlite_master WHERE type='table'")
            rows = c.fetchall()
            self.assertEqual(rows, [(u'comma_test',)])
            c.execute("SELECT * FROM comma_test ORDER BY 1")
            rows = c.fetchall()
            col_names = []
            for c_description in c.description:
                col_names.append(c_description[0])
            self.assertEqual(col_names, [u'col1', u'english, text'])
            self.assertEqual(rows, [(1.0, u'one'), (2.0, u'two'), (3.0, u'three')])
        finally:
            db.close()

    def test_empty_worksheet(self):
        """Empty worksheets are skipped; only populated sheets become tables."""
        xls_filename, dbname = 'empty_worksheet_test.xls', ':memory:'
        db = sqlite.connect(dbname)
        try:
            c = db.cursor()
            do_one(xls_filename, db)
            c.execute("SELECT tbl_name FROM sqlite_master WHERE type='table'")
            rows = c.fetchall()
            self.assertEqual(rows, [(u'simple_test',)])
        finally:
            db.close()

    def test_simple_test(self):
        """Basic conversion produces the expected table and rows."""
        xls_filename, dbname = 'simple_test.xls', ':memory:'
        db = sqlite.connect(dbname)
        try:
            c = db.cursor()
            do_one(xls_filename, db)
            c.execute("SELECT tbl_name FROM sqlite_master WHERE type='table'")
            rows = c.fetchall()
            self.assertEqual(rows, [(u'simple_test',)])
            c.execute("SELECT * FROM simple_test ORDER BY 1")
            rows = c.fetchall()
            self.assertEqual(rows, [(1.0, u'one'), (2.0, u'two'), (3.0, u'three')])
        finally:
            db.close()

    def test_simple_test_twice(self):
        """Re-converting without do_drop fails because the table exists."""
        xls_filename, dbname = 'simple_test.xls', ':memory:'
        db = sqlite.connect(dbname)
        c = db.cursor()

        def do_one_simple_conversion():
            do_one(xls_filename, db)
            c.execute("SELECT tbl_name FROM sqlite_master WHERE type='table'")
            rows = c.fetchall()
            self.assertEqual(rows, [(u'simple_test',)])
            c.execute("SELECT * FROM simple_test ORDER BY 1")
            rows = c.fetchall()
            self.assertEqual(rows, [(1.0, u'one'), (2.0, u'two'), (3.0, u'three')])

        try:
            do_one_simple_conversion()
            self.assertRaises(sqlite.OperationalError, do_one_simple_conversion)
        finally:
            db.close()

    def test_simple_test_twice_with_drop(self):
        """Re-converting with do_drop=True replaces the table cleanly."""
        xls_filename, dbname = 'simple_test.xls', ':memory:'
        db = sqlite.connect(dbname)
        c = db.cursor()

        def do_one_simple_conversion():
            do_one(xls_filename, db, do_drop=True)
            c.execute("SELECT tbl_name FROM sqlite_master WHERE type='table'")
            rows = c.fetchall()
            self.assertEqual(rows, [(u'simple_test',)])
            c.execute("SELECT * FROM simple_test ORDER BY 1")
            rows = c.fetchall()
            self.assertEqual(rows, [(1.0, u'one'), (2.0, u'two'), (3.0, u'three')])

        try:
            do_one_simple_conversion()
            do_one_simple_conversion()  # do again
        finally:
            db.close()
def main():
    """Chdir into the example/ fixture directory, then run the suite."""
    # Some tests may use data files (without a full pathname), so the
    # working directory must be the one holding the fixtures.
    here = os.path.dirname(__file__)
    # Just assume the current directory when __file__ has no directory part.
    testpath = os.path.join(here, 'example') if here else 'example'
    try:
        os.chdir(testpath)
    except OSError:
        # this may be Jython 2.2 under OpenJDK...
        if sys.version_info > (2, 3):
            raise
    unittest.main()
# Allow running this test module directly.
if __name__ == '__main__':
    main()
import sys
import re
import sqlite3 as sqlite
try:
if sys.version_info < (2, 3):
raise ImportError
import unittest2
unittest = unittest2
except ImportError:
import unittest
unittest2 = None
import xls2db
def do_one(xls_filename, dbname, do_drop=False):
    """Run one xls -> sqlite conversion.

    ``do_drop`` is forwarded to ``xls2db`` only when truthy, so the
    library's own default applies otherwise.
    """
    if not do_drop:
        xls2db.xls2db(xls_filename, dbname)
    else:
        xls2db.xls2db(xls_filename, dbname, do_drop=do_drop)
class AllTests(unittest.TestCase):
    """End-to-end tests: convert .xls fixtures with xls2db, inspect sqlite."""

    def test_stackhaus(self):
        """Column definitions of every converted table match the fixture."""
        xls_filename, dbname = 'stackhaus.xls', 'stackhaus.db'
        # Drop any database left over from a previous run. Only a missing
        # file is expected here, so catch OSError instead of the previous
        # bare ``except`` that silently hid unrelated errors.
        try:
            os.remove(dbname)
        except OSError:
            pass
        do_one(xls_filename, dbname)
        stackhaus = sqlite.connect(dbname)
        try:
            tests = {
                "locations": [
                    "id string primary key",
                    "short_descr string",
                    "long_descr string",
                    "special string"
                ],
                "links": [
                    "src string",
                    "dst string",
                    "dir string"
                ],
                "items": [
                    "id string primary key",
                    "location string",
                    "short_descr string",
                    "long_descr string",
                    "get_descr string",
                    "get_pts integer",
                    "use_desc string",
                    "use_pts integer"
                ]
            }
            # Tuple unpacking instead of indexing t[0]/t[1].
            for table, headers in tests.items():
                row = stackhaus.execute(
                    "SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = 'table'", (table,)
                ).fetchone()
                for header in headers:
                    msg = u'header ' + header + u' in ' + table
                    self.assertTrue(re.search(header, row[0]), 'x ' + msg)
        finally:
            # Previously leaked; close the connection like the other tests do.
            stackhaus.close()

    def test_comma(self):
        """Column names containing commas survive the conversion."""
        xls_filename, dbname = 'comma_test.xls', ':memory:'
        db = sqlite.connect(dbname)
        try:
            c = db.cursor()
            do_one(xls_filename, db)
            c.execute("SELECT tbl_name FROM sqlite_master WHERE type='table'")
            rows = c.fetchall()
            self.assertEqual(rows, [(u'comma_test',)])
            c.execute("SELECT * FROM comma_test ORDER BY 1")
            rows = c.fetchall()
            col_names = []
            for c_description in c.description:
                col_names.append(c_description[0])
            self.assertEqual(col_names, [u'col1', u'english, text'])
            self.assertEqual(rows, [(1.0, u'one'), (2.0, u'two'), (3.0, u'three')])
        finally:
            db.close()

    def test_empty_worksheet(self):
        """Empty worksheets are skipped; only populated sheets become tables."""
        xls_filename, dbname = 'empty_worksheet_test.xls', ':memory:'
        db = sqlite.connect(dbname)
        try:
            c = db.cursor()
            do_one(xls_filename, db)
            c.execute("SELECT tbl_name FROM sqlite_master WHERE type='table'")
            rows = c.fetchall()
            self.assertEqual(rows, [(u'simple_test',)])
        finally:
            db.close()

    def test_simple_test(self):
        """Basic conversion produces the expected table and rows."""
        xls_filename, dbname = 'simple_test.xls', ':memory:'
        db = sqlite.connect(dbname)
        try:
            c = db.cursor()
            do_one(xls_filename, db)
            c.execute("SELECT tbl_name FROM sqlite_master WHERE type='table'")
            rows = c.fetchall()
            self.assertEqual(rows, [(u'simple_test',)])
            c.execute("SELECT * FROM simple_test ORDER BY 1")
            rows = c.fetchall()
            self.assertEqual(rows, [(1.0, u'one'), (2.0, u'two'), (3.0, u'three')])
        finally:
            db.close()

    def test_simple_test_twice(self):
        """Re-converting without do_drop fails because the table exists."""
        xls_filename, dbname = 'simple_test.xls', ':memory:'
        db = sqlite.connect(dbname)
        c = db.cursor()

        def do_one_simple_conversion():
            do_one(xls_filename, db)
            c.execute("SELECT tbl_name FROM sqlite_master WHERE type='table'")
            rows = c.fetchall()
            self.assertEqual(rows, [(u'simple_test',)])
            c.execute("SELECT * FROM simple_test ORDER BY 1")
            rows = c.fetchall()
            self.assertEqual(rows, [(1.0, u'one'), (2.0, u'two'), (3.0, u'three')])

        try:
            do_one_simple_conversion()
            self.assertRaises(sqlite.OperationalError, do_one_simple_conversion)
        finally:
            db.close()

    def test_simple_test_twice_with_drop(self):
        """Re-converting with do_drop=True replaces the table cleanly."""
        xls_filename, dbname = 'simple_test.xls', ':memory:'
        db = sqlite.connect(dbname)
        c = db.cursor()

        def do_one_simple_conversion():
            do_one(xls_filename, db, do_drop=True)
            c.execute("SELECT tbl_name FROM sqlite_master WHERE type='table'")
            rows = c.fetchall()
            self.assertEqual(rows, [(u'simple_test',)])
            c.execute("SELECT * FROM simple_test ORDER BY 1")
            rows = c.fetchall()
            self.assertEqual(rows, [(1.0, u'one'), (2.0, u'two'), (3.0, u'three')])

        try:
            do_one_simple_conversion()
            do_one_simple_conversion()  # do again
        finally:
            db.close()
def main():
    """Chdir into the example/ fixture directory, then run the suite."""
    # Some tests may use data files (without a full pathname), so the
    # working directory must be the one holding the fixtures.
    here = os.path.dirname(__file__)
    # Just assume the current directory when __file__ has no directory part.
    testpath = os.path.join(here, 'example') if here else 'example'
    try:
        os.chdir(testpath)
    except OSError:
        # this may be Jython 2.2 under OpenJDK...
        if sys.version_info > (2, 3):
            raise
    unittest.main()
# Allow running this test module directly.
if __name__ == '__main__':
    main()
from tempfile import TemporaryFile,gettempdir;
import os as _os;
from xlwt import Workbook,XFStyle;
from datetime import datetime,date,time,timedelta;
import calendar;
class ProjectToExcel(object):
    """Export a list of project dicts to a temporary .xls workbook.

    Python 2 code: the ``('...').decode('UTF8')`` calls below decode
    UTF-8 byte-string literals (Thai column titles) to unicode for xlwt.
    """
    def __init__(self):
        pass;

    def exportToExcel(self,objectProject):
        """Write *objectProject* (iterable of project dicts) to
        <tempdir>/simple.xls and return that file path."""
        book = Workbook();
        sheet1 = book.add_sheet('Sheet 1')
        if( objectProject):
            i=0;
            # Header row with Thai column titles.
            row1 = sheet1.row(i) ;
            row1.write(0, ('ประเภท').decode('UTF8') );
            row1.write(1, ('ชื่อโครงการ').decode('UTF8'));
            row1.write(2, ('รายละเอืยด').decode('UTF8') );
            row1.write(3, ('งบประมาณรวม').decode('UTF8') );
            row1.write(4, ('งบประมาณ').decode('UTF8') );
            row1.write(5, ('เงินบำรุง').decode('UTF8') );
            row1.write(6, ('งบประมาณอื่น').decode('UTF8') );
            row1.write(7, ('งบประมาณอื่นจาก').decode('UTF8') );
            row1.write(8, ('ผู้รับผิดชอบ').decode('UTF8') );
            row1.write(9, ('กลุ่ม').decode('UTF8') );
            row1.write(10, ('หน่วย/งาน').decode('UTF8') );
            i=i+1;
            # Number format for the budget columns (3-6).
            style = XFStyle();
            style.num_format_str = '#,##0.00';
            for value in objectProject:
                row1 = sheet1.row(i) ;
                row1.write(0, value.get('project_type').decode('UTF8') );
                row1.write(1, value.get('project_name').decode('UTF8') );
                row1.write(2, value.get('detail').decode('UTF8') );
                row1.write(3, value.get('allBudget') ,style );
                row1.write(4, value.get('project_budget' ) ,style );
                row1.write(5, value.get('maintenance_funds_budget'),style );
                row1.write(6, value.get('budget_other') ,style );
                # Optional text fields are written only when present.
                if(value.get('budget_other_from')):
                    row1.write(7, value.get('budget_other_from').decode('UTF8') );
                if(value.get('user_name')):
                    row1.write(8, value.get('user_name').decode('UTF8') );
                # NOTE(review): indentation was lost in this dump; assuming
                # division/section are written for every row (not only when
                # user_name is set) -- confirm against the original file.
                row1.write(9, value.get('division').decode('UTF8') );
                row1.write(10, value.get('section').decode('UTF8') );
                i=i+1;
        # NOTE(review): assuming the save/return happens even for an empty
        # objectProject (method level, outside the ``if``) -- confirm.
        dirTempFile = gettempdir() + _os.sep + str('simple.xls');
        book.save(dirTempFile);
        #book.save(TemporaryFile());
        return dirTempFile;
import os as _os;
from xlwt import Workbook,XFStyle;
from datetime import datetime,date,time,timedelta;
import calendar;
class ProjectToExcel(object):
def __init__(self):
pass;
def exportToExcel(self,objectProject):
book = Workbook();
sheet1 = book.add_sheet('Sheet 1')
if( objectProject):
i=0;
row1 = sheet1.row(i) ;
row1.write(0, ('ประเภท').decode('UTF8') );
row1.write(1, ('ชื่อโครงการ').decode('UTF8'));
row1.write(2, ('รายละเอืยด').decode('UTF8') );
row1.write(3, ('งบประมาณรวม').decode('UTF8') );
row1.write(4, ('งบประมาณ').decode('UTF8') );
row1.write(5, ('เงินบำรุง').decode('UTF8') );
row1.write(6, ('งบประมาณอื่น').decode('UTF8') );
row1.write(7, ('งบประมาณอื่นจาก').decode('UTF8') );
row1.write(8, ('ผู้รับผิดชอบ').decode('UTF8') );
row1.write(9, ('กลุ่ม').decode('UTF8') );
row1.write(10, ('หน่วย/งาน').decode('UTF8') );
i=i+1;
style = XFStyle();
style.num_format_str = '#,##0.00';
for value in objectProject:
row1 = sheet1.row(i) ;
row1.write(0, value.get('project_type').decode('UTF8') );
row1.write(1, value.get('project_name').decode('UTF8') );
row1.write(2, value.get('detail').decode('UTF8') );
row1.write(3, value.get('allBudget') ,style );
row1.write(4, value.get('project_budget' ) ,style );
row1.write(5, value.get('maintenance_funds_budget'),style );
row1.write(6, value.get('budget_other') ,style );
if(value.get('budget_other_from')):
row1.write(7, value.get('budget_other_from').decode('UTF8') );
if(value.get('user_name')):
row1.write(8, value.get('user_name').decode('UTF8') );
row1.write(9, value.get('division').decode('UTF8') );
row1.write(10, value.get('section').decode('UTF8') );
i=i+1;
dirTempFile = gettempdir() + _os.sep + str('simple.xls');
book.save(dirTempFile);
#book.save(TemporaryFile());
return dirTempFile; | 0.181662 | 0.053108 |
import collections
import numpy as np
# Aggregate outcome of one simulation pass over the dataset.
RunResults = collections.namedtuple('RunResults', ['total_clicks',
                                                   'total_impressions',
                                                   'total_ad_spend'])


class BidSimulator:
    """Simulates a given bidding strategy on a dataset."""

    def __init__(self, data, bidding_strategy):
        """Initialize bidding simulator.

        Parameters
        ----------
        data : pandas.DataFrame
            Historical data containing features for model prediction, bidding
            price, winning price, impressions and click indicators.
        bidding_strategy : func
            Function that returns a bid given propensity to click and data row
        """
        self._data = data
        self._bidding_strategy = bidding_strategy

    def run(self, ctr_model=None):
        """Run bidding simulator.

        Parameters
        ----------
        ctr_model : sklearn-like model, optional
            Binary classifier for click propensity. When None, the strategy
            is invoked with propensity=None.

        Returns
        -------
        RunResults
            Totals of clicks, won impressions and ad spend over the dataset.
        """
        total_impressions = 0
        total_ad_spend = 0
        total_clicks = 0
        for i, row in self._data.iterrows():
            if ctr_model is not None:
                # Features are everything except the outcome columns.
                prospenity = ctr_model.predict_proba(
                    row.drop(['click',
                              'paying_price']).values.reshape(1, -1))[0][1]
            else:
                prospenity = None
            bid = self._bidding_strategy(prospenity, row)
            # We win the auction when our bid covers the winning (paying)
            # price; clicks are only credited on won impressions.
            if bid >= row['paying_price']:
                total_impressions += 1
                total_ad_spend += row['paying_price']
                if row['click']:
                    total_clicks += 1
        return RunResults(total_clicks, total_impressions, total_ad_spend)

    @staticmethod
    def metrics_report(run_results):
        """Generate a textual report of CTR, CPM and CPC metrics.

        Parameters
        ----------
        run_results : RunResults

        Returns
        -------
        str
            Report with one metric per line (CTR, CPM, CPC).
        """
        ctr = BidSimulator.ctr(run_results.total_clicks,
                               run_results.total_impressions)
        cpm = BidSimulator.cpm(run_results.total_ad_spend,
                               run_results.total_impressions)
        cpc = BidSimulator.cpc(run_results.total_ad_spend,
                               run_results.total_clicks)
        report = "CTR:\t%.2f\nCPM:\t%.3f\nCPC:\t%.3f" % (ctr, cpm, cpc)
        return report

    @staticmethod
    def ctr(num_of_clicks, num_of_impressions):
        """Calculate Click Through Rate - frequency of clicks on ads."""
        return num_of_clicks / num_of_impressions \
            if num_of_impressions > 0 else 0

    @staticmethod
    def cpm(total_spendings, num_of_impressions):
        """Calculate Cost Per Mille
        - total cost advertiser pays for 1000 impressions."""
        return total_spendings / num_of_impressions * 1000 \
            if num_of_impressions > 0 else 0

    @staticmethod
    def cpc(total_spendings, num_of_clicks):
        """Calculate Cost Per Click."""
        return total_spendings / num_of_clicks if num_of_clicks > 0 else 0

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__,
                           self._bidding_strategy.__class__.__name__)
class RandomBiddingStrategy(object):
    """Bid a uniformly random fraction of a fixed base bid."""

    def __init__(self, bid):
        """Create random bidding strategy.

        Parameters
        ----------
        bid : float
            Upper bound of the random bid.
        """
        self._bid = bid

    def __call__(self, prospenity, row):
        """Return a bid drawn uniformly from [0, bid).

        Parameters
        ----------
        prospenity : float
            Propensity to click (ignored by this strategy).
        row : dict-like
            Data row (ignored by this strategy).

        Returns
        -------
        bid_price : float
        """
        fraction = np.random.rand()
        return fraction * self._bid
class FlatBiddingStrategy():
    """Always bid the same constant amount."""

    def __init__(self, bid):
        """Create flat bidding strategy.

        Parameters
        ----------
        bid : float
            Constant bid value.
        """
        self._bid = bid

    def __call__(self, prospenity, row):
        """Return the constant bid, ignoring propensity and row.

        Parameters
        ----------
        prospenity : float
            Propensity to click (ignored).
        row : dict-like
            Data row (ignored).

        Returns
        -------
        bid_price : float
        """
        return self._bid
class GoalBiddingStrategy():
    """Scale a base bid by the predicted click propensity."""

    def __init__(self, bid):
        """Create goal bidding strategy.

        Parameters
        ----------
        bid : float
            Base bid, scaled by propensity at call time.
        """
        self._bid = bid

    def __call__(self, prospenity, row):
        """Return the propensity-weighted bid.

        Parameters
        ----------
        prospenity : float
            Propensity to click.
        row : dict-like
            Data row (ignored).

        Returns
        -------
        bid_price : float
        """
        return self._bid * prospenity
class EffectiveCPCBiddingStrategy(GoalBiddingStrategy):
"""Bid based on prospenity and CPC calculated from training data"""
def __init__(self, data):
"""Create bidding strategy
Parameters
----------
data : pd.DataFrame
Historical data
"""
effective_cpc = data['paying_price'].sum() / data['click'].sum()
print(effective_cpc)
super().__init__(effective_cpc) | rtb/bidding.py | import collections
import numpy as np
RunResults = collections.namedtuple('RunResults', ['total_clicks',
'total_impressions',
'total_ad_spend'])
class BidSimulator:
"""Simulates given bidding strategy on a dataset"""
def __init__(self, data, bidding_strategy):
"""Initialize bidding simulator.
Parameters
----------
data : pandas.DataFrame
Historical data containing features for model predicti n, bidding
price, winning price, impressions and click indicators.
bidding_strategy : func
Function that retuns bid given prospenity to click and data row
"""
self._data = data
self._bidding_strategy = bidding_strategy
def run(self, ctr_model=None):
"""Run bidding simulator
Parameters
----------
ctr_model : sklearn-like model
Binary classifier for click prospenity
Returns
-------
bids : list
Bids for each entry in the data"""
total_impressions = 0
total_ad_spend = 0
total_clicks = 0
for i, row in self._data.iterrows():
if ctr_model is not None:
prospenity = ctr_model.predict_proba(
row.drop(['click',
'paying_price']).values.reshape(1, -1))[0][1]
else:
prospenity = None
bid = self._bidding_strategy(prospenity, row)
if bid >= row['paying_price']:
total_impressions += 1
total_ad_spend += row['paying_price']
if row['click']:
total_clicks += 1
return RunResults(total_clicks, total_impressions, total_ad_spend)
@staticmethod
def metrics_report(run_results):
"""Generate metric let g:pymode_lint = 0u
Parameters
----------
run_results : RunResults
Returns
-------
cpc : float
Cost Per Click.
ctr : float
Click Through Rate.
cpm : float
Cost Per Mille.
"""
ctr = BidSimulator.ctr(run_results.total_clicks,
run_results.total_impressions)
cpm = BidSimulator.cpm(run_results.total_ad_spend,
run_results.total_impressions)
cpc = BidSimulator.cpc(run_results.total_ad_spend,
run_results.total_clicks)
report = "CTR:\t%.2f\nCPM:\t%.3f\nCPC:\t%.3f" % (ctr, cpm, cpc)
return report
@staticmethod
def ctr(num_of_clicks, num_of_impressions):
"""Claculate Click Through Rate - frequency of clicks on ads."""
return num_of_clicks / num_of_impressions \
if num_of_impressions > 0 else 0
@staticmethod
def cpm(total_spendings, num_of_impressions):
"""Calculate Cost Per Mille
- total cost advertiser pays for 1000 impressions."""
return total_spendings / num_of_impressions * 1000 \
if num_of_impressions > 0 else 0
@staticmethod
def cpc(total_spendings, num_of_clicks):
"""Calculate Cost Per Click"""
return total_spendings / num_of_clicks if num_of_clicks > 0 else 0
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__,
self._bidding_strategy.__class__.__name__)
class RandomBiddingStrategy(object):
"""Random strategy that places random pertribations of a base bid"""
def __init__(self, bid):
"""Create random bidding strategy
Parameters
----------
bid : float
Bid value
"""
self._bid = bid
def __call__(self, prospenity, row):
"""Execute bidding strategy
Parameters
----------
prospenity : float
prospenity to click
row : dict-like
data row with features, pricing, impression and click data
Returns
-------
bid_price : float
"""
return np.random.rand() * self._bid
class FlatBiddingStrategy():
"""Constant bid"""
def __init__(self, bid):
"""Create flat bidding strategy
Parameters
----------
bid : float
Bid value
"""
self._bid = bid
def __call__(self, prospenity, row):
"""Execute bidding strategy
Parameters
----------
prospenity : float
prospenity to click
row : dict-like
data row with features, pricing, impression and click data
Returns
-------
bid_price : float
"""
return self._bid
class GoalBiddingStrategy():
"""Bid based on prospenity"""
def __init__(self, bid):
"""Create bidding strategy
Parameters
----------
bid : float
Bid value
"""
self._bid = bid
def __call__(self, prospenity, row):
"""Execute bidding strategy
Parameters
----------
prospenity : float
prospenity to click
row : dict-like
data row with features, pricing, impression and click data
Returns
-------
bid_price : float
"""
return prospenity * self._bid
class EffectiveCPCBiddingStrategy(GoalBiddingStrategy):
"""Bid based on prospenity and CPC calculated from training data"""
def __init__(self, data):
"""Create bidding strategy
Parameters
----------
data : pd.DataFrame
Historical data
"""
effective_cpc = data['paying_price'].sum() / data['click'].sum()
print(effective_cpc)
super().__init__(effective_cpc) | 0.892375 | 0.545225 |
from typing import List
from flotypes import *
from astree import *
from lexer import TokType, Token
from errors import SyntaxError
from errors import Range
def str_to_flotype(str):
    """Map a primitive type name ("int", "float", "void") to its flotype.

    Returns None for any name that is not one of the three primitives.
    """
    if str == "int":
        return FloInt(None)
    elif str == "float":
        return FloFloat(None)
    elif str == "void":
        return FloVoid(None)
    return None
class Parser:
    def __init__(self, tokens: List[Token]):
        """Create a parser over *tokens* and prime the first token."""
        self.tokens = tokens
        self.current_tok = None
        self.current_i = -1  # advance() below moves the cursor to index 0
        self.advance()
def advance(self):
self.current_i += 1
if self.current_i < len(self.tokens):
self.current_tok = self.tokens[self.current_i]
else:
self.current_tok = self.tokens[-1]
    def parse(self):
        """Parse the entire token stream into a StmtsNode.

        Raises a syntax error if any tokens remain after the top-level
        statements (i.e. the stream does not end at EOF).
        """
        res = self.stmts()
        if self.current_tok.type != TokType.EOF:
            SyntaxError(
                self.current_tok.range,
                f"Unexpected '{self.current_tok.type.value}', Expected '+', '-', '*' '/', '^' or an identifier",
            ).throw()
        return res
    def skip_new_lines(self) -> None:
        """Consume consecutive newline (LN) tokens."""
        while self.current_tok.type == TokType.LN:
            self.advance()
def stmts(self):
stmts = []
range_start = self.current_tok.range
self.skip_new_lines()
while (
self.current_tok.type != TokType.RBRACE
and self.current_tok.type != TokType.EOF
):
stmt = self.stmt()
stmts.append(stmt)
self.skip_new_lines()
return StmtsNode(stmts, Range.merge(range_start, self.current_tok.range))
    def block(self):
        """Parse a block body.

        With a leading '{', parses expressions up to the matching '}';
        without braces, a single expression serves as the body.
        """
        self.skip_new_lines()
        if self.current_tok.type != TokType.LBRACE:
            # Brace-less form: the block is one expression.
            return self.expression()
        self.advance()
        if self.current_tok.type == TokType.RBRACE:
            # NOTE(review): an empty '{}' yields a plain list, while a
            # non-empty block yields a StmtsNode — confirm callers accept
            # both shapes.
            self.advance()
            return []
        stmts = self.expressions()
        if self.current_tok.type != TokType.RBRACE:
            SyntaxError(self.current_tok.range, "Expected '}'").throw()
        self.advance()
        return stmts
def stmt(self):
self.skip_new_lines()
tok = self.current_tok
if tok.isKeyword("import"):
return self.import_stmt()
if tok.isKeyword("const"):
return self.const_declaration()
if tok.isKeyword("type"):
return self.type_alias()
if tok.isKeyword("class"):
return self.class_declaration()
if tok.isKeyword("enum"):
return self.enum_declaration()
elif tok.isKeyword(("fnc")):
return self.fnc_def_stmt()
else:
SyntaxError(tok.range, f"Unexpected '{tok.value}'").throw()
def expressions(self):
expressions = []
range_start = self.current_tok.range
self.skip_new_lines()
while (
self.current_tok.type != TokType.RBRACE
and self.current_tok.type != TokType.EOF
):
stmt = self.expression()
expressions.append(stmt)
self.skip_new_lines()
return StmtsNode(expressions, Range.merge(range_start, self.current_tok.range))
def expression(self):
tok = self.current_tok
if tok.isKeyword("if"):
return self.if_stmt()
elif tok.isKeyword("for"):
return self.for_stmt()
elif tok.isKeyword("while"):
return self.while_stmt()
elif tok.inKeywordList(("return", "continue", "break")):
return self.change_flow_stmt()
return self.expr()
def import_stmt(self):
range_start = self.current_tok.range
self.advance()
ids = []
path = ""
if self.current_tok.type == TokType.IDENTIFER:
ids = self.identifier_list()
if not self.current_tok.isKeyword("from"):
SyntaxError(self.current_tok.range,
"Expected keyword 'from'").throw()
self.advance()
if self.current_tok.type != TokType.STR:
SyntaxError(self.current_tok.range, "Expected a string").throw()
path = self.current_tok
self.advance()
return ImportNode(
ids, path, Range.merge(
range_start, path.range)
)
def if_stmt(self) -> IfNode:
range_start = self.current_tok.range
self.advance()
cases = []
else_case = None
cond = self.expr()
stmts = self.block()
self.skip_new_lines()
cases.append((cond, stmts))
if self.current_tok.isKeyword("else"):
self.advance()
if self.current_tok.isKeyword("if"):
resCases = self.if_stmt()
cases += resCases.cases
else_case = resCases.else_case
else:
stmts = self.block()
else_case = stmts
range_end = (else_case or cases[len(cases) - 1][0]).range
return IfNode(cases, else_case, Range.merge(range_start, range_end))
def const_declaration(self) -> ConstDeclarationNode:
self.advance()
range_start = self.current_tok.range
if self.current_tok.type != TokType.IDENTIFER:
SyntaxError(range_start, "Expected and identifier").throw()
name_tok = self.current_tok
self.advance()
if self.current_tok.type != TokType.EQ:
SyntaxError(self.current_tok.range, "Expected '='").throw()
self.advance()
value_node = self.expr()
node_range = Range.merge(range_start, self.current_tok.range)
return ConstDeclarationNode(name_tok, value_node, node_range)
def type_alias(self):
range_start = self.current_tok.range
self.advance()
identifier = self.current_tok
self.advance()
if self.current_tok.type != TokType.EQ:
SyntaxError(self.current_tok.range, "Expected =").throw()
self.advance()
type = self.composite_type()
node_range = Range.merge(range_start, type.range)
return TypeAliasNode(identifier, type, node_range)
def generic_constraint(self):
tok = self.current_tok
if tok.type != TokType.IDENTIFER:
SyntaxError(tok.range, "Expected an identifer").throw()
self.advance()
return tok
def generic_constraints(self):
constraints = [self.generic_constraint()]
while self.current_tok.type == TokType.COMMA:
self.advance()
constraints.append(self.generic_constraint())
return constraints
def class_declaration(self) -> Union[ClassDeclarationNode, GenericClassNode]:
self.advance()
range_start = self.current_tok.range
if self.current_tok.type != TokType.IDENTIFER:
SyntaxError(range_start, "Expected and identifier").throw()
name = self.current_tok
self.advance()
constraints = None
if self.current_tok.type == TokType.LT:
self.advance()
constraints = self.generic_constraints()
if self.current_tok.type != TokType.GT:
SyntaxError(self.current_tok.range, "Expected a '>'").throw()
self.advance()
parent = None
if self.current_tok.isKeyword("extends"):
self.advance()
parent = self.prim_type()
class_body = self.class_block()
node_range = Range.merge(range_start, self.current_tok.range)
node = ClassDeclarationNode(name, parent, class_body, node_range)
if constraints != None:
node = GenericClassNode(constraints, node, node_range)
return node
    def class_block(self):
        """Parse a '{'-delimited class body into a StmtsNode of members."""
        if self.current_tok.type != TokType.LBRACE:
            SyntaxError(self.current_tok.range, "Expected '{'").throw()
        self.advance()
        statements = []
        range_start = self.current_tok.range
        while self.current_tok.type != TokType.RBRACE:
            # NOTE(review): no EOF guard here — an unterminated class body
            # relies on class_stmt() raising; confirm that is intended.
            self.skip_new_lines()
            statements.append(self.class_stmt())
        if self.current_tok.type != TokType.RBRACE:
            # NOTE(review): unreachable — the loop above only exits on
            # RBRACE, so this check can never fire.
            SyntaxError(self.current_tok.range, "Expected '}'").throw()
        node_range = Range.merge(range_start, self.current_tok.range)
        self.advance()
        return StmtsNode(statements, node_range)
def class_stmt(self):
access_modifier = None
if self.current_tok.inKeywordList(["public", "private", "static"]):
access_modifier = self.current_tok
self.advance()
if self.current_tok.type != TokType.IDENTIFER:
SyntaxError(self.current_tok.range, "Expected an Identifer").throw()
name = self.current_tok
self.advance()
if self.current_tok.type == TokType.COL:
self.advance()
property_type = self.composite_type()
node_range = Range.merge(name.range, property_type.range)
return PropertyDeclarationNode(access_modifier, name, property_type, node_range)
elif self.current_tok.type == TokType.LPAR:
method_body = self.function_body()
node_range = Range.merge(name.range, method_body.range)
return MethodDeclarationNode(access_modifier, name, method_body, node_range)
else:
SyntaxError(self.current_tok.range, "Expected a property declaration or a method declaration").throw()
def enum_declaration(self) -> EnumDeclarationNode:
self.advance()
range_start = self.current_tok.range
token_list = []
if self.current_tok.type != TokType.IDENTIFER:
SyntaxError(self.current_tok.range, "Expected an Identifier").throw()
name = self.current_tok
self.advance()
if self.current_tok.type != TokType.LBRACE:
SyntaxError(self.current_tok.range, "Expected '{'").throw()
self.advance()
self.skip_new_lines()
while self.current_tok.type == TokType.IDENTIFER:
token_list.append(self.current_tok)
self.advance()
self.skip_new_lines()
if self.current_tok.type != TokType.RBRACE:
SyntaxError(self.current_tok.range, "Expected '}'").throw()
self.advance()
node_range = Range.merge(range_start, self.current_tok.range)
return EnumDeclarationNode(name, token_list, node_range)
def for_stmt(self) -> ForNode:
self.advance()
init = None
range_start = self.current_tok.range
init = self.expr()
if self.current_tok.isKeyword("in"):
self.advance()
it = self.expr()
stmts = self.block()
return ForEachNode(
init, it, stmts, Range.merge(
range_start, self.current_tok.range)
)
if self.current_tok.type != TokType.SEMICOL:
SyntaxError(self.current_tok.range, "Expected ';'").throw()
self.advance()
cond = self.expr()
if self.current_tok.type != TokType.SEMICOL:
SyntaxError(self.current_tok.range, "Expected ';'").throw()
self.advance()
incr_decr = self.expr()
stmts = self.block()
return ForNode(
init, cond, incr_decr, stmts, Range.merge(range_start, stmts.range)
)
def while_stmt(self):
self.advance()
cond = self.expr()
stmts = self.block()
return WhileNode(cond, stmts, Range.merge(cond.range, stmts.range))
def fnc_def_stmt(self):
self.advance()
range_start = self.current_tok.range
if self.current_tok.type != TokType.IDENTIFER:
SyntaxError(self.current_tok.range, "Expected Identifier").throw()
var_name = self.current_tok
self.advance()
if self.current_tok.type != TokType.LPAR:
SyntaxError(self.current_tok.range, "Expected '('").throw()
function_body = self.function_body()
return FncDefNode(var_name, function_body, Range.merge(range_start, function_body.range))
def function_body(self):
self.advance()
range_start = self.current_tok.range
args, is_var_arg = self.arg_list()
if self.current_tok.type != TokType.RPAR:
SyntaxError(self.current_tok.range, "Expected ')'").throw()
self.advance()
return_type = None
if self.current_tok.type == TokType.COL:
self.advance()
return_type = self.composite_type()
body = None
if self.current_tok.type == TokType.LBRACE:
body = self.block()
return FncNode(
args,
body,
is_var_arg,
Range.merge(range_start, self.current_tok.range),
return_type,
)
def identifier_list(self):
args = []
if self.current_tok.type == TokType.IDENTIFER:
id = self.current_tok
self.advance()
args.append(id)
while self.current_tok.type == TokType.COMMA:
self.advance()
if self.current_tok.type != TokType.IDENTIFER:
SyntaxError(
self.current_tok.range, "Expected an Identifier"
).throw()
args.append(self.current_tok)
self.advance()
return args
def arg_item(self):
id = self.current_tok
default_val = None
self.advance()
if self.current_tok.type == TokType.EQ:
self.advance()
default_val = self.expr()
return (id, None, default_val)
if self.current_tok.type != TokType.COL:
SyntaxError(
id.range, "Expected ':' or '=' after identifier").throw()
self.advance()
type_id = self.composite_type()
if self.current_tok.type == TokType.EQ:
self.advance()
default_val = self.expr()
return (id, type_id, default_val)
def arg_list(self):
args = []
is_var_arg = False
if self.current_tok.type == TokType.DOT_DOT_DOT:
is_var_arg = True
self.advance()
if self.current_tok.type == TokType.IDENTIFER:
args.append(self.arg_item())
while self.current_tok.type == TokType.COMMA and not is_var_arg:
self.advance()
if self.current_tok.type == TokType.DOT_DOT_DOT:
is_var_arg = True
self.advance()
if self.current_tok.type != TokType.IDENTIFER:
SyntaxError(
self.current_tok.range, "Expected an Identifier"
).throw()
args.append(self.arg_item())
return args, is_var_arg
def change_flow_stmt(self):
range_start = self.current_tok.range
if self.current_tok.isKeyword("return"):
self.advance()
expr = None
if self.current_tok.type not in (TokType.LN, TokType.EOF, TokType.RBRACE):
expr = self.expr()
range = (
range_start if expr is None else Range.merge(
range_start, expr.range)
)
return ReturnNode(expr, range)
elif self.current_tok.isKeyword("continue"):
self.advance()
return ContinueNode(range_start)
elif self.current_tok.isKeyword("break"):
self.advance()
return BreakNode(range_start)
def expr(self):
return self.num_op(
self.bit_expr,
((TokType.KEYWORD, "as"), (TokType.KEYWORD, "is")),
self.composite_type,
)
def bit_expr(self):
return self.num_op(
self.comp_expr,
(
(TokType.KEYWORD, "and"),
(TokType.KEYWORD, "or"),
(TokType.KEYWORD, "xor"),
(TokType.KEYWORD, "in"),
TokType.SL,
TokType.SR,
),
)
def comp_expr(self):
if self.current_tok.type == TokType.NOT:
tok = self.current_tok
self.advance()
expr = self.comp_expr()
return UnaryNode(tok, expr, Range.merge(tok.range, expr.range))
return self.num_op(
self.arith_expr,
(
TokType.NEQ,
TokType.EEQ,
TokType.LT,
TokType.LEQ,
TokType.GT,
TokType.GTE,
),
)
def arith_expr(self):
return self.num_op(self.range_expr, (TokType.PLUS, TokType.MINUS))
def range_expr(self):
node = None
if self.current_tok.type != TokType.DOT_DOT:
node = self.arith_expr1()
start_range = node.range
else:
start_range = self.current_tok.range
if self.current_tok.type == TokType.DOT_DOT:
self.advance()
end = self.arith_expr1()
node = RangeNode(node, end, Range.merge(start_range, end.range))
return node
def arith_expr1(self):
return self.num_op(
self.unary_expr, (TokType.MULT, TokType.DIV,
TokType.MOD, TokType.POW)
)
def unary_expr(self):
tok = self.current_tok
if tok.type in (TokType.PLUS, TokType.MINUS, TokType.AMP):
self.advance()
f = self.unary_expr()
return UnaryNode(tok, f, Range.merge(tok.range, f.range))
elif tok.type in (TokType.PLUS_PLUS, TokType.MINUS_MINUS):
self.advance()
f = self.unary_expr()
return IncrDecrNode(
tok, f, True, Range.merge(tok.range, self.current_tok.range)
)
elif tok.isKeyword("new"):
return self.new_memexpr()
return self.unary_expr1()
def new_memexpr(self):
tok = self.current_tok
self.advance()
type = self.composite_type()
args = None
end_range = self.current_tok.range
if self.current_tok.type == TokType.LPAR:
self.advance()
if self.current_tok.type == TokType.RPAR:
node_range = Range.merge(tok.range, self.current_tok.range)
self.advance()
return NewMemNode(type, [], node_range)
args = self.expr_list()
if self.current_tok.type != TokType.RPAR:
SyntaxError(self.current_tok.range, "Expected )").throw()
end_range = self.current_tok.range
self.advance()
node_range = Range.merge(tok.range, end_range)
return NewMemNode(type, args, node_range)
def unary_expr1(self):
node = self.expr_value_op()
if self.current_tok.type in (TokType.PLUS_PLUS, TokType.MINUS_MINUS) and (isinstance(node, VarAccessNode)
or isinstance(node, ArrayAccessNode) or isinstance(node, PropertyAccessNode)):
tok = self.current_tok
self.advance()
return IncrDecrNode(
tok, node, False, Range.merge(
tok.range, self.current_tok.range)
)
return node
def expr_list(self):
args = []
expr = self.expr()
args.append(expr)
while self.current_tok.type == TokType.COMMA:
self.advance()
expr = self.expr()
args.append(expr)
return args
def assign_part(self, node: Node):
self.advance()
value = self.expr()
node_range = Range.merge(node.range, value.range)
if isinstance(node, VarAccessNode):
return VarAssignNode(node.var_name, value, None, node_range)
if isinstance(node, ArrayAccessNode):
return ArrayAssignNode(node, value, node_range)
elif isinstance(node, PropertyAccessNode):
return PropertyAssignNode(node, value, node_range)
else:
SyntaxError(
node.range, "Unexpected expression expected identifier or array").throw()
def expr_value_op(self):
range_start = self.current_tok.range
node = self.expr_value()
while (
self.current_tok.type == TokType.LBRACKET
or self.current_tok.type == TokType.LPAR
or self.current_tok.type == TokType.DOT
):
if self.current_tok.type == TokType.DOT:
node = self.property_access(node)
elif self.current_tok.type == TokType.LBRACKET:
self.advance()
expr = self.expr()
if self.current_tok.type != TokType.RBRACKET:
SyntaxError(self.current_tok.range, "Expected ']'").throw()
end_range = self.current_tok.range
self.advance()
node = ArrayAccessNode(
node, expr, Range.merge(node.range, end_range))
elif self.current_tok.type == TokType.LPAR:
self.advance()
args = []
if self.current_tok.type != TokType.RPAR:
args = self.expr_list()
if self.current_tok.type != TokType.RPAR:
SyntaxError(self.current_tok.range, "Expected ')'").throw()
end_range = self.current_tok.range
self.advance()
node = FncCallNode(
node, args, Range.merge(node.range, end_range))
if self.current_tok.type == TokType.EQ:
return self.assign_part(node)
node.range = Range.merge(range_start, node.range)
return node
def expr_value(self):
tok = self.current_tok
if tok.type == TokType.INT:
self.advance()
return IntNode(tok, tok.range)
if tok.type == TokType.FLOAT:
self.advance()
return FloatNode(tok, tok.range)
if tok.type == TokType.CHAR:
self.advance()
return CharNode(tok, tok.range)
elif tok.type == TokType.STR:
self.advance()
return StrNode(tok, tok.range)
elif tok.type == TokType.IDENTIFER:
self.advance()
node_type = None
if self.current_tok.type != TokType.EQ and self.current_tok.type != TokType.COL:
return VarAccessNode(tok, tok.range)
elif self.current_tok.type == TokType.COL:
self.advance()
node_type = self.composite_type()
if self.current_tok.type != TokType.EQ:
SyntaxError(self.current_tok.range, "Expected '='").throw()
self.advance()
value = self.expr()
node_range = Range.merge(tok.range, value.range)
return VarAssignNode(tok, value, node_type, node_range)
elif tok.type == TokType.LPAR:
self.advance()
exp = self.expr()
if self.current_tok.type == TokType.RPAR:
self.advance()
return exp
SyntaxError(self.current_tok.range, "Expected ')'").throw()
elif tok.type == TokType.LBRACKET:
self.advance()
list = []
if self.current_tok.type != TokType.RBRACKET:
list = self.expr_list()
if self.current_tok.type != TokType.RBRACKET:
SyntaxError(self.current_tok.range, "Expected ']'").throw()
end_range = self.current_tok.range
self.advance()
return ArrayNode(list, Range.merge(tok.range, end_range))
SyntaxError(
tok.range, f"Expected an expression value before '{tok}'").throw()
def property_access(self, expr):
self.advance()
ident = self.current_tok
node_range = ident.range
expr = PropertyAccessNode(expr, ident, node_range)
if ident.type != TokType.IDENTIFER:
SyntaxError(node_range, "Expected an Identifier").throw()
self.advance()
return expr
def prim_type(self):
tok = self.current_tok
self.advance()
if tok.type == TokType.LBRACE:
if self.current_tok.type != TokType.INT:
SyntaxError(self.current_tok.range, "Expected an int constant").throw()
size = self.current_tok.value
self.advance()
if self.current_tok.type != TokType.RBRACE:
SyntaxError(self.current_tok.range, "Expected a }").throw()
self.advance()
if not self.current_tok.inKeywordList(("int", "float")):
SyntaxError(self.current_tok.range,
"Expected an 'int' or 'float'").throw()
type = str_to_flotype(self.current_tok.value)
type.bits = size
end_range = self.current_tok.range
self.advance()
return TypeNode(type, Range.merge(tok.range, end_range))
elif tok.inKeywordList(("int", "float", "void")):
type = str_to_flotype(tok.value)
return TypeNode(type, tok.range)
elif tok.type == TokType.IDENTIFER:
type = FloObject(tok)
if self.current_tok.type == TokType.LT:
self.advance()
arg_list = self.type_list()
type = FloGeneric(tok, arg_list)
if self.current_tok.type != TokType.GT and self.current_tok.type != TokType.SR:
SyntaxError(self.current_tok.range, "Expected '>'").throw()
if self.current_tok.type == TokType.GT:
self.advance()
else:
self.current_tok.type = TokType.GT
return TypeNode(type, tok.range)
def type_list(self):
types = [self.composite_type()]
while(self.current_tok.type == TokType.COMMA):
self.advance()
types.append(self.composite_type())
return types
def fnc_type(self):
range_start = self.current_tok.range
self.advance()
arg_types = []
if self.current_tok.type != TokType.RPAR:
arg_types = self.type_list()
if self.current_tok.type != TokType.RPAR:
SyntaxError(self.current_tok.range, "Expected ')'").throw()
self.advance()
if self.current_tok.type != TokType.ARROW:
SyntaxError(self.current_tok.range, "Expected '=>'").throw()
self.advance()
type = FloInlineFunc(None, arg_types, self.composite_type())
return TypeNode(type, Range.merge(range_start, type.return_type.range))
def composite_type(self):
tok = self.current_tok
type = None
if tok.inKeywordList(("int", "float", "void")) or tok.type == TokType.IDENTIFER or tok.type == TokType.LBRACE:
type = self.prim_type()
elif tok.type == TokType.LPAR:
return self.fnc_type()
while self.current_tok.type == TokType.MULT or self.current_tok.type == TokType.LBRACKET:
if self.current_tok.type == TokType.MULT:
end_range = self.current_tok.range
self.advance()
type = TypeNode(FloPointer(type), Range.merge(type.range, end_range))
else:
self.advance()
size = self.expr()
if self.current_tok.type != TokType.RBRACKET:
if self.current_tok.type != TokType.RBRACKET:
SyntaxError(self.current_tok.range,
"Expected ']'").throw()
end_range = self.current_tok.range
self.advance()
arr_ty = FloArray(None, size)
arr_ty.elm_type = type
type = TypeNode(arr_ty, Range.merge(type.range, end_range))
if type:
return type
else:
SyntaxError(tok.range, "Expected type definition").throw()
def num_op(self, func_a, toks, func_b=None):
if func_b == None:
func_b = func_a
left_node = func_a()
while (
self.current_tok.type in toks
or (self.current_tok.type, self.current_tok.value) in toks
):
op_tok = self.current_tok
self.advance()
if self.current_tok.type == TokType.EQ:
assign_node = self.assign_part(left_node)
node_range = Range.merge(left_node.range, assign_node.range)
num_op_node = NumOpNode(
left_node, op_tok, assign_node.value, assign_node.value.range)
assign_node.value = num_op_node
assign_node.range = node_range
return assign_node
else:
right_node = func_b()
left_node = NumOpNode(
left_node,
op_tok,
right_node,
Range.merge(left_node.range, right_node.range),
)
return left_node | src/parser.py | from typing import List
from flotypes import *
from astree import *
from lexer import TokType, Token
from errors import SyntaxError
from errors import Range
def str_to_flotype(str):
    """Map a primitive type name to a fresh Flo type instance.

    Returns FloInt/FloFloat/FloVoid for 'int'/'float'/'void' and None for
    any other name.

    NOTE: the parameter shadows the builtin ``str``; the name is kept for
    backward compatibility with existing callers.
    """
    # fixed: chain was an inconsistent mix of `if`/`elif`; the fall-through
    # None return is now explicit.
    if str == "int":
        return FloInt(None)
    elif str == "float":
        return FloFloat(None)
    elif str == "void":
        return FloVoid(None)
    return None
class Parser:
def __init__(self, tokens: List[Token]):
    """Create a parser over *tokens* positioned on the first token."""
    self.tokens = tokens
    self.current_tok = None
    self.current_i = -1
    # advance() moves current_i to 0 and loads the first token.
    self.advance()
def advance(self):
    """Move to the next token.

    After the end of the stream, current_tok stays pinned on the last
    token (presumably an EOF sentinel emitted by the lexer -- an empty
    token list would raise IndexError here; TODO confirm the lexer always
    appends EOF).
    """
    self.current_i += 1
    if self.current_i < len(self.tokens):
        self.current_tok = self.tokens[self.current_i]
    else:
        self.current_tok = self.tokens[-1]
def parse(self):
    """Parse the whole token stream into a statements node.

    Throws a SyntaxError diagnostic if parsing stops before EOF.
    """
    res = self.stmts()
    if self.current_tok.type != TokType.EOF:
        SyntaxError(
            self.current_tok.range,
            f"Unexpected '{self.current_tok.type.value}', Expected '+', '-', '*' '/', '^' or an identifier",
        ).throw()
    return res
def skip_new_lines(self) -> None:
    """Consume any run of consecutive newline tokens."""
    while True:
        if self.current_tok.type != TokType.LN:
            break
        self.advance()
def stmts(self):
    """Parse top-level statements until '}' or EOF.

    :returns: a StmtsNode spanning all parsed statements
    """
    stmts = []
    range_start = self.current_tok.range
    self.skip_new_lines()
    while (
        self.current_tok.type != TokType.RBRACE
        and self.current_tok.type != TokType.EOF
    ):
        stmt = self.stmt()
        stmts.append(stmt)
        # newlines separate statements; consume them between iterations
        self.skip_new_lines()
    return StmtsNode(stmts, Range.merge(range_start, self.current_tok.range))
def block(self):
    """Parse a '{ ... }' block, or a single expression when no '{' follows.

    NOTE(review): an empty '{}' block returns a plain list ([]) while the
    other paths return a node -- an inconsistent return type; confirm
    callers tolerate this before relying on it.
    """
    self.skip_new_lines()
    if self.current_tok.type != TokType.LBRACE:
        return self.expression()
    self.advance()
    if self.current_tok.type == TokType.RBRACE:
        self.advance()
        return []
    stmts = self.expressions()
    if self.current_tok.type != TokType.RBRACE:
        SyntaxError(self.current_tok.range, "Expected '}'").throw()
    self.advance()
    return stmts
def stmt(self):
    """Parse one top-level statement and return its AST node.

    Only declarations are permitted at the top level; any other token is
    a syntax error.
    """
    self.skip_new_lines()
    tok = self.current_tok
    # keyword -> parse-method dispatch, checked in declaration order
    handlers = (
        ("import", self.import_stmt),
        ("const", self.const_declaration),
        ("type", self.type_alias),
        ("class", self.class_declaration),
        ("enum", self.enum_declaration),
        ("fnc", self.fnc_def_stmt),
    )
    for keyword, handler in handlers:
        if tok.isKeyword(keyword):
            return handler()
    SyntaxError(tok.range, f"Unexpected '{tok.value}'").throw()
def expressions(self):
expressions = []
range_start = self.current_tok.range
self.skip_new_lines()
while (
self.current_tok.type != TokType.RBRACE
and self.current_tok.type != TokType.EOF
):
stmt = self.expression()
expressions.append(stmt)
self.skip_new_lines()
return StmtsNode(expressions, Range.merge(range_start, self.current_tok.range))
def expression(self):
tok = self.current_tok
if tok.isKeyword("if"):
return self.if_stmt()
elif tok.isKeyword("for"):
return self.for_stmt()
elif tok.isKeyword("while"):
return self.while_stmt()
elif tok.inKeywordList(("return", "continue", "break")):
return self.change_flow_stmt()
return self.expr()
def import_stmt(self):
range_start = self.current_tok.range
self.advance()
ids = []
path = ""
if self.current_tok.type == TokType.IDENTIFER:
ids = self.identifier_list()
if not self.current_tok.isKeyword("from"):
SyntaxError(self.current_tok.range,
"Expected keyword 'from'").throw()
self.advance()
if self.current_tok.type != TokType.STR:
SyntaxError(self.current_tok.range, "Expected a string").throw()
path = self.current_tok
self.advance()
return ImportNode(
ids, path, Range.merge(
range_start, path.range)
)
def if_stmt(self) -> IfNode:
    """Parse an if / else-if / else chain into a single IfNode.

    ``cases`` is a list of (condition, body) tuples; ``else_case`` is the
    optional trailing else block.
    """
    range_start = self.current_tok.range
    self.advance()  # consume 'if'
    cases = []
    else_case = None
    cond = self.expr()
    stmts = self.block()
    self.skip_new_lines()
    cases.append((cond, stmts))
    if self.current_tok.isKeyword("else"):
        self.advance()
        if self.current_tok.isKeyword("if"):
            # 'else if': recurse and flatten the nested chain into ours
            resCases = self.if_stmt()
            cases += resCases.cases
            else_case = resCases.else_case
        else:
            stmts = self.block()
            else_case = stmts
    # NOTE(review): with no else, this takes the range of the last case's
    # CONDITION ([0]), not its body -- looks like [1] may have been
    # intended; confirm before changing.
    range_end = (else_case or cases[len(cases) - 1][0]).range
    return IfNode(cases, else_case, Range.merge(range_start, range_end))
def const_declaration(self) -> ConstDeclarationNode:
    """Parse a ``const <name> = <expr>`` declaration.

    Assumes the 'const' keyword is the current token.
    """
    self.advance()  # consume 'const'
    range_start = self.current_tok.range
    if self.current_tok.type != TokType.IDENTIFER:
        # fixed error-message typo: "and identifier" -> "an identifier"
        SyntaxError(range_start, "Expected an identifier").throw()
    name_tok = self.current_tok
    self.advance()
    if self.current_tok.type != TokType.EQ:
        SyntaxError(self.current_tok.range, "Expected '='").throw()
    self.advance()
    value_node = self.expr()
    node_range = Range.merge(range_start, self.current_tok.range)
    return ConstDeclarationNode(name_tok, value_node, node_range)
def type_alias(self):
range_start = self.current_tok.range
self.advance()
identifier = self.current_tok
self.advance()
if self.current_tok.type != TokType.EQ:
SyntaxError(self.current_tok.range, "Expected =").throw()
self.advance()
type = self.composite_type()
node_range = Range.merge(range_start, type.range)
return TypeAliasNode(identifier, type, node_range)
def generic_constraint(self):
tok = self.current_tok
if tok.type != TokType.IDENTIFER:
SyntaxError(tok.range, "Expected an identifer").throw()
self.advance()
return tok
def generic_constraints(self):
constraints = [self.generic_constraint()]
while self.current_tok.type == TokType.COMMA:
self.advance()
constraints.append(self.generic_constraint())
return constraints
def class_declaration(self) -> Union[ClassDeclarationNode, GenericClassNode]:
    """Parse a class declaration:
    ``class NAME ['<' constraints '>'] ['extends' Type] { ... }``.

    :returns: a ClassDeclarationNode, wrapped in a GenericClassNode when
        generic constraints are present.
    """
    self.advance()  # consume 'class'
    range_start = self.current_tok.range
    if self.current_tok.type != TokType.IDENTIFER:
        # fixed error-message typo: "and identifier" -> "an identifier"
        SyntaxError(range_start, "Expected an identifier").throw()
    name = self.current_tok
    self.advance()
    constraints = None
    if self.current_tok.type == TokType.LT:
        self.advance()
        constraints = self.generic_constraints()
        if self.current_tok.type != TokType.GT:
            SyntaxError(self.current_tok.range, "Expected a '>'").throw()
        self.advance()
    parent = None
    if self.current_tok.isKeyword("extends"):
        self.advance()
        parent = self.prim_type()
    class_body = self.class_block()
    node_range = Range.merge(range_start, self.current_tok.range)
    node = ClassDeclarationNode(name, parent, class_body, node_range)
    if constraints is not None:  # was `!= None`
        node = GenericClassNode(constraints, node, node_range)
    return node
def class_block(self):
if self.current_tok.type != TokType.LBRACE:
SyntaxError(self.current_tok.range, "Expected '{'").throw()
self.advance()
statements = []
range_start = self.current_tok.range
while self.current_tok.type != TokType.RBRACE:
self.skip_new_lines()
statements.append(self.class_stmt())
if self.current_tok.type != TokType.RBRACE:
SyntaxError(self.current_tok.range, "Expected '}'").throw()
node_range = Range.merge(range_start, self.current_tok.range)
self.advance()
return StmtsNode(statements, node_range)
def class_stmt(self):
access_modifier = None
if self.current_tok.inKeywordList(["public", "private", "static"]):
access_modifier = self.current_tok
self.advance()
if self.current_tok.type != TokType.IDENTIFER:
SyntaxError(self.current_tok.range, "Expected an Identifer").throw()
name = self.current_tok
self.advance()
if self.current_tok.type == TokType.COL:
self.advance()
property_type = self.composite_type()
node_range = Range.merge(name.range, property_type.range)
return PropertyDeclarationNode(access_modifier, name, property_type, node_range)
elif self.current_tok.type == TokType.LPAR:
method_body = self.function_body()
node_range = Range.merge(name.range, method_body.range)
return MethodDeclarationNode(access_modifier, name, method_body, node_range)
else:
SyntaxError(self.current_tok.range, "Expected a property declaration or a method declaration").throw()
def enum_declaration(self) -> EnumDeclarationNode:
self.advance()
range_start = self.current_tok.range
token_list = []
if self.current_tok.type != TokType.IDENTIFER:
SyntaxError(self.current_tok.range, "Expected an Identifier").throw()
name = self.current_tok
self.advance()
if self.current_tok.type != TokType.LBRACE:
SyntaxError(self.current_tok.range, "Expected '{'").throw()
self.advance()
self.skip_new_lines()
while self.current_tok.type == TokType.IDENTIFER:
token_list.append(self.current_tok)
self.advance()
self.skip_new_lines()
if self.current_tok.type != TokType.RBRACE:
SyntaxError(self.current_tok.range, "Expected '}'").throw()
self.advance()
node_range = Range.merge(range_start, self.current_tok.range)
return EnumDeclarationNode(name, token_list, node_range)
def for_stmt(self) -> ForNode:
self.advance()
init = None
range_start = self.current_tok.range
init = self.expr()
if self.current_tok.isKeyword("in"):
self.advance()
it = self.expr()
stmts = self.block()
return ForEachNode(
init, it, stmts, Range.merge(
range_start, self.current_tok.range)
)
if self.current_tok.type != TokType.SEMICOL:
SyntaxError(self.current_tok.range, "Expected ';'").throw()
self.advance()
cond = self.expr()
if self.current_tok.type != TokType.SEMICOL:
SyntaxError(self.current_tok.range, "Expected ';'").throw()
self.advance()
incr_decr = self.expr()
stmts = self.block()
return ForNode(
init, cond, incr_decr, stmts, Range.merge(range_start, stmts.range)
)
def while_stmt(self):
    """Parse a while loop: 'while' <condition> <block>."""
    self.advance()  # consume 'while'
    condition = self.expr()
    body = self.block()
    return WhileNode(condition, body, Range.merge(condition.range, body.range))
def fnc_def_stmt(self):
self.advance()
range_start = self.current_tok.range
if self.current_tok.type != TokType.IDENTIFER:
SyntaxError(self.current_tok.range, "Expected Identifier").throw()
var_name = self.current_tok
self.advance()
if self.current_tok.type != TokType.LPAR:
SyntaxError(self.current_tok.range, "Expected '('").throw()
function_body = self.function_body()
return FncDefNode(var_name, function_body, Range.merge(range_start, function_body.range))
def function_body(self):
self.advance()
range_start = self.current_tok.range
args, is_var_arg = self.arg_list()
if self.current_tok.type != TokType.RPAR:
SyntaxError(self.current_tok.range, "Expected ')'").throw()
self.advance()
return_type = None
if self.current_tok.type == TokType.COL:
self.advance()
return_type = self.composite_type()
body = None
if self.current_tok.type == TokType.LBRACE:
body = self.block()
return FncNode(
args,
body,
is_var_arg,
Range.merge(range_start, self.current_tok.range),
return_type,
)
def identifier_list(self):
args = []
if self.current_tok.type == TokType.IDENTIFER:
id = self.current_tok
self.advance()
args.append(id)
while self.current_tok.type == TokType.COMMA:
self.advance()
if self.current_tok.type != TokType.IDENTIFER:
SyntaxError(
self.current_tok.range, "Expected an Identifier"
).throw()
args.append(self.current_tok)
self.advance()
return args
def arg_item(self):
id = self.current_tok
default_val = None
self.advance()
if self.current_tok.type == TokType.EQ:
self.advance()
default_val = self.expr()
return (id, None, default_val)
if self.current_tok.type != TokType.COL:
SyntaxError(
id.range, "Expected ':' or '=' after identifier").throw()
self.advance()
type_id = self.composite_type()
if self.current_tok.type == TokType.EQ:
self.advance()
default_val = self.expr()
return (id, type_id, default_val)
def arg_list(self):
args = []
is_var_arg = False
if self.current_tok.type == TokType.DOT_DOT_DOT:
is_var_arg = True
self.advance()
if self.current_tok.type == TokType.IDENTIFER:
args.append(self.arg_item())
while self.current_tok.type == TokType.COMMA and not is_var_arg:
self.advance()
if self.current_tok.type == TokType.DOT_DOT_DOT:
is_var_arg = True
self.advance()
if self.current_tok.type != TokType.IDENTIFER:
SyntaxError(
self.current_tok.range, "Expected an Identifier"
).throw()
args.append(self.arg_item())
return args, is_var_arg
def change_flow_stmt(self):
range_start = self.current_tok.range
if self.current_tok.isKeyword("return"):
self.advance()
expr = None
if self.current_tok.type not in (TokType.LN, TokType.EOF, TokType.RBRACE):
expr = self.expr()
range = (
range_start if expr is None else Range.merge(
range_start, expr.range)
)
return ReturnNode(expr, range)
elif self.current_tok.isKeyword("continue"):
self.advance()
return ContinueNode(range_start)
elif self.current_tok.isKeyword("break"):
self.advance()
return BreakNode(range_start)
def expr(self):
return self.num_op(
self.bit_expr,
((TokType.KEYWORD, "as"), (TokType.KEYWORD, "is")),
self.composite_type,
)
def bit_expr(self):
return self.num_op(
self.comp_expr,
(
(TokType.KEYWORD, "and"),
(TokType.KEYWORD, "or"),
(TokType.KEYWORD, "xor"),
(TokType.KEYWORD, "in"),
TokType.SL,
TokType.SR,
),
)
def comp_expr(self):
if self.current_tok.type == TokType.NOT:
tok = self.current_tok
self.advance()
expr = self.comp_expr()
return UnaryNode(tok, expr, Range.merge(tok.range, expr.range))
return self.num_op(
self.arith_expr,
(
TokType.NEQ,
TokType.EEQ,
TokType.LT,
TokType.LEQ,
TokType.GT,
TokType.GTE,
),
)
def arith_expr(self):
return self.num_op(self.range_expr, (TokType.PLUS, TokType.MINUS))
def range_expr(self):
node = None
if self.current_tok.type != TokType.DOT_DOT:
node = self.arith_expr1()
start_range = node.range
else:
start_range = self.current_tok.range
if self.current_tok.type == TokType.DOT_DOT:
self.advance()
end = self.arith_expr1()
node = RangeNode(node, end, Range.merge(start_range, end.range))
return node
def arith_expr1(self):
return self.num_op(
self.unary_expr, (TokType.MULT, TokType.DIV,
TokType.MOD, TokType.POW)
)
def unary_expr(self):
tok = self.current_tok
if tok.type in (TokType.PLUS, TokType.MINUS, TokType.AMP):
self.advance()
f = self.unary_expr()
return UnaryNode(tok, f, Range.merge(tok.range, f.range))
elif tok.type in (TokType.PLUS_PLUS, TokType.MINUS_MINUS):
self.advance()
f = self.unary_expr()
return IncrDecrNode(
tok, f, True, Range.merge(tok.range, self.current_tok.range)
)
elif tok.isKeyword("new"):
return self.new_memexpr()
return self.unary_expr1()
def new_memexpr(self):
tok = self.current_tok
self.advance()
type = self.composite_type()
args = None
end_range = self.current_tok.range
if self.current_tok.type == TokType.LPAR:
self.advance()
if self.current_tok.type == TokType.RPAR:
node_range = Range.merge(tok.range, self.current_tok.range)
self.advance()
return NewMemNode(type, [], node_range)
args = self.expr_list()
if self.current_tok.type != TokType.RPAR:
SyntaxError(self.current_tok.range, "Expected )").throw()
end_range = self.current_tok.range
self.advance()
node_range = Range.merge(tok.range, end_range)
return NewMemNode(type, args, node_range)
def unary_expr1(self):
node = self.expr_value_op()
if self.current_tok.type in (TokType.PLUS_PLUS, TokType.MINUS_MINUS) and (isinstance(node, VarAccessNode)
or isinstance(node, ArrayAccessNode) or isinstance(node, PropertyAccessNode)):
tok = self.current_tok
self.advance()
return IncrDecrNode(
tok, node, False, Range.merge(
tok.range, self.current_tok.range)
)
return node
def expr_list(self):
    """Parse a comma-separated list of expressions.

    :returns: list of expression nodes (at least one)
    """
    items = [self.expr()]
    while self.current_tok.type == TokType.COMMA:
        self.advance()
        items.append(self.expr())
    return items
def assign_part(self, node: Node):
self.advance()
value = self.expr()
node_range = Range.merge(node.range, value.range)
if isinstance(node, VarAccessNode):
return VarAssignNode(node.var_name, value, None, node_range)
if isinstance(node, ArrayAccessNode):
return ArrayAssignNode(node, value, node_range)
elif isinstance(node, PropertyAccessNode):
return PropertyAssignNode(node, value, node_range)
else:
SyntaxError(
node.range, "Unexpected expression expected identifier or array").throw()
def expr_value_op(self):
range_start = self.current_tok.range
node = self.expr_value()
while (
self.current_tok.type == TokType.LBRACKET
or self.current_tok.type == TokType.LPAR
or self.current_tok.type == TokType.DOT
):
if self.current_tok.type == TokType.DOT:
node = self.property_access(node)
elif self.current_tok.type == TokType.LBRACKET:
self.advance()
expr = self.expr()
if self.current_tok.type != TokType.RBRACKET:
SyntaxError(self.current_tok.range, "Expected ']'").throw()
end_range = self.current_tok.range
self.advance()
node = ArrayAccessNode(
node, expr, Range.merge(node.range, end_range))
elif self.current_tok.type == TokType.LPAR:
self.advance()
args = []
if self.current_tok.type != TokType.RPAR:
args = self.expr_list()
if self.current_tok.type != TokType.RPAR:
SyntaxError(self.current_tok.range, "Expected ')'").throw()
end_range = self.current_tok.range
self.advance()
node = FncCallNode(
node, args, Range.merge(node.range, end_range))
if self.current_tok.type == TokType.EQ:
return self.assign_part(node)
node.range = Range.merge(range_start, node.range)
return node
def expr_value(self):
tok = self.current_tok
if tok.type == TokType.INT:
self.advance()
return IntNode(tok, tok.range)
if tok.type == TokType.FLOAT:
self.advance()
return FloatNode(tok, tok.range)
if tok.type == TokType.CHAR:
self.advance()
return CharNode(tok, tok.range)
elif tok.type == TokType.STR:
self.advance()
return StrNode(tok, tok.range)
elif tok.type == TokType.IDENTIFER:
self.advance()
node_type = None
if self.current_tok.type != TokType.EQ and self.current_tok.type != TokType.COL:
return VarAccessNode(tok, tok.range)
elif self.current_tok.type == TokType.COL:
self.advance()
node_type = self.composite_type()
if self.current_tok.type != TokType.EQ:
SyntaxError(self.current_tok.range, "Expected '='").throw()
self.advance()
value = self.expr()
node_range = Range.merge(tok.range, value.range)
return VarAssignNode(tok, value, node_type, node_range)
elif tok.type == TokType.LPAR:
self.advance()
exp = self.expr()
if self.current_tok.type == TokType.RPAR:
self.advance()
return exp
SyntaxError(self.current_tok.range, "Expected ')'").throw()
elif tok.type == TokType.LBRACKET:
self.advance()
list = []
if self.current_tok.type != TokType.RBRACKET:
list = self.expr_list()
if self.current_tok.type != TokType.RBRACKET:
SyntaxError(self.current_tok.range, "Expected ']'").throw()
end_range = self.current_tok.range
self.advance()
return ArrayNode(list, Range.merge(tok.range, end_range))
SyntaxError(
tok.range, f"Expected an expression value before '{tok}'").throw()
def property_access(self, expr):
self.advance()
ident = self.current_tok
node_range = ident.range
expr = PropertyAccessNode(expr, ident, node_range)
if ident.type != TokType.IDENTIFER:
SyntaxError(node_range, "Expected an Identifier").throw()
self.advance()
return expr
def prim_type(self):
tok = self.current_tok
self.advance()
if tok.type == TokType.LBRACE:
if self.current_tok.type != TokType.INT:
SyntaxError(self.current_tok.range, "Expected an int constant").throw()
size = self.current_tok.value
self.advance()
if self.current_tok.type != TokType.RBRACE:
SyntaxError(self.current_tok.range, "Expected a }").throw()
self.advance()
if not self.current_tok.inKeywordList(("int", "float")):
SyntaxError(self.current_tok.range,
"Expected an 'int' or 'float'").throw()
type = str_to_flotype(self.current_tok.value)
type.bits = size
end_range = self.current_tok.range
self.advance()
return TypeNode(type, Range.merge(tok.range, end_range))
elif tok.inKeywordList(("int", "float", "void")):
type = str_to_flotype(tok.value)
return TypeNode(type, tok.range)
elif tok.type == TokType.IDENTIFER:
type = FloObject(tok)
if self.current_tok.type == TokType.LT:
self.advance()
arg_list = self.type_list()
type = FloGeneric(tok, arg_list)
if self.current_tok.type != TokType.GT and self.current_tok.type != TokType.SR:
SyntaxError(self.current_tok.range, "Expected '>'").throw()
if self.current_tok.type == TokType.GT:
self.advance()
else:
self.current_tok.type = TokType.GT
return TypeNode(type, tok.range)
def type_list(self):
    """Parse a comma-separated list of type annotations.

    :returns: list of TypeNodes (at least one)
    """
    types = [self.composite_type()]
    while self.current_tok.type == TokType.COMMA:
        self.advance()
        types.append(self.composite_type())
    return types
def fnc_type(self):
range_start = self.current_tok.range
self.advance()
arg_types = []
if self.current_tok.type != TokType.RPAR:
arg_types = self.type_list()
if self.current_tok.type != TokType.RPAR:
SyntaxError(self.current_tok.range, "Expected ')'").throw()
self.advance()
if self.current_tok.type != TokType.ARROW:
SyntaxError(self.current_tok.range, "Expected '=>'").throw()
self.advance()
type = FloInlineFunc(None, arg_types, self.composite_type())
return TypeNode(type, Range.merge(range_start, type.return_type.range))
def composite_type(self):
    """Parse a full type expression.

    A composite type is a primitive/object type (or a parenthesised
    function type) optionally followed by any number of pointer ('*')
    and sized-array ('[expr]') suffixes, applied left to right.

    Throws a SyntaxError diagnostic when no type definition is present.
    """
    tok = self.current_tok
    type = None
    if tok.inKeywordList(("int", "float", "void")) or tok.type == TokType.IDENTIFER or tok.type == TokType.LBRACE:
        type = self.prim_type()
    elif tok.type == TokType.LPAR:
        # '(' starts a function type; fnc_type returns a complete TypeNode.
        return self.fnc_type()
    while self.current_tok.type == TokType.MULT or self.current_tok.type == TokType.LBRACKET:
        if self.current_tok.type == TokType.MULT:
            end_range = self.current_tok.range
            self.advance()
            # NOTE(review): if no base type preceded '*', type is None and
            # type.range raises AttributeError -- confirm the grammar makes
            # that unreachable.
            type = TypeNode(FloPointer(type), Range.merge(type.range, end_range))
        else:
            self.advance()
            size = self.expr()
            # fixed: the RBRACKET check was duplicated (nested twice)
            if self.current_tok.type != TokType.RBRACKET:
                SyntaxError(self.current_tok.range,
                            "Expected ']'").throw()
            end_range = self.current_tok.range
            self.advance()
            arr_ty = FloArray(None, size)
            arr_ty.elm_type = type
            type = TypeNode(arr_ty, Range.merge(type.range, end_range))
    if type:
        return type
    else:
        SyntaxError(tok.range, "Expected type definition").throw()
def num_op(self, func_a, toks, func_b=None):
    """Generic left-associative binary-operator parse loop.

    Parses ``func_a (op func_b)*`` for any operator whose token type (or
    (type, value) pair) appears in *toks*.  When the operator is
    immediately followed by '=', the construct is parsed as a compound
    assignment (``a += b`` becomes ``a = a + b``) and the assignment node
    is returned directly.

    :param func_a: parse function for the left operand
    :param toks: accepted operator token types / (type, value) pairs
    :param func_b: parse function for right operands (defaults to func_a)
    """
    if func_b is None:  # was `== None`; identity check is the Python idiom
        func_b = func_a
    left_node = func_a()
    while (
        self.current_tok.type in toks
        or (self.current_tok.type, self.current_tok.value) in toks
    ):
        op_tok = self.current_tok
        self.advance()
        if self.current_tok.type == TokType.EQ:
            # Compound assignment: reuse assign_part, then rewrite its
            # value as `left op value`.
            assign_node = self.assign_part(left_node)
            node_range = Range.merge(left_node.range, assign_node.range)
            num_op_node = NumOpNode(
                left_node, op_tok, assign_node.value, assign_node.value.range)
            assign_node.value = num_op_node
            assign_node.range = node_range
            return assign_node
        else:
            right_node = func_b()
            left_node = NumOpNode(
                left_node,
                op_tok,
                right_node,
                Range.merge(left_node.range, right_node.range),
            )
    return left_node
from kim.exception import RoleError
class Role(set):
    """Roles are a fundamental feature of Kim. It's very common to need
    to provide a different view of your data or to only require a selection
    of fields when marshaling data. ``Roles`` in Kim allow users to shape
    their data at runtime in a simple yet flexible manner. ``Roles`` are
    added to your :py:class:`~.Mapper` declarations using the ``__roles__``
    attribute.

    Usage::

        from kim import Mapper, whitelist, field

        class UserMapper(Mapper):
            __type__ = User

            id = field.Integer(read_only=True)
            name = field.String(required=True)
            company = field.Nested('myapp.mappers.CompanyMapper')

            __roles__ = {
                'id_only': whitelist('id')
            }
    """

    def __init__(self, *args, **kwargs):
        """Initialise a new :class:`Role` from the given field names.

        :param whitelist: boolean indicating whether this role is a
            whitelist (default True) or a blacklist.
        """
        # Whether membership means "allowed" (True) or "excluded" (False).
        self.whitelist = kwargs.pop('whitelist', True)
        super(Role, self).__init__(args)

    @property
    def fields(self):
        """Return a list containing all the field names defined in this
        role.

        :rtype: list
        :returns: list of field names
        """
        return list(self)

    def __contains__(self, field_name):
        """Overloaded membership test that inverts the check depending on
        whether the role is a whitelist or a blacklist.

        For a whitelist the normal membership test applies::

            >>> 'name' in whitelist('name')
            True

        For a blacklist the test is flipped::

            >>> 'other_name' in blacklist('name')
            True
            >>> 'name' in blacklist('name')
            False

        :param field_name: name of a field to test for membership
        :rtype: boolean
        :returns: whether field_name is permitted by this role
        """
        present = super(Role, self).__contains__(field_name)
        return present if self.whitelist else not present

    def _combine(self, other, whitelist_pair_op, error_msg):
        """Shared implementation for :meth:`__or__` and :meth:`__and__`.

        The two operators differ only in how two whitelists are merged
        (*whitelist_pair_op*: set union vs set intersection); every mixed
        and blacklist/blacklist combination is handled identically.

        :param other: another :class:`Role`
        :param whitelist_pair_op: bound set operation applied when both
            roles are whitelists
        :param error_msg: message for the RoleError raised on non-Role input
        """
        if not isinstance(other, Role):
            raise RoleError(error_msg)
        whitelist = True
        if self.whitelist and other.whitelist:
            result = whitelist_pair_op(other)
        elif self.whitelist and not other.whitelist:
            # keep the whitelisted fields the blacklist does not exclude
            result = super(Role, self).__sub__(other)
        elif not self.whitelist and other.whitelist:
            # mirror of the case above, keeping fields from other
            result = other.__sub__(self)
        else:
            # both blacklists: exclude everything either role excludes
            whitelist = False
            result = super(Role, self).__or__(other)
        return Role(*result, whitelist=whitelist)

    def __or__(self, other):
        """Produce the union of two Roles, merging whitelist and blacklist
        roles correctly.

        Usage::

            >>> from kim.role import whitelist, blacklist
            >>> whitelist('foo', 'bar') | blacklist('foo', 'baz')
            Role('bar')

        :param other: another instance of :class:`kim.role.Role`
        :raises: :class:`kim.exception.RoleError`
        :rtype: :class:`kim.role.Role`
        :returns: a new :class:`kim.role.Role` containing the merged field
            names
        """
        return self._combine(
            other, super(Role, self).__or__,
            'union of built in types is not supported with roles')

    def __and__(self, other):
        """Produce the intersection of two Roles, merging whitelist and
        blacklist roles correctly.

        Usage::

            >>> from kim.role import whitelist, blacklist
            >>> whitelist('foo', 'bar') & blacklist('foo', 'baz')
            Role('bar')

        :param other: another instance of :class:`kim.role.Role`
        :raises: :class:`kim.exception.RoleError`
        :rtype: :class:`kim.role.Role`
        :returns: a new :class:`kim.role.Role` containing the merged field
            names
        """
        return self._combine(
            other, super(Role, self).__and__,
            'intersection of built types is not supported with roles')
class whitelist(Role):
    """Whitelists are roles that define a list of fields that are
    permitted for inclusion when marshaling or serializing.

    For example, a whitelist role called ``id_only`` that contains the
    field name ``id`` instructs kim that whenever the ``id_only`` role is
    used **only** the ``id`` field should be considered in the
    input/output data.

    Usage::

        from kim import whitelist

        id_only_role = whitelist('id')

        class IdMixin(object):
            id = fields.Integer(read_only=True)

            __roles__ = {
                'id_only': id_only_role
            }
    """

    def __init__(self, *args, **kwargs):
        # Force whitelist semantics regardless of caller-supplied kwargs.
        # (Removed a redundant `self.whitelist = True` that was immediately
        # overwritten by Role.__init__.)
        kwargs['whitelist'] = True
        super(whitelist, self).__init__(*args, **kwargs)
class blacklist(Role):
    """Blacklists are roles that act in the opposite manner to whitelists.

    They define a list of fields that should not be used when marshaling
    and serializing data. A blacklist role named ``id_less`` that
    contained the field name ``id`` would instruct kim that every field
    defined on the mapper should be considered except ``id``.

    Usage::

        from kim import blacklist

        class UserMapper(Mapper):

            __roles__ = {
                'id_less': blacklist('id')
            }
    """

    def __init__(self, *args, **kwargs):
        # Force blacklist semantics regardless of caller-supplied kwargs.
        kwargs['whitelist'] = False
        super(blacklist, self).__init__(*args, **kwargs)
from kim.exception import RoleError
class Role(set):
    """Roles are a fundamental feature of Kim. It's very common to need
    to provide a different view of your data or to only require a selection
    of fields when marshaling data. ``Roles`` in Kim allow users to shape
    their data at runtime in a simple yet flexible manner. ``Roles`` are
    added to your :py:class:`~.Mapper` declarations using the ``__roles__``
    attribute.

    Usage::

        from kim import Mapper, whitelist, field

        class UserMapper(Mapper):
            __type__ = User

            id = field.Integer(read_only=True)
            name = field.String(required=True)
            company = field.Nested('myapp.mappers.CompanyMapper')

            __roles__ = {
                'id_only': whitelist('id')
            }
    """

    def __init__(self, *args, **kwargs):
        """Initialise a new :class:`Role` from the given field names.

        :param whitelist: boolean indicating whether this role is a
            whitelist (default True) or a blacklist.
        """
        # Whether membership means "allowed" (True) or "excluded" (False).
        self.whitelist = kwargs.pop('whitelist', True)
        super(Role, self).__init__(args)

    @property
    def fields(self):
        """Return a list containing all the field names defined in this
        role.

        :rtype: list
        :returns: list of field names
        """
        return list(self)

    def __contains__(self, field_name):
        """Overloaded membership test that inverts the check depending on
        whether the role is a whitelist or a blacklist.

        For a whitelist the normal membership test applies::

            >>> 'name' in whitelist('name')
            True

        For a blacklist the test is flipped::

            >>> 'other_name' in blacklist('name')
            True
            >>> 'name' in blacklist('name')
            False

        :param field_name: name of a field to test for membership
        :rtype: boolean
        :returns: whether field_name is permitted by this role
        """
        present = super(Role, self).__contains__(field_name)
        return present if self.whitelist else not present

    def _combine(self, other, whitelist_pair_op, error_msg):
        """Shared implementation for :meth:`__or__` and :meth:`__and__`.

        The two operators differ only in how two whitelists are merged
        (*whitelist_pair_op*: set union vs set intersection); every mixed
        and blacklist/blacklist combination is handled identically.

        :param other: another :class:`Role`
        :param whitelist_pair_op: bound set operation applied when both
            roles are whitelists
        :param error_msg: message for the RoleError raised on non-Role input
        """
        if not isinstance(other, Role):
            raise RoleError(error_msg)
        whitelist = True
        if self.whitelist and other.whitelist:
            result = whitelist_pair_op(other)
        elif self.whitelist and not other.whitelist:
            # keep the whitelisted fields the blacklist does not exclude
            result = super(Role, self).__sub__(other)
        elif not self.whitelist and other.whitelist:
            # mirror of the case above, keeping fields from other
            result = other.__sub__(self)
        else:
            # both blacklists: exclude everything either role excludes
            whitelist = False
            result = super(Role, self).__or__(other)
        return Role(*result, whitelist=whitelist)

    def __or__(self, other):
        """Produce the union of two Roles, merging whitelist and blacklist
        roles correctly.

        Usage::

            >>> from kim.role import whitelist, blacklist
            >>> whitelist('foo', 'bar') | blacklist('foo', 'baz')
            Role('bar')

        :param other: another instance of :class:`kim.role.Role`
        :raises: :class:`kim.exception.RoleError`
        :rtype: :class:`kim.role.Role`
        :returns: a new :class:`kim.role.Role` containing the merged field
            names
        """
        return self._combine(
            other, super(Role, self).__or__,
            'union of built in types is not supported with roles')

    def __and__(self, other):
        """Produce the intersection of two Roles, merging whitelist and
        blacklist roles correctly.

        Usage::

            >>> from kim.role import whitelist, blacklist
            >>> whitelist('foo', 'bar') & blacklist('foo', 'baz')
            Role('bar')

        :param other: another instance of :class:`kim.role.Role`
        :raises: :class:`kim.exception.RoleError`
        :rtype: :class:`kim.role.Role`
        :returns: a new :class:`kim.role.Role` containing the merged field
            names
        """
        return self._combine(
            other, super(Role, self).__and__,
            'intersection of built types is not supported with roles')
class whitelist(Role):
""" Whitelists are roles that define a list of fields that are
permitted for inclusion when marhsaling or serializing.
For example, a whitelist role called ``id_only`` that contains
the field name ``id`` instructs kim that whenever
the ``id_only`` role is used **only** the ``id`` field should be
considered in the input/output data.
Usage::
from kim import whitelist
id_only_role = whitelist('id')
class IdMixin(object):
id = fields.Integer(read_only=True)
__roles__ = {
'id_only': id_only
}
"""
def __init__(self, *args, **kwargs):
self.whitelist = True
kwargs['whitelist'] = True
super(whitelist, self).__init__(*args, **kwargs)
class blacklist(Role):
""" Blacklists are role that act in the opposite manner to whitelists.
They define a list of fields that should not be used
when marshaling and serializing data. A blacklist role named ``id_less``
that contained the field name ``id`` would instruct kim that every
field defined on the mapper should be considered except ``id``.
Usage::
from kim import whitelist
class UserMapper(Mapper):
id_less_role = blacklist('id')
__roles__ = {
'id_less': blacklist('id')
}
"""
def __init__(self, *args, **kwargs):
kwargs['whitelist'] = False
super(blacklist, self).__init__(*args, **kwargs) | 0.885817 | 0.48377 |
from PIL import Image, UnidentifiedImageError
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.db import models
from django.utils.translation import ugettext_lazy as _
from phonenumber_field.modelfields import PhoneNumberField
from UserApp.managers import UserManager
ROLE_CHOICES = (
(0, "user"),
(1, "admin"),
)
class User(AbstractBaseUser, PermissionsMixin):
"""
This class represents basic user.
=======================================
Attributes:
first_name: Describes user`s first name
type: str, max_length: 40, required field
last_name: Describes user`s last name
type: str, max_length: 40, required field
middle_name: Describes user`s middle name
type: str, max_length: 40
profile_pic: User`s avatar
type: ImageField, default path = media/default_profile_pictures/default_pic.svg
birth_date: Describes user`s birth date
type: datetime.date
register_date: Describes the date of user`s registration,
auto-filled with the date user has been created
type: datetime.date, CAN`T BE CHANGED
phone_number: Describes user`s phone number in international format
(like +380 00 000 00 00)
type: PhoneNumber, used as a username field, required field, unique field
more about phoneNumber: https://github.com/stefanfoulis/django-phonenumber-field
email: Describes user`s email
type: str, required field, unique field
role: Describes user`s role, admin(1) is an administrator
type: int, default value = 0, required field
"""
telegram_id = models.CharField(
verbose_name=_('telegram id'), null=True, unique=True, max_length=40, default=None
)
first_name = models.CharField(
verbose_name=_("first name"), blank=False, null=False, max_length=40
)
last_name = models.CharField(
verbose_name=_("last name"), blank=False, null=False, max_length=40
)
middle_name = models.CharField(
verbose_name=_("middle name"), blank=True, null=False, max_length=40
)
profile_pic = models.ImageField(
verbose_name=_("profile picture"),
upload_to="profile_pictures/",
default="default_profile_pictures/default_pic.svg",
)
birth_date = models.DateField(
verbose_name=_("date of birth"),
blank=True,
null=True,
auto_now=False,
auto_now_add=False,
)
register_date = models.DateField(
verbose_name=_("date of registration"),
blank=False,
null=False,
auto_now=False,
auto_now_add=True,
editable=False,
)
phone_number = PhoneNumberField(
verbose_name=_("phone number"), blank=False, null=False, unique=True
)
email = models.EmailField(
verbose_name=_("email"), blank=False, null=False, unique=True
)
role = models.IntegerField(verbose_name=_("role"), default=0, choices=ROLE_CHOICES)
is_active = models.BooleanField(verbose_name=_("is active"), default=True)
is_bot = models.BooleanField(verbose_name=_("is bot"), default=False)
wishlist = models.ManyToManyField('ProductApp.Product', related_name='wishlist', blank=True)
objects = UserManager()
USERNAME_FIELD = "phone_number"
REQUIRED_FIELDS = ["first_name", "last_name", "email"]
class Meta:
verbose_name = _("user")
verbose_name_plural = _("users")
def __str__(self) -> str:
return self.email
def has_perm(self, perm, obj=None):
return self.role
def has_module_perms(self, app_label):
if app_label == "token_blacklist":
return self.is_superuser
return self.role
def save(self, *args, **kwargs):
if self.pk:
for user in User.objects.filter(id=self.pk):
if (
user.profile_pic.name != "default_profile_pictures/default_pic.svg"
and user.profile_pic.name != self.profile_pic.name # noqa: W503
): # 'and' cannot be replaced or restyled in the way it needs to be
user.profile_pic.delete(save=False)
super().save(*args, **kwargs)
try:
# Need this try block because default image is .svg format
# Can be replaced with checking if self.profile_pic.name is not default
image_path = self.profile_pic.path
img = Image.open(image_path)
if img.height > 300 or img.width > 300:
new_img_size = (300, 300)
img.thumbnail(new_img_size)
img.save(image_path)
except UnidentifiedImageError:
pass
@property
def is_staff(self) -> int:
return (not self.is_bot) and self.role | ProjectShop/UserApp/models.py | from PIL import Image, UnidentifiedImageError
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.db import models
from django.utils.translation import ugettext_lazy as _
from phonenumber_field.modelfields import PhoneNumberField
from UserApp.managers import UserManager
ROLE_CHOICES = (
(0, "user"),
(1, "admin"),
)
class User(AbstractBaseUser, PermissionsMixin):
"""
This class represents basic user.
=======================================
Attributes:
first_name: Describes user`s first name
type: str, max_length: 40, required field
last_name: Describes user`s last name
type: str, max_length: 40, required field
middle_name: Describes user`s middle name
type: str, max_length: 40
profile_pic: User`s avatar
type: ImageField, default path = media/default_profile_pictures/default_pic.svg
birth_date: Describes user`s birth date
type: datetime.date
register_date: Describes the date of user`s registration,
auto-filled with the date user has been created
type: datetime.date, CAN`T BE CHANGED
phone_number: Describes user`s phone number in international format
(like +380 00 000 00 00)
type: PhoneNumber, used as a username field, required field, unique field
more about phoneNumber: https://github.com/stefanfoulis/django-phonenumber-field
email: Describes user`s email
type: str, required field, unique field
role: Describes user`s role, admin(1) is an administrator
type: int, default value = 0, required field
"""
telegram_id = models.CharField(
verbose_name=_('telegram id'), null=True, unique=True, max_length=40, default=None
)
first_name = models.CharField(
verbose_name=_("first name"), blank=False, null=False, max_length=40
)
last_name = models.CharField(
verbose_name=_("last name"), blank=False, null=False, max_length=40
)
middle_name = models.CharField(
verbose_name=_("middle name"), blank=True, null=False, max_length=40
)
profile_pic = models.ImageField(
verbose_name=_("profile picture"),
upload_to="profile_pictures/",
default="default_profile_pictures/default_pic.svg",
)
birth_date = models.DateField(
verbose_name=_("date of birth"),
blank=True,
null=True,
auto_now=False,
auto_now_add=False,
)
register_date = models.DateField(
verbose_name=_("date of registration"),
blank=False,
null=False,
auto_now=False,
auto_now_add=True,
editable=False,
)
phone_number = PhoneNumberField(
verbose_name=_("phone number"), blank=False, null=False, unique=True
)
email = models.EmailField(
verbose_name=_("email"), blank=False, null=False, unique=True
)
role = models.IntegerField(verbose_name=_("role"), default=0, choices=ROLE_CHOICES)
is_active = models.BooleanField(verbose_name=_("is active"), default=True)
is_bot = models.BooleanField(verbose_name=_("is bot"), default=False)
wishlist = models.ManyToManyField('ProductApp.Product', related_name='wishlist', blank=True)
objects = UserManager()
USERNAME_FIELD = "phone_number"
REQUIRED_FIELDS = ["first_name", "last_name", "email"]
class Meta:
verbose_name = _("user")
verbose_name_plural = _("users")
def __str__(self) -> str:
return self.email
def has_perm(self, perm, obj=None):
return self.role
def has_module_perms(self, app_label):
if app_label == "token_blacklist":
return self.is_superuser
return self.role
def save(self, *args, **kwargs):
if self.pk:
for user in User.objects.filter(id=self.pk):
if (
user.profile_pic.name != "default_profile_pictures/default_pic.svg"
and user.profile_pic.name != self.profile_pic.name # noqa: W503
): # 'and' cannot be replaced or restyled in the way it needs to be
user.profile_pic.delete(save=False)
super().save(*args, **kwargs)
try:
# Need this try block because default image is .svg format
# Can be replaced with checking if self.profile_pic.name is not default
image_path = self.profile_pic.path
img = Image.open(image_path)
if img.height > 300 or img.width > 300:
new_img_size = (300, 300)
img.thumbnail(new_img_size)
img.save(image_path)
except UnidentifiedImageError:
pass
@property
def is_staff(self) -> int:
return (not self.is_bot) and self.role | 0.672117 | 0.106319 |
from pymongo import MongoClient
from datetime import datetime
from dotenv import load_dotenv
import os
services_dummy_data = [
{
'name': 'dns-service',
'host': {
'type': 'ip',
'value': '1.1.1.1'
},
'port': '53',
'proto': 'udp',
'timestamps': {
'last_responded': None,
'last_tested': None,
'created': datetime.utcnow(),
'edited': datetime.utcnow()
},
'status': 'up'
},
{
'name': 'wrong-hostname',
'host': {
'type': 'hostname',
'value': 'xxx.local.sdsdsd'
},
'port': '123',
'proto': 'udp',
'timestamps': {
'last_responded': None,
'last_tested': None,
'created': datetime.utcnow(),
'edited': datetime.utcnow()
},
'status': 'up'
},
{
'name': 'home-ssh-service',
'host': {
'type': 'ip',
'value': '192.168.1.1'
},
'port': '22',
'proto': 'tcp',
'timestamps': {
'last_responded': None,
'last_tested': None,
'created': datetime.utcnow(),
'edited': datetime.utcnow()
},
'status': 'up'
},
{
'name': 'home-ntp-service',
'host': {
'type': 'ip',
'value': '192.168.1.1'
},
'port': '123',
'proto': 'tcp',
'timestamps': {
'last_responded': None,
'last_tested': None,
'created': datetime.utcnow(),
'edited': datetime.utcnow()
},
'status': 'up'
},
{
'name': 'localhost-nmea-service',
'host': {
'type': 'ip',
'value': '172.16.1.126'
},
'port': '10110',
'proto': 'tcp',
'timestamps': {
'last_responded': None,
'last_tested': None,
'created': datetime.utcnow(),
'edited': datetime.utcnow()
},
'status': 'up'
},
{
'name': 'dns-google',
'host': {
'type': 'hostname',
'value': 'google-public-dns-b.google.com'
},
'port': '53',
'proto': 'tcp',
'timestamps': {
'last_responded': None,
'last_tested': None,
'created': datetime.utcnow(),
'edited': datetime.utcnow()
},
'status': 'up'
}
]
# Load environment vars
load_dotenv('.env-watchdog')
mongodb_url = os.getenv("MONGODB_URL")
with MongoClient(mongodb_url) as client:
# Use 'watchdogdb' db
db = client.watchdogdb
service = db.service
# Drop collection if exists
service.drop()
service.insert_many(services_dummy_data) | init_db.py | from pymongo import MongoClient
from datetime import datetime
from dotenv import load_dotenv
import os
services_dummy_data = [
{
'name': 'dns-service',
'host': {
'type': 'ip',
'value': '1.1.1.1'
},
'port': '53',
'proto': 'udp',
'timestamps': {
'last_responded': None,
'last_tested': None,
'created': datetime.utcnow(),
'edited': datetime.utcnow()
},
'status': 'up'
},
{
'name': 'wrong-hostname',
'host': {
'type': 'hostname',
'value': 'xxx.local.sdsdsd'
},
'port': '123',
'proto': 'udp',
'timestamps': {
'last_responded': None,
'last_tested': None,
'created': datetime.utcnow(),
'edited': datetime.utcnow()
},
'status': 'up'
},
{
'name': 'home-ssh-service',
'host': {
'type': 'ip',
'value': '192.168.1.1'
},
'port': '22',
'proto': 'tcp',
'timestamps': {
'last_responded': None,
'last_tested': None,
'created': datetime.utcnow(),
'edited': datetime.utcnow()
},
'status': 'up'
},
{
'name': 'home-ntp-service',
'host': {
'type': 'ip',
'value': '192.168.1.1'
},
'port': '123',
'proto': 'tcp',
'timestamps': {
'last_responded': None,
'last_tested': None,
'created': datetime.utcnow(),
'edited': datetime.utcnow()
},
'status': 'up'
},
{
'name': 'localhost-nmea-service',
'host': {
'type': 'ip',
'value': '172.16.1.126'
},
'port': '10110',
'proto': 'tcp',
'timestamps': {
'last_responded': None,
'last_tested': None,
'created': datetime.utcnow(),
'edited': datetime.utcnow()
},
'status': 'up'
},
{
'name': 'dns-google',
'host': {
'type': 'hostname',
'value': 'google-public-dns-b.google.com'
},
'port': '53',
'proto': 'tcp',
'timestamps': {
'last_responded': None,
'last_tested': None,
'created': datetime.utcnow(),
'edited': datetime.utcnow()
},
'status': 'up'
}
]
# Load environment vars
load_dotenv('.env-watchdog')
mongodb_url = os.getenv("MONGODB_URL")
with MongoClient(mongodb_url) as client:
# Use 'watchdogdb' db
db = client.watchdogdb
service = db.service
# Drop collection if exists
service.drop()
service.insert_many(services_dummy_data) | 0.486819 | 0.122681 |
import numpy as np
from cykdtree import PyKDTree
DIRECTION_MAPPING = {
'x': 0,
'y': 1,
'z': 2,
}
def _plot(plot_nodes, filename, outlines_only=False):
from matplotlib import pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.collections import PatchCollection
if outlines_only:
cb_pad = 0
figsize = (4, 4)
else:
cb_pad = .06
figsize = (4.5, 4)
fig = plt.figure(figsize=figsize)
aspect = figsize[1]/figsize[0]
pad = .02
axs = (1 - 2*pad)
axes = fig.add_axes([pad, pad, axs*aspect, axs])
if outlines_only is False:
cb_ax = fig.add_axes([axs*aspect, pad, axs - cb_pad - axs*aspect, axs])
axes.set_aspect('equal')
axes.xaxis.set_visible(False)
axes.yaxis.set_visible(False)
patches = []
npts_a = []
for le, re, npts in plot_nodes:
patches.append(Rectangle(le, re[0] - le[0], re[1] - le[1]))
npts_a.append(npts)
pc = PatchCollection(patches, edgecolors='k')
if outlines_only is False:
pc.set_array(np.array(npts_a))
pc.set_clim(0, 16)
else:
pc.set_facecolor('red')
pc.set_alpha(0.1)
axes.add_collection(pc)
if outlines_only is False:
cbar = fig.colorbar(pc, ax=axes, cax=cb_ax, cmap='viridis')
cbar.set_label('Number of Points')
if filename is None:
plt.show()
else:
plt.savefig(filename)
class ParticleProjectionKDTree(object):
def __init__(self, positions, direction, left_edge, right_edge,
amr_nested=True):
"""A data structure for projecting particles using a KDTree
Parameters
----------
positions : ndarray
The input positions to project. The positions must be of
shape ``(nparticles, 3)``.
direction : string
The axis to project along. One of 'x', 'y', or 'z'.
left_edge : ndarray
The 3D coordinates of the lower left bounding box corner.
Particles to the left of this point will be discarded.
right_edge : ndarray
The 3D coordinates of the upper right bounding box corner.
Particles to the left of this point will be discarded.
amr_nested : bool
Force the kdtree to split at nearest AMR cell boundary for
KDTree level
"""
self.direction = direction
self.kdtree = PyKDTree(positions, left_edge, right_edge,
periodic=(True, True, True), leafsize=16,
amr_nested=amr_nested)
def plot(self, filename=None):
plot_nodes = []
for leaf in self.kdtree.leaves:
le = leaf.left_edge
re = leaf.right_edge
d = DIRECTION_MAPPING[self.direction]
le = np.hstack([le[:d], le[d+1:]])
re = np.hstack([re[:d], re[d+1:]])
plot_nodes.append((le, re, leaf.npts))
_plot(plot_nodes, filename, outlines_only=True)
class ParticleSliceKDTree(object):
def __init__(self, positions, direction, coord, left_edge, right_edge,
amr_nested=True):
"""A data structure for projecting particles using a KDTree
Parameters
----------
positions : ndarray
The input positions to project. The positions must be of
shape ``(nparticles, 3)``.
direction : string
The axis to project along. One of 'x', 'y', or 'z'.
coord : float
The coordinate along the ``direction`` axis to slice through.
left_edge : ndarray
The 3D coordinates of the lower left bounding box corner.
Particles to the left of this point will be discarded.
right_edge : ndarray
The 3D coordinates of the upper right bounding box corner.
Particles to the left of this point will be discarded.
amr_nested : bool
Force the kdtree to split at nearest AMR cell boundary for
KDTree level
"""
self.coord = coord
self.direction = direction
self.kdtree = PyKDTree(positions, left_edge, right_edge,
periodic=(True, True, True), leafsize=16,
amr_nested=amr_nested)
def plot(self, filename):
plot_nodes = []
for leaf in self.kdtree.leaves:
le = leaf.left_edge
re = leaf.right_edge
d = DIRECTION_MAPPING[self.direction]
if not le[d] < self.coord <= re[d]:
continue
le = np.hstack([le[:d], le[d+1:]])
re = np.hstack([re[:d], re[d+1:]])
plot_nodes.append((le, re, leaf.npts))
_plot(plot_nodes, filename)
def natural_pow2_resolution(vals):
"""Find power of 2 that offers a natural spacing given value"""
return 2**(np.floor(np.log2(min(vals))) - 5)
def pin_power_2(inp, res, top_or_bottom):
"""Round inp to the nearest power of 2"""
ret = [0, 0]
for i, val in enumerate(inp):
if val == 0:
ret[i] = val
continue
# find nearest power of 2
adj = np.fmod(val, res)
ret[i] = val - (-(1 + top_or_bottom)*res/2 + adj)
if ret[i] > 1:
ret[i] = 1
return ret | qtree/kdtree.py | import numpy as np
from cykdtree import PyKDTree
DIRECTION_MAPPING = {
'x': 0,
'y': 1,
'z': 2,
}
def _plot(plot_nodes, filename, outlines_only=False):
from matplotlib import pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.collections import PatchCollection
if outlines_only:
cb_pad = 0
figsize = (4, 4)
else:
cb_pad = .06
figsize = (4.5, 4)
fig = plt.figure(figsize=figsize)
aspect = figsize[1]/figsize[0]
pad = .02
axs = (1 - 2*pad)
axes = fig.add_axes([pad, pad, axs*aspect, axs])
if outlines_only is False:
cb_ax = fig.add_axes([axs*aspect, pad, axs - cb_pad - axs*aspect, axs])
axes.set_aspect('equal')
axes.xaxis.set_visible(False)
axes.yaxis.set_visible(False)
patches = []
npts_a = []
for le, re, npts in plot_nodes:
patches.append(Rectangle(le, re[0] - le[0], re[1] - le[1]))
npts_a.append(npts)
pc = PatchCollection(patches, edgecolors='k')
if outlines_only is False:
pc.set_array(np.array(npts_a))
pc.set_clim(0, 16)
else:
pc.set_facecolor('red')
pc.set_alpha(0.1)
axes.add_collection(pc)
if outlines_only is False:
cbar = fig.colorbar(pc, ax=axes, cax=cb_ax, cmap='viridis')
cbar.set_label('Number of Points')
if filename is None:
plt.show()
else:
plt.savefig(filename)
class ParticleProjectionKDTree(object):
def __init__(self, positions, direction, left_edge, right_edge,
amr_nested=True):
"""A data structure for projecting particles using a KDTree
Parameters
----------
positions : ndarray
The input positions to project. The positions must be of
shape ``(nparticles, 3)``.
direction : string
The axis to project along. One of 'x', 'y', or 'z'.
left_edge : ndarray
The 3D coordinates of the lower left bounding box corner.
Particles to the left of this point will be discarded.
right_edge : ndarray
The 3D coordinates of the upper right bounding box corner.
Particles to the left of this point will be discarded.
amr_nested : bool
Force the kdtree to split at nearest AMR cell boundary for
KDTree level
"""
self.direction = direction
self.kdtree = PyKDTree(positions, left_edge, right_edge,
periodic=(True, True, True), leafsize=16,
amr_nested=amr_nested)
def plot(self, filename=None):
plot_nodes = []
for leaf in self.kdtree.leaves:
le = leaf.left_edge
re = leaf.right_edge
d = DIRECTION_MAPPING[self.direction]
le = np.hstack([le[:d], le[d+1:]])
re = np.hstack([re[:d], re[d+1:]])
plot_nodes.append((le, re, leaf.npts))
_plot(plot_nodes, filename, outlines_only=True)
class ParticleSliceKDTree(object):
def __init__(self, positions, direction, coord, left_edge, right_edge,
amr_nested=True):
"""A data structure for projecting particles using a KDTree
Parameters
----------
positions : ndarray
The input positions to project. The positions must be of
shape ``(nparticles, 3)``.
direction : string
The axis to project along. One of 'x', 'y', or 'z'.
coord : float
The coordinate along the ``direction`` axis to slice through.
left_edge : ndarray
The 3D coordinates of the lower left bounding box corner.
Particles to the left of this point will be discarded.
right_edge : ndarray
The 3D coordinates of the upper right bounding box corner.
Particles to the left of this point will be discarded.
amr_nested : bool
Force the kdtree to split at nearest AMR cell boundary for
KDTree level
"""
self.coord = coord
self.direction = direction
self.kdtree = PyKDTree(positions, left_edge, right_edge,
periodic=(True, True, True), leafsize=16,
amr_nested=amr_nested)
def plot(self, filename):
plot_nodes = []
for leaf in self.kdtree.leaves:
le = leaf.left_edge
re = leaf.right_edge
d = DIRECTION_MAPPING[self.direction]
if not le[d] < self.coord <= re[d]:
continue
le = np.hstack([le[:d], le[d+1:]])
re = np.hstack([re[:d], re[d+1:]])
plot_nodes.append((le, re, leaf.npts))
_plot(plot_nodes, filename)
def natural_pow2_resolution(vals):
"""Find power of 2 that offers a natural spacing given value"""
return 2**(np.floor(np.log2(min(vals))) - 5)
def pin_power_2(inp, res, top_or_bottom):
"""Round inp to the nearest power of 2"""
ret = [0, 0]
for i, val in enumerate(inp):
if val == 0:
ret[i] = val
continue
# find nearest power of 2
adj = np.fmod(val, res)
ret[i] = val - (-(1 + top_or_bottom)*res/2 + adj)
if ret[i] > 1:
ret[i] = 1
return ret | 0.839701 | 0.651327 |
"""Integration tests for gcs_ocn_bq_ingest"""
import json
import os
import uuid
from typing import List
import pytest
from google.cloud import bigquery
from google.cloud import error_reporting
from google.cloud import storage
import gcs_ocn_bq_ingest.common.ordering
import gcs_ocn_bq_ingest.common.utils
# Absolute path of the directory containing this test module; used to
# locate files under tests/resources/.
TEST_DIR = os.path.realpath(os.path.dirname(__file__))
# Maximum time (seconds) to poll for a BigQuery load job to finish.
LOAD_JOB_POLLING_TIMEOUT = 10  # seconds
@pytest.fixture(scope="package")
def bq() -> bigquery.Client:
    """Provide a package-scoped BigQuery client pinned to the US location."""
    client = bigquery.Client(location="US")
    return client
@pytest.fixture(scope="package")
def gcs() -> storage.Client:
    """Provide a package-scoped Google Cloud Storage client."""
    client = storage.Client()
    return client
@pytest.fixture(scope="package")
def error() -> error_reporting.Client:
    """Error Reporting Client (package-scoped)."""
    return error_reporting.Client()
@pytest.fixture
def gcs_bucket(request, gcs: storage.Client) -> storage.Bucket:
    """Create a versioned GCS bucket for test artifacts.

    Uploads a bucket-level ``_config/load.json`` that overrides the
    default field delimiter. The teardown deletes every object version
    (required because versioning is enabled) and then the bucket.
    """
    bucket_name = f"test_gcs_ocn_bq_ingest_{str(uuid.uuid4())}"
    bucket = gcs.create_bucket(bucket_name)
    bucket.versioning_enabled = True
    bucket.patch()
    # Override the default field delimiter at the bucket level.
    config_blob: storage.Blob = bucket.blob("_config/load.json")
    config_blob.upload_from_string(json.dumps({"fieldDelimiter": "|"}))

    def teardown():
        # With object versioning enabled, all versions of every object
        # must be deleted before the bucket itself can be deleted.
        for blob in gcs.list_blobs(bucket, versions=True):
            blob.delete()
        bucket.delete(force=True)

    request.addfinalizer(teardown)
    return bucket
@pytest.fixture
def mock_env(gcs, monkeypatch):
    """Environment variable mocks shared by all tests.

    All tests use this fixture; it is specified module-wide in pytest.ini:

        [pytest]
        usefixtures = mock_env

    For more information on module-wide fixtures, see:
    https://docs.pytest.org/en/stable/fixture.html#use-fixtures-in-classes-and-modules-with-usefixtures
    """
    env = {
        # Infer project from the gcs client application default credentials.
        "GCP_PROJECT": gcs.project,
        "FUNCTION_NAME": "integration-test",
        "FUNCTION_TIMEOUT_SEC": "540",
        "BQ_PROJECT": gcs.project,
    }
    for name, value in env.items():
        monkeypatch.setenv(name, value)
@pytest.fixture
def ordered_mock_env(monkeypatch):
    """Environment variable mock enabling per-table ordered ingestion."""
    monkeypatch.setenv("ORDER_PER_TABLE", "TRUE")
@pytest.fixture
def dest_dataset(request, bq, monkeypatch):
    """Create a randomly named US BigQuery dataset; drop it on teardown."""
    suffix = str(uuid.uuid4())[:8].replace('-', '_')
    random_dataset = f"test_bq_ingest_gcf_{suffix}"
    if os.getenv('GCP_PROJECT') is None:
        # Fall back to the client's project when no env override is set.
        monkeypatch.setenv("GCP_PROJECT", bq.project)
    dataset = bigquery.Dataset(f"{os.getenv('GCP_PROJECT')}.{random_dataset}")
    dataset.location = "US"
    bq.create_dataset(dataset)
    print(f"created dataset {dataset.dataset_id}")

    def teardown():
        bq.delete_dataset(dataset, delete_contents=True, not_found_ok=True)

    request.addfinalizer(teardown)
    return dataset
@pytest.fixture
def dest_table(monkeypatch, request, bq, dest_dataset) -> bigquery.Table:
    """Create a destination table using the nation schema; drop on teardown."""
    schema_path = os.path.join(TEST_DIR, "resources", "nation_schema.json")
    with open(schema_path) as schema_file:
        schema = gcs_ocn_bq_ingest.common.utils.dict_to_bq_schema(
            json.load(schema_file))
    if os.getenv('GCP_PROJECT') is None:
        monkeypatch.setenv("GCP_PROJECT", bq.project)
    table_id = (f"{os.getenv('GCP_PROJECT')}"
                f".{dest_dataset.dataset_id}.cf_test_nation_"
                f"{str(uuid.uuid4()).replace('-', '_')}")
    table = bq.create_table(bigquery.Table(table_id, schema=schema))

    def teardown():
        bq.delete_table(table, not_found_ok=True)

    request.addfinalizer(teardown)
    return table
@pytest.fixture
def gcs_data(gcs_bucket, dest_dataset, dest_table) -> List[storage.Blob]:
    """Upload the nation test-data files (plus a _SUCCESS marker) under
    ``<project>.<dataset>/<table>/`` and return the uploaded blobs.
    """
    data_objs: List[storage.Blob] = []
    for test_file in ["part-m-00000", "part-m-00001", "_SUCCESS"]:
        data_obj: storage.Blob = gcs_bucket.blob("/".join([
            f"{dest_dataset.project}.{dest_dataset.dataset_id}",
            dest_table.table_id, test_file
        ]))
        data_obj.upload_from_filename(
            os.path.join(TEST_DIR, "resources", "test-data", "nation",
                         test_file))
        data_objs.append(data_obj)
    return data_objs
@pytest.fixture
def gcs_data_under_sub_dirs(gcs_bucket, dest_dataset,
                            dest_table) -> List[storage.Blob]:
    """Upload the nation test-data files nested several directory levels
    below the table prefix (``.../foo/bar/baz/``) and return the blobs.
    """
    data_objs: List[storage.Blob] = []
    for test_file in ["part-m-00000", "part-m-00001", "_SUCCESS"]:
        data_obj: storage.Blob = gcs_bucket.blob("/".join([
            f"{dest_dataset.project}.{dest_dataset.dataset_id}",
            dest_table.table_id, "foo", "bar", "baz", test_file
        ]))
        data_obj.upload_from_filename(
            os.path.join(TEST_DIR, "resources", "test-data", "nation",
                         test_file))
        data_objs.append(data_obj)
    return data_objs
@pytest.fixture
def gcs_truncating_load_config(gcs_bucket, dest_dataset,
                               dest_table) -> List[storage.Blob]:
    """Upload a table-level load.json forcing WRITE_TRUNCATE; return it."""
    blob_path = "/".join([
        dest_dataset.dataset_id,
        dest_table.table_id,
        "_config",
        "load.json",
    ])
    config_obj: storage.Blob = gcs_bucket.blob(blob_path)
    config_obj.upload_from_string(
        json.dumps({"writeDisposition": "WRITE_TRUNCATE"}))
    return [config_obj]
@pytest.fixture
def gcs_batched_data(gcs_bucket, dest_dataset,
                     dest_table) -> List[storage.Blob]:
    """Upload two batches (batch0, batch1) of nation test data.

    Returns the uploaded blobs, including the _SUCCESS marker of each batch.
    """
    test_files = ["part-m-00000", "part-m-00001", "_SUCCESS"]
    uploaded: List[storage.Blob] = []
    for batch in ["batch0", "batch1"]:
        for test_file in test_files:
            blob: storage.Blob = gcs_bucket.blob("/".join([
                dest_dataset.dataset_id, dest_table.table_id, batch, test_file
            ]))
            blob.upload_from_filename(
                os.path.join(TEST_DIR, "resources", "test-data", "nation",
                             test_file))
            uploaded.append(blob)
    return uploaded
@pytest.fixture
def gcs_external_config(gcs_bucket, dest_dataset,
                        dest_table) -> List[storage.Blob]:
    """Upload external-table config for the table: a bq_transform.sql
    and an external.json (CSV options + nation schema). Returns both blobs.
    """
    prefix = "/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_table.table_id,
        "_config",
    ])
    sql_obj = gcs_bucket.blob(f"{prefix}/bq_transform.sql")
    # Template placeholders are filled in by the ingest function at runtime.
    sql_obj.upload_from_string(
        "INSERT {dest_dataset}.{dest_table} SELECT * FROM temp_ext")
    config_obj = gcs_bucket.blob(f"{prefix}/external.json")
    with open(os.path.join(TEST_DIR, "resources",
                           "nation_schema.json")) as schema:
        fields = json.load(schema)
    external_config = {
        "schema": {
            "fields": fields
        },
        "csvOptions": {
            "allowJaggedRows": False,
            "allowQuotedNewlines": False,
            "encoding": "UTF-8",
            "fieldDelimiter": "|",
            "skipLeadingRows": 0,
        },
        "sourceFormat": "CSV",
        "sourceUris": ["REPLACEME"],
    }
    config_obj.upload_from_string(json.dumps(external_config))
    return [sql_obj, config_obj]
@pytest.fixture
def gcs_destination_config(gcs_bucket, dest_dataset,
                           dest_partitioned_table) -> List[storage.Blob]:
    """Upload a bucket-level load.json with an explicit destinationTable
    and a destinationRegex that extracts partition parts from object paths.
    """
    destination_regex = (
        r"(?P<table>.*?)/"  # ignore everything leading up to partition
        r"\$?(?P<yyyy>[\d]{4})/?"  # partition year (yyyy) (optional)
        r"(?P<mm>[\d]{2})?/?"  # partition month (mm) (optional)
        r"(?P<dd>[\d]{2})?/?"  # partition day (dd) (optional)
        r"(?P<hh>[\d]{2})?/?"  # partition hour (hh) (optional)
    )
    load_config = {
        "writeDisposition": "WRITE_TRUNCATE",
        "fieldDelimiter": "|",
        "destinationTable": {
            "projectId": dest_partitioned_table.project,
            "datasetId": dest_partitioned_table.dataset_id,
            "tableId": dest_partitioned_table.table_id
        },
        "destinationRegex": destination_regex,
    }
    config_obj: storage.Blob = gcs_bucket.blob("_config/load.json")
    config_obj.upload_from_string(json.dumps(load_config))
    return [config_obj]
@pytest.fixture
def gcs_destination_parquet_config(
        gcs_bucket, dest_dataset, dest_partitioned_table) -> List[storage.Blob]:
    """Upload a bucket-level load.json for PARQUET data with an explicit
    destinationTable and a destinationRegex capturing a batch component.

    :param gcs_bucket: bucket to upload the config into
    :param dest_dataset: destination dataset fixture
    :param dest_partitioned_table: table the config points at
    :return: list containing the uploaded config blob
    """
    destination_regex = (
        r"(?P<table>.*?)"  # ignore everything leading up to partition
        r"(?:[\d]{4})?/?"
        r"(?:[\d]{2})?/?"
        r"(?:[\d]{2})?/?"
        r"(?P<batch>[\d]{2})/?"  # batch
    )
    load_config = {
        "sourceFormat": "PARQUET",
        "destinationTable": {
            "projectId": dest_partitioned_table.project,
            "datasetId": dest_partitioned_table.dataset_id,
            "tableId": dest_partitioned_table.table_id
        },
        "destinationRegex": destination_regex,
    }
    config_obj: storage.Blob = gcs_bucket.blob("_config/load.json")
    config_obj.upload_from_string(json.dumps(load_config))
    return [config_obj]
@pytest.fixture
def gcs_destination_parquet_config_hive_partitioned(
        gcs_bucket, dest_dataset,
        dest_hive_partitioned_table) -> List[storage.Blob]:
    """Upload a bucket-level load.json (PARQUET) whose destinationRegex
    extracts yyyy/mm/dd/hh path components, with a dataSourceName set.

    :param gcs_bucket: bucket to upload the config into
    :param dest_dataset: destination dataset fixture
    :param dest_hive_partitioned_table: table the config points at
    :return: list containing the uploaded config blob
    """
    destination_regex = (
        r"(?P<table>.*?)/"  # ignore everything leading up to partition
        r"(?P<yyyy>[\d]{4})/"
        r"(?P<mm>[\d]{2})/"
        r"(?P<dd>[\d]{2})/"
        r"(?P<hh>[\d]{2})/"
    )
    load_config = {
        "sourceFormat": "PARQUET",
        "destinationTable": {
            "projectId": dest_hive_partitioned_table.project,
            "datasetId": dest_hive_partitioned_table.dataset_id,
            "tableId": dest_hive_partitioned_table.table_id
        },
        "destinationRegex": destination_regex,
        "dataSourceName": "some-onprem-data-source"
    }
    config_obj: storage.Blob = gcs_bucket.blob("_config/load.json")
    config_obj.upload_from_string(json.dumps(load_config))
    return [config_obj]
@pytest.fixture
def gcs_destination_parquet_config_partitioned_alternate(
        gcs_bucket, dest_dataset, dest_partitioned_table) -> List[storage.Blob]:
    """
    This tests that a load.json file with destinationTable and destinationRegex
    (using year=/month=/day=/hr= style path components) is used to load data.
    :param gcs_bucket: bucket to upload the config into
    :param dest_dataset: destination dataset fixture
    :param dest_partitioned_table: table the config points at
    :return: list containing the uploaded config blob
    """
    destination_regex = (
        r"(?P<table>.*?)/"  # ignore everything leading up to partition
        r"year=(?P<yyyy>[\d]{4})/"
        r"month=(?P<mm>[\d]{1,2})/"
        r"day=(?P<dd>[\d]{1,2})/"
        r"hr=(?P<hh>[\d]{1,2})/")
    config_objs = []
    config_obj: storage.Blob = gcs_bucket.blob("/".join([
        "_config",
        "load.json",
    ]))
    config_obj.upload_from_string(
        json.dumps({
            "sourceFormat": "PARQUET",
            "destinationTable": {
                "projectId": dest_partitioned_table.project,
                "datasetId": dest_partitioned_table.dataset_id,
                "tableId": dest_partitioned_table.table_id
            },
            "destinationRegex": destination_regex,
            "dataSourceName": "some-onprem-data-source"
        }))
    config_objs.append(config_obj)
    return config_objs
@pytest.fixture
def gcs_partitioned_data(gcs_bucket, dest_dataset,
                         dest_partitioned_table) -> List[storage.Blob]:
    """Upload CSV data + _SUCCESS markers for two hourly partitions.

    Also drops an empty dot-prefixed file in each partition — presumably to
    exercise that the ingest skips files starting with "." (TODO confirm).
    """
    data_objs = []
    for partition in ["$2017041101", "$2017041102"]:
        for test_file in ["nyc_311.csv", "_SUCCESS"]:
            data_obj: storage.Blob = gcs_bucket.blob("/".join([
                dest_dataset.dataset_id, dest_partitioned_table.table_id,
                partition, test_file
            ]))
            data_obj.upload_from_filename(
                os.path.join(TEST_DIR, "resources", "test-data", "nyc_311",
                             partition, test_file))
            data_objs.append(data_obj)
        dot_blob: storage.Blob = gcs_bucket.blob("/".join([
            dest_dataset.dataset_id, dest_partitioned_table.table_id, partition,
            ".file_that_starts_with_dot"
        ]))
        dot_blob.upload_from_string("")
        data_objs.append(dot_blob)
    return data_objs
@pytest.fixture
def gcs_partitioned_data_allow_jagged(
        gcs_bucket, dest_dataset,
        dest_partitioned_table_allow_jagged) -> List[storage.Blob]:
    """Upload gzipped CSV data + _SUCCESS markers for the jagged-rows table."""
    data_objs = []
    for partition in ["$2017041101", "$2017041102"]:
        for test_file in ["nyc_311.csv.gz", "_SUCCESS"]:
            data_obj: storage.Blob = gcs_bucket.blob("/".join([
                dest_dataset.dataset_id,
                dest_partitioned_table_allow_jagged.table_id, partition,
                test_file
            ]))
            data_obj.upload_from_filename(
                os.path.join(TEST_DIR, "resources", "test-data", "nyc_311",
                             partition, test_file))
            data_objs.append(data_obj)
    return data_objs
@pytest.fixture
def gcs_partitioned_parquet_data(gcs_bucket, dest_dataset,
                                 dest_partitioned_table) -> List[storage.Blob]:
    """Upload parquet chunks + _SUCCESS at the bucket root per partition.

    NOTE(review): dest_dataset / dest_partitioned_table are not part of the
    blob path here — they are requested only so the table exists before data
    lands (paths are resolved via the bucket-level load.json destinationRegex).
    """
    data_objs = []
    for partition in ["$2017041101", "$2017041102"]:
        for test_file in [
                "nyc311_25_rows_00.parquet", "nyc311_25_rows_01.parquet",
                "_SUCCESS"
        ]:
            data_obj: storage.Blob = gcs_bucket.blob("/".join(
                [partition, test_file]))
            data_obj.upload_from_filename(
                os.path.join(TEST_DIR, "resources", "test-data", "nyc_311",
                             partition, test_file))
            data_objs.append(data_obj)
    return data_objs
@pytest.fixture
def gcs_split_path_partitioned_data(
        gcs_bucket, dest_dataset, dest_partitioned_table) -> List[storage.Blob]:
    """Upload CSVs under a deep path splitting yyyy/mm/dd/hh into directories.

    The partition id "$2017041101" is sliced into its date components to
    build the object path; a hive-style dir is appended below the hour.
    """
    data_objs = []
    for partition in ["$2017041101", "$2017041102"]:
        for test_file in ["nyc_311.csv", "_SUCCESS"]:
            data_obj: storage.Blob = gcs_bucket.blob("/".join([
                "foo",
                "bar",
                "baz",
                partition[1:5],  # year
                partition[5:7],  # month
                partition[7:9],  # day
                partition[9:],  # hour
                "hive_part_column=9999",
                test_file
            ]))
            data_obj.upload_from_filename(
                os.path.join(TEST_DIR, "resources", "test-data", "nyc_311",
                             partition, test_file))
            data_objs.append(data_obj)
    return data_objs
@pytest.fixture
def gcs_split_path_partitioned_parquet_data(
        gcs_bucket, dest_dataset, dest_partitioned_table) -> List[storage.Blob]:
    """Upload parquet chunks under split yyyy/mm/dd/hh paths.

    Unlike the data files (which sit under a hive_part_column dir), the
    _SUCCESS marker is written directly under the hour folder.
    """
    data_objs = []
    for partition in ["$2017041101", "$2017041102"]:
        for test_file in [
                "nyc311_25_rows_00.parquet", "nyc311_25_rows_01.parquet"
        ]:
            data_obj: storage.Blob = gcs_bucket.blob("/".join([
                "foo",
                "bar",
                "baz",
                partition[1:5],  # year
                partition[5:7],  # month
                partition[7:9],  # day
                partition[9:],  # batch
                "hive_part_column=9999",
                test_file
            ]))
            data_obj.upload_from_filename(
                os.path.join(TEST_DIR, "resources", "test-data", "nyc_311",
                             partition, test_file))
            data_objs.append(data_obj)
        # Add _SUCCESS file under the hour partition folder
        data_obj = gcs_bucket.blob("/".join([
            "foo",
            "bar",
            "baz",
            partition[1:5],  # year
            partition[5:7],  # month
            partition[7:9],  # day
            partition[9:],  # batch
            "_SUCCESS"
        ]))
        data_obj.upload_from_filename(
            os.path.join(TEST_DIR, "resources", "test-data", "nyc_311",
                         partition, "_SUCCESS"))
        data_objs.append(data_obj)
    return data_objs
@pytest.fixture
def gcs_split_path_partitioned_parquet_data_alternate(
        gcs_bucket, dest_dataset, dest_partitioned_table) -> List[storage.Blob]:
    """Upload parquet chunks under hive-style year=/month=/day=/hr= paths.

    Pairs with the *_partitioned_alternate load.json fixture whose regex
    expects key=value path components.
    """
    data_objs = []
    for partition in ["$2017041101", "$2017041102"]:
        for test_file in [
                "nyc311_25_rows_00.parquet", "nyc311_25_rows_01.parquet"
        ]:
            data_obj: storage.Blob = gcs_bucket.blob("/".join([
                "foo",
                "bar",
                "baz",
                f"year={partition[1:5]}",  # year
                f"month={partition[5:7]}",  # month
                f"day={partition[7:9]}",  # day
                f"hr={partition[9:]}",  # batch
                test_file
            ]))
            data_obj.upload_from_filename(
                os.path.join(TEST_DIR, "resources", "test-data", "nyc_311",
                             partition, test_file))
            data_objs.append(data_obj)
        # Add _SUCCESS file under the hour partition folder
        data_obj = gcs_bucket.blob("/".join([
            "foo",
            "bar",
            "baz",
            f"year={partition[1:5]}",  # year
            f"month={partition[5:7]}",  # month
            f"day={partition[7:9]}",  # day
            f"hr={partition[9:]}",  # batch
            "_SUCCESS"
        ]))
        data_obj.upload_from_filename(
            os.path.join(TEST_DIR, "resources", "test-data", "nyc_311",
                         partition, "_SUCCESS"))
        data_objs.append(data_obj)
    return data_objs
@pytest.fixture
def dest_partitioned_table(bq: bigquery.Client, dest_dataset,
                           monkeypatch) -> bigquery.Table:
    """Create an hourly-partitioned table cloning the public NYC 311 schema.

    No explicit teardown: the dest_dataset fixture deletes the dataset with
    delete_contents=True, which removes this table.
    """
    public_table: bigquery.Table = bq.get_table(
        bigquery.TableReference.from_string(
            "bigquery-public-data.new_york_311.311_service_requests"))
    schema = public_table.schema
    if os.getenv('GCP_PROJECT') is None:
        monkeypatch.setenv("GCP_PROJECT", bq.project)
    table: bigquery.Table = bigquery.Table(
        f"{os.getenv('GCP_PROJECT')}"
        f".{dest_dataset.dataset_id}.cf_test_nyc_311_"
        f"{str(uuid.uuid4()).replace('-', '_')}",
        schema=schema,
    )
    table.time_partitioning = bigquery.TimePartitioning()
    table.time_partitioning.type_ = bigquery.TimePartitioningType.HOUR
    table.time_partitioning.field = "created_date"
    table = bq.create_table(table)
    return table
@pytest.fixture
def dest_hive_partitioned_table(bq: bigquery.Client, dest_dataset,
                                monkeypatch) -> bigquery.Table:
    """Like dest_partitioned_table, plus an INT64 hive_part_column field."""
    public_table: bigquery.Table = bq.get_table(
        bigquery.TableReference.from_string(
            "bigquery-public-data.new_york_311.311_service_requests"))
    schema = public_table.schema
    # Extra column populated from the hive-style path component.
    schema.append(bigquery.SchemaField('hive_part_column', 'INT64'))
    if os.getenv('GCP_PROJECT') is None:
        monkeypatch.setenv("GCP_PROJECT", bq.project)
    table: bigquery.Table = bigquery.Table(
        f"{os.getenv('GCP_PROJECT')}"
        f".{dest_dataset.dataset_id}.cf_test_nyc_311_"
        f"{str(uuid.uuid4()).replace('-', '_')}",
        schema=schema,
    )
    table.time_partitioning = bigquery.TimePartitioning()
    table.time_partitioning.type_ = bigquery.TimePartitioningType.HOUR
    table.time_partitioning.field = "created_date"
    table = bq.create_table(table)
    return table
@pytest.fixture
def dest_partitioned_table_allow_jagged(bq: bigquery.Client, dest_dataset,
                                        monkeypatch) -> bigquery.Table:
    """Hourly-partitioned NYC 311 table with one extra trailing STRING column.

    The extra column makes source rows "jagged" (shorter than the schema) so
    tests can exercise allowJaggedRows handling.
    """
    public_table: bigquery.Table = bq.get_table(
        bigquery.TableReference.from_string(
            "bigquery-public-data.new_york_311.311_service_requests"))
    schema = public_table.schema
    if os.getenv('GCP_PROJECT') is None:
        monkeypatch.setenv("GCP_PROJECT", bq.project)
    extra_field_for_jagged_row_test = bigquery.schema.SchemaField(
        "extra_jagged_row_test_column", "STRING")
    schema.append(extra_field_for_jagged_row_test)
    table: bigquery.Table = bigquery.Table(
        f"{os.getenv('GCP_PROJECT')}"
        f".{dest_dataset.dataset_id}.cf_test_nyc_311_"
        f"{str(uuid.uuid4()).replace('-', '_')}",
        schema=schema,
    )
    table.time_partitioning = bigquery.TimePartitioning()
    table.time_partitioning.type_ = bigquery.TimePartitioningType.HOUR
    table.time_partitioning.field = "created_date"
    table = bq.create_table(table)
    return table
@pytest.fixture
def dest_ordered_update_table(gcs, gcs_bucket, bq,
                              dest_dataset) -> bigquery.Table:
    """Create the ordering-test table, seed one row, and plant a _bqlock."""
    with open(os.path.join(TEST_DIR, "resources",
                           "ordering_schema.json")) as schema_file:
        schema = gcs_ocn_bq_ingest.common.utils.dict_to_bq_schema(
            json.load(schema_file))
    table: bigquery.Table = bigquery.Table(
        f"{dest_dataset.project}.{dest_dataset.dataset_id}"
        f".cf_test_ordering_{str(uuid.uuid4()).replace('-', '_')}",
        schema=schema,
    )
    table = bq.create_table(table)
    # Our test query only updates on a single row so we need to populate
    # original row.
    job: bigquery.LoadJob = bq.load_table_from_json(
        [{
            "id": 1,
            "alpha_update": ""
        }],
        table,
        job_id_prefix=gcs_ocn_bq_ingest.common.constants.DEFAULT_JOB_PREFIX)
    # The _bqlock below simulates one left by a prior run of the subscriber
    # loop whose job succeeded.
    # The subscriber will be responsible for cleaning up this file.
    bqlock_obj: storage.Blob = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}", table.table_id,
        "_bqlock"
    ]))
    bqlock_obj.upload_from_string(
        json.dumps(dict(job_id=job.job_id,
                        table=table.reference.to_api_repr())))
    return table
@pytest.fixture
def gcs_ordered_update_data(gcs_bucket, dest_dataset,
                            dest_ordered_update_table) -> List[storage.Blob]:
    """Upload ordered incremental chunks for the ordering tests.

    Chunk "00" gets only a _SUCCESS marker (treated as already applied);
    chunks 01-03 get data.csv + _SUCCESS.
    :return: only the _SUCCESS blobs (data files just ride along in GCS).
    """
    data_objs = []
    older_success_blob: storage.Blob = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_ordered_update_table.table_id, "00", "_SUCCESS"
    ]))
    older_success_blob.upload_from_string("")
    data_objs.append(older_success_blob)
    # Fix: was a set literal, whose iteration order is arbitrary — an
    # ordering fixture should upload (and return) chunks deterministically.
    chunks = ("01", "02", "03")
    for chunk in chunks:
        for test_file in ["data.csv", "_SUCCESS"]:
            data_obj: storage.Blob = gcs_bucket.blob("/".join([
                f"{dest_dataset.project}.{dest_dataset.dataset_id}",
                dest_ordered_update_table.table_id, chunk, test_file
            ]))
            data_obj.upload_from_filename(
                os.path.join(TEST_DIR, "resources", "test-data", "ordering",
                             chunk, test_file))
            data_objs.append(data_obj)
    return list(filter(lambda do: do.name.endswith("_SUCCESS"), data_objs))
@pytest.fixture
def gcs_backlog(gcs, gcs_bucket, gcs_ordered_update_data) -> List[storage.Blob]:
    """Publish each uploaded _SUCCESS blob to the ingest backlog."""
    data_objs = []
    # We will deal with the last incremental in the test itself to test the
    # behavior of a new backlog subscriber.
    for success_blob in gcs_ordered_update_data:
        gcs_ocn_bq_ingest.common.ordering.backlog_publisher(gcs, success_blob)
        backlog_blob = gcs_ocn_bq_ingest.common.ordering.success_blob_to_backlog_blob(
            gcs, success_blob)
        backlog_blob.upload_from_string("")
        data_objs.append(backlog_blob)
    # Backlog blob names presumably end with the _SUCCESS filename — verify
    # against ordering.success_blob_to_backlog_blob.
    return list(filter(lambda do: do.name.endswith("_SUCCESS"), data_objs))
@pytest.fixture
def gcs_external_update_config(gcs_bucket, dest_dataset,
                               dest_ordered_update_table) -> List[storage.Blob]:
    """Upload transform SQL, external-table config, and a _BACKFILL trigger.

    The SQL updates rows in place from the temp_ext external table; the
    {dest_dataset}/{dest_table} placeholders are filled in by the ingest
    code, and "REPLACEME" in sourceUris is substituted at load time.
    """
    config_objs = []
    sql_obj = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_ordered_update_table.table_id,
        "_config",
        "bq_transform.sql",
    ]))
    sql = """
    UPDATE {dest_dataset}.{dest_table} dest
    SET alpha_update = CONCAT(dest.alpha_update, src.alpha_update)
    FROM temp_ext src
    WHERE dest.id = src.id
    """
    sql_obj.upload_from_string(sql)
    config_obj = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_ordered_update_table.table_id, "_config", "external.json"
    ]))
    with open(os.path.join(TEST_DIR, "resources",
                           "ordering_schema.json")) as schema:
        fields = json.load(schema)
    config = {
        "schema": {
            "fields": fields
        },
        "csvOptions": {
            "allowJaggedRows": False,
            "allowQuotedNewlines": False,
            "encoding": "UTF-8",
            "fieldDelimiter": "|",
            "skipLeadingRows": 0,
        },
        "sourceFormat": "CSV",
        "sourceUris": ["REPLACEME"],
    }
    config_obj.upload_from_string(json.dumps(config))
    backfill_blob = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_ordered_update_table.table_id,
        gcs_ocn_bq_ingest.common.constants.BACKFILL_FILENAME
    ]))
    backfill_blob.upload_from_string("")
    config_objs.append(sql_obj)
    config_objs.append(config_obj)
    config_objs.append(backfill_blob)
    return config_objs
@pytest.fixture
def gcs_external_partitioned_config(
        bq, gcs_bucket, dest_dataset,
        dest_partitioned_table) -> List[storage.Blob]:
    """Upload transform SQL + external.json scoped to the partitioned table."""
    config_objs = []
    sql_obj = gcs_bucket.blob("/".join([
        dest_dataset.dataset_id,
        dest_partitioned_table.table_id,
        "_config",
        "bq_transform.sql",
    ]))
    sql = "INSERT {dest_dataset}.{dest_table} SELECT * FROM temp_ext;"
    sql_obj.upload_from_string(sql)
    config_objs.append(sql_obj)
    config_obj = gcs_bucket.blob("/".join([
        dest_dataset.dataset_id, dest_partitioned_table.table_id, "_config",
        "external.json"
    ]))
    # Reuse the public NYC 311 table's schema (REST representation).
    public_table: bigquery.Table = bq.get_table(
        bigquery.TableReference.from_string(
            "bigquery-public-data.new_york_311.311_service_requests"))
    config = {
        "schema": public_table.to_api_repr()['schema'],
        "csvOptions": {
            "allowJaggedRows": False,
            "allowQuotedNewlines": False,
            "encoding": "UTF-8",
            "fieldDelimiter": "|",
            "skipLeadingRows": 0,
        },
        "sourceFormat": "CSV",
        "sourceUris": ["REPLACEME"],
    }
    config_obj.upload_from_string(json.dumps(config))
    config_objs.append(config_obj)
    return config_objs
@pytest.fixture
def gcs_external_partitioned_config_allow_jagged(
        bq, gcs_bucket, dest_dataset,
        dest_partitioned_table_allow_jagged) -> List[storage.Blob]:
    """External config with allowJaggedRows=True, GZIP, and an extra column."""
    config_objs = []
    sql_obj = gcs_bucket.blob("/".join([
        dest_dataset.dataset_id,
        dest_partitioned_table_allow_jagged.table_id,
        "_config",
        "bq_transform.sql",
    ]))
    sql = "INSERT {dest_dataset}.{dest_table} SELECT * FROM temp_ext;"
    sql_obj.upload_from_string(sql)
    config_objs.append(sql_obj)
    config_obj = gcs_bucket.blob("/".join([
        dest_dataset.dataset_id, dest_partitioned_table_allow_jagged.table_id,
        "_config", "external.json"
    ]))
    public_table: bigquery.Table = bq.get_table(
        bigquery.TableReference.from_string(
            "bigquery-public-data.new_york_311.311_service_requests"))
    # Append a trailing column absent from the source files so rows are
    # jagged, exercising allowJaggedRows.
    extra_field_for_jagged_row_test = bigquery.schema.SchemaField(
        "extra_jagged_row_test_column", "STRING")
    jagged_schema = public_table.schema + [extra_field_for_jagged_row_test]
    config = {
        "schema": {
            "fields": [
                schema_field.to_api_repr() for schema_field in jagged_schema
            ]
        },
        "compression": "GZIP",
        "csvOptions": {
            "allowJaggedRows": True,
            "allowQuotedNewlines": False,
            "encoding": "UTF-8",
            "fieldDelimiter": "|",
            "skipLeadingRows": 0,
        },
        "sourceFormat": "CSV",
        "sourceUris": ["REPLACEME"],
    }
    config_obj.upload_from_string(json.dumps(config))
    config_objs.append(config_obj)
    return config_objs
@pytest.fixture
def gcs_external_partitioned_parquet_config(
        bq, gcs_bucket, dest_dataset,
        dest_partitioned_table) -> List[storage.Blob]:
    """Upload bucket-level transform SQL + a minimal PARQUET external.json."""
    config_objs = []
    # Upload SQL query used to load table
    sql_obj = gcs_bucket.blob("/".join([
        "_config",
        "bq_transform.sql",
    ]))
    sql_obj.upload_from_string("INSERT {dest_dataset}.{dest_table} "
                               "SELECT * FROM temp_ext;")
    config_objs.append(sql_obj)
    # Upload external table definition
    # https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externaldataconfiguration
    config_obj = gcs_bucket.blob("/".join(["_config", "external.json"]))
    config_obj.upload_from_string(json.dumps({"sourceFormat": "PARQUET"}))
    config_objs.append(config_obj)
    return config_objs
@pytest.fixture
def gcs_external_hive_partitioned_parquet_config(
        bq, gcs_bucket, dest_dataset,
        dest_partitioned_table) -> List[storage.Blob]:
    """PARQUET external config with hive partitioning auto-detection."""
    config_objs = []
    # Upload SQL query used to load table
    sql_obj = gcs_bucket.blob("/".join([
        "_config",
        "bq_transform.sql",
    ]))
    sql_obj.upload_from_string("INSERT {dest_dataset}.{dest_table} "
                               "SELECT * FROM temp_ext;")
    config_objs.append(sql_obj)
    # Upload external table definition
    # https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externaldataconfiguration
    config_obj = gcs_bucket.blob("/".join(["_config", "external.json"]))
    config_obj.upload_from_string(
        json.dumps({
            "sourceFormat": "PARQUET",
            "hivePartitioningOptions": {
                "mode": "AUTO"
            }
        }))
    config_objs.append(config_obj)
    return config_objs
@pytest.fixture
def no_use_error_reporting(monkeypatch):
    """Disable the Error Reporting API so failures surface as exceptions."""
    monkeypatch.setenv("USE_ERROR_REPORTING_API", "False")
@pytest.fixture
def gcs_external_config_bad_statement(
        gcs_bucket, dest_dataset, dest_table,
        no_use_error_reporting) -> List[storage.Blob]:
    """Upload an external-table config whose first SQL statement is invalid.

    Used to assert that a bad transform statement is surfaced as an error
    (error reporting disabled via no_use_error_reporting).
    """
    config_objs = []
    sql_obj = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_table.table_id,
        "_config",
        "bq_transform.sql",
    ]))
    sql = ("Woops this isn't valid SQL;\n"
           "INSERT {dest_dataset}.{dest_table} SELECT * FROM temp_ext;")
    sql_obj.upload_from_string(sql)
    config_obj = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_table.table_id, "_config", "external.json"
    ]))
    with open(os.path.join(TEST_DIR, "resources",
                           "nation_schema.json")) as schema:
        fields = json.load(schema)
    config = {
        "schema": {
            "fields": fields
        },
        "csvOptions": {
            "allowJaggedRows": False,
            "allowQuotedNewlines": False,
            "encoding": "UTF-8",
            "fieldDelimiter": "|",
            "skipLeadingRows": 0,
        },
        "sourceFormat": "CSV",
        "sourceUris": ["REPLACEME"],
    }
    config_obj.upload_from_string(json.dumps(config))
    config_objs.append(sql_obj)
    config_objs.append(config_obj)
    # Restored: the original final line was corrupted with non-Python residue
    # ("return config_objs | tools/..."), which is a syntax error.
    return config_objs
import json
import os
import uuid
from typing import List
import pytest
from google.cloud import bigquery
from google.cloud import error_reporting
from google.cloud import storage
import gcs_ocn_bq_ingest.common.ordering
import gcs_ocn_bq_ingest.common.utils
TEST_DIR = os.path.realpath(os.path.dirname(__file__))
LOAD_JOB_POLLING_TIMEOUT = 10 # seconds
@pytest.fixture(scope="package")
def bq() -> bigquery.Client:
    """Package-scoped BigQuery client (US location)."""
    return bigquery.Client(location="US")
@pytest.fixture(scope="package")
def gcs() -> storage.Client:
    """Package-scoped GCS client."""
    return storage.Client()
@pytest.fixture(scope="package")
def error() -> error_reporting.Client:
    """Package-scoped Error Reporting client."""
    return error_reporting.Client()
@pytest.fixture
def gcs_bucket(request, gcs: storage.Client) -> storage.Bucket:
    """Create a uniquely-named, versioned GCS bucket for test artifacts.

    Seeds a bucket-level _config/load.json and registers a finalizer that
    deletes all object versions before deleting the bucket.
    """
    bucket = gcs.create_bucket(f"test_gcs_ocn_bq_ingest_{str(uuid.uuid4())}")
    bucket.versioning_enabled = True
    bucket.patch()
    # override default field delimiter at bucket level
    load_config_json = {
        "fieldDelimiter": "|",
    }
    load_json_blob: storage.Blob = bucket.blob("_config/load.json")
    load_json_blob.upload_from_string(json.dumps(load_config_json))
    def teardown():
        # Since bucket has object versioning enabled, you must
        # delete all versions of objects before you can delete the bucket.
        for blob in gcs.list_blobs(bucket, versions=True):
            blob.delete()
        bucket.delete(force=True)
    request.addfinalizer(teardown)
    return bucket
@pytest.fixture
def mock_env(gcs, monkeypatch):
    """
    environment variable mocks
    All tests use this fixture; it is specified in the
    pytest.ini file as:
    [pytest]
    usefixtures = mock_env
    For more information on module-wide fixtures, see:
    https://docs.pytest.org/en/stable/fixture.html#use-fixtures-in-classes-and-modules-with-usefixtures
    """
    # Infer project from the gcs client application default credentials.
    monkeypatch.setenv("GCP_PROJECT", gcs.project)
    monkeypatch.setenv("FUNCTION_NAME", "integration-test")
    monkeypatch.setenv("FUNCTION_TIMEOUT_SEC", "540")
    monkeypatch.setenv("BQ_PROJECT", gcs.project)
@pytest.fixture
def ordered_mock_env(monkeypatch):
    """environment variable mocks: enable per-table ordered ingestion."""
    monkeypatch.setenv("ORDER_PER_TABLE", "TRUE")
@pytest.fixture
def dest_dataset(request, bq, monkeypatch):
    """Create a uniquely-named US dataset; finalizer drops it with contents."""
    random_dataset = (f"test_bq_ingest_gcf_"
                      f"{str(uuid.uuid4())[:8].replace('-', '_')}")
    if os.getenv('GCP_PROJECT') is None:
        monkeypatch.setenv("GCP_PROJECT", bq.project)
    dataset = bigquery.Dataset(f"{os.getenv('GCP_PROJECT')}"
                               f".{random_dataset}")
    dataset.location = "US"
    bq.create_dataset(dataset)
    print(f"created dataset {dataset.dataset_id}")
    def teardown():
        # delete_contents=True also removes tables created by other fixtures.
        bq.delete_dataset(dataset, delete_contents=True, not_found_ok=True)
    request.addfinalizer(teardown)
    return dataset
@pytest.fixture
def dest_table(monkeypatch, request, bq, dest_dataset) -> bigquery.Table:
    """Create the "nation" destination table from the bundled JSON schema."""
    with open(os.path.join(TEST_DIR, "resources",
                           "nation_schema.json")) as schema_file:
        schema = gcs_ocn_bq_ingest.common.utils.dict_to_bq_schema(
            json.load(schema_file))
    if os.getenv('GCP_PROJECT') is None:
        monkeypatch.setenv("GCP_PROJECT", bq.project)
    table = bq.create_table(
        bigquery.Table(
            f"{os.getenv('GCP_PROJECT')}"
            f".{dest_dataset.dataset_id}.cf_test_nation_"
            f"{str(uuid.uuid4()).replace('-', '_')}",
            schema=schema,
        ))
    def teardown():
        bq.delete_table(table, not_found_ok=True)
    request.addfinalizer(teardown)
    return table
@pytest.fixture
def gcs_data(gcs_bucket, dest_dataset, dest_table) -> List[storage.Blob]:
    """Upload nation part files + _SUCCESS under project.dataset/table.

    (Return annotation corrected: the fixture returns a list of blobs.)
    """
    data_objs: List[storage.Blob] = []
    for test_file in ["part-m-00000", "part-m-00001", "_SUCCESS"]:
        data_obj: storage.Blob = gcs_bucket.blob("/".join([
            f"{dest_dataset.project}.{dest_dataset.dataset_id}",
            dest_table.table_id, test_file
        ]))
        data_obj.upload_from_filename(
            os.path.join(TEST_DIR, "resources", "test-data", "nation",
                         test_file))
        data_objs.append(data_obj)
    return data_objs
@pytest.fixture
def gcs_data_under_sub_dirs(gcs_bucket, dest_dataset,
                            dest_table) -> List[storage.Blob]:
    """Upload nation data nested several directories below the table path.

    (Return annotation corrected: the fixture returns a list of blobs.)
    """
    data_objs: List[storage.Blob] = []
    for test_file in ["part-m-00000", "part-m-00001", "_SUCCESS"]:
        data_obj: storage.Blob = gcs_bucket.blob("/".join([
            f"{dest_dataset.project}.{dest_dataset.dataset_id}",
            dest_table.table_id, "foo", "bar", "baz", test_file
        ]))
        data_obj.upload_from_filename(
            os.path.join(TEST_DIR, "resources", "test-data", "nation",
                         test_file))
        data_objs.append(data_obj)
    return data_objs
@pytest.fixture
def gcs_truncating_load_config(gcs_bucket, dest_dataset,
                               dest_table) -> List[storage.Blob]:
    """Upload a table-scoped load.json that forces WRITE_TRUNCATE loads."""
    blob_path = "/".join(
        [dest_dataset.dataset_id, dest_table.table_id, "_config", "load.json"])
    truncate_cfg: storage.Blob = gcs_bucket.blob(blob_path)
    truncate_cfg.upload_from_string(
        json.dumps({"writeDisposition": "WRITE_TRUNCATE"}))
    return [truncate_cfg]
@pytest.fixture
def gcs_batched_data(gcs_bucket, dest_dataset,
                     dest_table) -> List[storage.Blob]:
    """Upload the same nation part files + _SUCCESS into two batch folders."""
    uploaded: List[storage.Blob] = []
    for batch_dir in ("batch0", "batch1"):
        for part_name in ("part-m-00000", "part-m-00001", "_SUCCESS"):
            blob: storage.Blob = gcs_bucket.blob("/".join([
                dest_dataset.dataset_id, dest_table.table_id, batch_dir,
                part_name
            ]))
            # Every batch reuses the same local fixture files.
            blob.upload_from_filename(
                os.path.join(TEST_DIR, "resources", "test-data", "nation",
                             part_name))
            uploaded.append(blob)
    return uploaded
@pytest.fixture
def gcs_external_config(gcs_bucket, dest_dataset,
                        dest_table) -> List[storage.Blob]:
    """Upload transform SQL + CSV external.json scoped to dest_table."""
    config_objs = []
    sql_obj = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_table.table_id,
        "_config",
        "bq_transform.sql",
    ]))
    sql = "INSERT {dest_dataset}.{dest_table} SELECT * FROM temp_ext"
    sql_obj.upload_from_string(sql)
    config_obj = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_table.table_id, "_config", "external.json"
    ]))
    with open(os.path.join(TEST_DIR, "resources",
                           "nation_schema.json")) as schema:
        fields = json.load(schema)
    config = {
        "schema": {
            "fields": fields
        },
        "csvOptions": {
            "allowJaggedRows": False,
            "allowQuotedNewlines": False,
            "encoding": "UTF-8",
            "fieldDelimiter": "|",
            "skipLeadingRows": 0,
        },
        "sourceFormat": "CSV",
        "sourceUris": ["REPLACEME"],
    }
    config_obj.upload_from_string(json.dumps(config))
    config_objs.append(sql_obj)
    config_objs.append(config_obj)
    return config_objs
@pytest.fixture
def gcs_destination_config(gcs_bucket, dest_dataset,
                           dest_partitioned_table) -> List[storage.Blob]:
    """
    This tests that a load.json file with destinationTable specified is used
    to load data.
    """
    config_objs = []
    config_obj: storage.Blob = gcs_bucket.blob("/".join([
        "_config",
        "load.json",
    ]))
    config_obj.upload_from_string(
        json.dumps({
            "writeDisposition":
                "WRITE_TRUNCATE",
            "fieldDelimiter":
                "|",
            "destinationTable": {
                "projectId": dest_partitioned_table.project,
                "datasetId": dest_partitioned_table.dataset_id,
                "tableId": dest_partitioned_table.table_id
            },
            "destinationRegex": (
                r"(?P<table>.*?)/"  # ignore everything leading up to partition
                r"\$?(?P<yyyy>[\d]{4})/?"  # partition year (yyyy) (optional)
                r"(?P<mm>[\d]{2})?/?"  # partition month (mm) (optional)
                r"(?P<dd>[\d]{2})?/?"  # partition day (dd) (optional)
                r"(?P<hh>[\d]{2})?/?"  # partition hour (hh) (optional)
            )
        }))
    config_objs.append(config_obj)
    return config_objs
@pytest.fixture
def gcs_destination_parquet_config(
        gcs_bucket, dest_dataset, dest_partitioned_table) -> List[storage.Blob]:
    """
    This tests that a load.json file with destinationTable specified is used
    to load data.
    :param gcs_bucket:
    :param dest_dataset:
    :param dest_partitioned_table:
    :return:
    """
    # Only the trailing batch component is captured; date components are
    # optional non-capturing groups.
    destination_regex = (
        r"(?P<table>.*?)"  # ignore everything leading up to partition
        r"(?:[\d]{4})?/?"
        r"(?:[\d]{2})?/?"
        r"(?:[\d]{2})?/?"
        r"(?P<batch>[\d]{2})/?"  # batch
    )
    config_objs = []
    config_obj: storage.Blob = gcs_bucket.blob("/".join([
        "_config",
        "load.json",
    ]))
    config_obj.upload_from_string(
        json.dumps({
            "sourceFormat": "PARQUET",
            "destinationTable": {
                "projectId": dest_partitioned_table.project,
                "datasetId": dest_partitioned_table.dataset_id,
                "tableId": dest_partitioned_table.table_id
            },
            "destinationRegex": destination_regex,
        }))
    config_objs.append(config_obj)
    return config_objs
@pytest.fixture
def gcs_destination_parquet_config_hive_partitioned(
        gcs_bucket, dest_dataset,
        dest_hive_partitioned_table) -> List[storage.Blob]:
    """
    This tests that a load.json file with destinationTable and destinationRegex
    specified is used to load data.
    :param gcs_bucket:
    :param dest_dataset:
    :param dest_hive_partitioned_table:
    :return:
    """
    # Captures zero-padded yyyy/mm/dd/hh directory components.
    destination_regex = (
        r"(?P<table>.*?)/"  # ignore everything leading up to partition
        r"(?P<yyyy>[\d]{4})/"
        r"(?P<mm>[\d]{2})/"
        r"(?P<dd>[\d]{2})/"
        r"(?P<hh>[\d]{2})/"
    )
    config_objs = []
    config_obj: storage.Blob = gcs_bucket.blob("/".join([
        "_config",
        "load.json",
    ]))
    config_obj.upload_from_string(
        json.dumps({
            "sourceFormat": "PARQUET",
            "destinationTable": {
                "projectId": dest_hive_partitioned_table.project,
                "datasetId": dest_hive_partitioned_table.dataset_id,
                "tableId": dest_hive_partitioned_table.table_id
            },
            "destinationRegex": destination_regex,
            "dataSourceName": "some-onprem-data-source"
        }))
    config_objs.append(config_obj)
    return config_objs
@pytest.fixture
def gcs_destination_parquet_config_partitioned_alternate(
        gcs_bucket, dest_dataset, dest_partitioned_table) -> List[storage.Blob]:
    """Upload a bucket-level load.json for hive-style (key=value) paths.

    This tests that a load.json file with destinationTable and destinationRegex
    specified is used to load data.
    :param gcs_bucket: bucket the config blob is written to
    :param dest_dataset: dataset containing the destination table
    :param dest_partitioned_table: hour-partitioned destination table
    :return: list containing the uploaded config blob
    """
    # Matches .../year=2017/month=04/day=11/hr=01/ (1-2 digit month/day/hour
    # to tolerate non-zero-padded writers).
    destination_regex = (
        r"(?P<table>.*?)/"  # ignore everything leading up to partition
        r"year=(?P<yyyy>[\d]{4})/"
        r"month=(?P<mm>[\d]{1,2})/"
        r"day=(?P<dd>[\d]{1,2})/"
        r"hr=(?P<hh>[\d]{1,2})/")
    config_objs = []
    config_obj: storage.Blob = gcs_bucket.blob("/".join([
        "_config",
        "load.json",
    ]))
    config_obj.upload_from_string(
        json.dumps({
            "sourceFormat": "PARQUET",
            "destinationTable": {
                "projectId": dest_partitioned_table.project,
                "datasetId": dest_partitioned_table.dataset_id,
                "tableId": dest_partitioned_table.table_id
            },
            "destinationRegex": destination_regex,
            "dataSourceName": "some-onprem-data-source"
        }))
    config_objs.append(config_obj)
    return config_objs
@pytest.fixture
def gcs_partitioned_data(gcs_bucket, dest_dataset,
                         dest_partitioned_table) -> List[storage.Blob]:
    """Upload CSV data + _SUCCESS markers for two hourly partitions.

    Also drops an empty dot-prefixed file in each partition — presumably to
    exercise that the ingest skips files starting with "." (TODO confirm).
    """
    data_objs = []
    for partition in ["$2017041101", "$2017041102"]:
        for test_file in ["nyc_311.csv", "_SUCCESS"]:
            data_obj: storage.Blob = gcs_bucket.blob("/".join([
                dest_dataset.dataset_id, dest_partitioned_table.table_id,
                partition, test_file
            ]))
            data_obj.upload_from_filename(
                os.path.join(TEST_DIR, "resources", "test-data", "nyc_311",
                             partition, test_file))
            data_objs.append(data_obj)
        dot_blob: storage.Blob = gcs_bucket.blob("/".join([
            dest_dataset.dataset_id, dest_partitioned_table.table_id, partition,
            ".file_that_starts_with_dot"
        ]))
        dot_blob.upload_from_string("")
        data_objs.append(dot_blob)
    return data_objs
@pytest.fixture
def gcs_partitioned_data_allow_jagged(
        gcs_bucket, dest_dataset,
        dest_partitioned_table_allow_jagged) -> List[storage.Blob]:
    """Upload gzipped CSV data + _SUCCESS markers for the jagged-rows table."""
    data_objs = []
    for partition in ["$2017041101", "$2017041102"]:
        for test_file in ["nyc_311.csv.gz", "_SUCCESS"]:
            data_obj: storage.Blob = gcs_bucket.blob("/".join([
                dest_dataset.dataset_id,
                dest_partitioned_table_allow_jagged.table_id, partition,
                test_file
            ]))
            data_obj.upload_from_filename(
                os.path.join(TEST_DIR, "resources", "test-data", "nyc_311",
                             partition, test_file))
            data_objs.append(data_obj)
    return data_objs
@pytest.fixture
def gcs_partitioned_parquet_data(gcs_bucket, dest_dataset,
                                 dest_partitioned_table) -> List[storage.Blob]:
    """Upload parquet chunks + _SUCCESS at the bucket root per partition.

    NOTE(review): dest_dataset / dest_partitioned_table are not part of the
    blob path here — they are requested only so the table exists before data
    lands (paths are resolved via the bucket-level load.json destinationRegex).
    """
    data_objs = []
    for partition in ["$2017041101", "$2017041102"]:
        for test_file in [
                "nyc311_25_rows_00.parquet", "nyc311_25_rows_01.parquet",
                "_SUCCESS"
        ]:
            data_obj: storage.Blob = gcs_bucket.blob("/".join(
                [partition, test_file]))
            data_obj.upload_from_filename(
                os.path.join(TEST_DIR, "resources", "test-data", "nyc_311",
                             partition, test_file))
            data_objs.append(data_obj)
    return data_objs
@pytest.fixture
def gcs_split_path_partitioned_data(
        gcs_bucket, dest_dataset, dest_partitioned_table) -> List[storage.Blob]:
    """Upload CSVs under a deep path splitting yyyy/mm/dd/hh into directories.

    The partition id "$2017041101" is sliced into its date components to
    build the object path; a hive-style dir is appended below the hour.
    """
    data_objs = []
    for partition in ["$2017041101", "$2017041102"]:
        for test_file in ["nyc_311.csv", "_SUCCESS"]:
            data_obj: storage.Blob = gcs_bucket.blob("/".join([
                "foo",
                "bar",
                "baz",
                partition[1:5],  # year
                partition[5:7],  # month
                partition[7:9],  # day
                partition[9:],  # hour
                "hive_part_column=9999",
                test_file
            ]))
            data_obj.upload_from_filename(
                os.path.join(TEST_DIR, "resources", "test-data", "nyc_311",
                             partition, test_file))
            data_objs.append(data_obj)
    return data_objs
@pytest.fixture
def gcs_split_path_partitioned_parquet_data(
        gcs_bucket, dest_dataset, dest_partitioned_table) -> List[storage.Blob]:
    """Upload parquet chunks under split yyyy/mm/dd/hh paths.

    Unlike the data files (which sit under a hive_part_column dir), the
    _SUCCESS marker is written directly under the hour folder.
    """
    data_objs = []
    for partition in ["$2017041101", "$2017041102"]:
        for test_file in [
                "nyc311_25_rows_00.parquet", "nyc311_25_rows_01.parquet"
        ]:
            data_obj: storage.Blob = gcs_bucket.blob("/".join([
                "foo",
                "bar",
                "baz",
                partition[1:5],  # year
                partition[5:7],  # month
                partition[7:9],  # day
                partition[9:],  # batch
                "hive_part_column=9999",
                test_file
            ]))
            data_obj.upload_from_filename(
                os.path.join(TEST_DIR, "resources", "test-data", "nyc_311",
                             partition, test_file))
            data_objs.append(data_obj)
        # Add _SUCCESS file under the hour partition folder
        data_obj = gcs_bucket.blob("/".join([
            "foo",
            "bar",
            "baz",
            partition[1:5],  # year
            partition[5:7],  # month
            partition[7:9],  # day
            partition[9:],  # batch
            "_SUCCESS"
        ]))
        data_obj.upload_from_filename(
            os.path.join(TEST_DIR, "resources", "test-data", "nyc_311",
                         partition, "_SUCCESS"))
        data_objs.append(data_obj)
    return data_objs
@pytest.fixture
def gcs_split_path_partitioned_parquet_data_alternate(
        gcs_bucket, dest_dataset, dest_partitioned_table) -> List[storage.Blob]:
    """Upload parquet chunks under hive-style year=/month=/day=/hr= paths.

    Pairs with the *_partitioned_alternate load.json fixture whose regex
    expects key=value path components.
    """
    data_objs = []
    for partition in ["$2017041101", "$2017041102"]:
        for test_file in [
                "nyc311_25_rows_00.parquet", "nyc311_25_rows_01.parquet"
        ]:
            data_obj: storage.Blob = gcs_bucket.blob("/".join([
                "foo",
                "bar",
                "baz",
                f"year={partition[1:5]}",  # year
                f"month={partition[5:7]}",  # month
                f"day={partition[7:9]}",  # day
                f"hr={partition[9:]}",  # batch
                test_file
            ]))
            data_obj.upload_from_filename(
                os.path.join(TEST_DIR, "resources", "test-data", "nyc_311",
                             partition, test_file))
            data_objs.append(data_obj)
        # Add _SUCCESS file under the hour partition folder
        data_obj = gcs_bucket.blob("/".join([
            "foo",
            "bar",
            "baz",
            f"year={partition[1:5]}",  # year
            f"month={partition[5:7]}",  # month
            f"day={partition[7:9]}",  # day
            f"hr={partition[9:]}",  # batch
            "_SUCCESS"
        ]))
        data_obj.upload_from_filename(
            os.path.join(TEST_DIR, "resources", "test-data", "nyc_311",
                         partition, "_SUCCESS"))
        data_objs.append(data_obj)
    return data_objs
@pytest.fixture
def dest_partitioned_table(bq: bigquery.Client, dest_dataset,
                           monkeypatch) -> bigquery.Table:
    """Create an hourly-partitioned table cloning the public NYC 311 schema.

    No explicit teardown: the dest_dataset fixture deletes the dataset with
    delete_contents=True, which removes this table.
    """
    public_table: bigquery.Table = bq.get_table(
        bigquery.TableReference.from_string(
            "bigquery-public-data.new_york_311.311_service_requests"))
    schema = public_table.schema
    if os.getenv('GCP_PROJECT') is None:
        monkeypatch.setenv("GCP_PROJECT", bq.project)
    table: bigquery.Table = bigquery.Table(
        f"{os.getenv('GCP_PROJECT')}"
        f".{dest_dataset.dataset_id}.cf_test_nyc_311_"
        f"{str(uuid.uuid4()).replace('-', '_')}",
        schema=schema,
    )
    table.time_partitioning = bigquery.TimePartitioning()
    table.time_partitioning.type_ = bigquery.TimePartitioningType.HOUR
    table.time_partitioning.field = "created_date"
    table = bq.create_table(table)
    return table
@pytest.fixture
def dest_hive_partitioned_table(bq: bigquery.Client, dest_dataset,
                                monkeypatch) -> bigquery.Table:
    """Like dest_partitioned_table, plus an INT64 hive_part_column field."""
    public_table: bigquery.Table = bq.get_table(
        bigquery.TableReference.from_string(
            "bigquery-public-data.new_york_311.311_service_requests"))
    schema = public_table.schema
    # Extra column populated from the hive-style path component.
    schema.append(bigquery.SchemaField('hive_part_column', 'INT64'))
    if os.getenv('GCP_PROJECT') is None:
        monkeypatch.setenv("GCP_PROJECT", bq.project)
    table: bigquery.Table = bigquery.Table(
        f"{os.getenv('GCP_PROJECT')}"
        f".{dest_dataset.dataset_id}.cf_test_nyc_311_"
        f"{str(uuid.uuid4()).replace('-', '_')}",
        schema=schema,
    )
    table.time_partitioning = bigquery.TimePartitioning()
    table.time_partitioning.type_ = bigquery.TimePartitioningType.HOUR
    table.time_partitioning.field = "created_date"
    table = bq.create_table(table)
    return table
@pytest.fixture
def dest_partitioned_table_allow_jagged(bq: bigquery.Client, dest_dataset,
                                        monkeypatch) -> bigquery.Table:
    """Partitioned destination table with a trailing column for jagged rows.

    The extra STRING column ``extra_jagged_row_test_column`` lets tests load
    CSVs whose rows are shorter than the schema (``allowJaggedRows``).
    """
    public_table: bigquery.Table = bq.get_table(
        bigquery.TableReference.from_string(
            "bigquery-public-data.new_york_311.311_service_requests"))
    schema = public_table.schema
    if os.getenv('GCP_PROJECT') is None:
        monkeypatch.setenv("GCP_PROJECT", bq.project)
    extra_field_for_jagged_row_test = bigquery.schema.SchemaField(
        "extra_jagged_row_test_column", "STRING")
    schema.append(extra_field_for_jagged_row_test)
    table: bigquery.Table = bigquery.Table(
        f"{os.getenv('GCP_PROJECT')}"
        f".{dest_dataset.dataset_id}.cf_test_nyc_311_"
        f"{str(uuid.uuid4()).replace('-', '_')}",
        schema=schema,
    )
    table.time_partitioning = bigquery.TimePartitioning()
    table.time_partitioning.type_ = bigquery.TimePartitioningType.HOUR
    table.time_partitioning.field = "created_date"
    table = bq.create_table(table)
    return table
@pytest.fixture
def dest_ordered_update_table(gcs, gcs_bucket, bq,
                              dest_dataset) -> bigquery.Table:
    """Create the destination table for ordered-update tests.

    Creates a table from ``ordering_schema.json``, seeds it with one row, and
    uploads a ``_bqlock`` blob pointing at the (succeeded) seed load job.
    """
    with open(os.path.join(TEST_DIR, "resources",
                           "ordering_schema.json")) as schema_file:
        schema = gcs_ocn_bq_ingest.common.utils.dict_to_bq_schema(
            json.load(schema_file))
    table: bigquery.Table = bigquery.Table(
        f"{dest_dataset.project}.{dest_dataset.dataset_id}"
        f".cf_test_ordering_{str(uuid.uuid4()).replace('-', '_')}",
        schema=schema,
    )
    table = bq.create_table(table)
    # Our test query only updates on a single row so we need to populate
    # original row.
    # This can be used to simulate an existing _bqlock from a prior run of the
    # subscriber loop with a job that has succeeded.
    job: bigquery.LoadJob = bq.load_table_from_json(
        [{
            "id": 1,
            "alpha_update": ""
        }],
        table,
        job_id_prefix=gcs_ocn_bq_ingest.common.constants.DEFAULT_JOB_PREFIX)
    # The subscriber will be responsible for cleaning up this file.
    bqlock_obj: storage.Blob = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}", table.table_id,
        "_bqlock"
    ]))
    bqlock_obj.upload_from_string(
        json.dumps(dict(job_id=job.job_id,
                        table=table.reference.to_api_repr())))
    return table
@pytest.fixture
def gcs_ordered_update_data(gcs_bucket, dest_dataset,
                            dest_ordered_update_table) -> List[storage.Blob]:
    """Upload ordered incremental chunks (00..03) of test data to GCS.

    Chunk "00" only gets a ``_SUCCESS`` marker; chunks "01".."03" each get a
    ``data.csv`` plus ``_SUCCESS``.

    Returns:
        Only the ``_SUCCESS`` blobs, in chunk order.
    """
    data_objs = []
    older_success_blob: storage.Blob = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_ordered_update_table.table_id, "00", "_SUCCESS"
    ]))
    older_success_blob.upload_from_string("")
    data_objs.append(older_success_blob)
    # Fix: this was a set, so the chunk upload order (and the order of the
    # returned _SUCCESS blobs) varied with string-hash randomization between
    # runs. An ordering-sensitive suite should upload deterministically.
    chunks = ("01", "02", "03")
    for chunk in chunks:
        for test_file in ["data.csv", "_SUCCESS"]:
            data_obj: storage.Blob = gcs_bucket.blob("/".join([
                f"{dest_dataset.project}.{dest_dataset.dataset_id}",
                dest_ordered_update_table.table_id, chunk, test_file
            ]))
            data_obj.upload_from_filename(
                os.path.join(TEST_DIR, "resources", "test-data", "ordering",
                             chunk, test_file))
            data_objs.append(data_obj)
    return list(filter(lambda do: do.name.endswith("_SUCCESS"), data_objs))
@pytest.fixture
def gcs_backlog(gcs, gcs_bucket, gcs_ordered_update_data) -> List[storage.Blob]:
    """Publish each uploaded _SUCCESS blob to the backlog and return them."""
    data_objs = []
    # We will deal with the last incremental in the test itself to test the
    # behavior of a new backlog subscriber.
    for success_blob in gcs_ordered_update_data:
        gcs_ocn_bq_ingest.common.ordering.backlog_publisher(gcs, success_blob)
        backlog_blob = gcs_ocn_bq_ingest.common.ordering.success_blob_to_backlog_blob(
            gcs, success_blob)
        backlog_blob.upload_from_string("")
        data_objs.append(backlog_blob)
    # NOTE(review): assumes backlog blob names also end in "_SUCCESS" so the
    # filter keeps them — confirm against success_blob_to_backlog_blob.
    return list(filter(lambda do: do.name.endswith("_SUCCESS"), data_objs))
@pytest.fixture
def gcs_external_update_config(gcs_bucket, dest_dataset,
                               dest_ordered_update_table) -> List[storage.Blob]:
    """Upload the _config (SQL + external table def) and a _BACKFILL trigger.

    Returns the uploaded blobs: transform SQL, external.json, backfill marker.
    """
    config_objs = []
    sql_obj = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_ordered_update_table.table_id,
        "_config",
        "bq_transform.sql",
    ]))
    # {dest_dataset}/{dest_table} are template placeholders filled in by the
    # ingest function at load time, not Python format args here.
    sql = """
    UPDATE {dest_dataset}.{dest_table} dest
    SET alpha_update = CONCAT(dest.alpha_update, src.alpha_update)
    FROM temp_ext src
    WHERE dest.id = src.id
    """
    sql_obj.upload_from_string(sql)
    config_obj = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_ordered_update_table.table_id, "_config", "external.json"
    ]))
    with open(os.path.join(TEST_DIR, "resources",
                           "ordering_schema.json")) as schema:
        fields = json.load(schema)
    # External table definition for pipe-delimited CSV input.
    config = {
        "schema": {
            "fields": fields
        },
        "csvOptions": {
            "allowJaggedRows": False,
            "allowQuotedNewlines": False,
            "encoding": "UTF-8",
            "fieldDelimiter": "|",
            "skipLeadingRows": 0,
        },
        "sourceFormat": "CSV",
        "sourceUris": ["REPLACEME"],
    }
    config_obj.upload_from_string(json.dumps(config))
    backfill_blob = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_ordered_update_table.table_id,
        gcs_ocn_bq_ingest.common.constants.BACKFILL_FILENAME
    ]))
    backfill_blob.upload_from_string("")
    config_objs.append(sql_obj)
    config_objs.append(config_obj)
    config_objs.append(backfill_blob)
    return config_objs
@pytest.fixture
def gcs_external_partitioned_config(
        bq, gcs_bucket, dest_dataset,
        dest_partitioned_table) -> List[storage.Blob]:
    """Upload _config (INSERT SQL + CSV external table def) for the
    partitioned-table ingest tests and return the uploaded blobs."""
    config_objs = []
    sql_obj = gcs_bucket.blob("/".join([
        dest_dataset.dataset_id,
        dest_partitioned_table.table_id,
        "_config",
        "bq_transform.sql",
    ]))
    sql = "INSERT {dest_dataset}.{dest_table} SELECT * FROM temp_ext;"
    sql_obj.upload_from_string(sql)
    config_objs.append(sql_obj)
    config_obj = gcs_bucket.blob("/".join([
        dest_dataset.dataset_id, dest_partitioned_table.table_id, "_config",
        "external.json"
    ]))
    # Reuse the public NYC 311 schema as the external table schema.
    public_table: bigquery.Table = bq.get_table(
        bigquery.TableReference.from_string(
            "bigquery-public-data.new_york_311.311_service_requests"))
    config = {
        "schema": public_table.to_api_repr()['schema'],
        "csvOptions": {
            "allowJaggedRows": False,
            "allowQuotedNewlines": False,
            "encoding": "UTF-8",
            "fieldDelimiter": "|",
            "skipLeadingRows": 0,
        },
        "sourceFormat": "CSV",
        "sourceUris": ["REPLACEME"],
    }
    config_obj.upload_from_string(json.dumps(config))
    config_objs.append(config_obj)
    return config_objs
@pytest.fixture
def gcs_external_partitioned_config_allow_jagged(
        bq, gcs_bucket, dest_dataset,
        dest_partitioned_table_allow_jagged) -> List[storage.Blob]:
    """Upload _config for jagged-row ingest: NYC 311 schema plus an extra
    trailing column, with ``allowJaggedRows`` and GZIP compression enabled."""
    config_objs = []
    sql_obj = gcs_bucket.blob("/".join([
        dest_dataset.dataset_id,
        dest_partitioned_table_allow_jagged.table_id,
        "_config",
        "bq_transform.sql",
    ]))
    sql = "INSERT {dest_dataset}.{dest_table} SELECT * FROM temp_ext;"
    sql_obj.upload_from_string(sql)
    config_objs.append(sql_obj)
    config_obj = gcs_bucket.blob("/".join([
        dest_dataset.dataset_id, dest_partitioned_table_allow_jagged.table_id,
        "_config", "external.json"
    ]))
    public_table: bigquery.Table = bq.get_table(
        bigquery.TableReference.from_string(
            "bigquery-public-data.new_york_311.311_service_requests"))
    # Must match the extra column added by dest_partitioned_table_allow_jagged.
    extra_field_for_jagged_row_test = bigquery.schema.SchemaField(
        "extra_jagged_row_test_column", "STRING")
    jagged_schema = public_table.schema + [extra_field_for_jagged_row_test]
    config = {
        "schema": {
            "fields": [
                schema_field.to_api_repr() for schema_field in jagged_schema
            ]
        },
        "compression": "GZIP",
        "csvOptions": {
            "allowJaggedRows": True,
            "allowQuotedNewlines": False,
            "encoding": "UTF-8",
            "fieldDelimiter": "|",
            "skipLeadingRows": 0,
        },
        "sourceFormat": "CSV",
        "sourceUris": ["REPLACEME"],
    }
    config_obj.upload_from_string(json.dumps(config))
    config_objs.append(config_obj)
    return config_objs
@pytest.fixture
def gcs_external_partitioned_parquet_config(
        bq, gcs_bucket, dest_dataset,
        dest_partitioned_table) -> List[storage.Blob]:
    """Upload a bucket-root _config for PARQUET ingest (schema inferred)."""
    config_objs = []
    # Upload SQL query used to load table
    sql_obj = gcs_bucket.blob("/".join([
        "_config",
        "bq_transform.sql",
    ]))
    sql_obj.upload_from_string("INSERT {dest_dataset}.{dest_table} "
                               "SELECT * FROM temp_ext;")
    config_objs.append(sql_obj)
    # Upload external table definition
    # https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externaldataconfiguration
    config_obj = gcs_bucket.blob("/".join(["_config", "external.json"]))
    config_obj.upload_from_string(json.dumps({"sourceFormat": "PARQUET"}))
    config_objs.append(config_obj)
    return config_objs
@pytest.fixture
def gcs_external_hive_partitioned_parquet_config(
        bq, gcs_bucket, dest_dataset,
        dest_partitioned_table) -> List[storage.Blob]:
    """Upload a bucket-root _config for hive-partitioned PARQUET ingest.

    Same as the plain parquet config but with hivePartitioningOptions AUTO so
    partition columns are inferred from the key=value path layout.
    """
    config_objs = []
    # Upload SQL query used to load table
    sql_obj = gcs_bucket.blob("/".join([
        "_config",
        "bq_transform.sql",
    ]))
    sql_obj.upload_from_string("INSERT {dest_dataset}.{dest_table} "
                               "SELECT * FROM temp_ext;")
    config_objs.append(sql_obj)
    # Upload external table definition
    # https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externaldataconfiguration
    config_obj = gcs_bucket.blob("/".join(["_config", "external.json"]))
    config_obj.upload_from_string(
        json.dumps({
            "sourceFormat": "PARQUET",
            "hivePartitioningOptions": {
                "mode": "AUTO"
            }
        }))
    config_objs.append(config_obj)
    return config_objs
@pytest.fixture
def no_use_error_reporting(monkeypatch):
    """Disable the Error Reporting API for the duration of a test."""
    monkeypatch.setenv("USE_ERROR_REPORTING_API", "False")
@pytest.fixture
def gcs_external_config_bad_statement(
        gcs_bucket, dest_dataset, dest_table,
        no_use_error_reporting) -> List[storage.Blob]:
    """Upload a _config whose transform SQL is deliberately invalid.

    Used to test failure handling when the ingest function runs a broken
    statement. Returns the uploaded SQL and external.json blobs.

    Note: the original final line had dataset-export residue
    (``| 0.543106 | 0.248278 |``) fused onto it, which made the function a
    syntax error; the residue is removed here.
    """
    config_objs = []
    sql_obj = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_table.table_id,
        "_config",
        "bq_transform.sql",
    ]))
    # First statement is intentionally invalid SQL.
    sql = ("Woops this isn't valid SQL;\n"
           "INSERT {dest_dataset}.{dest_table} SELECT * FROM temp_ext;")
    sql_obj.upload_from_string(sql)
    config_obj = gcs_bucket.blob("/".join([
        f"{dest_dataset.project}.{dest_dataset.dataset_id}",
        dest_table.table_id, "_config", "external.json"
    ]))
    with open(os.path.join(TEST_DIR, "resources",
                           "nation_schema.json")) as schema:
        fields = json.load(schema)
    config = {
        "schema": {
            "fields": fields
        },
        "csvOptions": {
            "allowJaggedRows": False,
            "allowQuotedNewlines": False,
            "encoding": "UTF-8",
            "fieldDelimiter": "|",
            "skipLeadingRows": 0,
        },
        "sourceFormat": "CSV",
        "sourceUris": ["REPLACEME"],
    }
    config_obj.upload_from_string(json.dumps(config))
    config_objs.append(sql_obj)
    config_objs.append(config_obj)
    return config_objs
import datetime
import collections
import os
import unittest
import pickle
import numpy as np
import xarray as xr
import pyinterp.backends.xarray
import pyinterp
class Degraded(unittest.TestCase):
    """Error paths of the pyinterp xarray-backend helpers."""

    def test_axis_identifier(self):
        """An empty DataArray exposes no recognizable lon/lat axes."""
        ident = pyinterp.backends.xarray.AxisIdentifier(xr.DataArray())
        self.assertTrue(ident.longitude() is None)
        self.assertTrue(ident.latitude() is None)

    def test_dims_from_data_array(self):
        """Arrays with missing/mismatched/ambiguous dims raise ValueError."""
        array = xr.DataArray()
        with self.assertRaises(ValueError):
            pyinterp.backends.xarray._dims_from_data_array(array, True, 1)
        # Right rank but no coordinates at all.
        array = xr.DataArray(data=np.zeros((2, 2), dtype="float64"))
        with self.assertRaises(ValueError):
            pyinterp.backends.xarray._dims_from_data_array(array, True, 2)
        # Only "lon" carries units metadata; "lat" is not identifiable.
        array = xr.DataArray(data=np.zeros((2, 2), dtype="float64"),
                             coords=[("lon", xr.DataArray(data=np.arange(2)),
                                      dict(units="degrees_east")),
                                     ("lat", xr.DataArray(data=np.arange(2)))],
                             dims=['lon', 'lat'])
        with self.assertRaises(ValueError):
            pyinterp.backends.xarray._dims_from_data_array(array, True, 2)
class Grid2D(unittest.TestCase):
    """2-D (lon, lat) interpolation over the mean-sea-surface test grid."""
    # Test dataset shipped alongside the tests.
    GRID = os.path.join(os.path.dirname(os.path.abspath(__file__)), "dataset",
                        "mss.nc")

    def test_biavariate(self):
        """Bivariate interpolation, pickling, and argument validation."""
        grid = pyinterp.backends.xarray.Grid2D(xr.load_dataset(self.GRID).mss)
        self.assertIsInstance(grid, pyinterp.backends.xarray.Grid2D)
        self.assertIsInstance(grid, pyinterp.Grid2D)
        # Grids must survive a pickle round-trip.
        other = pickle.loads(pickle.dumps(grid))
        self.assertIsInstance(other, pyinterp.backends.xarray.Grid2D)
        self.assertIsInstance(grid, pyinterp.Grid2D)
        self.assertIsInstance(grid.x, pyinterp.Axis)
        self.assertIsInstance(grid.y, pyinterp.Axis)
        self.assertIsInstance(grid.array, np.ndarray)
        # Query points deliberately offset from grid nodes.
        lon = np.arange(-180, 180, 1) + 1 / 3.0
        lat = np.arange(-90, 90, 1) + 1 / 3.0
        x, y = np.meshgrid(lon, lat, indexing="ij")
        z = grid.bivariate(
            collections.OrderedDict(lon=x.flatten(), lat=y.flatten()))
        self.assertIsInstance(z, np.ndarray)
        z = grid.bivariate(collections.OrderedDict(lon=x.flatten(),
                                                   lat=y.flatten()),
                           interpolator="nearest")
        self.assertIsInstance(z, np.ndarray)
        z = grid.bivariate(collections.OrderedDict(lon=x.flatten(),
                                                   lat=y.flatten()),
                           interpolator="inverse_distance_weighting")
        self.assertIsInstance(z, np.ndarray)
        # Cartesian (geodetic=False) IDW must give a different answer than
        # the geodetic IDW computed above.
        grid = pyinterp.backends.xarray.Grid2D(xr.load_dataset(self.GRID).mss,
                                               geodetic=False)
        self.assertIsInstance(grid, pyinterp.backends.xarray.Grid2D)
        w = grid.bivariate(collections.OrderedDict(lon=x.flatten(),
                                                   lat=y.flatten()),
                           interpolator="inverse_distance_weighting")
        self.assertNotEqual(
            np.ma.fix_invalid(z).mean(),
            np.ma.fix_invalid(w).mean())
        # Invalid argument shapes/names.
        with self.assertRaises(TypeError):
            grid.bivariate((x.flatten(), y.flatten()))
        with self.assertRaises(IndexError):
            grid.bivariate(
                collections.OrderedDict(lon=x.flatten(),
                                        lat=y.flatten(),
                                        time=np.arange(3)))
        with self.assertRaises(IndexError):
            grid.bivariate(
                collections.OrderedDict(longitude=x.flatten(),
                                        lat=y.flatten()))
        with self.assertRaises(ValueError):
            grid.bivariate(collections.OrderedDict(lon=x.flatten(),
                                                   lat=y.flatten()),
                           bounds_error=True)
        # Constructing a raw Grid2D requires the matrix transposed to
        # (x, y) ordering.
        lon = pyinterp.Axis(np.linspace(0, 360, 100), is_circle=True)
        lat = pyinterp.Axis(np.linspace(-80, 80, 50), is_circle=False)
        array, _ = np.meshgrid(lon[:], lat[:])
        with self.assertRaises(ValueError):
            pyinterp.Grid2D(lon, lat, array)
        grid = pyinterp.Grid2D(lon, lat, array.T)
        self.assertIsInstance(grid, pyinterp.Grid2D)
        self.assertIsInstance(str(grid), str)
        with self.assertRaises(ValueError):
            pyinterp.Grid2D(lon, lat, array, increasing_axes='_')
        # High-level scipy-like wrapper.
        grid = pyinterp.backends.xarray.RegularGridInterpolator(
            xr.load_dataset(self.GRID).mss)
        z = grid(collections.OrderedDict(lon=x.flatten(), lat=y.flatten()),
                 method="bilinear")
        self.assertIsInstance(z, np.ndarray)

    def test_bicubic(self):
        """Bicubic interpolation, fitting models, and axis-order checks."""
        grid = pyinterp.backends.xarray.Grid2D(xr.load_dataset(self.GRID).mss)
        lon = np.arange(-180, 180, 1) + 1 / 3.0
        lat = np.arange(-90, 90, 1) + 1 / 3.0
        x, y = np.meshgrid(lon, lat, indexing="ij")
        z = grid.bicubic(
            collections.OrderedDict(lon=x.flatten(), lat=y.flatten()))
        self.assertIsInstance(z, np.ndarray)
        # Each alternate fitting model must change the result.
        for fitting_model in [
                'linear', 'polynomial', 'c_spline', 'c_spline_periodic',
                'akima', 'akima_periodic', 'steffen'
        ]:
            other = grid.bicubic(collections.OrderedDict(lon=x.flatten(),
                                                         lat=y.flatten()),
                                 fitting_model=fitting_model)
            self.assertNotEqual((z - other).mean(), 0)
        with self.assertRaises(ValueError):
            grid.bicubic(collections.OrderedDict(lon=x.flatten(),
                                                 lat=y.flatten()),
                         bounds_error=True)
        with self.assertRaises(ValueError):
            grid.bicubic(collections.OrderedDict(lon=x.flatten(),
                                                 lat=y.flatten()),
                         bounds_error=True,
                         boundary="sym")
        x_axis = pyinterp.Axis(np.linspace(-180, 179, 360), is_circle=True)
        y_axis = pyinterp.Axis(np.linspace(-90, 90, 181), is_circle=False)
        z_axis = pyinterp.Axis(np.linspace(0, 10, 10), is_circle=False)
        matrix, _ = np.meshgrid(x_axis[:], y_axis[:])
        grid = pyinterp.Grid2D(x_axis, y_axis, matrix.T)
        self.assertIsInstance(grid, pyinterp.Grid2D)
        with self.assertRaises(ValueError):
            pyinterp.bicubic(grid, x.flatten(), y.flatten(), fitting_model='_')
        with self.assertRaises(ValueError):
            pyinterp.bicubic(grid, x.flatten(), y.flatten(), boundary='_')
        # Bicubic requires increasing axes on both dimensions.
        grid = pyinterp.Grid2D(x_axis.flip(inplace=False), y_axis, matrix.T)
        with self.assertRaises(ValueError):
            pyinterp.bicubic(grid, x.flatten(), y.flatten())
        grid = pyinterp.Grid2D(x_axis, y_axis.flip(), matrix.T)
        with self.assertRaises(ValueError):
            pyinterp.bicubic(grid, x.flatten(), y.flatten())
        # A 3-D grid is rejected by the 2-D bicubic entry point.
        matrix, _, _ = np.meshgrid(x_axis[:], y_axis[:], z_axis[:])
        grid = pyinterp.Grid3D(x_axis, y_axis, z_axis,
                               matrix.transpose(1, 0, 2))
        with self.assertRaises(ValueError):
            pyinterp.bicubic(grid, x.flatten(), y.flatten())
        grid = pyinterp.backends.xarray.RegularGridInterpolator(
            xr.load_dataset(self.GRID).mss)
        self.assertEqual(grid.ndim, 2)
        self.assertTrue(isinstance(grid.grid, pyinterp.backends.xarray.Grid2D))
        z = grid(collections.OrderedDict(lon=x.flatten(), lat=y.flatten()),
                 method="bicubic",
                 bicubic_kwargs=dict(nx=3, ny=3))
        self.assertIsInstance(z, np.ndarray)
class Trivariate(unittest.TestCase):
    """3-D (lon, lat, time) interpolation over the total-column-water grid."""
    GRID = os.path.join(os.path.dirname(os.path.abspath(__file__)), "dataset",
                        "tcw.nc")

    def test(self):
        """Trivariate/bicubic queries, temporal axis handling, wrapper."""
        grid = pyinterp.backends.xarray.Grid3D(xr.load_dataset(self.GRID).tcw,
                                               increasing_axes=True)
        self.assertIsInstance(grid, pyinterp.backends.xarray.Grid3D)
        self.assertIsInstance(grid, pyinterp.Grid3D)
        other = pickle.loads(pickle.dumps(grid))
        self.assertIsInstance(other, pyinterp.backends.xarray.Grid3D)
        self.assertIsInstance(grid, pyinterp.Grid3D)
        self.assertIsInstance(grid.x, pyinterp.Axis)
        self.assertIsInstance(grid.y, pyinterp.Axis)
        self.assertIsInstance(grid.z, pyinterp.TemporalAxis)
        self.assertIsInstance(grid.array, np.ndarray)
        lon = np.arange(-180, 180, 1) + 1 / 3.0
        lat = np.arange(-90, 90, 1) + 1 / 3.0
        time = np.array([datetime.datetime(2002, 7, 2, 15, 0)],
                        dtype="datetime64")
        x, y, t = np.meshgrid(lon, lat, time, indexing="ij")
        z = grid.trivariate(
            collections.OrderedDict(longitude=x.flatten(),
                                    latitude=y.flatten(),
                                    time=t.flatten()))
        self.assertIsInstance(z, np.ndarray)
        z = grid.bicubic(
            collections.OrderedDict(longitude=x.flatten()[1:2],
                                    latitude=y.flatten()[1:2],
                                    time=t.flatten()[1:2]))
        self.assertIsInstance(z, np.ndarray)
        # A date outside the grid's time span raises with bounds_error=True.
        with self.assertRaises(ValueError):
            time = np.array([datetime.datetime(2012, 7, 2, 15, 0)],
                            dtype="datetime64")
            x, y, t = np.meshgrid(lon, lat, time, indexing="ij")
            grid.trivariate(collections.OrderedDict(longitude=x.flatten(),
                                                    latitude=y.flatten(),
                                                    time=t.flatten()),
                            bounds_error=True)
        # A numeric (non-datetime) time axis is also accepted.
        array = xr.load_dataset(self.GRID).tcw
        array.time.values = array.time.values.astype("float64")
        grid = pyinterp.backends.xarray.Grid3D(array, increasing_axes=True)
        x, y, t = np.meshgrid(lon, lat, time.astype("float64"), indexing="ij")
        z = grid.trivariate(
            collections.OrderedDict(longitude=x.flatten(),
                                    latitude=y.flatten(),
                                    time=t.flatten()))
        self.assertIsInstance(z, np.ndarray)
        grid = pyinterp.backends.xarray.RegularGridInterpolator(
            xr.load_dataset(self.GRID).tcw, increasing_axes=True)
        self.assertEqual(grid.ndim, 3)
        self.assertTrue(isinstance(grid.grid, pyinterp.backends.xarray.Grid3D))
        x, y, t = np.meshgrid(lon, lat, time, indexing="ij")
        z = grid(
            dict(longitude=x.flatten(), latitude=y.flatten(),
                 time=t.flatten()))
        self.assertIsInstance(z, np.ndarray)
class Quadrivariate(unittest.TestCase):
    """4-D (lon, lat, level, time) interpolation tests."""
    GRID = os.path.join(os.path.dirname(os.path.abspath(__file__)), "dataset",
                        "pres_temp_4D.nc")

    def test(self):
        """Quadrivariate queries and bounds/type error handling."""
        grid = pyinterp.backends.xarray.Grid4D(xr.load_dataset(
            self.GRID).pressure,
                                               increasing_axes=True)
        self.assertIsInstance(grid, pyinterp.backends.xarray.Grid4D)
        self.assertIsInstance(grid, pyinterp.Grid4D)
        other = pickle.loads(pickle.dumps(grid))
        self.assertIsInstance(other, pyinterp.backends.xarray.Grid4D)
        self.assertIsInstance(grid, pyinterp.Grid4D)
        self.assertIsInstance(grid.x, pyinterp.Axis)
        self.assertIsInstance(grid.y, pyinterp.Axis)
        self.assertIsInstance(grid.z, pyinterp.TemporalAxis)
        self.assertIsInstance(grid.u, pyinterp.Axis)
        self.assertIsInstance(grid.array, np.ndarray)
        lon = np.arange(-125, -70, 0.25)
        lat = np.arange(-25, 50, 0.25)
        level = 0.5
        time = np.datetime64('2000-01-01T12:00')
        x, y, z, t = np.meshgrid(lon, lat, level, time, indexing="ij")
        pressure = grid.quadrivariate(
            collections.OrderedDict(longitude=x.flatten(),
                                    latitude=y.flatten(),
                                    level=z.flatten(),
                                    time=t.flatten()))
        self.assertIsInstance(pressure, np.ndarray)
        with self.assertRaises(ValueError):
            time = 5
            # BUG FIX: np.meshgrid of four inputs returns four arrays; the
            # original unpacked them into only three names, so assertRaises
            # was satisfied by the unpacking ValueError and quadrivariate()
            # was never exercised with the invalid time coordinate.
            x, y, z, t = np.meshgrid(lon, lat, level, time, indexing="ij")
            pressure = grid.quadrivariate(collections.OrderedDict(
                longitude=x.flatten(),
                latitude=y.flatten(),
                level=z.flatten(),
                time=t.flatten()),
                                          bounds_error=True)
        grid = pyinterp.backends.xarray.RegularGridInterpolator(
            xr.load_dataset(self.GRID).pressure, increasing_axes=True)
        self.assertEqual(grid.ndim, 4)
        self.assertTrue(isinstance(grid.grid,
                                   pyinterp.backends.xarray.Grid4D))
class TestRTree(unittest.TestCase):
    """RTree construction, bounds, pickling, and unstructured interpolation."""
    GRID = os.path.join(os.path.dirname(os.path.abspath(__file__)), "dataset",
                        "mss.nc")

    def init(self, dtype):
        """Build, fill, clear, and re-fill an RTree with the given dtype."""
        lon = np.arange(-180, 180, 10, dtype=dtype)
        lat = np.arange(-90, 90, 10, dtype=dtype)
        lon, lat = np.meshgrid(lon, lat)
        data = lon * 0  # all-zero values, same shape as the coordinates
        mesh = pyinterp.RTree(dtype=dtype)
        self.assertIsInstance(mesh, pyinterp.RTree)
        self.assertEqual(len(mesh), 0)
        self.assertFalse(bool(mesh))
        mesh.packing(
            np.vstack((lon.flatten(), lat.flatten())).T, data.flatten())
        self.assertEqual(len(mesh), len(lon.flatten()))
        self.assertTrue(bool(mesh))
        (x_min, y_min, z_min), (x_max, y_max, z_max) = mesh.bounds()
        self.assertEqual(x_min, -180)
        self.assertEqual(y_min, -90.0)
        self.assertEqual(x_max, 180.0)
        self.assertEqual(y_max, 80)
        # float32 trees lose precision on the stored zero values.
        self.assertAlmostEqual(z_min,
                               0,
                               delta=1e-6 if dtype == np.float64 else 0.5)
        self.assertAlmostEqual(z_max,
                               0,
                               delta=1e-6 if dtype == np.float64 else 0.5)
        mesh.clear()
        self.assertEqual(len(mesh), 0)
        self.assertFalse(bool(mesh))
        mesh.insert(
            np.vstack((lon.flatten(), lat.flatten())).T, data.flatten())
        self.assertEqual(len(mesh), len(lon.flatten()))
        self.assertIsInstance(pickle.loads(pickle.dumps(mesh)), pyinterp.RTree)

    def test_init(self):
        """Valid dtypes work; integer dtype and bad state are rejected."""
        self.init(dtype=np.float32)
        self.init(dtype=np.float64)
        with self.assertRaises(ValueError):
            self.init(np.int8)
        with self.assertRaises(ValueError):
            mesh = pyinterp.RTree()
            mesh.__setstate__((1, ))

    def load_data(self):
        """Pack the mean-sea-surface samples into an RTree."""
        ds = xr.load_dataset(self.GRID)
        z = ds.mss.T
        x, y = np.meshgrid(ds.lon.values, ds.lat.values, indexing='ij')
        mesh = pyinterp.RTree()
        mesh.packing(
            np.vstack((x.flatten(), y.flatten())).T, z.values.flatten())
        return mesh

    def test_interpolate(self):
        """Query/IDW/RBF run over off-node points without raising."""
        mesh = self.load_data()
        lon = np.arange(-180, 180, 1 / 3.0) + 1 / 3.0
        lat = np.arange(-90, 90, 1 / 3.0) + 1 / 3.0
        x, y = np.meshgrid(lon, lat, indexing="ij")
        coordinates = np.vstack((x.flatten(), y.flatten())).T
        mesh.query(coordinates)
        mesh.inverse_distance_weighting(coordinates)
        mesh.radial_basis_function(coordinates)
if __name__ == "__main__":
unittest.main() | tests/test_interpolator.py | import datetime
import collections
import os
import unittest
import pickle
import numpy as np
import xarray as xr
import pyinterp.backends.xarray
import pyinterp
# NOTE(review): this region duplicates the test module above (it is the
# dataset export's "parsed_code" column).
class Degraded(unittest.TestCase):
    """Error paths of the pyinterp xarray-backend helpers."""

    def test_axis_identifier(self):
        ident = pyinterp.backends.xarray.AxisIdentifier(xr.DataArray())
        self.assertTrue(ident.longitude() is None)
        self.assertTrue(ident.latitude() is None)

    def test_dims_from_data_array(self):
        array = xr.DataArray()
        with self.assertRaises(ValueError):
            pyinterp.backends.xarray._dims_from_data_array(array, True, 1)
        array = xr.DataArray(data=np.zeros((2, 2), dtype="float64"))
        with self.assertRaises(ValueError):
            pyinterp.backends.xarray._dims_from_data_array(array, True, 2)
        array = xr.DataArray(data=np.zeros((2, 2), dtype="float64"),
                             coords=[("lon", xr.DataArray(data=np.arange(2)),
                                      dict(units="degrees_east")),
                                     ("lat", xr.DataArray(data=np.arange(2)))],
                             dims=['lon', 'lat'])
        with self.assertRaises(ValueError):
            pyinterp.backends.xarray._dims_from_data_array(array, True, 2)
# NOTE(review): duplicate of the Grid2D test case above ("parsed_code" column).
class Grid2D(unittest.TestCase):
    """2-D (lon, lat) interpolation over the mean-sea-surface test grid."""
    GRID = os.path.join(os.path.dirname(os.path.abspath(__file__)), "dataset",
                        "mss.nc")

    def test_biavariate(self):
        """Bivariate interpolation, pickling, and argument validation."""
        grid = pyinterp.backends.xarray.Grid2D(xr.load_dataset(self.GRID).mss)
        self.assertIsInstance(grid, pyinterp.backends.xarray.Grid2D)
        self.assertIsInstance(grid, pyinterp.Grid2D)
        other = pickle.loads(pickle.dumps(grid))
        self.assertIsInstance(other, pyinterp.backends.xarray.Grid2D)
        self.assertIsInstance(grid, pyinterp.Grid2D)
        self.assertIsInstance(grid.x, pyinterp.Axis)
        self.assertIsInstance(grid.y, pyinterp.Axis)
        self.assertIsInstance(grid.array, np.ndarray)
        lon = np.arange(-180, 180, 1) + 1 / 3.0
        lat = np.arange(-90, 90, 1) + 1 / 3.0
        x, y = np.meshgrid(lon, lat, indexing="ij")
        z = grid.bivariate(
            collections.OrderedDict(lon=x.flatten(), lat=y.flatten()))
        self.assertIsInstance(z, np.ndarray)
        z = grid.bivariate(collections.OrderedDict(lon=x.flatten(),
                                                   lat=y.flatten()),
                           interpolator="nearest")
        self.assertIsInstance(z, np.ndarray)
        z = grid.bivariate(collections.OrderedDict(lon=x.flatten(),
                                                   lat=y.flatten()),
                           interpolator="inverse_distance_weighting")
        self.assertIsInstance(z, np.ndarray)
        grid = pyinterp.backends.xarray.Grid2D(xr.load_dataset(self.GRID).mss,
                                               geodetic=False)
        self.assertIsInstance(grid, pyinterp.backends.xarray.Grid2D)
        w = grid.bivariate(collections.OrderedDict(lon=x.flatten(),
                                                   lat=y.flatten()),
                           interpolator="inverse_distance_weighting")
        self.assertNotEqual(
            np.ma.fix_invalid(z).mean(),
            np.ma.fix_invalid(w).mean())
        with self.assertRaises(TypeError):
            grid.bivariate((x.flatten(), y.flatten()))
        with self.assertRaises(IndexError):
            grid.bivariate(
                collections.OrderedDict(lon=x.flatten(),
                                        lat=y.flatten(),
                                        time=np.arange(3)))
        with self.assertRaises(IndexError):
            grid.bivariate(
                collections.OrderedDict(longitude=x.flatten(),
                                        lat=y.flatten()))
        with self.assertRaises(ValueError):
            grid.bivariate(collections.OrderedDict(lon=x.flatten(),
                                                   lat=y.flatten()),
                           bounds_error=True)
        lon = pyinterp.Axis(np.linspace(0, 360, 100), is_circle=True)
        lat = pyinterp.Axis(np.linspace(-80, 80, 50), is_circle=False)
        array, _ = np.meshgrid(lon[:], lat[:])
        with self.assertRaises(ValueError):
            pyinterp.Grid2D(lon, lat, array)
        grid = pyinterp.Grid2D(lon, lat, array.T)
        self.assertIsInstance(grid, pyinterp.Grid2D)
        self.assertIsInstance(str(grid), str)
        with self.assertRaises(ValueError):
            pyinterp.Grid2D(lon, lat, array, increasing_axes='_')
        grid = pyinterp.backends.xarray.RegularGridInterpolator(
            xr.load_dataset(self.GRID).mss)
        z = grid(collections.OrderedDict(lon=x.flatten(), lat=y.flatten()),
                 method="bilinear")
        self.assertIsInstance(z, np.ndarray)

    def test_bicubic(self):
        """Bicubic interpolation, fitting models, and axis-order checks."""
        grid = pyinterp.backends.xarray.Grid2D(xr.load_dataset(self.GRID).mss)
        lon = np.arange(-180, 180, 1) + 1 / 3.0
        lat = np.arange(-90, 90, 1) + 1 / 3.0
        x, y = np.meshgrid(lon, lat, indexing="ij")
        z = grid.bicubic(
            collections.OrderedDict(lon=x.flatten(), lat=y.flatten()))
        self.assertIsInstance(z, np.ndarray)
        for fitting_model in [
                'linear', 'polynomial', 'c_spline', 'c_spline_periodic',
                'akima', 'akima_periodic', 'steffen'
        ]:
            other = grid.bicubic(collections.OrderedDict(lon=x.flatten(),
                                                         lat=y.flatten()),
                                 fitting_model=fitting_model)
            self.assertNotEqual((z - other).mean(), 0)
        with self.assertRaises(ValueError):
            grid.bicubic(collections.OrderedDict(lon=x.flatten(),
                                                 lat=y.flatten()),
                         bounds_error=True)
        with self.assertRaises(ValueError):
            grid.bicubic(collections.OrderedDict(lon=x.flatten(),
                                                 lat=y.flatten()),
                         bounds_error=True,
                         boundary="sym")
        x_axis = pyinterp.Axis(np.linspace(-180, 179, 360), is_circle=True)
        y_axis = pyinterp.Axis(np.linspace(-90, 90, 181), is_circle=False)
        z_axis = pyinterp.Axis(np.linspace(0, 10, 10), is_circle=False)
        matrix, _ = np.meshgrid(x_axis[:], y_axis[:])
        grid = pyinterp.Grid2D(x_axis, y_axis, matrix.T)
        self.assertIsInstance(grid, pyinterp.Grid2D)
        with self.assertRaises(ValueError):
            pyinterp.bicubic(grid, x.flatten(), y.flatten(), fitting_model='_')
        with self.assertRaises(ValueError):
            pyinterp.bicubic(grid, x.flatten(), y.flatten(), boundary='_')
        grid = pyinterp.Grid2D(x_axis.flip(inplace=False), y_axis, matrix.T)
        with self.assertRaises(ValueError):
            pyinterp.bicubic(grid, x.flatten(), y.flatten())
        grid = pyinterp.Grid2D(x_axis, y_axis.flip(), matrix.T)
        with self.assertRaises(ValueError):
            pyinterp.bicubic(grid, x.flatten(), y.flatten())
        matrix, _, _ = np.meshgrid(x_axis[:], y_axis[:], z_axis[:])
        grid = pyinterp.Grid3D(x_axis, y_axis, z_axis,
                               matrix.transpose(1, 0, 2))
        with self.assertRaises(ValueError):
            pyinterp.bicubic(grid, x.flatten(), y.flatten())
        grid = pyinterp.backends.xarray.RegularGridInterpolator(
            xr.load_dataset(self.GRID).mss)
        self.assertEqual(grid.ndim, 2)
        self.assertTrue(isinstance(grid.grid, pyinterp.backends.xarray.Grid2D))
        z = grid(collections.OrderedDict(lon=x.flatten(), lat=y.flatten()),
                 method="bicubic",
                 bicubic_kwargs=dict(nx=3, ny=3))
        self.assertIsInstance(z, np.ndarray)
# NOTE(review): duplicate of the Trivariate test case above ("parsed_code"
# column).
class Trivariate(unittest.TestCase):
    """3-D (lon, lat, time) interpolation over the total-column-water grid."""
    GRID = os.path.join(os.path.dirname(os.path.abspath(__file__)), "dataset",
                        "tcw.nc")

    def test(self):
        grid = pyinterp.backends.xarray.Grid3D(xr.load_dataset(self.GRID).tcw,
                                               increasing_axes=True)
        self.assertIsInstance(grid, pyinterp.backends.xarray.Grid3D)
        self.assertIsInstance(grid, pyinterp.Grid3D)
        other = pickle.loads(pickle.dumps(grid))
        self.assertIsInstance(other, pyinterp.backends.xarray.Grid3D)
        self.assertIsInstance(grid, pyinterp.Grid3D)
        self.assertIsInstance(grid.x, pyinterp.Axis)
        self.assertIsInstance(grid.y, pyinterp.Axis)
        self.assertIsInstance(grid.z, pyinterp.TemporalAxis)
        self.assertIsInstance(grid.array, np.ndarray)
        lon = np.arange(-180, 180, 1) + 1 / 3.0
        lat = np.arange(-90, 90, 1) + 1 / 3.0
        time = np.array([datetime.datetime(2002, 7, 2, 15, 0)],
                        dtype="datetime64")
        x, y, t = np.meshgrid(lon, lat, time, indexing="ij")
        z = grid.trivariate(
            collections.OrderedDict(longitude=x.flatten(),
                                    latitude=y.flatten(),
                                    time=t.flatten()))
        self.assertIsInstance(z, np.ndarray)
        z = grid.bicubic(
            collections.OrderedDict(longitude=x.flatten()[1:2],
                                    latitude=y.flatten()[1:2],
                                    time=t.flatten()[1:2]))
        self.assertIsInstance(z, np.ndarray)
        with self.assertRaises(ValueError):
            time = np.array([datetime.datetime(2012, 7, 2, 15, 0)],
                            dtype="datetime64")
            x, y, t = np.meshgrid(lon, lat, time, indexing="ij")
            grid.trivariate(collections.OrderedDict(longitude=x.flatten(),
                                                    latitude=y.flatten(),
                                                    time=t.flatten()),
                            bounds_error=True)
        array = xr.load_dataset(self.GRID).tcw
        array.time.values = array.time.values.astype("float64")
        grid = pyinterp.backends.xarray.Grid3D(array, increasing_axes=True)
        x, y, t = np.meshgrid(lon, lat, time.astype("float64"), indexing="ij")
        z = grid.trivariate(
            collections.OrderedDict(longitude=x.flatten(),
                                    latitude=y.flatten(),
                                    time=t.flatten()))
        self.assertIsInstance(z, np.ndarray)
        grid = pyinterp.backends.xarray.RegularGridInterpolator(
            xr.load_dataset(self.GRID).tcw, increasing_axes=True)
        self.assertEqual(grid.ndim, 3)
        self.assertTrue(isinstance(grid.grid, pyinterp.backends.xarray.Grid3D))
        x, y, t = np.meshgrid(lon, lat, time, indexing="ij")
        z = grid(
            dict(longitude=x.flatten(), latitude=y.flatten(),
                 time=t.flatten()))
        self.assertIsInstance(z, np.ndarray)
# NOTE(review): duplicate of the Quadrivariate test case above ("parsed_code"
# column); carries the same meshgrid-unpacking bug, fixed identically.
class Quadrivariate(unittest.TestCase):
    """4-D (lon, lat, level, time) interpolation tests."""
    GRID = os.path.join(os.path.dirname(os.path.abspath(__file__)), "dataset",
                        "pres_temp_4D.nc")

    def test(self):
        """Quadrivariate queries and bounds/type error handling."""
        grid = pyinterp.backends.xarray.Grid4D(xr.load_dataset(
            self.GRID).pressure,
                                               increasing_axes=True)
        self.assertIsInstance(grid, pyinterp.backends.xarray.Grid4D)
        self.assertIsInstance(grid, pyinterp.Grid4D)
        other = pickle.loads(pickle.dumps(grid))
        self.assertIsInstance(other, pyinterp.backends.xarray.Grid4D)
        self.assertIsInstance(grid, pyinterp.Grid4D)
        self.assertIsInstance(grid.x, pyinterp.Axis)
        self.assertIsInstance(grid.y, pyinterp.Axis)
        self.assertIsInstance(grid.z, pyinterp.TemporalAxis)
        self.assertIsInstance(grid.u, pyinterp.Axis)
        self.assertIsInstance(grid.array, np.ndarray)
        lon = np.arange(-125, -70, 0.25)
        lat = np.arange(-25, 50, 0.25)
        level = 0.5
        time = np.datetime64('2000-01-01T12:00')
        x, y, z, t = np.meshgrid(lon, lat, level, time, indexing="ij")
        pressure = grid.quadrivariate(
            collections.OrderedDict(longitude=x.flatten(),
                                    latitude=y.flatten(),
                                    level=z.flatten(),
                                    time=t.flatten()))
        self.assertIsInstance(pressure, np.ndarray)
        with self.assertRaises(ValueError):
            time = 5
            # BUG FIX: four meshgrid results were unpacked into three names,
            # so assertRaises caught the unpacking error instead of the
            # quadrivariate() bounds/type error.
            x, y, z, t = np.meshgrid(lon, lat, level, time, indexing="ij")
            pressure = grid.quadrivariate(collections.OrderedDict(
                longitude=x.flatten(),
                latitude=y.flatten(),
                level=z.flatten(),
                time=t.flatten()),
                                          bounds_error=True)
        grid = pyinterp.backends.xarray.RegularGridInterpolator(
            xr.load_dataset(self.GRID).pressure, increasing_axes=True)
        self.assertEqual(grid.ndim, 4)
        self.assertTrue(isinstance(grid.grid,
                                   pyinterp.backends.xarray.Grid4D))
# NOTE(review): duplicate of the TestRTree test case above ("parsed_code"
# column).
class TestRTree(unittest.TestCase):
    """RTree construction, bounds, pickling, and unstructured interpolation."""
    GRID = os.path.join(os.path.dirname(os.path.abspath(__file__)), "dataset",
                        "mss.nc")

    def init(self, dtype):
        """Build, fill, clear, and re-fill an RTree with the given dtype."""
        lon = np.arange(-180, 180, 10, dtype=dtype)
        lat = np.arange(-90, 90, 10, dtype=dtype)
        lon, lat = np.meshgrid(lon, lat)
        data = lon * 0
        mesh = pyinterp.RTree(dtype=dtype)
        self.assertIsInstance(mesh, pyinterp.RTree)
        self.assertEqual(len(mesh), 0)
        self.assertFalse(bool(mesh))
        mesh.packing(
            np.vstack((lon.flatten(), lat.flatten())).T, data.flatten())
        self.assertEqual(len(mesh), len(lon.flatten()))
        self.assertTrue(bool(mesh))
        (x_min, y_min, z_min), (x_max, y_max, z_max) = mesh.bounds()
        self.assertEqual(x_min, -180)
        self.assertEqual(y_min, -90.0)
        self.assertEqual(x_max, 180.0)
        self.assertEqual(y_max, 80)
        self.assertAlmostEqual(z_min,
                               0,
                               delta=1e-6 if dtype == np.float64 else 0.5)
        self.assertAlmostEqual(z_max,
                               0,
                               delta=1e-6 if dtype == np.float64 else 0.5)
        mesh.clear()
        self.assertEqual(len(mesh), 0)
        self.assertFalse(bool(mesh))
        mesh.insert(
            np.vstack((lon.flatten(), lat.flatten())).T, data.flatten())
        self.assertEqual(len(mesh), len(lon.flatten()))
        self.assertIsInstance(pickle.loads(pickle.dumps(mesh)), pyinterp.RTree)

    def test_init(self):
        """Valid dtypes work; integer dtype and bad state are rejected."""
        self.init(dtype=np.float32)
        self.init(dtype=np.float64)
        with self.assertRaises(ValueError):
            self.init(np.int8)
        with self.assertRaises(ValueError):
            mesh = pyinterp.RTree()
            mesh.__setstate__((1, ))

    def load_data(self):
        """Pack the mean-sea-surface samples into an RTree."""
        ds = xr.load_dataset(self.GRID)
        z = ds.mss.T
        x, y = np.meshgrid(ds.lon.values, ds.lat.values, indexing='ij')
        mesh = pyinterp.RTree()
        mesh.packing(
            np.vstack((x.flatten(), y.flatten())).T, z.values.flatten())
        return mesh

    def test_interpolate(self):
        """Query/IDW/RBF run over off-node points without raising."""
        mesh = self.load_data()
        lon = np.arange(-180, 180, 1 / 3.0) + 1 / 3.0
        lat = np.arange(-90, 90, 1 / 3.0) + 1 / 3.0
        x, y = np.meshgrid(lon, lat, indexing="ij")
        coordinates = np.vstack((x.flatten(), y.flatten())).T
        mesh.query(coordinates)
        mesh.inverse_distance_weighting(coordinates)
        mesh.radial_basis_function(coordinates)
if __name__ == "__main__":
unittest.main() | 0.651466 | 0.625667 |
import sys
sys.path.append('../..')
import unittest
import numpy as np
import pandas as pd
from sklearn import linear_model
from ramp.estimators import (Probabilities,
BinaryProbabilities,
wrap_sklearn_like_estimator)
from ramp import shortcuts
from ramp.tests.test_features import make_data
class DummyProbEstimator(object):
def __init__(self, n_clses):
self.n_clses = n_clses
self._coefs = "coefs"
def fit(self, x, y):
pass
def predict_proba(self, x):
return np.zeros((len(x), self.n_clses))
class TestEstimators(unittest.TestCase):
def setUp(self):
self.data = make_data(10)
def test_probabilities(self):
inner_est = DummyProbEstimator(3)
est = wrap_sklearn_like_estimator(inner_est)
# test attr wrap
self.assertEqual(est._coefs, inner_est._coefs)
self.assertRaises(AttributeError, getattr, est, 'nope_not_attr')
preds = est.predict(self.data.values)
self.assertEqual(preds.shape, (10, 3))
def test_binary_probabilities(self):
inner_est = DummyProbEstimator(2)
est = wrap_sklearn_like_estimator(inner_est)
# test attr wrap
self.assertEqual(est._coefs, inner_est._coefs)
preds = est.predict(self.data.values)
self.assertEqual(preds.shape, (10, ))
def test_sklearn_probabilities(self):
# test multi-class
self.data['target'] = [0] * 5 + [1] * 3 + [2] * 2
inner_est = linear_model.LogisticRegression()
est = wrap_sklearn_like_estimator(inner_est)
x = self.data[['a', 'b']]
est.fit(x, self.data.target)
preds = est.predict(x)
self.assertEqual(preds.shape, (10, 3))
# test binary, single output
self.data['target'] = [0] * 5 + [1] * 5
est = BinaryProbabilities(inner_est)
x = self.data[['a', 'b']]
est.fit(x, self.data.target)
preds = est.predict(x)
self.assertEqual(preds.shape, (10, ))
if __name__ == '__main__':
unittest.main() | ramp/tests/test_estimators.py | import sys
sys.path.append('../..')
import unittest
import numpy as np
import pandas as pd
from sklearn import linear_model
from ramp.estimators import (Probabilities,
BinaryProbabilities,
wrap_sklearn_like_estimator)
from ramp import shortcuts
from ramp.tests.test_features import make_data
class DummyProbEstimator(object):
def __init__(self, n_clses):
self.n_clses = n_clses
self._coefs = "coefs"
def fit(self, x, y):
pass
def predict_proba(self, x):
return np.zeros((len(x), self.n_clses))
class TestEstimators(unittest.TestCase):
def setUp(self):
self.data = make_data(10)
def test_probabilities(self):
inner_est = DummyProbEstimator(3)
est = wrap_sklearn_like_estimator(inner_est)
# test attr wrap
self.assertEqual(est._coefs, inner_est._coefs)
self.assertRaises(AttributeError, getattr, est, 'nope_not_attr')
preds = est.predict(self.data.values)
self.assertEqual(preds.shape, (10, 3))
def test_binary_probabilities(self):
inner_est = DummyProbEstimator(2)
est = wrap_sklearn_like_estimator(inner_est)
# test attr wrap
self.assertEqual(est._coefs, inner_est._coefs)
preds = est.predict(self.data.values)
self.assertEqual(preds.shape, (10, ))
def test_sklearn_probabilities(self):
# test multi-class
self.data['target'] = [0] * 5 + [1] * 3 + [2] * 2
inner_est = linear_model.LogisticRegression()
est = wrap_sklearn_like_estimator(inner_est)
x = self.data[['a', 'b']]
est.fit(x, self.data.target)
preds = est.predict(x)
self.assertEqual(preds.shape, (10, 3))
# test binary, single output
self.data['target'] = [0] * 5 + [1] * 5
est = BinaryProbabilities(inner_est)
x = self.data[['a', 'b']]
est.fit(x, self.data.target)
preds = est.predict(x)
self.assertEqual(preds.shape, (10, ))
if __name__ == '__main__':
unittest.main() | 0.562417 | 0.455744 |
import numpy as np
from sympl import (
Prognostic, get_constant, get_numpy_arrays_with_properties,
restore_data_arrays_with_properties
)
import spharm
Re = get_constant('planetary_radius', 'm')
Omega = get_constant('planetary_rotation_rate', 's^-1')
class LinearizedDynamics(Prognostic):
"""
Prescribes vorticity tendency based on the linearized barotropic
vorticity equation.
"""
# INPUT: vortcity (mean & pert), latitude, longitude
input_properties = {
'perturbation_atmosphere_relative_vorticity': {
'dims': ['lat', 'lon'],
'units': 's^-1',
'alias': 'vortp',
},
'base_atmosphere_relative_vorticity': {
'dims': ['lat', 'lon'],
'units': 's^-1',
'alias': 'vortb',
},
'latitude': {
'dims': ['lat', 'lon'],
'units': 'radians',
}
}
# DIAGS: u (mean & pert), v (mean & pert), and psi (mean & pert)
diagnostic_properties = {
'perturbation_eastward_wind': {
#'dims': ['lat', 'lon'],
'units': 'm s^-1',
'alias': 'up',
'dims_like' : 'latitude'
},
'base_eastward_wind': {
#'dims': ['lat', 'lon'],
'units': 'm s^-1',
'alias': 'ub',
'dims_like' : 'latitude'
},
'perturbation_northward_wind': {
#'dims': ['lat', 'lon'],
'units': 'm s^-1',
'alias': 'vp',
'dims_like' : 'latitude'
},
'base_northward_wind': {
#'dims': ['lat', 'lon'],
'units': 'm s^-1',
'alias': 'vb',
'dims_like' : 'latitude'
},
'perturbation_atmosphere_horizontal_streamfunction': {
#'dims': ['lat', 'lon'],
'units': 'm^2 s^-1',
'alias': 'psip',
'dims_like' : 'latitude'
},
'base_atmosphere_horizontal_streamfunction': {
#'dims': ['lat', 'lon'],
'units': 'm^2 s^-1',
'alias': 'psib',
'dims_like' : 'latitude'
},
}
# TENDENCIES: vorticity (prime only)
tendency_properties = {
'perturbation_atmosphere_relative_vorticity': {
'units': 's^-2'
}
}
def __init__(self, ntrunc=21):
self._ntrunc = ntrunc
def __call__(self, state):
"""
Gets tendencies and diagnostics from the passed model state.
Copied from sympl develop branch (to-be v0.3.3), ignoring checks.
Args
----
state : dict
A model state dictionary.
Returns
-------
tendencies : dict
A dictionary whose keys are strings indicating
state quantities and values are the time derivative of those
quantities in units/second at the time of the input state.
diagnostics : dict
A dictionary whose keys are strings indicating
state quantities and values are the value of those quantities
at the time of the input state.
Raises
------
KeyError
If a required quantity is missing from the state.
InvalidStateError
If state is not a valid input for the Prognostic instance.
"""
raw_state = get_numpy_arrays_with_properties(state, self.input_properties)
raw_state['time'] = state['time']
raw_tendencies, raw_diagnostics = self.array_call(raw_state)
tendencies = restore_data_arrays_with_properties(
raw_tendencies, self.tendency_properties,
state, self.input_properties)
diagnostics = restore_data_arrays_with_properties(
raw_diagnostics, self.diagnostic_properties,
state, self.input_properties)
return tendencies, diagnostics
def array_call(self, state):
"""
Calculates the vorticity tendency from the current state using
the barotropic vorticity equation.
Args
----
state : dict
A dictionary of numpy arrays containing the model state.
Returns
-------
tendencies : dict
A single-item dictionary containing the vorticity
tendency numpy array.
diagnostics : dict
A dictionary of numpy arrays containing the diagnostics
that were needed to compute the vorticity tendency.
"""
# Get numpy arrays from state
vortp = state['vortp']
vortb = state['vortb']
theta = state['latitude']
# Compute diagnostics (streamfunction) for tendency calculation
# using spherical harmonics
s = spharm.Spharmt(theta.shape[1], theta.shape[0], rsphere=Re,
gridtype='gaussian', legfunc='computed')
vortp_spec = s.grdtospec(vortp, ntrunc=self._ntrunc)
vortb_spec = s.grdtospec(vortb, ntrunc=self._ntrunc)
div_spec = np.zeros(vortb_spec.shape) # Only want NON-DIVERGENT wind
# Get the winds
up, vp = s.getuv(vortp_spec, div_spec)
ub, vb = s.getuv(vortb_spec, div_spec)
# And now the streamfunction
psip, _ = s.getpsichi(up, vp)
psib, _ = s.getpsichi(ub, vb)
# Here we actually compute vorticity tendency from the linearized equation
f = 2 * Omega * np.sin(theta)
beta = s.getgrad(s.grdtospec(f, ntrunc=self._ntrunc))[1]
dvortp_dx = s.getgrad(vortp_spec)[0]
dvortp_dy = s.getgrad(vortp_spec)[1]
dvortb_dx = s.getgrad(vortb_spec)[0]
dvortb_dy = s.getgrad(vortb_spec)[1]
vort_tend = - (vp+vb) * beta - up * dvortb_dx - vp * dvortb_dy - \
ub * dvortp_dx - vb * dvortp_dy
tendencies = {'vortp': vort_tend}
# Collect the diagnostics into a dictionary
diagnostics = {'up': up, 'vp': vp, 'ub': ub, 'vb': vb,
'psip': psip, 'psib': psib}
return tendencies, diagnostics
class NonlinearDynamics(Prognostic):
"""
Prescribes vorticity tendency based on the full barotropic
vorticity equation.
"""
# INPUT: vortcity (mean & pert), latitude, longitude
input_properties = {
'atmosphere_relative_vorticity': {
'dims': ['lat', 'lon'],
'units': 's^-1',
'alias': 'vort',
},
'latitude': {
'dims': ['lat', 'lon'],
'units': 'radians',
}
}
# DIAGS: u (mean & pert), v (mean & pert), and psi (mean & pert)
diagnostic_properties = {
'eastward_wind': {
#'dims': ['lat', 'lon'],
'units': 'm s^-1',
'alias': 'u',
'dims_like': 'latitude'
},
'northward_wind': {
#'dims': ['lat', 'lon'],
'units': 'm s^-1',
'alias': 'v',
'dims_like': 'latitude'
},
'atmosphere_horizontal_streamfunction': {
#'dims': ['lat', 'lon'],
'units': 'm^2 s^-1',
'alias': 'psi',
'dims_like': 'latitude'
}
}
# TENDENCIES: vorticity (prime only)
tendency_properties = {
'atmosphere_relative_vorticity': {
'units': 's^-2',
'dims_like': 'latitude'
}
}
def __init__(self, ntrunc=21):
self._ntrunc = ntrunc
def __call__(self, state):
"""
Gets tendencies and diagnostics from the passed model state.
Copied from sympl develop branch (to-be v0.3.3), ignoring checks.
Args
----
state : dict
A model state dictionary.
Returns
-------
tendencies : dict
A dictionary whose keys are strings indicating
state quantities and values are the time derivative of those
quantities in units/second at the time of the input state.
diagnostics : dict
A dictionary whose keys are strings indicating
state quantities and values are the value of those quantities
at the time of the input state.
Raises
------
KeyError
If a required quantity is missing from the state.
InvalidStateError
If state is not a valid input for the Prognostic instance.
"""
raw_state = get_numpy_arrays_with_properties(state, self.input_properties)
raw_state['time'] = state['time']
raw_tendencies, raw_diagnostics = self.array_call(raw_state)
tendencies = restore_data_arrays_with_properties(
raw_tendencies, self.tendency_properties,
state, self.input_properties)
# print(state.keys())
# print(raw_state.keys())
# print(raw_diagnostics.keys())
# print(self.diagnostic_properties.keys())
# print(self.input_properties.keys())
# print('-----------')
diagnostics = restore_data_arrays_with_properties(
raw_diagnostics, self.diagnostic_properties,
state, self.input_properties)
return tendencies, diagnostics
def array_call(self, state):
"""
Calculates the vorticity tendency from the current state using
the barotropic vorticity equation.
Args
----
state : dict
A dictionary of numpy arrays containing the model state.
Returns
-------
tendencies : dict
A single-item dictionary containing the vorticity
tendency numpy array.
diagnostics : dict
A dictionary of numpy arrays containing the diagnostics
that were needed to compute the vorticity tendency.
"""
# Get numpy arrays from state
vort = state['vort']
theta = state['latitude']
# Compute diagnostics (streamfunction) for tendency calculation
# using spherical harmonics
s = spharm.Spharmt(theta.shape[1], theta.shape[0], rsphere=Re,
gridtype='gaussian', legfunc='computed')
vort_spec = s.grdtospec(vort, ntrunc=self._ntrunc)
div_spec = np.zeros(vort_spec.shape) # Only want NON-DIVERGENT wind
# Get the winds
u, v = s.getuv(vort_spec, div_spec)
# And now the streamfunction
psi, _ = s.getpsichi(u, v)
# Here we actually compute vorticity tendency
# Compute tendency with beta as only forcing
f = 2 * Omega * np.sin(theta)
beta = s.getgrad(s.grdtospec(f, ntrunc=self._ntrunc))[1]
dvort_dx = s.getgrad(vort_spec)[0]
dvort_dy = s.getgrad(vort_spec)[1]
vort_tend = - v * beta - u * dvort_dx - v * dvort_dy
tendencies = {'vort': vort_tend}
# Collect the diagnostics into a dictionary
diagnostics = {'u': u, 'v': v, 'psi': psi}
return tendencies, diagnostics | barotropy/_core/dynamics.py | import numpy as np
from sympl import (
Prognostic, get_constant, get_numpy_arrays_with_properties,
restore_data_arrays_with_properties
)
import spharm
Re = get_constant('planetary_radius', 'm')
Omega = get_constant('planetary_rotation_rate', 's^-1')
class LinearizedDynamics(Prognostic):
"""
Prescribes vorticity tendency based on the linearized barotropic
vorticity equation.
"""
# INPUT: vortcity (mean & pert), latitude, longitude
input_properties = {
'perturbation_atmosphere_relative_vorticity': {
'dims': ['lat', 'lon'],
'units': 's^-1',
'alias': 'vortp',
},
'base_atmosphere_relative_vorticity': {
'dims': ['lat', 'lon'],
'units': 's^-1',
'alias': 'vortb',
},
'latitude': {
'dims': ['lat', 'lon'],
'units': 'radians',
}
}
# DIAGS: u (mean & pert), v (mean & pert), and psi (mean & pert)
diagnostic_properties = {
'perturbation_eastward_wind': {
#'dims': ['lat', 'lon'],
'units': 'm s^-1',
'alias': 'up',
'dims_like' : 'latitude'
},
'base_eastward_wind': {
#'dims': ['lat', 'lon'],
'units': 'm s^-1',
'alias': 'ub',
'dims_like' : 'latitude'
},
'perturbation_northward_wind': {
#'dims': ['lat', 'lon'],
'units': 'm s^-1',
'alias': 'vp',
'dims_like' : 'latitude'
},
'base_northward_wind': {
#'dims': ['lat', 'lon'],
'units': 'm s^-1',
'alias': 'vb',
'dims_like' : 'latitude'
},
'perturbation_atmosphere_horizontal_streamfunction': {
#'dims': ['lat', 'lon'],
'units': 'm^2 s^-1',
'alias': 'psip',
'dims_like' : 'latitude'
},
'base_atmosphere_horizontal_streamfunction': {
#'dims': ['lat', 'lon'],
'units': 'm^2 s^-1',
'alias': 'psib',
'dims_like' : 'latitude'
},
}
# TENDENCIES: vorticity (prime only)
tendency_properties = {
'perturbation_atmosphere_relative_vorticity': {
'units': 's^-2'
}
}
def __init__(self, ntrunc=21):
self._ntrunc = ntrunc
def __call__(self, state):
"""
Gets tendencies and diagnostics from the passed model state.
Copied from sympl develop branch (to-be v0.3.3), ignoring checks.
Args
----
state : dict
A model state dictionary.
Returns
-------
tendencies : dict
A dictionary whose keys are strings indicating
state quantities and values are the time derivative of those
quantities in units/second at the time of the input state.
diagnostics : dict
A dictionary whose keys are strings indicating
state quantities and values are the value of those quantities
at the time of the input state.
Raises
------
KeyError
If a required quantity is missing from the state.
InvalidStateError
If state is not a valid input for the Prognostic instance.
"""
raw_state = get_numpy_arrays_with_properties(state, self.input_properties)
raw_state['time'] = state['time']
raw_tendencies, raw_diagnostics = self.array_call(raw_state)
tendencies = restore_data_arrays_with_properties(
raw_tendencies, self.tendency_properties,
state, self.input_properties)
diagnostics = restore_data_arrays_with_properties(
raw_diagnostics, self.diagnostic_properties,
state, self.input_properties)
return tendencies, diagnostics
def array_call(self, state):
"""
Calculates the vorticity tendency from the current state using
the barotropic vorticity equation.
Args
----
state : dict
A dictionary of numpy arrays containing the model state.
Returns
-------
tendencies : dict
A single-item dictionary containing the vorticity
tendency numpy array.
diagnostics : dict
A dictionary of numpy arrays containing the diagnostics
that were needed to compute the vorticity tendency.
"""
# Get numpy arrays from state
vortp = state['vortp']
vortb = state['vortb']
theta = state['latitude']
# Compute diagnostics (streamfunction) for tendency calculation
# using spherical harmonics
s = spharm.Spharmt(theta.shape[1], theta.shape[0], rsphere=Re,
gridtype='gaussian', legfunc='computed')
vortp_spec = s.grdtospec(vortp, ntrunc=self._ntrunc)
vortb_spec = s.grdtospec(vortb, ntrunc=self._ntrunc)
div_spec = np.zeros(vortb_spec.shape) # Only want NON-DIVERGENT wind
# Get the winds
up, vp = s.getuv(vortp_spec, div_spec)
ub, vb = s.getuv(vortb_spec, div_spec)
# And now the streamfunction
psip, _ = s.getpsichi(up, vp)
psib, _ = s.getpsichi(ub, vb)
# Here we actually compute vorticity tendency from the linearized equation
f = 2 * Omega * np.sin(theta)
beta = s.getgrad(s.grdtospec(f, ntrunc=self._ntrunc))[1]
dvortp_dx = s.getgrad(vortp_spec)[0]
dvortp_dy = s.getgrad(vortp_spec)[1]
dvortb_dx = s.getgrad(vortb_spec)[0]
dvortb_dy = s.getgrad(vortb_spec)[1]
vort_tend = - (vp+vb) * beta - up * dvortb_dx - vp * dvortb_dy - \
ub * dvortp_dx - vb * dvortp_dy
tendencies = {'vortp': vort_tend}
# Collect the diagnostics into a dictionary
diagnostics = {'up': up, 'vp': vp, 'ub': ub, 'vb': vb,
'psip': psip, 'psib': psib}
return tendencies, diagnostics
class NonlinearDynamics(Prognostic):
"""
Prescribes vorticity tendency based on the full barotropic
vorticity equation.
"""
# INPUT: vortcity (mean & pert), latitude, longitude
input_properties = {
'atmosphere_relative_vorticity': {
'dims': ['lat', 'lon'],
'units': 's^-1',
'alias': 'vort',
},
'latitude': {
'dims': ['lat', 'lon'],
'units': 'radians',
}
}
# DIAGS: u (mean & pert), v (mean & pert), and psi (mean & pert)
diagnostic_properties = {
'eastward_wind': {
#'dims': ['lat', 'lon'],
'units': 'm s^-1',
'alias': 'u',
'dims_like': 'latitude'
},
'northward_wind': {
#'dims': ['lat', 'lon'],
'units': 'm s^-1',
'alias': 'v',
'dims_like': 'latitude'
},
'atmosphere_horizontal_streamfunction': {
#'dims': ['lat', 'lon'],
'units': 'm^2 s^-1',
'alias': 'psi',
'dims_like': 'latitude'
}
}
# TENDENCIES: vorticity (prime only)
tendency_properties = {
'atmosphere_relative_vorticity': {
'units': 's^-2',
'dims_like': 'latitude'
}
}
def __init__(self, ntrunc=21):
self._ntrunc = ntrunc
def __call__(self, state):
"""
Gets tendencies and diagnostics from the passed model state.
Copied from sympl develop branch (to-be v0.3.3), ignoring checks.
Args
----
state : dict
A model state dictionary.
Returns
-------
tendencies : dict
A dictionary whose keys are strings indicating
state quantities and values are the time derivative of those
quantities in units/second at the time of the input state.
diagnostics : dict
A dictionary whose keys are strings indicating
state quantities and values are the value of those quantities
at the time of the input state.
Raises
------
KeyError
If a required quantity is missing from the state.
InvalidStateError
If state is not a valid input for the Prognostic instance.
"""
raw_state = get_numpy_arrays_with_properties(state, self.input_properties)
raw_state['time'] = state['time']
raw_tendencies, raw_diagnostics = self.array_call(raw_state)
tendencies = restore_data_arrays_with_properties(
raw_tendencies, self.tendency_properties,
state, self.input_properties)
# print(state.keys())
# print(raw_state.keys())
# print(raw_diagnostics.keys())
# print(self.diagnostic_properties.keys())
# print(self.input_properties.keys())
# print('-----------')
diagnostics = restore_data_arrays_with_properties(
raw_diagnostics, self.diagnostic_properties,
state, self.input_properties)
return tendencies, diagnostics
def array_call(self, state):
"""
Calculates the vorticity tendency from the current state using
the barotropic vorticity equation.
Args
----
state : dict
A dictionary of numpy arrays containing the model state.
Returns
-------
tendencies : dict
A single-item dictionary containing the vorticity
tendency numpy array.
diagnostics : dict
A dictionary of numpy arrays containing the diagnostics
that were needed to compute the vorticity tendency.
"""
# Get numpy arrays from state
vort = state['vort']
theta = state['latitude']
# Compute diagnostics (streamfunction) for tendency calculation
# using spherical harmonics
s = spharm.Spharmt(theta.shape[1], theta.shape[0], rsphere=Re,
gridtype='gaussian', legfunc='computed')
vort_spec = s.grdtospec(vort, ntrunc=self._ntrunc)
div_spec = np.zeros(vort_spec.shape) # Only want NON-DIVERGENT wind
# Get the winds
u, v = s.getuv(vort_spec, div_spec)
# And now the streamfunction
psi, _ = s.getpsichi(u, v)
# Here we actually compute vorticity tendency
# Compute tendency with beta as only forcing
f = 2 * Omega * np.sin(theta)
beta = s.getgrad(s.grdtospec(f, ntrunc=self._ntrunc))[1]
dvort_dx = s.getgrad(vort_spec)[0]
dvort_dy = s.getgrad(vort_spec)[1]
vort_tend = - v * beta - u * dvort_dx - v * dvort_dy
tendencies = {'vort': vort_tend}
# Collect the diagnostics into a dictionary
diagnostics = {'u': u, 'v': v, 'psi': psi}
return tendencies, diagnostics | 0.888964 | 0.533337 |
from msvc.src.features.featurize import featurize, load_extractor
class TestLoadExtractor(object):
def test_lda(self) -> None:
"""test load LinearDiscriminantAnalysis"""
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
for lda in [
"ocSVM_sgdlinear_LDA1_-_ocSVM_sgdlinear_LDA2_backup",
"spherical-ocLDA_neg",
"spherical-LDA1_-_spherical-LDA2",
]:
assert type(load_extractor(lda)) == LinearDiscriminantAnalysis
def test_ocsvm(self) -> None:
"""test load OneClassSVM"""
from sklearn.svm import OneClassSVM
assert type(load_extractor("LDA-ocSVM_sigmoid_pos")) == OneClassSVM
def test_ocsvm_sgdlinear(self) -> None:
"""test load SGDOneClassSVM"""
from sklearn.linear_model import SGDOneClassSVM
assert type(load_extractor("ocSVM_sgdlinear_neu")) == SGDOneClassSVM
def test_lof(self) -> None:
"""test load LocalOutlierFactor"""
from sklearn.neighbors import LocalOutlierFactor
assert type(load_extractor("LDA-LOF_pos_20")) == LocalOutlierFactor
def test_robust_scaler(self) -> None:
"""test load RobustScaler"""
from sklearn.preprocessing import RobustScaler
assert type(load_extractor("robust_scaler")) == RobustScaler
class TestFeaturize(object):
def test_with_frill(self) -> None:
"""test featurization of FRILL embeddings"""
import pandas as pd
mock_frill_in: pd.DataFrame = pd.read_feather(
"msvc/tests/features/test_df.feather"
)
mock_features_out: pd.DataFrame = pd.read_feather(
"msvc/tests/features/test_features.feather"
)
test_output = featurize(mock_frill_in)
assert all(test_output == mock_features_out)
assert all(test_output.index == mock_features_out.index)
assert all(test_output.columns == mock_features_out.columns) | msvc/tests/features/test_featurize.py |
from msvc.src.features.featurize import featurize, load_extractor
class TestLoadExtractor(object):
def test_lda(self) -> None:
"""test load LinearDiscriminantAnalysis"""
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
for lda in [
"ocSVM_sgdlinear_LDA1_-_ocSVM_sgdlinear_LDA2_backup",
"spherical-ocLDA_neg",
"spherical-LDA1_-_spherical-LDA2",
]:
assert type(load_extractor(lda)) == LinearDiscriminantAnalysis
def test_ocsvm(self) -> None:
"""test load OneClassSVM"""
from sklearn.svm import OneClassSVM
assert type(load_extractor("LDA-ocSVM_sigmoid_pos")) == OneClassSVM
def test_ocsvm_sgdlinear(self) -> None:
"""test load SGDOneClassSVM"""
from sklearn.linear_model import SGDOneClassSVM
assert type(load_extractor("ocSVM_sgdlinear_neu")) == SGDOneClassSVM
def test_lof(self) -> None:
"""test load LocalOutlierFactor"""
from sklearn.neighbors import LocalOutlierFactor
assert type(load_extractor("LDA-LOF_pos_20")) == LocalOutlierFactor
def test_robust_scaler(self) -> None:
"""test load RobustScaler"""
from sklearn.preprocessing import RobustScaler
assert type(load_extractor("robust_scaler")) == RobustScaler
class TestFeaturize(object):
def test_with_frill(self) -> None:
"""test featurization of FRILL embeddings"""
import pandas as pd
mock_frill_in: pd.DataFrame = pd.read_feather(
"msvc/tests/features/test_df.feather"
)
mock_features_out: pd.DataFrame = pd.read_feather(
"msvc/tests/features/test_features.feather"
)
test_output = featurize(mock_frill_in)
assert all(test_output == mock_features_out)
assert all(test_output.index == mock_features_out.index)
assert all(test_output.columns == mock_features_out.columns) | 0.797675 | 0.349366 |
import json
import re
from kobold import compare
def parse_body(content_type, content):
if content_type is None:
return content
if content_type == 'application/json':
return json.loads(content)
elif re.compile('application/json;.*').search(content_type):
return json.loads(content)
else:
return content
def response_matches(expected,
response,
type_compare=None):
'''
Compare two HTTP responses (using kobold.compare).
A type_compare may be provided, but if none is set,
the default is for hashes to be compared using "full" mode,
and for lists to be compared using "ordered" mode.
Headers, however, are supplied an override to ensure that
they are compared in "existing" mode.
The first two arguments are the expected and actual response.
Responses are expected to be a hash, with three keys: status_code,
body and headers. By default, the status_code of the response is
expected to be 200 (ie. success of the request is enforced), and no
headers are enforced.
Body will be a string, unless the actual response has a
Content-Type header of "application/json". In that case,
we parse the JSON and set body to be the parsed data structure.
Remember, by default all keys in the body will be compared.
type_compare can be set to "existing" to change this behavior.
Ultimately, the expected and response hashes are compared using
kobold.compare, and the result is returned. In the case of a match,
the result will be the string "match". In the case of a mismatch,
the result will be a tuple of two elements - the first describing
the mismatched values in the first argument, and the second
describing the mismatched values in the second argument.
'''
if type_compare is None:
type_compare = {
'hash' : 'full',
'ordered' : True}
else:
if isinstance(type_compare, str):
type_compare = {
'hash' : type_compare,
'ordered' : True}
default_expected =\
{'status_code' : 200,
'headers' : {}}
default_expected_headers =\
{'__compare' : 'existing'}
for key, value in default_expected.items():
expected.setdefault(key, value)
for key, value in default_expected_headers.items():
expected['headers'].setdefault(key, value)
content_type = response.headers.get('Content-Type')
actual = {'status_code' : response.status_code,
'headers' : response.headers,
'body' : parse_body(content_type, response.data)}
return compare.compare(expected,
actual,
type_compare) | kobold/response.py | import json
import re
from kobold import compare
def parse_body(content_type, content):
if content_type is None:
return content
if content_type == 'application/json':
return json.loads(content)
elif re.compile('application/json;.*').search(content_type):
return json.loads(content)
else:
return content
def response_matches(expected,
response,
type_compare=None):
'''
Compare two HTTP responses (using kobold.compare).
A type_compare may be provided, but if none is set,
the default is for hashes to be compared using "full" mode,
and for lists to be compared using "ordered" mode.
Headers, however, are supplied an override to ensure that
they are compared in "existing" mode.
The first two arguments are the expected and actual response.
Responses are expected to be a hash, with three keys: status_code,
body and headers. By default, the status_code of the response is
expected to be 200 (ie. success of the request is enforced), and no
headers are enforced.
Body will be a string, unless the actual response has a
Content-Type header of "application/json". In that case,
we parse the JSON and set body to be the parsed data structure.
Remember, by default all keys in the body will be compared.
type_compare can be set to "existing" to change this behavior.
Ultimately, the expected and response hashes are compared using
kobold.compare, and the result is returned. In the case of a match,
the result will be the string "match". In the case of a mismatch,
the result will be a tuple of two elements - the first describing
the mismatched values in the first argument, and the second
describing the mismatched values in the second argument.
'''
if type_compare is None:
type_compare = {
'hash' : 'full',
'ordered' : True}
else:
if isinstance(type_compare, str):
type_compare = {
'hash' : type_compare,
'ordered' : True}
default_expected =\
{'status_code' : 200,
'headers' : {}}
default_expected_headers =\
{'__compare' : 'existing'}
for key, value in default_expected.items():
expected.setdefault(key, value)
for key, value in default_expected_headers.items():
expected['headers'].setdefault(key, value)
content_type = response.headers.get('Content-Type')
actual = {'status_code' : response.status_code,
'headers' : response.headers,
'body' : parse_body(content_type, response.data)}
return compare.compare(expected,
actual,
type_compare) | 0.561455 | 0.319639 |
import os
import unittest
import warnings
from nose.tools import eq_, ok_
from ddtrace.utils.deprecation import deprecation, deprecated, format_message
from ddtrace.utils.formats import asbool, get_env
class TestUtilities(unittest.TestCase):
def test_asbool(self):
# ensure the value is properly cast
eq_(asbool("True"), True)
eq_(asbool("true"), True)
eq_(asbool("1"), True)
eq_(asbool("False"), False)
eq_(asbool("false"), False)
eq_(asbool(None), False)
eq_(asbool(""), False)
eq_(asbool(True), True)
eq_(asbool(False), False)
def test_get_env(self):
# ensure `get_env` returns a default value if environment variables
# are not set
value = get_env('django', 'distributed_tracing')
ok_(value is None)
value = get_env('django', 'distributed_tracing', False)
ok_(value is False)
def test_get_env_found(self):
# ensure `get_env` returns a value if the environment variable is set
os.environ['DD_REQUESTS_DISTRIBUTED_TRACING'] = '1'
value = get_env('requests', 'distributed_tracing')
eq_(value, '1')
def test_get_env_found_legacy(self):
# ensure `get_env` returns a value if legacy environment variables
# are used, raising a Deprecation warning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
os.environ['DATADOG_REQUESTS_DISTRIBUTED_TRACING'] = '1'
value = get_env('requests', 'distributed_tracing')
eq_(value, '1')
ok_(len(w) == 1)
ok_(issubclass(w[-1].category, DeprecationWarning))
ok_('Use `DD_` prefix instead' in str(w[-1].message))
def test_get_env_key_priority(self):
# ensure `get_env` use `DD_` with highest priority
os.environ['DD_REQUESTS_DISTRIBUTED_TRACING'] = 'highest'
os.environ['DATADOG_REQUESTS_DISTRIBUTED_TRACING'] = 'lowest'
value = get_env('requests', 'distributed_tracing')
eq_(value, 'highest')
def test_deprecation_formatter(self):
# ensure the formatter returns the proper message
msg = format_message(
'deprecated_function',
'use something else instead',
'1.0.0',
)
expected = "'deprecated_function' is deprecated and will be remove in future versions (1.0.0). use something else instead"
eq_(msg, expected)
def test_deprecation(self):
# ensure `deprecation` properly raise a DeprecationWarning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
deprecation(
name='fn',
message='message',
version='1.0.0'
)
ok_(len(w) == 1)
ok_(issubclass(w[-1].category, DeprecationWarning))
ok_('message' in str(w[-1].message))
def test_deprecated_decorator(self):
# ensure `deprecated` decorator properly raise a DeprecationWarning
@deprecated('decorator', version='1.0.0')
def fxn():
pass
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
fxn()
ok_(len(w) == 1)
ok_(issubclass(w[-1].category, DeprecationWarning))
ok_('decorator' in str(w[-1].message)) | tests/test_utils.py | import os
import unittest
import warnings
from nose.tools import eq_, ok_
from ddtrace.utils.deprecation import deprecation, deprecated, format_message
from ddtrace.utils.formats import asbool, get_env
class TestUtilities(unittest.TestCase):
def test_asbool(self):
# ensure the value is properly cast
eq_(asbool("True"), True)
eq_(asbool("true"), True)
eq_(asbool("1"), True)
eq_(asbool("False"), False)
eq_(asbool("false"), False)
eq_(asbool(None), False)
eq_(asbool(""), False)
eq_(asbool(True), True)
eq_(asbool(False), False)
def test_get_env(self):
# ensure `get_env` returns a default value if environment variables
# are not set
value = get_env('django', 'distributed_tracing')
ok_(value is None)
value = get_env('django', 'distributed_tracing', False)
ok_(value is False)
def test_get_env_found(self):
# ensure `get_env` returns a value if the environment variable is set
os.environ['DD_REQUESTS_DISTRIBUTED_TRACING'] = '1'
value = get_env('requests', 'distributed_tracing')
eq_(value, '1')
def test_get_env_found_legacy(self):
# ensure `get_env` returns a value if legacy environment variables
# are used, raising a Deprecation warning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
os.environ['DATADOG_REQUESTS_DISTRIBUTED_TRACING'] = '1'
value = get_env('requests', 'distributed_tracing')
eq_(value, '1')
ok_(len(w) == 1)
ok_(issubclass(w[-1].category, DeprecationWarning))
ok_('Use `DD_` prefix instead' in str(w[-1].message))
def test_get_env_key_priority(self):
# ensure `get_env` use `DD_` with highest priority
os.environ['DD_REQUESTS_DISTRIBUTED_TRACING'] = 'highest'
os.environ['DATADOG_REQUESTS_DISTRIBUTED_TRACING'] = 'lowest'
value = get_env('requests', 'distributed_tracing')
eq_(value, 'highest')
def test_deprecation_formatter(self):
# ensure the formatter returns the proper message
msg = format_message(
'deprecated_function',
'use something else instead',
'1.0.0',
)
expected = "'deprecated_function' is deprecated and will be remove in future versions (1.0.0). use something else instead"
eq_(msg, expected)
def test_deprecation(self):
# ensure `deprecation` properly raise a DeprecationWarning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
deprecation(
name='fn',
message='message',
version='1.0.0'
)
ok_(len(w) == 1)
ok_(issubclass(w[-1].category, DeprecationWarning))
ok_('message' in str(w[-1].message))
def test_deprecated_decorator(self):
# ensure `deprecated` decorator properly raise a DeprecationWarning
@deprecated('decorator', version='1.0.0')
def fxn():
pass
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
fxn()
ok_(len(w) == 1)
ok_(issubclass(w[-1].category, DeprecationWarning))
ok_('decorator' in str(w[-1].message)) | 0.613237 | 0.242935 |
import datetime
from sqlalchemy import Boolean, Column, Integer, String, Table, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.sql.sqltypes import DateTime
from ..database import mapper_registry
from app.entities.case import Case, DocketEntry, DistrictCase, AppellateCase
cases_table = Table(
'cases',
mapper_registry.metadata,
Column('id', Integer, primary_key=True, index=True),
Column('title', String, nullable=False),
Column('date_filed', DateTime),
Column('sealed', Boolean, default=False),
Column('type', String),
Column('court', String),
Column('status', String, nullable=True),
Column('original_case_id', Integer),
Column('reviewed', Boolean, default=False),
Column('remanded', Boolean, default=False),
Column('created_at', DateTime, default=datetime.datetime.utcnow),
Column(
'updated_on',
DateTime,
default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow
)
)
docket_entry_table = Table(
"docket_entries",
mapper_registry.metadata,
Column('id', Integer, nullable=False, primary_key=True),
Column('case_id', Integer, ForeignKey('cases.id'), nullable=False),
Column('sequence_no', Integer, nullable=False),
Column('court', String),
Column('text', String, nullable=False),
Column('date_filed', DateTime),
Column('entry_type', String, nullable=False),
Column('sealed', Boolean, default=False),
Column('created_at', DateTime, default=datetime.datetime.utcnow),
Column(
'updated_on',
DateTime,
default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow
)
)
def run_mappers():
mapper_registry.map_imperatively(DocketEntry, docket_entry_table)
mapper_registry.map_imperatively(
Case,
cases_table,
polymorphic_on=cases_table.c.type,
polymorphic_identity="case",
properties={
'docket_entries': relationship(
DocketEntry,
order_by="asc(DocketEntry.sequence_no)"
)
}
)
mapper_registry.map_imperatively(
DistrictCase,
inherits=Case,
polymorphic_identity="district"
)
mapper_registry.map_imperatively(
AppellateCase,
inherits=Case,
polymorphic_identity="appellate"
) | app/data/case/case.py | import datetime
from sqlalchemy import Boolean, Column, Integer, String, Table, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.sql.sqltypes import DateTime
from ..database import mapper_registry
from app.entities.case import Case, DocketEntry, DistrictCase, AppellateCase
cases_table = Table(
'cases',
mapper_registry.metadata,
Column('id', Integer, primary_key=True, index=True),
Column('title', String, nullable=False),
Column('date_filed', DateTime),
Column('sealed', Boolean, default=False),
Column('type', String),
Column('court', String),
Column('status', String, nullable=True),
Column('original_case_id', Integer),
Column('reviewed', Boolean, default=False),
Column('remanded', Boolean, default=False),
Column('created_at', DateTime, default=datetime.datetime.utcnow),
Column(
'updated_on',
DateTime,
default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow
)
)
docket_entry_table = Table(
"docket_entries",
mapper_registry.metadata,
Column('id', Integer, nullable=False, primary_key=True),
Column('case_id', Integer, ForeignKey('cases.id'), nullable=False),
Column('sequence_no', Integer, nullable=False),
Column('court', String),
Column('text', String, nullable=False),
Column('date_filed', DateTime),
Column('entry_type', String, nullable=False),
Column('sealed', Boolean, default=False),
Column('created_at', DateTime, default=datetime.datetime.utcnow),
Column(
'updated_on',
DateTime,
default=datetime.datetime.utcnow,
onupdate=datetime.datetime.utcnow
)
)
def run_mappers():
mapper_registry.map_imperatively(DocketEntry, docket_entry_table)
mapper_registry.map_imperatively(
Case,
cases_table,
polymorphic_on=cases_table.c.type,
polymorphic_identity="case",
properties={
'docket_entries': relationship(
DocketEntry,
order_by="asc(DocketEntry.sequence_no)"
)
}
)
mapper_registry.map_imperatively(
DistrictCase,
inherits=Case,
polymorphic_identity="district"
)
mapper_registry.map_imperatively(
AppellateCase,
inherits=Case,
polymorphic_identity="appellate"
) | 0.458591 | 0.131619 |
import math
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import Dense, Input
def MLP(input_dim, hidden_dim_list, w_init=tf.initializers.Orthogonal(0.2),
activation=tf.nn.relu, *args, **kwargs):
"""Multiple fully-connected layers for approximation
Args:
input_dim (int): size of input tensor
hidden_dim_list (list[int]): a list of dimensions of hidden layers
w_init (callable): initialization method for weights
activation (callable): activation function of hidden layers
Return:
input tensor, output tensor
"""
l = inputs = Input([None, input_dim], name='input_layer')
for i in range(len(hidden_dim_list)):
l = Dense(n_units=hidden_dim_list[i], act=activation, W_init=w_init, name='mlp_layer%d' % (i + 1))(l)
outputs = l
return inputs, outputs
def MLPModel(input_dim, hidden_dim_list, w_init=tf.initializers.Orthogonal(0.2),
activation=tf.nn.relu, *args, **kwargs):
"""Multiple fully-connected layers for approximation
Args:
input_dim (int): size of input tensor
hidden_dim_list (list[int]): a list of dimensions of hidden layers
w_init (callable): initialization method for weights
activation (callable): activation function of hidden layers
Return:
input tensor, output tensor
"""
l = inputs = Input([None, input_dim], name='Input_Layer')
for i in range(len(hidden_dim_list)):
l = Dense(n_units=hidden_dim_list[i], act=activation, W_init=w_init, name='Hidden_Layer%d' % (i + 1))(l)
outputs = l
return tl.models.Model(inputs=inputs, outputs=outputs)
def CNN(input_shape, conv_kwargs=None):
"""Multiple convolutional layers for approximation
Default setting is equal to architecture used in DQN
Args:
input_shape (tuple[int]): (H, W, C)
conv_kwargs (list[param]): list of conv parameters for tl.layers.Conv2d
Return:
input tensor, output tensor
"""
if not conv_kwargs:
in_channels = input_shape[-1]
conv_kwargs = [
{
'in_channels': in_channels, 'n_filter': 32, 'act': tf.nn.relu,
'filter_size': (8, 8), 'strides': (4, 4), 'padding': 'VALID',
'W_init': tf.initializers.GlorotUniform()
},
{
'in_channels': 32, 'n_filter': 64, 'act': tf.nn.relu,
'filter_size': (4, 4), 'strides': (2, 2), 'padding': 'VALID',
'W_init': tf.initializers.GlorotUniform()
},
{
'in_channels': 64, 'n_filter': 64, 'act': tf.nn.relu,
'filter_size': (3, 3), 'strides': (1, 1), 'padding': 'VALID',
'W_init': tf.initializers.GlorotUniform()
}
]
l = inputs = tl.layers.Input((1,) + input_shape, name='input_layer')
for i, kwargs in enumerate(conv_kwargs):
kwargs['name'] = kwargs.get('name', 'cnn_layer{}'.format(i + 1))
l = tl.layers.Conv2d(**kwargs)(l)
outputs = tl.layers.Flatten(name='flatten_layer')(l)
return inputs, outputs
def CNNModel(input_shape, conv_kwargs=None):
"""Multiple convolutional layers for approximation
Default setting is equal to architecture used in DQN
Args:
input_shape (tuple[int]): (H, W, C)
conv_kwargs (list[param]): list of conv parameters for tl.layers.Conv2d
Return:
tl.model.Model
"""
if not conv_kwargs:
in_channels = input_shape[-1]
conv_kwargs = [
{
'in_channels': in_channels, 'n_filter': 32, 'act': tf.nn.relu,
'filter_size': (8, 8), 'strides': (4, 4), 'padding': 'VALID',
'W_init': tf.initializers.GlorotUniform()
},
{
'in_channels': 32, 'n_filter': 64, 'act': tf.nn.relu,
'filter_size': (4, 4), 'strides': (2, 2), 'padding': 'VALID',
'W_init': tf.initializers.GlorotUniform()
},
{
'in_channels': 64, 'n_filter': 64, 'act': tf.nn.relu,
'filter_size': (3, 3), 'strides': (1, 1), 'padding': 'VALID',
'W_init': tf.initializers.GlorotUniform()
}
]
ni = tl.layers.Input((1,) + input_shape, name='CNN_Input')
hi = ni
for i, kwargs in enumerate(conv_kwargs):
kwargs['name'] = kwargs.get('name', 'CNN_Layer{}'.format(i + 1))
hi = tl.layers.Conv2d(**kwargs)(hi)
no = tl.layers.Flatten(name='Flatten_Layer')(hi)
return tl.models.Model(inputs=ni, outputs=no) | rlzoo/common/basic_nets.py | import math
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import Dense, Input
def MLP(input_dim, hidden_dim_list, w_init=tf.initializers.Orthogonal(0.2),
activation=tf.nn.relu, *args, **kwargs):
"""Multiple fully-connected layers for approximation
Args:
input_dim (int): size of input tensor
hidden_dim_list (list[int]): a list of dimensions of hidden layers
w_init (callable): initialization method for weights
activation (callable): activation function of hidden layers
Return:
input tensor, output tensor
"""
l = inputs = Input([None, input_dim], name='input_layer')
for i in range(len(hidden_dim_list)):
l = Dense(n_units=hidden_dim_list[i], act=activation, W_init=w_init, name='mlp_layer%d' % (i + 1))(l)
outputs = l
return inputs, outputs
def MLPModel(input_dim, hidden_dim_list, w_init=tf.initializers.Orthogonal(0.2),
activation=tf.nn.relu, *args, **kwargs):
"""Multiple fully-connected layers for approximation
Args:
input_dim (int): size of input tensor
hidden_dim_list (list[int]): a list of dimensions of hidden layers
w_init (callable): initialization method for weights
activation (callable): activation function of hidden layers
Return:
input tensor, output tensor
"""
l = inputs = Input([None, input_dim], name='Input_Layer')
for i in range(len(hidden_dim_list)):
l = Dense(n_units=hidden_dim_list[i], act=activation, W_init=w_init, name='Hidden_Layer%d' % (i + 1))(l)
outputs = l
return tl.models.Model(inputs=inputs, outputs=outputs)
def CNN(input_shape, conv_kwargs=None):
"""Multiple convolutional layers for approximation
Default setting is equal to architecture used in DQN
Args:
input_shape (tuple[int]): (H, W, C)
conv_kwargs (list[param]): list of conv parameters for tl.layers.Conv2d
Return:
input tensor, output tensor
"""
if not conv_kwargs:
in_channels = input_shape[-1]
conv_kwargs = [
{
'in_channels': in_channels, 'n_filter': 32, 'act': tf.nn.relu,
'filter_size': (8, 8), 'strides': (4, 4), 'padding': 'VALID',
'W_init': tf.initializers.GlorotUniform()
},
{
'in_channels': 32, 'n_filter': 64, 'act': tf.nn.relu,
'filter_size': (4, 4), 'strides': (2, 2), 'padding': 'VALID',
'W_init': tf.initializers.GlorotUniform()
},
{
'in_channels': 64, 'n_filter': 64, 'act': tf.nn.relu,
'filter_size': (3, 3), 'strides': (1, 1), 'padding': 'VALID',
'W_init': tf.initializers.GlorotUniform()
}
]
l = inputs = tl.layers.Input((1,) + input_shape, name='input_layer')
for i, kwargs in enumerate(conv_kwargs):
kwargs['name'] = kwargs.get('name', 'cnn_layer{}'.format(i + 1))
l = tl.layers.Conv2d(**kwargs)(l)
outputs = tl.layers.Flatten(name='flatten_layer')(l)
return inputs, outputs
def CNNModel(input_shape, conv_kwargs=None):
"""Multiple convolutional layers for approximation
Default setting is equal to architecture used in DQN
Args:
input_shape (tuple[int]): (H, W, C)
conv_kwargs (list[param]): list of conv parameters for tl.layers.Conv2d
Return:
tl.model.Model
"""
if not conv_kwargs:
in_channels = input_shape[-1]
conv_kwargs = [
{
'in_channels': in_channels, 'n_filter': 32, 'act': tf.nn.relu,
'filter_size': (8, 8), 'strides': (4, 4), 'padding': 'VALID',
'W_init': tf.initializers.GlorotUniform()
},
{
'in_channels': 32, 'n_filter': 64, 'act': tf.nn.relu,
'filter_size': (4, 4), 'strides': (2, 2), 'padding': 'VALID',
'W_init': tf.initializers.GlorotUniform()
},
{
'in_channels': 64, 'n_filter': 64, 'act': tf.nn.relu,
'filter_size': (3, 3), 'strides': (1, 1), 'padding': 'VALID',
'W_init': tf.initializers.GlorotUniform()
}
]
ni = tl.layers.Input((1,) + input_shape, name='CNN_Input')
hi = ni
for i, kwargs in enumerate(conv_kwargs):
kwargs['name'] = kwargs.get('name', 'CNN_Layer{}'.format(i + 1))
hi = tl.layers.Conv2d(**kwargs)(hi)
no = tl.layers.Flatten(name='Flatten_Layer')(hi)
return tl.models.Model(inputs=ni, outputs=no) | 0.889307 | 0.403508 |
from __future__ import print_function
import os
import io
try:
import httplib2
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
from apiclient import errors
from apiclient import http
except:
print('Run the following code to install the module: \n \
pip install --upgrade google-api-python-client')
# The following codes and instances are used for the first time authorizing the
# connection and getting the credentials. Put the drive-python-quickstart.json in
# the same folder as the codes and use it instead.
"""
# If modifying these scopes, delete your previously saved credentials
# at ~/drive-python-quickstart.json
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
SCOPES = 'https://www.googleapis.com/auth/drive'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Drive API Python Quickstart'"""
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
"""credential_dir = os.getcwd()
credential_path = os.path.join(credential_dir,
'drive-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials"""
store = Storage('drive-python-quickstart.json')
credentials = store.get()
return credentials
def get_file_id(service, file_name):
"""Search the drive and find the file id of the given file name.
Args:
service: Drive API service instance.
file_name: Name of the file to search.
Returns:
The exclusive id of the file. If there are multiple files with the same name.
return None and pritn error.
"""
results = service.files().list().execute()
items_raw = results.get('items', [])
items = [item for item in items_raw if item.get('title')==file_name]
if not items:
print('\tAn error occurred: No files found.')
return None
elif len(items)>1:
print('\tAn error occurred: Duplicated file names! Please check.')
return None
else:
print('\tGot the file id. Print metadata:')
return items[0].get('id')
def print_file_metadata(service, file_id):
"""Print a file's metadata.
Args:
service: Drive API service instance.
file_id: ID of the file to print metadata for.
"""
try:
file = service.files().get(fileId=file_id).execute()
print ('\t\tTitle: %s' % file['title'])
print ('\t\tMIME type: %s' % file['mimeType'])
except errors.HttpError, error:
print ('\t\tAn error occurred: %s' % error)
def download_file(service, file_id, local_fd):
"""Download a Drive file's content to the local filesystem.
Args:
service: Drive API Service instance.
file_id: ID of the Drive file that will downloaded.
local_fd: io.Base or file object, the stream that the Drive file's
contents will be written to.
"""
request = service.files().get_media(fileId=file_id)
fh=io.FileIO(local_fd, 'wb')
media_request = http.MediaIoBaseDownload(fh, request)
while True:
try:
download_progress, done = media_request.next_chunk()
except errors.HttpError, error:
print ('\tAn error occurred: %s' % error)
return
if download_progress:
print ('\tDownload Progress: %d%%' % int(download_progress.progress() * 100))
if done:
print ('\tDownload Complete')
return
def check_and_update(table_file_name_dict, local_fd=None):
"""Check if the file exist in the (given) data folder path. If not, connect
to the drive and download the correct version.
Args:
table_file_name_dict: A dict of table name and its corresponding file name.
local_fd: Give a path to hold the files. If None, the default path will be
used.
Returns:
The path being used.
"""
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('drive', 'v2', http=http)
print('Local data read/write folder path:')
if not local_fd:
local_fd=os.getcwd()+'/data/'
print('\tDefault path: '+local_fd)
else:
if local_fd[len(local_fd)-1] != '/':
local_fd = local_fd + '/'
print('\tCustomed path: '+local_fd)
if not os.path.exists(local_fd):
print('\tCreating the folder...')
os.makedirs(local_fd)
for table_name, file_name in table_file_name_dict.items():
print('\nData: '+table_name+' \nFile: '+file_name)
if not os.path.exists(local_fd+file_name):
print('File does not exist. Searching from drive...')
file_id=get_file_id(service, file_name)
if file_id:
print_file_metadata(service,file_id)
download_file(service, file_id, local_fd+file_name)
else:
print('File already exists.')
return local_fd | connect_to_drive.py | from __future__ import print_function
import os
import io
try:
import httplib2
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
from apiclient import errors
from apiclient import http
except:
print('Run the following code to install the module: \n \
pip install --upgrade google-api-python-client')
# The following codes and instances are used for the first time authorizing the
# connection and getting the credentials. Put the drive-python-quickstart.json in
# the same folder as the codes and use it instead.
"""
# If modifying these scopes, delete your previously saved credentials
# at ~/drive-python-quickstart.json
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
SCOPES = 'https://www.googleapis.com/auth/drive'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Drive API Python Quickstart'"""
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
"""credential_dir = os.getcwd()
credential_path = os.path.join(credential_dir,
'drive-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials"""
store = Storage('drive-python-quickstart.json')
credentials = store.get()
return credentials
def get_file_id(service, file_name):
"""Search the drive and find the file id of the given file name.
Args:
service: Drive API service instance.
file_name: Name of the file to search.
Returns:
The exclusive id of the file. If there are multiple files with the same name.
return None and pritn error.
"""
results = service.files().list().execute()
items_raw = results.get('items', [])
items = [item for item in items_raw if item.get('title')==file_name]
if not items:
print('\tAn error occurred: No files found.')
return None
elif len(items)>1:
print('\tAn error occurred: Duplicated file names! Please check.')
return None
else:
print('\tGot the file id. Print metadata:')
return items[0].get('id')
def print_file_metadata(service, file_id):
"""Print a file's metadata.
Args:
service: Drive API service instance.
file_id: ID of the file to print metadata for.
"""
try:
file = service.files().get(fileId=file_id).execute()
print ('\t\tTitle: %s' % file['title'])
print ('\t\tMIME type: %s' % file['mimeType'])
except errors.HttpError, error:
print ('\t\tAn error occurred: %s' % error)
def download_file(service, file_id, local_fd):
"""Download a Drive file's content to the local filesystem.
Args:
service: Drive API Service instance.
file_id: ID of the Drive file that will downloaded.
local_fd: io.Base or file object, the stream that the Drive file's
contents will be written to.
"""
request = service.files().get_media(fileId=file_id)
fh=io.FileIO(local_fd, 'wb')
media_request = http.MediaIoBaseDownload(fh, request)
while True:
try:
download_progress, done = media_request.next_chunk()
except errors.HttpError, error:
print ('\tAn error occurred: %s' % error)
return
if download_progress:
print ('\tDownload Progress: %d%%' % int(download_progress.progress() * 100))
if done:
print ('\tDownload Complete')
return
def check_and_update(table_file_name_dict, local_fd=None):
"""Check if the file exist in the (given) data folder path. If not, connect
to the drive and download the correct version.
Args:
table_file_name_dict: A dict of table name and its corresponding file name.
local_fd: Give a path to hold the files. If None, the default path will be
used.
Returns:
The path being used.
"""
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('drive', 'v2', http=http)
print('Local data read/write folder path:')
if not local_fd:
local_fd=os.getcwd()+'/data/'
print('\tDefault path: '+local_fd)
else:
if local_fd[len(local_fd)-1] != '/':
local_fd = local_fd + '/'
print('\tCustomed path: '+local_fd)
if not os.path.exists(local_fd):
print('\tCreating the folder...')
os.makedirs(local_fd)
for table_name, file_name in table_file_name_dict.items():
print('\nData: '+table_name+' \nFile: '+file_name)
if not os.path.exists(local_fd+file_name):
print('File does not exist. Searching from drive...')
file_id=get_file_id(service, file_name)
if file_id:
print_file_metadata(service,file_id)
download_file(service, file_id, local_fd+file_name)
else:
print('File already exists.')
return local_fd | 0.517815 | 0.146606 |
# A. Distinct values
# 1. Comparing sets
# Countries recorded as countries of death but not as countries of birth. Distinct method will show the unique values in the document
countries = set(db.laureates.distinct("diedCountry")) - set(db.laureates.distinct("bornCountry"))
print(countries)
# 2. Count countries of affiliation
# The number of distinct countries of laureate affiliation for prizes
count = len(db.laureates.distinct("prizes.affiliations.country"))
print(count)
# B. Distinct values given filters
# 1. Born here, went there
# In which countries have USA-born laureates had affiliations for their prizes? The string works as a pipe operation for the distinct method. So
# the filter will work in the second parameter to create the list of filtered values that are then used to perform the distinct operation on the
# first parameter. In this case the unique list of countries will be returned were the prizes were affiliated to
db.laureates.distinct("prizes.affiliations.country", {"bornCountry":"USA"})
# 2. Triple plays
# Save a filter for prize documents with three or more laureates
criteria = {"laureates.2": {"$exists": True}}
# Save the set of distinct prize categories in documents satisfying the criteria
triple_play_categories = set(db.prizes.distinct("category", criteria))
# Confirm literature as the only category not satisfying the criteria.
assert set(db.prizes.distinct("category")) - triple_play_categories == {"literature"}
# C. Filter arrays using distinct values
# 1. Sharing of physics prizes
# $elemMatch: method proveis the option to add multiple filter clauses
db.laureates.count_documents({
"prizes": {"$elemMatch": {
"category": "physics",
"share": {"$ne": "1"},
"year": {"$lt": "1945"}}}})
# 2. Sharing in other categories
# Save a filter for laureates with unshared prizes
unshared = {
"prizes": {"$elemMatch": {
"category": {"$nin": ["physics", "chemistry", "medicine"]},
"share": "1",
"year": {"$gte": "1945"},
}}}
# Save a filter for laureates with shared prizes
shared = {
"prizes": {"$elemMatch": {
"category": {"$nin": ["physics", "chemistry", "medicine"]},
"share": {"$ne": "1"},
"year": {"$gte": "1945"},
}}}
ratio = db.laureates.count_documents(unshared) / db.laureates.count_documents(shared)
print(ratio)
# 3. Organizations and prizes over time
# Save a filter for organization laureates with prizes won before 1945
before = {
"gender": "org",
"prizes.year": {"$lt": "1945"},
}
# Save a filter for organization laureates with prizes won in or after 1945
in_or_after = {
"gender": "org",
"prizes.year": {"$gte": "1945"},
}
n_before = db.laureates.count_documents(before)
n_in_or_after = db.laureates.count_documents(in_or_after)
ratio = n_in_or_after / (n_in_or_after + n_before)
print(ratio)
# D. Distinct as you like
# 1. Searching for the regular expressions
# ^G: This searches for the G at the beginning of the text
db.laureates.count_documents({"firstname": Regex("^G"), "surname": Regex("^S")})
# 2. Searching for strings
from bson.regex import Regex
# Filter for laureates with a "bornCountry" value starting with "Germany"
criteria = {"bornCountry": Regex("^Germany")}
print(set(db.laureates.distinct("bornCountry", criteria)))
# Fill in a string value to be sandwiched between the strings "^Germany " and "now"
criteria = {"bornCountry": Regex("^Germany " + "\\(" + "now")}
print(set(db.laureates.distinct("bornCountry", criteria)))
#Filter for currently-Germany countries of birth. Fill in a string value to be sandwiched between the strings "now" and "$"
criteria = {"bornCountry": Regex("now" + " Germany\\)" + "$")}
print(set(db.laureates.distinct("bornCountry", criteria)))
# 3. The prized transistor
# Save a filter for laureates with prize motivation values containing "transistor" as a substring
criteria = {"prizes.motivation": Regex("transistor")}
# Save the field names corresponding to a laureate's first name and last name
first, last = "firstname", "surname"
print([(laureate[first], laureate[last]) for laureate in db.laureates.find(criteria)]) | Python/MongoDB/distinct-sets.py |
# A. Distinct values
# 1. Comparing sets
# Countries recorded as countries of death but not as countries of birth. Distinct method will show the unique values in the document
countries = set(db.laureates.distinct("diedCountry")) - set(db.laureates.distinct("bornCountry"))
print(countries)
# 2. Count countries of affiliation
# The number of distinct countries of laureate affiliation for prizes
count = len(db.laureates.distinct("prizes.affiliations.country"))
print(count)
# B. Distinct values given filters
# 1. Born here, went there
# In which countries have USA-born laureates had affiliations for their prizes? The string works as a pipe operation for the distinct method. So
# the filter will work in the second parameter to create the list of filtered values that are then used to perform the distinct operation on the
# first parameter. In this case the unique list of countries will be returned were the prizes were affiliated to
db.laureates.distinct("prizes.affiliations.country", {"bornCountry":"USA"})
# 2. Triple plays
# Save a filter for prize documents with three or more laureates
criteria = {"laureates.2": {"$exists": True}}
# Save the set of distinct prize categories in documents satisfying the criteria
triple_play_categories = set(db.prizes.distinct("category", criteria))
# Confirm literature as the only category not satisfying the criteria.
# FIX: Regex must be imported before its first use below; in the original it
# was imported only after the first Regex(...) call, raising NameError.
from bson.regex import Regex

assert set(db.prizes.distinct("category")) - triple_play_categories == {"literature"}
# C. Filter arrays using distinct values
# 1. Sharing of physics prizes
# $elemMatch provides the option to add multiple filter clauses that must all
# hold for a single element of the "prizes" array.
# NOTE: prize years are stored as strings in this collection, so the
# comparisons below are lexicographic; safe here since all years are 4 digits.
db.laureates.count_documents({
    "prizes": {"$elemMatch": {
        "category": "physics",
        "share": {"$ne": "1"},
        "year": {"$lt": "1945"}}}})
# 2. Sharing in other categories
# Save a filter for laureates with unshared prizes
unshared = {
    "prizes": {"$elemMatch": {
        "category": {"$nin": ["physics", "chemistry", "medicine"]},
        "share": "1",
        "year": {"$gte": "1945"},
    }}}
# Save a filter for laureates with shared prizes
shared = {
    "prizes": {"$elemMatch": {
        "category": {"$nin": ["physics", "chemistry", "medicine"]},
        "share": {"$ne": "1"},
        "year": {"$gte": "1945"},
    }}}
ratio = db.laureates.count_documents(unshared) / db.laureates.count_documents(shared)
print(ratio)
# 3. Organizations and prizes over time
# Save a filter for organization laureates with prizes won before 1945
before = {
    "gender": "org",
    "prizes.year": {"$lt": "1945"},
}
# Save a filter for organization laureates with prizes won in or after 1945
in_or_after = {
    "gender": "org",
    "prizes.year": {"$gte": "1945"},
}
n_before = db.laureates.count_documents(before)
n_in_or_after = db.laureates.count_documents(in_or_after)
ratio = n_in_or_after / (n_in_or_after + n_before)
print(ratio)
# D. Distinct as you like
# 1. Searching with regular expressions
# ^G anchors the match at the beginning of the field value.
db.laureates.count_documents({"firstname": Regex("^G"), "surname": Regex("^S")})
# 2. Searching for strings
# Filter for laureates with a "bornCountry" value starting with "Germany"
criteria = {"bornCountry": Regex("^Germany")}
print(set(db.laureates.distinct("bornCountry", criteria)))
# Match "Germany (now ..." -- the opening parenthesis must be escaped.
criteria = {"bornCountry": Regex("^Germany " + "\\(" + "now")}
print(set(db.laureates.distinct("bornCountry", criteria)))
# Filter for currently-Germany countries of birth: values ending "now Germany)"
criteria = {"bornCountry": Regex("now" + " Germany\\)" + "$")}
print(set(db.laureates.distinct("bornCountry", criteria)))
# 3. The prized transistor
# Save a filter for laureates whose prize motivation contains "transistor"
criteria = {"prizes.motivation": Regex("transistor")}
# Save the field names corresponding to a laureate's first name and last name
first, last = "firstname", "surname"
print([(laureate[first], laureate[last]) for laureate in db.laureates.find(criteria)])
import argparse
import plistlib
import subprocess
import fileinput
PLIST_PATH = "MVVMKit/Info.plist"
def increment_version(version):
    """Bump the last dot-separated component of *version* by one.

    e.g. "1.2.3" -> "1.2.4", "9" -> "10". Accepts anything whose str()
    is a dotted version with a numeric final component.
    """
    parts = str(version).split(".")
    parts[-1] = str(int(parts[-1]) + 1)
    return ".".join(parts)
def get_version():
    """Return the current CFBundleVersion stored in the Info.plist."""
    # NOTE(review): `global` is unnecessary for read-only access to a
    # module-level name.
    global PLIST_PATH
    # NOTE(review): plistlib.readPlist is the legacy API (removed in
    # Python 3.9 in favour of plistlib.load) -- confirm target runtime.
    plist = plistlib.readPlist(PLIST_PATH)
    return plist["CFBundleVersion"]
def set_plist_version(version):
    """Write *version* into every target via Xcode's agvtool."""
    subprocess.check_output(["agvtool", "new-version", "-all", version])
def set_version(version):
    """Set *version* everywhere via agvtool and report the change."""
    old_version = get_version()
    set_plist_version(version)
    # FIX: use the print() call form, which is valid on both Python 2 and
    # Python 3; the original `print "..."` statement is a SyntaxError on 3.
    print("Updated version from [%s] to [%s]." % (old_version, version))
def set_version_automatically():
    """Bump the last component of the current version and apply it."""
    set_version(increment_version(get_version()))
def git_add_all():
    """Stage every change (including deletions) in the working tree."""
    subprocess.check_output(["git", "add", "-A", "."])
def git_commit(message):
    """Create a commit with *message* from the staged changes."""
    subprocess.check_output(["git", "commit", "-m", message])
def git_push():
    """Push the current branch, then all tags, to the default remote."""
    subprocess.check_output(["git", "push"])
    subprocess.check_output(["git", "push", "--tags"])
def git_tag(name, message):
    """Create an annotated tag *name* with *message*."""
    subprocess.check_output(["git", "tag", "-a", name, "-m", message])
def push_version():
    """Commit the bumped version, tag it, and push branch + tags."""
    version = get_version()
    git_add_all()
    git_commit("Bumped version %s;" % version)
    git_tag(version, "%s tag;" % version)
    git_push()
def main():
    """Parse command-line flags and run the requested version action."""
    parser = argparse.ArgumentParser(description='Version bumper')
    parser.add_argument(
        "-n", "--new", help="set new version number")
    parser.add_argument(
        "-a", "--auto", action="store_true",
        help="autoincrement version number")
    parser.add_argument(
        "-p", "--push", action="store_true",
        help="commit and push changes to repositories")
    options = parser.parse_args()
    # At most one action runs: --auto wins over --new, which wins over --push.
    if options.auto:
        set_version_automatically()
    elif options.new:
        set_version(options.new)
    elif options.push:
        push_version()
if __name__ == "__main__":
    main()
import argparse
import plistlib
import subprocess
import fileinput
PLIST_PATH = "MVVMKit/Info.plist"
def increment_version(version):
    """Bump the last dot-separated component of *version* by one."""
    components = str(version).split('.')
    init, last = components[:-1], components[-1:]
    init.append(str(int(last[0]) + 1))
    return ".".join(init)
def get_version():
    """Return the current CFBundleVersion stored in the Info.plist."""
    # NOTE(review): `global` is unnecessary for read-only access.
    global PLIST_PATH
    # NOTE(review): plistlib.readPlist is the legacy API (removed in 3.9).
    plist = plistlib.readPlist(PLIST_PATH)
    return plist["CFBundleVersion"]
def set_plist_version(version):
    """Write *version* into every target via Xcode's agvtool."""
    subprocess.check_output(["agvtool", "new-version", "-all", version])
def set_version(version):
    """Set *version* everywhere via agvtool and report the change."""
    old_version = get_version()
    set_plist_version(version)
    # NOTE(review): print *statement* -- this file is Python 2 only.
    print "Updated version from [%s] to [%s]." \
        % (old_version, version)
def set_version_automatically():
    """Bump the last version component and apply it."""
    current_version = get_version()
    next_version = increment_version(current_version)
    set_version(next_version)
def git_add_all():
    """Stage every change (including deletions) in the working tree."""
    subprocess.check_output(["git", "add", "-A", "."])
def git_commit(message):
    """Create a commit with *message* from the staged changes."""
    subprocess.check_output(["git", "commit", "-m", message])
def git_push():
    """Push the current branch, then all tags, to the default remote."""
    subprocess.check_output(["git", "push"])
    subprocess.check_output(["git", "push", "--tags"])
def git_tag(name, message):
    """Create an annotated tag *name* with *message*."""
    subprocess.check_output(["git", "tag", "-a", name, "-m", message])
def push_version():
    """Commit the bumped version, tag it, and push branch + tags."""
    version = get_version()
    git_add_all()
    git_commit("Bumped version %s;" % version)
    git_tag(version, "%s tag;" % version)
    git_push()
def main():
    """Parse command-line flags and run the requested version action."""
    parser = argparse.ArgumentParser(description='Version bumper')
    parser.add_argument("-n", "--new", help="set new version number")
    parser.add_argument("-a", "--auto", help="autoincrement version number",
                        action="store_true")
    parser.add_argument("-p", "--push",
                        help="commit and push changes to repositories",
                        action="store_true")
    args = parser.parse_args()
    # At most one action runs: --auto wins over --new, then --push.
    if args.auto:
        set_version_automatically()
    elif args.new:
        set_version(args.new)
    elif args.push:
        push_version()
if __name__ == "__main__":
    main()
import cmdc
import nose
from nose.plugins.skip import SkipTest
from maya import cmds
from maya.api import OpenMaya
from . import assert_equals, as_obj, as_plug, new_scene
def test_createNode():
    # Generator-style nose test for MDagModifier::createNode overloads.
    # NOTE(review): this early return disables the entire test -- everything
    # below is dead code. Confirm whether this skip is still intended.
    return
    node = cmds.createNode('transform', name='root')
    node_obj = as_obj(node)
    null_obj = cmdc.Object()
    type_id = cmdc.FnDependencyNode(node_obj).typeId()
    # Each tuple yields a sub-test expected to succeed.
    for doc, (value, parent) in (
        ['a valid type name', ('transform', null_obj)],
        ['a valid type name and parent', ('transform', node_obj)],
        ['a valid typeId', (cmdc.TypeId(type_id), null_obj)],
        ['a valid typeId and parent', (cmdc.TypeId(type_id), node_obj)],
    ):
        # nose reports the generator's __doc__ as the sub-test description.
        test_createNode.__doc__ = """Test MDagModifier::createNode if called with {}.""".format(doc)
        yield _createNode_pass, value, parent
    not_a_dag = as_obj('time1')
    not_a_node = as_plug('persp.message').attribute()
    type_id = cmdc.FnDependencyNode(as_obj('time1')).typeId()
    # Each tuple yields a sub-test expected to raise.
    for doc, (value, parent) in (
        ['an invalid type name', ('foobar', null_obj)],
        ['a non-DAG type name', ('network', null_obj)],
        ['an invalid typeId', (cmdc.TypeId(0xdeadbeef), null_obj)],
        ['an non-DAG typeId', (cmdc.TypeId(type_id), null_obj)],
        ['an invalid parent (not a DAG node)', ('transform', not_a_dag)],
        ['an invalid parent (not a node)', ('transform', not_a_node)],
    ):
        test_createNode.__doc__ = """Test MDagGModifier::createNode raises error if called with {}.""".format(doc)
        yield _createNode_fail, value, parent
@nose.with_setup(teardown=new_scene)
def _createNode_fail(value, parent):
    """Assert createNode(value, parent) raises and leaves the scene intact."""
    old_nodes = cmds.ls(long=True)
    nose.tools.assert_raises(
        TypeError, _createNode_pass, value, parent
    )
    new_nodes = cmds.ls(long=True)
    # A failed create must not add or remove any node.
    assert len(old_nodes) == len(new_nodes), "DagModifier.createNode modified the scene graph."
@nose.with_setup(teardown=new_scene)
def _createNode_pass(value, parent):
    """Assert createNode(value, parent) adds exactly one valid node."""
    old_nodes = cmds.ls(long=True)
    mod = cmdc.DagModifier()
    node = mod.createNode(value, parent)
    mod.doIt()
    new_nodes = cmds.ls(long=True)
    add_nodes = set(new_nodes) - set(old_nodes)
    assert not node.isNull(), "Created node is not valid."
    assert len(add_nodes) == 1, "`ls` did not return new node."
def test_reparentNode():
    # Generator-style nose test for MDagModifier::reparentNode.
    # Hierarchy: a -> c -> d, with b as a sibling reparent target.
    node_a = cmds.createNode('transform')
    node_b = cmds.createNode('transform')
    node_c = cmds.createNode('transform', parent=node_a)
    node_d = cmds.createNode('transform', parent=node_c)
    # NOTE(review): node_obj_a is never used below.
    node_obj_a = as_obj(node_a)
    node_obj_b = as_obj(node_b)
    node_obj_c = as_obj(node_c)
    node_obj_d = as_obj(node_d)
    null_obj = cmdc.Object()
    # Sub-tests expected to succeed.
    for doc, (node, new_parent) in (
        ['a null object (parent to world)', (node_obj_c, null_obj)],
        ['a valid object', (node_obj_c, node_obj_b)],
    ):
        test_reparentNode.__doc__ = """Test MDagModifier::reparentNode if called with {}.""".format(doc)
        yield _reparentNode_pass, node, new_parent
    not_a_dag = as_obj('time1')
    not_a_node = as_plug('persp.message').attribute()
    # Sub-tests expected to raise the given exception type.
    for exc, doc, (node, new_parent) in (
        [TypeError, 'an invalid object (not a DAG node)', (node_obj_c, not_a_dag)],
        [TypeError, 'an invalid object (not a node)', (node_obj_c, not_a_node)],
        [ValueError, 'the same object', (node_obj_c, node_obj_c)],
        [ValueError, 'a parent and one of its children', (node_obj_c, node_obj_d)],
    ):
        test_reparentNode.__doc__ = """Test MDagModifier::reparentNode raises an error if called with {}.""".format(doc)
        yield _reparentNode_fail, exc, node, new_parent
def _reparentNode_pass(node, new_parent):
    """Assert reparentNode moves the node, and undoIt restores the parent."""
    fn_node = cmdc.FnDagNode(node)
    old_parent = fn_node.parent(0)
    mod = cmdc.DagModifier()
    mod.reparentNode(node, new_parent)
    mod.doIt()
    parent = fn_node.parent(0)
    # A null new_parent means "parent to world" (the DAG root).
    if new_parent.isNull():
        assert parent == fn_node.dagRoot(), "DagModifier.reparentNode doIt failed"
    else:
        assert parent == new_parent, "DagModifier.reparentNode doIt failed"
    mod.undoIt()
    parent = fn_node.parent(0)
    assert parent == old_parent, "DagModifier.reparentNode undo failed"
    # Parent the node to world before the next test.
    mod = cmdc.DagModifier()
    mod.reparentNode(node, old_parent)
    mod.doIt()
def _reparentNode_fail(exception, node, new_parent):
    """Assert reparentNode raises *exception* for an invalid reparenting."""
    nose.tools.assert_raises(
        exception,
        cmdc.DagModifier().reparentNode,
        node, new_parent
    )
import cmdc
import nose
from nose.plugins.skip import SkipTest
from maya import cmds
from maya.api import OpenMaya
from . import assert_equals, as_obj, as_plug, new_scene
def test_createNode():
    # Generator-style nose test for MDagModifier::createNode overloads.
    # NOTE(review): this early return disables the entire test (dead code
    # below) -- confirm whether the skip is intended.
    return
    node = cmds.createNode('transform', name='root')
    node_obj = as_obj(node)
    null_obj = cmdc.Object()
    type_id = cmdc.FnDependencyNode(node_obj).typeId()
    for doc, (value, parent) in (
        ['a valid type name', ('transform', null_obj)],
        ['a valid type name and parent', ('transform', node_obj)],
        ['a valid typeId', (cmdc.TypeId(type_id), null_obj)],
        ['a valid typeId and parent', (cmdc.TypeId(type_id), node_obj)],
    ):
        test_createNode.__doc__ = """Test MDagModifier::createNode if called with {}.""".format(doc)
        yield _createNode_pass, value, parent
    not_a_dag = as_obj('time1')
    not_a_node = as_plug('persp.message').attribute()
    type_id = cmdc.FnDependencyNode(as_obj('time1')).typeId()
    for doc, (value, parent) in (
        ['an invalid type name', ('foobar', null_obj)],
        ['a non-DAG type name', ('network', null_obj)],
        ['an invalid typeId', (cmdc.TypeId(0xdeadbeef), null_obj)],
        ['an non-DAG typeId', (cmdc.TypeId(type_id), null_obj)],
        ['an invalid parent (not a DAG node)', ('transform', not_a_dag)],
        ['an invalid parent (not a node)', ('transform', not_a_node)],
    ):
        test_createNode.__doc__ = """Test MDagGModifier::createNode raises error if called with {}.""".format(doc)
        yield _createNode_fail, value, parent
@nose.with_setup(teardown=new_scene)
def _createNode_fail(value, parent):
    """Assert createNode(value, parent) raises and leaves the scene intact."""
    old_nodes = cmds.ls(long=True)
    nose.tools.assert_raises(
        TypeError, _createNode_pass, value, parent
    )
    new_nodes = cmds.ls(long=True)
    assert len(old_nodes) == len(new_nodes), "DagModifier.createNode modified the scene graph."
@nose.with_setup(teardown=new_scene)
def _createNode_pass(value, parent):
    """Assert createNode(value, parent) adds exactly one valid node."""
    old_nodes = cmds.ls(long=True)
    mod = cmdc.DagModifier()
    node = mod.createNode(value, parent)
    mod.doIt()
    new_nodes = cmds.ls(long=True)
    add_nodes = set(new_nodes) - set(old_nodes)
    assert not node.isNull(), "Created node is not valid."
    assert len(add_nodes) == 1, "`ls` did not return new node."
def test_reparentNode():
    # Generator-style nose test for MDagModifier::reparentNode.
    node_a = cmds.createNode('transform')
    node_b = cmds.createNode('transform')
    node_c = cmds.createNode('transform', parent=node_a)
    node_d = cmds.createNode('transform', parent=node_c)
    # NOTE(review): node_obj_a is never used below.
    node_obj_a = as_obj(node_a)
    node_obj_b = as_obj(node_b)
    node_obj_c = as_obj(node_c)
    node_obj_d = as_obj(node_d)
    null_obj = cmdc.Object()
    for doc, (node, new_parent) in (
        ['a null object (parent to world)', (node_obj_c, null_obj)],
        ['a valid object', (node_obj_c, node_obj_b)],
    ):
        test_reparentNode.__doc__ = """Test MDagModifier::reparentNode if called with {}.""".format(doc)
        yield _reparentNode_pass, node, new_parent
    not_a_dag = as_obj('time1')
    not_a_node = as_plug('persp.message').attribute()
    for exc, doc, (node, new_parent) in (
        [TypeError, 'an invalid object (not a DAG node)', (node_obj_c, not_a_dag)],
        [TypeError, 'an invalid object (not a node)', (node_obj_c, not_a_node)],
        [ValueError, 'the same object', (node_obj_c, node_obj_c)],
        [ValueError, 'a parent and one of its children', (node_obj_c, node_obj_d)],
    ):
        test_reparentNode.__doc__ = """Test MDagModifier::reparentNode raises an error if called with {}.""".format(doc)
        yield _reparentNode_fail, exc, node, new_parent
def _reparentNode_pass(node, new_parent):
    """Assert reparentNode moves the node, and undoIt restores the parent."""
    fn_node = cmdc.FnDagNode(node)
    old_parent = fn_node.parent(0)
    mod = cmdc.DagModifier()
    mod.reparentNode(node, new_parent)
    mod.doIt()
    parent = fn_node.parent(0)
    # A null new_parent means "parent to world" (the DAG root).
    if new_parent.isNull():
        assert parent == fn_node.dagRoot(), "DagModifier.reparentNode doIt failed"
    else:
        assert parent == new_parent, "DagModifier.reparentNode doIt failed"
    mod.undoIt()
    parent = fn_node.parent(0)
    assert parent == old_parent, "DagModifier.reparentNode undo failed"
    # Parent the node to world before the next test.
    mod = cmdc.DagModifier()
    mod.reparentNode(node, old_parent)
    mod.doIt()
def _reparentNode_fail(exception, node, new_parent):
    """Assert reparentNode raises *exception* for an invalid reparenting."""
    nose.tools.assert_raises(
        exception,
        cmdc.DagModifier().reparentNode,
        node, new_parent
    )
from collections import OrderedDict
from onegov.ballot.constants import election_day_i18n_used_locales
from onegov.ballot.models.election.candidate import Candidate
from onegov.ballot.models.election.candidate_result import CandidateResult
from onegov.ballot.models.election.election import Election
from onegov.ballot.models.election.election_result import ElectionResult
from onegov.ballot.models.election.list import List
from onegov.ballot.models.election.list_connection import ListConnection
from onegov.ballot.models.election.list_result import ListResult
from onegov.ballot.models.election.mixins import PartyResultExportMixin
from onegov.ballot.models.election.panachage_result import PanachageResult
from onegov.ballot.models.election.party_result import PartyResult
from onegov.ballot.models.mixins import DomainOfInfluenceMixin
from onegov.ballot.models.mixins import TitleTranslationsMixin
from onegov.core.orm import Base
from onegov.core.orm import translation_hybrid
from onegov.core.orm.mixins import ContentMixin
from onegov.core.orm.mixins import meta_property
from onegov.core.orm.mixins import TimestampMixin
from onegov.core.orm.types import HSTORE
from onegov.core.orm.types import UUID
from sqlalchemy import cast
from sqlalchemy import Column, Boolean
from sqlalchemy import Date
from sqlalchemy import desc
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import or_
from sqlalchemy import Text
from sqlalchemy_utils import observes
from sqlalchemy.orm import backref
from sqlalchemy.orm import object_session
from sqlalchemy.orm import relationship
from sqlalchemy.sql.expression import literal_column
from uuid import uuid4
class ElectionCompoundAssociation(Base):
    """Link table connecting an election compound with its elections.

    election_id alone is the primary key, so an election can belong to at
    most one compound at a time.
    """
    __tablename__ = 'election_compound_associations'
    #: identifies the association
    id = Column(UUID, primary_key=True, default=uuid4)
    #: The election compound ID
    election_compound_id = Column(
        Text,
        ForeignKey('election_compounds.id', onupdate='CASCADE')
    )
    #: The election ID
    election_id = Column(
        Text,
        ForeignKey('elections.id', onupdate='CASCADE'),
        primary_key=True
    )
    election_compound = relationship(
        'ElectionCompound', backref=backref(
            'associations',
            cascade='all, delete-orphan',
            lazy='dynamic'
        )
    )
    election = relationship(
        'Election', backref=backref(
            'associations',
            cascade='all, delete-orphan',
            lazy='dynamic'
        )
    )
class ElectionCompound(
    Base, ContentMixin, TimestampMixin,
    DomainOfInfluenceMixin, TitleTranslationsMixin,
    PartyResultExportMixin
):
    """A compound of elections held on the same date, with aggregated
    party/panachage results of its own.
    """
    __tablename__ = 'election_compounds'
    #: Identifies the election compound, may be used in the url
    id = Column(Text, primary_key=True)
    #: all translations of the title
    title_translations = Column(HSTORE, nullable=False)
    #: the translated title (uses the locale of the request, falls back to the
    #: default locale of the app)
    title = translation_hybrid(title_translations)
    @observes('title_translations')
    def title_observer(self, translations):
        """Derive the id from the title the first time a title is set."""
        if not self.id:
            self.id = self.id_from_title(object_session(self))
    #: Shortcode for cantons that use it
    shortcode = Column(Text, nullable=True)
    #: The date of the elections
    date = Column(Date, nullable=False)
    #: Enable Doppelter Pukelsheim for setting status of child elections
    after_pukelsheim = Column(Boolean, nullable=False, default=False)
    #: Status for Doppelter Pukelsheim to set via Website
    pukelsheim_completed = Column(Boolean, nullable=False, default=False)
    #: An election compound may contain n party results
    party_results = relationship(
        'PartyResult',
        primaryjoin=(
            'foreign(PartyResult.owner) == ElectionCompound.id'
        ),
        cascade='all, delete-orphan',
        lazy='dynamic',
    )
    #: An election compound may contain n panachage results
    panachage_results = relationship(
        'PanachageResult',
        primaryjoin=(
            'foreign(PanachageResult.owner) == ElectionCompound.id'
        ),
        cascade='all, delete-orphan',
        lazy='dynamic',
    )
    #: Defines optional colors for parties
    colors = meta_property('colors', default=dict)
    #: If true, an election represents a single entity rather than a (partial)
    #: district
    aggregated_by_entity = meta_property('aggregated_by_entity', default=False)
    @property
    def elections(self):
        """The compound's elections, sorted by status, then shortcode.

        NOTE(review): the sort key stringifies status, so a None status
        sorts as the literal "None" -- confirm this ordering is intended.
        """
        elections = [association.election for association in self.associations]
        return sorted(
            elections,
            key=lambda x: f"{x.status}{x.shortcode or ''}"
        )
    @elections.setter
    def elections(self, value):
        # Replaces all existing associations (delete-orphan cascade removes
        # the old link rows).
        self.associations = [
            ElectionCompoundAssociation(election_id=election.id)
            for election in value
        ]
    @property
    def number_of_mandates(self):
        """ The (total) number of mandates. """
        return sum([
            election.number_of_mandates for election in self.elections
        ])
    def allocated_mandates(self, consider_completed=False):
        """ Number of already allocated mandates/elected candidates.

        If consider_completed is set, only candidates of completed
        elections are counted.
        """
        if consider_completed:
            election_ids = [e.id for e in self.elections if e.completed]
        else:
            election_ids = [e.id for e in self.elections]
        if not election_ids:
            return 0
        session = object_session(self)
        # nullif(elected, False) maps False to NULL, so count() only counts
        # rows where elected is true.
        mandates = session.query(
            func.count(func.nullif(Candidate.elected, False))
        )
        mandates = mandates.filter(Candidate.election_id.in_(election_ids))
        mandates = mandates.first()
        return mandates[0] if mandates else 0
    @property
    def counted(self):
        """ True if all elections have been counted. """
        for election in self.elections:
            if not election.counted:
                return False
        return True
    @property
    def progress(self):
        """ Returns a tuple with the first value being the number of counted
        elections and the second value being the number of total elections.

        NOTE(review): despite the wording above, the code counts *completed*
        elections, not merely counted ones -- confirm which is intended.
        """
        results = [election.completed for election in self.elections]
        return sum(1 for result in results if result), len(results)
    @property
    def counted_entities(self):
        """Titles of the elections that are already completed."""
        return [
            election.title for election in self.elections
            if election.completed
        ]
    @property
    def has_results(self):
        """ Returns True, if the election compound has any results. """
        if self.party_results.first():
            return True
        if self.panachage_results.first():
            return True
        for election in self.elections:
            if election.has_results:
                return True
        return False
    @property
    def completed(self):
        """ Returns True, if all elections are completed.

        With Doppelter Pukelsheim enabled, the manually set
        pukelsheim_completed flag takes precedence.
        """
        if self.after_pukelsheim:
            return self.pukelsheim_completed
        for election in self.elections:
            if not election.completed:
                return False
        return True
@property
def last_modified(self):
""" Returns last change of the elections. """
changes = [self.last_change, self.last_result_change]
session = object_session(self)
election_ids = [election.id for election in self.elections]
# Get the last election change
result = object_session(self).query(Election.last_change)
result = result.order_by(desc(Election.last_change))
result = result.filter(Election.id.in_(election_ids))
changes.append(result.first()[0] if result.first() else None)
# Get the last candidate change
result = object_session(self).query(Candidate.last_change)
result = result.order_by(desc(Candidate.last_change))
result = result.filter(Candidate.election_id.in_(election_ids))
changes.append(result.first()[0] if result.first() else None)
# Get the last list connection change
result = session.query(ListConnection.last_change)
result = result.order_by(desc(ListConnection.last_change))
result = result.filter(ListConnection.election_id.in_(election_ids))
changes.append(result.first()[0] if result.first() else None)
# Get the last list change
result = session.query(List.last_change)
result = result.order_by(desc(List.last_change))
result = result.filter(List.election_id == self.id)
changes.append(result.first()[0] if result.first() else None)
changes = [change for change in changes if change]
return max(changes) if changes else None
    @property
    def last_result_change(self):
        """ Returns the last change of the results of the elections.

        Considers election, candidate, list, panachage and party results.
        """
        changes = []
        session = object_session(self)
        election_ids = [election.id for election in self.elections]
        # Get the last election result change
        result = session.query(ElectionResult.last_change)
        result = result.order_by(desc(ElectionResult.last_change))
        result = result.filter(ElectionResult.election_id.in_(election_ids))
        # NOTE(review): calling .first() twice executes the query twice.
        changes.append(result.first()[0] if result.first() else None)
        # Get the last candidate result change
        ids = session.query(Candidate.id)
        ids = ids.filter(Candidate.election_id.in_(election_ids)).all()
        result = session.query(CandidateResult.last_change)
        result = result.order_by(desc(CandidateResult.last_change))
        result = result.filter(CandidateResult.candidate_id.in_(ids))
        changes.append(result.first()[0] if result.first() else None)
        # Get the last list result changes
        ids = session.query(List.id)
        ids = ids.filter(List.election_id.in_(election_ids)).all()
        if ids:
            result = session.query(ListResult.last_change)
            result = result.order_by(desc(ListResult.last_change))
            result = result.filter(ListResult.list_id.in_(ids))
            changes.append(result.first()[0] if result.first() else None)
        # Get the last panachage result changes
        if ids:
            # PanachageResult.target stores ids as strings.
            ids = [str(id_[0]) for id_ in ids]
            result = session.query(PanachageResult.last_change)
            result = result.order_by(desc(PanachageResult.last_change))
            result = result.filter(
                or_(
                    PanachageResult.target.in_(ids),
                    PanachageResult.owner == self.id,
                )
            )
            changes.append(result.first()[0] if result.first() else None)
        # Get the last party result changes (owned by the elections or by
        # the compound itself)
        result = session.query(PartyResult.last_change)
        result = result.order_by(desc(PartyResult.last_change))
        result = result.filter(PartyResult.owner.in_(election_ids + [self.id]))
        changes.append(result.first()[0] if result.first() else None)
        changes = [change for change in changes if change]
        return max(changes) if changes else None
@property
def elected_candidates(self):
""" Returns the first and last names of the elected candidates. """
result = []
for election in self.elections:
result.extend(election.elected_candidates)
return result
    def get_list_results(self, limit=None, names=None, order_by='votes'):
        """ Returns the aggregated number of mandates and votes of all the
        lists.

        :param limit: optionally restrict the number of rows returned.
        :param names: optionally restrict to the given list names.
        :param order_by: 'votes' or 'number_of_mandates'; used as the label
            name to sort the combined query by, descending.
        """
        assert order_by in ('votes', 'number_of_mandates')
        session = object_session(self)
        # Query number of mandates (votes fixed to 0, summed in the union)
        mandates = session.query(
            List.name.label('name'),
            func.sum(List.number_of_mandates).label('number_of_mandates'),
            literal_column('0').label('votes')
        )
        mandates = mandates.join(ElectionCompound.associations)
        mandates = mandates.filter(ElectionCompound.id == self.id)
        if names:
            mandates = mandates.filter(List.name.in_(names))
        mandates = mandates.join(Election, List)
        mandates = mandates.group_by(List.name)
        # Query votes (mandates fixed to 0, summed in the union)
        votes = session.query(
            List.name.label('name'),
            literal_column('0').label('number_of_mandates'),
            func.sum(ListResult.votes).label('votes')
        )
        votes = votes.join(ElectionCompound.associations)
        votes = votes.filter(ElectionCompound.id == self.id)
        if names:
            votes = votes.filter(List.name.in_(names))
        votes = votes.join(Election, List, ListResult)
        votes = votes.group_by(List.name)
        # Combine both partial results per list name
        union = mandates.union_all(votes).subquery('union')
        query = session.query(
            union.c.name.label('name'),
            cast(func.sum(union.c.number_of_mandates), Integer).label(
                'number_of_mandates'
            ),
            cast(func.sum(union.c.votes), Integer).label('votes')
        )
        query = query.group_by(union.c.name)
        # order_by is a label name ('votes'/'number_of_mandates'), guarded by
        # the assert above.
        query = query.order_by(desc(order_by))
        if limit and limit > 0:
            query = query.limit(limit)
        return query
#: may be used to store a link related to this election
related_link = meta_property('related_link')
related_link_label = meta_property('related_link_label')
#: may be used to enable/disable the visibility of party strengths
show_party_strengths = meta_property('party_strengths')
#: may be used to enable/disable the visibility of mandate allocation
show_mandate_allocation = meta_property('mandate_allocation')
    def clear_results(self):
        """ Clears all own results (party and panachage); the results of the
        individual elections are untouched.
        """
        session = object_session(self)
        for result in self.party_results:
            session.delete(result)
        for result in self.panachage_results:
            session.delete(result)
    def export(self, consider_completed=False):
        """ Returns all data connected to this election compound as list with
        dicts.
        This is meant as a base for json/csv/excel exports. The result is
        therefore a flat list of dictionaries with repeating values to avoid
        the nesting of values. Each record in the resulting list is a single
        candidate result for each political entity. Party results are not
        included in the export (since they are not really connected with the
        lists).
        If consider completed, status for candidate_elected and
        absolute_majority will be set to None if election is not completed.
        """
        common = OrderedDict()
        # Seed every locale used by election day with a default ...
        for locale in election_day_i18n_used_locales:
            common[f'compound_title_{locale}'] = \
                self.title_translations.get(locale, '')
        # ... then overwrite with the stripped translations that exist.
        for locale, title in self.title_translations.items():
            common[f'compound_title_{locale}'] = (title or '').strip()
        common['compound_date'] = self.date.isoformat()
        common['compound_mandates'] = self.number_of_mandates
        rows = []
        for election in self.elections:
            for row in election.export(consider_completed):
                # Prefix every election row with the shared compound columns.
                rows.append(
                    OrderedDict(list(common.items()) + list(row.items()))
                )
        return rows
from collections import OrderedDict
from onegov.ballot.constants import election_day_i18n_used_locales
from onegov.ballot.models.election.candidate import Candidate
from onegov.ballot.models.election.candidate_result import CandidateResult
from onegov.ballot.models.election.election import Election
from onegov.ballot.models.election.election_result import ElectionResult
from onegov.ballot.models.election.list import List
from onegov.ballot.models.election.list_connection import ListConnection
from onegov.ballot.models.election.list_result import ListResult
from onegov.ballot.models.election.mixins import PartyResultExportMixin
from onegov.ballot.models.election.panachage_result import PanachageResult
from onegov.ballot.models.election.party_result import PartyResult
from onegov.ballot.models.mixins import DomainOfInfluenceMixin
from onegov.ballot.models.mixins import TitleTranslationsMixin
from onegov.core.orm import Base
from onegov.core.orm import translation_hybrid
from onegov.core.orm.mixins import ContentMixin
from onegov.core.orm.mixins import meta_property
from onegov.core.orm.mixins import TimestampMixin
from onegov.core.orm.types import HSTORE
from onegov.core.orm.types import UUID
from sqlalchemy import cast
from sqlalchemy import Column, Boolean
from sqlalchemy import Date
from sqlalchemy import desc
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import or_
from sqlalchemy import Text
from sqlalchemy_utils import observes
from sqlalchemy.orm import backref
from sqlalchemy.orm import object_session
from sqlalchemy.orm import relationship
from sqlalchemy.sql.expression import literal_column
from uuid import uuid4
class ElectionCompoundAssociation(Base):
    """Link table connecting an election compound with its elections."""
    __tablename__ = 'election_compound_associations'
    #: identifies the association
    id = Column(UUID, primary_key=True, default=uuid4)
    #: The election compound ID
    election_compound_id = Column(
        Text,
        ForeignKey('election_compounds.id', onupdate='CASCADE')
    )
    #: The election ID
    election_id = Column(
        Text,
        ForeignKey('elections.id', onupdate='CASCADE'),
        primary_key=True
    )
    election_compound = relationship(
        'ElectionCompound', backref=backref(
            'associations',
            cascade='all, delete-orphan',
            lazy='dynamic'
        )
    )
    election = relationship(
        'Election', backref=backref(
            'associations',
            cascade='all, delete-orphan',
            lazy='dynamic'
        )
    )
class ElectionCompound(
Base, ContentMixin, TimestampMixin,
DomainOfInfluenceMixin, TitleTranslationsMixin,
PartyResultExportMixin
):
__tablename__ = 'election_compounds'
#: Identifies the election compound, may be used in the url
id = Column(Text, primary_key=True)
#: all translations of the title
title_translations = Column(HSTORE, nullable=False)
#: the translated title (uses the locale of the request, falls back to the
#: default locale of the app)
title = translation_hybrid(title_translations)
@observes('title_translations')
def title_observer(self, translations):
if not self.id:
self.id = self.id_from_title(object_session(self))
#: Shortcode for cantons that use it
shortcode = Column(Text, nullable=True)
#: The date of the elections
date = Column(Date, nullable=False)
#: Enable Doppelter Pukelsheim for setting status of child elections
after_pukelsheim = Column(Boolean, nullable=False, default=False)
#: Status for Doppelter Pukelsheim to set via Website
pukelsheim_completed = Column(Boolean, nullable=False, default=False)
#: An election compound may contains n party results
party_results = relationship(
'PartyResult',
primaryjoin=(
'foreign(PartyResult.owner) == ElectionCompound.id'
),
cascade='all, delete-orphan',
lazy='dynamic',
)
#: An election compound may contains n panachage results
panachage_results = relationship(
'PanachageResult',
primaryjoin=(
'foreign(PanachageResult.owner) == ElectionCompound.id'
),
cascade='all, delete-orphan',
lazy='dynamic',
)
#: Defines optional colors for parties
colors = meta_property('colors', default=dict)
#: If true, an election represents a single entity rather than a (partial)
#: district
aggregated_by_entity = meta_property('aggregated_by_entity', default=False)
@property
def elections(self):
elections = [association.election for association in self.associations]
return sorted(
elections,
key=lambda x: f"{x.status}{x.shortcode or ''}"
)
@elections.setter
def elections(self, value):
self.associations = [
ElectionCompoundAssociation(election_id=election.id)
for election in value
]
@property
def number_of_mandates(self):
""" The (total) number of mandates. """
return sum([
election.number_of_mandates for election in self.elections
])
def allocated_mandates(self, consider_completed=False):
""" Number of already allocated mandates/elected candidates. """
if consider_completed:
election_ids = [e.id for e in self.elections if e.completed]
else:
election_ids = [e.id for e in self.elections]
if not election_ids:
return 0
session = object_session(self)
mandates = session.query(
func.count(func.nullif(Candidate.elected, False))
)
mandates = mandates.filter(Candidate.election_id.in_(election_ids))
mandates = mandates.first()
return mandates[0] if mandates else 0
@property
def counted(self):
""" True if all elections have been counted. """
for election in self.elections:
if not election.counted:
return False
return True
@property
def progress(self):
""" Returns a tuple with the first value being the number of counted
elections and the second value being the number of total elections.
"""
results = [election.completed for election in self.elections]
return sum(1 for result in results if result), len(results)
@property
def counted_entities(self):
return [
election.title for election in self.elections
if election.completed
]
@property
def has_results(self):
""" Returns True, if the election compound has any results. """
if self.party_results.first():
return True
if self.panachage_results.first():
return True
for election in self.elections:
if election.has_results:
return True
return False
@property
def completed(self):
""" Returns True, if the all elections are completed. """
if self.after_pukelsheim:
return self.pukelsheim_completed
for election in self.elections:
if not election.completed:
return False
return True
@property
def last_modified(self):
    """ Returns the last change of the compound itself, its elections
    and the candidates, lists and list connections of these elections.
    """

    def last_change_of(query):
        # run each query only once (the original evaluated first() twice,
        # executing every SELECT a second time)
        row = query.first()
        return row[0] if row else None

    session = object_session(self)
    election_ids = [election.id for election in self.elections]
    changes = [self.last_change, self.last_result_change]

    # Get the last election change
    changes.append(last_change_of(
        session.query(Election.last_change)
        .order_by(desc(Election.last_change))
        .filter(Election.id.in_(election_ids))
    ))

    # Get the last candidate change
    changes.append(last_change_of(
        session.query(Candidate.last_change)
        .order_by(desc(Candidate.last_change))
        .filter(Candidate.election_id.in_(election_ids))
    ))

    # Get the last list connection change
    changes.append(last_change_of(
        session.query(ListConnection.last_change)
        .order_by(desc(ListConnection.last_change))
        .filter(ListConnection.election_id.in_(election_ids))
    ))

    # Get the last list change
    # NOTE: the original filtered on List.election_id == self.id, which
    # compares an election id against the compound id and cannot match
    # the lists of these elections - filter on the election ids instead,
    # consistent with the ListConnection query above.
    changes.append(last_change_of(
        session.query(List.last_change)
        .order_by(desc(List.last_change))
        .filter(List.election_id.in_(election_ids))
    ))

    changes = [change for change in changes if change]
    return max(changes) if changes else None
@property
def last_result_change(self):
    """ Returns the last change of the results of the elections. """
    changes = []
    session = object_session(self)
    election_ids = [election.id for election in self.elections]
    # Get the last election result change
    result = session.query(ElectionResult.last_change)
    result = result.order_by(desc(ElectionResult.last_change))
    result = result.filter(ElectionResult.election_id.in_(election_ids))
    # NOTE(review): first() is evaluated twice per query here and below,
    # executing each SELECT twice - consider hoisting into a variable.
    changes.append(result.first()[0] if result.first() else None)
    # Get the last candidate result change
    ids = session.query(Candidate.id)
    ids = ids.filter(Candidate.election_id.in_(election_ids)).all()
    result = session.query(CandidateResult.last_change)
    result = result.order_by(desc(CandidateResult.last_change))
    # .all() yields 1-tuples; SQLAlchemy accepts those in in_()
    result = result.filter(CandidateResult.candidate_id.in_(ids))
    changes.append(result.first()[0] if result.first() else None)
    # Get the last list result changes
    ids = session.query(List.id)
    ids = ids.filter(List.election_id.in_(election_ids)).all()
    if ids:
        result = session.query(ListResult.last_change)
        result = result.order_by(desc(ListResult.last_change))
        result = result.filter(ListResult.list_id.in_(ids))
        changes.append(result.first()[0] if result.first() else None)
    # Get the last panachage result changes
    # (reuses the list ids queried above; PanachageResult.target stores
    # them as strings, hence the str() conversion)
    if ids:
        ids = [str(id_[0]) for id_ in ids]
        result = session.query(PanachageResult.last_change)
        result = result.order_by(desc(PanachageResult.last_change))
        result = result.filter(
            or_(
                PanachageResult.target.in_(ids),
                PanachageResult.owner == self.id,
            )
        )
        changes.append(result.first()[0] if result.first() else None)
    # Get the last party result changes
    # (party results may be owned by single elections or the compound)
    result = session.query(PartyResult.last_change)
    result = result.order_by(desc(PartyResult.last_change))
    result = result.filter(PartyResult.owner.in_(election_ids + [self.id]))
    changes.append(result.first()[0] if result.first() else None)
    changes = [change for change in changes if change]
    return max(changes) if changes else None
@property
def elected_candidates(self):
    """ Returns the first and last names of the elected candidates. """
    # flatten the per-election lists into a single list
    return [
        candidate
        for election in self.elections
        for candidate in election.elected_candidates
    ]
def get_list_results(self, limit=None, names=None, order_by='votes'):
    """ Returns the aggregated number of mandates and votes of all the
    lists.

    :param limit: maximum number of rows to return (ignored if falsy or
        not positive).
    :param names: optional collection of list names to restrict to.
    :param order_by: column to sort by, 'votes' or 'number_of_mandates'.
    :return: a query yielding (name, number_of_mandates, votes) rows.
    """
    assert order_by in ('votes', 'number_of_mandates')
    session = object_session(self)
    # Query number of mandates; the votes column is zeroed so both
    # partial queries share the same shape for the union below
    mandates = session.query(
        List.name.label('name'),
        func.sum(List.number_of_mandates).label('number_of_mandates'),
        literal_column('0').label('votes')
    )
    mandates = mandates.join(ElectionCompound.associations)
    mandates = mandates.filter(ElectionCompound.id == self.id)
    if names:
        mandates = mandates.filter(List.name.in_(names))
    mandates = mandates.join(Election, List)
    mandates = mandates.group_by(List.name)
    # Query votes; the mandates column is zeroed, see above
    votes = session.query(
        List.name.label('name'),
        literal_column('0').label('number_of_mandates'),
        func.sum(ListResult.votes).label('votes')
    )
    votes = votes.join(ElectionCompound.associations)
    votes = votes.filter(ElectionCompound.id == self.id)
    if names:
        votes = votes.filter(List.name.in_(names))
    votes = votes.join(Election, List, ListResult)
    votes = votes.group_by(List.name)
    # Combine: union both partial results and re-aggregate per list name
    union = mandates.union_all(votes).subquery('union')
    query = session.query(
        union.c.name.label('name'),
        cast(func.sum(union.c.number_of_mandates), Integer).label(
            'number_of_mandates'
        ),
        cast(func.sum(union.c.votes), Integer).label('votes')
    )
    query = query.group_by(union.c.name)
    query = query.order_by(desc(order_by))
    if limit and limit > 0:
        query = query.limit(limit)
    return query
#: may be used to store a link related to this election
related_link = meta_property('related_link')
#: label shown for the related link above
related_link_label = meta_property('related_link_label')
#: may be used to enable/disable the visibility of party strengths
#: (note: stored under the meta key 'party_strengths')
show_party_strengths = meta_property('party_strengths')
#: may be used to enable/disable the visibility of mandate allocation
#: (note: stored under the meta key 'mandate_allocation')
show_mandate_allocation = meta_property('mandate_allocation')
def clear_results(self):
    """ Clears all own results. """
    # only results stored directly on the compound are removed;
    # results of the single elections stay untouched
    session = object_session(self)
    for collection in (self.party_results, self.panachage_results):
        for result in collection:
            session.delete(result)
def export(self, consider_completed=False):
    """ Returns all data connected to this election compound as list with
    dicts.

    This is meant as a base for json/csv/excel exports. The result is
    therefore a flat list of dictionaries with repeating values to avoid
    the nesting of values. Each record in the resulting list is a single
    candidate result for each political entity. Party results are not
    included in the export (since they are not really connected with the
    lists).

    If consider completed, status for candidate_elected and
    absolute_majority will be set to None if election is not completed.
    """
    common = OrderedDict()
    # first guarantee a title column for every locale the application
    # uses, then overwrite with stripped values for every locale that
    # actually has a translation (which may include further locales)
    for locale in election_day_i18n_used_locales:
        common[f'compound_title_{locale}'] = \
            self.title_translations.get(locale, '')
    for locale, title in self.title_translations.items():
        common[f'compound_title_{locale}'] = (title or '').strip()
    common['compound_date'] = self.date.isoformat()
    common['compound_mandates'] = self.number_of_mandates
    rows = []
    # prepend the common compound columns to every election export row
    for election in self.elections:
        for row in election.export(consider_completed):
            rows.append(
                OrderedDict(list(common.items()) + list(row.items()))
            )
return rows | 0.835215 | 0.196479 |
import xlrd
import time
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from viaReservation.test_genericFunctions import generic
class test_flightReservationclass:
    """Page object for the flight search page of the booking site."""

    @staticmethod
    def flight_reservation(driver, logger, TCName, sheet1, flight_source, flight_destination):
        """Fill in source, destination, date and passengers, then search.

        :param driver: selenium WebDriver instance
        :param logger: logger used for test reporting
        :param TCName: test case name (row key in the Excel sheet)
        :param sheet1: xlrd sheet holding the test data
        :param flight_source: column name of the source airport
        :param flight_destination: column name of the destination airport
        :raises Exception: re-raises anything that fails during the flow
        """
        source_locator=(By.XPATH, "//*[@id='source']")
        destination_locator=(By.XPATH, "//*[@id='destination']")
        source_drpdwn_locator= "//*[@class='ui-menu-item']//following::*[@class='name'][1]"
        destination_drpn_locator="//*[@class='ui-menu-item']//following::*[@class='name'][2]"
        # NOTE(review): this locator hard-codes month '9' and day '29' -
        # the test breaks once that date is in the past; consider
        # parameterizing the journey date.
        journey_date_locator="//div[@class='searchbox recentSearch searchBoxHome']//following::div[@class='container']//following::div[@class='content']//following::form[@class='flightSearchForm']//following::div[@class='panel']//following::div[@id='round-trip-panel']//following::div[@class='element']//following::div[@id='depart-cal']//following::div[@data-month='9']//following::div[@class='vc-month-box']//following::div[@class='vc-row'][5]//following::div[@data-date='29']"
        person_locator=(By.XPATH, "//div[@class='plus']")
        searchFlight_locator=(By.XPATH, "//*[@id='search-flight-btn']")
        # NOTE(review): absolute XPath is brittle against layout changes
        arrow_locator="/html/body/div[4]/div[3]/div/form/div[4]/div[1]/div[4]/div[1]/div[4]/div[1]/span[3]"
        try:
            # read source/destination airports from the Excel test data
            flightSrc=generic.fetchValueFromExcel(sheet1, TCName, flight_source)
            flightDest=generic.fetchValueFromExcel(sheet1, TCName, flight_destination)
            source=driver.find_element(*source_locator)
            source.send_keys(flightSrc)
            # pick the first autocomplete suggestion
            source_element = WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.XPATH, source_drpdwn_locator)))
            source_element.click()
            destination=driver.find_element(*destination_locator)
            destination.send_keys(flightDest)
            dest_element = WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.XPATH, destination_drpn_locator)))
            dest_element.click()
            '''source.send_keys(Keys.ARROW_DOWN)
            source.send_keys(Keys.ENTER)'''
            # open the calendar and select the (hard-coded) journey date
            arrow = WebDriverWait(driver, 20).until(
                EC.presence_of_element_located((By.XPATH, arrow_locator)))
            arrow.click()
            element = WebDriverWait(driver, 20).until(
                EC.presence_of_element_located((By.XPATH, journey_date_locator)))
            element.click()
            # add one passenger and launch the search
            driver.find_element(*person_locator).click()
            driver.find_element(*searchFlight_locator).click()
            # NOTE(review): fixed sleep - prefer an explicit wait on the
            # results container
            time.sleep(20)
            generic.captureScreenshot(driver, TCName )
            # NOTE(review): success is reported via logger.error - this
            # probably should be logger.info
            logger.error("list of flights load is successful")
        except Exception as e:
            logger.error("list of flights load is unsuccessful")
raise e | Automation project demo/Page Object Model/flightReservationPage.py | import xlrd
import time
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from viaReservation.test_genericFunctions import generic
class test_flightReservationclass:
@staticmethod
def flight_reservation(driver, logger, TCName, sheet1, flight_source, flight_destination):
source_locator=(By.XPATH, "//*[@id='source']")
destination_locator=(By.XPATH, "//*[@id='destination']")
source_drpdwn_locator= "//*[@class='ui-menu-item']//following::*[@class='name'][1]"
destination_drpn_locator="//*[@class='ui-menu-item']//following::*[@class='name'][2]"
journey_date_locator="//div[@class='searchbox recentSearch searchBoxHome']//following::div[@class='container']//following::div[@class='content']//following::form[@class='flightSearchForm']//following::div[@class='panel']//following::div[@id='round-trip-panel']//following::div[@class='element']//following::div[@id='depart-cal']//following::div[@data-month='9']//following::div[@class='vc-month-box']//following::div[@class='vc-row'][5]//following::div[@data-date='29']"
person_locator=(By.XPATH, "//div[@class='plus']")
searchFlight_locator=(By.XPATH, "//*[@id='search-flight-btn']")
arrow_locator="/html/body/div[4]/div[3]/div/form/div[4]/div[1]/div[4]/div[1]/div[4]/div[1]/span[3]"
try:
flightSrc=generic.fetchValueFromExcel(sheet1, TCName, flight_source)
flightDest=generic.fetchValueFromExcel(sheet1, TCName, flight_destination)
source=driver.find_element(*source_locator)
source.send_keys(flightSrc)
source_element = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.XPATH, source_drpdwn_locator)))
source_element.click()
destination=driver.find_element(*destination_locator)
destination.send_keys(flightDest)
dest_element = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.XPATH, destination_drpn_locator)))
dest_element.click()
'''source.send_keys(Keys.ARROW_DOWN)
source.send_keys(Keys.ENTER)'''
arrow = WebDriverWait(driver, 20).until(
EC.presence_of_element_located((By.XPATH, arrow_locator)))
arrow.click()
element = WebDriverWait(driver, 20).until(
EC.presence_of_element_located((By.XPATH, journey_date_locator)))
element.click()
driver.find_element(*person_locator).click()
driver.find_element(*searchFlight_locator).click()
time.sleep(20)
generic.captureScreenshot(driver, TCName )
logger.error("list of flights load is successful")
except Exception as e:
logger.error("list of flights load is unsuccessful")
raise e | 0.210848 | 0.057203 |
import sys
import urllib
import utils
import json
import argparse
import urllib.request
from rdflib import URIRef, BNode, Literal, Graph
import glob
def parse_args(args=sys.argv[1:]):
    """ Get the parsed arguments specified on this script.

    Note: the default binds sys.argv at import time; this mirrors the
    original behaviour.
    """
    parser = argparse.ArgumentParser(description="")
    parser.add_argument(
        'site_name', action='store', type=str,
        help='Site name. ex: hyakki')
    parsed = parser.parse_args(args)
    return parsed
def ld_generator(config):
    """ Collect the items of the configured item sets and write them as a
    single JSON array to the site's metadata/data.json.

    Items are grouped and ordered by their 'uterms:sort' value; items
    without a sort value come first (empty key).

    :param config: site configuration with 'site_name' and 'item_set_id'
        (a comma separated list of Omeka item set ids).
    """
    site_name = config["site_name"]
    arg_item_set_id = config["item_set_id"]
    output_path = "../docs/collections/" + site_name + "/metadata/data.json"
    collection = []
    item_set_arr = arg_item_set_id.split(",")
    files = glob.glob("../docs/api/items/*.json")
    targets = {}
    for file in files:
        with open(file) as f:
            df = json.load(f)
        if "o:item_set" not in df:
            continue
        item_set_objs = df["o:item_set"]
        for obj in item_set_objs:
            item_set_id = str(obj["o:id"])
            if item_set_id in item_set_arr:
                sort = ""
                if "uterms:sort" in df:
                    sort = df["uterms:sort"][0]["@value"]
                if sort not in targets:
                    targets[sort] = []
                # avoid duplicates when an item belongs to several of
                # the requested item sets
                if df not in targets[sort]:
                    targets[sort].append(df)
    for key in sorted(targets):
        arr = targets[key]
        for obj in arr:
            collection.append(obj)
    # context manager so the file is flushed/closed even on errors (the
    # original left the handle open); utf-8 is explicit because
    # ensure_ascii=False may emit non-ASCII characters
    with open(output_path, 'w', encoding='utf-8') as fw:
        json.dump(collection, fw, ensure_ascii=False, indent=4,
                  sort_keys=True, separators=(',', ': '))
    # Disabled rdflib serializations, kept for reference:
    # ld_str = json.dumps(collection)
    # g = Graph().parse(data=ld_str, format='json-ld')
    # g.serialize(format='n3', destination=output_path.replace(".json", ".n3"))
    # g.serialize(format='nt', destination=output_path.replace(".json", ".nt"))
    # g.serialize(format='turtle', destination=output_path.replace(".json", ".ttl"))
    # g.serialize(format='pretty-xml', destination=output_path.replace(".json", ".rdf"))
if __name__ == "__main__":
    # CLI entry point: look up the site configuration by name and
    # generate its linked-data file.
    args = parse_args()
    key = args.site_name
    site_obj = utils.get_site_config(key)
ld_generator(site_obj) | src/LdGenerator.py | import sys
import urllib
import utils
import json
import argparse
import urllib.request
from rdflib import URIRef, BNode, Literal, Graph
import glob
def parse_args(args=sys.argv[1:]):
""" Get the parsed arguments specified on this script.
"""
parser = argparse.ArgumentParser(description="")
parser.add_argument(
'site_name',
action='store',
type=str,
help='Site name. ex: hyakki')
return parser.parse_args(args)
def ld_generator(config):
site_name = config["site_name"]
arg_item_set_id = config["item_set_id"]
output_path = "../docs/collections/" + site_name + "/metadata/data.json"
collection = []
item_set_arr = arg_item_set_id.split(",")
files = glob.glob("../docs/api/items/*.json")
targets = {}
for file in files:
with open(file) as f:
df = json.load(f)
if "o:item_set" not in df:
continue
item_set_objs = df["o:item_set"]
for obj in item_set_objs:
item_set_id = str(obj["o:id"])
if item_set_id in item_set_arr:
sort = ""
if "uterms:sort" in df:
sort = df["uterms:sort"][0]["@value"]
if sort not in targets:
targets[sort] = []
if df not in targets[sort]:
targets[sort].append(df)
for key in sorted(targets):
arr = targets[key]
for obj in arr:
collection.append(obj)
fw = open(output_path, 'w')
json.dump(collection, fw, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ': '))
# ld_str = json.dumps(collection)
# g = Graph().parse(data=ld_str, format='json-ld')
# g.serialize(format='n3', destination=output_path.replace(".json", ".n3"))
# g.serialize(format='nt', destination=output_path.replace(".json", ".nt"))
# g.serialize(format='turtle', destination=output_path.replace(".json", ".ttl"))
# g.serialize(format='pretty-xml', destination=output_path.replace(".json", ".rdf"))
if __name__ == "__main__":
args = parse_args()
key = args.site_name
site_obj = utils.get_site_config(key)
ld_generator(site_obj) | 0.344443 | 0.096025 |
from collections import namedtuple
import numpy as np
from scipy.interpolate import PPoly
import cvxopt
cvxopt.solvers.options['show_progress'] = False
cvxopt.solvers.options['maxiters'] = 500 # seems to reduce errors (unconfirmed)
from likelihood_funcs import *
'''
TODO
- improve "smoothness" input parameter
- change to operate on a [0,1] scale (0 = exact interpolation, 1 = uniform distribution)
- characterize slope with b_mid, instead of b_0
- reduces the repetition of calculations -> reduce errors
- reduces objective matrix non-sparsity
- format tests more professionally
- treat repeat samples as discrete samples
- i.e., make cdf discontinuous around X_i
- i.e., add dirac delta portions to the pdf
'''
def expand_vars(bmid_c, X):
    """Expand the optimizer variables into full spline coefficients.

    :param bmid_c: array whose last axis is [b_mid, c_0, ..., c_{n-1}]
        (slope at the middle knot followed by the knot values).
    :param X: sample locations, last axis of length n.
    :return: namedtuple (a_diffX, b, c); b appears to hold the per-knot
        slopes built outwards from b_mid and a_diffX the half slope
        differences (a_i * dX_i) - TODO confirm against the derivation.
    """
    n = X.shape[-1]
    n_mid = n // 2
    bmid_c = np.asarray(bmid_c)
    # split off the middle-knot slope from the knot values
    b_mid, c = np.split(bmid_c, (1,), axis=-1)
    alt_sign = (-1) ** np.arange(n)
    # secant slopes between consecutive knots
    diffc_diffX = np.diff(c, axis=-1) / np.diff(X, axis=-1)
    bpart_lower, bpart_upper = np.array_split(
        -2 * alt_sign[:-1] * diffc_diffX,
        (n_mid,), axis=-1)
    # telescoping sums build the slopes outwards from the middle knot
    b_cumdiff = np.concatenate([
        -bpart_lower[..., ::-1].cumsum(-1)[..., ::-1],
        np.zeros_like(b_mid),
        np.cumsum(bpart_upper, -1),
    ], axis=-1)
    b = alt_sign * (b_cumdiff + alt_sign[n_mid]*b_mid)
    #a_diffX = diffc_diffX - b[..., :-1]
    a_diffX = np.diff(b, axis=-1) / 2
    return namedtuple("SplineVars", "a_diffX b c")(a_diffX, b, c)
def expand_vars_lc(X):
    """Linear-coefficient version of expand_vars.

    Feeds the identity matrix through the same expansion, so each
    returned row expresses the corresponding spline coefficient as a
    linear combination of the optimizer variables
    [b_mid, c_0, ..., c_{n-1}].
    """
    n = X.shape[-1]
    n_mid = n // 2
    # one-hot row per optimizer variable
    bmid_c = np.diagflat(np.ones(n+1, dtype=np.int64))
    b_mid, c = np.split(bmid_c, (1,), axis=0)
    diffc_diffX = np.diff(c, axis=0) / np.diff(X)[:, np.newaxis]
    alt_sign = (-1) ** np.arange(n)[:, np.newaxis]
    bpart_lower, bpart_upper = np.array_split(
        -2 * alt_sign[:-1] * diffc_diffX,
        (n_mid,), axis=0)
    # same telescoping construction as expand_vars, along axis 0
    b_cumdiff = np.concatenate([
        -bpart_lower[::-1].cumsum(0)[::-1],
        np.zeros_like(b_mid),
        np.cumsum(bpart_upper, 0),
    ], axis=0)
    b = alt_sign * (b_cumdiff + alt_sign[n_mid]*b_mid)
    a_diffX = np.diff(b, axis=0) / 2
    return namedtuple("SplineVars", "a_diffX b c")(a_diffX, b, c)
def make_obj_scale(X, smoothness_factor=1):
    """Scaling factors for the two objective terms.

    :param X: 1-D sample array; X.shape[-1] must be a plain Python int
        (n.bit_length() is used below).
    :param smoothness_factor: weight of the curvature penalty relative
        to the likelihood term.
    :return: namedtuple (scale_a, scale_c)
    """
    n = X.shape[-1]
    # curvature weight, normalized by the sampled range
    scale_a = smoothness_factor / (X[-1] - X[0])
    # d2dp2_rlhood comes from likelihood_funcs; the sign flip assumes it
    # is non-positive (likelihood curvature) - TODO confirm
    scale_c = -d2dp2_rlhood(n, np.arange(n)) / (n.bit_length() * n)
    return namedtuple("ObjectiveScales", "scale_a scale_c")(scale_a, scale_c)
def make_P_q(X, scale_a=np.ones(1), scale_e=np.ones(1), autoscale=True):
    """Build the quadratic objective (P, q) of the QP over the linear
    coefficients produced by expand_vars_lc.

    :param X: sorted 1-D sample array.
    :param scale_a: per-interval weight of the curvature term.
    :param scale_e: per-sample weight of the likelihood term.
    :param autoscale: rescale P and q into a centered exponent range to
        help the numeric solver.
    :return: namedtuple (P, q) as numpy matrices (column vector q).
    """
    n = X.shape[-1]
    a_diffX, b, c = expand_vars_lc(X)
    # outer product of a coefficient row with itself -> quadratic form
    outer_prod = lambda col: col * col[:,np.newaxis]
    P_a = np.sum(
        np.apply_along_axis(outer_prod, -1, a_diffX)
        * ((scale_a / np.diff(X))[:, np.newaxis, np.newaxis]),
        axis=0)
    # NOTE(review): np.zeros(None) relies on numpy accepting a None
    # shape as a scalar zero - confirm on the numpy version in use
    q_a = np.zeros(None)
    P_c = np.sum(
        np.apply_along_axis(outer_prod, -1, c)
        * (scale_e[:, np.newaxis, np.newaxis]),
        axis=0)
    q_c = np.sum(
        -(scale_e * np.arange(1, 2*n, 2) / n)[:, np.newaxis] * c,
        axis=0)
    P = 2*(P_a + P_c)
    q = q_a + q_c
    if autoscale:
        min_val = min(
            np.min(np.abs(P[P != 0])),
            np.min(np.abs(q[q != 0]))
        )
        max_val = max(
            np.max(np.abs(P)),
            np.max(np.abs(q))
        )
        scale = 2 ** -(
            # centers exponent range on zero
            np.mean((np.frexp(min_val)[1], np.frexp(max_val)[1]))
            # biases range to account for sums of n values
            #+ n.bit_length() / 2
        )
        P = P * scale
        q = q * scale
    # NOTE(review): np.asmatrix is deprecated in modern NumPy; verify
    # whether plain ndarrays would suffice for the cvxopt conversion
    res = namedtuple("QuadProgramObj", "P q")(
        np.asmatrix(P),
        np.asmatrix(q).T
    )
    return res
def make_G_h(X):
    """Inequality constraints G x <= h of the QP: all slopes b_i >= 0
    (monotone cdf), c_0 >= 0 and c_{n-1} <= 1.
    """
    n = X.shape[-1]  # (unused, kept for parity with the other builders)
    a_diffX, b, c = expand_vars_lc(X)
    # -b x <= 0  <=>  b_i >= 0
    G_b = -b
    h_b = np.zeros(b.shape[0])
    # -c_0 <= 0
    G_c0 = -c[:1]
    h_c0 = np.zeros(1)
    # c_{n-1} <= 1
    G_cnm1 = c[-1:]
    h_cnm1 = np.ones(1)
    return namedtuple("QuadProgramBounds", "G h")(
        np.asmatrix(np.concatenate((G_b, G_c0, G_cnm1), axis=0)),
        np.asmatrix(np.concatenate((h_b, h_c0, h_cnm1), axis=0)).T,
    )
def make_A_b(X):
    """Equality constraints A x = b of the QP.

    There are none, so both arrays have zero rows; the column count
    matches the n+1 optimizer variables [b_mid, c_0..c_{n-1}].
    """
    n_vars = X.shape[-1] + 1
    Bounds = namedtuple("QuadProgramBounds", "A b")
    return Bounds(np.zeros((0, n_vars)), np.zeros(0))
def bmid_c_init_state(X):
    """Initial optimizer state: the exact-interpolation spline (the
    e^2 == 0 case), i.e. knot values c_i = (2i+1)/(2n) with a central
    difference slope at the middle sample.
    """
    # Alternative start (straight line through the sample range, the
    # a^2 == 0 case), kept for reference:
    #   b_mid = ((n-1) / n) / (X[-1] - X[0])
    #   c = (.5 / n) + b_mid * (X - X[0])
    n = len(X)
    mid = n // 2
    slope_mid = (2 / n) / (X[mid + 1] - X[mid - 1])
    knot_values = (2 * np.arange(n) + 1) / (2 * n)
    return np.concatenate(([slope_mid], knot_values))
def clean_optimizer_results(bmid_c_opt, X):
    """Expand the optimizer solution into PPoly-ready coefficient rows
    (second/first derivative and value at each knot) and extend the knot
    vector so the spline extrapolates to 0 on the left and 1 on the
    right.

    :param bmid_c_opt: optimizer solution vector [b_mid, c_0..c_{n-1}]
    :param X: sorted sample locations
    :return: tuple (X, P_X, dP_X, d2P_X) over the extended knot vector
    """
    n = len(X)  # (unused, kept from the original)
    bmid_c_opt = np.squeeze(np.array(bmid_c_opt))
    d2P_X, dP_X, P_X = expand_vars(bmid_c_opt, X)
    # expand_vars returns a_i * dX_i; divide by the interval widths to
    # recover the per-interval quadratic coefficient
    d2P_X = d2P_X / np.diff(X)
    # Add leading/trailing endpoint regions. I.e., adds:
    # 1) knots X0, Xnp1 that smoothly join the curve to the
    #    constant-value regions
    #       P(x) = 0 as x -> -inf,
    #       P(x) = 1 as x -> inf
    # 2) 'dead knots' Xm1, Xnp2 with zero-valued derivatives & P = {0,1},
    #    from which PPoly can extrapolate for x values outside of the
    #    sampled region
    d2P_X = np.concatenate((
        np.zeros(1),
        dP_X[:1]**2 / (4*P_X[0]),
        d2P_X,
        -dP_X[-1:]**2 / (4*(1-P_X[-1])),
        np.zeros(1)
    ))
    X0 = X[:1] - 2 * P_X[0] / dP_X[0]
    Xnp1 = X[-1:] + 2 * (1-P_X[-1]) / dP_X[-1]
    X = np.concatenate((
        X0 - (X[0] - X0),  # dead knot - included for extrapolation to -inf
        X0,
        X,
        Xnp1,
        Xnp1 + (Xnp1 - X[-1]),  # dead knot - included for extrapolation to inf
    ))
    P_X = np.concatenate((np.zeros(2), P_X, np.ones(1)))
    dP_X = np.concatenate((np.zeros(2), dP_X, np.zeros(1)))
    return X, P_X, dP_X, d2P_X
def cdf_approx(X):
    """
    Generates a ppoly spline to approximate the cdf of a random variable,
    from a 1-D array of i.i.d. samples thereof.

    Args:
        X: a collection of i.i.d. samples from a random variable.

    Returns:
        scipy.interpolate.PPoly object, estimating the cdf of the random
        variable.

    Raises:
        AssertionError: if X contains duplicate values.
    """
    # Copy (rather than np.asarray) so the in-place sort below can never
    # mutate the caller's array.
    X = np.array(X)
    if not (np.diff(X) > 0).all():
        X.sort()
    diff_X = np.diff(X)
    assert(diff_X.all())  # avoids case of duplicate X-values
    scale_axi, scale_ei = make_obj_scale(X)
    P, q = make_P_q(X, scale_a=scale_axi, scale_e=scale_ei)
    G, h = make_G_h(X)
    # Equality constraints (make_A_b) are empty and therefore omitted;
    # the unused warm-start state (bmid_c_init_state) was dropped since
    # cvxopt's qp interface here takes no initial point.
    qp_res = cvxopt.solvers.qp(
        cvxopt.matrix(P),
        cvxopt.matrix(q),
        cvxopt.matrix(G),
        cvxopt.matrix(h),
    )
    X, P_X, dP_X, d2P_X = clean_optimizer_results(np.array(qp_res['x']), X)
    return PPoly.construct_fast(np.stack((d2P_X, dP_X, P_X)), X, extrapolate=True)
if __name__ == "__main__":
    # smoke-test suite lives in a separate module
    from cdf_est_CVXOPT_tests import run_all_tests
run_all_tests() | cdf_est_CVXOPT.py | from collections import namedtuple
import numpy as np
from scipy.interpolate import PPoly
import cvxopt
cvxopt.solvers.options['show_progress'] = False
cvxopt.solvers.options['maxiters'] = 500 # seems to reduce errors (unconfirmed)
from likelihood_funcs import *
'''
TODO
- improve "smoothness" input parameter
- change to operate on a [0,1] scale (0 = exact interpolation, 1 = uniform distribution)
- characterize slope with b_mid, instead of b_0
- reduces the repetition of calculations -> reduce errors
- reduces objective matrix non-sparsity
- format tests more professionally
- treat repeat samples as discrete samples
- i.e., make cdf discontinuous around X_i
- i.e., add dirac delta portions to the pdf
'''
def expand_vars(bmid_c, X):
n = X.shape[-1]
n_mid = n // 2
bmid_c = np.asarray(bmid_c)
b_mid, c = np.split(bmid_c, (1,), axis=-1)
alt_sign = (-1) ** np.arange(n)
diffc_diffX = np.diff(c, axis=-1) / np.diff(X, axis=-1)
bpart_lower, bpart_upper = np.array_split(
-2 * alt_sign[:-1] * diffc_diffX,
(n_mid,), axis=-1)
b_cumdiff = np.concatenate([
-bpart_lower[..., ::-1].cumsum(-1)[..., ::-1],
np.zeros_like(b_mid),
np.cumsum(bpart_upper, -1),
], axis=-1)
b = alt_sign * (b_cumdiff + alt_sign[n_mid]*b_mid)
#a_diffX = diffc_diffX - b[..., :-1]
a_diffX = np.diff(b, axis=-1) / 2
return namedtuple("SplineVars", "a_diffX b c")(a_diffX, b, c)
def expand_vars_lc(X):
n = X.shape[-1]
n_mid = n // 2
bmid_c = np.diagflat(np.ones(n+1, dtype=np.int64))
b_mid, c = np.split(bmid_c, (1,), axis=0)
diffc_diffX = np.diff(c, axis=0) / np.diff(X)[:, np.newaxis]
alt_sign = (-1) ** np.arange(n)[:, np.newaxis]
bpart_lower, bpart_upper = np.array_split(
-2 * alt_sign[:-1] * diffc_diffX,
(n_mid,), axis=0)
b_cumdiff = np.concatenate([
-bpart_lower[::-1].cumsum(0)[::-1],
np.zeros_like(b_mid),
np.cumsum(bpart_upper, 0),
], axis=0)
b = alt_sign * (b_cumdiff + alt_sign[n_mid]*b_mid)
a_diffX = np.diff(b, axis=0) / 2
return namedtuple("SplineVars", "a_diffX b c")(a_diffX, b, c)
def make_obj_scale(X, smoothness_factor=1):
n = X.shape[-1]
scale_a = smoothness_factor / (X[-1] - X[0])
scale_c = -d2dp2_rlhood(n, np.arange(n)) / (n.bit_length() * n)
return namedtuple("ObjectiveScales", "scale_a scale_c")(scale_a, scale_c)
def make_P_q(X, scale_a=np.ones(1), scale_e=np.ones(1), autoscale=True):
n = X.shape[-1]
a_diffX, b, c = expand_vars_lc(X)
outer_prod = lambda col: col * col[:,np.newaxis]
P_a = np.sum(
np.apply_along_axis(outer_prod, -1, a_diffX)
* ((scale_a / np.diff(X))[:, np.newaxis, np.newaxis]),
axis=0)
q_a = np.zeros(None)
P_c = np.sum(
np.apply_along_axis(outer_prod, -1, c)
* (scale_e[:, np.newaxis, np.newaxis]),
axis=0)
q_c = np.sum(
-(scale_e * np.arange(1, 2*n, 2) / n)[:, np.newaxis] * c,
axis=0)
P = 2*(P_a + P_c)
q = q_a + q_c
if autoscale:
min_val = min(
np.min(np.abs(P[P != 0])),
np.min(np.abs(q[q != 0]))
)
max_val = max(
np.max(np.abs(P)),
np.max(np.abs(q))
)
scale = 2 ** -(
# centers exponent range on zero
np.mean((np.frexp(min_val)[1], np.frexp(max_val)[1]))
# biases range to account for sums of n values
#+ n.bit_length() / 2
)
P = P * scale
q = q * scale
res = namedtuple("QuadProgramObj", "P q")(
np.asmatrix(P),
np.asmatrix(q).T
)
return res
def make_G_h(X):
n = X.shape[-1]
a_diffX, b, c = expand_vars_lc(X)
G_b = -b
h_b = np.zeros(b.shape[0])
G_c0 = -c[:1]
h_c0 = np.zeros(1)
G_cnm1 = c[-1:]
h_cnm1 = np.ones(1)
return namedtuple("QuadProgramBounds", "G h")(
np.asmatrix(np.concatenate((G_b, G_c0, G_cnm1), axis=0)),
np.asmatrix(np.concatenate((h_b, h_c0, h_cnm1), axis=0)).T,
)
def make_A_b(X):
return namedtuple("QuadProgramBounds", "A b")(
np.zeros((0, X.shape[-1]+1)),
np.zeros(0),
)
def bmid_c_init_state(X):
n = len(X)
n_mid = n // 2
'''
# straight line from first point to last point (when a^2 == 0)
b_mid = ((n-1) / n) / (X[-1] - X[0])
c = (.5 / n) + b_mid * (X - X[0])
return np.concatenate(([b_mid], c))
'''
# interpolation through all points (when e^2 == 0)
b_mid = (2/n) / (X[n_mid+1] - X[n_mid-1])
c = np.arange(1,2*n,2) / (2*n)
return np.concatenate(([b_mid], c))
#'''
def clean_optimizer_results(bmid_c_opt, X):
n = len(X)
bmid_c_opt = np.squeeze(np.array(bmid_c_opt))
d2P_X, dP_X, P_X = expand_vars(bmid_c_opt, X)
d2P_X = d2P_X / np.diff(X)
# Add leading/trailing endpoint regions. I.e., adds:
# 1) knots X0, Xnp1 that smoothly joins curve to the constant-value regions
# P(x) = 0 as x -> -inf,
# P(x) = 1 as x -> inf
# 2) 'dead knots' Xm1, Xnp2 with zero-valued derivatives & P = {0,1},
# from which PPoly can extrapolate for x values outside of the
# sampled region
d2P_X = np.concatenate((
np.zeros(1),
dP_X[:1]**2 / (4*P_X[0]),
d2P_X,
-dP_X[-1:]**2 / (4*(1-P_X[-1])),
np.zeros(1)
))
X0 = X[:1] - 2 * P_X[0] / dP_X[0]
Xnp1 = X[-1:] + 2 * (1-P_X[-1]) / dP_X[-1]
X = np.concatenate((
X0 - (X[0] - X0), # dead knot - included for extrapolation to -inf
X0,
X,
Xnp1,
Xnp1 + (Xnp1 - X[-1]), # dead knot - included for extrapolation to inf
))
P_X = np.concatenate((np.zeros(2), P_X, np.ones(1)))
dP_X = np.concatenate((np.zeros(2), dP_X, np.zeros(1)))
return X, P_X, dP_X, d2P_X
def cdf_approx(X): #, smoothness_factor=1):
"""
Generates a ppoly spline to approximate the cdf of a random variable,
from a 1-D array of i.i.d. samples thereof.
Args:
X: a collection of i.i.d. samples from a random variable.
args, kwargs: any options to forward to the cvxopt qp solver
Returns:
scipy.interpolate.PPoly object, estimating the cdf of the random variable.
Raises:
TODO
"""
# Pre-format input as ordered numpy array
X = np.asarray(X)
diff_X = np.diff(X)
if not (diff_X > 0).all():
X.sort()
diff_X = np.diff(X)
assert(diff_X.all()) # avoids case of duplicate X-values
n = len(X)
scale_axi, scale_ei = make_obj_scale(X)#, smoothness_factor)
P, q = make_P_q(X, scale_a=scale_axi, scale_e=scale_ei)
G, h = make_G_h(X)
#A, b = make_A_b(X) # simply unnecessary
bmid_c_init = bmid_c_init_state(X)
qp_res = cvxopt.solvers.qp(
cvxopt.matrix(P),
cvxopt.matrix(q),
cvxopt.matrix(G),
cvxopt.matrix(h),
#cvxopt.matrix(A),
#cvxopt.matrix(b),
#*args, **kwargs
)
X, P_X, dP_X, d2P_X = clean_optimizer_results(np.array(qp_res['x']), X)
return PPoly.construct_fast(np.stack((d2P_X, dP_X, P_X)), X, extrapolate=True)
if __name__ == "__main__":
from cdf_est_CVXOPT_tests import run_all_tests
run_all_tests() | 0.374791 | 0.539105 |
# pyright: reportMissingImports=false
# pylint: disable=import-error
import numpy as np
import tensorflow as tf
from ..utils.tensor import from_float32_to_uint8, from_uint8_to_float32
# pylint: enable=import-error
__email__ = "<EMAIL>"
__author__ = "Deezer Research"
__license__ = "MIT License"
def to_n_channels(waveform: tf.Tensor, n_channels: int) -> tf.Tensor:
    """
    Convert a waveform to n_channels by removing or duplicating channels if
    needed (in tensorflow).

    Parameters:
        waveform (tensorflow.Tensor):
            Waveform to transform; axis 1 is indexed as the channel axis.
        n_channels (int):
            Number of channel to reshape waveform in.

    Returns:
        tensorflow.Tensor:
            Reshaped waveform.
    """
    # Enough channels: truncate. Too few: tile the existing channels
    # n_channels times and truncate the tiling to exactly n_channels.
    return tf.cond(
        tf.shape(waveform)[1] >= n_channels,
        true_fn=lambda: waveform[:, :n_channels],
        false_fn=lambda: tf.tile(waveform, [1, n_channels])[:, :n_channels],
    )
def to_stereo(waveform: np.ndarray) -> np.ndarray:
    """
    Convert a waveform to stereo by duplicating if mono, or truncating
    if too many channels.

    Parameters:
        waveform (numpy.ndarray):
            a `(N, d)` numpy array.

    Returns:
        numpy.ndarray:
            A stereo waveform as a `(N, 2)` numpy array (the input is
            returned unchanged when it is already stereo).
    """
    channels = waveform.shape[1]
    if channels == 1:
        # mono: duplicate the single channel
        return np.repeat(waveform, 2, axis=-1)
    if channels > 2:
        # multichannel: keep the first two channels
        return waveform[:, :2]
    return waveform
def gain_to_db(tensor: tf.Tensor, espilon: float = 10e-10) -> tf.Tensor:
    """
    Convert from gain to decibel in tensorflow.

    Parameters:
        tensor (tensorflow.Tensor):
            Tensor to convert.
        espilon (float):
            Lower clipping bound that avoids log(0). (The parameter name
            is a typo of 'epsilon', kept as-is so keyword callers are not
            broken.)

    Returns:
        tensorflow.Tensor:
            Converted tensor.
    """
    # 20 / ln(10) * ln(x) == 20 * log10(x)
    return 20.0 / np.log(10) * tf.math.log(tf.maximum(tensor, espilon))
def db_to_gain(tensor: tf.Tensor) -> tf.Tensor:
    """
    Convert from decibel to gain in tensorflow (inverse of gain_to_db).

    Parameters:
        tensor (tensorflow.Tensor):
            Tensor to convert.

    Returns:
        tensorflow.Tensor:
            Converted tensor.
    """
    # gain = 10 ** (db / 20)
    exponent = tensor / 20.0
    return tf.pow(10.0, exponent)
def spectrogram_to_db_uint(
    spectrogram: tf.Tensor, db_range: float = 100.0, **kwargs
) -> tf.Tensor:
    """
    Encodes given spectrogram into uint8 using decibel scale.

    Parameters:
        spectrogram (tensorflow.Tensor):
            Spectrogram to be encoded as TF float tensor.
        db_range (float):
            Range in decibel for encoding.
        **kwargs:
            Forwarded to from_float32_to_uint8.

    Returns:
        tensorflow.Tensor:
            Encoded decibel spectrogram as `uint8` tensor.
    """
    db_spectrogram: tf.Tensor = gain_to_db(spectrogram)
    max_db_spectrogram: tf.Tensor = tf.reduce_max(db_spectrogram)
    # clip everything more than db_range below the maximum so the uint8
    # quantization only has to cover db_range decibels
    db_spectrogram: tf.Tensor = tf.maximum(
        db_spectrogram, max_db_spectrogram - db_range
    )
    return from_float32_to_uint8(db_spectrogram, **kwargs)
def db_uint_spectrogram_to_gain(
    db_uint_spectrogram: tf.Tensor, min_db: tf.Tensor, max_db: tf.Tensor
) -> tf.Tensor:
    """
    Decode spectrogram from uint8 decibel scale (inverse of
    spectrogram_to_db_uint).

    Parameters:
        db_uint_spectrogram (tensorflow.Tensor):
            Decibel spectrogram to decode.
        min_db (tensorflow.Tensor):
            Lower bound limit for decoding.
        max_db (tensorflow.Tensor):
            Upper bound limit for decoding.

    Returns:
        tensorflow.Tensor:
            Decoded spectrogram as `float32` tensor.
    """
    # map uint8 back into the [min_db, max_db] decibel range, then undo
    # the decibel scale
    db_spectrogram: tf.Tensor = from_uint8_to_float32(
        db_uint_spectrogram, min_db, max_db
    )
    return db_to_gain(db_spectrogram)
# pylint: disable=import-error
import numpy as np
import tensorflow as tf
from ..utils.tensor import from_float32_to_uint8, from_uint8_to_float32
# pylint: enable=import-error
__email__ = "<EMAIL>"
__author__ = "Deezer Research"
__license__ = "MIT License"
def to_n_channels(waveform: tf.Tensor, n_channels: int) -> tf.Tensor:
"""
Convert a waveform to n_channels by removing or duplicating channels if
needed (in tensorflow).
Parameters:
waveform (tensorflow.Tensor):
Waveform to transform.
n_channels (int):
Number of channel to reshape waveform in.
Returns:
tensorflow.Tensor:
Reshaped waveform.
"""
return tf.cond(
tf.shape(waveform)[1] >= n_channels,
true_fn=lambda: waveform[:, :n_channels],
false_fn=lambda: tf.tile(waveform, [1, n_channels])[:, :n_channels],
)
def to_stereo(waveform: np.ndarray) -> np.ndarray:
    """
    Force a waveform to two channels: duplicate a mono signal, truncate
    extra channels, and pass stereo input through unchanged.
    Parameters:
        waveform (numpy.ndarray):
            a `(N, d)` numpy array.
    Returns:
        numpy.ndarray:
            A stereo waveform as a `(N, 2)` numpy array.
    """
    channel_count = waveform.shape[1]
    if channel_count == 1:
        # Duplicate the single channel into both stereo channels.
        return np.tile(waveform, (1, 2))
    elif channel_count > 2:
        # Keep only the first two channels.
        return waveform[:, :2]
    return waveform
def gain_to_db(tensor: tf.Tensor, espilon: float = 10e-10) -> tf.Tensor:
    """
    Convert from linear gain to decibels in tensorflow.
    Parameters:
        tensor (tensorflow.Tensor):
            Tensor to convert.
        espilon (float):
            Floor value used to avoid log(0). (The parameter name is a
            historical misspelling of "epsilon", kept for compatibility.)
    Returns:
        tensorflow.Tensor:
            Converted tensor.
    """
    # 20 * log10(x), with values clipped below `espilon` before the log.
    clipped = tf.maximum(tensor, espilon)
    return 20.0 / np.log(10) * tf.math.log(clipped)
def db_to_gain(tensor: tf.Tensor) -> tf.Tensor:
    """
    Convert from decibels back to linear gain in tensorflow.
    Parameters:
        tensor (tensorflow.Tensor):
            Tensor to convert.
    Returns:
        tensorflow.Tensor:
            Converted tensor.
    """
    # Inverse of gain_to_db: gain = 10 ** (db / 20).
    exponent = tensor / 20.0
    return tf.pow(10.0, exponent)
def spectrogram_to_db_uint(
    spectrogram: tf.Tensor, db_range: float = 100.0, **kwargs
) -> tf.Tensor:
    """
    Encode the given spectrogram into uint8 using a decibel scale.
    Parameters:
        spectrogram (tensorflow.Tensor):
            Spectrogram to be encoded as TF float tensor.
        db_range (float):
            Range in decibel for encoding.
        **kwargs:
            Forwarded to from_float32_to_uint8.
    Returns:
        tensorflow.Tensor:
            Encoded decibel spectrogram as `uint8` tensor.
    """
    db: tf.Tensor = gain_to_db(spectrogram)
    # Clip everything more than db_range below the peak so the uint8
    # quantization only has to cover a bounded dynamic range.
    floor: tf.Tensor = tf.reduce_max(db) - db_range
    clipped: tf.Tensor = tf.maximum(db, floor)
    return from_float32_to_uint8(clipped, **kwargs)
def db_uint_spectrogram_to_gain(
    db_uint_spectrogram: tf.Tensor, min_db: tf.Tensor, max_db: tf.Tensor
) -> tf.Tensor:
    """
    Decode spectrogram from uint8 decibel scale.
    Parameters:
        db_uint_spectrogram (tensorflow.Tensor):
            Decibel spectrogram to decode.
        min_db (tensorflow.Tensor):
            Lower bound limit for decoding.
        max_db (tensorflow.Tensor):
            Upper bound limit for decoding.
    Returns:
        tensorflow.Tensor:
            Decoded spectrogram as `float32` tensor.
    """
    # Map the uint8 values back onto the [min_db, max_db] decibel range.
    db_spectrogram: tf.Tensor = from_uint8_to_float32(
        db_uint_spectrogram, min_db, max_db
    )
    # Convert from decibels back to linear gain.
    return db_to_gain(db_spectrogram) | 0.941808 | 0.52275 |
from intern.remote import Remote
from intern.resource.local.resource import *
from intern.service.local.metadata import MetadataService
import os.path
# Fallback API version used when the caller does not pass one explicitly.
LATEST_VERSION = 'v0'
# Keys expected in the `specs` dict handed to LocalRemote.__init__.
CONFIG_HOST = "host"
CONFIG_DATASTORE = "datastore"
# Module-level state (re)assigned by LocalRemote.__init__: base path of the
# HDF5 file and the open datastore handle.
filePath = ""
datastore = ""
class LocalRemote(Remote):
def __init__(self, specs, version=None):
"""
Constructor:
Checks for latest version. If no version is given, assigns version as none
Protocol and host specifications are taken in as keys -values of dictionary.
global hos and datastore values are assigned.
If the data store initialized does not exist the remote will create a new one.
"""
if version is None:
version = LATEST_VERSION
host = specs[CONFIG_HOST]
datastore = specs[CONFIG_DATASTORE]
global filePath
filePath = str(host)
global datastore
if os.path.isfile(filePath + datastore + ".hdf5") == True:
datastore = h5py.File(filePath + datastore + ".hdf5")
else:
datastore = LocalResource.create_LocalHDF5(filePath,datastore)
print("Your data store did not exist, so we created one.")
def get_cutout(self, channelRes, res, xspan, yspan, zspan):
"""
Method to request a volume of data from local server
Args:
channelRes (string) : hiererchal path of where the data is located
res (int) : data resolution
xspan (int) : range of pixels in x axis ([1000:1500])
yspan (int) : range of pixels in y axis ([1000:1500])
zspan (int) : range of pixels in z axis ([1000:1010])
Returns:
array: numpy array representation of the requested volume
Raises:
(KeyError): if given invalid version.
"""
return LocalResource.get_cutout(datastore, channelRes, res, xspan, yspan, zspan)
def get_channel(self, collection,channel,experiment=''):
"""
Method to reques specific collection/channel/experiment where the data is located
Args:
collection (string) : name of collection
channel (string) : name of channel
experiment (string) : name of experiement (actual dataset)
Returns:
channelSource (string) : amalgamation of all three parameters into a single path string
Raises:
(KeyError): if given invalid version
"""
return LocalResource.get_channel(collection,channel,experiment)
def create_collection(self, groupName):
"""
Method to create a group space within local HDF5 datastore (collection)
Args:
groupName (string) : Desired name of the group which will be categorized 'collection'
Raises:
(KeyError): if given invalid version.
"""
return LocalResource.create_collection(datastore, groupName)
def create_channel(self, groupName, subGroup):
"""
Method to create a sub-group space within local HDF5 datastore (channel)
Args:
groupName (string) : name of the group (collection) this sub-group (channel) will be created in
subGroup (string) : Desired name of the sub-group which will be categorized as the channel
Raises:
(KeyError): if given invalid version.
"""
return LocalResource.create_channel(groupName, subGroup)
def create_project(self, chan_setup):
"""
Method to request specific collection/channel/experiment where the data is located
Args:
collection (string) : name of collection
channel (string) : name of channel
experiment (string) : name of experiement (actual dataset)
Returns:
channelSource (string) : amalgamation of all three parameters into a single path string
Raises:
(KeyError): if given invalid version
"""
return LocalResource.create_project(datastore,chan_setup)
def create_cutout(self, subGroup, arrayName, dataArray):
"""
Method to create a dataset within local HDF5 datastore
Args:
subGroup (string) : name of the channel (sub-group) in which the data will be saved
arrayName (string) : name of the data
dataArray (array) : N-Dimensional array which is to be saved
Raises:
(KeyError): if given invalid version.
"""
return LocalResource.create_cutout(subGroup, arrayName, dataArray)
def retrieve(self, path):
"""
Method to retrieve a specific file. Aimed at developer for quick file access
Args:
path (string): desired path to the HDF5 group created
Raises:
(KeyError): if given invalid version.
"""
return LocalResource.retrieve(datastore,path)
def list(self):
"""
Method to retrieve a tree of hirerchy within datastore.
Returns:
printname (string) : list of all possible collections, channels and experiments
created in the current datastore
Raises:
(KeyError): if given invalid version.
"""
return LocalResource.list(datastore)
def create_metadata(self, resource, keys_vals):
"""
Create the given key-value pairs for the given resource.
Will attempt to create all key-value pairs even if a failure is encountered.
Args:
resource (intern.resource.boss.BossResource): List keys associated with this resource.
keys_vals (dictionary): The metadata to associate with the resource.
Raises:
HTTPErrorList on failure.
"""
return MetadataService.create(resource,keys_vals)
def get_metadata(self, resource, keys):
"""
Get metadata key-value pairs associated with the given resource.
Args:
resource (intern.resource.boss.BossResource): Get key-value pairs associated with this resource.
keys (list): Keys to retrieve.
Returns:
(dictionary): The requested metadata for the given resource.
Raises:
HTTPErrorList on failure.
"""
return MetadataService.get(resource,keys)
def update_metadata(self, resource, keys_vals):
"""
Update the given key-value pairs for the given resource.
Keys must already exist before they may be updated. Will attempt to
update all key-value pairs even if a failure is encountered.
Args:
resource (intern.resource.boss.BossResource): Update values associated with this resource.
keys_vals (dictionary): The metadata to update for the resource.
Raises:
HTTPErrorList on failure.
"""
return MetadataService.update(resource,keys_vals)
def delete_metadata(self, resource, keys):
"""
Delete metadata key-value pairs associated with the given resource.
Will attempt to delete all given key-value pairs even if a failure
occurs.
Args:
resource (intern.resource.boss.BossResource): Delete key-value pairs associated with this resource.
keys (list): Keys to delete.
Raises:
HTTPErrorList on failure.
"""
return MetadataService.delete(resource,keys)
@classmethod
def list_metadata(self,resource):
"""
Method to retrieve a tree of hirerchy within datastore.
Args:
resource (string): name of the resource of which metadata attributes will be listed
Returns:
printname (string) : list of all possible collections, channels and experiments
created in the current datastore
Raises:
(KeyError): if given invalid version.
"""
return MetadataService.list(resource) | intern/remote/local/remote.py | from intern.remote import Remote
from intern.resource.local.resource import *
from intern.service.local.metadata import MetadataService
import os.path
# Fallback API version used when the caller does not pass one explicitly.
LATEST_VERSION = 'v0'
# Keys expected in the `specs` dict handed to LocalRemote.__init__.
CONFIG_HOST = "host"
CONFIG_DATASTORE = "datastore"
# Module-level state (re)assigned by LocalRemote.__init__: base path of the
# HDF5 file and the open datastore handle.
filePath = ""
datastore = ""
class LocalRemote(Remote):
def __init__(self, specs, version=None):
"""
Constructor:
Checks for latest version. If no version is given, assigns version as none
Protocol and host specifications are taken in as keys -values of dictionary.
global hos and datastore values are assigned.
If the data store initialized does not exist the remote will create a new one.
"""
if version is None:
version = LATEST_VERSION
host = specs[CONFIG_HOST]
datastore = specs[CONFIG_DATASTORE]
global filePath
filePath = str(host)
global datastore
if os.path.isfile(filePath + datastore + ".hdf5") == True:
datastore = h5py.File(filePath + datastore + ".hdf5")
else:
datastore = LocalResource.create_LocalHDF5(filePath,datastore)
print("Your data store did not exist, so we created one.")
def get_cutout(self, channelRes, res, xspan, yspan, zspan):
"""
Method to request a volume of data from local server
Args:
channelRes (string) : hiererchal path of where the data is located
res (int) : data resolution
xspan (int) : range of pixels in x axis ([1000:1500])
yspan (int) : range of pixels in y axis ([1000:1500])
zspan (int) : range of pixels in z axis ([1000:1010])
Returns:
array: numpy array representation of the requested volume
Raises:
(KeyError): if given invalid version.
"""
return LocalResource.get_cutout(datastore, channelRes, res, xspan, yspan, zspan)
def get_channel(self, collection,channel,experiment=''):
"""
Method to reques specific collection/channel/experiment where the data is located
Args:
collection (string) : name of collection
channel (string) : name of channel
experiment (string) : name of experiement (actual dataset)
Returns:
channelSource (string) : amalgamation of all three parameters into a single path string
Raises:
(KeyError): if given invalid version
"""
return LocalResource.get_channel(collection,channel,experiment)
def create_collection(self, groupName):
"""
Method to create a group space within local HDF5 datastore (collection)
Args:
groupName (string) : Desired name of the group which will be categorized 'collection'
Raises:
(KeyError): if given invalid version.
"""
return LocalResource.create_collection(datastore, groupName)
def create_channel(self, groupName, subGroup):
"""
Method to create a sub-group space within local HDF5 datastore (channel)
Args:
groupName (string) : name of the group (collection) this sub-group (channel) will be created in
subGroup (string) : Desired name of the sub-group which will be categorized as the channel
Raises:
(KeyError): if given invalid version.
"""
return LocalResource.create_channel(groupName, subGroup)
def create_project(self, chan_setup):
"""
Method to request specific collection/channel/experiment where the data is located
Args:
collection (string) : name of collection
channel (string) : name of channel
experiment (string) : name of experiement (actual dataset)
Returns:
channelSource (string) : amalgamation of all three parameters into a single path string
Raises:
(KeyError): if given invalid version
"""
return LocalResource.create_project(datastore,chan_setup)
def create_cutout(self, subGroup, arrayName, dataArray):
"""
Method to create a dataset within local HDF5 datastore
Args:
subGroup (string) : name of the channel (sub-group) in which the data will be saved
arrayName (string) : name of the data
dataArray (array) : N-Dimensional array which is to be saved
Raises:
(KeyError): if given invalid version.
"""
return LocalResource.create_cutout(subGroup, arrayName, dataArray)
def retrieve(self, path):
"""
Method to retrieve a specific file. Aimed at developer for quick file access
Args:
path (string): desired path to the HDF5 group created
Raises:
(KeyError): if given invalid version.
"""
return LocalResource.retrieve(datastore,path)
def list(self):
"""
Method to retrieve a tree of hirerchy within datastore.
Returns:
printname (string) : list of all possible collections, channels and experiments
created in the current datastore
Raises:
(KeyError): if given invalid version.
"""
return LocalResource.list(datastore)
def create_metadata(self, resource, keys_vals):
"""
Create the given key-value pairs for the given resource.
Will attempt to create all key-value pairs even if a failure is encountered.
Args:
resource (intern.resource.boss.BossResource): List keys associated with this resource.
keys_vals (dictionary): The metadata to associate with the resource.
Raises:
HTTPErrorList on failure.
"""
return MetadataService.create(resource,keys_vals)
def get_metadata(self, resource, keys):
"""
Get metadata key-value pairs associated with the given resource.
Args:
resource (intern.resource.boss.BossResource): Get key-value pairs associated with this resource.
keys (list): Keys to retrieve.
Returns:
(dictionary): The requested metadata for the given resource.
Raises:
HTTPErrorList on failure.
"""
return MetadataService.get(resource,keys)
def update_metadata(self, resource, keys_vals):
"""
Update the given key-value pairs for the given resource.
Keys must already exist before they may be updated. Will attempt to
update all key-value pairs even if a failure is encountered.
Args:
resource (intern.resource.boss.BossResource): Update values associated with this resource.
keys_vals (dictionary): The metadata to update for the resource.
Raises:
HTTPErrorList on failure.
"""
return MetadataService.update(resource,keys_vals)
def delete_metadata(self, resource, keys):
"""
Delete metadata key-value pairs associated with the given resource.
Will attempt to delete all given key-value pairs even if a failure
occurs.
Args:
resource (intern.resource.boss.BossResource): Delete key-value pairs associated with this resource.
keys (list): Keys to delete.
Raises:
HTTPErrorList on failure.
"""
return MetadataService.delete(resource,keys)
@classmethod
def list_metadata(self,resource):
"""
Method to retrieve a tree of hirerchy within datastore.
Args:
resource (string): name of the resource of which metadata attributes will be listed
Returns:
printname (string) : list of all possible collections, channels and experiments
created in the current datastore
Raises:
(KeyError): if given invalid version.
"""
return MetadataService.list(resource) | 0.765681 | 0.319201 |
from flask import current_app as app
from wtforms import StringField, SubmitField
from flask_wtf import FlaskForm
from wtforms.validators import ValidationError, DataRequired
# CONTAINS CLASS:
# Paper
# Authors
# Abstract
class Paper:
    """Row wrapper and query helpers for the `papers` table."""

    def __init__(self, pid, title, year, conference):
        self.pid = pid
        self.title = title
        self.year = year
        self.conference = conference

    @staticmethod
    def get_paper_for_one_page(pagesize, pagenum):
        """Return page `pagenum` (0-based) of all papers, `pagesize` per page."""
        rows = app.db.execute(
            '''
            SELECT pid, title, year, conference
            FROM papers
            ORDER BY pid
            LIMIT :pagesize OFFSET :offset
            ''', pagesize=pagesize, offset=pagenum * pagesize)
        return [Paper(*row) for row in rows]

    # NOTE: an exact-match get_by_title(title) used to be defined here; it was
    # dead code because the paginated get_by_title defined further down
    # silently shadowed it, so it has been removed.

    @staticmethod
    def get_by_year(year):
        """Return all papers published in `year`."""
        rows = app.db.execute(
            '''
            SELECT pid, title, year, conference
            FROM papers
            WHERE year = :year
            ORDER BY pid
            ''', year=year)
        return [Paper(*row) for row in rows]

    @staticmethod
    def get_by_author(author):
        """Return all papers written by `author` (joined via authorship)."""
        rows = app.db.execute(
            '''
            SELECT papers.pid, title, year, conference
            FROM papers
            INNER JOIN
            (
                SELECT pid, author
                FROM authorship
                WHERE author = :author
            ) AS t1
            ON papers.pid = t1.pid
            ORDER BY papers.pid
            ''', author=author)
        return [Paper(*row) for row in rows]

    @staticmethod
    def get_by_conference(conference):
        """Return all papers published at `conference`."""
        rows = app.db.execute(
            '''
            SELECT pid, title, year, conference
            FROM papers
            WHERE conference = :conference
            ORDER BY pid
            ''', conference=conference)
        return [Paper(*row) for row in rows]

    @staticmethod
    def get_by_year_author_conf(year, author, conference):
        """Return papers matching year AND author AND conference."""
        rows = app.db.execute(
            '''
            SELECT t1.pid, title, year, conference
            FROM
            (
                SELECT pid, title, year, conference
                FROM papers
                WHERE year = :year AND conference = :conference
            ) AS t1
            INNER JOIN
            (
                SELECT pid, author
                FROM authorship
                WHERE author = :author
            ) AS t2
            ON t1.pid = t2.pid
            ORDER BY t1.pid
            ''', year=year, author=author, conference=conference)
        return [Paper(*row) for row in rows]

    @staticmethod
    def get_by_year_author(year, author):
        """Return papers matching year AND author."""
        rows = app.db.execute(
            '''
            SELECT t1.pid, title, year, conference
            FROM
            (
                SELECT pid, title, year, conference
                FROM papers
                WHERE year = :year
            ) AS t1
            INNER JOIN
            (
                SELECT pid, author
                FROM authorship
                WHERE author = :author
            ) AS t2
            ON t1.pid = t2.pid
            ORDER BY t1.pid
            ''', year=year, author=author)
        return [Paper(*row) for row in rows]

    @staticmethod
    def get_by_author_conf(author, conference):
        """Return papers matching author AND conference."""
        rows = app.db.execute(
            '''
            SELECT t1.pid, title, year, conference
            FROM
            (
                SELECT pid, title, year, conference
                FROM papers
                WHERE conference = :conference
            ) AS t1
            INNER JOIN
            (
                SELECT pid, author
                FROM authorship
                WHERE author = :author
            ) AS t2
            ON t1.pid = t2.pid
            ORDER BY t1.pid
            ''', conference=conference, author=author)
        return [Paper(*row) for row in rows]

    @staticmethod
    def get_by_year_conf(year, conference):
        """Return papers matching year AND conference."""
        rows = app.db.execute(
            '''
            SELECT pid, title, year, conference
            FROM papers
            WHERE year = :year AND conference = :conference
            ORDER BY pid
            ''', year=year, conference=conference)
        return [Paper(*row) for row in rows]

    @staticmethod
    def get_by_pid(pid):
        """Return a one-element list with the paper whose id is `pid`.

        Callers are expected to have validated pid <= maximum pid; an
        unknown pid raises IndexError here.
        """
        rows = app.db.execute(
            '''
            SELECT *
            FROM papers
            WHERE pid = :pid
            ''', pid=pid)
        # SELECT * assumes column order (pid, title, year, conference) —
        # matches the Paper constructor.
        return [Paper(*rows[0])]

    @staticmethod
    def get_by_title(title_input, pagenum):
        """Return (papers, total) for titles containing `title_input`, 10 per page.

        The LIKE pattern is passed as a bound parameter instead of being
        concatenated into the SQL string, closing an SQL-injection hole.
        """
        pagenum = int(pagenum)
        rows = app.db.execute(
            '''
            SELECT pid, title, year, conference
            FROM papers
            WHERE title LIKE :pattern
            ORDER BY pid
            ''', pattern='%' + title_input + '%')
        total_num = len(rows)
        if (pagenum + 1) * 10 > total_num:
            # Last (possibly short) page: keep everything from the offset on.
            # The original sliced with [:-1], silently dropping the final row.
            rows = rows[pagenum * 10:]
        else:
            rows = rows[pagenum * 10:(pagenum + 1) * 10]
        return [Paper(*row) for row in rows], total_num

    @staticmethod
    def get_citing_papers_by_pid(pid):
        """Return the papers whose pids appear as `cite_pid` for paper `pid`
        in the citation table."""
        rows = app.db.execute(
            '''
            SELECT papers.pid, title, year, conference
            FROM papers
            WHERE pid IN (SELECT cite_pid
                          FROM citation
                          WHERE pid = :pid)
            ORDER BY pid
            ''', pid=pid)
        return [Paper(*row) for row in rows]
class Statistic:
    """A (category, count) aggregate row used for simple leaderboards."""

    def __init__(self, category, count):
        self.category = category
        self.count = count

    @staticmethod
    def get_year_statistic():
        """Return the top 5 publication years by paper count."""
        records = app.db.execute(
            '''
            SELECT year, count(*) as cnt
            FROM papers
            GROUP BY year
            ORDER BY cnt DESC
            LIMIT 5
            ''')
        return [Statistic(*record) for record in records]

    @staticmethod
    def get_author_statistic():
        """Return the top 5 authors by paper count (excluding 'Staff')."""
        records = app.db.execute(
            '''
            SELECT a.author , count(*) as cnt
            FROM papers as p, authorship as a
            WHERE p.pid = a.pid and a.author!='Staff'
            GROUP BY a. author
            ORDER BY cnt DESC
            LIMIT 5
            ''')
        return [Statistic(*record) for record in records]
class Authors:
    """Row wrapper and query helpers for the `authorship` table."""

    def __init__(self, pid, author):
        self.pid = pid
        self.author = author

    @staticmethod
    def get_by_pid(pid):
        """Return all authorship rows for paper `pid`."""
        rows = app.db.execute(
            '''
            SELECT pid, author
            FROM authorship
            WHERE pid = :pid
            ''', pid=pid)
        return [Authors(*row) for row in rows]

    @staticmethod
    def get_pid_by_author(author):
        """Return all authorship rows written by `author`.

        @staticmethod added for consistency with get_by_pid; without it,
        calling this through an instance would pass the instance as `author`.
        """
        rows = app.db.execute(
            '''
            SELECT pid, author
            FROM authorship
            WHERE author =:author
            ''', author=author
        )
        return [Authors(*row) for row in rows]
class Abstract:
    """Row wrapper and lookup for the `abstract` table."""

    def __init__(self, pid, abstract):
        self.pid = pid
        self.abstract = abstract
    @staticmethod
    def get_by_pid(pid):
        """Return the Abstract for paper `pid`, or None when it has none."""
        rows = app.db.execute(
            '''
            SELECT pid, abstract
            FROM abstract
            WHERE pid = :pid
            ''', pid=pid)
        # Not every paper has an abstract stored.
        if len(rows) == 0:
            return None
        return Abstract(*(rows[0])) | app/models/paper.py | from flask import current_app as app
from wtforms import StringField, SubmitField
from flask_wtf import FlaskForm
from wtforms.validators import ValidationError, DataRequired
# CONTAINS CLASS:
# Paper
# Authors
# Abstract
class Paper:
    """Row wrapper and query helpers for the `papers` table."""

    def __init__(self, pid, title, year, conference):
        self.pid = pid
        self.title = title
        self.year = year
        self.conference = conference

    @staticmethod
    def get_paper_for_one_page(pagesize, pagenum):
        """Return page `pagenum` (0-based) of all papers, `pagesize` per page."""
        rows = app.db.execute(
            '''
            SELECT pid, title, year, conference
            FROM papers
            ORDER BY pid
            LIMIT :pagesize OFFSET :offset
            ''', pagesize=pagesize, offset=pagenum * pagesize)
        return [Paper(*row) for row in rows]

    # NOTE: an exact-match get_by_title(title) used to be defined here; it was
    # dead code because the paginated get_by_title defined further down
    # silently shadowed it, so it has been removed.

    @staticmethod
    def get_by_year(year):
        """Return all papers published in `year`."""
        rows = app.db.execute(
            '''
            SELECT pid, title, year, conference
            FROM papers
            WHERE year = :year
            ORDER BY pid
            ''', year=year)
        return [Paper(*row) for row in rows]

    @staticmethod
    def get_by_author(author):
        """Return all papers written by `author` (joined via authorship)."""
        rows = app.db.execute(
            '''
            SELECT papers.pid, title, year, conference
            FROM papers
            INNER JOIN
            (
                SELECT pid, author
                FROM authorship
                WHERE author = :author
            ) AS t1
            ON papers.pid = t1.pid
            ORDER BY papers.pid
            ''', author=author)
        return [Paper(*row) for row in rows]

    @staticmethod
    def get_by_conference(conference):
        """Return all papers published at `conference`."""
        rows = app.db.execute(
            '''
            SELECT pid, title, year, conference
            FROM papers
            WHERE conference = :conference
            ORDER BY pid
            ''', conference=conference)
        return [Paper(*row) for row in rows]

    @staticmethod
    def get_by_year_author_conf(year, author, conference):
        """Return papers matching year AND author AND conference."""
        rows = app.db.execute(
            '''
            SELECT t1.pid, title, year, conference
            FROM
            (
                SELECT pid, title, year, conference
                FROM papers
                WHERE year = :year AND conference = :conference
            ) AS t1
            INNER JOIN
            (
                SELECT pid, author
                FROM authorship
                WHERE author = :author
            ) AS t2
            ON t1.pid = t2.pid
            ORDER BY t1.pid
            ''', year=year, author=author, conference=conference)
        return [Paper(*row) for row in rows]

    @staticmethod
    def get_by_year_author(year, author):
        """Return papers matching year AND author."""
        rows = app.db.execute(
            '''
            SELECT t1.pid, title, year, conference
            FROM
            (
                SELECT pid, title, year, conference
                FROM papers
                WHERE year = :year
            ) AS t1
            INNER JOIN
            (
                SELECT pid, author
                FROM authorship
                WHERE author = :author
            ) AS t2
            ON t1.pid = t2.pid
            ORDER BY t1.pid
            ''', year=year, author=author)
        return [Paper(*row) for row in rows]

    @staticmethod
    def get_by_author_conf(author, conference):
        """Return papers matching author AND conference."""
        rows = app.db.execute(
            '''
            SELECT t1.pid, title, year, conference
            FROM
            (
                SELECT pid, title, year, conference
                FROM papers
                WHERE conference = :conference
            ) AS t1
            INNER JOIN
            (
                SELECT pid, author
                FROM authorship
                WHERE author = :author
            ) AS t2
            ON t1.pid = t2.pid
            ORDER BY t1.pid
            ''', conference=conference, author=author)
        return [Paper(*row) for row in rows]

    @staticmethod
    def get_by_year_conf(year, conference):
        """Return papers matching year AND conference."""
        rows = app.db.execute(
            '''
            SELECT pid, title, year, conference
            FROM papers
            WHERE year = :year AND conference = :conference
            ORDER BY pid
            ''', year=year, conference=conference)
        return [Paper(*row) for row in rows]

    @staticmethod
    def get_by_pid(pid):
        """Return a one-element list with the paper whose id is `pid`.

        Callers are expected to have validated pid <= maximum pid; an
        unknown pid raises IndexError here.
        """
        rows = app.db.execute(
            '''
            SELECT *
            FROM papers
            WHERE pid = :pid
            ''', pid=pid)
        # SELECT * assumes column order (pid, title, year, conference) —
        # matches the Paper constructor.
        return [Paper(*rows[0])]

    @staticmethod
    def get_by_title(title_input, pagenum):
        """Return (papers, total) for titles containing `title_input`, 10 per page.

        The LIKE pattern is passed as a bound parameter instead of being
        concatenated into the SQL string, closing an SQL-injection hole.
        """
        pagenum = int(pagenum)
        rows = app.db.execute(
            '''
            SELECT pid, title, year, conference
            FROM papers
            WHERE title LIKE :pattern
            ORDER BY pid
            ''', pattern='%' + title_input + '%')
        total_num = len(rows)
        if (pagenum + 1) * 10 > total_num:
            # Last (possibly short) page: keep everything from the offset on.
            # The original sliced with [:-1], silently dropping the final row.
            rows = rows[pagenum * 10:]
        else:
            rows = rows[pagenum * 10:(pagenum + 1) * 10]
        return [Paper(*row) for row in rows], total_num

    @staticmethod
    def get_citing_papers_by_pid(pid):
        """Return the papers whose pids appear as `cite_pid` for paper `pid`
        in the citation table."""
        rows = app.db.execute(
            '''
            SELECT papers.pid, title, year, conference
            FROM papers
            WHERE pid IN (SELECT cite_pid
                          FROM citation
                          WHERE pid = :pid)
            ORDER BY pid
            ''', pid=pid)
        return [Paper(*row) for row in rows]
class Statistic:
    """A (category, count) aggregate row used for simple leaderboards."""

    def __init__(self, category, count):
        self.category = category
        self.count = count

    @staticmethod
    def get_year_statistic():
        """Return the top 5 publication years by paper count."""
        records = app.db.execute(
            '''
            SELECT year, count(*) as cnt
            FROM papers
            GROUP BY year
            ORDER BY cnt DESC
            LIMIT 5
            ''')
        return [Statistic(*record) for record in records]

    @staticmethod
    def get_author_statistic():
        """Return the top 5 authors by paper count (excluding 'Staff')."""
        records = app.db.execute(
            '''
            SELECT a.author , count(*) as cnt
            FROM papers as p, authorship as a
            WHERE p.pid = a.pid and a.author!='Staff'
            GROUP BY a. author
            ORDER BY cnt DESC
            LIMIT 5
            ''')
        return [Statistic(*record) for record in records]
class Authors:
    """Row wrapper and query helpers for the `authorship` table."""

    def __init__(self, pid, author):
        self.pid = pid
        self.author = author

    @staticmethod
    def get_by_pid(pid):
        """Return all authorship rows for paper `pid`."""
        rows = app.db.execute(
            '''
            SELECT pid, author
            FROM authorship
            WHERE pid = :pid
            ''', pid=pid)
        return [Authors(*row) for row in rows]

    @staticmethod
    def get_pid_by_author(author):
        """Return all authorship rows written by `author`.

        @staticmethod added for consistency with get_by_pid; without it,
        calling this through an instance would pass the instance as `author`.
        """
        rows = app.db.execute(
            '''
            SELECT pid, author
            FROM authorship
            WHERE author =:author
            ''', author=author
        )
        return [Authors(*row) for row in rows]
class Abstract:
    """Row wrapper and lookup for the `abstract` table."""

    def __init__(self, pid, abstract):
        self.pid = pid
        self.abstract = abstract
    @staticmethod
    def get_by_pid(pid):
        """Return the Abstract for paper `pid`, or None when it has none."""
        rows = app.db.execute(
            '''
            SELECT pid, abstract
            FROM abstract
            WHERE pid = :pid
            ''', pid=pid)
        # Not every paper has an abstract stored.
        if len(rows) == 0:
            return None
        return Abstract(*(rows[0])) | 0.559771 | 0.133868 |
import pathlib
from ted_sws import config
from ted_sws.mapping_suite_processor.adapters.allegro_triple_store import AllegroGraphTripleStore
def repository_exists(triple_store: AllegroGraphTripleStore, repository_name) -> bool:
    """
    Check whether a repository is present in the triple store.
    :param triple_store: triple store adapter to query
    :param repository_name: name of the repository to look for
    :return: True when the repository is present, False otherwise
    """
    # `in` already yields a bool; the original `True if ... else False`
    # ternary was redundant.
    return repository_name in triple_store.list_repositories()
def load_mapping_suite_output_into_triple_store(package_folder_path, allegro_host=config.ALLEGRO_HOST,
                                                allegro_user=config.AGRAPH_SUPER_USER,
                                                allegro_password=config.<PASSWORD>,
                                                allegro_catalog_name: str = None):
    """
    Method to create a repository in the triple store and load all ttl files from the output folder of a mapping suite
    package. Name of the repository will be auto-generated from the folder name.
    :param package_folder_path:
    :param allegro_host:
    :param allegro_user:
    :param allegro_password:
    :param allegro_catalog_name:
    :return:
    """
    # NOTE(review): "<PASSWORD>" in the signature above and in the call below
    # looks like an anonymization placeholder, not runnable code — confirm
    # against the original repository.
    package_folder_path = pathlib.Path(package_folder_path)
    metadata_file = package_folder_path / "metadata.json"
    # NOTE(review): assert is stripped under `python -O`; consider raising an
    # explicit error if metadata.json is required.
    assert metadata_file.exists()
    # Repository name is derived from the package folder name.
    package_name = package_folder_path.stem
    ttl_files_paths = [str(path) for path in package_folder_path.glob("output/**/*.ttl")]
    triple_store = AllegroGraphTripleStore(host=allegro_host, password=<PASSWORD>,
                                           user=allegro_user, catalog_name=allegro_catalog_name)
    # Recreate the repository from scratch so stale triples are dropped.
    if repository_exists(triple_store=triple_store, repository_name=package_name):
        triple_store.delete_repository(repository_name=package_name)
    triple_store.create_repository(repository_name=package_name)
    for ttl_file_path in ttl_files_paths:
        triple_store.add_file_to_repository(file_path=ttl_file_path, repository_name=package_name) | ted_sws/mapping_suite_processor/services/load_mapping_suite_output_into_triple_store.py | import pathlib
from ted_sws import config
from ted_sws.mapping_suite_processor.adapters.allegro_triple_store import AllegroGraphTripleStore
def repository_exists(triple_store: AllegroGraphTripleStore, repository_name) -> bool:
    """
    Check whether a repository is present in the triple store.
    :param triple_store: triple store adapter to query
    :param repository_name: name of the repository to look for
    :return: True when the repository is present, False otherwise
    """
    # `in` already yields a bool; the original `True if ... else False`
    # ternary was redundant.
    return repository_name in triple_store.list_repositories()
def load_mapping_suite_output_into_triple_store(package_folder_path, allegro_host=config.ALLEGRO_HOST,
                                                allegro_user=config.AGRAPH_SUPER_USER,
                                                allegro_password=config.<PASSWORD>,
                                                allegro_catalog_name: str = None):
    """
    Method to create a repository in the triple store and load all ttl files from the output folder of a mapping suite
    package. Name of the repository will be auto-generated from the folder name.
    :param package_folder_path:
    :param allegro_host:
    :param allegro_user:
    :param allegro_password:
    :param allegro_catalog_name:
    :return:
    """
    # NOTE(review): "<PASSWORD>" in the signature above and in the call below
    # looks like an anonymization placeholder, not runnable code — confirm
    # against the original repository.
    package_folder_path = pathlib.Path(package_folder_path)
    metadata_file = package_folder_path / "metadata.json"
    # NOTE(review): assert is stripped under `python -O`; consider raising an
    # explicit error if metadata.json is required.
    assert metadata_file.exists()
    # Repository name is derived from the package folder name.
    package_name = package_folder_path.stem
    ttl_files_paths = [str(path) for path in package_folder_path.glob("output/**/*.ttl")]
    triple_store = AllegroGraphTripleStore(host=allegro_host, password=<PASSWORD>,
                                           user=allegro_user, catalog_name=allegro_catalog_name)
    # Recreate the repository from scratch so stale triples are dropped.
    if repository_exists(triple_store=triple_store, repository_name=package_name):
        triple_store.delete_repository(repository_name=package_name)
    triple_store.create_repository(repository_name=package_name)
    for ttl_file_path in ttl_files_paths:
        triple_store.add_file_to_repository(file_path=ttl_file_path, repository_name=package_name) | 0.473414 | 0.160858 |
from sqlalchemy.exc import SQLAlchemyError
from setup import Category, Item
import bleach
def getCategories(session):
    """Fetch every Category, sorted by name.

    :param session: (DBSession) SQLAlchemy session
    :return: list of Category objects, or False on a database error.
    """
    query = session.query(Category).order_by(Category.name)
    try:
        result = query.all()
    except SQLAlchemyError:
        return False
    return result
def getItems(session):
    """Fetch all items joined to their categories, newest first.

    :param session: (DBSession) SQLAlchemy session
    :return: list of Item objects ordered by descending id, or False on error.
    """
    query = (session.query(Item)
             .filter(Item.category_id == Category.id)
             .order_by(Item.id.desc()))
    try:
        return query.all()
    except SQLAlchemyError:
        return False
def getCategory(category_name, session):
    """Fetch the single Category whose name matches `category_name`.

    The name is sanitized with bleach before querying.
    :param category_name: (string)
    :param session: (DBSession) SQLAlchemy session
    :return: Category object, or False on error (including missing/ambiguous).
    """
    cleaned = bleach.clean(category_name)
    try:
        return session.query(Category).filter_by(name=cleaned).one()
    except SQLAlchemyError:
        return False
def getCategoryItems(category_id, session):
    """Fetch every Item belonging to category `category_id`, newest first.

    :param category_id: (integer)
    :param session: (DBSession) SQLAlchemy session
    :return: list of Item objects, or False on a database error.
    """
    query = (session.query(Item)
             .filter_by(category_id=category_id)
             .filter(Item.category_id == Category.id)
             .order_by(Item.id.desc()))
    try:
        return query.all()
    except SQLAlchemyError:
        return False
def getItem(category_id, item_name, session):
    """
    Retrieve item based on category id and item name.
    :param category_id: (integer) Category.id
    :param item_name: (string) Item.name
    :param session: (DBSession) SQLAlchemy session
    :return:
        Item object.
    """
    try:
        # Item names are looked up lowercase and sanitized with bleach.
        item = (session.query(Item)
                .filter_by(category_id=category_id,
                           name=bleach.clean(item_name.lower()))
                .one())
    except SQLAlchemyError:
        # Returns False on any DB error, including "no such item" raised
        # by .one(); callers must check for False.
        return False
    return item | queryhelpers.py | from sqlalchemy.exc import SQLAlchemyError
from setup import Category, Item
import bleach
def getCategories(session):
    """Fetch every Category, sorted by name.

    :param session: (DBSession) SQLAlchemy session
    :return: list of Category objects, or False on a database error.
    """
    query = session.query(Category).order_by(Category.name)
    try:
        result = query.all()
    except SQLAlchemyError:
        return False
    return result
def getItems(session):
    """Fetch all items joined to their categories, newest first.

    :param session: (DBSession) SQLAlchemy session
    :return: list of Item objects ordered by descending id, or False on error.
    """
    query = (session.query(Item)
             .filter(Item.category_id == Category.id)
             .order_by(Item.id.desc()))
    try:
        return query.all()
    except SQLAlchemyError:
        return False
def getCategory(category_name, session):
    """Fetch the single Category whose name matches `category_name`.

    The name is sanitized with bleach before querying.
    :param category_name: (string)
    :param session: (DBSession) SQLAlchemy session
    :return: Category object, or False on error (including missing/ambiguous).
    """
    cleaned = bleach.clean(category_name)
    try:
        return session.query(Category).filter_by(name=cleaned).one()
    except SQLAlchemyError:
        return False
def getCategoryItems(category_id, session):
    """
    Retrieve a category's items based on category id.

    :param category_id: (integer) Category.id
    :param session: (DBSession) SQLAlchemy session
    :return:
        List of Item objects ordered from greatest to lowest id,
        or False if the query fails.
    """
    try:
        # filter_by pins the category id; the extra
        # Item.category_id == Category.id criterion also joins Category.
        # NOTE(review): that join criterion looks redundant given the
        # filter_by above -- confirm it is intentional.
        items = (session.query(Item)
                 .filter_by(category_id=category_id)
                 .filter(Item.category_id == Category.id)
                 .order_by(Item.id.desc())
                 .all())
    except SQLAlchemyError:
        # Module convention: report any database failure as False.
        return False
    return items
def getItem(category_id, item_name, session):
    """
    Retrieve item based on category id and item name.

    :param category_id: (integer) Category.id
    :param item_name: (string) Item.name
    :param session: (DBSession) SQLAlchemy session
    :return:
        Item object, or False if the query fails (including the
        no-result / multiple-result cases raised by ``one()``).
    """
    try:
        # Item names are stored lower-cased; normalize and sanitize the
        # lookup value to match.
        item = (session.query(Item)
                .filter_by(category_id=category_id,
                           name=bleach.clean(item_name.lower()))
                .one())
    except SQLAlchemyError:
        return False
    return item
from django.db import models
from django.contrib.auth.models import User
""" customized, app specific imports """
from laptops101 import customValidators as v
class AssetTag(models.Model):
    """ Maintains a list of Asset Tags for sole purpose of input validation across several tables """

    # Unique tag string; TaggedItem subclasses copy their tag here on save().
    ASSET_TAG = models.CharField(max_length=8, unique=True)
class EndUser(models.Model):
    """A person to whom tagged assets can be assigned."""

    FIRST_NAME = models.CharField(max_length=100)
    LAST_NAME = models.CharField(max_length=100)
    TITLE = models.CharField(max_length=100)
    # May be left blank on input; auto-generated from the name in save().
    EMAIL = models.EmailField(max_length=100, unique=True, blank=True)
    DEPARTMENT = models.CharField(max_length=100)

    def save(self, *args, **kwargs):
        """ If e-mail is blank, will auto fill. """
        # Fixed idiom: was `if bool(self.EMAIL) == False:`.
        # NOTE(review): '@<EMAIL>' looks like a redacted domain
        # placeholder -- confirm the intended e-mail domain.
        if not self.EMAIL:
            self.EMAIL = self.FIRST_NAME + "." + self.LAST_NAME + '@<EMAIL>'
        super(EndUser, self).save(*args, **kwargs)

    class Meta:
        # A person is identified by the first/last name pair.
        unique_together = ["FIRST_NAME", "LAST_NAME"]
class Manufacturer(models.Model):
    """ Stores names of Manufacturers and their associated website URLs """

    MANUFACTURER = models.CharField(max_length=100, unique=True)  # e.g. "Apple" or "Dell"
    WEBSITE = models.CharField(max_length=100)  # manufacturer's website URL
class ItemArchetype(models.Model):
    """ Static details about an asset that will not change from item to item.

    e.g. all Dell XPS 4300 will have the same SKU and Manufacturer, but
    different Serials, Asset Tags, users etc.
    """

    SKU = models.CharField(max_length=100, unique=True)
    COMMON_NAME = models.CharField(max_length=255)  # e.g. "Dell XPS"
    MANUFACTURER = models.ForeignKey(Manufacturer, on_delete=models.SET_NULL, null=True)
    NOTES = models.CharField(max_length=255)
class TaggedItem(models.Model):
    """Information common to all assets that varies per physical item.

    Abstract base class for all tagged asset models; it does not create
    its own table (see ``Meta.abstract``).
    """

    # auto_now_add: stamped once when the row is first created.
    PURCHASE_DATE = models.DateField(auto_now=False, auto_now_add=True)
    PURCHASE_COST = models.DecimalField(max_digits=7, decimal_places=2)
    ASSIGNED_USER = models.ForeignKey(EndUser, on_delete=models.SET_NULL, null=True)
    ASSET_TAG = models.CharField(max_length=8, unique=True, validators=[v.tag_available])

    def recordAsset(self, tag):
        """Copy *tag* into the AssetTag table, for input validation.

        Called by subclasses on every save.
        """
        # Fixed: previously ignored the ``tag`` argument and always read
        # self.ASSET_TAG; visible callers pass self.ASSET_TAG, so
        # behaviour is unchanged.
        # NOTE(review): AssetTag.ASSET_TAG is unique, so re-saving an
        # existing item raises an integrity error here -- confirm intended.
        assetTag = AssetTag(ASSET_TAG=tag)
        assetTag.save()

    class Meta:
        abstract = True
class Laptop(TaggedItem):
    """ Any Laptop Computer """

    CPU_MODEL = models.CharField(max_length=128)  # e.g. Intel i7
    CPU_SPEED = models.DecimalField(max_digits=3, decimal_places=2)  # Speed of CPU in GHZ
    RAM = models.IntegerField()  # RAM in GB
    HDD = models.DecimalField(max_digits=4, decimal_places = 2)  # HDD size in Terabytes
    ARCHETYPE = models.ForeignKey(ItemArchetype, on_delete=models.CASCADE,)
    NOTES = models.CharField(max_length=255, default="Notes can be added here.")

    def save(self, *args, **kwargs):
        """ Invokes a method to copy the asset tag to the asset tag table """
        self.recordAsset(self.ASSET_TAG)
        super(Laptop, self).save(*args, **kwargs)
class Monitor(TaggedItem):
    """ Any monitor deployed with a laptop """

    # Available video inputs.
    HDMI = models.BooleanField(default=True)
    VGA = models.BooleanField(default=False)
    DISPLAY_PORT = models.BooleanField(default=True)
    ARCHETYPE = models.ForeignKey(ItemArchetype, on_delete=models.CASCADE,)
    NOTES = models.CharField(max_length=255, default="Notes can be added here.")

    def save(self, *args, **kwargs):
        """ Invokes a method to copy the asset tag to the asset tag table """
        self.recordAsset(self.ASSET_TAG)
        super(Monitor, self).save(*args, **kwargs)


# (start of the next concatenated copy of this module)
from django.db import models
from django.contrib.auth.models import User
""" customized, app specific imports """
from laptops101 import customValidators as v
class AssetTag(models.Model):
""" Maintains a list of Asset Tags for sole purpose of input validation across several tables """
ASSET_TAG = models.CharField(max_length=8, unique=True)
class EndUser(models.Model):
FIRST_NAME = models.CharField(max_length=100)
LAST_NAME = models.CharField(max_length=100)
TITLE = models.CharField(max_length=100)
EMAIL = models.EmailField(max_length=100, unique=True, blank=True)
DEPARTMENT = models.CharField(max_length=100)
def save(self, *args, **kwargs):
""" If e-mail is blank, will auto fill. """
if bool(self.EMAIL) == False:
self.EMAIL = self.FIRST_NAME + "." + self.LAST_NAME + '@<EMAIL>'
super(EndUser, self).save(*args, **kwargs)
class Meta:
unique_together = ["FIRST_NAME", "LAST_NAME"]
class Manufacturer(models.Model):
""" Stores names of Manufacturers and their associated website URLs """
MANUFACTURER = models.CharField(max_length=100, unique=True) # e.g. "Apple" or "Dell"
WEBSITE = models.CharField(max_length=100)
class ItemArchetype(models.Model):
""" Static Details about an asset that will not change from item to item
e.g. All Dell XPS 4300 will ahve the Same SKU and Manufacturer, but
different Serials, Asset Tags, users etc.
"""
SKU = models.CharField(max_length=100, unique=True)
COMMON_NAME = models.CharField(max_length=255) # e.g. "Dell XPS"
MANUFACTURER = models.ForeignKey(Manufacturer, on_delete=models.SET_NULL, null=True)
NOTES = models.CharField(max_length=255)
class TaggedItem(models.Model):
""" Information common to all Assets, but will change on each asset """
""" Is the base class for all other items. Will not create its own table """
PURCHASE_DATE = models.DateField(auto_now=False, auto_now_add=True)
PURCHASE_COST = models.DecimalField(max_digits=7, decimal_places=2)
ASSIGNED_USER = models.ForeignKey(EndUser, on_delete=models.SET_NULL, null=True)
ASSET_TAG = models.CharField(max_length=8, unique=True, validators=[v.tag_available])
def recordAsset(self, tag):
""" Each time a tagged item is saved, its tag is saved in another table.
This is done for purpose of input validation. """
assetTag = AssetTag(ASSET_TAG = self.ASSET_TAG)
assetTag.save()
class Meta:
abstract = True
class Laptop(TaggedItem):
""" Any Laptop Computer """
CPU_MODEL = models.CharField(max_length=128) #e.g. Intel i7
CPU_SPEED = models.DecimalField(max_digits=3, decimal_places=2) # Speed of CPU in GHZ
RAM = models.IntegerField() # RAM in GB
HDD = models.DecimalField(max_digits=4, decimal_places = 2) # HDD size in Terabytes
ARCHETYPE = models.ForeignKey(ItemArchetype, on_delete=models.CASCADE,)
NOTES = models.CharField(max_length=255, default="Notes can be added here.")
def save(self, *args, **kwargs):
""" Ivokes a method to copy the asset tag to the asset tag table """
self.recordAsset(self.ASSET_TAG)
super(Laptop, self).save(*args, **kwargs)
class Monitor(TaggedItem):
""" Any monitored deployed with a laptop """
HDMI = models.BooleanField(default=True)
VGA = models.BooleanField(default=False)
DISPLAY_PORT = models.BooleanField(default=True)
ARCHETYPE = models.ForeignKey(ItemArchetype, on_delete=models.CASCADE,)
NOTES = models.CharField(max_length=255, default="Notes can be added here.")
def save(self, *args, **kwargs):
""" Ivokes a method to copy the asset tag to the asset tag table """
self.recordAsset(self.ASSET_TAG)
super(Monitor, self).save(*args, **kwargs) | 0.532668 | 0.139367 |
from pathlib import Path
import logging
class RecordingManager:
    """
    Manages the recordings of a Vesper archive.

    The main responsibility of a recording manager is to convert
    recording file paths between relative and absolute forms. The
    relative form of a recording file path, stored in the archive
    database, is converted to its absolute form by appending it
    to the absolute path of one of the archive's *recording
    directories*. The absolute form is converted to the relative
    form by removing the initial recording directory path.

    It is common for there to be only one recording directory for
    an archive, but in some cases (for example, if the recordings
    of a large archive are located on more than one disk) an
    archive may have more than one recording directory. When there
    is more than one recording directory, care must be taken to
    ensure that any relative recording file path exists in only one
    recording directory. This can be accomplished by making
    recording file names unique within an archive, a common
    practice.

    Parameters
    ----------
    archive_dir_path: str or pathlib.Path object
        The absolute path of the archive of this manager.
    recording_dir_paths: sequence of str or pathlib.Path objects
        Each of the specified paths must be the path of a recording
        directory. Each path may be either absolute or relative:
        if a path is relative, it is relative to the archive
        directory. A specified recording directory does not have to
        exist, though of course if it does not any recordings that
        are supposed to be in it cannot be accessed. In order to
        ensure proper operation for case-insensitive file systems,
        no pair of recording directory paths can differ only by
        alphabetic case, even for a case-sensitive file system.

    Attributes
    ----------
    archive_dir_path: pathlib.Path object
        The absolute path of the archive of this manager.
    recording_dir_paths: tuple of pathlib.Path objects
        The recording directory paths of this manager.

    Raises
    ------
    ValueError:
        if the specified archive directory path is not absolute.
    """

    def __init__(self, archive_dir_path, recording_dir_paths):

        self._archive_dir_path = _get_path_object(archive_dir_path)
        self._check_archive_dir_path()

        self._recording_dir_paths = \
            self._get_recording_dir_paths(recording_dir_paths)

        # Lower-cased copies of the directory paths, used for
        # case-insensitive prefix matching in
        # `get_relative_recording_file_path`.
        self._lowered_recording_dir_paths = \
            tuple(Path(str(p).lower()) for p in self._recording_dir_paths)

        # Maps relative paths to previously resolved absolute paths.
        # NOTE(review): entries are never invalidated, so they can go
        # stale if recording files are moved or deleted -- confirm
        # acceptable for this manager's lifetime.
        self._absolute_file_path_cache = {}

    def _check_archive_dir_path(self):
        # Raise `ValueError` if the archive directory path is relative.
        if not self.archive_dir_path.is_absolute():
            raise ValueError(
                ('Archive directory path "{}" specified to recording '
                 'manager is not absolute.').format(self.archive_dir_path))

    def _get_recording_dir_paths(self, paths):
        # Absolutize, deduplicate (case-insensitively), and return the
        # recording directory paths as a tuple of `Path` objects.

        # Get all recording dir paths as `Path` objects.
        paths = [_get_path_object(p) for p in paths]

        vetted_paths = []
        lowered_to_original = {}
        logger = logging.getLogger()

        for path in paths:

            if not path.is_absolute():
                # Relative directory paths are taken relative to the
                # archive directory.
                path = self.archive_dir_path / path

            lowered_path = str(path).lower()
            original = lowered_to_original.get(lowered_path)

            if original is not None:
                # have already seen this path or one that is identical to
                # it except for alphabetic case

                if original == path:
                    s = 'a duplicate'
                else:
                    s = 'considered a duplicate of "{}"'.format(original)

                logger.warning((
                    'Recording directory path "{}" specified to recording '
                    'manager is {} and will be ignored.').format(path, s))

            else:
                # have not already seen this path

                vetted_paths.append(path)
                lowered_to_original[lowered_path] = path

        return tuple(vetted_paths)

    @property
    def archive_dir_path(self):
        # The absolute archive directory path (read-only).
        return self._archive_dir_path

    @property
    def recording_dir_paths(self):
        # Tuple of vetted, absolute recording directory paths (read-only).
        return self._recording_dir_paths

    def get_absolute_recording_file_path(self, relative_path):
        """
        Gets the absolute version of a relative recording file path.

        This method looks for the specified relative path within the
        recording directories, and returns the absolute version of the
        path if found. The recording directories are searched in order.

        Parameters
        ----------
        relative_path: str or pathlib.Path
            The relative path for which to find the absolute path.

        Returns
        -------
        pathlib.Path
            The absolute version of the specified relative path.

        Raises
        ------
        ValueError:
            if the specified path is absolute or does not exist in any
            recording directory.
        """

        # Ensure that path is a `Path` object.
        path = _get_path_object(relative_path)

        if path.is_absolute():
            raise ValueError(
                'Recording file path "{}" is already absolute.'.format(path))

        else:
            # `path` is relative

            try:
                return self._absolute_file_path_cache[path]

            except KeyError:

                for dir_path in self.recording_dir_paths:
                    abs_path = dir_path / path
                    if abs_path.exists():
                        # Cache the resolution for future lookups.
                        self._absolute_file_path_cache[path] = abs_path
                        return abs_path

                # If we get here, the specified path does not exist in
                # any recording directory.

                start = (
                    'Recording file path "{}" could not be made '
                    'absolute since ').format(path)

                num_recording_dirs = len(self.recording_dir_paths)

                if num_recording_dirs == 0:
                    end = 'there are no recording directories.'

                elif num_recording_dirs == 1:
                    # `dir_path` here is the single recording directory,
                    # left over from the loop above.
                    if not dir_path.exists():
                        end = (
                            'the recording directory "{}" could not be '
                            'found.').format(self.recording_dir_paths[0])
                    else:
                        end = (
                            'it is not in the recording directory '
                            '"{}".').format(self.recording_dir_paths[0])

                else:
                    end = (
                        'it is not in any of the recording directories '
                        '{}.').format(self._create_recording_dirs_list())

                raise ValueError(start + end)

    def _create_recording_dirs_list(self):
        # String rendering of all recording directory paths, for error
        # messages.
        return str([str(p) for p in self.recording_dir_paths])

    def get_relative_recording_file_path(self, absolute_path):
        """
        Gets the relative version of an absolute recording file path.

        The path is made relative with respect to the first recording
        directory whose path starts it, disregarding alphabetic case.
        Note that the specified path does not have to exist: it only
        has to start with a recording directory path.

        Parameters
        ----------
        absolute_path: str or pathlib.Path
            The absolute path for which to find the relative path.

        Returns
        -------
        recording_dir_path: pathlib.Path
            The path of the first recording directory whose path starts
            `absolute_path`.
        rel_path: pathlib.Path
            The path relative to `recording_dir_path` of `absolute_path`.

        Raises
        ------
        ValueError
            If the specified path is relative, or does not start with
            a recording directory path.
        """

        # Ensure that path is a `Path` object.
        path = _get_path_object(absolute_path)

        if not path.is_absolute():
            raise ValueError(
                'Recording file path "{}" is already relative.'.format(path))

        else:
            # `path` is absolute

            # Get lower-case version of path for comparison to
            # lower-case versions of recording directory paths.
            lowered_path = Path(str(path).lower())

            for dir_path, lowered_dir_path in \
                    zip(self.recording_dir_paths,
                        self._lowered_recording_dir_paths):

                try:
                    lowered_rel_path = \
                        lowered_path.relative_to(lowered_dir_path)
                except ValueError:
                    # `lowered_dir_path` is not a prefix of the path;
                    # try the next recording directory.
                    continue
                else:
                    # Take the final components of the original path
                    # (not the lowered one) so the returned relative
                    # path preserves the caller's alphabetic case.
                    num_parts = len(lowered_rel_path.parts)
                    parts = path.parts[-num_parts:]
                    rel_path = Path(*parts)
                    return dir_path, rel_path

            # If we get here, the specified path is not inside any of the
            # recording directories.

            start = (
                'Recording file path "{}" could not be made relative '
                'since ').format(path)

            num_recording_dirs = len(self.recording_dir_paths)

            if num_recording_dirs == 0:
                end = 'there are no recording directories.'
            elif num_recording_dirs == 1:
                end = 'it is not in the recording directory "{}".'.format(
                    self.recording_dir_paths[0])
            else:
                end = \
                    'it is not in any of the recording directories {}.'.format(
                        self._create_recording_dirs_list())

            raise ValueError(start + end)
def _get_path_object(p):
return p if isinstance(p, Path) else Path(p) | vesper/util/recording_manager.py | from pathlib import Path
import logging
class RecordingManager:
"""
Manages the recordings of a Vesper archive.
The main responsibility of a recording manager is to convert
recording file paths between relative and absolute forms. The
relative form of a recording file path, stored in the archive
database, is converted to its absolute form by appending it
to the absolute path of one of the archive's *recording
directories*. The absolute form is converted to the relative
form by removing the initial recording directory path.
It is common for there to be only one recording directory for
an archive, but in some cases (for example, if the recordings
of a large archive are located on more than one disk) an
archive may have more than one recording directory. When there
is more than one recording directory, care must be taken to
ensure that any relative recording file path exists in only one
recording directory. This can be accomplished by making
recording file names unique within an archive, a common
practice.
Parameters
----------
archive_dir_path: str or pathlib.Path object
The absolute path of the archive of this manager.
recording_dir_paths: sequence of str or pathlib.Path objects
Each of the specified paths must be the path of a recording
directory. Each path may be either absolute or relative:
if a path is relative, it is relative to the archive
directory. A specified recording directory does not have to
exist, though of course if it does not any recordings that
are supposed to be in it cannot be accessed. In order to
ensure proper operation for case-insensitive file systems,
no pair of recording directory paths can differ only by
alphabetic case, even for a case-sensitive file system.
Attributes
----------
archive_dir_path: pathlib.Path object
The absolute path of the archive of this manager.
recording_dir_paths: tuple of pathlib.Path objects
The recording directory paths of this manager.
Raises
------
ValueError:
if the specified archive directory path is not absolute.
"""
def __init__(self, archive_dir_path, recording_dir_paths):
self._archive_dir_path = _get_path_object(archive_dir_path)
self._check_archive_dir_path()
self._recording_dir_paths = \
self._get_recording_dir_paths(recording_dir_paths)
self._lowered_recording_dir_paths = \
tuple(Path(str(p).lower()) for p in self._recording_dir_paths)
self._absolute_file_path_cache = {}
def _check_archive_dir_path(self):
if not self.archive_dir_path.is_absolute():
raise ValueError(
('Archive directory path "{}" specified to recording '
'manager is not absolute.').format(self.archive_dir_path))
def _get_recording_dir_paths(self, paths):
# Get all recording dir paths as `Path` objects.
paths = [_get_path_object(p) for p in paths]
vetted_paths = []
lowered_to_original = {}
logger = logging.getLogger()
for path in paths:
if not path.is_absolute():
path = self.archive_dir_path / path
lowered_path = str(path).lower()
original = lowered_to_original.get(lowered_path)
if original is not None:
# have already seen this path or one that is identical to
# it except for alphabetic case
if original == path:
s = 'a duplicate'
else:
s = 'considered a duplicate of "{}"'.format(original)
logger.warning((
'Recording directory path "{}" specified to recording '
'manager is {} and will be ignored.').format(path, s))
else:
# have not already seen this path
vetted_paths.append(path)
lowered_to_original[lowered_path] = path
return tuple(vetted_paths)
@property
def archive_dir_path(self):
return self._archive_dir_path
@property
def recording_dir_paths(self):
return self._recording_dir_paths
def get_absolute_recording_file_path(self, relative_path):
"""
Gets the absolute version of a relative recording file path.
This method looks for the specified relative path within the
recording directories, and returns the absolute version of the
path if found. The recording directories are searched in order.
Parameters
----------
relative_path: str or pathlib.Path
The relative path for which to find the absolute path.
Returns
-------
pathlib.Path
The absolute version of the specified relative path.
Raises
------
ValueError:
if the specified path is absolute or does not exist in any
recording directory.
"""
# Ensure that path is a `Path` object.
path = _get_path_object(relative_path)
if path.is_absolute():
raise ValueError(
'Recording file path "{}" is already absolute.'.format(path))
else:
# `path` is relative
try:
return self._absolute_file_path_cache[path]
except KeyError:
for dir_path in self.recording_dir_paths:
abs_path = dir_path / path
if abs_path.exists():
self._absolute_file_path_cache[path] = abs_path
return abs_path
# If we get here, the specified path does not exist in
# any recording directory.
start = (
'Recording file path "{}" could not be made '
'absolute since ').format(path)
num_recording_dirs = len(self.recording_dir_paths)
if num_recording_dirs == 0:
end = 'there are no recording directories.'
elif num_recording_dirs == 1:
if not dir_path.exists():
end = (
'the recording directory "{}" could not be '
'found.').format(self.recording_dir_paths[0])
else:
end = (
'it is not in the recording directory '
'"{}".').format(self.recording_dir_paths[0])
else:
end = (
'it is not in any of the recording directories '
'{}.').format(self._create_recording_dirs_list())
raise ValueError(start + end)
def _create_recording_dirs_list(self):
return str([str(p) for p in self.recording_dir_paths])
def get_relative_recording_file_path(self, absolute_path):
"""
Gets the relative version of an absolute recording file path.
The path is made relative with respect to the first recording
directory whose path starts it, disregarding alphabetic case.
Note that the specified path does not have to exist: it only
has to start with a recording directory path.
Parameters
----------
absolute_path: str or pathlib.Path
The absolute path for which to find the relative path.
Returns
-------
recording_dir_path: pathlib.Path
The path of the first recording directory whose path starts
`absolute_path`.
rel_path: pathlib.Path
The path relative to `recording_dir_path` of `absolute_path`.
Raises
------
ValueError
If the specified path is relative, or does not start with
a recording directory path.
"""
# Ensure that path is a `Path` object.
path = _get_path_object(absolute_path)
if not path.is_absolute():
raise ValueError(
'Recording file path "{}" is already relative.'.format(path))
else:
# `path` is absolute
# Get lower-case version of path for comparison to
# lower-case versions of recording directory paths.
lowered_path = Path(str(path).lower())
for dir_path, lowered_dir_path in \
zip(self.recording_dir_paths,
self._lowered_recording_dir_paths):
try:
lowered_rel_path = \
lowered_path.relative_to(lowered_dir_path)
except ValueError:
continue
else:
num_parts = len(lowered_rel_path.parts)
parts = path.parts[-num_parts:]
rel_path = Path(*parts)
return dir_path, rel_path
# If we get here, the specified path is not inside any of the
# recording directories.
start = (
'Recording file path "{}" could not be made relative '
'since ').format(path)
num_recording_dirs = len(self.recording_dir_paths)
if num_recording_dirs == 0:
end = 'there are no recording directories.'
elif num_recording_dirs == 1:
end = 'it is not in the recording directory "{}".'.format(
self.recording_dir_paths[0])
else:
end = \
'it is not in any of the recording directories {}.'.format(
self._create_recording_dirs_list())
raise ValueError(start + end)
def _get_path_object(p):
return p if isinstance(p, Path) else Path(p) | 0.786869 | 0.310511 |
from __future__ import absolute_import
from fortrace.botnet.common.loggerbase import LoggerBase
from fortrace.core.guest import Guest
__author__ = '<NAME>'
class GroupManager(LoggerBase):
""" Manages symbolic names.
Bridge between symbolic names and bot registry as well as the payload registry.
Replaces access to the bot registry and the payload registry.
:param bot_registry: an instance of a bot registry
:param payload_registry: an instance of a payload_registry
"""
def __init__(self, bot_registry, payload_registry):
""" The init method.
:type payload_registry: fortrace.core.bmoncomponents.payloadregistry.PayloadRegistry
:type bot_registry: fortrace.core.bmoncomponents.botregistry.BotRegistry
"""
LoggerBase.__init__(self, "GroupManager")
self.br = bot_registry
self.pr = payload_registry
self.items = dict()
self.current_index = 1
def setup_group(self, group_name, payload_file_name):
""" Registers a group associated with a payload.
:type payload_file_name: str
:type group_name: str
:param group_name: a symbolic group name
:param payload_file_name: the path of a payload
"""
self.items[group_name] = self.current_index
internal_group = "group-" + str(self.current_index)
self.current_index += 1
self.pr.register_payload(payload_file_name, internal_group)
self.logger.info("added new group <%s>: %s", internal_group, group_name)
# print "added new group <" + internal_group + ">: " + group_name
def __add_bot_to_group_by_guest_object(self, group_name, guest_object):
""" Adds a bot to a group by using a Guest object
:type guest_object: fortrace.core.guest.Guest
:type group_name: str
:param group_name: a symbolic group name
:param guest_object: a Guest instance
"""
self.br.reserve_for_bot(str(guest_object.ip_local), self.get_group_id(group_name))
def __add_bot_to_group_by_ip(self, group_name, ip_address):
""" Adds a bot to a group by using an ip address
:type ip_address: str
:type group_name: str
:param group_name: a symbolic group name
:param ip_address: ip address of the bot
"""
self.br.reserve_for_bot(ip_address, self.get_group_id(group_name))
def add_bot_to_group(self, group, guest_or_ip):
""" Adds a bot to a group by using either a guest object or an ip address
:type guest_or_ip: fortrace.core.guest.Guest | str
:type group: str
:param group: group name
:param guest_or_ip: a guest object or an ip that should be added to group
"""
if isinstance(guest_or_ip, Guest):
self.__add_bot_to_group_by_guest_object(group, guest_or_ip)
else:
self.__add_bot_to_group_by_ip(group, guest_or_ip)
def get_group_id(self, group_name):
""" Returns the internal group identifier.
:rtype : int
:type group_name: str
:param group_name: a symbolic group name
:return: the internal group identifier
"""
return self.items[group_name]
def get_bots_by_group_name(self, group_name):
""" Gets a list of bots in group.
:type group_name: str
:param group_name: a symbolic group name
:return: a list of bots
"""
return self.br.get_promoted_bot_list(self.get_group_id(group_name))
def get_unmanaged_bots(self):
""" Gets a list of bots that are in neither group.
:return: a list of bots
"""
return self.br.get_promoted_unmanaged_bot_list()
def get_single_bot(self, guest_or_ip):
"""
:type guest_or_ip: fortrace.core.guest.Guest | str
:param guest_or_ip: the guest object to resolve
:return: the requested bot object
"""
if isinstance(guest_or_ip, Guest):
return self.br.promote_bot(str(guest_or_ip.ip_local))
else:
return self.br.promote_bot(guest_or_ip)
def __getitem__(self, item):
return self.get_group_id(item) | src/fortrace/botnet/core/bmoncomponents/groupmanager.py | from __future__ import absolute_import
from fortrace.botnet.common.loggerbase import LoggerBase
from fortrace.core.guest import Guest
__author__ = '<NAME>'
class GroupManager(LoggerBase):
""" Manages symbolic names.
Bridge between symbolic names and bot registry as well as the payload registry.
Replaces access to the bot registry and the payload registry.
:param bot_registry: an instance of a bot registry
:param payload_registry: an instance of a payload_registry
"""
def __init__(self, bot_registry, payload_registry):
""" The init method.
:type payload_registry: fortrace.core.bmoncomponents.payloadregistry.PayloadRegistry
:type bot_registry: fortrace.core.bmoncomponents.botregistry.BotRegistry
"""
LoggerBase.__init__(self, "GroupManager")
self.br = bot_registry
self.pr = payload_registry
self.items = dict()
self.current_index = 1
def setup_group(self, group_name, payload_file_name):
""" Registers a group associated with a payload.
:type payload_file_name: str
:type group_name: str
:param group_name: a symbolic group name
:param payload_file_name: the path of a payload
"""
self.items[group_name] = self.current_index
internal_group = "group-" + str(self.current_index)
self.current_index += 1
self.pr.register_payload(payload_file_name, internal_group)
self.logger.info("added new group <%s>: %s", internal_group, group_name)
# print "added new group <" + internal_group + ">: " + group_name
def __add_bot_to_group_by_guest_object(self, group_name, guest_object):
""" Adds a bot to a group by using a Guest object
:type guest_object: fortrace.core.guest.Guest
:type group_name: str
:param group_name: a symbolic group name
:param guest_object: a Guest instance
"""
self.br.reserve_for_bot(str(guest_object.ip_local), self.get_group_id(group_name))
def __add_bot_to_group_by_ip(self, group_name, ip_address):
""" Adds a bot to a group by using an ip address
:type ip_address: str
:type group_name: str
:param group_name: a symbolic group name
:param ip_address: ip address of the bot
"""
self.br.reserve_for_bot(ip_address, self.get_group_id(group_name))
def add_bot_to_group(self, group, guest_or_ip):
""" Adds a bot to a group by using either a guest object or an ip address
:type guest_or_ip: fortrace.core.guest.Guest | str
:type group: str
:param group: group name
:param guest_or_ip: a guest object or an ip that should be added to group
"""
if isinstance(guest_or_ip, Guest):
self.__add_bot_to_group_by_guest_object(group, guest_or_ip)
else:
self.__add_bot_to_group_by_ip(group, guest_or_ip)
def get_group_id(self, group_name):
""" Returns the internal group identifier.
:rtype : int
:type group_name: str
:param group_name: a symbolic group name
:return: the internal group identifier
"""
return self.items[group_name]
def get_bots_by_group_name(self, group_name):
""" Gets a list of bots in group.
:type group_name: str
:param group_name: a symbolic group name
:return: a list of bots
"""
return self.br.get_promoted_bot_list(self.get_group_id(group_name))
def get_unmanaged_bots(self):
""" Gets a list of bots that are in neither group.
:return: a list of bots
"""
return self.br.get_promoted_unmanaged_bot_list()
def get_single_bot(self, guest_or_ip):
"""
:type guest_or_ip: fortrace.core.guest.Guest | str
:param guest_or_ip: the guest object to resolve
:return: the requested bot object
"""
if isinstance(guest_or_ip, Guest):
return self.br.promote_bot(str(guest_or_ip.ip_local))
else:
return self.br.promote_bot(guest_or_ip)
def __getitem__(self, item):
return self.get_group_id(item) | 0.560373 | 0.187821 |
from __future__ import absolute_import, print_function
import logging
import threading
import requests
from . import wake_on_lan
from .utils import LogIt, LogItWithReturn
logger = logging.getLogger('samsungctl')
class WebSocketBase(object):
"""Base class for TV's with websocket connection."""
@LogIt
def __init__(self, config):
"""
Constructor.
:param config: TV configuration settings. see `samsungctl.Config` for further details
:type config: `dict` or `samsungctl.Config` instance
"""
self.config = config
self.sock = None
self._loop_event = threading.Event()
self._registered_callbacks = []
self._starting = False
self._running = False
self._thread = None
try:
requests.get(
'http://{0}:8001/api/v2/'.format(self.config.host),
timeout=3
)
self.open()
except (
requests.HTTPError,
requests.exceptions.ConnectTimeout,
requests.exceptions.ConnectionError
):
pass
@property
@LogItWithReturn
def mac_address(self):
"""
MAC Address.
**Get:** Gets the MAC address.
*Returns:* None or the MAC address of the TV formatted ``"00:00:00:00:00"``
*Return type:* `None` or `str`
"""
if self.config.mac is None:
self.config.mac = wake_on_lan.get_mac_address(self.config.host)
if self.config.mac is None:
if not self.power:
logger.error('Unable to acquire MAC address')
return self.config.mac
def on_message(self, _):
pass
@LogIt
def close(self):
"""Close the connection."""
if self.sock is not None:
self._loop_event.set()
self.sock.close()
if self._thread is not None:
self._thread.join(3.0)
if self._thread is not None:
raise RuntimeError('Loop thread did not properly terminate')
def loop(self):
self._running = True
while not self._loop_event.isSet():
try:
data = self.sock.recv()
if data:
self.on_message(data)
except:
self.sock = None
del self._registered_callbacks[:]
logger.info('Websocket closed')
while self.sock is None and not self._loop_event.isSet():
if not self._starting:
try:
self.open()
except:
self._loop_event.wait(1.0)
else:
self._loop_event.wait(1.0)
self._running = False
self._loop_event.clear()
self._thread = None
    @property
    def artmode(self):
        """Art-mode state; this base class always reports None."""
        return None

    @artmode.setter
    def artmode(self, value):
        # Art mode is not supported by the base class; writes are ignored.
        pass
@LogItWithReturn
def power(self):
return self.sock is not None
    def control(self, *_):
        """Send a remote-control command; implemented by subclasses."""
        raise NotImplementedError
    def open(self):
        """Open the websocket connection; implemented by subclasses."""
        raise NotImplementedError
    def __enter__(self):
        """
        Open the connection to the TV. use in a `with` statement

        >>> with samsungctl.Remote(config) as remote:
        >>>     remote.KEY_MENU()

        :return: self
        :rtype: :class: `samsungctl.Remote` instance
        """
        self.open()
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Close the connection when leaving a `with` statement.

        see `samsungctl.Remote.__enter__` for more information
        :param exc_type: Not Used
        :param exc_val: Not Used
        :param exc_tb: Not Used
        :return: `None`
        """
        self.close() | samsungctl/websocket_base.py |
from __future__ import absolute_import, print_function
import logging
import threading
import requests
from . import wake_on_lan
from .utils import LogIt, LogItWithReturn
logger = logging.getLogger('samsungctl')
class WebSocketBase(object):
"""Base class for TV's with websocket connection."""
@LogIt
def __init__(self, config):
"""
Constructor.
:param config: TV configuration settings. see `samsungctl.Config` for further details
:type config: `dict` or `samsungctl.Config` instance
"""
self.config = config
self.sock = None
self._loop_event = threading.Event()
self._registered_callbacks = []
self._starting = False
self._running = False
self._thread = None
try:
requests.get(
'http://{0}:8001/api/v2/'.format(self.config.host),
timeout=3
)
self.open()
except (
requests.HTTPError,
requests.exceptions.ConnectTimeout,
requests.exceptions.ConnectionError
):
pass
@property
@LogItWithReturn
def mac_address(self):
"""
MAC Address.
**Get:** Gets the MAC address.
*Returns:* None or the MAC address of the TV formatted ``"00:00:00:00:00"``
*Return type:* `None` or `str`
"""
if self.config.mac is None:
self.config.mac = wake_on_lan.get_mac_address(self.config.host)
if self.config.mac is None:
if not self.power:
logger.error('Unable to acquire MAC address')
return self.config.mac
def on_message(self, _):
pass
@LogIt
def close(self):
"""Close the connection."""
if self.sock is not None:
self._loop_event.set()
self.sock.close()
if self._thread is not None:
self._thread.join(3.0)
if self._thread is not None:
raise RuntimeError('Loop thread did not properly terminate')
def loop(self):
self._running = True
while not self._loop_event.isSet():
try:
data = self.sock.recv()
if data:
self.on_message(data)
except:
self.sock = None
del self._registered_callbacks[:]
logger.info('Websocket closed')
while self.sock is None and not self._loop_event.isSet():
if not self._starting:
try:
self.open()
except:
self._loop_event.wait(1.0)
else:
self._loop_event.wait(1.0)
self._running = False
self._loop_event.clear()
self._thread = None
@property
def artmode(self):
return None
@artmode.setter
def artmode(self, value):
pass
@LogItWithReturn
def power(self):
return self.sock is not None
def control(self, *_):
raise NotImplementedError
def open(self):
raise NotImplementedError
def __enter__(self):
"""
Open the connection to the TV. use in a `with` statement
>>> with samsungctl.Remote(config) as remote:
>>> remote.KEY_MENU()
:return: self
:rtype: :class: `samsungctl.Remote` instance
"""
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
This gets called automatically when exiting a `with` statement
see `samsungctl.Remote.__enter__` for more information
:param exc_type: Not Used
:param exc_val: Not Used
:param exc_tb: Not Used
:return: `None`
"""
self.close() | 0.736874 | 0.069258 |
import json
from vutil import *
import os
from vulkan import *
from buffer import *
from pathlib import Path
here = os.path.dirname(os.path.abspath(__file__))
class Stage(Sinode):
    def __init__(self, pipeline, setupDict):
        """Build one shader stage.

        Assembles GLSL source (header + generated struct declarations +
        buffer declarations + main body), compiles it with glslc, and wraps
        the resulting SPIR-V in a VkShaderModule and a shader-stage create
        info.

        :param pipeline: owning pipeline (supplies device, instance, buffers)
        :param setupDict: stage description (header/main paths, buffers, ...)
        """
        Sinode.__init__(self, pipeline)
        self.vkInstance = pipeline.instance.vkInstance
        self.vkDevice = pipeline.device.vkDevice
        self.setupDict = setupDict
        self.pipeline = pipeline
        self.outputWidthPixels = setupDict["outputWidthPixels"]
        self.outputHeightPixels = setupDict["outputHeightPixels"]
        self.stage = setupDict["stage"]
        print("creating Stage with description")
        print(json.dumps(setupDict, indent=4))
        # attributes are ex. location, normal, color
        self.buffers = {}
        # Start from the stage's GLSL header file ...
        with open(setupDict["header"]) as f:
            shader_spirv = f.read()
        shader_spirv += "\n"
        # ... then emit a GLSL struct declaration for every derived type.
        with open(os.path.join(here, "derivedtypes.json"), 'r') as f:
            derivedDict = json.loads(f.read())
        for structName, composeDict in derivedDict.items():
            shader_spirv += "struct " + structName + "\n"
            shader_spirv += "{\n"
            for name, ctype in composeDict.items():
                shader_spirv += "    " + ctype + " " + name + ";\n"
            shader_spirv += "};\n\n"
        location = 0
        # novel INPUT buffers belong to THIS Stage (others are linked)
        for bufferName, bufferDict in setupDict["buffers"].items():
            if type(bufferDict) is not dict:
                continue
            # Fill in fields missing from this buffer with the defaults.
            for k, v in setupDict["defaultbuffer"].items():
                if k not in bufferDict.keys():
                    bufferDict[k] = v
            # Link against an existing pipeline buffer of the same name.
            bufferMatch = False
            for existingBuffer in pipeline.getAllBuffers():
                print(bufferDict["name"] + " : " + existingBuffer.setupDict["name"])
                if bufferDict["name"] == existingBuffer.setupDict["name"]:
                    print(bufferDict["name"] + " exists already. linking")
                    bufferMatch = existingBuffer
            if bufferMatch:
                # Linked buffer: merge this stage's settings into it.
                for k, v in bufferDict.items():
                    bufferMatch.setupDict[k] = v
                shader_spirv += bufferMatch.getDeclaration()
            else:
                # New buffer: assign the next free location slot.
                bufferDict["location"] = location
                location += self.getSize(bufferDict["type"])
                if "vertex" in setupDict["name"]:
                    newBuffer = VertexBuffer(pipeline.device, bufferDict)
                else:
                    newBuffer = Buffer(pipeline.device, bufferDict)
                shader_spirv += newBuffer.getDeclaration()
                self.buffers[bufferName] = newBuffer
                self.children += [newBuffer]
                if bufferDict["name"] == "INDEX":
                    self.pipeline.indexBuffer = newBuffer
        # Append the stage's main GLSL body.
        with open(setupDict["main"]) as f:
            shader_spirv += f.read()
        #print("---final Stage code---")
        #print(shader_spirv)
        #print("--- end Stage code ---")
        print("compiling Stage")
        compStagesPath = os.path.join(here, "compiledStages")
        compStagesPath = "compiledStages"  # NOTE(review): overrides the line above
        Path(compStagesPath).mkdir(parents=True, exist_ok=True)
        basename = os.path.basename(setupDict["header"])
        # Write the assembled GLSL out and compile it with glslc
        # (assumed to be on PATH).
        outfilename = os.path.join(compStagesPath, basename.replace(".header", ""))
        with open(outfilename, 'w+') as f:
            f.write(shader_spirv)
        os.system("glslc " + outfilename)
        # POS always outputs to "a.spv"
        with open("a.spv", 'rb') as f:
            shader_spirv = f.read()
        # Create Stage
        self.shader_create = VkShaderModuleCreateInfo(
            sType=VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
            flags=0,
            codeSize=len(shader_spirv),
            pCode=shader_spirv
        )
        self.vkShaderModule = vkCreateShaderModule(self.vkDevice, self.shader_create, None)
        # Create Shader stage
        self.shader_stage_create = VkPipelineShaderStageCreateInfo(
            sType=VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
            stage=eval(setupDict["stage"]),  # NOTE(review): eval of a config value -- trusted input only
            module=self.vkShaderModule,
            flags=0,
            pSpecializationInfo=None,
            pName='main')
def getSize(self, bufftype):
with open(os.path.join(here, "derivedtypes.json"), 'r') as f:
derivedDict = json.loads(f.read())
with open(os.path.join(here, "ctypes.json"), 'r') as f:
cDict = json.loads(f.read())
size = 0
if bufftype in derivedDict.keys():
for subtype in derivedDict[bufftype]:
size += self.getSize(subtype)
else:
size += 1
return size
def getVertexBuffers(self):
allVertexBuffers = []
for b in self.buffers.values():
if type(b) == VertexBuffer:
allVertexBuffers += [b]
return allVertexBuffers
    def release(self):
        # Tear down children first, then destroy the shader module handle.
        print("destroying Stage")
        Sinode.release(self)
        vkDestroyShaderModule(self.vkDevice, self.vkShaderModule, None) | vulkanese/stage.py | import json
from vutil import *
import os
from vulkan import *
from buffer import *
from pathlib import Path
here = os.path.dirname(os.path.abspath(__file__))
class Stage(Sinode):
def __init__(self, pipeline, setupDict):
Sinode.__init__(self, pipeline)
self.vkInstance = pipeline.instance.vkInstance
self.vkDevice = pipeline.device.vkDevice
self.setupDict = setupDict
self.pipeline = pipeline
self.outputWidthPixels = setupDict["outputWidthPixels"]
self.outputHeightPixels = setupDict["outputHeightPixels"]
self.stage = setupDict["stage"]
print("creating Stage with description")
print(json.dumps(setupDict, indent=4))
# attributes are ex. location, normal, color
self.buffers = {}
with open(setupDict["header"]) as f:
shader_spirv = f.read()
shader_spirv += "\n"
with open(os.path.join(here, "derivedtypes.json"), 'r') as f:
derivedDict = json.loads(f.read())
for structName, composeDict in derivedDict.items():
shader_spirv += "struct " + structName + "\n"
shader_spirv += "{\n"
for name, ctype in composeDict.items():
shader_spirv += " " + ctype + " " + name + ";\n"
shader_spirv += "};\n\n"
location = 0
# novel INPUT buffers belong to THIS Stage (others are linked)
for bufferName, bufferDict in setupDict["buffers"].items():
if type(bufferDict) is not dict:
continue
for k, v in setupDict["defaultbuffer"].items():
if k not in bufferDict.keys():
bufferDict[k] = v
bufferMatch = False
for existingBuffer in pipeline.getAllBuffers():
print(bufferDict["name"] + " : " + existingBuffer.setupDict["name"])
if bufferDict["name"] == existingBuffer.setupDict["name"]:
print(bufferDict["name"] + " exists already. linking")
bufferMatch = existingBuffer
if bufferMatch:
for k, v in bufferDict.items():
bufferMatch.setupDict[k] = v
shader_spirv += bufferMatch.getDeclaration()
else:
bufferDict["location"] = location
location += self.getSize(bufferDict["type"])
if "vertex" in setupDict["name"]:
newBuffer = VertexBuffer(pipeline.device, bufferDict)
else:
newBuffer = Buffer(pipeline.device, bufferDict)
shader_spirv += newBuffer.getDeclaration()
self.buffers [bufferName] = newBuffer
self.children += [newBuffer]
if bufferDict["name"] == "INDEX":
self.pipeline.indexBuffer = newBuffer
with open(setupDict["main"]) as f:
shader_spirv += f.read()
#print("---final Stage code---")
#print(shader_spirv)
#print("--- end Stage code ---")
print("compiling Stage")
compStagesPath = os.path.join(here, "compiledStages")
compStagesPath = "compiledStages"
Path(compStagesPath).mkdir(parents=True, exist_ok=True)
basename = os.path.basename(setupDict["header"])
outfilename = os.path.join(compStagesPath, basename.replace(".header", ""))
with open(outfilename, 'w+') as f:
f.write(shader_spirv)
os.system("glslc " + outfilename)
# POS always outputs to "a.spv"
with open("a.spv", 'rb') as f:
shader_spirv = f.read()
# Create Stage
self.shader_create = VkShaderModuleCreateInfo(
sType=VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
flags=0,
codeSize=len(shader_spirv),
pCode=shader_spirv
)
self.vkShaderModule = vkCreateShaderModule(self.vkDevice, self.shader_create, None)
# Create Shader stage
self.shader_stage_create = VkPipelineShaderStageCreateInfo(
sType=VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
stage=eval(setupDict["stage"]),
module=self.vkShaderModule,
flags=0,
pSpecializationInfo=None,
pName='main')
def getSize(self, bufftype):
with open(os.path.join(here, "derivedtypes.json"), 'r') as f:
derivedDict = json.loads(f.read())
with open(os.path.join(here, "ctypes.json"), 'r') as f:
cDict = json.loads(f.read())
size = 0
if bufftype in derivedDict.keys():
for subtype in derivedDict[bufftype]:
size += self.getSize(subtype)
else:
size += 1
return size
def getVertexBuffers(self):
allVertexBuffers = []
for b in self.buffers.values():
if type(b) == VertexBuffer:
allVertexBuffers += [b]
return allVertexBuffers
def release(self):
print("destroying Stage")
Sinode.release(self)
vkDestroyShaderModule(self.vkDevice, self.vkShaderModule, None) | 0.107907 | 0.084342 |
class FiniteField:
    """Finite field GF(p): modular inverse and modular square root."""

    def __init__(self, p):
        # p is assumed to be prime (p == 2 is special-cased in modSqrt).
        self.p = p

    def xgcd(self, a, b):
        """Extended Euclid: return (g, x, y) with g = gcd(a, b) = a*x + b*y."""
        x0, x1, y0, y1 = 1, 0, 0, 1
        while b != 0:
            q, a, b = a // b, b, a % b
            x0, x1 = x1, x0 - q * x1
            y0, y1 = y1, y0 - q * y1
        return a, x0, y0

    def modInv(self, a):
        """Return a**-1 mod p; raises if a is not invertible."""
        g, x, y = self.xgcd(a, self.p)
        if g != 1:
            raise Exception("逆数が存在しません")
        return x % self.p

    def legendreSymbol(self, a):
        """Legendre symbol (a/p): 1 residue, -1 non-residue, 0 if a ≡ 0."""
        # Euler's criterion; floor division keeps this correct on Python 3
        # (true division produced a float exponent and a TypeError).
        ls = pow(a, (self.p - 1) // 2, self.p)
        return -1 if ls == self.p - 1 else ls

    def modSqrt(self, a):
        """Return one square root of a mod p (Tonelli-Shanks)."""
        L = self.legendreSymbol(a)
        if L == -1:
            raise Exception("根が存在しません")
        elif L == 0:
            return 0
        elif self.p == 2:
            return a  # a=1 -> root 1
        # Write p-1 = s * 2^e with s odd.
        # BUG FIX: the factor-of-two count must start from p-1 itself;
        # the original started from (p-1) >> 1 with e = 0, undercounting e
        # by one and returning wrong roots whenever p ≡ 1 (mod 4)
        # (e.g. modSqrt(4) mod 17 returned 6, but 6*6 ≡ 2, not 4).
        s = self.p - 1
        e = 0
        while s & 1 == 0:
            s >>= 1
            e += 1
        # Find a quadratic non-residue n, i.e. legendre(n/p) == -1.
        n = 2
        while self.legendreSymbol(n) != -1:
            n += 1
        # Tonelli-Shanks main loop.
        x = pow(a, (s + 1) // 2, self.p)
        b = pow(a, s, self.p)
        g = pow(n, s, self.p)
        while True:
            # Find the least m with b^(2^m) == 1.
            t, m = b, 0
            for m in range(e):
                if t == 1:
                    break
                t = pow(t, 2, self.p)
            if m == 0:
                return x
            gs = pow(g, 2 ** (e - m - 1), self.p)
            g = (gs ** 2) % self.p
            x = (x * gs) % self.p
            b = (b * g) % self.p
            e = m
class Point:
    """A point on an elliptic curve (x == -1 encodes the point at infinity O)."""

    def __init__(self, EC, x, y):
        self.EC = EC
        self.F = self.EC.F
        self.x = x
        self.y = y

    def __add__(self, Q):
        """Elliptic-curve point addition."""
        if not isinstance(Q, Point):
            # BUG FIX: `raise("...")` tried to raise a plain string, which is
            # itself a TypeError; raise a proper exception instead.
            raise TypeError("無効な型との加算です")
        # Points must live on curves over the same field.
        if Q.EC.F.p != self.EC.F.p:
            raise ValueError("異なる楕円曲線上の点どうしの加算です")
        # Identity element: P + O = P.
        if Q.x == -1:
            return self
        if self.x == -1:
            return Q
        if self.x == Q.x:
            if self.y == Q.y:
                # Doubling R = P + P: tangent-line slope.
                m = (3 * Q.x * Q.x + self.EC.A) * self.F.modInv(2 * Q.y) % self.F.p
            else:
                # R = P + (-P) = O
                return self.EC.O
        else:
            # Generic case R = P + Q: chord-line slope.
            m = (Q.y - self.y) * self.F.modInv(Q.x - self.x) % self.F.p
        x = (m * m - self.x - Q.x) % self.F.p
        y = (m * (self.x - x) - self.y) % self.F.p
        return Point(self.EC, x, y)

    def __mul__(self, n):
        """Scalar multiplication n*P by double-and-add."""
        if not isinstance(n, int):
            raise TypeError("無効な型との乗算です")
        Q = self.EC.O
        R = self
        while n > 0:
            if n & 1 == 1:
                Q += R
            R += R
            n >>= 1
        return Q

    def __str__(self):
        return "({0}, {1})".format(self.x, self.y)
class EllipticCurve:
    """Short Weierstrass curve y^2 = x^3 + A*x + B over GF(p)."""

    def __init__(self, F, tup):
        self.F = F
        self.A, self.B = tup
        # Discriminant (up to a constant factor); zero means a singular curve.
        self.D = (4 * pow(self.A, 3) + 27 * pow(self.B, 2)) % self.F.p
        if self.D == 0:
            # BUG FIX: `raise("...")` raised a plain string (itself a
            # TypeError); raise a real exception for a singular curve.
            raise ValueError("判別式が0になります")
        # Point at infinity, encoded as (-1, -1).
        self.O = Point(self, -1, -1)

    def f(self, x):
        """Return a y with (x, y) on the curve (one root of x^3 + Ax + B)."""
        y2 = (pow(x, 3) + self.A * x + self.B) % self.F.p
        y = self.F.modSqrt(y2)
        return y

    def __str__(self):
        return "EC: y^2 = x^3 + {A}x + {B} mod {p}".format(
            A=self.A, B=self.B, p=self.F.p
        )
if __name__ == '__main__':
    p = 17
    # Build the curve y^2 = x^3 + x + 2 over GF(17).
    F = FiniteField(p)
    A, B = 1, 2
    EC = EllipticCurve(F, (A, B))
    # Demo: derive two points from their x-coordinates, then exercise
    # addition, doubling and scalar multiplication.
    O = EC.O
    P = Point(EC, 0, EC.f(0)) # P(0, 4)
    Q = Point(EC, 1, EC.f(1)) # Q(1, 16)
    print(EC)
    print("P = " + str(P))
    print("Q = " + str(Q))
    print("P + Q = " + str(P + Q))
    print("P + P = " + str(P + P))
    print("2P = " + str(P*2))
    print("10P = " + str(P*10)) | Gathered CTF writeups/ptr-yudai-writeups/2019/Newbie_CTF_2019/is_it_ecc/EllipticCurve.py |
class FiniteField:
    """Finite field GF(p): modular inverse and modular square root."""

    def __init__(self, p):
        # p is assumed to be prime (p == 2 is special-cased in modSqrt).
        self.p = p

    def xgcd(self, a, b):
        """Extended Euclid: return (g, x, y) with g = gcd(a, b) = a*x + b*y."""
        x0, x1, y0, y1 = 1, 0, 0, 1
        while b != 0:
            q, a, b = a // b, b, a % b
            x0, x1 = x1, x0 - q * x1
            y0, y1 = y1, y0 - q * y1
        return a, x0, y0

    def modInv(self, a):
        """Return a**-1 mod p; raises if a is not invertible."""
        g, x, y = self.xgcd(a, self.p)
        if g != 1:
            raise Exception("逆数が存在しません")
        return x % self.p

    def legendreSymbol(self, a):
        """Legendre symbol (a/p): 1 residue, -1 non-residue, 0 if a ≡ 0."""
        # Euler's criterion; floor division keeps this correct on Python 3
        # (true division produced a float exponent and a TypeError).
        ls = pow(a, (self.p - 1) // 2, self.p)
        return -1 if ls == self.p - 1 else ls

    def modSqrt(self, a):
        """Return one square root of a mod p (Tonelli-Shanks)."""
        L = self.legendreSymbol(a)
        if L == -1:
            raise Exception("根が存在しません")
        elif L == 0:
            return 0
        elif self.p == 2:
            return a  # a=1 -> root 1
        # Write p-1 = s * 2^e with s odd.
        # BUG FIX: the factor-of-two count must start from p-1 itself;
        # the original started from (p-1) >> 1 with e = 0, undercounting e
        # by one and returning wrong roots whenever p ≡ 1 (mod 4)
        # (e.g. modSqrt(4) mod 17 returned 6, but 6*6 ≡ 2, not 4).
        s = self.p - 1
        e = 0
        while s & 1 == 0:
            s >>= 1
            e += 1
        # Find a quadratic non-residue n, i.e. legendre(n/p) == -1.
        n = 2
        while self.legendreSymbol(n) != -1:
            n += 1
        # Tonelli-Shanks main loop.
        x = pow(a, (s + 1) // 2, self.p)
        b = pow(a, s, self.p)
        g = pow(n, s, self.p)
        while True:
            # Find the least m with b^(2^m) == 1.
            t, m = b, 0
            for m in range(e):
                if t == 1:
                    break
                t = pow(t, 2, self.p)
            if m == 0:
                return x
            gs = pow(g, 2 ** (e - m - 1), self.p)
            g = (gs ** 2) % self.p
            x = (x * gs) % self.p
            b = (b * g) % self.p
            e = m
class Point:
    """A point on an elliptic curve (x == -1 encodes the point at infinity O)."""

    def __init__(self, EC, x, y):
        self.EC = EC
        self.F = self.EC.F
        self.x = x
        self.y = y

    def __add__(self, Q):
        """Elliptic-curve point addition."""
        if not isinstance(Q, Point):
            # BUG FIX: `raise("...")` tried to raise a plain string, which is
            # itself a TypeError; raise a proper exception instead.
            raise TypeError("無効な型との加算です")
        # Points must live on curves over the same field.
        if Q.EC.F.p != self.EC.F.p:
            raise ValueError("異なる楕円曲線上の点どうしの加算です")
        # Identity element: P + O = P.
        if Q.x == -1:
            return self
        if self.x == -1:
            return Q
        if self.x == Q.x:
            if self.y == Q.y:
                # Doubling R = P + P: tangent-line slope.
                m = (3 * Q.x * Q.x + self.EC.A) * self.F.modInv(2 * Q.y) % self.F.p
            else:
                # R = P + (-P) = O
                return self.EC.O
        else:
            # Generic case R = P + Q: chord-line slope.
            m = (Q.y - self.y) * self.F.modInv(Q.x - self.x) % self.F.p
        x = (m * m - self.x - Q.x) % self.F.p
        y = (m * (self.x - x) - self.y) % self.F.p
        return Point(self.EC, x, y)

    def __mul__(self, n):
        """Scalar multiplication n*P by double-and-add."""
        if not isinstance(n, int):
            raise TypeError("無効な型との乗算です")
        Q = self.EC.O
        R = self
        while n > 0:
            if n & 1 == 1:
                Q += R
            R += R
            n >>= 1
        return Q

    def __str__(self):
        return "({0}, {1})".format(self.x, self.y)
class EllipticCurve:
    """Short Weierstrass curve y^2 = x^3 + A*x + B over GF(p)."""

    def __init__(self, F, tup):
        self.F = F
        self.A, self.B = tup
        # Discriminant (up to a constant factor); zero means a singular curve.
        self.D = (4 * pow(self.A, 3) + 27 * pow(self.B, 2)) % self.F.p
        if self.D == 0:
            # BUG FIX: `raise("...")` raised a plain string (itself a
            # TypeError); raise a real exception for a singular curve.
            raise ValueError("判別式が0になります")
        # Point at infinity, encoded as (-1, -1).
        self.O = Point(self, -1, -1)

    def f(self, x):
        """Return a y with (x, y) on the curve (one root of x^3 + Ax + B)."""
        y2 = (pow(x, 3) + self.A * x + self.B) % self.F.p
        y = self.F.modSqrt(y2)
        return y

    def __str__(self):
        return "EC: y^2 = x^3 + {A}x + {B} mod {p}".format(
            A=self.A, B=self.B, p=self.F.p
        )
if __name__ == '__main__':
p = 17
# 楕円曲線
F = FiniteField(p)
A, B = 1, 2
EC = EllipticCurve(F, (A, B))
# 例
O = EC.O
P = Point(EC, 0, EC.f(0)) # P(0, 4)
Q = Point(EC, 1, EC.f(1)) # Q(1, 16)
print(EC)
print("P = " + str(P))
print("Q = " + str(Q))
print("P + Q = " + str(P + Q))
print("P + P = " + str(P + P))
print("2P = " + str(P*2))
print("10P = " + str(P*10)) | 0.500488 | 0.464719 |
"""Aggregate per-trial summary files into mean/std plots and .npy dumps.

--result_dir is expected to contain numbered sub-directories (one per
trial), each holding a summary_files/ directory of .txt metrics.
"""
import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from pathlib import Path
import re
#%% Command-line arguments.
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--result_dir", type=Path, required=True)
parser.add_argument("--figure_dir", type=Path, default="./figures")
parser.add_argument("--summary_dir", type=Path, default="./summary_files")
args = parser.parse_args()
#%% Collect the numbered trial directories.
# NOTE(review): sorts by the name *string*, so "10" orders before "2";
# confirm whether numeric order is intended.
dirs = [dir for dir in args.result_dir.iterdir() if dir.is_dir() and re.match(r"^[0-9]+$", dir.stem)]
dirs.sort(key=lambda dir: dir.stem)
#%% Output directories.
figure_dir = args.figure_dir
figure_dir.mkdir(exist_ok=True, parents=True)
summary_dir = args.summary_dir
summary_dir.mkdir(exist_ok=True, parents=True)
#%% Size the result arrays from the first trial's files.
print("Initialize variables....")
N = len(dirs)
tmp = np.loadtxt(dirs[0] / "summary_files/resample_times.txt")
T = tmp.shape[0]
# Speaker-individual metrics are optional; detect them from the first trial.
spkind = False
if (dirs[0] / "summary_files/Speaker_individual_letter_ARI.txt").exists():
    tmp = np.loadtxt(dirs[0] / "summary_files/Speaker_individual_letter_ARI.txt")
    spk_num = tmp.shape[0]
    spkind = True
resample_times = np.empty((N, T))
log_likelihoods = np.empty((N, T+1))
letter_ARIs = np.empty((N, T))
letter_macro_f1_scores = np.empty((N, T))
letter_micro_f1_scores = np.empty((N, T))
word_ARIs = np.empty((N, T))
word_macro_f1_scores = np.empty((N, T))
word_micro_f1_scores = np.empty((N, T))
if spkind:
    spkind_letter_ARIs = np.empty((N, spk_num, T))
    spkind_word_ARIs = np.empty((N, spk_num, T))
print("Done!")
#%% Load every trial's metric files into the stacked arrays.
print("Loading results....")
for i, dir in enumerate(dirs):
    resample_times[i] = np.loadtxt(dir / "summary_files/resample_times.txt")
    log_likelihoods[i] = np.loadtxt(dir / "summary_files/log_likelihood.txt")
    letter_ARIs[i] = np.loadtxt(dir / "summary_files/Letter_ARI.txt")
    letter_macro_f1_scores[i] = np.loadtxt(dir / "summary_files/Letter_macro_F1_score.txt")
    letter_micro_f1_scores[i] = np.loadtxt(dir / "summary_files/Letter_micro_F1_score.txt")
    word_ARIs[i] = np.loadtxt(dir / "summary_files/Word_ARI.txt")
    word_macro_f1_scores[i] = np.loadtxt(dir / "summary_files/Word_macro_F1_score.txt")
    word_micro_f1_scores[i] = np.loadtxt(dir / "summary_files/Word_micro_F1_score.txt")
    if spkind:
        spkind_letter_ARIs[i] = np.loadtxt(dir / "summary_files/Speaker_individual_letter_ARI.txt")
        spkind_word_ARIs[i] = np.loadtxt(dir / "summary_files/Speaker_individual_word_ARI.txt")
print("Done!")
#%% Plot mean +/- std across trials for each metric.
print("Ploting...")
plt.clf()
plt.errorbar(range(T), resample_times.mean(axis=0), yerr=resample_times.std(axis=0))
plt.xlabel("Iteration")
plt.ylabel("Execution time [sec]")
plt.title("Transitions of the execution time")
plt.savefig(figure_dir / "summary_of_execution_time.png")
plt.clf()
plt.errorbar(range(T+1), log_likelihoods.mean(axis=0), yerr=log_likelihoods.std(axis=0))
plt.xlabel("Iteration")
plt.ylabel("Log likelihood")
plt.title("Transitions of the log likelihood")
plt.savefig(figure_dir / "summary_of_log_likelihood.png")
plt.clf()
plt.errorbar(range(T), word_ARIs.mean(axis=0), yerr=word_ARIs.std(axis=0), label="Word ARI")
plt.errorbar(range(T), letter_ARIs.mean(axis=0), yerr=letter_ARIs.std(axis=0), label="Letter ARI")
plt.xlabel("Iteration")
plt.ylabel("ARI")
plt.title("Transitions of the ARI")
plt.legend()
plt.savefig(figure_dir / "summary_of_ARI.png")
plt.clf()
plt.errorbar(range(T), word_macro_f1_scores.mean(axis=0), yerr=word_macro_f1_scores.std(axis=0), label="Word macro F1")
plt.errorbar(range(T), letter_macro_f1_scores.mean(axis=0), yerr=letter_macro_f1_scores.std(axis=0), label="Letter macro F1")
plt.xlabel("Iteration")
plt.ylabel("Macro F1 score")
plt.title("Transitions of the macro F1 score")
plt.legend()
plt.savefig(figure_dir / "summary_of_macro_F1_score.png")
plt.clf()
plt.errorbar(range(T), word_micro_f1_scores.mean(axis=0), yerr=word_micro_f1_scores.std(axis=0), label="Word micro F1")
plt.errorbar(range(T), letter_micro_f1_scores.mean(axis=0), yerr=letter_micro_f1_scores.std(axis=0), label="Letter micro F1")
plt.xlabel("Iteration")
plt.ylabel("Micro F1 score")
plt.title("Transitions of the micro F1 score")
plt.legend()
plt.savefig(figure_dir / "summary_of_micro_F1_score.png")
# Per-speaker ARI curves (means only, one line per speaker).
if spkind:
    plt.clf()
    for spk in range(spk_num):
        plt.plot(range(T), spkind_letter_ARIs[:, spk].mean(axis=0), label=f"{spk+1}-th speaker")
    plt.xlabel("Iteration")
    plt.ylabel("ARI")
    plt.title("Transitions of the letter ARI")
    plt.legend()
    plt.savefig(figure_dir / "speaker_individual_summary_of_letter_ARI.png")
    plt.clf()
    for spk in range(spk_num):
        plt.plot(range(T), spkind_word_ARIs[:, spk].mean(axis=0), label=f"{spk+1}-th speaker")
    plt.xlabel("Iteration")
    plt.ylabel("ARI")
    plt.title("Transitions of the word ARI")
    plt.legend()
    plt.savefig(figure_dir / "speaker_individual_summary_of_word_ARI.png")
print("Done!")
#%% Persist the stacked arrays for later analysis.
print("Save npy files...")
np.save(summary_dir / "resample_times.npy", resample_times)
np.save(summary_dir / "log_likelihoods.npy", log_likelihoods)
np.save(summary_dir / "letter_ARI.npy", letter_ARIs)
np.save(summary_dir / "letter_macro_F1.npy", letter_macro_f1_scores)
np.save(summary_dir / "letter_micro_F1.npy", letter_micro_f1_scores)
np.save(summary_dir / "word_ARI.npy", word_ARIs)
np.save(summary_dir / "word_macro_F1.npy", word_macro_f1_scores)
np.save(summary_dir / "word_micro_F1.npy", word_micro_f1_scores)
if spkind:
    np.save(summary_dir / "Speaker_individual_letter_ARI.npy", spkind_letter_ARIs)
    np.save(summary_dir / "Speaker_individual_word_ARI.npy", spkind_word_ARIs)
print("Done!") | Experiment_1/src/NPB-DAA/summary_summary.py | import numpy as np
import matplotlib.pyplot as plt
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from pathlib import Path
import re
#%%
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--result_dir", type=Path, required=True)
parser.add_argument("--figure_dir", type=Path, default="./figures")
parser.add_argument("--summary_dir", type=Path, default="./summary_files")
args = parser.parse_args()
#%%
dirs = [dir for dir in args.result_dir.iterdir() if dir.is_dir() and re.match(r"^[0-9]+$", dir.stem)]
dirs.sort(key=lambda dir: dir.stem)
#%%
figure_dir = args.figure_dir
figure_dir.mkdir(exist_ok=True, parents=True)
summary_dir = args.summary_dir
summary_dir.mkdir(exist_ok=True, parents=True)
#%%
print("Initialize variables....")
N = len(dirs)
tmp = np.loadtxt(dirs[0] / "summary_files/resample_times.txt")
T = tmp.shape[0]
spkind = False
if (dirs[0] / "summary_files/Speaker_individual_letter_ARI.txt").exists():
tmp = np.loadtxt(dirs[0] / "summary_files/Speaker_individual_letter_ARI.txt")
spk_num = tmp.shape[0]
spkind = True
resample_times = np.empty((N, T))
log_likelihoods = np.empty((N, T+1))
letter_ARIs = np.empty((N, T))
letter_macro_f1_scores = np.empty((N, T))
letter_micro_f1_scores = np.empty((N, T))
word_ARIs = np.empty((N, T))
word_macro_f1_scores = np.empty((N, T))
word_micro_f1_scores = np.empty((N, T))
if spkind:
spkind_letter_ARIs = np.empty((N, spk_num, T))
spkind_word_ARIs = np.empty((N, spk_num, T))
print("Done!")
#%%
print("Loading results....")
for i, dir in enumerate(dirs):
resample_times[i] = np.loadtxt(dir / "summary_files/resample_times.txt")
log_likelihoods[i] = np.loadtxt(dir / "summary_files/log_likelihood.txt")
letter_ARIs[i] = np.loadtxt(dir / "summary_files/Letter_ARI.txt")
letter_macro_f1_scores[i] = np.loadtxt(dir / "summary_files/Letter_macro_F1_score.txt")
letter_micro_f1_scores[i] = np.loadtxt(dir / "summary_files/Letter_micro_F1_score.txt")
word_ARIs[i] = np.loadtxt(dir / "summary_files/Word_ARI.txt")
word_macro_f1_scores[i] = np.loadtxt(dir / "summary_files/Word_macro_F1_score.txt")
word_micro_f1_scores[i] = np.loadtxt(dir / "summary_files/Word_micro_F1_score.txt")
if spkind:
spkind_letter_ARIs[i] = np.loadtxt(dir / "summary_files/Speaker_individual_letter_ARI.txt")
spkind_word_ARIs[i] = np.loadtxt(dir / "summary_files/Speaker_individual_word_ARI.txt")
print("Done!")
#%%
print("Ploting...")
plt.clf()
plt.errorbar(range(T), resample_times.mean(axis=0), yerr=resample_times.std(axis=0))
plt.xlabel("Iteration")
plt.ylabel("Execution time [sec]")
plt.title("Transitions of the execution time")
plt.savefig(figure_dir / "summary_of_execution_time.png")
plt.clf()
plt.errorbar(range(T+1), log_likelihoods.mean(axis=0), yerr=log_likelihoods.std(axis=0))
plt.xlabel("Iteration")
plt.ylabel("Log likelihood")
plt.title("Transitions of the log likelihood")
plt.savefig(figure_dir / "summary_of_log_likelihood.png")
plt.clf()
plt.errorbar(range(T), word_ARIs.mean(axis=0), yerr=word_ARIs.std(axis=0), label="Word ARI")
plt.errorbar(range(T), letter_ARIs.mean(axis=0), yerr=letter_ARIs.std(axis=0), label="Letter ARI")
plt.xlabel("Iteration")
plt.ylabel("ARI")
plt.title("Transitions of the ARI")
plt.legend()
plt.savefig(figure_dir / "summary_of_ARI.png")
plt.clf()
plt.errorbar(range(T), word_macro_f1_scores.mean(axis=0), yerr=word_macro_f1_scores.std(axis=0), label="Word macro F1")
plt.errorbar(range(T), letter_macro_f1_scores.mean(axis=0), yerr=letter_macro_f1_scores.std(axis=0), label="Letter macro F1")
plt.xlabel("Iteration")
plt.ylabel("Macro F1 score")
plt.title("Transitions of the macro F1 score")
plt.legend()
plt.savefig(figure_dir / "summary_of_macro_F1_score.png")
plt.clf()
plt.errorbar(range(T), word_micro_f1_scores.mean(axis=0), yerr=word_micro_f1_scores.std(axis=0), label="Word micro F1")
plt.errorbar(range(T), letter_micro_f1_scores.mean(axis=0), yerr=letter_micro_f1_scores.std(axis=0), label="Letter micro F1")
plt.xlabel("Iteration")
plt.ylabel("Micro F1 score")
plt.title("Transitions of the micro F1 score")
plt.legend()
plt.savefig(figure_dir / "summary_of_micro_F1_score.png")
if spkind:
plt.clf()
for spk in range(spk_num):
plt.plot(range(T), spkind_letter_ARIs[:, spk].mean(axis=0), label=f"{spk+1}-th speaker")
plt.xlabel("Iteration")
plt.ylabel("ARI")
plt.title("Transitions of the letter ARI")
plt.legend()
plt.savefig(figure_dir / "speaker_individual_summary_of_letter_ARI.png")
plt.clf()
for spk in range(spk_num):
plt.plot(range(T), spkind_word_ARIs[:, spk].mean(axis=0), label=f"{spk+1}-th speaker")
plt.xlabel("Iteration")
plt.ylabel("ARI")
plt.title("Transitions of the word ARI")
plt.legend()
plt.savefig(figure_dir / "speaker_individual_summary_of_word_ARI.png")
print("Done!")
#%%
print("Save npy files...")
np.save(summary_dir / "resample_times.npy", resample_times)
np.save(summary_dir / "log_likelihoods.npy", log_likelihoods)
np.save(summary_dir / "letter_ARI.npy", letter_ARIs)
np.save(summary_dir / "letter_macro_F1.npy", letter_macro_f1_scores)
np.save(summary_dir / "letter_micro_F1.npy", letter_micro_f1_scores)
np.save(summary_dir / "word_ARI.npy", word_ARIs)
np.save(summary_dir / "word_macro_F1.npy", word_macro_f1_scores)
np.save(summary_dir / "word_micro_F1.npy", word_micro_f1_scores)
if spkind:
np.save(summary_dir / "Speaker_individual_letter_ARI.npy", spkind_letter_ARIs)
np.save(summary_dir / "Speaker_individual_word_ARI.npy", spkind_word_ARIs)
print("Done!") | 0.424173 | 0.235229 |