input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
undo_converters_stack.pop()
def undo_converters():
    """Roll back speculative positional-argument appends.

    Pops the most recently appended converter from every parent recorded
    in undo_converters_list (newest first), then clears the list.  Called
    when the interpreter learns the staged arguments never arrived.
    """
    # only emit the "##" spacer once, before the first undo message
    print_spacer = True
    for parent in reversed(undo_converters_list):
        o = parent.args_converters.pop()
        if want_prints:
            if print_spacer:
                print(f"##")
                print_spacer = False
            print(f"## undo converter")
            print(f"## {parent=}")
            print(f"## popped {o=}")
            # label fixed: the attribute is args_converters (the old debug
            # string said "arg_converters", which doesn't match the code)
            print(f"## args_converters={parent.args_converters}")
    undo_converters_list.clear()
first_print_string = ""
waiting_op = None
prev_op = None
while ci or argi:
if want_prints:
print(first_print_string)
first_print_string = "##"
print(f"############################################################")
print(f"## cmdline {list(argi.values)}")
# first, run ci until we either
# * finish the program, or
# * must consume a command-line argument
for op in ci:
prev_op = waiting_op
waiting_op = op
if want_prints:
ip = f"[{ci.repr_ip()}]"
ip_spacer = " " * len(ip)
converter = ci.repr_converter(ci.converter)
o = ci.repr_converter(ci.o)
_total = ci.total and ci.total.summary()
_group = ci.group and ci.group.summary()
print(f"##")
print(f"## {ip} {converter=} {o=}")
print(f"## {ip_spacer} total={_total}")
print(f"## {ip_spacer} group={_group}")
if op.op == opcode.create_converter:
r = None if op.parameter.kind == KEYWORD_ONLY else root
cls = appeal.map_to_converter(op.parameter)
converter = cls(op.parameter, appeal)
ci.converters[op.key] = ci.o = converter
if not root:
root = converter
if want_prints:
print(f"## create_converter key={op.key} parameter={op.parameter}")
print(f"## {converter=}")
continue
if op.op == opcode.load_converter:
ci.converter = ci.converters.get(op.key, None)
converter = ci.repr_converter(ci.converter)
if want_prints:
print(f"## load_converter {op.key=} {converter=!s}")
continue
if op.op == opcode.load_o:
ci.o = ci.converters.get(op.key, None)
if want_prints:
o = ci.repr_converter(ci.o)
print(f"## load_o {op.key=} {o=!s}")
continue
if op.op == opcode.map_option:
options_bucket[op.option] = op.program
if want_prints:
print(f"## map_option {op.option=} {op.program=} token {options_token}")
continue
if op.op == opcode.append_args:
ci.converter.args_converters.append(ci.o)
add_undoable_converter(ci.converter)
if want_prints:
o = ci.repr_converter(ci.o)
print(f"## append_args {o=}")
continue
if op.op == opcode.store_kwargs:
converter = ci.o
if op.name in ci.converter.kwargs_converters:
existing = ci.converter.kwargs_converters[op.name]
if not ((existing == converter) and isinstance(existing, MultiOption)):
# TODO: this is terrible UI, must fix.
raise AppealUsageError(f"option is illegal, kwarg already set, {existing=} {hex(id(existing))} {converter=} {hex(id(converter))}")
# we're setting the kwarg to the value it's already set to,
# and it's a multioption, so this is fine.
continue
ci.converter.kwargs_converters[op.name] = ci.o
if want_prints:
o = ci.repr_converter(ci.o)
print(f"## store_kwargs name={op.name} {o=}")
continue
if op.op == opcode.consume_argument:
if want_prints:
print(f"## consume_argument is_oparg={op.is_oparg}")
if not argi:
if want_prints:
print(f"## no more arguments, aborting program")
ci.abort()
break
if op.op == opcode.push_context:
ci.push_context()
push_undo_converters()
if want_prints:
print(f"## push_context")
continue
if op.op == opcode.pop_context:
pop_undo_converters()
ci.pop_context()
if want_prints:
print(f"## pop_context")
continue
if op.op == opcode.set_group:
ci.group = op.group.copy()
reset_undo_converters()
if want_prints:
print(f"## set_group {ci.group.summary()}")
continue
if op.op == opcode.flush_multioption:
assert isinstance(ci.o, MultiOption), f"expected instance of MultiOption but {ci.o=}"
ci.o.flush()
if want_prints:
o = ci.repr_converter(ci.o)
print(f"## flush_multioption {o=}")
continue
if op.op == opcode.jump:
if want_prints:
print(f"## jump {op.address=}")
ci.i.jump(op.address)
continue
if op.op == opcode.jump_relative:
if want_prints:
print(f"## jump_relative {op.delta=}")
ci.i.jump_relative(op.delta)
continue
if op.op == opcode.branch_on_o:
if want_prints:
print(f"## branch_on_o o={ci.o} {op.delta=}")
if ci.o:
ci.i.jump(op.address)
continue
if op.op == opcode.comment:
if want_prints:
print(f"## comment {op.comment!r}")
continue
if op.op == opcode.end:
if want_prints:
name = str(op.op).partition(".")[2]
print(f"## {name} id={op.id} name={op.name!r}")
continue
raise AppealConfigurationError(f"unhandled opcode {op=}")
else:
# we finished the program
if want_prints:
print(f"##")
print(f"## program finished.")
print(f"##")
op = None
forget_undo_converters()
assert (op == None) or (op.op == opcode.consume_argument)
# it's time to consume arguments.
# we've either paused or finished the program.
# if we've paused, it's because the program wants us
# to consume an argument. in that case op
# will be a 'consume_argument' op.
# if we've finished the program, op will be None.
#
# technically this is a for loop over argi, but
# we usually only consume one argument at a time.
#
# for a in argi:
# * if a is an option (or options),
# push that program (programs) and resume
# the charm interpreter.
# * if a is the special value '--', remember
# that all subsequent command-line arguments
# can no longer be options, and continue to
# the next a in argi. (this is the only case
# in which we'll consume more than one argument
# in this loop.)
# * else a is a positional argument.
# * if op is consume_argument, consume it and
# resume the charm interpreter.
# * else, hmm, we have a positional argument
# we don't know what to do with. the program
# is done, and we don't have a consume_argument
# to give it to. so push it back onto argi
# and exit. (hopefully the argument is the
# name of a command/subcomand.)
for a in argi:
if want_prints:
print("#]")
is_oparg = op and (op.op == opcode.consume_argument) and op.is_oparg
# if this is true, we're consuming a top-level command-line argument.
# if this is false, we're processing an oparg.
# what's the difference? opargs can't be options.
is_positional_argument = (
appeal.root.force_positional
or ((not a.startswith("-")) or (a == "-"))
or is_oparg
)
if want_prints:
# print_op = "consume_argument" if op else None
print_op = op
print(f"#] process argument {a!r} {list(argi.values)}")
print(f"#] op={print_op}")
if is_positional_argument:
if not op:
if want_prints:
print(f"#] positional argument we can't handle. exit.")
argi.push(a)
return ci.converters[0]
ci.o = a
forget_undo_converters()
if ci.group:
ci.group.count += 1
if ci.total:
ci.total.count += 1
if not is_oparg:
pop_options_to_base()
if want_prints:
print(f"#] positional argument. o={ci.o!r}")
# return to the interpreter
break
# it's an option! or "--".
if not option_space_oparg:
raise AppealConfigurationError("oops, option_space_oparg must currently be True")
queue = []
option_stack_tokens = []
# split_value is the value we "split" from the option string.
# --option=X
# -o=X
# -oX
# it's set to X if the user specifies an X, otherwise it's None.
split_value = None
if a.startswith("--"):
if a == "--":
appeal.root.force_positional = True
if want_prints:
print(f"#] '--', force_positional=True")
continue
option, equals, _split_value = a.partition("=")
if equals:
split_value = _split_value
program, maximum_arguments, token = find_option(option)
option_stack_tokens.append(token)
if want_prints:
print(f"#] option {denormalize_option(option)} {program=}")
queue.append((option, program, maximum_arguments, split_value, True))
else:
options = collections.deque(a[1:])
while options:
option = options.popleft()
equals = short_option_equals_oparg and options and (options[0] == '=')
if equals:
options.popleft()
split_value = "".join(options)
options = ()
program, maximum_arguments, token = find_option(option)
option_stack_tokens.append(token)
# if it takes no arguments, proceed to the next option
if not maximum_arguments:
if want_prints:
print(f"#] option {denormalize_option(option)}")
queue.append([denormalize_option(option), program, maximum_arguments, split_value, False])
continue
# this eats arguments. if there are more characters waiting,
# they must be the split value.
if options:
assert not split_value
split_value = "".join(options)
options = ()
if not short_option_concatenated_oparg:
raise AppealUsageError(f"'-{option}{split_value}' is not allowed, use '-{option} {split_value}'")
if want_prints:
print(f"#] option {denormalize_option(option)}")
queue.append([denormalize_option(option), program, maximum_arguments, split_value, False])
# mark the last entry in the queue as last
queue[-1][-1] = True
assert queue and option_stack_tokens
# we have options to run.
# so the existing consume_argument op will have to wait.
if op:
ci.rewind()
op = None
# pop to the *lowest* bucket!
option_stack_tokens.sort()
pop_options_to_token(option_stack_tokens[0])
# and now push on a new bucket.
push_options()
# process options in reverse here!
# that's because we push each program on the interpreter. so, LIFO.
for error_option, program, maximum_arguments, split_value, is_last in reversed(queue):
if want_prints:
print(f"#] call program={program=} {split_value=}")
if not is_last:
total = program.total
assert maximum_arguments == 0
if split_value is not None:
assert is_last
if maximum_arguments != 1:
if maximum_arguments == 0:
raise AppealUsageError(f"{error_option} doesn't take an argument")
if maximum_arguments >= 2:
raise AppealUsageError(f"{error_option} given a single argument but it requires multiple arguments, you must separate the arguments with spaces")
argi.push(split_value)
if want_prints:
print(f"#] pushing split value {split_value!r} on argi")
ci.call(program)
break
undo_converters()
satisfied = True
if ci.total and not ci.total.satisfied():
satisfied = False
ag = ci.total
if ci.group and not ci.group.satisfied():
if (not ci.group.optional) or ci.group.count:
satisfied = False
ag = ci.group
if not satisfied:
if not ci.group.satisfied():
which = "in this argument group"
ag = ci.group
else:
which = "total"
ag = ci.total
if ag.minimum == ag.maximum:
middle = f"{ag.minimum} arguments"
else:
middle = f"at least {ag.minimum} arguments but no more than {ag.maximum} arguments"
message = f"{program.name} requires {middle} {which}."
raise AppealUsageError(message)
if want_prints:
print(f"##")
print(f"## ending parse.")
finished_state = "not finished" if ci else "finished"
print(f"## program was {finished_state}.")
if argi:
print(f"## remaining | |
<gh_stars>1-10
import pandas as pd
from .algo import *
from .validate import *
from .validate import DCAError
__all__ = ['DecisionCurveAnalysis'] # only public member should be the class
class DecisionCurveAnalysis:
"""DecisionCurveAnalysis(...)
DecisionCurveAnalysis(algorithm='dca', **kwargs)
Create an object of class DecisionCurveAnalysis for generating
and plotting "net benefit" and "interventions avoided" curves
Parameters
----------
algorithm : str
the type of analysis to run
valid values are 'dca' (decision curve) or 'stdca' (survival time decision curve)
**kwargs : object
keyword arguments that are used in the analysis
Attributes
----------
data : pd.DataFrame
The data set to analyze, with observations in each row, and
outcomes/predictors in the columns
outcome : str
The column in `data` to use as the outcome for the analysis
All observations in this column must be coded 0/1
predictors : list(str)
The column(s) in `data` to use as predictors during the analysis
All observations, 'x', in this column must be in the range 0 <= x <= 1
Methods
-------
run : runs the analysis
smooth_results : use local regression (LOWESS) to smooth the
results of the analysis, using the specified fraction
plot_net_benefit : TODO
plot_interv_avoid : TODO
Examples
--------
TODO
"""
#universal parameters for dca
_common_args = {'data' : None,
'outcome' : None,
'predictors' : None,
'thresh_lo' : 0.01,
'thresh_hi' : 0.99,
'thresh_step' : 0.01,
'probabilities' : None,
'harms' : None,
'intervention_per' : 100}
#stdca-specific attributes
_stdca_args = {'tt_outcome' : None,
'time_point' : None,
'cmp_risk' : False}
def __init__(self, algorithm='dca', **kwargs):
"""Initializes the DecisionCurveAnalysis object
Arguments for the analysis may be passed in as keywords upon object initialization
Parameters
----------
algorithm : str
the algorithm to use, valid options are 'dca' or 'stdca'
**kwargs :
keyword arguments to populate instance attributes that will be used in analysis
Raises
------
ValueError
if user doesn't specify a valid algorithm; valid values are 'dca' or 'stdca'
if the user specifies an invalid keyword
"""
if algorithm not in ['dca', 'stdca']:
raise ValueError("did not specify a valid algorithm, only 'dca' and 'stdca' are valid")
self.algorithm = algorithm
#set args based on keywords passed in
#this naively assigns values passed in -- validation occurs afterwords
for kw in kwargs:
if kw in self._common_args:
self._common_args[kw] = kwargs[kw] #assign
continue
elif kw in self._stdca_args:
self._stdca_args[kw] = kwargs[kw]
else:
raise ValueError("{kw} is not a valid decision_curve_analysis keyword"
.format(kw=repr(kw)))
#do validation on all args, make sure we still have a valid analysis
self.data = data_validate(self.data)
self.outcome = outcome_validate(self.data, self.outcome)
self.predictors = predictors_validate(self.predictors, self.data)
#validate bounds
new_bounds = []
curr_bounds = [self._common_args['thresh_lo'], self._common_args['thresh_hi'],
self._common_args['thresh_step']]
for i, bound in enumerate(['lower', 'upper', 'step']):
new_bounds.append(threshold_validate(bound, self.threshold_bound(bound),
curr_bounds))
self.set_threshold_bounds(new_bounds[0], new_bounds[1], new_bounds[2])
#validate predictor-reliant probs/harms
self.probabilities = probabilities_validate(self.probabilities,
self.predictors)
self.harms = harms_validate(self.harms, self.predictors)
#validate the data in each predictor column
self.data = validate_data_predictors(self.data, self.outcome, self.predictors,
self.probabilities)
def _args_dict(self):
"""Forms the arguments to pass to the analysis algorithm
Returns
-------
dict(str, object)
A dictionary that can be unpacked and passed to the algorithm for the
analysis
"""
if self.algorithm == 'dca':
return self._common_args
else:
from collections import Counter
return dict(Counter(self._common_args) + Counter(self._stdca_args))
def _algo(self):
"""The algorithm to use for this analysis
"""
return dca if self.algorithm == 'dca' else stdca
    def run(self, return_results=False):
        """Performs the analysis

        Parameters
        ----------
        return_results : bool
            if `True`, the function returns the results as a tuple
            if `False` (default), stores the results in the instance
            attribute `results` (a dict keyed by 'net benefit' and
            'interventions avoided')

        Returns
        -------
        tuple(pd.DataFrame, pd.DataFrame)
            Returns net_benefit, interventions_avoided if `return_results=True`;
            otherwise returns None
        """
        nb, ia = self._algo()(**(self._args_dict()))
        if return_results:
            return nb, ia
        else:
            self.results = {'net benefit' : nb, 'interventions avoided' : ia}
    def smooth_results(self, lowess_frac, return_results=False):
        """Smooths the results using a LOWESS smoother

        Parameters
        ----------
        lowess_frac : float
            the fraction of the data used when smoothing (the LOWESS `frac`)
        return_results : bool
            if `True`, the function returns the smoothed frames as a tuple
            if `False` (default), the smoothed columns are concatenated onto
            the stored `results` frames

        Returns
        -------
        tuple(pd.DataFrame, pd.DataFrame)
            smoothed predictor dataFrames for results if `return_results=True`;
            otherwise returns None
        """
        from dcapy.calc import lowess_smooth_results
        # accumulate one smoothed column per predictor; pd.concat accepts the
        # initial None and simply ignores it
        _nb = _ia = None
        for predictor in self.predictors:
            nb, ia = lowess_smooth_results(predictor, self.results['net benefit'],
                                           self.results['interventions avoided'],
                                           lowess_frac)
            #concatenate results
            _nb = pd.concat([_nb, nb], axis=1)
            _ia = pd.concat([_ia, ia], axis=1)
        if return_results:
            return _nb, _ia
        else:
            self.results['net benefit'] = pd.concat(
                [self.results['net benefit'], _nb], axis=1)
            self.results['interventions avoided'] = pd.concat(
                [self.results['interventions avoided'], _ia], axis=1)
def plot_net_benefit(self, custom_axes=None, make_legend=True):
"""Plots the net benefit from the analysis
Parameters
----------
custom_axes : list(float)
a length-4 list of dimensions for the plot, `[x_min, x_max, y_min, y_max]`
make_legend : bool
whether to include a legend in the plot
Returns
-------
matplotlib.rc_context
"""
try:
import matplotlib.pyplot as plt
except ImportError as e:
e.args += ("plotting the analysis requires matplotlib")
raise
try:
net_benefit = getattr(self, 'results')['net benefit']
except AttributeError:
raise DCAError("must run analysis before plotting!")
plt.plot(net_benefit)
plt.ylabel("Net Benefit")
plt.xlabel("Threshold Probability")
#prettify the graph
if custom_axes:
plt.axis(custom_axes)
else: #use default
plt.axis([0, self.threshold_bound('upper')*100,
-0.05, 0.20])
def plot_interventions_avoided(self, custom_axes=None, make_legend=True):
"""Plots the interventions avoided per `interventions_per` patients
Notes
-----
Generated plots are 'interventions avoided per `intervention_per` patients' vs. threshold
Parameters
----------
custom_axes : list(float)
a length-4 list of dimensions for the plot, `[x_min, x_max, y_min, y_max]`
make_legend : bool
whether to include a legend in the plot
Returns
-------
matplotlib.rc_context
context manager for working with the newly-created plot
"""
try:
import matplotlib.pyplot as plt
except ImportError as e:
e.args += ("plotting the analysis requires matplotlib")
raise
try:
interv_avoid = getattr(self, 'results')['interventions avoided']
except AttributeError:
raise DCAError("must run analysis before plotting!")
iaplot = plt.plot(interv_avoid)
#TODO: graph prettying/customization
return iaplot
@property
def data(self):
"""The data set to analyze
Returns
-------
pd.DataFrame
"""
return self._common_args['data']
@data.setter
def data(self, value):
"""Set the data for the analysis
Parameters
----------
value : pd.DataFrame
the data to analyze
"""
value = data_validate(value) # validate
self._common_args['data'] = value
@property
def outcome(self):
"""The outcome to use for the analysis
"""
return self._common_args['outcome']
@outcome.setter
def outcome(self, value):
"""Sets the column in the dataset to use as the outcome for the analysis
Parameters
----------
value : str
the name of the column in `data` to set as `outcome`
"""
value = outcome_validate(self.data, value) # validate
self._common_args['outcome'] = value
@property
def predictors(self):
"""The predictors to use
Returns
-------
list(str)
A list of all predictors for the analysis
"""
return self._common_args['predictors']
@predictors.setter
def predictors(self, value):
"""Sets the predictors to use for the analysis
Parameters
----------
value : list(str)
the list of predictors to use
"""
value = predictors_validate(value, self.data)
self._common_args['predictors'] = value
def threshold_bound(self, bound):
"""Gets the specified threshold boundary
Parameters
----------
bound : str
the boundary to get; valid values are "lower", "upper", or "step"
Returns
-------
float
the current value of that boundary
"""
mapping = {'lower' : 'thresh_lo',
'upper' : 'thresh_hi',
'step' : 'thresh_step'}
try:
return self._common_args[mapping[bound]]
except KeyError:
raise ValueError("did not specify a valid boundary")
def set_threshold_bounds(self, lower, upper, step=None):
"""Sets the threshold boundaries (thresh_*) for the analysis
Notes
-----
Passing `None` for any of the parameters will skip that parameter
The analysis will be run over all steps, x, lower <= x <= upper
Parameters
----------
lower : float
the lower boundary
upper : float
the upper boundary
step : float
the increment between calculations
"""
_step = step if step else self._common_args['thresh_step']
bounds_to_test = [lower, upper, _step]
if lower is not None:
lower = threshold_validate('lower', lower, bounds_to_test)
self._common_args['thresh_lo'] = lower
if upper is not None:
upper = threshold_validate('upper', upper, bounds_to_test)
self._common_args['thresh_hi'] = upper
if step is not None:
step = threshold_validate('step', step, bounds_to_test)
self._common_args['thresh_step'] = step
@property
def probabilities(self):
"""The list of probability values for each predictor
Returns
-------
list(bool)
the probability list
"""
return self._common_args['probabilities']
@probabilities.setter
def probabilities(self, value):
"""Sets the probabilities list for the analysis
Notes
-----
The length of the parameter `value` must match that of the predictors
Parameters
----------
value : list(bool)
a list of probabilities to assign, one for each predictor
"""
| |
# -*- coding: utf-8 -*-
import numpy as np
import random
import math
from Policy import *
from my_moduler import get_module_logger, get_state_logger
from mulligan_setting import *
from adjustment_action_code import *
mylogger = get_module_logger(__name__)
# mylogger = get_module_logger('mylogger')
import itertools
#statelogger = get_state_logger('state')
from my_enum import *
class Player:
    """One participant in the card-game simulation.

    Holds the player's hand, life total, deck, policy objects, and the
    logic for taking actions (play, attack, evolve) on a Field.  The
    Field/Deck/Policy types are defined in the modules imported above.
    """
    def __init__(self, max_hand_num, first=True, policy=RandomPolicy(), mulligan=Random_mulligan_policy()):
        # NOTE(review): the policy/mulligan defaults are evaluated once at
        # def time, so all default-constructed Players share those objects.
        self.hand = []
        self.max_hand_num = max_hand_num
        self.is_first = first
        # first player is number 0, second player is number 1
        self.player_num = 1 - int(self.is_first)
        self.life = 20
        self.max_life = 20
        self.policy = policy
        self.mulligan_policy = mulligan
        self.deck = None
        # set True when the player tries to draw from an empty deck
        self.lib_out_flg = False
        self.field = None
        self.name = None
        self.class_num = None
        self.effect = []
        self.error_count = 0
    def get_copy(self, field):
        """Return a copy of this player bound to *field*.

        Cards are copied through field.copy_func; the policy objects are
        shared (not copied) with the original.
        """
        player = Player(self.max_hand_num, first=self.is_first, policy=self.policy, mulligan=self.mulligan_policy)
        #for card in self.hand:
        #    player.hand.append(card.get_copy())
        player.hand = list(map(field.copy_func,self.hand)) if field is not None else []
        player.life = self.life
        player.deck = Deck()
        if self.deck is not None:
            player.deck.set_leader_class(self.deck.leader_class.name)
            #for i,card in enumerate(self.deck.deck):
            #    player.deck.append(card)
            player.deck.deck = deque(map(field.copy_func, self.deck.deck)) if field is not None else deque()
            player.deck.remain_num = int(self.deck.remain_num)
            player.deck.deck_type = int(self.deck.deck_type)
        player.field = field
        player.name = self.name
        player.class_num = self.class_num
        # shallow copy: the effect objects themselves are shared
        player.effect = copy.copy(self.effect)
        #if len(self.effect) > 0:
        #player.effect = copy.deepcopy(self.effect)
        return player
    def eq(self,other):
        """Return True if *other* looks equivalent: same life, same deck
        size, and the same multiset of cards in hand (order-insensitive)."""
        if self.life != other.life:
            return False
        if len(self.deck.deck) != len(other.deck.deck):
            return False
        if len(self.hand) != len(other.hand):
            return False
        checked_cards = []
        for i,card in enumerate(self.hand):
            if card in checked_cards:
                continue
            # count occurrences of this card in both hands
            origin_count = 0
            other_count = 0
            for player_card in self.hand:
                if player_card.eq(card):
                    origin_count += 1
            for other_card in other.hand:
                if other_card.eq(card):
                    other_count += 1
            if origin_count != other_count:
                return False
            checked_cards.append(card)
        return True
    def compare_hand(self,other_hand):
        """Return True if *other_hand* holds the same multiset of cards
        as this player's hand (order-insensitive)."""
        checked_cards = []
        for i,card in enumerate(self.hand):
            if card in checked_cards:
                continue
            origin_count = 0
            other_count = 0
            for player_card in self.hand:
                if player_card.eq(card):
                    origin_count += 1
            for other_card in other_hand:
                if other_card.eq(card):
                    other_count += 1
            if origin_count != other_count:
                return False
            checked_cards.append(card)
        return True
    def sort_hand(self):
        """Sort the hand in place by card name."""
        self.hand.sort(key = lambda card:card.name)
    def get_damage(self, damage):
        """Apply *damage* to this player and return the amount dealt.

        With no active effects, simply subtracts from life.  Otherwise the
        damage value is threaded through each effect in priority order.
        NOTE(review): the effect branch returns the adjusted value without
        subtracting it from self.life -- presumably the effects themselves
        apply it; confirm.  Also note 'proirity' here vs 'priority' two
        lines below -- likely a typo pair mirrored in the Effect class;
        verify before changing.
        """
        if len(self.effect) > 0:
            tmp = int(damage)
            priority_list = list(set([effect.proirity for effect in self.effect]))
            priority_list = sorted(priority_list, reverse=True)
            for i in priority_list:
                for effect in self.effect:
                    if effect.priority == i:
                        tmp = effect(argument=tmp, state_code=State_Code.GET_DAMAGE.value)
            return tmp
        else:
            self.life -= damage
            return damage
    def restore_player_life(self, num=0, virtual=False):
        """Delegate life restoration to the field."""
        self.field.restore_player_life(player=self, num=num, virtual=virtual)
    def check_vengeance(self):
        # "vengeance" style condition: at or below half life
        return self.life <= 10
    def check_overflow(self):
        # "overflow" style condition: 7+ play points on the field
        return self.field.cost[self.player_num] >= 7
    def check_resonance(self):
        # "resonance" style condition: even number of cards left in deck
        return len(self.deck.deck) % 2 == 0
    def draw(self, deck, num):
        """Draw *num* cards from *deck* into the hand.

        Sets lib_out_flg and stops if the deck is empty.  A card drawn
        over the hand limit is discarded (counted as a shadow).
        """
        for i in range(num):
            if len(deck.deck) == 0:
                self.lib_out_flg = True
                return
            card = deck.draw()
            self.field.drawn_cards.append(card,self.player_num)
            self.hand.append(card)
            if len(self.hand) > self.max_hand_num:
                # overdraw: the just-drawn card is burned
                self.field.graveyard.shadows[self.player_num] += 1
                self.hand.pop(-1)
    def append_cards_to_hand(self, cards):
        """Add *cards* to the hand; cards over the limit are burned."""
        for card in cards:
            self.hand.append(card)
            if len(self.hand) > self.max_hand_num:
                self.field.graveyard.shadows[self.player_num] += 1
                self.hand.pop(-1)
    def show_hand(self):
        """Print the hand to stdout, padded to a fixed display height."""
        length = 0
        print("Player", self.player_num + 1, "'s hand")
        print("====================================================================")
        hand_len = len(self.hand)
        for i in range(hand_len):
            print(i, ": ", self.hand[i])
            length = i
        print("====================================================================")
        for i in range(9 - length):
            print("")
    def mulligan(self, deck, virtual=False):
        """Run the mulligan: let the policy pick cards to return, draw
        replacements, then shuffle the returned cards back into *deck*."""
        change_cards_id = self.mulligan_policy.decide(self.hand, deck)
        if not virtual:
            mylogger.info("Player{}'s hand".format(self.player_num + 1))
            self.show_hand()
            mylogger.info("change card_id:{}".format(change_cards_id))
        # pop in reverse index order so earlier indices stay valid
        return_cards = [self.hand.pop(i) for i in change_cards_id[::-1]]
        self.draw(deck, len(return_cards))
        if not virtual:
            self.show_hand()
        return_cards_len = len(return_cards)
        for i in range(return_cards_len):
            deck.append(return_cards.pop())
        deck.shuffle()
    def play_card(self, field, card_id, player, opponent, virtual=False, target=None):
        """Play the card at hand index *card_id*.

        Handles enhance/accelerate cost variants; accelerate plays the
        card "as another card" and returns early.  Otherwise pays the
        normal cost and dispatches by card category.
        """
        if not virtual:
            mylogger.info("Player {} plays {}".format(self.player_num + 1, self.hand[card_id].name))
        if self.hand[card_id].have_enhance == True and self.hand[card_id].active_enhance_code[0] == True:
            field.remain_cost[self.player_num] -= self.hand[card_id].active_enhance_code[1]
            if not virtual:
                mylogger.info("Enhance active!")
        elif self.hand[card_id].have_accelerate and self.hand[card_id].active_accelerate_code[0]:
            field.remain_cost[self.player_num] -= self.hand[card_id].active_accelerate_code[1]
            if not virtual:
                mylogger.info("Accelerate active!")
            field.play_as_other_card(self.hand, card_id, self.player_num, virtual=virtual, target=target)
            return
        else:
            if field.remain_cost[self.player_num] - self.hand[card_id].cost < 0:
                mylogger.info("{}".format(self.hand[card_id]))
                mylogger.info("minus-pp error:{} < {}"\
                    .format(field.remain_cost[self.player_num],self.hand[card_id].cost))
                raise AssertionError
            #assert field.remain_cost[self.player_num] - self.hand[card_id].cost >= 0, "minus-pp error:{} < {}"\
            #    .format(field.remain_cost[self.player_num],self.hand[card_id].cost)
            field.remain_cost[self.player_num] -= self.hand[card_id].cost
        if self.hand[card_id].card_category == "Creature":
            field.play_creature(self.hand, card_id, self.player_num, player, opponent, virtual=virtual, target=target)
        elif self.hand[card_id].card_category == "Spell":
            field.play_spell(self.hand, card_id, self.player_num, player, opponent, virtual=virtual, target=target)
        elif self.hand[card_id].card_category == "Amulet":
            field.play_amulet(self.hand, card_id, self.player_num, player, opponent, virtual=virtual, target=target)
        field.players_play_num += 1
        field.ability_resolution(virtual=virtual, player_num=self.player_num)
    def attack_to_follower(self, field, attacker_id, target_id, virtual=False):
        """Attack the opponent's follower *target_id* with *attacker_id*."""
        field.attack_to_follower([self.player_num, attacker_id], [1 - self.player_num, target_id], field,
                                 virtual=virtual)
    def attack_to_player(self, field, attacker_id, opponent, virtual=False):
        """Attack *opponent* directly with follower *attacker_id*."""
        field.attack_to_player([self.player_num, attacker_id], opponent, virtual=virtual)
    def creature_evolve(self, creature, field, target=None, virtual=False):
        """Spend one evolution point to evolve *creature*."""
        assert field.evo_point[self.player_num] > 0
        field.evo_point[self.player_num] -= 1
        field.evolve(creature, virtual=virtual, target=target)
    def discard(self, hand_id, field):
        """Discard the card at hand index *hand_id*."""
        field.discard_card(self,hand_id)
    def decide(self, player, opponent, field, virtual=False,dual=False):
        """Ask the policy for one action and execute it.

        On a policy ERROR code, retries recursively (asserting out after
        3 consecutive errors).  Returns the game-end flag; with dual=True
        also returns the chosen action and its detailed encoding.
        """
        field.stack.clear()
        #self.sort_hand()
        (_, _, can_be_attacked, regal_targets) = field.get_situation(self, opponent)
        (can_play, can_attack, can_evo), (able_to_play, able_to_attack, able_to_creature_attack, able_to_evo) \
            = field.get_flag_and_choices(self, opponent, regal_targets)
        if not virtual:
            # display the observable game state for a human watching the run
            observable_data = field.get_observable_data(player_num=self.player_num)
            if self.player_num == 0:
                print("first:player")
            else:
                print("first:opponent")
            player_keys = list(observable_data.keys())
            for sub_key in list(observable_data[player_keys[0]].keys()):
                print("{}:{},{}".format(sub_key,observable_data[player_keys[0]][sub_key],
                                        observable_data[player_keys[1]][sub_key]))
            #for key in list(observable_data.keys()):
            #    print("{}".format(key))
            #    for sub_key in list(observable_data[key].keys()):
            #        print("{}:{}".format(sub_key, observable_data[key][sub_key]))
            self.show_hand()
            field.show_field()
            if able_to_play != []:
                mylogger.info("able_to_play:{}".format(able_to_play))
            if able_to_creature_attack != []:
                mylogger.info(
                    "able_to_creature_attack:{} can_be_attacked:{}".format(able_to_creature_attack, can_be_attacked))
            mylogger.info("regal_targets:{}".format(regal_targets))
        #mylogger.info("check1:")
        #field.show_field()
        (action_num, card_id, target_id) = self.policy.decide(self, opponent, field)
        #mylogger.info("check2:")
        #field.show_field()
        #mylogger.info("{},{}".format(action_num,self.policy.policy_type))
        if action_num == Action_Code.ERROR.value:
            #self.policy.starting_node.print_node()
            #assert False
            self.error_count += 1
            #mylogger.info("error_count:{}".format(self.error_count))
            if self.error_count >= 3:
                # three errors in a row: dump the search tree and abort
                self.policy.starting_node.print_tree()
                print((action_num, card_id, target_id))
                field.show_field()
                mylogger.info("{}".format(self.policy.type))
                self.policy.current_node.print_tree(single=True)
                assert False
            self.policy.current_node = None
            return self.decide(player, opponent, field, virtual=virtual,dual=dual)
        elif action_num != Action_Code.TURN_END.value and self.policy.policy_type == 4:
            #mylogger.info("adjust")
            # policy_type 4 plans on a simulated field; remap the action
            # onto the real field before executing it
            sim_field = self.policy.prev_node.field
            action_num, card_id, target_id = adjust_action_code(field,sim_field,self.player_num,
                                                                action_code=(action_num, card_id, target_id), msg = action_num)
        self.error_count = 0
        if not virtual:
            mylogger.info("action_num:{} card_id:{} target_id:{}".format(action_num, card_id, target_id))
        action = (action_num, card_id, target_id)
        before_action = self.field.get_single_detailed_action_code(self,action)
        end_flg = self.execute_action(field, opponent, action_code=action, virtual=virtual)
        if dual:
            return end_flg, (action_num, card_id, target_id),before_action#(action_num, card_id)
        return end_flg
    def execute_action(self, field, opponent, action_code=None, virtual=False):
        """Execute one (action_num, card_id, target_id) triple on *field*.

        Returns True when the game (or the turn, for TURN_END) has ended,
        as reported by field.check_game_end().
        """
        field.reset_time_stamp()
        (action_num, card_id, target_id) = action_code
        if action_num == Action_Code.EVOLVE.value:
            self.creature_evolve(field.card_location[self.player_num][card_id],
                                 field, virtual=virtual, target=target_id)
        elif action_num == Action_Code.TURN_END.value:
            field.turn_end = True
            return True
        elif action_num == Action_Code.PLAY_CARD.value:
            if not virtual:
                if self.hand[card_id].have_enhance \
                        and self.hand[card_id].active_enhance_code[0]:
                    mylogger.info("play_cost:{}".format(self.hand[card_id].active_enhance_code[1]))
                elif self.hand[card_id].have_accelerate \
                        and self.hand[card_id].active_accelerate_code[0]:
                    mylogger.info("play_cost:{}".format(self.hand[card_id].active_accelerate_code[1]))
                else:
                    mylogger.info("play_cost:{}".format(self.hand[card_id].cost))
            self.play_card(field, card_id, self, opponent, target=target_id, virtual=virtual)
        elif action_num == Action_Code.ATTACK_TO_FOLLOWER.value:
            self.attack_to_follower(field, card_id, target_id, virtual=virtual)
        elif action_num == Action_Code.ATTACK_TO_PLAYER.value:
            #assert len(field.get_ward_list(self.player_num)) == 0,"ward_ignore_error"
            self.attack_to_player(field, card_id, opponent, virtual=virtual)
        field.ability_resolution(virtual=virtual, player_num=self.player_num)
        field.check_death(player_num=self.player_num, virtual=virtual)
        return field.check_game_end()
class HumanPlayer(Player):
def __init__(self, max_hand_num, first=True, policy=HumanPolicy(), mulligan=None):
self.hand = []
self.max_hand_num = max_hand_num
self.is_first = first
self.player_num = 1 - int(self.is_first)
self.life = 20
self.max_life = 20
self.policy = policy
self.mulligan_policy = mulligan
self.deck = None
self.lib_out_flg = False
self.field = None
self.name = None
self.class_num = None
self.effect = []
self.error_count = 0
def get_copy(self, field):
player = HumanPlayer(self.max_hand_num, first=self.is_first, policy=HumanPolicy(), mulligan=None)
player.hand = list(map(field.copy_func,self.hand)) if field is not None else []
player.life = self.life
player.deck = Deck()
if self.deck is not None:
player.deck.set_leader_class(self.deck.leader_class.name)
player.deck.deck = deque(map(field.copy_func, self.deck.deck)) if field is not None else deque()
player.deck.remain_num = int(self.deck.remain_num)
player.deck.deck_type = int(self.deck.deck_type)
player.field = field
player.name = self.name
player.class_num = self.class_num
player.effect = copy.copy(self.effect)
return player
def mulligan(self, deck, virtual=False):
self.show_hand()
tmp = input("input change card id(if you want to change all card,input ↑):")
hand_len = len(self.hand)
if tmp == "":
return
elif tmp == "\x1b[A":
tmp = [i for i in range(hand_len )]
else:
tmp = tmp.split(",")
# mylogger.info("tmp:{} type:{}".format(tmp, type(tmp)))
if len(tmp) > 0:
change_cards_id = list(map(int, tmp))
return_cards = [self.hand.pop(i) for i in change_cards_id[::-1]]
self.draw(deck, len(return_cards))
self.show_hand()
return_cards_len = len(return_cards)
for i in range(return_cards_len):
deck.append(return_cards.pop())
deck.shuffle()
def decide(self, player, opponent, field, virtual=False,dual=False):
#os.system('clear')
field.reset_time_stamp()
(ward_list, can_be_targeted, can_be_attacked, regal_targets) = field.get_situation(player, opponent)
(can_play, can_attack, can_evo), (able_to_play, able_to_attack, able_to_creature_attack, able_to_evo) \
= field.get_flag_and_choices(player, opponent, regal_targets)
observable_data = field.get_observable_data(player_num=self.player_num)
if self.player_num == 0:
print("first:player")
else:
print("first:opponent")
player_keys = list(observable_data.keys())
for sub_key in list(observable_data[player_keys[0]].keys()):
print("{}:{},{}".format(sub_key,observable_data[player_keys[0]][sub_key],
observable_data[player_keys[1]][sub_key]))
self.show_hand()
field.show_field()
choices = [Action_Code.TURN_END.value]
if can_evo:
choices.append(Action_Code.EVOLVE.value)
if can_play:
choices.append(Action_Code.PLAY_CARD.value)
if can_attack:
if len(can_be_attacked) > 0:
choices.append(Action_Code.ATTACK_TO_FOLLOWER.value)
if ward_list == [] and len(able_to_attack) > 0:
choices.append(Action_Code.ATTACK_TO_PLAYER.value)
[print("{:<25}:{}".format(Action_Code(i).name,i))for i in choices]
tmp = input("you can input {} :".format(choices))
action_num = Action_Code.TURN_END.value
if tmp == "":
action_num = Action_Code.TURN_END.value
elif tmp == "\x1b[C":
self.deck.show_remain_card_set()
input("input any key to quit remain_card_set:")
return can_play, can_attack, field.check_game_end()
else:
action_num = int(tmp)
assert action_num in choices,"{} not in {}".format(action_num,choices)
if action_num not in choices:
print("invalid input!")
return can_play, can_attack, field.check_game_end()
if action_num == Action_Code.EVOLVE.value:
print("you can evolve:{}".format(able_to_evo))
evo_names = ["id:{} name:{}".format(ele, field.card_location[self.player_num][ele].name) for ele in
able_to_evo]
for cell in evo_names:
mylogger.info("{}".format(cell))
card_id = int(input("input creature id :"))
if card_id not in able_to_evo:
print("already evolved!")
return can_play, can_attack, field.check_game_end()
if field.card_location[self.player_num][card_id].evo_target is not None:
mylogger.info("target-evolve")
regal = field.get_regal_targets(field.card_location[self.player_num][card_id], target_type=0,
player_num=self.player_num, human=True)
mylogger.info("targets:{}".format(regal))
if regal != | |
IMAG_TOL)
otherParams[i, i] = Lmx[i, i].real
for j in range(i):
otherParams[i, j] = Lmx[i, j].real
otherParams[j, i] = Lmx[i, j].imag
else: # param_mode == "unconstrained": otherParams mx stores otherProjs (hermitian) directly
for i in range(bsO - 1):
assert(_np.linalg.norm(_np.imag(otherProjs[i, i])) < IMAG_TOL)
otherParams[i, i] = otherProjs[i, i].real
for j in range(i):
otherParams[i, j] = otherProjs[i, j].real
otherParams[j, i] = otherProjs[i, j].imag
else:
otherParams = _np.empty(0, 'd')
assert(not _np.iscomplexobj(hamParams)) # params should always
assert(not _np.iscomplexobj(otherParams)) # be *real*
return _np.concatenate((hamParams, otherParams.flat))
def paramvals_to_lindblad_projections(paramvals, ham_basis_size,
                                      other_basis_size, param_mode="cptp",
                                      other_mode="all", Lmx=None):
    """
    Recover the separate Hamiltonian and non-Hamiltonian ("other")
    Lindblad-term projection arrays from a flat vector of real parameter
    values.  This is essentially the inverse of
    :function:`lindblad_projections_to_paramvals`.

    Parameters
    ----------
    paramvals : numpy.ndarray
        1D array of real parameters: the first `ham_basis_size - 1` entries
        are Hamiltonian values, and the remainder encode the "other" block
        in a layout fixed by `param_mode` and `other_mode`.

    ham_basis_size, other_basis_size : int
        Number of elements in the Hamiltonian and non-Hamiltonian bases used
        to build `paramvals`; `ham_basis_size` fixes the offset at which the
        non-Hamiltonian parameters start.

    param_mode : {"unconstrained", "cptp", "depol", "reldepol"}
        How parameter values map onto "other" coefficients.  "unconstrained"
        stores coefficients directly; "cptp" squares them (or expands a
        Cholesky factor) so the coefficient matrix is positive; "depol" uses
        a single shared positive parameter (diagonal modes only) and
        "reldepol" the same without the positivity constraint.

    other_mode : {"all", "diagonal", "diag_affine"}
        Shape of the "other" coefficient block: a full (d-1, d-1) matrix,
        just its (d-1,) diagonal, or a (2, d-1) array whose first row holds
        diagonal-term and second row affine coefficients.

    Lmx : ndarray, optional
        Scratch (d-1, d-1) complex array used to hold the lower-triangular
        Cholesky factor under the CPTP constraint.

    Returns
    -------
    hamProjs : numpy.ndarray or None
        Length-(d-1) Hamiltonian-type projections (None when
        `ham_basis_size == 0`).

    otherProjs : numpy.ndarray or None
        (d-1, d-1), (d-1,) or (2, d-1) non-Hamiltonian-type projections per
        `other_mode` (None when `other_basis_size == 0`).
    """
    nH = ham_basis_size
    nO = other_basis_size
    if Lmx is None:
        Lmx = _np.zeros((nO - 1, nO - 1), 'complex') if nO > 0 else None

    # Leading (real) Hamiltonian coefficients.
    if nH > 0:
        hamCoeffs = paramvals[0:nH - 1]
        offset = nH - 1
    else:
        hamCoeffs = None
        offset = 0

    if nO <= 0:
        return hamCoeffs, None

    rest = paramvals[offset:]  # the "other"-block parameters
    dim = nO - 1

    if other_mode == "diagonal":
        expected = (1,) if param_mode in ("depol", "reldepol") else (dim,)
        assert(rest.shape == expected)
        # A single (rel)depol parameter is replicated across the diagonal.
        vals = rest[0] * _np.ones(dim, 'd') if param_mode in ("depol", "reldepol") else rest
        # Squaring keeps coefficients positive (analogous to L * L^dagger).
        otherCoeffs = vals**2 if param_mode in ("cptp", "depol") else vals
    elif other_mode == "diag_affine":
        if param_mode in ("depol", "reldepol"):
            # One diagonal parameter followed by `dim` affine parameters.
            vals = rest.reshape((1 + dim,))
            otherCoeffs = _np.empty((2, dim), 'd')  # real: no complex entries
            otherCoeffs[0, :] = vals[0]**2 if param_mode == "depol" else vals[0]
            otherCoeffs[1, :] = vals[1:]
        else:
            vals = rest.reshape((2, dim))
            if param_mode == "cptp":
                otherCoeffs = vals.copy()
                otherCoeffs[0, :] = vals[0]**2  # positivity of diagonal row
            else:  # param_mode == "unconstrained"
                otherCoeffs = vals
    else:  # other_mode == "all"
        vals = rest.reshape((dim, dim))
        if param_mode == "cptp":
            # vals encodes a lower-triangular matrix Lmx via:
            #   Lmx[i,i] = vals[i,i]
            #   Lmx[i,j] = vals[i,j] + 1j*vals[j,i]   (i > j)
            for i in range(dim):
                Lmx[i, i] = vals[i, i]
                for j in range(i):
                    Lmx[i, j] = vals[i, j] + 1j * vals[j, i]
            # Treat Lmx as a Cholesky factor so the coefficient matrix is
            # positive semidefinite.  Uniqueness of the decomposition is not
            # needed here, so negative diagonal entries of Lmx are fine.
            otherCoeffs = _np.dot(Lmx, Lmx.T.conjugate())
        else:  # param_mode == "unconstrained": real/imag parts stored directly
            otherCoeffs = _np.empty((dim, dim), 'complex')
            for i in range(dim):
                otherCoeffs[i, i] = vals[i, i]
                for j in range(i):
                    otherCoeffs[i, j] = vals[i, j] + 1j * vals[j, i]
                    otherCoeffs[j, i] = vals[i, j] - 1j * vals[j, i]
    return hamCoeffs, otherCoeffs
#TODO: replace two_qubit_gate, one_qubit_gate, unitary_to_pauligate_* with
# calls to this one and unitary_to_processmx
def rotation_gate_mx(r, mxBasis="gm"):
    """
    Construct a rotation operation matrix.

    Builds the operation matrix for the unitary
    `exp(-i * (r[0]/2*PP[0]*sqrt(d) + r[1]/2*PP[1]*sqrt(d) + ...))`
    where `PP` is the array of Pauli-product matrices obtained via
    `pp_matrices(d)` with `d = sqrt(len(r)+1)`.  The division by 2 is
    conventional, and the sqrt(d) un-normalizes the `pp_matrices` output
    so the generators equal products of the *standard* Pauli matrices.

    Parameters
    ----------
    r : tuple
        One coefficient per non-identity Pauli-product basis element.

    mxBasis : {'std', 'gm', 'pp', 'qt'} or Basis object
        Basis in which to return the operation matrix: Matrix-unit (std),
        Gell-Mann (gm), Pauli-product (pp), Qutrit (qt), or a custom Basis.

    Returns
    -------
    numpy array
        A d^2 x d^2 operation matrix in the specified basis.
    """
    d = int(round(_np.sqrt(len(r) + 1)))
    assert(d**2 == len(r) + 1), "Invalid number of rotation angles"

    # Pauli-product generators in the std basis (index 0 is the identity).
    pp = _bt.basis_matrices('pp', d**2)
    assert(len(r) == len(pp[1:]))

    # Accumulate the Hermitian generator, then exponentiate.
    generator = _np.zeros((d, d), 'complex')
    for coeff, pp_mx in zip(r, pp[1:]):
        generator += coeff / 2.0 * pp_mx * _np.sqrt(d)
    unitary = _spl.expm(-1j * generator)

    # Promote the unitary to a process matrix and change to the target basis.
    return _bt.change_basis(unitary_to_process_mx(unitary), 'std', mxBasis)
def project_model(model, targetModel,
projectiontypes=('H', 'S', 'H+S', 'LND'),
genType="logG-logT"):
"""
Construct one or more new models by projecting the error generator of
`model` onto some sub-space then reconstructing.
Parameters
----------
model : Model
The model whose error generator should be projected.
targetModel : Model
The set of target (ideal) gates.
projectiontypes : tuple of {'H','S','H+S','LND','LNDCP'}
Which projections to use. The length of this tuple gives the
number of `Model` objects returned. Allowed values are:
- 'H' = Hamiltonian errors
- 'S' = Stochastic Pauli-channel errors
- 'H+S' = both of the above error types
- 'LND' = errgen projected to a normal (CPTP) Lindbladian
- 'LNDF' = errgen projected to an unrestricted (full) Lindbladian
genType : {"logG-logT", "logTiG"}
The type of error generator to compute. Allowed values are:
- "logG-logT" : errgen = log(gate) - log(target_op)
- "logTiG" : errgen = log( dot(inv(target_op), gate) )
Returns
-------
projected_models : list of Models
Elements are projected versions of `model` corresponding to
the elements of `projectiontypes`.
Nps : list of parameter counts
Integer parameter counts for each model in `projected_models`.
Useful for computing the expected log-likelihood or chi2.
"""
opLabels = list(model.operations.keys()) # operation labels
basis = model.basis
#The projection basis needs to be a basis for density matrices
# (i.e. 2x2 mxs in 1Q case) rather than superoperators | |
<filename>src/abe_sim/brain/midbrain.py
import os
import sys
import math
import time
import numpy as np
import abe_sim.brain.geom as geom
from abe_sim.brain.cerebellum import Cerebellum
from abe_sim.brain.geom import angle_diff, euler_to_quaternion, euler_diff_to_angvel, invert_quaternion, quaternion_product, quaternion_to_euler, poseFromTQ
import random
import math
import heapq
import ast
import json
import socket
import threading
HOST = '127.0.0.1' # Standard loopback interface address (localhost)
PORTS = 65432 # Port to listen on (non-privileged ports are > 1023)
PORTF = 54321
from flask import Flask
from flask import request
import json
import schemasim.space.space3D as space3D
import schemasim.space.space2D as space2D
import schemasim.space.space as space
import schemasim.schemas.l0_schema_templates as st
import schemasim.schemas.l1_geometric_primitives as gp
import schemasim.schemas.l2_geometric_primitive_relations as gpr
import schemasim.objects.example_objects as eo
import schemasim.simulators.physics_simulator_2D as ps2D
import schemasim.simulators.physics_simulator_3D as ps3D
import schemasim.scene_generator as sg
from schemasim.util.geometry import fibonacci_sphere
from schemasim.schemas.l11_functional_control import Support
def simpleOnNavigationDoneCallback(x):
print("Base arrived at %s" % x)
def simpleHandsLeftPositioningDoneCallback(x):
    # Default left-hand callback: just report the reached pose.
    message = "Left hand arrived at %s" % x
    print(message)
def simpleHandsRightPositioningDoneCallback(x):
    # Default right-hand callback: just report the reached pose.
    message = "Right hand arrived at %s" % x
    print(message)
class Validator2DVW:
    """Cell validity test for 2D navigation: a coordinate is valid when the
    trajector placed there (flat on the ground, identity orientation) is
    collision-free according to the supplied collision manager."""

    def __init__(self, collisionManager, trajector):
        self.collisionManager = collisionManager
        self.trajector = trajector

    def isValid(self, coordinates):
        # Pose the trajector at (x, y, z=0) with no rotation and probe it.
        pose = ((coordinates[0], coordinates[1], 0), (0, 0, 0, 1))
        collides = self.collisionManager.in_collision_single(self.trajector, pose)
        return not collides
class Validator3D:
    """Validity test in 3D: a coordinate is valid when every trajector,
    translated by its own transform plus the queried offset, is free of
    collisions in the supplied collision manager."""

    def __init__(self, collisionManager, trajectors, space):
        self.collisionManager = collisionManager
        self.trajectors = trajectors  # iterable of (trajector, transform) pairs
        self.space = space

    def isValid(self, coordinates):
        for trajector, transform in self.trajectors:
            # transform[0] is a translation, transform[1] a rotation.
            offset = transform[0]
            pose = self.space.poseFromTR(
                (offset[0] + coordinates[0],
                 offset[1] + coordinates[1],
                 offset[2] + coordinates[2]),
                transform[1])
            if self.collisionManager.in_collision_single(trajector, pose):
                return False
        return True
class Midbrain:
    def __init__(self, headActuator, handsActuator, baseActuator, poseSensor, worldDump, simu):
        """Wire up mid-level robot control: a Cerebellum for actuation, the
        collision/navigation state, and the (not-yet-started) socket and
        Flask command-server plumbing."""
        self.cerebellum = Cerebellum(headActuator, handsActuator, baseActuator, poseSensor, simu, worldDump)
        # Default hand poses: symmetric about the body at z=0.96, neutral head.
        self.cerebellum.initializePosition("hands/left", {"x": 0, "y": 0.4, "z": 0.96, "roll": 0, "pitch": 0, "yaw": 0})
        self.cerebellum.initializePosition("hands/right", {"x": 0, "y": -0.4, "z": 0.96, "roll": 0, "pitch": 0, "yaw": 0})
        self.cerebellum.initializePosition("head", {"pan": 0, "tilt": 0})
        self.worldDump = worldDump
        # Navigation grid; built lazily by updateNavigationMap().
        self.cellMap = None
        self.collisionManager = geom.BoxCollisionManager()
        self.simu = simu
        # Convenience aliases for the cerebellum's 2D/3D simulators.
        self.sim2D = self.cerebellum._sim2D
        self.sim3D = self.cerebellum._sim3D
        # Command-server threads are created elsewhere (see _startSocket).
        self._socketThread = None
        self._flaskThread = None
        self._flask = Flask(__name__)
        # Lets HTTP handlers block until a requested robot action finishes;
        # _lastRequestedAction records whether that action succeeded.
        self._robotActionCondition = threading.Condition()
        self._lastRequestedAction = False
    def _simplifyWaypoints(self, waypoints):
        """Compress a dense waypoint path into the sparse list of poses where
        the motion type changes.

        Each step between consecutive waypoints is classified as forward
        motion ("fwd"), positive rotation ("a+") or negative rotation ("a-");
        a waypoint is emitted only where the classification switches or the
        path ends.  Returns a list of {"x", "y", "yaw"} dicts.

        NOTE(review): waypoints[k] is passed to
        cellMap.pointId2EmbeddingCoordinates, suggesting entries are cell
        point ids, yet waypoints[0][0..2] is also indexed directly as
        (x, y, yaw) below.  Also a step below both thresholds appends no op,
        which would misalign ops[k] with waypoints[k] -- TODO confirm.
        """
        retq = []
        if waypoints:
            # Classify each step relative to the previous waypoint.
            ops = []
            cX = waypoints[0][0]
            cY = waypoints[0][1]
            cA = waypoints[0][2]
            for wp in waypoints[1:]:
                dx = cX - wp[0]
                dy = cY - wp[1]
                d = math.sqrt(dx*dx + dy*dy)
                da = geom.angle_diff(cA, wp[2])
                # Translation takes precedence over rotation classification.
                if 0.001 < d:
                    ops.append("fwd")
                elif 0.001 < da:
                    ops.append("a+")
                elif -0.001 > da:
                    ops.append("a-")
                cX = wp[0]
                cY = wp[1]
                cA = wp[2]
            # Sentinel so the final waypoint of the path gets emitted.
            ops.append("end")
            cOp = None
            # Emit a waypoint wherever the op type changes.
            for k, op in enumerate(ops):
                if None == cOp:
                    cOp = op
                elif "end" == cOp:
                    coords = self.cellMap.pointId2EmbeddingCoordinates(waypoints[k])
                    retq.append({"x": coords[0], "y": coords[1], "yaw": coords[2]})
                elif cOp != op:
                    coords = self.cellMap.pointId2EmbeddingCoordinates(waypoints[k])
                    retq.append({"x": coords[0], "y": coords[1], "yaw": coords[2]})
                    cOp = op
        return retq
def getObjectSchemas(self):
pathPrefix = os.path.join(os.path.dirname(__file__), "../meshes")
objects = self.cerebellum._retrieveObjects()
retq = {}
for k,o in objects.items():
retq[k] = eo.MiscellaneousRigidObject(name=k, object_type=o["props"]["type"], mesh=os.path.join(pathPrefix, o["props"]["meshfile"]))
retq[k]._parameters["tx"] = o["position"]["x"]
retq[k]._parameters["ty"] = o["position"]["y"]
retq[k]._parameters["tz"] = o["position"]["z"]
retq[k]._parameters["rx"] = o["orientation"]["x"]
retq[k]._parameters["ry"] = o["orientation"]["y"]
retq[k]._parameters["rz"] = o["orientation"]["z"]
retq[k]._parameters["rw"] = o["orientation"]["w"]
retq[k]._parameters["vx"] = 0.0
retq[k]._parameters["vy"] = 0.0
retq[k]._parameters["vz"] = 0.0
retq[k]._parameters["wx"] = 0.0
retq[k]._parameters["wy"] = 0.0
retq[k]._parameters["wz"] = 0.0
return retq
def listObjects(self):
objects = self.cerebellum._retrieveObjects()
for k in sorted(objects.keys()):
props = ""
for propName in sorted(objects[k]["props"].keys()):
props = props + "\t" + propName + ": " + objects[k]["props"][propName] + "\n"
position = "\t(x: %f; y: %f; z: %f)\n" % (objects[k]["position"]["x"], objects[k]["position"]["y"], objects[k]["position"]["z"])
orientation = "\t(x: %f; y: %f; z: %f; w: %f)\n" % (objects[k]["orientation"]["x"], objects[k]["orientation"]["y"], objects[k]["orientation"]["z"], objects[k]["orientation"]["w"])
s = k+"\n"+props+position+orientation
print(s)
def updateNavigationMap(self):
objects = self.cerebellum._retrieveObjects()
self.collisionManager.clear_objects()
for k in objects.keys():
if ("furniture" in objects[k]["props"]) and objects[k]["props"]["furniture"]:
box = geom.boxFromPath(objects[k]["props"]["meshfile"])
if box:
self.collisionManager.add_object(k, box, ((objects[k]["position"]["x"], objects[k]["position"]["y"], objects[k]["position"]["z"]), (objects[k]["orientation"]["x"], objects[k]["orientation"]["y"], objects[k]["orientation"]["z"], objects[k]["orientation"]["w"])))
testBox = geom.Box()
testBox.vertices = [[-0.5, -0.5, 0], [0.5, -0.5, 0], [-0.5, 0.5, 0], [0.5, 0.5, 0], [-0.5, -0.5, 1], [0.5, -0.5, 1], [-0.5, 0.5, 1], [0.5, 0.5, 1]]
self.cellMap = space2D.Grid2DVW8(lines=10, cols=10, resolution=1, xLeft=-4.5, yDown=-4.5, gridYaw=0, validator=Validator2DVW(self.collisionManager, testBox), velocity=3, angularVelocity=3)
def _interpretSocketCommand(self, command):
opcode = ""
if 'op' in command:
opcode = command['op']
opcode = opcode.lower()
data = {}
if 'args' in command:
data = command['args']
retq = {'status': 'command not recognized', 'response': ''}
if opcode in ['hello', 'hi']:
retq['status'] = 'ok'
retq['response'] = 'hi!'
elif opcode in ['placeon']:
if ('object' in data) and ('destination' in data):
trajector = data['object']
supporter = data['destination']
objSchemas = self.getObjectSchemas()
trajSchema = objSchemas[trajector].unplace(self.sim3D)
destspec = [Support(supporter=objSchemas[supporter],supportee=trajSchema), trajSchema]
self.carryObject(trajector, destspec)
retq['status'] = 'ok'
retq['response'] = 'carrying object %s to %s' % (trajector, supporter)
else:
retq['status'] = 'insufficient parameters'
retq['response'] = 'missing object or destination'
elif opcode in ['retrieveobjects', 'ro']:
retq['status'] = 'ok'
retq['response'] = self.cerebellum._retrieveObjects()
elif opcode in ['retrieveworldstate', 'rws']:
retq['status'] = 'ok'
retq['response'] = self.cerebellum._retrieveWorldState(forJSON=True)
elif opcode in ['setworldstate', 'sws']:
retq['status'] = 'ok'
retq['response'] = ''
try:
self.cerebellum._setWorldState(data)
except KeyError:
retq['status'] = 'missing entries from state data'
return json.dumps(retq)
def _startSocket(self):
def thread_function_socket():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORTS))
s.listen(0)
while True:
conn, addr = s.accept()
comm = ""
with conn:
while True:
data = conn.recv(1024).decode('UTF-8')
comm = comm + data
if (not data) or (data[-1] in ['\n']):
break
comm = comm.strip()
try:
res = self._interpretSocketCommand(json.loads(comm))
except SyntaxError:
res = json.dumps({'status': 'ill-formed json for command'})
conn.sendall(bytes(res, 'UTF-8'))
def thread_function_flask():
@self._flask.route("/abe-sim-command", methods = ['POST'])
def abe_sim_command():
try:
request_data = request.get_json(force=True)
retq = self._interpretSocketCommand(request_data)
except SyntaxError:
retq = json.dumps({'status': 'ill-formed json for command'})
return retq
@self._flask.route("/abe-sim-command/to-get-kitchen", methods = ['POST'])
def to_get_kitchen():
retq = {'status': 'command not recognized', 'response': ''}
try:
request_data = request.get_json(force=True)
varName = request_data['kitchen']
retq['status'] = 'ok'
retq['response'] = {varName: self.cerebellum._retrieveWorldState(forJSON=True)}
except SyntaxError:
retq = {'status': 'ill-formed json for command'}
return json.dumps(retq)
@self._flask.route("/abe-sim-command/to-get-location", methods = ['POST'])
def to_get_location():
retq = {'status': 'ok', 'response': ''}
try:
request_data = request.get_json(force=True)
locationType = request_data['type']
locationVarName = request_data['availableLocation']
kitchenState = request_data['kitchen']
setWorldState = False
if 'setWorldState' in request_data:
setWorldState = request_data['setWorldState']
if setWorldState:
self.cerebellum._setWorldState(kitchenState)
locationName = None
data = self.cerebellum._retrieveWorldState(forJSON=True)
for o in data['worldState'].keys():
if ('props' in data['worldState'][o]) and ('type' in data['worldState'][o]['props']) and (locationType == data['worldState'][o]['props']['type']):
locationName = o
break
retq['response'] = {locationVarName: locationName}
except SyntaxError:
retq = {'status': 'ill-formed json for command'}
return json.dumps(retq)
@self._flask.route("/abe-sim-command/to-fetch", methods = ['POST'])
def to_fetch():
retq = {'status': 'ok', 'response': ''}
try:
request_data = request.get_json(force=True)
kitchenState = request_data['kitchenInputState']
trajector = request_data['object']
supporter = "counterTop1"
setWorldState = False
if 'setWorldState' in request_data:
setWorldState = request_data['setWorldState']
if setWorldState:
self.cerebellum._setWorldState(kitchenState)
objSchemas = self.getObjectSchemas()
trajSchema = objSchemas[trajector].unplace(self.sim3D)
destspec = [Support(supporter=objSchemas[supporter],supportee=trajSchema), trajSchema]
self._lastRequestedAction = False
self.carryObject(trajector, destspec)
with self._robotActionCondition:
self._robotActionCondition.wait()
objectName = trajector
if not self._lastRequestedAction:
objectName = None
worldState = self.cerebellum._retrieveWorldState(forJSON=True)
retq['response'] = {'fetchedObject': objectName, 'kitchenOutputState': worldState}
except KeyError:
retq['status'] = 'missing entries from state data'
return json.dumps(retq)
@self._flask.route("/abe-sim-command/to-transfer", methods = ['POST'])
def to_transfer():
retq = {'status': 'ok', 'response': ''}
try:
request_data = request.get_json(force=True)
kitchenState = request_data['kitchenInputState']
trajector = request_data['input']
supporter = request_data['container']
setWorldState = False
if 'setWorldState' in request_data:
setWorldState = request_data['setWorldState']
if setWorldState:
self.cerebellum._setWorldState(kitchenState)
scene = self.cerebellum._retrieveObjects(fullDump=True)
collisionManager = self.cerebellum._sim3D.space().makeCollisionManager()
for k, v in scene.items():
pose = self.cerebellum._sim3D.space().poseFromTR([scene[k]["position"]["x"], scene[k]["position"]["y"], scene[k]["position"]["z"]], [scene[k]["orientation"]["x"], scene[k]["orientation"]["y"], scene[k]["orientation"]["z"], scene[k]["orientation"]["w"]])
if (k != trajector) and (k in self.cerebellum._volumes.keys()):
collisionManager.add_object(k, self.cerebellum._volumes[k], np.array(pose,dtype=np.double))
objSchemas = self.getObjectSchemas()
trajSchema = objSchemas[trajector].unplace(self.sim3D)
dp = scene[supporter]['position']
dr = scene[supporter]['orientation']
arrangment = 'unorderedHeap'
if (supporter in scene) and ('arrangement' in scene[supporter]['props']):
arrangement = scene[supporter]['props']['arrangement']
if arrangement not in ['shelved']:
arrangement = 'unorderedHeap'
targetRegion = self.cerebellum._preferredLocations[supporter].copy().apply_transform(poseFromTQ([dp['x'], dp['y'], dp['z']], [dr['x'], dr['y'], dr['z'], dr['w']]))
trajectorVolume = self.cerebellum._volumes[trajector]
tBox = self.cerebellum._sim3D.space().volumeBounds(trajectorVolume)
if 'shelved' == arrangement:
shelves = trimesh.graph.split(targetRegion)
found = False
for k in range(35):
shelf = shelves[random.randrange(len(shelves))]
bBox = self.cerebellum._sim3D.space().volumeBounds(shelf)
tv = [random.uniform(bBox[i][0] - tBox[i][0], bBox[i][1] - tBox[i][1]) for i in range(2)] + [bBox[2][0] + 0.005-tBox[2][0]]
tTrajector = trajectorVolume.copy().apply_transform(poseFromTQ(tv, [dr['x'], dr['y'], dr['z'], dr['w']]))
if (not collisionManager.in_collision_single(tTrajector, poseFromTQ([0,0,0], [0,0,0,1]))) and (all(targetRegion.contains(tTrajector.vertices))):
trajSchema._parameters["tx"] = tv[0]
trajSchema._parameters["ty"] = tv[1]
trajSchema._parameters["tz"] = tv[2]
trajSchema._parameters["rx"] = dr['x']
trajSchema._parameters["ry"] = dr['y']
trajSchema._parameters["rz"] = dr['z']
trajSchema._parameters["rw"] = dr['w']
trajSchema._parameters["vx"] = 0.0
trajSchema._parameters["vy"] = 0.0
trajSchema._parameters["vz"] = 0.0
trajSchema._parameters["wx"] = 0.0
trajSchema._parameters["wy"] = 0.0
trajSchema._parameters["wz"] = 0.0
found = True
break
elif 'unorderedHeap' == arrangement:
bBox = self.cerebellum._sim3D.space().volumeBounds(targetRegion)
found = False
for k in range(35):
tv = [random.uniform(bBox[i][0] - tBox[i][0], bBox[i][1] - tBox[i][1]) for i in range(3)]
tTrajector = trajectorVolume.copy().apply_transform(poseFromTQ(tv, [dr['x'], dr['y'], dr['z'], dr['w']]))
if (not collisionManager.in_collision_single(tTrajector, poseFromTQ([0,0,0], [0,0,0,1]))) and (all(targetRegion.contains(tTrajector.vertices))):
trajSchema._parameters["tx"] = tv[0]
trajSchema._parameters["ty"] = tv[1]
trajSchema._parameters["tz"] = tv[2]
trajSchema._parameters["rx"] = dr['x']
trajSchema._parameters["ry"] = dr['y']
trajSchema._parameters["rz"] = dr['z']
trajSchema._parameters["rw"] = dr['w']
trajSchema._parameters["vx"] = 0.0
trajSchema._parameters["vy"] = | |
"g" = "http://a/b/c/g" # http://a + /b/c/ + g
# "./g" = "http://a/b/c/g" # http://a + /b/c/ + g
# "g/" = "http://a/b/c/g/" # http://a + /b/c/ + g + /
# "/g" = "http://a/g" # http://a + /g
# "//g" = "http://g" # http: + //g
# "?y" = "http://a/b/c/d;p?y" # scheme + netloc + path + param + nquery
# "g?y" = "http://a/b/c/g?y"
# "#s" = "http://a/b/c/d;p?q#s" #replace only frag
# "g#s" = "http://a/b/c/g#s"
# "g?y#s" = "http://a/b/c/g?y#s"
# ";x" = "http://a/b/c/;x" #---------这个特殊 ;x 相当于 '<empty-segment>';x
# "g;x" = "http://a/b/c/g;x"
# "g;x?y#s" = "http://a/b/c/g;x?y#s"
# "" = "http://a/b/c/d;p?q" # ----------这个特殊
# "." = "http://a/b/c/"
# "./" = "http://a/b/c/"
# ".." = "http://a/b/"
# "../" = "http://a/b/"
# "../g" = "http://a/b/g"
# "../.." = "http://a/"
# "../../" = "http://a/"
# "../../g" = "http://a/g"
# "../../../g" = "http://a/g"
# "../../../../g" = "http://a/g"
# "/./g" = "http://a/g"
# "/../g" = "http://a/g"
# "g." = "http://a/b/c/g."
# ".g" = "http://a/b/c/.g"
# "g.." = "http://a/b/c/g.."
# "..g" = "http://a/b/c/..g"
# "./../g" = "http://a/b/g"
# "./g/." = "http://a/b/c/g/"
# "g/./h" = "http://a/b/c/g/h"
# "g/../h" = "http://a/b/c/h"
# "g;x=1/./y" = "http://a/b/c/g;x=1/y"
# "g;x=1/../y" = "http://a/b/c/y"
# "g?y/./x" = "http://a/b/c/g?y/./x"
# "g?y/../x" = "http://a/b/c/g?y/../x"
# "g#s/./x" = "http://a/b/c/g#s/./x"
# "g#s/../x" = "http://a/b/c/g#s/../x"
# "http:g" = "http:g" ; for strict parsers
# / "http://a/b/c/g" ; for backward compatibility
#(R.scheme, R.authority, R.path, R.query, R.fragment) = parse(R);
#https://www.w3.org/Addressing/URL/4_3_Partial.html
#params: the params component is obsolete, so don't decode it
#query
#
def get_abs_url(ref_url, rel_url, **kwargs):
    '''
    Resolve a (possibly relative) URL reference against a base URL,
    following the RFC 3986 rules implemented by urllib.parse.urljoin.

    Examples:
        get_abs_url("http://a/b/c/d;p?q", "//g/a")   -> "http://g/a"
        get_abs_url("http://a/b/c/d;p?q", "g/a")     -> "http://a/b/c/g/a"
        get_abs_url("http://a/b/c/d;p?q", "/g/a")    -> "http://a/g/a"
        get_abs_url("http://a/b/c/d;p?q", "../g/a")  -> "http://a/b/g/a"
        get_abs_url("http://a/b/c/d;p?q", "./../g")  -> "http://a/b/g"
    '''
    return urllib.parse.urljoin(ref_url, rel_url)
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# wrap auto_detect
def u2t(url, **kwargs):
    '''
    Split a URL string into a tuple.  mode 9 (the default) produces the
    nine-element form via nin_u2t; any other mode produces the six-element
    form via six_u2t.
    '''
    mode = kwargs.get('mode', 9)
    if mode == 9:
        return nin_u2t(url)
    return six_u2t(url)
def t2u(t):
    '''
    Rebuild a URL string from a URL tuple.  The tuple flavor (nine- or
    six-element) is auto-detected via _get_type.
    '''
    typ = _get_type(t)
    if typ == 'urlnint':
        return nin_t2u(t)
    if typ == 'urlsixt':
        return six_t2u(t)
    raise Exception("must be sixt or nint")
def u2d(url, **kwargs):
    '''
    Split a URL string into a dict.  mode 9 (the default) produces the
    nine-element form via nin_u2d; any other mode produces the six-element
    form via six_u2d.
    '''
    mode = kwargs.get('mode', 9)
    if mode == 9:
        return nin_u2d(url)
    return six_u2d(url)
def d2u(d):
    '''
    Rebuild a URL string from a URL dict.  The dict flavor (nine- or
    six-element) is auto-detected via _get_type.
    '''
    typ = _get_type(d)
    if typ == 'urlnind':
        return nin_d2u(d)
    if typ == 'urlsixd':
        return six_d2u(d)
    raise Exception("must be sixd or nind")
def t2d(t, **kwargs):
    '''
    Convert a URL tuple into a URL dict: rebuild the URL from the tuple
    (flavor auto-detected via _get_type), then resplit it into the dict
    form selected by mode (9 = nine-element, default; else six-element).
    '''
    mode = kwargs.get('mode', 9)
    typ = _get_type(t)
    if typ == 'urlnint':
        url = nin_t2u(t)
    elif typ == 'urlsixt':
        url = six_t2u(t)
    else:
        raise Exception("must be sixt or nint")
    if mode == 9:
        return nin_u2d(url)
    return six_u2d(url)
def d2t(d, **kwargs):
    '''
    Convert a URL dict to a URL tuple.

    The input may be a nine-field ("nind") or six-field ("sixd") dict;
    kwargs['mode'] selects the output tuple form (9 by default, 6 for the
    six-field tuple). Conversion goes through the URL-string form.

    d = {'scheme': 'http', 'username': 'admin', 'password': 'secret', 'hostname': 'local-domain.com', 'port': '8000', 'path': '/path', 'params': '', 'query': 'q=123', 'fragment': 'anchor'}
    d2t(d)
    d2t(d, mode=6)
    '''
    kind = _get_type(d)
    mode = kwargs.get('mode', 9)
    if kind == 'urlnind':
        url = nin_d2u(d)
    elif kind == 'urlsixd':
        url = six_d2u(d)
    else:
        raise Exception("must be sixd or nind")
    return nin_u2t(url) if mode == 9 else six_u2t(url)
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# append: function append()
# constructor: function ()
# delete: function delete()
# entries: function entries()
# forEach: function forEach()
# get: function get()
# getAll: function getAll()
# has: function has()
# keys: function keys()
# set: function set()
# sort: function sort()
# toString: function toString()
# values: function values()
# Symbol(Symbol.iterator): undefined
# var paramsString = "q=URLUtils.searchParams&topic=api";
# var searchParams = new URLSearchParams(paramsString);
# //Iterate the search parameters.
# for (let p of searchParams) {
# console.log(p);
# }
# searchParams.has("topic") === true; // true
# searchParams.get("topic") === "api"; // true
# searchParams.getAll("topic"); // ["api"]
# searchParams.get("foo") === null; // true
# searchParams.append("topic", "webdev");
# searchParams.toString(); // "q=URLUtils.searchParams&topic=api&topic=webdev"
# searchParams.set("topic", "More webdev");
# searchParams.toString(); // "q=URLUtils.searchParams&topic=More+webdev"
# searchParams.delete("topic");
# searchParams.toString(); // "q=URLUtils.searchParams"
class URLSearchParams():
    """Mutable view over a URL query string, modeled on the JS URLSearchParams API.

    Parameters
    ----------
    qstr : query string, with or without a leading '?'
    obj  : optional owning URL object; every mutation is mirrored back onto it
           (its _nind['query'], href, query and search attributes)

    The decoded state lives in self.qtl, a list of (key, value) tuples;
    self.qstr is kept in sync as the encoded form.
    """
    def __init__(self, qstr, obj=None):
        # Tolerate a leading '?'. startswith() also handles the empty
        # string, which the previous qstr[0] check crashed on (IndexError).
        if qstr.startswith('?'):
            qstr = qstr[1:]
        self.qstr = qstr
        self.qtl = query_decode(qstr, quote_plus=False, quote=False)
        self.obj = obj
    def _sync(self):
        """Re-encode self.qtl into self.qstr and mirror it onto the owner URL.

        Shared tail of every mutator (append/prepend/insert/delete/set),
        previously copy-pasted five times.
        """
        self.qstr = query_encode(self.qtl, quote_plus=False, quote=False)
        if self.obj:
            self.obj._nind['query'] = self.qstr
            self.obj.href = nin_d2u(self.obj._nind)
            self.obj.query = self.qstr
            self.obj.search = self.qstr
    def __repr__(self):
        # NOTE: prints every (key, value) pair as a side effect; kept for
        # backward compatibility with the original debug-oriented repr.
        elel.forEach(self.qtl, print)
        return self.qstr
    def append(self, k, v):
        """Add (k, v) at the end of the parameter list."""
        self.qtl = tltl._append(self.qtl, (k, v))
        self._sync()
    def prepend(self, k, v):
        """Add (k, v) at the front of the parameter list."""
        self.qtl = tltl._prepend(self.qtl, (k, v))
        self._sync()
    def insert(self, index, k, v):
        """Insert (k, v) at position index."""
        self.qtl = tltl._insert(self.qtl, index, k, v)
        self._sync()
    def has(self, k):
        """Return True if key k is present."""
        return tltl._includes(self.qtl, key=k)
    def delete(self, k, which=None):
        """Remove entries keyed k: all of them, or only the which-th match."""
        indexes = tltl._indexes_all(self.qtl, key=k)
        if which is not None:
            indexes = [indexes[which]]
        tltl._pop_seqs(self.qtl, indexes)
        self._sync()
    def entries(self):
        """Return the underlying (key, value) tuple list."""
        return self.qtl
    def getAll(self, k):
        """Return every value stored under key k."""
        return tltl.get_value(self.qtl, k, whiches='all')
    def get(self, k, whiches=0):
        """Return the whiches-th value stored under key k (first by default)."""
        return tltl.get_value(self.qtl, k, whiches=whiches)
    def keys(self):
        """Return the list of keys, in order."""
        return elel.array_map(self.qtl, lambda ele: ele[0])
    def values(self):
        """Return the list of values, in order."""
        return elel.array_map(self.qtl, lambda ele: ele[1])
    def toString(self):
        """Return the encoded query string."""
        return self.qstr
    def set(self, k, v, which=None):
        """Replace the value(s) under key k; append (k, v) if the key is absent.

        which selects a single occurrence to replace; by default every
        occurrence is replaced ('all').
        """
        if tltl._includes(self.qtl, key=k):
            if which is None:
                which = 'all'
            self.qtl = tltl.set_which(self.qtl, k, v, mode='key', which=which)
        else:
            self.qtl = tltl._append(self.qtl, (k, v))
        self._sync()
# url = new URL("https://developer.mozilla.org/en-US/docs/Web/API/URL")
# hash: ""
# host: "developer.mozilla.org"
# hostname: "developer.mozilla.org"
# href: "https://developer.mozilla.org/en-US/docs/Web/API/URL"
# origin: "https://developer.mozilla.org"
# password: ""
# pathname: "/en-US/docs/Web/API/URL"
# port: ""
# protocol: "https:"
# search: ""
# searchParams: URLSearchParams
# username: ""
#url = new URL("https://github.com/search?utf8=%E2%9C%93&q=xurl&type=Repositories")
# hash: ""
# host: "github.com"
# hostname: "github.com"
# href: "https://github.com/search?utf8=%E2%9C%93&q=xurl&type=Repositories"
# origin: "https://github.com"
# password: ""
# pathname: "/search"
# port: ""
# protocol: "https:"
# search: "?utf8=%E2%9C%93&q=xurl&type=Repositories"
# searchParams: URLSearchParams { }
# username: ""
class URL():
    def __init__(self,uele,**kwargs):
        """Build a URL object from a string, tuple, or dict.

        uele   : URL in any form classified by _get_type() — nine-field tuple
                 ('urlnint'), nine-field dict ('urlnind'), six-field tuple
                 ('urlsixt'), six-field dict ('urlsixd'); anything else is
                 treated as an already-formed URL string.
        kwargs : 'fmt' (default True) enables path normalization via
                 normalize_path(); all kwargs are forwarded to it.
        """
        typ = _get_type(uele)
        # Normalize every accepted input form to a (urlstr, nine-field dict) pair.
        if(typ == 'urlnint'):
            urlstr = nin_t2u(uele)
            nind = nin_u2d(urlstr)
        elif(typ == 'urlnind'):
            urlstr = nin_d2u(uele)
            nind = uele
        elif(typ == 'urlsixt'):
            urlstr = six_t2u(uele)
            nind = nin_u2d(urlstr)
        elif(typ == 'urlsixd'):
            urlstr = six_d2u(uele)
            nind = nin_u2d(urlstr)
        else:
            # Unrecognized type: assume uele is already a URL string.
            urlstr = uele
            nind = nin_u2d(urlstr)
        #######################
        # NOTE: _nind aliases nind; the fmt branch below mutates it in place,
        # so statement order here is significant.
        self._nind = nind
        if('fmt' in kwargs):
            fmt = kwargs['fmt']
        else:
            fmt = True
        if(fmt):
            # Path normalization changes nind, so href must be rebuilt from it.
            nind['path'] = normalize_path(nind['path'],**kwargs)
            urlstr = nin_d2u(nind)
        else:
            pass
        #######################
        self.href = urlstr
        #######################
        # Expose both urllib-style and JS-URL-style attribute names for
        # each component (scheme/protocol, query/search, fragment/hash, ...).
        self.protocol = nind['scheme']
        self.scheme = nind['scheme']
        #
        self.username = nind['username']
        self.password = nind['password']
        unpwd = eded._select_norecur(nind,'username','password')
        self.userinfo = packup_unpw(unpwd)
        self.hostname = nind['hostname']
        self.port = nind['port']
        hd = eded._select_norecur(nind,'hostname','port')
        self.host = packup_host(hd)
        nlocd = eded._select_norecur(nind,'username','password','hostname','port')
        self.netloc = packup_netloc(nlocd)
        self.origin = get_origin(urlstr)
        #
        self.path = nind['path']
        self.pathname = nind['path']
        self.params = nind['params']
        self.query = nind['query']
        self.search = nind['query']
        self.fragment = nind['fragment']
        self.hash = nind['fragment']
def __repr__(self):
return(self.href)
def searchParams(self):
return(URLSearchParams(self.search,self))
####################
def toDict(self,mode):
if(mode == 6):
sixd = six_u2d(self.href)
return(sixd)
else:
return(self._nind)
def toTuple(self,mode):
if(mode == 6):
sixt = six_u2t(self.href)
return(sixt)
else:
nint = nin_u2t(self.href)
return(nint)
###############################
def repl_protocol(self,new_scheme,**kwargs):
urlstr = replace_protocol(self.href,new_scheme,**kwargs)
nind = nin_u2d(urlstr)
self._nind = nind
self.href = urlstr
self.origin = get_origin(urlstr)
self.scheme = nind['scheme']
self.protocol = nind['scheme']
def repl_netloc(self,new_netloc,**kwargs):
urlstr = replace_netloc(self.href,new_netloc,**kwargs)
nind = nin_u2d(urlstr)
self._nind = nind
self.href = urlstr
self.origin = get_origin(urlstr)
self.username = nind['username']
self.password = nind['password']
unpwd = eded._select_norecur(nind,'username','password')
self.userinfo = packup_unpw(unpwd)
self.hostname = nind['hostname']
self.port = nind['port']
hd = eded._select_norecur(nind,'hostname','port')
self.host = packup_host(hd)
nlocd = eded._select_norecur(nind,'username','password','hostname','port')
self.netloc = packup_netloc(nlocd)
def repl_userinfo(self,new_userinfo,**kwargs):
urlstr = replace_userinfo(self.href,new_userinfo,**kwargs)
nind = nin_u2d(urlstr)
self._nind = nind
self.href = urlstr
self.origin = get_origin(urlstr)
self.username = nind['username']
self.password = <PASSWORD>['password']
unpwd = eded._select_norecur(nind,'username','password')
self.userinfo = packup_unpw(unpwd)
nlocd = eded._select_norecur(nind,'username','password','hostname','port')
self.netloc = packup_netloc(nlocd)
def repl_username(self,new_username,**kwargs):
urlstr = replace_username(self.href,new_username,**kwargs)
nind | |
= Var(within=Reals,bounds=(0,100),initialize=0)
# Decision variables x5077 .. x5534: continuous, initialized to 0.
# The generated declarations follow a strict pattern: runs of (0, 100)
# variables interleaved with runs of ten variables fixed to (0, 0).
def _add_vars(first, last, ub):
    # Attach m.x<first> .. m.x<last> with bounds (0, ub), initialized to 0.
    for _i in range(first, last + 1):
        setattr(m, "x%d" % _i, Var(within=Reals, bounds=(0, ub), initialize=0))

_add_vars(5077, 5096, 100)
_add_vars(5097, 5106, 0)
# From x5107 the pattern repeats every 40 variables: thirty (0, 100)
# followed by ten (0, 0), for ten full cycles (x5107 .. x5506).
for _base in range(5107, 5507, 40):
    _add_vars(_base, _base + 29, 100)
    _add_vars(_base + 30, _base + 39, 0)
_add_vars(5507, 5534, 100)
m.x5535 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5536 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5537 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5538 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5539 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5540 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5541 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5542 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5543 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5544 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5545 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5546 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5547 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5548 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5549 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5550 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5551 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5552 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5553 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5554 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5555 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5556 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5557 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5558 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5559 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5560 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5561 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5562 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5563 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5564 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5565 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5566 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5567 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5568 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5569 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5570 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5571 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5572 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5573 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5574 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5575 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5576 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5577 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5578 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5579 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5580 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5581 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5582 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5583 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5584 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5585 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5586 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5587 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5588 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5589 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5590 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5591 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5592 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5593 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5594 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5595 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5596 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5597 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5598 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5599 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5600 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5601 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5602 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5603 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5604 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5605 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5606 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5607 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5608 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5609 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5610 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5611 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5612 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5613 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5614 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5615 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5616 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5617 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5618 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5619 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5620 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5621 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5622 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5623 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5624 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5625 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5626 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5627 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5628 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5629 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5630 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5631 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5632 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5633 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5634 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5635 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5636 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5637 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5638 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5639 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5640 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5641 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5642 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5643 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5644 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5645 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5646 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5647 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5648 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5649 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5650 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5651 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5652 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5653 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5654 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5655 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5656 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5657 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5658 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5659 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5660 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5661 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5662 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5663 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5664 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5665 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5666 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5667 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5668 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5669 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5670 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5671 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5672 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5673 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5674 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5675 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5676 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5677 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5678 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5679 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5680 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5681 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5682 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5683 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5684 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5685 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5686 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5687 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5688 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5689 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5690 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5691 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5692 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5693 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5694 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5695 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5696 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5697 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5698 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5699 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5700 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5701 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5702 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5703 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5704 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5705 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5706 = Var(within=Reals,bounds=(0,0),initialize=0)
m.x5707 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5708 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5709 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5710 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5711 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5712 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5713 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5714 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5715 = Var(within=Reals,bounds=(0,100),initialize=0)
m.x5716 | |
= self.get('item/' + itemId)
name = item['name']
offset = 0
first = True
while True:
files = self.get('item/%s/files' % itemId, parameters={
'limit': DEFAULT_PAGE_LIMIT,
'offset': offset
})
if first:
if len(files) == 1 and files[0]['name'] == name:
self.downloadFile(
files[0]['_id'],
os.path.join(dest, self.transformFilename(name)),
created=files[0]['created'])
break
else:
dest = os.path.join(dest, self.transformFilename(name))
os.makedirs(dest, exist_ok=True)
for file in files:
self.downloadFile(
file['_id'],
os.path.join(dest, self.transformFilename(file['name'])),
created=file['created'])
first = False
offset += len(files)
if len(files) < DEFAULT_PAGE_LIMIT:
break
def downloadFolderRecursive(self, folderId, dest, sync=False):
"""
Download a folder recursively from Girder into a local directory.
:param folderId: Id of the Girder folder or resource path to download.
:type folderId: ObjectId or Unix-style path to the resource in Girder.
:param dest: The local download destination.
:type dest: str
:param sync: If True, check if item exists in local metadata
cache and skip download provided that metadata is identical.
:type sync: bool
"""
offset = 0
folderId = self._checkResourcePath(folderId)
while True:
folders = self.get('folder', parameters={
'limit': DEFAULT_PAGE_LIMIT,
'offset': offset,
'parentType': 'folder',
'parentId': folderId
})
for folder in folders:
local = os.path.join(dest, self.transformFilename(folder['name']))
os.makedirs(local, exist_ok=True)
self.downloadFolderRecursive(folder['_id'], local, sync=sync)
offset += len(folders)
if len(folders) < DEFAULT_PAGE_LIMIT:
break
offset = 0
while True:
items = self.get('item', parameters={
'folderId': folderId,
'limit': DEFAULT_PAGE_LIMIT,
'offset': offset
})
for item in items:
_id = item['_id']
self.incomingMetadata[_id] = item
if sync and _id in self.localMetadata and item == self.localMetadata[_id]:
continue
self.downloadItem(item['_id'], dest, name=item['name'])
offset += len(items)
if len(items) < DEFAULT_PAGE_LIMIT:
break
def downloadResource(self, resourceId, dest, resourceType='folder', sync=False):
"""
Download a collection, user, or folder recursively from Girder into a local directory.
:param resourceId: ID or path of the resource to download.
:type resourceId: ObjectId or Unix-style path to the resource in Girder.
:param dest: The local download destination. Can be an absolute path or relative to
the current working directory.
:type dest: str
:param resourceType: The type of resource being downloaded: 'collection', 'user',
or 'folder'.
:type resourceType: str
:param sync: If True, check if items exist in local metadata
cache and skip download if the metadata is identical.
:type sync: bool
"""
if resourceType == 'folder':
self.downloadFolderRecursive(resourceId, dest, sync)
elif resourceType in ('collection', 'user'):
offset = 0
resourceId = self._checkResourcePath(resourceId)
while True:
folders = self.get('folder', parameters={
'limit': DEFAULT_PAGE_LIMIT,
'offset': offset,
'parentType': resourceType,
'parentId': resourceId
})
for folder in folders:
local = os.path.join(dest, self.transformFilename(folder['name']))
os.makedirs(local, exist_ok=True)
self.downloadFolderRecursive(folder['_id'], local, sync=sync)
offset += len(folders)
if len(folders) < DEFAULT_PAGE_LIMIT:
break
else:
raise Exception('Invalid resource type: %s' % resourceType)
def saveLocalMetadata(self, dest):
"""
Dumps item metadata collected during a folder download.
:param dest: The local download destination.
"""
with open(os.path.join(dest, '.girder_metadata'), 'w') as fh:
fh.write(json.dumps(self.incomingMetadata))
def loadLocalMetadata(self, dest):
"""
Reads item metadata from a local folder.
:param dest: The local download destination.
"""
try:
with open(os.path.join(dest, '.girder_metadata'), 'r') as fh:
self.localMetadata = json.loads(fh.read())
except OSError:
print('Local metadata does not exists. Falling back to download.')
def inheritAccessControlRecursive(self, ancestorFolderId, access=None, public=None):
"""
Take the access control and public value of a folder and recursively
copy that access control and public value to all folder descendants,
replacing any existing access control on the descendant folders with
that of the ancestor folder.
:param ancestorFolderId: Id of the Girder folder to copy access
control from, to all of its descendant folders.
:param access: Dictionary Access control target, if None, will take
existing access control of ancestor folder
:param public: Boolean public value target, if None, will take existing
public value of ancestor folder
"""
offset = 0
if public is None:
public = self.getFolder(ancestorFolderId)['public']
if access is None:
access = self.getFolderAccess(ancestorFolderId)
while True:
self.setFolderAccess(ancestorFolderId, json.dumps(access), public)
folders = self.get('folder', parameters={
'limit': DEFAULT_PAGE_LIMIT,
'offset': offset,
'parentType': 'folder',
'parentId': ancestorFolderId
})
for folder in folders:
self.inheritAccessControlRecursive(folder['_id'], access, public)
offset += len(folders)
if len(folders) < DEFAULT_PAGE_LIMIT:
break
def addFolderUploadCallback(self, callback):
"""Saves a passed in callback function that will be called after each
folder has completed. Multiple callback functions can be added, they
will be called in the order they were added by calling this function.
Callback functions will be called after a folder in Girder is created
and all subfolders and items for that folder have completed uploading.
Callback functions should take two parameters:
- the folder in Girder
- the full path to the local folder
:param callback: callback function to be called.
"""
self._folderUploadCallbacks.append(callback)
def addItemUploadCallback(self, callback):
"""Saves a passed in callback function that will be called after each
item has completed. Multiple callback functions can be added, they
will be called in the order they were added by calling this function.
Callback functions will be called after an item in Girder is created
and all files for that item have been uploaded. Callback functions
should take two parameters:
- the item in Girder
- the full path to the local folder or file comprising the item
:param callback: callback function to be called.
"""
self._itemUploadCallbacks.append(callback)
def loadOrCreateFolder(self, folderName, parentId, parentType, metadata=None):
"""Returns a folder in Girder with the given name under the given
parent. If none exists yet, it will create it and return it.
:param folderName: the name of the folder to look up.
:param parentId: id of parent in Girder
:param parentType: one of (collection, folder, user)
:param metadata: JSON metadata string to set on folder.
:returns: The folder that was found or created.
"""
children = self.listFolder(parentId, parentType, name=folderName)
try:
return next(children)
except StopIteration:
return self.createFolder(parentId, folderName, parentType=parentType,
metadata=metadata)
def _hasOnlyFiles(self, localFolder):
"""Returns whether a folder has only files. This will be false if the
folder contains any subdirectories.
:param localFolder: full path to the local folder
"""
return not any(os.path.isdir(os.path.join(localFolder, entry))
for entry in os.listdir(localFolder))
def loadOrCreateItem(self, name, parentFolderId, reuseExisting=True, metadata=None):
"""Create an item with the given name in the given parent folder.
:param name: The name of the item to load or create.
:param parentFolderId: id of parent folder in Girder
:param reuseExisting: boolean indicating whether to load an existing
item of the same name in the same location, or create a new one.
:param metadata: JSON metadata string to set on item.
"""
item = None
if reuseExisting:
children = self.listItem(parentFolderId, name=name)
try:
item = next(children)
except StopIteration:
pass
if item is None:
item = self.createItem(parentFolderId, name, description='', metadata=metadata)
return item
def _uploadAsItem(self, localFile, parentFolderId, filePath, reuseExisting=False, dryRun=False,
reference=None):
"""Function for doing an upload of a file as an item.
:param localFile: name of local file to upload
:param parentFolderId: id of parent folder in Girder
:param filePath: full path to the file
:param reuseExisting: boolean indicating whether to accept an existing item
of the same name in the same location, or create a new one instead
:param reference: Option reference to send along with the upload.
"""
if not self.progressReporterCls.reportProgress:
print('Uploading Item from %s' % localFile)
if not dryRun:
# If we are reusing existing items or have upload callbacks, then
# we need to know the item as part of the process. If this is a
# zero-length file, we create an item. Otherwise, we can just
# upload to the parent folder and never learn about the created
# item.
if reuseExisting or len(self._itemUploadCallbacks) or os.path.getsize(filePath) == 0:
currentItem = self.loadOrCreateItem(
os.path.basename(localFile), parentFolderId, reuseExisting)
self.uploadFileToItem(
currentItem['_id'], filePath, filename=localFile, reference=reference)
for callback in self._itemUploadCallbacks:
callback(currentItem, filePath)
else:
self.uploadFileToFolder(
parentFolderId, filePath, filename=localFile, reference=reference)
def _uploadFolderAsItem(self, localFolder, parentFolderId, reuseExisting=False, blacklist=None,
dryRun=False, reference=None):
"""
Take a folder and use its base name as the name of a new item. Then,
upload its containing files into the new item as bitstreams.
:param localFolder: The path to the folder to be uploaded.
:param parentFolderId: Id of the destination folder for the new item.
:param reuseExisting: boolean indicating whether to accept an existing item
of the same name in the same location, or create a new one instead
:param reference: Option reference to send along with the upload.
"""
blacklist = blacklist or []
print('Creating Item from folder | |
bin_format = op + 'b'*4 + 'a'*4 + op2_1 + op2_2 + 'n'*2 + 'c'*4 + 'd'*4
def parse(self, bitstrm):
data = Instruction.parse(self, bitstrm)
data = {"a": int(data['a'], 2),
"b": int(data['b'], 2),
"c": int(data['c'], 2),
"d": int(data['d'], 2),
"n": int(data['n'], 2)}
log_this(self.name, data, hex(self.addr))
return data
def get_dst_reg(self):
return "d{0}".format(self.data['c'])
def get_psw(self):
return self.get("psw", Type.int_32)
def get_n(self):
return self.constant(self.data['n'], Type.int_2)
def get_d_b(self):
return self.get("d{0}".format(self.data['b']), Type.int_32)
def get_d_a(self):
return self.get("d{0}".format(self.data['a']), Type.int_32)
def fetch_operands(self):
return self.get_d_a(), self.get_d_b(), self.get_n()
    def compute_result(self, *args):
        """Multiply d[a] by the upper halfword of d[b], shift left by n,
        subtract from the 64-bit pair E[d] and write the saturated words to
        E[c]; updates the PSW status flags (V, SV, AV, SAV).
        """
        d_a = args[0]
        d_b = args[1]
        n = args[2]
        # Product of full d[a] with the upper halfword of d[b], scaled by n.
        result_tmp = (d_a * (d_b >> 16)) << n.value
        e_d_0 = self.get("d{0}".format(self.data['d']), Type.int_32) # E[d][31:0]
        e_d_1 = self.get("d{0}".format(self.data['d']+1), Type.int_32) # E[d][62:32]
        # Only the low word of the accumulator is modified; the high word is
        # carried through unchanged before saturation.
        result_w0 = e_d_0 - result_tmp
        result_w1 = e_d_1
        # compute ssov32
        max_pos = self.constant(INT32_MAX_POS, Type.int_32)
        max_neg = self.constant(INT32_MAX_NEG, Type.int_32)
        result_w0_ssov = ssov32(result_w0, max_pos, max_neg)
        result_w1_ssov = ssov32(result_w1, max_pos, max_neg)
        # put results
        self.put(result_w0_ssov, "d{0}".format(self.data['c']))
        self.put(result_w1_ssov, "d{0}".format(self.data['c']+1))
        # prepare 64-bit object for setting flags
        # NOTE(review): result_w1 was fetched as int_32; shifting it left by 32
        # without widening looks lossy.  The sibling class
        # RRR1_MSUBS_Q_63_3D_Inst casts to Type.int_64 before shifting --
        # confirm whether the helpers widen implicitly here.
        result = result_w1
        result <<= 32
        result |= result_w0
        # set flags
        # Flags are derived from the pre-saturation 64-bit value; carry is
        # always cleared for this instruction.
        c = 0
        v = overflow_64(result).cast_to(Type.int_32)
        av = advanced_overflow_64(result).cast_to(Type.int_32)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        # Sticky bits: keep the previous SV/ASV when no overflow, set to 1 on
        # overflow (branch-free select via masking).
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
class RRR1_MSUBS_Q_63_25_Inst(Instruction):
    """ Multiply-Subtract Q Format, Saturated instruction:
        op = 0x63
        op2 = 0x25
        User Status Flags: V, SV, AV, SAV

        Multiplies the lower halfwords of d[a] and d[b], shifts the product
        left by n, subtracts it from d[d] and writes the 32-bit saturated
        result to d[c].
    """
    name = 'RRR1_MSUBS.Q_63_25'
    # Primary opcode 0x63; op2 is split across two fields by the RRR1
    # encoding (op2_1 + op2_2 = 0b100101 = 0x25).
    op = "{0}{1}".format(bin(6)[2:].zfill(4), bin(3)[2:].zfill(4))
    op2_1 = "{0}".format(bin(2)[2:].zfill(2))
    op2_2 = "{0}".format(bin(5)[2:].zfill(4))
    # Field layout: registers b and a, op2 halves, 2-bit shift n,
    # destination c, accumulator d.
    bin_format = op + 'b'*4 + 'a'*4 + op2_1 + op2_2 + 'n'*2 + 'c'*4 + 'd'*4

    def parse(self, bitstrm):
        """Decode the a/b/c/d/n bit fields into integers and log them."""
        data = Instruction.parse(self, bitstrm)
        data = {"a": int(data['a'], 2),
                "b": int(data['b'], 2),
                "c": int(data['c'], 2),
                "d": int(data['d'], 2),
                "n": int(data['n'], 2)}
        log_this(self.name, data, hex(self.addr))
        return data

    def get_dst_reg(self):
        """Name of the destination register d[c]."""
        return "d{0}".format(self.data['c'])

    def get_psw(self):
        """Read the Program Status Word (32-bit)."""
        return self.get("psw", Type.int_32)

    def get_n(self):
        """2-bit constant for the shift field n."""
        return self.constant(self.data['n'], Type.int_2)

    def get_d_d(self):
        """Read the accumulator register d[d]."""
        return self.get("d{0}".format(self.data['d']), Type.int_32)

    def get_d_b(self):
        """Read data register d[b]."""
        return self.get("d{0}".format(self.data['b']), Type.int_32)

    def get_d_a(self):
        """Read data register d[a]."""
        return self.get("d{0}".format(self.data['a']), Type.int_32)

    def fetch_operands(self):
        return self.get_d_a(), self.get_d_b(), self.get_d_d(), self.get_n()

    def compute_result(self, *args):
        """Saturated multiply-subtract of the lower halfwords; returns the
        32-bit result and updates the PSW flags."""
        d_a = args[0]
        d_b = args[1]
        d_d = args[2]
        n = args[3]
        # sc selects the saturation case: both halfword operands equal to
        # 0x8000 with n == 1 (the one Q-format product that overflows).
        sc = extend_to_16_bits(((d_a & 0xffff) == 0x8000) & ((d_b & 0xffff) == 0x8000) & (n == 1).cast_to(Type.int_32))
        # NOTE(review): masking 0x7fffffff with a 16-bit-extended condition
        # truncates the saturated value to 0xffff, and (sc^0xffff) only
        # complements the low 16 bits -- confirm extend_to_16_bits semantics
        # against the intended 32-bit saturation value 0x7FFFFFFF.
        mul_res = (0x7fffffff & sc) | ((((d_a & 0xffff) * (d_b & 0xffff)) << n.value) & (sc^0xffff))
        result1 = d_d - mul_res
        # compute ssov32
        max_pos = self.constant(INT32_MAX_POS, Type.int_32)
        max_neg = self.constant(INT32_MAX_NEG, Type.int_32)
        result = ssov32(result1, max_pos, max_neg)
        # set flags
        c = 0
        v = overflow(result)
        av = advanced_overflow(result)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        # Sticky SV/ASV: keep previous value if no overflow, else set to 1.
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
        return result

    def commit_result(self, res):
        """Write the computed result to the destination register d[c]."""
        self.put(res, self.get_dst_reg())
class RRR1_MSUBS_Q_63_3D_Inst(Instruction):
    """ Multiply-Subtract Q Format, Saturated instruction:
        op = 0x63
        op2 = 0x3D
        User Status Flags: V, SV, AV, SAV

        Multiplies the lower halfwords of d[a] and d[b], shifts the product,
        widens it and subtracts it from the 64-bit register pair E[d]; the
        saturated result words are written to E[c].

        (Docstring previously said "Multiply-Add"; the instruction is MSUBS
        and the implementation subtracts the product.)
    """
    name = 'RRR1_MSUBS.Q_63_3D'
    # Primary opcode 0x63; op2_1 + op2_2 = 0b111101 = 0x3D.
    op = "{0}{1}".format(bin(6)[2:].zfill(4), bin(3)[2:].zfill(4))
    op2_1 = "{0}".format(bin(3)[2:].zfill(2))
    op2_2 = "{0}".format(bin(0xd)[2:].zfill(4))
    bin_format = op + 'b'*4 + 'a'*4 + op2_1 + op2_2 + 'n'*2 + 'c'*4 + 'd'*4

    def parse(self, bitstrm):
        """Decode the a/b/c/d/n bit fields into integers and log them."""
        data = Instruction.parse(self, bitstrm)
        data = {"a": int(data['a'], 2),
                "b": int(data['b'], 2),
                "c": int(data['c'], 2),
                "d": int(data['d'], 2),
                "n": int(data['n'], 2)}
        log_this(self.name, data, hex(self.addr))
        return data

    def get_dst_reg(self):
        """Name of the destination register d[c] (low word of E[c])."""
        return "d{0}".format(self.data['c'])

    def get_psw(self):
        """Read the Program Status Word (32-bit)."""
        return self.get("psw", Type.int_32)

    def get_n(self):
        """2-bit constant for the shift field n."""
        return self.constant(self.data['n'], Type.int_2)

    def get_d_b(self):
        """Read data register d[b]."""
        return self.get("d{0}".format(self.data['b']), Type.int_32)

    def get_d_a(self):
        """Read data register d[a]."""
        return self.get("d{0}".format(self.data['a']), Type.int_32)

    def fetch_operands(self):
        return self.get_d_a(), self.get_d_b(), self.get_n()

    def compute_result(self, *args):
        """64-bit saturated multiply-subtract; writes E[c] directly and
        updates the PSW flags."""
        d_a = args[0]
        d_b = args[1]
        n = args[2]
        # Saturation case: both lower halfwords 0x8000 with n == 1.
        sc = extend_to_16_bits(((d_a & 0xffff) == 0x8000) & ((d_b & 0xffff) == 0x8000) & (n == 1).cast_to(Type.int_32))
        # NOTE(review): 0x7fffffff masked with a 16-bit-extended condition
        # truncates to 0xffff -- confirm extend_to_16_bits semantics.
        mul_res = (0x7fffffff & sc) | ((((d_a & 0xffff) * (d_b & 0xffff)) << n.value) & (sc^0xffff))
        e_d_0 = self.get("d{0}".format(self.data['d']), Type.int_32) # E[d][31:0]
        e_d_1 = self.get("d{0}".format(self.data['d']+1), Type.int_32) # E[d][62:32]
        # Assemble the 64-bit accumulator, subtract the product scaled into
        # bits [47:16], then split back into two words.
        d_d_64_bit = (e_d_1.cast_to(Type.int_64) << 32) | e_d_0.cast_to(Type.int_64)
        result_64_bit = d_d_64_bit - (mul_res.cast_to(Type.int_64) << 16)
        result_w0 = (result_64_bit & 0xffffffff).cast_to(Type.int_32)
        result_w1 = (result_64_bit >> 32).cast_to(Type.int_32)
        max_pos = self.constant(INT32_MAX_POS, Type.int_32)
        max_neg = self.constant(INT32_MAX_NEG, Type.int_32)
        result_w0_ssov = ssov32(result_w0, max_pos, max_neg)
        result_w1_ssov = ssov32(result_w1, max_pos, max_neg)
        self.put(result_w0_ssov, "d{0}".format(self.data['c']))
        self.put(result_w1_ssov, "d{0}".format(self.data['c']+1))
        # set flags
        # Flags come from the pre-saturation 64-bit result; carry cleared.
        c = 0
        v = overflow_64(result_64_bit).cast_to(Type.int_32)
        av = advanced_overflow_64(result_64_bit).cast_to(Type.int_32)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        # Sticky SV/ASV: keep previous value if no overflow, else set to 1.
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
class RRR1_MSUBS_Q_63_24_Inst(Instruction):
    """ Multiply-Subtract Q Format, Saturated instruction:
        op = 0x63
        op2 = 0x24
        User Status Flags: V, SV, AV, SAV

        Multiplies the upper halfwords of d[a] and d[b], shifts the product
        left by n, subtracts it from d[d] and writes the 32-bit saturated
        result to d[c].
    """
    name = 'RRR1_MSUBS.Q_63_24'
    # Primary opcode 0x63; op2_1 + op2_2 = 0b100100 = 0x24.
    op = "{0}{1}".format(bin(6)[2:].zfill(4), bin(3)[2:].zfill(4))
    op2_1 = "{0}".format(bin(2)[2:].zfill(2))
    op2_2 = "{0}".format(bin(4)[2:].zfill(4))
    bin_format = op + 'b'*4 + 'a'*4 + op2_1 + op2_2 + 'n'*2 + 'c'*4 + 'd'*4

    def parse(self, bitstrm):
        """Decode the a/b/c/d/n bit fields into integers and log them."""
        data = Instruction.parse(self, bitstrm)
        data = {"a": int(data['a'], 2),
                "b": int(data['b'], 2),
                "c": int(data['c'], 2),
                "d": int(data['d'], 2),
                "n": int(data['n'], 2)}
        log_this(self.name, data, hex(self.addr))
        return data

    def get_dst_reg(self):
        """Name of the destination register d[c]."""
        return "d{0}".format(self.data['c'])

    def get_psw(self):
        """Read the Program Status Word (32-bit)."""
        return self.get("psw", Type.int_32)

    def get_n(self):
        """2-bit constant for the shift field n."""
        return self.constant(self.data['n'], Type.int_2)

    def get_d_d(self):
        """Read the accumulator register d[d]."""
        return self.get("d{0}".format(self.data['d']), Type.int_32)

    def get_d_b(self):
        """Read data register d[b]."""
        return self.get("d{0}".format(self.data['b']), Type.int_32)

    def get_d_a(self):
        """Read data register d[a]."""
        return self.get("d{0}".format(self.data['a']), Type.int_32)

    def fetch_operands(self):
        return self.get_d_a(), self.get_d_b(), self.get_d_d(), self.get_n()

    def compute_result(self, *args):
        """Saturated multiply-subtract of the upper halfwords; returns the
        32-bit result and updates the PSW flags."""
        d_a = args[0]
        d_b = args[1]
        d_d = args[2]
        n = args[3]
        # Saturation case: both upper halfwords equal 0x8000 with n == 1.
        sc = extend_to_16_bits(((d_a >> 16) == 0x8000) & ((d_b >> 16) == 0x8000) & (n == 1).cast_to(Type.int_32))
        # NOTE(review): 0x7fffffff masked with a 16-bit-extended condition
        # truncates to 0xffff -- confirm extend_to_16_bits semantics.
        mul_res = (0x7fffffff & sc) | ((((d_a >> 16) * (d_b >> 16)) << n.value) & (sc^0xffff))
        result1 = d_d - mul_res
        # compute ssov32
        max_pos = self.constant(INT32_MAX_POS, Type.int_32)
        max_neg = self.constant(INT32_MAX_NEG, Type.int_32)
        result = ssov32(result1, max_pos, max_neg)
        # set flags
        c = 0
        v = overflow(result)
        av = advanced_overflow(result)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        # Sticky SV/ASV: keep previous value if no overflow, else set to 1.
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
        return result

    def commit_result(self, res):
        """Write the computed result to the destination register d[c]."""
        self.put(res, self.get_dst_reg())
class RRR1_MSUBS_Q_63_3C_Inst(Instruction):
    """ Multiply-Subtract Q Format, Saturated instruction:
        op = 0x63
        op2 = 0x3C
        User Status Flags: V, SV, AV, SAV

        Multiplies the upper halfwords of d[a] and d[b], shifts the product,
        and subtracts it (scaled by 2**16) from the low word of the register
        pair E[d]; the saturated words are written to E[c].

        (Docstring previously said "Multiply-Add"; the instruction is MSUBS
        and the implementation subtracts the product.)
    """
    name = 'RRR1_MSUBS.Q_63_3C'
    # Primary opcode 0x63; op2_1 + op2_2 = 0b111100 = 0x3C.
    op = "{0}{1}".format(bin(6)[2:].zfill(4), bin(3)[2:].zfill(4))
    op2_1 = "{0}".format(bin(3)[2:].zfill(2))
    op2_2 = "{0}".format(bin(0xc)[2:].zfill(4))
    bin_format = op + 'b'*4 + 'a'*4 + op2_1 + op2_2 + 'n'*2 + 'c'*4 + 'd'*4

    def parse(self, bitstrm):
        """Decode the a/b/c/d/n bit fields into integers and log them."""
        data = Instruction.parse(self, bitstrm)
        data = {"a": int(data['a'], 2),
                "b": int(data['b'], 2),
                "c": int(data['c'], 2),
                "d": int(data['d'], 2),
                "n": int(data['n'], 2)}
        log_this(self.name, data, hex(self.addr))
        return data

    def get_dst_reg(self):
        """Name of the destination register d[c] (low word of E[c])."""
        return "d{0}".format(self.data['c'])

    def get_psw(self):
        """Read the Program Status Word (32-bit)."""
        return self.get("psw", Type.int_32)

    def get_n(self):
        """2-bit constant for the shift field n."""
        return self.constant(self.data['n'], Type.int_2)

    def get_d_b(self):
        """Read data register d[b]."""
        return self.get("d{0}".format(self.data['b']), Type.int_32)

    def get_d_a(self):
        """Read data register d[a]."""
        return self.get("d{0}".format(self.data['a']), Type.int_32)

    def fetch_operands(self):
        return self.get_d_a(), self.get_d_b(), self.get_n()

    def compute_result(self, *args):
        """Saturated multiply-subtract against E[d]'s low word; writes E[c]
        directly and updates the PSW flags."""
        d_a = args[0]
        d_b = args[1]
        n = args[2]
        # Saturation case: both upper halfwords equal 0x8000 with n == 1.
        sc = extend_to_16_bits(((d_a >> 16) == 0x8000) & ((d_b >> 16) == 0x8000) & (n == 1).cast_to(Type.int_32))
        # NOTE(review): 0x7fffffff masked with a 16-bit-extended condition
        # truncates to 0xffff -- confirm extend_to_16_bits semantics.
        mul_res = (0x7fffffff & sc) | ((((d_a >> 16) * (d_b >> 16)) << n.value) & (sc^0xffff))
        e_d_0 = self.get("d{0}".format(self.data['d']), Type.int_32) # E[d][31:0]
        e_d_1 = self.get("d{0}".format(self.data['d']+1), Type.int_32) # E[d][62:32]
        # Only the low word is modified; the high word passes through.
        result_w0 = e_d_0 - (mul_res << 16)
        result_w1 = e_d_1
        # compute ssov32
        max_pos = self.constant(INT32_MAX_POS, Type.int_32)
        max_neg = self.constant(INT32_MAX_NEG, Type.int_32)
        result_w0_ssov = ssov32(result_w0, max_pos, max_neg)
        result_w1_ssov = ssov32(result_w1, max_pos, max_neg)
        # put results
        self.put(result_w0_ssov, "d{0}".format(self.data['c']))
        self.put(result_w1_ssov, "d{0}".format(self.data['c']+1))
        # prepare 64-bit object for setting flags
        # NOTE(review): result_w1 is int_32; shifting left by 32 without
        # widening looks lossy -- RRR1_MSUBS_Q_63_3D_Inst casts to int_64
        # first.  Confirm whether the helpers widen implicitly here.
        result = result_w1
        result <<= 32
        result |= result_w0
        # set flags
        c = 0
        v = overflow_64(result).cast_to(Type.int_32)
        av = advanced_overflow_64(result).cast_to(Type.int_32)
        psw = self.get_psw()
        cond_sv = (v == 0)
        cond_sav = (av == 0)
        # Sticky SV/ASV: keep previous value if no overflow, else set to 1.
        sv = ((psw & SV_MASK) & cond_sv) | (1 & (cond_sv^1))
        sav = ((psw & ASV_MASK) & cond_sav) | (1 & (cond_sav^1))
        psw = set_usb(psw, c, v, sv, av, sav)
        self.put(psw, "psw")
class RRR1_MSUBAD_H_E3_1A_Inst(Instruction):
""" Packed Multiply-Subtract/Add Q Format instruction:
op = 0xE3
op2 = | |
"""
Object to manage regular expressions, try to optimize the result:
- '(a|b)' => '[ab]'
- '(color red|color blue)' => 'color (red|blue)'
- '([ab]|c)' => '[abc]'
- 'ab' + 'cd' => 'abcd' (one long string)
- [a-z]|[b] => [a-z]
- [a-c]|[a-e] => [a-e]
- [a-c]|[d] => [a-d]
- [a-c]|[d-f] => [a-f]
Operation:
- str(): convert to string
- repr(): debug string
- a & b: concatenation, eg. "big " & "car" => "big car"
- a + b: alias to a & b
- a | b: a or b, eg. "dog" | "cat" => "dog|cat"
- minLength(): minimum length of matching pattern, "(cat|horse)".minLength() => 3
- maxLength(): maximum length of matching pattern, "(cat|horse)".maxLength() => 5
Utilities:
- createString(): create a regex matching a string
- createRange(): create a regex matching character ranges
TODO:
- Support Unicode regex (avoid mixing str and unicode types)
- createString("__tax") | parse("__[12]") => group '__'
- Make sure that all RegexXXX() classes are immutable
- Use singleton for dot, start and end
See also CPAN Regexp::Assemble (Perl module):
http://search.cpan.org/~dland/Regexp-Assemble-0.28/Assemble.pm
"""
import re
import operator
from hachoir.core.tools import makePrintable
def matchSingleValue(regex):
    """Return True when *regex* can only ever match one exact string.

    Empty/anchor atoms and literal strings qualify; a conjunction
    qualifies when every part does; a character class qualifies only
    when it contains a single character.
    """
    klass = type(regex)
    if klass in (RegexEmpty, RegexString, RegexStart, RegexEnd):
        return True
    if klass is RegexAnd:
        # A concatenation is single-valued iff all of its parts are.
        return all(matchSingleValue(part) for part in regex)
    if klass is RegexRange:
        return len(regex.ranges) == 1 and len(regex.ranges[0]) == 1
    return False
def escapeRegex(text):
    """
    Escape a literal string for use inside a regular expression:
    each special character in ^ . + * ? { } [ ] | ( ) \ $ is prefixed
    with a backslash.
    """
    special = "[]^.+*?{}|()\\$"
    return "".join("\\" + ch if ch in special else ch for ch in text)
def _join(func, regex_list):
if not isinstance(regex_list, (tuple, list)):
regex_list = list(regex_list)
if len(regex_list) == 0:
return RegexEmpty()
regex = regex_list[0]
for item in regex_list[1:]:
regex = func(regex, item)
return regex
def createString(text):
    """Build a regex matching exactly *text*; '' maps to RegexEmpty.

    >>> createString('')
    <RegexEmpty ''>
    >>> createString('abc')
    <RegexString 'abc'>
    """
    return RegexString(text) if text else RegexEmpty()
def createRange(*text, **kw):
    """
    Build a character-class regex from individual characters.

    >>> createRange("a", "d", "b")
    <RegexRange '[abd]'>
    >>> createRange("-", "9", "4", "3", "0")
    <RegexRange '[0349-]'>
    """
    items = [RegexRangeCharacter(character) for character in text]
    return RegexRange(items, kw.get('exclude', False))
class Regex:
    """
    Abstract class defining a regular expression atom.

    Subclasses implement minLength(), _str() and _eq(); the operator
    overloads (& / + for concatenation, | for alternation) build
    optimized combinations of atoms.
    """
    def minLength(self):
        """
        Minimum length in characters of the regex.
        Returns None if there is no limit.
        """
        raise NotImplementedError()
    def maxLength(self):
        """
        Maximum length in characters of the regex.
        Returns None if there is no limit.
        Defaults to minLength() (fixed-size atom); variable-size
        subclasses override this.
        """
        return self.minLength()
    def __str__(self, **kw):
        # Cache the rendered pattern per keyword variant (keyed on the
        # 'python' flag) since rendering can recurse over sub-regexes.
        if not hasattr(self, "_str_value"):
            self._str_value = {}
        key = kw.get('python', False)
        if key not in self._str_value:
            self._str_value[key] = self._str(**kw)
        return self._str_value[key]
    def _str(self, **kw):
        raise NotImplementedError()
    def __repr__(self, **kw):
        regex = self.__str__(**kw)
        regex = makePrintable(regex, 'ASCII')
        return "<%s '%s'>" % (
            self.__class__.__name__, regex)
    def __contains__(self, item):
        raise NotImplementedError()
    def match(self, other):
        """
        Guess if self may match the *other* regex.
        May return False even if self does match *other* (conservative).
        """
        if self == other:
            return True
        return self._match(other)
    def _match(self, other):
        """
        Does this regex match the other regex?
        Eg. "." matches "0" or "[a-z]" but "0" doesn't match ".".
        This function is used by match() which already checks regex identity.
        """
        return False
    def _and(self, regex):
        """
        Create new optimized version of a+b.
        Returns None if there is no interesting optimization.
        """
        return None
    def __and__(self, regex):
        """
        Create new optimized version of a & b.
        Returns None if there is no interesting optimization.
        >>> RegexEmpty() & RegexString('a')
        <RegexString 'a'>
        """
        if regex.__class__ == RegexEmpty:
            return self
        new_regex = self._and(regex)
        if new_regex:
            return new_regex
        else:
            return RegexAnd((self, regex))
    def __add__(self, regex):
        # '+' is a strict alias of '&' (concatenation).
        return self.__and__(regex)
    def or_(self, other):
        """
        Create new optimized version of a|b.
        Returns None if there is no interesting optimization.
        """
        # (a|a) => a
        if self == other:
            return self
        # a matchs b => a
        if self._match(other):
            return self
        # b matchs a => b
        if other._match(self):
            return other
        # Try to optimize (a|b)
        if self.__class__ != other.__class__:
            new_regex = self._or_(other, False)
            if new_regex:
                return new_regex
            # Try to optimize (b|a)
            new_regex = other._or_(self, True)
            if new_regex:
                return new_regex
            return None
        else:
            return self._or_(other, False)
    def _or_(self, other, reverse):
        """
        Try to create an optimized version of self|other when reverse is
        False, or of other|self when reverse is True.
        """
        return None
    def __or__(self, other):
        """
        Public method of OR operator: a|b. It calls the or_() internal
        method. If or_() returns None, a RegexOr object is used
        (otherwise, the or_() result is used).
        """
        # Try to optimize (a|b)
        new_regex = self.or_(other)
        if new_regex:
            return new_regex
        # Else use (a|b)
        return RegexOr((self, other))
    def __eq__(self, regex):
        if self.__class__ != regex.__class__:
            return False
        return self._eq(regex)
    def _eq(self, other):
        """
        Check if two objects of the same class are equal.
        """
        raise NotImplementedError(
            "Class %s has no method _eq()" % self.__class__.__name__)
    def compile(self, **kw):
        return re.compile(self.__str__(**kw))
    def findPrefix(self, regex):
        """
        Try to create a common prefix between two regex.
        Eg. "abc" and "abd" => "ab"
        Return None if no prefix can be found.
        """
        return None
    def __iter__(self):
        raise NotImplementedError()
class RegexEmpty(Regex):
    """Regex matching the empty string."""
    def minLength(self):
        # The empty pattern consumes no characters.
        return 0
    def _str(self, **kw):
        return ''
    def _and(self, other):
        # '' + r collapses to r.
        return other
    def _eq(self, other):
        # All RegexEmpty instances are interchangeable.
        return True
class RegexWord(RegexEmpty):
    """Zero-width word-boundary assertion (\\b)."""
    def _and(self, other):
        # Two adjacent word boundaries collapse to one.
        return self if other.__class__ == RegexWord else None
    def _str(self, **kw):
        return r'\b'
class RegexStart(RegexEmpty):
    """Zero-width start-of-string anchor (^)."""
    def _and(self, other):
        # '^^' collapses to a single '^'.
        return self if other.__class__ == RegexStart else None
    def _str(self, **kw):
        return '^'
class RegexEnd(RegexStart):
    """Zero-width end-of-string anchor ($)."""
    def _and(self, other):
        # '$$' collapses to a single '$'.
        return self if other.__class__ == RegexEnd else None
    def _str(self, **kw):
        return '$'
class RegexDot(Regex):
    """Regex matching any single character ('.')."""
    def minLength(self):
        return 1
    def _str(self, **kw):
        return '.'
    def _match(self, other):
        # '.' matches any character class, and any one-character literal.
        klass = other.__class__
        if klass == RegexRange:
            return True
        return klass == RegexString and len(other.text) == 1
    def _eq(self, other):
        # All dots are interchangeable.
        return True
class RegexString(Regex):
    """Regex matching one exact literal string (never empty)."""
    def __init__(self, text=""):
        assert isinstance(text, str)
        self.text = text
        # The empty string is represented by RegexEmpty, not RegexString.
        assert 1 <= len(self.text)
    def minLength(self):
        return len(self.text)
    def _and(self, regex):
        """
        Merge two adjacent literals into one longer literal.
        >>> RegexString('a') + RegexString('b')
        <RegexString 'ab'>
        """
        if regex.__class__ == RegexString:
            return RegexString(self.text + regex.text)
        return None
    def _str(self, **kw):
        return escapeRegex(self.text)
    def findPrefix(self, regex):
        """
        Try to find a common prefix of two string regex, returns:
        - None if there is no common prefix
        - (prefix, regexa, regexb) otherwise => prefix + (regexa|regexb)
        >>> RegexString('color red').findPrefix(RegexString('color blue'))
        (<RegexString 'color '>, <RegexString 'red'>, <RegexString 'blue'>)
        """
        if regex.__class__ != RegexString:
            return None
        texta = self.text
        textb = regex.text
        # '(a|b)' => '[ab]'
        if len(texta) == len(textb) == 1:
            return (createRange(texta, textb), RegexEmpty(), RegexEmpty())
        # '(text abc|text def)' => 'text (abc|def)'
        # common = length of the longest shared prefix (None if none).
        common = None
        for length in range(1, min(len(texta), len(textb)) + 1):
            if textb.startswith(texta[:length]):
                common = length
            else:
                break
        if not common:
            return None
        return (RegexString(texta[:common]), createString(texta[common:]), createString(textb[common:]))
    def _or_(self, other, reverse):
        """
        Remove duplicate:
        >>> RegexString("color") | RegexString("color")
        <RegexString 'color'>
        Group prefix:
        >>> RegexString("color red") | RegexString("color blue")
        <RegexAnd 'color (red|blue)'>
        >>> RegexString("color red") | RegexString("color")
        <RegexAnd 'color( red|)'>
        """
        # Don't know any other optimization for str|other
        if other.__class__ != RegexString:
            return None
        # Find common prefix
        common = self.findPrefix(other)
        if common:
            # Keep operand order: reverse=True means the caller was
            # actually computing other|self.
            if not reverse:
                regex = common[1] | common[2]
            else:
                regex = common[2] | common[1]
            return common[0] + regex
        return None
    def _eq(self, other):
        return self.text == other.text
class RegexRangeItem:
    """Inclusive interval [cmin..cmax] of character codes inside a
    character class (e.g. the 'a-z' part of '[a-z0-9]')."""
    def __init__(self, cmin, cmax=None):
        try:
            self.cmin = cmin
            # A missing cmax means a single-character range.
            self.cmax = cmin if cmax is None else cmax
        except TypeError:
            raise TypeError("RegexRangeItem: two characters expected (%s, %s) found" % (
                type(cmin), type(cmax)))
        if self.cmax < self.cmin:
            raise TypeError("RegexRangeItem: minimum (%u) is bigger than maximum (%u)" %
                            (self.cmin, self.cmax))
    def __len__(self):
        # Number of characters covered by the (inclusive) interval.
        return self.cmax - self.cmin + 1
    def __contains__(self, value):
        assert issubclass(value.__class__, RegexRangeItem)
        # True when value's interval is fully inside ours.
        return self.cmin <= value.cmin and value.cmax <= self.cmax
    def __str__(self, **kw):
        lo = chr(self.cmin)
        if self.cmin == self.cmax:
            return lo
        hi = chr(self.cmax)
        # Two adjacent characters are written 'ab' rather than 'a-b'.
        separator = "" if self.cmin + 1 == self.cmax else "-"
        return lo + separator + hi
    def __repr__(self):
        return "<RegexRangeItem %u-%u>" % (self.cmin, self.cmax)
class RegexRangeCharacter(RegexRangeItem):
    """Degenerate one-character range inside a character class."""
    def __init__(self, char):
        code = ord(char)
        RegexRangeItem.__init__(self, code, code)
class RegexRange(Regex):
def __init__(self, ranges, exclude=False, optimize=True):
if optimize:
| |
# coding: utf-8
"""
Location API
Geolocation, Geocoding and Maps # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class GeolocationResponseSchema(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
    openapi_types (dict): The key is attribute name
    and the value is attribute type.
    attribute_map (dict): The key is attribute name
    and the value is json key in definition.
    """
    # Attribute name -> declared OpenAPI type (consumed by to_dict()).
    openapi_types = {
        'status': 'str',
        'message': 'str',
        'balance': 'int',
        'balance_slots': 'int',
        'lat': 'float',
        'lon': 'float',
        'accuracy': 'int',
        'address': 'str',
        'address_details': 'AddressDetailsSchema',
        'aged': 'int',
        'fallback': 'FallbackSchema'
    }
    # Attribute name -> JSON key in the wire payload (identity mapping here).
    attribute_map = {
        'status': 'status',
        'message': 'message',
        'balance': 'balance',
        'balance_slots': 'balance_slots',
        'lat': 'lat',
        'lon': 'lon',
        'accuracy': 'accuracy',
        'address': 'address',
        'address_details': 'address_details',
        'aged': 'aged',
        'fallback': 'fallback'
    }
def __init__(self, status=None, message=None, balance=None, balance_slots=None, lat=None, lon=None, accuracy=None, address=None, address_details=None, aged=None, fallback=None):  # noqa: E501
    """GeolocationResponseSchema - a model defined in OpenAPI"""  # noqa: E501
    # Initialize every backing field to None so the getters are always safe.
    for attr_name in self.openapi_types:
        setattr(self, '_' + attr_name, None)
    self.discriminator = None
    # Route each supplied (non-None) value through its property setter,
    # in the same order as the declared attributes.
    provided = {
        'status': status,
        'message': message,
        'balance': balance,
        'balance_slots': balance_slots,
        'lat': lat,
        'lon': lon,
        'accuracy': accuracy,
        'address': address,
        'address_details': address_details,
        'aged': aged,
        'fallback': fallback,
    }
    for attr_name, value in provided.items():
        if value is not None:
            setattr(self, attr_name, value)
@property
def status(self):
    """Request outcome: ``ok`` on success, ``error`` otherwise.

    :return: The status of this GeolocationResponseSchema.
    :rtype: str
    """
    return self._status
@status.setter
def status(self, status):
    """Set the status of this GeolocationResponseSchema.

    :type: str
    """
    self._status = status
@property
def message(self):
    """Any additional information from the server.

    :return: The message of this GeolocationResponseSchema.
    :rtype: str
    """
    return self._message
@message.setter
def message(self, message):
    """Set the message of this GeolocationResponseSchema.

    :type: str
    """
    self._message = message
@property
def balance(self):
    """Remaining balance on the API token. Requests that return error
    are not charged and do not affect the balance.

    :return: The balance of this GeolocationResponseSchema.
    :rtype: int
    """
    return self._balance
@balance.setter
def balance(self, balance):
    """Set the balance of this GeolocationResponseSchema.

    :type: int
    """
    self._balance = balance
@property
def balance_slots(self):
    """Remaining balance of device slots (device plans only). Requests
    that return error are not charged. A value of -1 signals an error
    while calculating the slots balance.

    :return: The balance_slots of this GeolocationResponseSchema.
    :rtype: int
    """
    return self._balance_slots
@balance_slots.setter
def balance_slots(self, balance_slots):
    """Set the balance_slots of this GeolocationResponseSchema.

    :type: int
    """
    self._balance_slots = balance_slots
@property
def lat(self):
    """Latitude of the located position.

    :return: The lat of this GeolocationResponseSchema.
    :rtype: float
    """
    return self._lat
@lat.setter
def lat(self, lat):
    """Set the lat of this GeolocationResponseSchema.

    :type: float
    """
    self._lat = lat
@property
def lon(self):
    """Longitude of the located position.

    :return: The lon of this GeolocationResponseSchema.
    :rtype: float
    """
    return self._lon
@lon.setter
def lon(self, lon):
    """Set the lon of this GeolocationResponseSchema.

    :type: float
    """
    self._lon = lon
@property
def accuracy(self):
    """Accuracy of the position, in meters.

    :return: The accuracy of this GeolocationResponseSchema.
    :rtype: int
    """
    return self._accuracy
@accuracy.setter
def accuracy(self, accuracy):
    """Set the accuracy of this GeolocationResponseSchema.

    :type: int
    """
    self._accuracy = accuracy
@property
def address(self):
    """Physical address of the location.

    :return: The address of this GeolocationResponseSchema.
    :rtype: str
    """
    return self._address
@address.setter
def address(self, address):
    """Set the address of this GeolocationResponseSchema.

    :type: str
    """
    self._address = address
@property
def address_details(self):
    """Structured address details.

    :return: The address_details of this GeolocationResponseSchema.
    :rtype: AddressDetailsSchema
    """
    return self._address_details
@address_details.setter
def address_details(self, address_details):
    """Set the address_details of this GeolocationResponseSchema.

    :type: AddressDetailsSchema
    """
    self._address_details = address_details
@property
def aged(self):
    """Present when the location is based on a single measurement, on
    measurements older than 90 days, or is an LAC fallback.

    :return: The aged of this GeolocationResponseSchema.
    :rtype: int
    """
    return self._aged
@aged.setter
def aged(self, aged):
    """Set the aged of this GeolocationResponseSchema.

    :type: int
    """
    self._aged = aged
@property
def fallback(self):
    """Fallback information used to produce the location, if any.

    :return: The fallback of this GeolocationResponseSchema.
    :rtype: FallbackSchema
    """
    return self._fallback
@fallback.setter
def fallback(self, fallback):
    """Set the fallback of this GeolocationResponseSchema.

    :type: FallbackSchema
    """
    self._fallback = fallback
def to_dict(self):
    """Returns the model properties as a dict"""
    result = {}
    # Walk the declared attributes and recursively serialize any nested
    # model objects (anything exposing its own to_dict()).
    for attr in self.openapi_types:
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [
                item.to_dict() if hasattr(item, "to_dict") else item
                for item in value
            ]
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {
                key: val.to_dict() if hasattr(val, "to_dict") else val
                for key, val in value.items()
            }
        else:
            result[attr] = value
    return result
def to_str(self):
    """Returns the string representation of the model"""
    snapshot = self.to_dict()
    return pprint.pformat(snapshot)
def __repr__(self):
    """Debug representation used by `print` and `pprint` (pretty-printed dict)."""
    return self.to_str()
def __eq__(self, other):
    """Returns true if both objects are equal"""
    # Only instances of the same model type can compare equal.
    if isinstance(other, GeolocationResponseSchema):
        return self.__dict__ == other.__dict__
    return False
def __ne__(self, other):
"""Returns | |
# Source: YerongLi2/LTVRR (10-100 stars)
# Written by <NAME> on Jan 2020
import numpy as np
import pandas as pd
import json
import os.path as osp
# import seaborn as sns # not critical.
import matplotlib.pylab as plt
# In[9]:
import os
import re
def files_in_subdirs(top_dir, search_pattern):  # TODO: organize project as proper
    """Yield every file path under *top_dir* whose full path matches
    *search_pattern* (a regular expression, searched not anchored)."""
    pattern = re.compile(search_pattern)
    for dirpath, _, filenames in os.walk(top_dir):
        for filename in filenames:
            candidate = os.path.join(dirpath, filename)
            if pattern.search(candidate):
                yield candidate
def keep_only_heavy_tail_observations(dataframe, prediction_type, threshold_of_tail, gt_prefix='gt'):
    """Keep only the rows whose ground-truth class lies in the frequency tail.

    Classes are sorted by (normalized) frequency, descending; a class is kept
    when the cumulative frequency mass up to and including it exceeds
    *threshold_of_tail* — i.e. the head carrying the first *threshold_of_tail*
    of the mass is dropped.

    :param dataframe: results frame with columns ``<gt_prefix>_<prediction_type>``
        and ``<prediction_type>_freq_gt``.
    :param prediction_type: one of 'rel', 'sbj', 'obj'.
    :param threshold_of_tail: cumulative-frequency cutoff in [0, 1].
    :param gt_prefix: prefix of the ground-truth column (default 'gt').
        BUGFIX: the original referenced a module-level ``gt_prefix`` that was
        never defined (it is only a local in other functions), so every call
        raised NameError.
    :return: filtered copy of *dataframe*.
    """
    df = dataframe.copy()
    gt_col = gt_prefix + '_' + prediction_type
    freq_col = prediction_type + '_freq_gt'
    freqs = df[[gt_col, freq_col]]
    # One frequency per class (assumes the freq column is constant per class).
    unique_freqs = freqs.groupby(gt_col).mean()
    unique_freqs = unique_freqs.sort_values(freq_col, ascending=False)
    # Normalize to a probability mass (scalar sum avoids float(Series)).
    n_total_occurences = unique_freqs[freq_col].sum()
    unique_freqs[freq_col] /= float(n_total_occurences)
    valid = unique_freqs[unique_freqs.cumsum()[freq_col] > threshold_of_tail].index
    return df[df[gt_col].isin(valid)]
def get_group_counts(keys, ann_path):
    """Count annotation rows per unique combination of *keys*.

    Reads the CSV at *ann_path* and returns a Series of group sizes,
    sorted ascending, indexed by a MultiIndex built from the key columns.
    """
    annotations = pd.read_csv(ann_path)
    grouped = annotations.groupby(keys).size().reset_index(name='counts')
    grouped = grouped.sort_values('counts')
    grouped = grouped[keys + ['counts']]
    grouped.index = pd.MultiIndex.from_arrays(grouped[keys].values.T)
    return grouped['counts']
def get_many_medium_few_scores(csv_path, cutoffs, data, data_dir, ann_dir, syn=True):
    """Print many/medium/few-shot top-1 accuracy tables for a results CSV.

    :param csv_path: per-example results CSV with gt_*/det_* labels,
        *_rank and *_freq_gt columns.
    :param cutoffs: (cutoff, cutoff_medium) quantile boundaries in [0, 1]
        splitting classes (sorted by frequency) into few/medium/many.
    :param data: dataset name, 'gvqa' or 'vg8k' (selects the synset source).
    :param data_dir: directory holding the synset mapping files.
    :param ann_dir: directory holding rel_annotations_train.csv (triplet freqs).
    :param syn: when True, score matches at the synset level ('_syn' columns).
    Prints the tables as a side effect; returns None.
    """
    df = pd.read_csv(csv_path)
    df['box_id'] = df.groupby('image_id').cumcount()
    metric_type = 'top1'
    all_prediction_types = ['rel', 'obj', 'sbj']
    if syn:
        # Load label -> synset lookup tables for the chosen dataset.
        if data == 'gvqa':
            syn_obj = pd.read_csv(data_dir + 'objects_synsets.csv')
            syn_obj = syn_obj[['object_name', 'synset']]
            syn_obj.set_index('object_name', inplace=True)
            syn_prd = pd.read_csv(data_dir + 'predicates_synsets.csv')
            syn_prd = syn_prd[['predicate_name', 'synset']]
            syn_prd.set_index('predicate_name', inplace=True)
        if data == 'vg8k':
            synsets = json.load(open(data_dir + 'words_synsets.json'))
            syn_obj = pd.DataFrame.from_dict(synsets['nouns'], orient='index', columns=['synset'])
            syn_prd = pd.DataFrame.from_dict(synsets['verbs'], orient='index', columns=['synset'])
    # top1 hit <=> predicted rank below the k of 'topk' (here k == 1).
    for prediction_type in all_prediction_types:
        df[prediction_type + '_' + metric_type] = df[prediction_type + '_rank'] < int(metric_type[3:])
    if syn:
        # Synset-level hit: gt and detection map to the same synset.
        # (gvqa uses .loc — raises on unknown labels; vg8k uses .reindex —
        # unknown labels become NaN and thus count as misses.)
        if data == 'gvqa':
            for prediction_type in ['sbj', 'obj']:
                df['gt_' + prediction_type + '_syn'] = syn_obj.loc[df['gt_' + prediction_type], 'synset'].to_list()
                df['det_' + prediction_type + '_syn'] = syn_obj.loc[df['det_' + prediction_type], 'synset'].to_list()
                df[prediction_type + '_top1_syn'] = df['gt_' + prediction_type + '_syn'] == df['det_' + prediction_type + '_syn']
            for prediction_type in ['rel']:
                df['gt_' + prediction_type + '_syn'] = syn_prd.loc[df['gt_' + prediction_type], 'synset'].to_list()
                df['det_' + prediction_type + '_syn'] = syn_prd.loc[df['det_' + prediction_type], 'synset'].to_list()
                df[prediction_type + '_top1_syn'] = df['gt_' + prediction_type + '_syn'] == df['det_' + prediction_type + '_syn']
        if data == 'vg8k':
            for prediction_type in ['sbj', 'obj']:
                df['gt_' + prediction_type + '_syn'] = syn_obj.reindex(df['gt_' + prediction_type])['synset'].to_list()
                df['det_' + prediction_type + '_syn'] = syn_obj.reindex(df['det_' + prediction_type])['synset'].to_list()
                df[prediction_type + '_top1_syn'] = df['gt_' + prediction_type + '_syn'] == df['det_' + prediction_type + '_syn']
            for prediction_type in ['rel']:
                df['gt_' + prediction_type + '_syn'] = syn_prd.reindex(df['gt_' + prediction_type])['synset'].to_list()
                df['det_' + prediction_type + '_syn'] = syn_prd.reindex(df['det_' + prediction_type])['synset'].to_list()
                df[prediction_type + '_top1_syn'] = df['gt_' + prediction_type + '_syn'] == df['det_' + prediction_type + '_syn']
    syn_key = ''
    if syn:
        syn_key = '_syn'
    # A triplet is correct only when subject, relation and object all are.
    df['triplet_top1' + syn_key] = df['rel_top1' + syn_key] & df['sbj_top1' + syn_key] & df['obj_top1' + syn_key]
    cutoff, cutoff_medium = cutoffs
    # Split classes into few/medium/many buckets by frequency quantile.
    # NOTE(review): groupby(...).mean() on a frame with string columns relies
    # on older pandas dropping non-numeric columns silently — confirm the
    # pinned pandas version, newer versions raise here.
    a = df.groupby('gt_rel').mean()
    classes_rel = (list(a.sort_values('rel_freq_gt').index))
    classes_rel_few = classes_rel[:int(len(classes_rel)*cutoff)]
    classes_rel_medium = classes_rel[int(len(classes_rel)*cutoff):int(len(classes_rel)*cutoff_medium)]
    classes_rel_many = classes_rel[int(len(classes_rel)*cutoff_medium):]
    a = df.groupby('gt_sbj').mean()
    classes_sbj = (list(a.sort_values('sbj_freq_gt').index))
    classes_sbj_few = classes_sbj[:int(len(classes_sbj)*cutoff)]
    classes_sbj_medium = classes_sbj[int(len(classes_sbj)*cutoff):int(len(classes_sbj)*cutoff_medium)]
    classes_sbj_many = classes_sbj[int(len(classes_sbj)*cutoff_medium):]
    a = df.groupby('gt_obj').mean()
    classes_obj = (list(a.sort_values('obj_freq_gt').index))
    classes_obj_few = classes_obj[:int(len(classes_obj)*cutoff)]
    classes_obj_medium = classes_obj[int(len(classes_obj)*cutoff):int(len(classes_obj)*cutoff_medium)]
    classes_obj_many = classes_obj[int(len(classes_obj)*cutoff_medium):]
    df_few_rel = df[df['gt_rel'].isin(classes_rel_few)]
    df_medium_rel = df[df['gt_rel'].isin(classes_rel_medium)]
    df_many_rel = df[df['gt_rel'].isin(classes_rel_many)]
    df_few_sbj = df[df['gt_sbj'].isin(classes_sbj_few)]
    df_medium_sbj = df[df['gt_sbj'].isin(classes_sbj_medium)]
    df_many_sbj = df[df['gt_sbj'].isin(classes_sbj_many)]
    df_few_obj = df[df['gt_obj'].isin(classes_obj_few)]
    df_medium_obj = df[df['gt_obj'].isin(classes_obj_medium)]
    df_many_obj = df[df['gt_obj'].isin(classes_obj_many)]
    # print('sbj_overall_top1', num(df_['sbj_top1'].mean() * 100.))
    # print('obj_overall_top1', num(df['obj_top1'].mean() * 100.))
    # print('rel few:', len(df_few_rel))
    # print('rel medium:',len(df_medium_rel))
    # print('rel many:', len(df_many_rel))
    #
    # print('sbj few:', len(df_few_sbj))
    # print('sbj medium:',len(df_medium_sbj))
    # print('sbj many:', len(df_many_sbj))
    #
    # print('obj few:', len(df_few_obj))
    # print('obj medium:',len(df_medium_obj))
    # print('obj many:', len(df_many_obj))
    # print('all:', len(df))
    # print()
    if syn:
        tables_title = 'synsets matching'
    else:
        tables_title = 'exact matching'
    print('=========================================================')
    print()
    print('Many, Medium, Few accuracy scores using {}:'.format(tables_title))
    print('rel many:', '{:2.2f}'.format(df_many_rel.groupby('gt_rel')['rel_top1' + syn_key].mean().mean() * 100.))
    print('rel med:', '{:2.2f}'.format(df_medium_rel.groupby('gt_rel')['rel_top1' + syn_key].mean().mean() * 100.))
    print('rel few:', '{:2.2f}'.format(df_few_rel.groupby('gt_rel')['rel_top1' + syn_key].mean().mean() * 100.))
    print('rel all (per-class):', '{:2.2f}'.format(df.groupby('gt_rel')['rel_top1' + syn_key].mean().mean() * 100.))
    print('rel all (per-example):', '{:2.2f}'.format(df['rel_top1' + syn_key].mean() * 100.))
    print()
    # Subject/object scores are averaged together below.
    sbj_many = df_many_sbj.groupby('gt_sbj')['sbj_top1' + syn_key].mean().mean() * 100.
    sbj_med = df_medium_sbj.groupby('gt_sbj')['sbj_top1' + syn_key].mean().mean() * 100.
    sbj_few = df_few_sbj.groupby('gt_sbj')['sbj_top1' + syn_key].mean().mean() * 100.
    sbj_all = df.groupby('gt_sbj')['sbj_top1' + syn_key].mean().mean() * 100.
    sbj_all_o = df['sbj_top1'].mean() * 100.
    obj_many = df_many_obj.groupby('gt_obj')['obj_top1' + syn_key].mean().mean() * 100.
    obj_med = df_medium_obj.groupby('gt_obj')['obj_top1' + syn_key].mean().mean() * 100.
    obj_few = df_few_obj.groupby('gt_obj')['obj_top1' + syn_key].mean().mean() * 100.
    obj_all = df.groupby('gt_obj')['obj_top1' + syn_key].mean().mean() * 100.
    obj_all_o = df['obj_top1'].mean() * 100.
    print('sbj/obj many:', '{:2.2f}'.format((sbj_many + obj_many) / 2.))
    print('sbj/obj med:', '{:2.2f}'.format((sbj_med + obj_med) / 2.))
    print('sbj/obj few:', '{:2.2f}'.format((sbj_few + obj_few) / 2.))
    print('sbj/obj all (per-class):', '{:2.2f}'.format((sbj_all + obj_all) / 2.))
    print('sbj/obj all (per-example):', '{:2.2f}'.format((sbj_all_o + obj_all_o) / 2.))
    print('=========================================================')
    print()
    # print('triplet accuracy few:', df_few_rel['triplet_top1'].mean() * 100.)
    # print('triplet accuracy med:', df_medium_rel['triplet_top1'].mean() * 100.)
    # print('triplet accuracy man:', df_many_rel['triplet_top1'].mean() * 100.)
    # print('triplet accuracy all:', df['triplet_top1'].mean() * 100.)
    # print('=========================================================')
    # print('triplet accuracy few:', df_few_rel['triplet_top1_syn'].mean() * 100.)
    # print('triplet accuracy med:', df_medium_rel['triplet_top1_syn'].mean() * 100.)
    # print('triplet accuracy man:', df_many_rel['triplet_top1_syn'].mean() * 100.)
    # print('triplet accuracy all:', df['triplet_top1_syn'].mean() * 100.)
    # print('=========================================================')
    ann_path = ann_dir + 'rel_annotations_train.csv'
    def get_triplets_scores(groupby, ann_path, syn_key, count_suffix):
        # Score triplets bucketed by the training-set frequency of the
        # given (partial) triplet key; closes over df, cutoff, cutoff_medium.
        groupby_ann = ['_'.join(s.split('_')[::-1]) for s in groupby]
        triplets_freqs = get_group_counts(groupby_ann, ann_path)
        # Unseen combinations get frequency 0.
        triplets_freqs = triplets_freqs.reindex(df[groupby].to_records(index=False).tolist()).fillna(0)
        df['count' + count_suffix] = triplets_freqs.to_list()
        df_triplets = df.groupby(groupby).mean()[['triplet_top1' + syn_key, 'count' + count_suffix]]
        df_triplets = df_triplets.reset_index().sort_values(['count' + count_suffix], ascending=True)
        df_triplets_few = df_triplets.iloc[:int(cutoff * len(df_triplets))]
        df_triplets_medium = df_triplets.iloc[int(cutoff * len(df_triplets)):int(cutoff_medium * len(df_triplets))]
        df_triplets_many = df_triplets.iloc[int(cutoff_medium * len(df_triplets)):]
        triplet_score_few = df_triplets_few['triplet_top1' + syn_key].mean() * 100.
        triplet_score_medium = df_triplets_medium['triplet_top1' + syn_key].mean() * 100.
        triplet_score_many = df_triplets_many['triplet_top1' + syn_key].mean() * 100.
        triplet_score_all = df_triplets['triplet_top1' + syn_key].mean() * 100.
        return triplet_score_many, triplet_score_medium, triplet_score_few, triplet_score_all
    trip_so_scores_many, trip_so_scores_medium, trip_so_scores_few, trip_so_scores_all = get_triplets_scores(['gt_sbj', 'gt_obj'], ann_path, syn_key, '_so')
    trip_sr_scores_many, trip_sr_scores_medium, trip_sr_scores_few, trip_sr_scores_all = get_triplets_scores(['gt_sbj', 'gt_rel'], ann_path, syn_key, '_sr')
    trip_or_scores_many, trip_or_scores_medium, trip_or_scores_few, trip_or_scores_all = get_triplets_scores(['gt_obj', 'gt_rel'], ann_path, syn_key, '_or')
    trip_scores_many, trip_scores_medium, trip_scores_few, trip_scores_all = get_triplets_scores(['gt_sbj', 'gt_obj', 'gt_rel'], ann_path, syn_key, '')
    print('Triplet scores grouped by subject/object using {}:'.format(tables_title))
    print('triplet so many:', '{:2.2f}'.format(trip_so_scores_many))
    print('triplet so med:', '{:2.2f}'.format(trip_so_scores_medium))
    print('triplet so few:', '{:2.2f}'.format(trip_so_scores_few))
    print('triplet so all:', '{:2.2f}'.format(trip_so_scores_all))
    print()
    print('Triplet scores grouped by subject/relation using {}:'.format(tables_title))
    print('triplet sr many:', '{:2.2f}'.format(trip_sr_scores_many))
    print('triplet sr med:', '{:2.2f}'.format(trip_sr_scores_medium))
    print('triplet sr few:', '{:2.2f}'.format(trip_sr_scores_few))
    print('triplet sr all:', '{:2.2f}'.format(trip_sr_scores_all))
    print()
    print('Triplet scores grouped by object/relation using {}:'.format(tables_title))
    print('triplet or many:', '{:2.2f}'.format(trip_or_scores_many))
    print('triplet or med:', '{:2.2f}'.format(trip_or_scores_medium))
    print('triplet or few:', '{:2.2f}'.format(trip_or_scores_few))
    print('triplet or all:', '{:2.2f}'.format(trip_or_scores_all))
    print()
    print('Triplet scores grouped by subject/relation/object using {}:'.format(tables_title))
    print('triplet sro many:', '{:2.2f}'.format(trip_scores_many))
    print('triplet sro med:', '{:2.2f}'.format(trip_scores_medium))
    print('triplet sro few:', '{:2.2f}'.format(trip_scores_few))
    print('triplet sro all:', '{:2.2f}'.format(trip_scores_all))
    print('=========================================================')
    print()
def get_wordsim_metrics_from_csv(csv_file):
    """Compute overall and per-class mean word-similarity scores from a csv.

    The csv is read into a DataFrame whose similarity columns are named
    '<prediction>_<metric>' (e.g. 'sbj_lch').  Two aggregations are produced:
    a simple mean over all rows, and a per-class mean where rows are first
    grouped by the ground-truth column 'gt_<prediction>' and the group means
    are then averaged.

    Returns a tuple (collected_simple_means, collected_per_class_means); both
    are dicts keyed by (csv_file, prediction_type, metric_type).
    """
    verbose = True
    collected_simple_means = dict()
    collected_per_class_means = dict()
    print('Reading csv file')
    df = pd.read_csv(csv_file)
    print('Done')
    wordnet_metrics = ['lch', 'wup', 'lin', 'path']
    word2vec_metrics = ['w2v_gn']
    gt_prefix = 'gt'
    # Relations only have word2vec similarities; sbj/obj have both kinds.
    # NOTE(review): the simple (overall) pass covers only 'sbj' and 'rel',
    # not 'obj', mirroring the original code -- confirm whether 'obj' was
    # intentionally omitted here.
    simple_specs = [('sbj', wordnet_metrics + word2vec_metrics),
                    ('rel', word2vec_metrics)]
    for prediction_type, metrics in simple_specs:
        for metric_type in metrics:
            mu = df[prediction_type + '_' + metric_type].mean()
            if verbose:
                print('overall', prediction_type, metric_type, '{:2.2f}'.format(mu))
            collected_simple_means[(csv_file, prediction_type, metric_type)] = mu
    per_class_specs = [('sbj', wordnet_metrics + word2vec_metrics),
                       ('obj', wordnet_metrics + word2vec_metrics),
                       ('rel', word2vec_metrics)]
    for prediction_type, metrics in per_class_specs:
        for metric_type in metrics:
            grouped = df.groupby(gt_prefix + '_' + prediction_type)
            mu = grouped[prediction_type + '_' + metric_type].mean().mean()
            if verbose:
                print('per-class', prediction_type, metric_type, '{:2.2f}'.format(mu))
            collected_per_class_means[(csv_file, prediction_type, metric_type)] = mu
    return collected_simple_means, collected_per_class_means
def get_metrics_from_csv(csv_file, get_mr=False):
verbose = True
collected_simple_means = dict()
collected_per_class_means = dict()
print('Reading csv file')
df = pd.read_csv(csv_file)
print('Done')
# df['rel_top1'] = df['rel_rank'] < 1
metric_type = 'top1'
all_prediction_types = ['rel', 'obj', 'sbj']
gt_prefix = 'gt'
for prediction_type in all_prediction_types:
df[prediction_type + '_' + metric_type] = df[prediction_type + '_rank'] < int(metric_type[3:])
df['triplet_top1'] = df['rel_top1'] & df['sbj_top1'] & df['obj_top1']
if verbose:
print('------', metric_type, '------')
# Overall Accuracy
for prediction_type in all_prediction_types:
mu = (len(df[df[prediction_type + '_rank'] < int(metric_type[3:])]) / len(df)) * 100.0
# mu = df[prediction_type + '_' + metric_type].mean() * 100
if verbose:
print('simple-average', prediction_type, '{:2.2f}'.format(mu))
collected_simple_means[(csv_file, prediction_type, metric_type)] = mu
print()
if get_mr:
# Overall Mean Rank
for prediction_type in all_prediction_types:
mu = df[prediction_type + '_rank'].mean() * 100.0 / 250.0
# mu = df.groupby(gt_prefix + | |
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: <NAME> <<EMAIL>>
# Copyright (C) 2016 RaRe Technologies
"""This script extracts plain text out of a raw Wikipedia dump. Input is an xml.bz2 file provided
by MediaWiki that looks like <LANG>wiki-<YYYYMMDD>-pages-articles.xml.bz2 or <LANG>wiki-latest-pages-articles.xml.bz2
(e.g. 14 GB of https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2).
It streams through all the XML articles using multiple cores (#cores - 1, by default),
decompressing on the fly and extracting plain text from the articles and their sections.
For each extracted article, it prints its title, section names and plain text section contents, in json-line format.
How to use
----------
#. Process Wikipedia dump with this script ::
python -m gensim.scripts.segment_wiki -i -f enwiki-latest-pages-articles.xml.bz2 -o enwiki-latest.json.gz
#. Read output in simple way:
.. sourcecode:: pycon
>>> from gensim import utils
>>> import json
>>>
>>> # iterate over the plain text data we just created
>>> with utils.open('enwiki-latest.json.gz', 'rb') as f:
>>> for line in f:
>>> # decode each JSON line into a Python dictionary object
>>> article = json.loads(line)
>>>
>>> # each article has a "title", a mapping of interlinks and a list of "section_titles" and
>>> # "section_texts".
>>> print("Article title: %s" % article['title'])
>>> print("Interlinks: %s" % article['interlinks'])
>>> for section_title, section_text in zip(article['section_titles'], article['section_texts']):
>>> print("Section title: %s" % section_title)
>>> print("Section text: %s" % section_text)
Notes
-----
Processing the entire English Wikipedia dump takes 1.7 hours (about 3 million articles per hour,
or 10 MB of XML per second) on an 8 core Intel i7-7700 @3.60GHz.
Command line arguments
----------------------
.. program-output:: python -m gensim.scripts.segment_wiki --help
:ellipsis: 0, -10
"""
import argparse
import json
import logging
import multiprocessing
import re
import sys
try:
from xml.etree import cElementTree as ET
except ImportError:
from xml.etree import ElementTree as ET
from functools import partial
from gensim.corpora.wikicorpus import IGNORED_NAMESPACES, WikiCorpus, filter_wiki, find_interlinks, get_namespace, utils
import gensim.utils
logger = logging.getLogger(__name__)
def segment_all_articles(file_path, min_article_character=200, workers=None, include_interlinks=False):
    """Extract article titles and sections from a MediaWiki bz2 database dump.

    Parameters
    ----------
    file_path : str
        Path to a MediaWiki dump, typically named
        <LANG>wiki-<YYYYMMDD>-pages-articles.xml.bz2 or
        <LANG>wiki-latest-pages-articles.xml.bz2.
    min_article_character : int, optional
        Minimal number of characters an article must contain (titles and
        leading gaps excluded) to be emitted.
    workers : int or None
        Number of parallel workers; max(1, multiprocessing.cpu_count() - 1) if None.
    include_interlinks : bool
        Whether interlinks should be included in the output.

    Yields
    ------
    (str, list of (str, str), (Optionally) list of (str, str))
        Structure contains (title, [(section_heading, section_content), ...],
        (Optionally) [(interlink_article, interlink_text), ...]).
    """
    with gensim.utils.open(file_path, 'rb') as xml_fileobj:
        corpus = _WikiSectionsCorpus(
            xml_fileobj, min_article_character=min_article_character,
            processes=workers, include_interlinks=include_interlinks)
        # metadata=True makes the corpus yield (title, sections, ...) tuples
        # instead of plain token lists.
        corpus.metadata = True
        yield from corpus.get_texts_with_sections()
def segment_and_write_all_articles(file_path, output_file, min_article_character=200, workers=None,
                                   include_interlinks=False):
    """Write article titles and sections to `output_file` (or stdout if None).

    The output is one article per line, in json-line format with fields::

        'title' - title of article,
        'section_titles' - list of titles of sections,
        'section_texts' - list of content from sections,
        (Optional) 'interlinks' - list of interlinks in the article.

    Parameters
    ----------
    file_path : str
        Path to a MediaWiki dump, typically named
        <LANG>wiki-<YYYYMMDD>-pages-articles.xml.bz2 or
        <LANG>wiki-latest-pages-articles.xml.bz2.
    output_file : str or None
        Path to the json-lines output file, or None to print to stdout.
    min_article_character : int, optional
        Minimal number of characters an article must contain (titles and
        leading gaps excluded) to be emitted.
    workers : int or None
        Number of parallel workers; max(1, multiprocessing.cpu_count() - 1) if None.
    include_interlinks : bool
        Whether interlinks should be included in the output.
    """
    if output_file is None:
        # We write bytes; on py3 stdout is a text stream, so use its buffer.
        outfile = getattr(sys.stdout, 'buffer', sys.stdout)
    else:
        outfile = gensim.utils.open(output_file, 'wb')
    try:
        articles = segment_all_articles(file_path, min_article_character, workers=workers,
                                        include_interlinks=include_interlinks)
        for idx, article in enumerate(articles):
            title, sections = article[0], article[1]
            output_data = {
                "title": title,
                "section_titles": [heading for heading, _ in sections],
                "section_texts": [content for _, content in sections],
            }
            if include_interlinks:
                output_data["interlinks"] = article[2]
            if (idx + 1) % 100000 == 0:
                logger.info("processed #%d articles (at %r now)", idx + 1, title)
            outfile.write((json.dumps(output_data) + "\n").encode('utf-8'))
    finally:
        # Only close handles we opened ourselves; never close stdout.
        if output_file is not None:
            outfile.close()
def extract_page_xmls(f):
    """Yield the serialized XML of every <page> element in a MediaWiki dump.

    Parameters
    ----------
    f : file
        File descriptor of MediaWiki dump.

    Yields
    ------
    str
        XML strings for page tags.
    """
    context = (node for _, node in ET.iterparse(f, events=("end",)))
    # The first completed element is enough to discover the XML namespace.
    first_elem = next(context)
    page_tag = "{%(ns)s}page" % {"ns": get_namespace(first_elem.tag)}
    for node in context:
        if node.tag != page_tag:
            continue
        yield ET.tostring(node)
        # Prune the element tree as per
        # http://www.ibm.com/developerworks/xml/library/x-hiperfparse/,
        # except we don't need to prune backlinks from the parent because
        # we don't use LXML.  Only <page>s are cleared, since we must still
        # inspect their ./revision/text children; pages comprise the bulk
        # of the file, so in practice we prune away enough.
        node.clear()
def segment(page_xml, include_interlinks=False):
    """Parse the content of a single <page> element.

    Parameters
    ----------
    page_xml : str
        Serialized XML content of a page tag.
    include_interlinks : bool
        Whether or not interlinks should be parsed.

    Returns
    -------
    (str, list of (str, str), (Optionally) list of (str, str))
        Structure contains (title, [(section_heading, section_content), ...],
        (Optionally) [(interlink_article, interlink_text), ...]).
    """
    root = ET.fromstring(page_xml)
    ns_mapping = {"ns": get_namespace(root.tag)}
    title = root.find("./{%(ns)s}title" % ns_mapping).text
    text = root.find("./{%(ns)s}revision/{%(ns)s}text" % ns_mapping).text
    page_namespace = root.find("./{%(ns)s}ns" % ns_mapping).text
    if page_namespace not in ('0',):
        # Not a main-namespace article: discard the body entirely.
        text = None

    interlinks = []
    section_headings = []
    section_contents = []
    if text is not None:
        if include_interlinks:
            interlinks = find_interlinks(text)
        # Top-level sections look like "\n== Heading ==\n"; splitting on the
        # heading pattern leaves the lead section first, hence the synthetic
        # "Introduction" heading prepended below.
        section_contents = re.split(r"\n==[^=].*[^=]==\n", text)
        section_headings = ["Introduction"] + re.findall(r"\n==([^=].*[^=])==\n", text)
        section_headings = [heading.strip() for heading in section_headings]
        assert len(section_contents) == len(section_headings)
    section_contents = [filter_wiki(raw) for raw in section_contents]
    sections = list(zip(section_headings, section_contents))
    if include_interlinks:
        return title, sections, interlinks
    return title, sections
class _WikiSectionsCorpus(WikiCorpus):
"""Treat a wikipedia articles dump (<LANG>wiki-<YYYYMMDD>-pages-articles.xml.bz2
or <LANG>wiki-latest-pages-articles.xml.bz2) as a (read-only) corpus.
The documents are extracted on-the-fly, so that the whole (massive) dump can stay compressed on disk.
"""
    def __init__(self, fileobj, min_article_character=200, processes=None,
                 lemmatize=utils.has_pattern(), filter_namespaces=('0',), include_interlinks=False):
        """Set up the corpus over an already-opened dump file object.

        Parameters
        ----------
        fileobj : file
            File descriptor of MediaWiki dump.
        min_article_character : int, optional
            Minimal number of character for article (except titles and leading gaps).
        processes : int, optional
            Number of processes, max(1, multiprocessing.cpu_count() - 1) if None.
        lemmatize : bool, optional
            If `pattern` package is installed, use fancier shallow parsing to get token lemmas.
            Otherwise, use simple regexp tokenization.
            NOTE: the default (`utils.has_pattern()`) is evaluated once at
            class-definition time, not per call.
        filter_namespaces : tuple of int, optional
            Enumeration of namespaces that will be ignored.
        include_interlinks: bool
            Whether or not interlinks should be included in the output
        """
        # NOTE(review): WikiCorpus.__init__ is not invoked here; attributes
        # are assigned directly instead -- confirm this is intentional.
        self.fileobj = fileobj
        self.filter_namespaces = filter_namespaces
        # False means plain texts; flipped to True by callers (e.g.
        # segment_all_articles) that want (title, sections, ...) tuples.
        self.metadata = False
        if processes is None:
            # Leave one core free for the main (reader) process.
            processes = max(1, multiprocessing.cpu_count() - 1)
        self.processes = processes
        self.lemmatize = lemmatize
        self.min_article_character = min_article_character
        self.include_interlinks = include_interlinks
def get_texts_with_sections(self):
"""Iterate over the dump, returning titles and text versions of all sections of articles.
Notes
-----
Only articles of sufficient length are returned (short articles & redirects
etc are ignored).
Note that this iterates over the **texts**; if you want vectors, just use
the standard corpus interface instead of this function:
.. sourcecode:: pycon
>>> for vec in wiki_corpus:
>>> print(vec)
Yields
------
(str, list of (str, str), list of (str, str))
Structure contains (title, [(section_heading, section_content), ...],
(Optionally)[(interlink_article, interlink_text), ...]).
"""
skipped_namespace, skipped_length, skipped_redirect = 0, 0, 0
total_articles, total_sections = 0, 0
page_xmls = extract_page_xmls(self.fileobj)
pool = multiprocessing.Pool(self.processes)
# process the corpus in smaller chunks of docs, because multiprocessing.Pool
# is dumb and would load the entire input into RAM at once...
for group in utils.chunkize(page_xmls, chunksize=10 * self.processes, maxsize=1):
for article in pool.imap(partial(segment, include_interlinks=self.include_interlinks),
group): # chunksize=10): partial(merge_names, b='Sons')
article_title, sections = article[0], article[1]
# article redirects are pruned here
if any(article_title.startswith(ignore + ':') for ignore in IGNORED_NAMESPACES): # filter non-articles
skipped_namespace += 1
continue
if not sections or sections[0][1].lstrip().lower().startswith("#redirect"): # filter redirect
skipped_redirect += 1
continue
if sum(len(body.strip()) for (_, body) in sections) < self.min_article_character:
# filter stubs (incomplete, very short articles)
skipped_length += 1
continue
total_articles += 1
total_sections += len(sections)
| |
in range(mc_count):
# Do not change these
swtch_flg = False
fol_swtch_flg = False
swtch_cost = False
count_s = 0
#### To make obstacles from here on. Defining as rings to allow nonconvexity
# Grid obs1 and obs2 now and shift them around to run all the simulations
obs1x_r = random.uniform(0.,0.2)
obs1y_r = random.uniform(0.,0.5)
#
obs2x_r = random.uniform(0.,0.4)
obs2y_r = random.uniform(0.,0.2)
Obs1 = LinearRing([(3.8 + obs1x_r ,6.-obs1y_r),(3.9+obs1x_r,6.-obs1y_r),(3.9+obs1x_r,7.-obs1y_r),(3.8+obs1x_r,7.-obs1y_r)])
Obs2 = LinearRing([(5.1 - obs2x_r, 4.5+obs2y_r), (5.1-obs2x_r, 5.+obs2y_r), (5.8-obs2x_r, 5.+obs2y_r), (5.8-obs2x_r,4.5+obs2y_r)])
#
Obs3 = LinearRing([(6.5, 3.), (6.5, 6.), (8., 6.), (8.,3)])
obsdic = {'obst1': Obs1, 'obs2': Obs2, 'obs3': Obs3} # dictionary of varying obstacles here
################### SIMULATION STARTS HERE ###########
states = np.zeros([nx,T+1])
inputs = np.zeros([2*nu,T])
cos_cl = np.zeros([1,T])
fol_prevInp = np.zeros(nu) # start follower previous input at zeros
states[:,0] = initCond
cr_obs_list = []
cr_obs_list_lead = []
obsxl = np.array([rmxdim,0.])
obsyl = np.array([0.,rmydim])
## to store the ones inferred from the follower input
obsxfi = np.array([rmxdim,0.])
obsyfi = np.array([0.,rmydim])
## follower directly sees these
obsxfD = np.array([rmxdim,0.])
obsyfD = np.array([0.,rmydim])
### Start of main time loop
for index in range(T):
if index>35: # make the plot sparser in this case towards the end
if index % 3.0 == 0.0:
show_animation = True
else:
show_animation = False
else:
show_animation = True
# first leader finds own free space
(ang,rad) = r1.seen_obs(states[0,index], states[2,index], r, obsdic) # tuple of (phi, cl_r)
ox = np.cos(ang) * rad
oy = np.sin(ang) * rad # obstacle cordinate points
# set obstacle positions (include the ones leader sees and also the ones inferred until then)
obsxl = np.append(obsxl,states[0,index]+ox)
obsyl = np.append(obsyl,states[2,index]+oy) # these are the ones directly seen by the leader. Stored.
if swtch_flg == False: # just allocated leader won't have this recent inferred info
for i in cr_obs_list_lead:
obsxfi = np.append(obsxfi,i.x)
obsyfi = np.append(obsyfi,i.y) # all inferred obstacles from the follower
obsxl = np.concatenate((obsxl, obsxfi))
obsyl = np.concatenate((obsyl, obsyfi))
### create all the obstacle unions
poi = []
for i in range(len(obsxl)):
poi.append(Point(obsxl[i],obsyl[i]))
po = unary_union(poi)
### write a piece here that checks collsions and flags them
p1 = Polygon(Obs1)
p2 = Polygon(Obs2)
p3 = Polygon(Obs3)
pol = []
pol.append(p1)
pol.append(p2)
pol.append(p3)
pol_union = unary_union(pol)
linS = LineString([(states[0,index], states[2,index]), (states[0,index]-(l1+l2)*np.cos(states[4,index]), states[2,index]-(l1+l2)*np.sin(states[4,index]))])
interS = pol_union.intersection(linS)
if interS.is_empty == False:
print('COLLISSION DETECTED ON THE ROD! COUNTING THIS AND STOPPING THE TRAJECTORY.')
col_count = col_count + 1
break
# start position
sx = states[0,index] # [m] for leader
sy = states[2,index] # [m] for leader
sfolx = sx-(l1+l2)*np.cos(states[4,index])
sfoly = sy-(l1+l2)*np.sin(states[4,index])
slinex = np.array([sx, sx-(l1+l2)*np.cos(states[4,index])])
sliney = np.array([sy, sy-(l1+l2)*np.sin(states[4,index])])
# Now have to solve an optimization problem for control synthesis
ref = get_lookah(gx, gy, N) # reference generation
u0_guess = np.random.rand(nu*N,1) # intial solution guess
## The cost must change depending on the initial leader
if swtch_flg:
count_s = count_s + 1
if count_s % 2 == 0:
swtch_cost = False
else:
swtch_cost = True
myCost = CostCl(states[0,index],states[2,index],states[1,index],states[3,index],states[4,index],states[5,index],\
Q, R, l1, l2, m1, m2, mr, K2, N, nx, nu, J, ref, po, dt, swtch_cost)
sol = sio.optimize.minimize(myCost.cost, u0_guess, options={'disp': False, 'maxiter': 100}, method='SLSQP', bounds=bnds)
sol_array = sol.x
inputs[0:nu,index] = sol_array[0:nu] # first mpc input is applied [Fa_l, Fp_l, tau_l]
#### Follower's inference part is to be done here to calculate the follower input
# sticking to leader states directly, because they can always be calculated from the follower's
states_ddt = np.transpose(rob_dyn(states[:,index], inputs[0:nu,index], fol_prevInp, l1, l2, m1, m2, mr, J, ddt))
# See appendix of the paper for these
q_1_calc = (states_ddt[1]-states[1,index])/ddt
q_2_calc = (states_ddt[3]-states[3,index])/ddt
t_1_calc = (states_ddt[5]-states[5,index])/ddt
# now calculate F_{al}, F_{pl} and tau_l from these inferred q_1, q_2 and t.
tmp1 = q_1_calc - l1*np.sin(states[4,index])*fol_prevInp[1]*l2/J + l1*np.sin(states[4,index])*fol_prevInp[2]/J + l1*np.cos(states[4,index])*states[5,index]**2 \
-1/(m1+m2+mr)*(np.cos(states[4,index])*(fol_prevInp[0]) - np.sin(states[4,index])*fol_prevInp[1])
tmp2 = q_2_calc + l1*np.cos(states[4,index])*fol_prevInp[1]*l2/J - l1*np.cos(states[4,index])*fol_prevInp[2]/J + l1*np.sin(states[4,index])*states[5,index]**2 \
-1/(m1+m2+mr)*(np.sin(states[4,index])*(fol_prevInp[0]) + np.cos(states[4,index])*fol_prevInp[1])
Amat = np.array([ [0, l1, 1], [1/(m1+m2+mr)*np.cos(states[4,index]), -(l1**2*np.sin(states[4,index])/J + 1/(m1+m2+mr)*np.sin(states[4,index])), -l1*np.sin(states[4,index])/J], \
[1/(m1+m2+mr)*np.sin(states[4,index]), (l1**2*np.cos(states[4,index])/J + 1/(m1+m2+mr)*np.cos(states[4,index])), l1*np.cos(states[4,index])/J] ])
bmat = np.array([J*t_1_calc + fol_prevInp[1]*l2 - fol_prevInp[2], tmp1, tmp2])
fol_inf_array = np.linalg.solve(Amat, bmat)
##################################################################################
### Will use these to apply the critical obstacle force correctly at that time step
[xfddt, yfddt, xfdotddt, yfdotddt] = fol_statesVel(states_ddt[0],states_ddt[2],states_ddt[1],
states_ddt[3],states_ddt[4],states_ddt[5], l1, l2)
# map all follower sensed obstacles
(angf,radf) = r1.seen_obs(xfddt, yfddt, r, obsdic) # tuple of (phi, cl_r)
oxfD = np.cos(angf) * radf
oyfD = np.sin(angf) * radf # obstacle cordinate points
# set obstacle positions directly seen by the follower
obsxfD = np.append(obsxfD,xfddt+oxfD)
obsyfD = np.append(obsyfD,yfddt+oyfD)
# Compute follower critical obstacles
fol_crOb = get_folCrOb(xfddt,yfddt,xfdotddt,yfdotddt,states_ddt[4],l1,l2,r,obsdic,dcr)
# apply the follower inputs
if fol_crOb == []:
inputs[nu:, index] = K2*fol_inf_array
fol_swtch_flg = False
else:
fromFol2Lead = [states_ddt[0]-xfddt, states_ddt[2]-yfddt]
fromFol2Obs = [fol_crOb[0][0]-xfddt, fol_crOb[0][1]-yfddt]
ang_vec = angle_vec(fromFol2Obs, fromFol2Lead)+np.pi
cr_force = (dcr-np.linalg.norm(fromFol2Obs, 2))*np.array([[np.cos(ang_vec)],[-np.sin(ang_vec)], [0]])
inputs[nu:, index] = K2*fol_inf_array + np.transpose(K1@cr_force)
### The follower switch trigger based on real critical obstacles (This is used for checking trigger sync)
if swtch_method:
if np.linalg.norm(fromFol2Obs, 2)<= swtch_thres:
fol_swtch_flg = True
else:
fol_swtch_flg = False
fol_prevInp = inputs[nu:, index] # store the previous step inputs of the follower
# Simulation step
states[:,index+1] = np.transpose(rob_dyn(states_ddt, inputs[0:nu,index], inputs[nu:,index], l1, l2, m1, m2, mr, J, dt-ddt))
### Now the leader has to infer the critical obstacles from the follower input
# See appendix of the paper
q_1_calc = (states[1,index+1]-states_ddt[1])/(dt-ddt)
q_2_calc = (states[3,index+1]-states_ddt[3])/(dt-ddt)
t_1_calc = (states[5,index+1]-states_ddt[5])/(dt-ddt)
## now calculate the follower forces from leader perspective
tmp1 = q_1_calc + l1*l1*np.sin(states_ddt[4])*inputs[1,index]/J + l1*np.sin(states_ddt[4])*inputs[2,index]/J + l1*np.cos(states_ddt[4])*states_ddt[5]**2 \
-1/(m1+m2+mr)*(np.cos(states_ddt[4])*(inputs[0,index]) - np.sin(states_ddt[4])*inputs[1,index])
tmp2 = q_2_calc - l1*l1*np.cos(states_ddt[4])*inputs[1,index]/J - l1*np.cos(states_ddt[4])*inputs[2,index]/J + l1*np.sin(states_ddt[4])*states_ddt[5]**2 \
-1/(m1+m2+mr)*(np.sin(states_ddt[4])*(inputs[0,index]) + np.cos(states_ddt[4])*inputs[1,index])
Amat = np.array([ [0, -l2, 1], [1/(m1+m2+mr)*np.cos(states_ddt[4]), (l1*l2*np.sin(states_ddt[4])/J - 1/(m1+m2+mr)*np.sin(states_ddt[4])), -l1*np.sin(states_ddt[4])/J], \
[1/(m1+m2+mr)*np.sin(states_ddt[4]), (-l1*l2*np.cos(states_ddt[4])/J + 1/(m1+m2+mr)*np.cos(states_ddt[4])), l1*np.cos(states_ddt[4])/J] ])
bmat = np.array([J*t_1_calc - inputs[1,index]*l1 - inputs[2,index], tmp1, tmp2])
x = np.linalg.solve(Amat, bmat)
Faf_infer = x[0]
Fpf_infer = x[1]
tauf_inf= x[2]
# form the obstacles on follower side
exp_force = K2*inputs[0:nu,index]
force_diff = np.array([[Faf_infer-exp_force[0]], [Fpf_infer-exp_force[1]]])
force_diff_mag = np.linalg.norm(force_diff,2)
if force_diff_mag >= 1: # infer only if the difference is significant. Tune the threshold
force_diff_ang = -np.arctan2(force_diff[1],force_diff[0]) # should be equal to ang_vec
dist_cr = dcr - force_diff_mag/np.sqrt((Fab_fc/dcr)**2*np.cos(force_diff_ang)**2 + (Fpb_fc/dcr)**2*np.sin(force_diff_ang)**2 )
Obs_p = np.array([[xfddt+dist_cr*np.cos(states_ddt[4]-force_diff_ang+np.pi)], [yfddt+dist_cr*np.sin(states_ddt[4]-force_diff_ang+np.pi)]])
obs_b_lead = Point(Obs_p[0][0], Obs_p[1][0])
cr_obs_list_lead.append(obs_b_lead)
if swtch_method:
if dist_cr <= swtch_thres:
swtch_flg = True
else:
swtch_flg = False
else:
swtch_flg = False
## Check if the switch is synching. If not, algorithm is not going to work
if swtch_method:
if fol_swtch_flg == swtch_flg:
# print('SWITCH IS SYNCHRONIZED')
if swtch_flg:
# SWAP ALL THE INFORMATION TO THE CORRECT AGENT. EACH RETAINS KNOWN INFO.
tmps1 = obsxfD
tmps2 = obsyfD
tmps3 = obsxl
tmps4 = obsyl
#
obsxl = tmps1
obsyl = tmps2
obsxfD = tmps3
obsyfD = tmps4
else:
print('SWITCH ERROR!!!!')
pdb.set_trace() # should not happen. Debug.
break
if fol_crOb: # tracking the real ones to verify
obs_b = Point(fol_crOb[0][0],fol_crOb[0][1])
cr_obs_list.append(obs_b)
# Decide if the switching is activated and switch accordingly
if swtch_flg:
[xf_s, yf_s, xfdot_s, yfdot_s] = fol_statesVel(states[0, index+1],states[2,index+1],states[1,index+1],
states[3,index+1],states[4,index+1],states[5,index+1], l1, l2)
# set the leader at the follower's position
states[:,index+1] = np.array([xf_s,xfdot_s,yf_s,yfdot_s,states[4,index+1]+np.pi,states[5,index+1]])
# Plot simulation step
if show_animation:
plt.figure(2)
plt.plot(obsxl, obsyl, "xr", markersize = 2)
if fol_crOb:
if swtch_method:
plt.plot(fol_crOb[0][0], fol_crOb[0][1], "xb", markersize = 4, mew = 2)
else:
plt.plot(fol_crOb[0][0], fol_crOb[0][1], "xb", markersize = 2)
plt.plot(sx, sy, "or", markersize=3, mew = 1)
plt.plot(sfolx, sfoly, "ob", markersize=3, mew =1)
plt.plot(gx, gy, '*r', markersize=12)
plt.plot(slinex, sliney, "-y", linewidth=1.5)
plt.grid(True)
plt.xlim((2.8,8.))
plt.ylim((3.8,8.))
plt.pause(0.001)
plt.draw()
# quit this if too close already
[xfN, yfN, xfdotN, yfdotN] = fol_statesVel(states[0, index+1],states[2,index+1],states[1,index+1],
states[3,index+1],states[4,index+1],states[5,index+1], l1, l2)
if swtch_cost:
if np.linalg.norm([xfN-gx, yfN-gy],2) <= 0.5:
step_2_tar.append(index+1)
break
else:
if np.linalg.norm([states[0,index+1]-gx, states[2,index+1]-gy],2) <= 0.5:
step_2_tar.append(index+1)
break
| |
<filename>pact/pact.py
"""API for creating a contract and configuring the mock service."""
from __future__ import unicode_literals
import fnmatch
import os
import platform
from subprocess import Popen
import psutil
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3 import Retry
from .constants import BROKER_CLIENT_PATH
from .constants import MOCK_SERVICE_PATH
from .matchers import from_term
class Pact(object):
"""
Represents a contract between a consumer and provider.
Provides Python context handlers to configure the Pact mock service to
perform tests on a Python consumer. For example:
>>> from pact import Consumer, Provider
>>> pact = Consumer('consumer').has_pact_with(Provider('provider'))
>>> (pact.given('the echo service is available')
... .upon_receiving('a request is made to the echo service')
... .with_request('get', '/echo', query={'text': 'Hello!'})
... .will_respond_with(200, body='Hello!'))
>>> with pact:
... requests.get(pact.uri + '/echo?text=Hello!')
The GET request is made to the mock service, which will verify that it
was a GET to /echo with a query string with a key named `text` and its
value is `Hello!`. If the request does not match an error is raised, if it
does match the defined interaction, it will respond with the text `Hello!`.
"""
HEADERS = {'X-Pact-Mock-Service': 'true'}
MANDATORY_FIELDS = {'response', 'description', 'request'}
def __init__(self, consumer, provider, host_name='localhost', port=1234,
log_dir=None, ssl=False, sslcert=None, sslkey=None,
cors=False, publish_to_broker=False, broker_base_url=None,
broker_username=None, broker_password=<PASSWORD>, broker_token=<PASSWORD>,
pact_dir=None, version='2.0.0', file_write_mode='overwrite'):
"""
Constructor for Pact.
:param consumer: The consumer for this contract.
:type consumer: pact.Consumer
:param provider: The provider for this contract.
:type provider: pact.Provider
:param host_name: The host name where the mock service is running.
:type host_name: str
:param port: The port number where the mock service is running.
:type port: int
:param log_dir: The directory where logs should be written. Defaults to
the current directory.
:type log_dir: str
:param ssl: Flag to control the use of a self-signed SSL cert to run
the server over HTTPS , defaults to False.
:type ssl: bool
:param sslcert: Path to a custom self-signed SSL cert file, 'ssl'
option must be set to True to use this option. Defaults to None.
:type sslcert: str
:param sslkey: Path to a custom key and self-signed SSL cert key file,
'ssl' option must be set to True to use this option.
Defaults to None.
:type sslkey: str
:param cors: Allow CORS OPTION requests to be accepted,
defaults to False.
:type cors: bool
:param publish_to_broker: Flag to control automatic publishing of
pacts to a pact broker. Defaults to False.
:type publish_to_broker: bool
:param broker_base_url: URL of the pact broker that pacts will be
published to. Can also be supplied through the PACT_BROKER_BASE_URL
environment variable. Defaults to None.
:type broker_base_url: str
:param broker_username: Username to use when connecting to the pact
broker if authentication is required. Can also be supplied through
the PACT_BROKER_USERNAME environment variable. Defaults to None.
:type broker_username: str
:param broker_password: Password to use when connecting to the pact
broker if authentication is required. Strongly recommend supplying
this value through the PACT_BROKER_PASSWORD environment variable
instead. Defaults to None.
:type broker_password: str
:param broker_token: Authentication token to use when connecting to
the pact broker. Strongly recommend supplying this value through
the PACT_BROKER_TOKEN environment variable instead.
Defaults to None.
:type broker_token: str
:param pact_dir: Directory where the resulting pact files will be
written. Defaults to the current directory.
:type pact_dir: str
:param version: The Pact Specification version to use, defaults to
'2.0.0'.
:type version: str
:param file_write_mode: `overwrite` or `merge`. Use `merge` when
running multiple mock service instances in parallel for the same
consumer/provider pair. Ensure the pact file is deleted before
running tests when using this option so that interactions deleted
from the code are not maintained in the file. Defaults to
`overwrite`.
:type file_write_mode: str
"""
scheme = 'https' if ssl else 'http'
self.uri = '{scheme}://{host_name}:{port}'.format(
host_name=host_name, port=port, scheme=scheme)
self.broker_base_url = broker_base_url
self.broker_username = broker_username
self.broker_password = <PASSWORD>
self.broker_token = <PASSWORD>_token
self.consumer = consumer
self.cors = cors
self.file_write_mode = file_write_mode
self.host_name = host_name
self.log_dir = log_dir or os.getcwd()
self.pact_dir = pact_dir or os.getcwd()
self.port = port
self.provider = provider
self.publish_to_broker = publish_to_broker
self.ssl = ssl
self.sslcert = sslcert
self.sslkey = sslkey
self.version = version
self._interactions = []
self._process = None
def given(self, provider_state):
"""
Define the provider state for this pact.
When the provider verifies this contract, they will use this field to
setup pre-defined data that will satisfy the response expectations.
:param provider_state: The short sentence that is unique to describe
the provider state for this contract.
:type provider_state: basestring
:rtype: Pact
"""
self._insert_interaction_if_complete()
self._interactions[0]['provider_state'] = provider_state
return self
@staticmethod
def _normalize_consumer_name(name):
return name.lower().replace(' ', '_')
def publish(self):
"""Publish the generated pact files to the specified pact broker."""
if self.broker_base_url is None \
and "PACT_BROKER_BASE_URL" not in os.environ:
raise RuntimeError("No pact broker URL specified. " +
"Did you expect the PACT_BROKER_BASE_URL " +
"environment variable to be set?")
pact_files = fnmatch.filter(
os.listdir(self.pact_dir),
self._normalize_consumer_name(self.consumer.name) + '*.json'
)
command = [
BROKER_CLIENT_PATH,
'publish',
'--consumer-app-version={}'.format(self.consumer.version)]
if self.broker_base_url is not None:
command.append('--broker-base-url={}'.format(self.broker_base_url))
if self.broker_username is not None:
command.append('--broker-username={}'.format(self.broker_username))
if self.broker_password is not None:
command.append('--broker-password={}'.format(self.broker_password))
if self.broker_token is not None:
command.append('--broker-token={}'.format(self.broker_token))
command.extend(pact_files)
if self.consumer.tag_with_git_branch:
command.append('--tag-with-git-branch')
if self.consumer.tags is not None:
for tag in self.consumer.tags:
command.extend(['-t', tag])
publish_process = Popen(command)
publish_process.wait()
if publish_process.returncode != 0:
url = self.broker_base_url or os.environ["PACT_BROKER_BASE_URL"]
raise RuntimeError(
"There was an error while publishing to the " +
"pact broker at {}."
.format(url))
def setup(self):
"""Configure the Mock Service to ready it for a test."""
try:
resp = requests.delete(
self.uri + '/interactions', headers=self.HEADERS)
assert resp.status_code == 200, resp.text
resp = requests.put(
self.uri + '/interactions',
headers=self.HEADERS,
json={"interactions": self._interactions})
assert resp.status_code == 200, resp.text
except AssertionError:
raise
def start_service(self):
"""
Start the external Mock Service.
:raises RuntimeError: if there is a problem starting the mock service.
"""
command = [
MOCK_SERVICE_PATH,
'service',
'--host={}'.format(self.host_name),
'--port={}'.format(self.port),
'--log', '{}/pact-mock-service.log'.format(self.log_dir),
'--pact-dir', self.pact_dir,
'--pact-file-write-mode', self.file_write_mode,
'--pact-specification-version={}'.format(self.version),
'--consumer', self.consumer.name,
'--provider', self.provider.name]
if self.ssl:
command.append('--ssl')
if self.sslcert:
command.extend(['--sslcert', self.sslcert])
if self.sslkey:
command.extend(['--sslkey', self.sslkey])
self._process = Popen(command)
self._wait_for_server_start()
def stop_service(self):
"""Stop the external Mock Service."""
is_windows = 'windows' in platform.platform().lower()
if is_windows:
# Send the signal to ruby.exe, not the *.bat process
p = psutil.Process(self._process.pid)
for child in p.children(recursive=True):
child.terminate()
p.wait()
if psutil.pid_exists(self._process.pid):
raise RuntimeError(
'There was an error when stopping the Pact mock service.')
else:
self._process.terminate()
self._process.communicate()
if self._process.returncode != 0:
raise RuntimeError(
'There was an error when stopping the Pact mock service.')
if (self.publish_to_broker):
self.publish()
    def upon_receiving(self, scenario):
        """
        Define the name of this contract.

        :param scenario: A unique name for this contract.
        :type scenario: basestring
        :rtype: Pact
        """
        # Start a fresh interaction if the current one is fully specified,
        # then record the description on the newest interaction (index 0).
        self._insert_interaction_if_complete()
        self._interactions[0]['description'] = scenario
        return self
def verify(self):
"""
Have the mock service verify all interactions occurred.
Calls the mock service to verify that all interactions occurred as
expected, and has it write out the contracts to disk.
:raises AssertionError: When not all interactions are found.
"""
self._interactions = []
resp = requests.get(
self.uri + '/interactions/verification',
headers=self.HEADERS)
assert resp.status_code == 200, resp.text
resp = requests.post(
self.uri + '/pact', headers=self.HEADERS)
assert resp.status_code == 200, resp.text
    def with_request(self, method, path, body=None, headers=None, query=None):
        """
        Define the request that the client is expected to perform.

        :param method: The HTTP method.
        :type method: str
        :param path: The path portion of the URI the client will access.
        :type path: str, Matcher
        :param body: The request body, can be a string or an object that will
            serialize to JSON, like list or dict, defaults to None.
        :type body: list, dict or None
        :param headers: The headers the client is expected to include with
            this request. Defaults to None.
        :type headers: dict or None
        :param query: The query options the client is expected to send. Can be
            a dict of keys and values, or a URL encoded string.
            Defaults to None.
        :type query: dict, basestring, or None
        :rtype: Pact
        """
        self._insert_interaction_if_complete()
        self._interactions[0]['request'] = Request(
            method, path, body=body, headers=headers, query=query).json()
        return self
def will_respond_with(self, status, headers=None, body=None):
"""
Define the response the server is expected to create.
:param status: The HTTP status code.
:type status: int
:param headers: All required | |
the moment, we don't have any uncompressed data
self.uncompressed = None
self._decompress() # decompress the contents as needed
# Prepare storage to keep track of the offsets
# of the blobs in the cluster.
self._offsets = []
# proceed to actually read the offsets of the blobs in this cluster
self._read_offsets()
def _decompress(self, chunk_size=32768):
if self.compression == "lzma":
# create a bytes stream to store the uncompressed cluster data
self.buffer = io.BytesIO()
decompressor = lzma.LZMADecompressor() # prepare the decompressor
# move the file pointer to the start of the blobs as long as we
# don't reach the end of the stream.
self.file.seek(self.offset + 1)
while not decompressor.eof:
chunk = self.file.read(chunk_size) # read in a chunk
data = decompressor.decompress(chunk) # decompress the chunk
self.buffer.write(data) # and store it in the buffer area
elif self.compression == "zstd":
# create a bytes stream to store the uncompressed cluster data
self.buffer = io.BytesIO()
decompressor = zstandard.ZstdDecompressor().decompressobj() # prepare the decompressor
# move the file pointer to the start of the blobs as long as we
# don't reach the end of the stream.
self.file.seek(self.offset + 1)
while True:
chunk = self.file.read(chunk_size) # read in a chunk
try:
data = decompressor.decompress(chunk) # decompress the chunk
self.buffer.write(data) # and store it in the buffer area
except zstandard.ZstdError:
break
def _source_buffer(self):
# get the file buffer or the decompressed buffer
data_buffer = self.buffer if self.compression else self.file
# move the buffer to the starting position
data_buffer.seek(0 if self.compression else self.offset + 1)
return data_buffer
def _read_offsets(self):
# get the buffer for this cluster
data_buffer = self._source_buffer()
# read the offset for the first blob
offset0 = unpack("<I", data_buffer.read(4))[0]
# store this one in the list of offsets
self._offsets.append(offset0)
# calculate the number of blobs by dividing the first blob by 4
number_of_blobs = int(offset0 / 4)
for idx in range(number_of_blobs - 1):
# store the offsets to all other blobs
self._offsets.append(unpack("<I", data_buffer.read(4))[0])
# return either the blob itself or its offset (when return_offset is set to True)
def read_blob(self, blob_index, return_offset=False):
# check if the blob falls within the range
if blob_index >= len(self._offsets) - 1:
raise IOError("Blob index exceeds number of blobs available: %s" %
blob_index)
data_buffer = self._source_buffer() # get the buffer for this cluster
# calculate the size of the blob
blob_size = self._offsets[blob_index + 1] - self._offsets[blob_index]
# move to the position of the blob relative to current position
data_buffer.seek(self._offsets[blob_index], 1)
return data_buffer.read(blob_size) if not return_offset else data_buffer.tell()
class DirectoryBlock(Block):
    """Base class for ZIM directory entries (articles and redirects).

    Extends the fixed-format parent Block with the variable-length,
    zero-terminated URL and title fields that follow it on disk.
    """

    def __init__(self, structure, encoding):
        super(DirectoryBlock, self).__init__(structure, encoding)

    def unpack_from_file(self, file_resource, seek=None):
        """Read one directory entry and return a dict of its field values."""
        # the fixed-size fields come first, described by the entry structure
        values = super(DirectoryBlock, self)._unpack_from_file(file_resource, seek)
        # the URL and title follow as zero-terminated strings
        values["url"] = read_zero_terminated(file_resource, self._encoding)
        values["title"] = read_zero_terminated(file_resource, self._encoding)
        # the namespace arrives as raw bytes; expose it as text
        values["namespace"] = values["namespace"].decode(encoding=self._encoding, errors="ignore")
        return values
class ArticleEntryBlock(DirectoryBlock):
    """Directory entry for an article, using the ARTICLE_ENTRY layout."""

    def __init__(self, encoding):
        super(ArticleEntryBlock, self).__init__(ARTICLE_ENTRY, encoding)
class RedirectEntryBlock(DirectoryBlock):
    """Directory entry for a redirect, using the REDIRECT_ENTRY layout."""

    def __init__(self, encoding):
        super(RedirectEntryBlock, self).__init__(REDIRECT_ENTRY, encoding)
#####
# Support functions to simplify (1) the uniform creation of a URL
# given a namespace, and (2) searching in the index.
#####
def full_url(namespace, url):
    """Join *namespace* and *url* into a single "namespace/url" path."""
    return u"/".join((namespace, url))
def split_path(path, assumed_namespace="A", heuristic_split=True):
    """
    split a path into the namespace and a URL

    when a namespace is missing this function returns a configurable default
    namespace; as desired this function can apply a heuristic split to
    distinguish between what is likely a namespace and/or url

    :param path: the path to split into a namespace and a url
    :param assumed_namespace: the default namespace to return if no namespace is found
    :param heuristic_split: use heuristics to identify what is a namespace and what is part of a url
    :return: a pair consisting of the namespace and the url
    """
    splits = path.split("/")
    # str.split always yields at least one element, so the only special case
    # is a path without any separator at all (the old `len(splits) == 0`
    # branch was unreachable and has been removed).
    if len(splits) == 1:
        return assumed_namespace, splits[0]
    if heuristic_split:
        # real namespaces are single characters; a longer first segment is
        # assumed to be part of the URL itself
        if len(splits[0]) == 1:
            return splits[0], "/".join(splits[1:])
        # re-joining all segments reproduces the original path exactly
        return assumed_namespace, path
    return splits[0], "/".join(splits[1:])
def binary_search(func, item, front, end):
    """Binary search over the half-open index range [front, end).

    :param func: callable mapping an index to the (sorted) value at it
    :param item: the value to look for
    :param front: first index of the search range (inclusive)
    :param end: end of the search range (exclusive)
    :return: an index where func(index) == item, or None when absent
    """
    logging.debug("performing binary search with boundaries " + str(front) +
                  " - " + str(end))
    # continue as long as the boundaries don't cross
    while front < end:
        middle = (front + end) // 2
        probed = func(middle)  # probe the value at the middle index
        if probed == item:
            return middle
        if probed < item:
            # the match, if any, lies strictly to the right of the middle
            front = middle + 1
        else:
            # keep the half-open convention: indices >= middle are ruled out.
            # BUGFIX: the previous `end = middle - 1` also discarded the
            # still-valid candidate middle - 1, so boundary items (e.g. the
            # first element of the range) could be reported as missing.
            end = middle
    return None
class ZIMFileIterator(object):
    """Iterate over the content entries of a ZIM file.

    Yields (full_url, title, index) tuples for every entry in the content
    namespace: "A" for ZIM versions up to 6.0, "C" afterwards.
    """

    def __init__(self, zim_file, start_from=0):
        self._zim_file = zim_file
        content_namespace = "A" if zim_file.version <= (6, 0) else "C"
        self._namespace = self._zim_file.get_namespace_range(content_namespace)
        first = self._namespace.start if self._namespace.start else 0
        # never start before the namespace's first entry
        self._idx = max(first, start_from)

    def __iter__(self):
        return self

    def __next__(self):
        last = self._namespace.end if self._namespace.end else 0
        if self._idx > last:
            raise StopIteration
        idx = self._idx
        entry = self._zim_file.read_directory_entry_by_index(idx)
        entry["fullUrl"] = full_url(entry["namespace"], entry["url"])
        self._idx += 1
        return entry["fullUrl"], entry["title"], idx

    # Python 2 style alias for the iterator protocol
    def next(self):
        return self.__next__()
class ZIMFile:
"""
The main class to access a ZIM file.
Two important public methods are:
get_article_by_url(...)
is used to retrieve an article given its namespace and url.
get_main_page()
is used to retrieve the main page article for the given ZIM file.
"""
    def __init__(self, filename, encoding):
        """Open *filename* and parse the ZIM header and mimetype list.

        :param filename: path to the ZIM file on disk
        :param encoding: text encoding used for URLs/titles in this file
        :raises ZIMFileUnpackError: when the header cannot be unpacked
            (e.g. the file is not a valid/complete ZIM file).
        """
        self._filename = filename
        self._enc = encoding
        # open the file as a binary file
        self.file = open(filename, "rb")
        # retrieve the header fields
        try:
            self.header_fields = HeaderBlock(self._enc).unpack_from_file(self.file)
            self.major = int(self.header_fields["major_version"])
            self.minor = int(self.header_fields["minor_version"])
            self.version = (self.major, self.minor)
            # the mimetype list lives at the position given in the header
            self.mimetype_list = MimeTypeListBlock(self._enc).unpack_from_file(self.file,
                                                                               self.header_fields["mimeListPos"])
            # create the object once for easy access
            self.redirectEntryBlock = RedirectEntryBlock(self._enc)
            self.articleEntryBlock = ArticleEntryBlock(self._enc)
            self.clusterFormat = ClusterBlock(self._enc)
        except struct_error:
            raise ZIMFileUnpackError
    def copy(self):
        """Return a new, independent ZIMFile handle on the same file."""
        return ZIMFile(self._filename, self._enc)
def checksum(self, extra_fields=None):
# create a checksum to uniquely identify this zim file
# the UUID should be enough, but let's play it safe and also include the other header info
if not extra_fields:
extra_fields = {}
checksum_entries = []
fields = self.header_fields.copy()
fields.update(extra_fields)
# collect all the HEADER values and make sure they are ordered
for key in sorted(fields.keys()):
checksum_entries.append("'" + key + "': " + str(fields[key]))
checksum_message = (", ".join(checksum_entries)).encode("ascii")
return sha256(checksum_message).hexdigest()
def _read_offset(self, index, field_name, field_format, length):
# move to the desired position in the file
if index != 0xffffffff:
self.file.seek(self.header_fields[field_name] + int(length * index))
# and read and return the particular format
read = self.file.read(length)
# return unpack("<" + field_format, self.file.read(length))[0]
return unpack("<" + field_format, read)[0]
return None
    def _read_url_offset(self, index):
        # URL pointer table: one 8-byte ("Q") entry per directory entry.
        return self._read_offset(index, "urlPtrPos", "Q", 8)
    def _read_title_offset(self, index):
        # Title pointer table: one 4-byte ("L") entry per directory entry.
        return self._read_offset(index, "titlePtrPos", "L", 4)
    def _read_cluster_offset(self, index):
        # Cluster pointer table: one 8-byte ("Q") entry per cluster.
        return self._read_offset(index, "clusterPtrPos", "Q", 8)
def _read_directory_entry(self, offset):
"""
Read a directory entry using an offset.
:return: a DirectoryBlock - either as Article Entry or Redirect Entry
"""
logging.debug("reading entry with offset " + str(offset))
self.file.seek(offset) # move to the desired offset
# retrieve the mimetype to determine the type of block
fields = unpack("<H", self.file.read(2))
# get block class
if fields[0] == 0xffff:
directory_block = self.redirectEntryBlock
else:
directory_block = self.articleEntryBlock
# unpack and return the desired Directory Block
return directory_block.unpack_from_file(self.file, offset)
    def read_directory_entry_by_index(self, index):
        """
        Read a directory entry using an index.
        :return: a DirectoryBlock - either as Article Entry or Redirect Entry
        """
        # find the offset for the given index; None signals the 0xffffffff
        # "no entry" sentinel
        offset = self._read_url_offset(index)
        if offset is not None:
            # read the entry at that offset
            directory_values = self._read_directory_entry(offset)
            # set the index in the list of values
            directory_values["index"] = index
            return directory_values  # and return
dddddddddddddldd",
"6-9 x: nxvkxvxxx",
"18-19 x: xsxxxrdxbkjmbdvfrrx",
"12-13 j: jnjjjgjjqrjjs",
"14-16 r: rrrrrmrrrrqrrxrvrvr",
"5-9 h: hhvvhmzjn",
"5-6 w: wwgfzt",
"10-13 v: vvvvvsvvvtvvxvvrvvv",
"2-4 h: hwhh",
"4-8 s: ssssssshss",
"5-12 n: nnnnnnnnnnncn",
"2-3 x: bxxmxcdzlj",
"14-16 x: chpxcprsxhxvkxzc",
"7-9 b: bbbbbbbbbb",
"10-11 x: xxskpxtfhxd",
"5-7 w: zwwwrww",
"1-8 l: splxkhxw",
"8-9 q: llcbqltqh",
"2-3 k: kkcckxm",
"6-11 c: ccccsccccccccccccccc",
"6-14 k: xkbbnkknkttqpb",
"12-16 d: dcddddrddnddvprsd",
"1-3 p: bpplj",
"4-6 n: nntbnpn",
"14-15 k: nwcckxptkgrrbkd",
"12-14 d: dddddddddddddxd",
"1-11 v: vvvvjvvvvvvvvv",
"4-6 g: gtgbfg",
"5-8 h: hhhhdhhhhhhh",
"13-15 n: nwnnpnftnbnknqn",
"3-4 m: mpfqmj",
"3-6 t: ttmtct",
"2-4 s: sssss",
"10-16 v: vvszvvgvvvvvvvvcv",
"11-13 t: stvdjtwjzftrtprpb",
"5-6 p: pppptp",
"12-15 d: ddtdjdddxdhxzdcd",
"12-16 b: bbbbbbbbbbbxbbbbb",
"12-14 n: qnnnnnnnnzlnnnn",
"9-11 v: sqkrmzjqvvv",
"2-5 d: cdpfdtjdkn",
"7-8 p: pppptppppp",
"2-4 s: ssszsss",
"13-15 d: cpdwdbvqxcffdrd",
"4-5 j: hbjjpppm",
"5-8 g: gvgmgjgrzz",
"6-7 s: sssssqn",
"1-11 p: tpstkbpmtbpg",
"17-18 m: zmmmsrsrgfpggmmmlgk",
"2-3 f: ffkhf",
"2-5 f: ftfff",
"2-10 m: gqxlmphwcmfc",
"6-8 v: bvgwwbvlvvvlrvv",
"13-14 n: nnnnnnnnnnnnnxn",
"2-7 f: ffffffff",
"9-10 x: xxxxxxxxxd",
"1-7 s: mssssdsksssdsssz",
"11-15 t: jtttttltmttgttthz",
"5-12 j: jqjjjjfndzjdjjjjjjjn",
"9-13 r: rrrrrrrrrrrrj",
"5-9 x: xxjxpcqxxcxznn",
"2-9 c: psdddswdcpd",
"4-10 f: fsffxffffmr",
"7-12 j: jxvjjjrjjhjfc",
"4-5 q: bqqhj",
"1-4 f: fwbclqb",
"1-2 k: skqk",
"9-11 b: bbbzbkbbhbb",
"1-2 g: nggv",
"3-7 m: mmfmmmpm",
"2-3 m: vpmr",
"7-10 d: ddddddjddm",
"2-15 t: hgvsftrbzglvmpwhsmp",
"1-5 t: qtttt",
"11-13 p: pppppppppppphp",
"4-5 p: cvqpzvpppfh",
"2-3 f: bbftxfnmb",
"2-3 r: rlrr",
"1-4 m: mvms",
"3-14 m: gfmprfxpvzhmhm",
"2-8 j: jdsjjlfl",
"8-12 k: kkkkskkvkkkkk",
"1-7 p: hpfpmpwp",
"5-8 x: xxxxxxxhxxxxxx",
"3-10 p: mmctgfppppxplpplppj",
"3-4 p: zfmpjbhwppk",
"13-16 z: zzzzzzzzzzzzvzzzz",
"1-3 d: djddd",
"2-7 p: nmplwdp",
"7-9 l: lglllllll",
"7-8 p: ptvsnpcp",
"9-10 m: rjnmxthbmg",
"5-6 j: jjccjv",
"6-8 t: tvcztdttxzkp",
"8-12 p: kmrpqdnppskj",
"6-8 j: lsjkhjjhbgj",
"11-12 x: hxxxlxmxxxtrxxxxk",
"3-6 p: pppppcgtpxpppplp",
"8-10 s: nphsvswsrssxmdh",
"10-11 p: pppcpptppkp",
"14-15 z: zzzzzzzzzzzzzcz",
"14-18 f: mffngzbffffznctfff",
"6-7 t: tttthhtttq",
"12-19 l: shqlqnkzwpplqjrwjcv",
"1-4 k: khkx",
"9-11 d: ddddddddpdrdddd",
"3-4 s: mvsssc",
"6-7 h: hhhhhhphh",
"8-14 v: vvrvsvwrwmpvlv",
"2-6 z: zxjvsn",
"9-12 f: ffffffffpfftf",
"2-5 s: lssmjh",
"11-14 k: kkwkkfkbpnjkbkk",
"4-15 x: ppcxmjmxvbrkxlqcthx",
"1-2 g: bpggz",
"3-5 x: vxjxxxnztm",
"7-8 r: rrrrrrrhr",
"8-13 n: nnblnxnnrmnnq",
"1-4 w: wwtxwwwjwwwwdwl",
"1-10 d: cddddddwfhdrdddqnd",
"1-8 j: jxmjjjrv",
"4-7 x: xfxxxxn",
"16-18 v: vvvvvvvvvvvvvvvvvvv",
"3-5 h: hhhhhh",
"9-11 z: zzztvzzhgzr",
"3-5 w: fcjwwjwwv",
"6-7 z: hszgzsl",
"3-9 l: fldswlflrll",
"10-11 n: fqbxpfncbln",
"7-14 m: jftmkxqhrmmcqmk",
"8-9 h: hhqqhkhmh",
"3-5 l: lnlqlhdjtd",
"4-11 x: lqjxqzlfsfhzjqqnttp",
"4-6 c: fccctcc",
"8-11 c: cccccccccckcr",
"2-3 k: jkxtkjhnkksksrrzhfkk",
"7-9 m: nmmmmmmkmrmmdjjms",
"5-12 m: mmmmprmsmbmmmm",
"1-2 l: lllll",
"3-7 m: gmmtchm",
"11-16 t: vddbsztmpttvsktp",
"9-13 f: ffffffffffffffffff",
"3-8 j: hjhldbcznnsx",
"5-7 v: vrfpvswbmbvvzv",
"6-15 z: bhlvbzvnlntzzzz",
"4-6 x: xzjrxcx",
"3-4 q: qqsw",
"5-6 r: rrrlrrrrrr",
"1-7 b: qbbbbbxbbb",
"6-7 b: bfbbjdd",
"4-11 f: lfrffpfgzqs",
"3-5 c: qccncmjgrczzmcz",
"9-15 x: vxxxxxnxxxxxxxxrxxxj",
"1-2 s: ssnls",
"5-9 z: zzpzzzzzszzzzzz",
"5-16 z: pzgzxgpwqmzwwlzz",
"6-7 c: ccqxccn",
"5-6 b: ngmbbs",
"2-19 d: cdmnqfjfxgtdwlrnhcd",
"9-12 p: thcvkgpcxptpxpp",
"3-6 g: sbstjvnhfgdr",
"1-18 f: fsffffffffffffffffff",
"2-16 f: rfxzxrjpbvfzcftf",
"13-14 v: rvxvvnsfcvvvrvvvqg",
"4-10 d: tqlddkdpdv",
"12-13 r: rrrrrrrrhrwrjdrlnr",
"5-6 x: xxvxgnxxxx",
"3-4 d: sddd",
"5-11 p: jphgprgjjpp",
"1-7 f: fjgfdvb",
"3-8 x: rpxvndgxx",
"1-17 b: jbbrxbbbtxjbrpbbb",
"7-10 h: bhhbzmdrkhhvhjx",
"1-5 v: fzdgv",
"5-7 f: vfdffftffffhflw",
"2-20 c: mcbhcvvxwxfvxqlgxpdc",
"5-8 w: vwktjwdsccgj",
"3-4 d: nntd",
"1-7 n: nnncmhnkgqn",
"2-6 r: bdvvbrr",
"1-15 b: bbbzwbbbbkbgkbp",
"3-8 f: slfvsmvftsstff",
"3-4 z: pvzzggdnhwzjzgp",
"8-9 n: nnnnvnnfn",
"4-10 n: nnnlnngnncnn",
"8-10 l: vktfwjrmslbh",
"3-6 n: nflmqn",
"4-5 s: svggkxz",
"3-4 w: wwjt",
"5-6 t: nbbbdt",
"3-8 d: jrjkdghxqwq",
"7-8 q: qqqqqqgq",
"7-8 k: qxgnkvckpkchqnmxb",
"1-5 v: vvvvvv",
"6-16 s: stmjwhvrrkfgrsxs",
"3-4 s: sdrswqnsjrnhrlds",
"12-15 r: rgdrrrrrrrrrrrgtg",
"12-14 g: gggggggggggjgggggggg",
"1-7 h: phhhhhdnhj",
"3-4 v: tknvv",
"10-11 j: jjbvjjxjjjjj",
"2-5 c: bcbff",
"7-11 r: rrrrrrxrrrdrrrrr",
"13-16 p: ppppppppppppdppr",
"14-16 t: tttttttvtttttttwttt",
"3-6 c: hclhccxhmxtjcbmjc",
"1-2 x: gxxxxx",
"2-3 w: wrwcqt",
"2-6 g: gmblggxgg",
"1-4 l: lllxpkml",
"1-2 d: mdddddd",
"4-8 q: qqqxqqqqq",
"3-4 v: mwvfvlqvv",
"3-6 w: wswwgc",
"2-4 d: bdfd",
"17-19 l: llllsldlbllllnlllzr",
"10-18 j: tjhgvshtbqjtcfcvlr",
"4-9 t: ttttttttttfttttttt",
"8-9 g: ngsggnbgqgtgglnjgcg",
"9-10 d: dddhdddvgbdbd",
"8-9 g: gtggvpgmq",
"11-15 j: jjqkjsmjkgfvjns",
"6-9 t: tqthttvtnttttttg",
"1-7 j: sjjjjjjjjjjjjjj",
"15-16 g: gggggggzgghgggqd",
"10-16 n: nnnnnjnnngnnnnnnnn",
"1-6 p: pppppppp",
"1-2 k: kdkk",
"1-2 b: bjvzqrgbhmgm",
"2-4 x: xvxxx",
"1-5 j: zjjjjjjj",
"2-5 f: lfnpwfz",
"1-12 j: hjjjjjjjjjjj",
"7-10 l: lllllqhlllllmmlpllll",
"11-12 m: mmmnmnmmmmmmmm",
"6-8 z: dzzzzfzzpz",
"14-15 c: cshjrbzhmmpckcwf",
"3-5 q: qqqqz",
"11-12 z: zzzzdzczzkprvztzfrdd",
"4-5 t: ttstbt",
"14-15 k: kkkkkkqkkkkkksr",
"1-12 j: jjjjjjjjjjjwgcjzj",
"2-3 k: rhkhkkg",
"1-16 z: zrzzzzkkhzzflzzzlzzq",
"12-13 h: hhhhhhhhhthgjhh",
"1-5 j: qqjjczwttz",
"11-12 h: hhhhhhhhhhmk",
"1-3 p: qpspbpjfq",
"1-3 f: ffzf",
"11-16 f: qdfmgnfnfvffflfhff",
"12-14 b: btwsgnvvljknbbdf",
"7-15 b: wdrgltbgdqscbhh",
"3-13 j: vwjpjjwjtcpjk",
"2-4 l: clxdsfqfdvkfhcgdswl",
"9-13 m: mmmmmmgmmpnmmmzmmk",
"4-5 x: hxxxx",
"1-4 b: bbbb",
"4-5 r: rrrrwfrv",
"6-9 n: nnnnnnnnsn",
"4-7 m: mvmmmmdmm",
"4-12 t: ttttttjtttttjttttt",
"1-3 c: bccc",
"2-3 m: mmcxbw",
"1-4 r: rrrrrr",
"8-10 l: lllllllplllllllll",
"18-19 h: hbhjdhhnnhfshkhhhgh",
"6-8 h: hfhwhwph",
"1-4 z: cfmz",
"2-3 m: vtmmznmvmrs",
"7-8 l: llhlwzlmjll",
"1-2 m: mmskncxdc",
"1-4 d: ndddd",
"1-3 m: mmgmmm",
"14-15 r: nzrgmcrrgmrxlbr",
"2-13 h: htkhhhhhhqhhlhhhkh",
"2-4 w: wkwsw",
"3-4 w: wmwr",
"2-9 v: kdzkhvnkv",
"8-9 t: wltttttkbktftk",
"9-14 n: vnhdtndfnsncpnf",
"3-10 n: jfnwcngtdz",
"13-15 j: nqzdlvnvvgnmhjj",
"1-12 q: qqqqqqqqqqqqq",
"10-12 c: cccdcmscbhcqccc",
"1-3 w: bzwwgg",
"2-14 d: pdkrpmxxzgcvqkzvvzqd",
"6-11 t: tfqvhtbmdztsnwnt",
"5-9 l: xllmllvjdds",
"6-14 h: pkhthnhxhhhjnscb",
"5-7 m: mmmmrmmm",
"8-10 x: xxxxxxxwxkxxx",
"4-5 k: lmkkkkkskg",
"1-2 d: dtdd",
"5-7 f: frfwsfr",
"4-5 w: rclww",
"4-5 g: gkkmtnlhbkgb",
"4-6 q: qqqfqqq",
"3-4 d: ddjb",
"11-13 n: nnnnnnnnnnnnnnnn",
"4-6 f: jvkxcffdgd",
"3-4 g: ktsgxzn",
"14-15 g: gggggggggggggggg",
"2-3 n: nqnnn",
"2-12 k: kbkpppkrkkjkkkk",
"5-7 c: ccccbcccc",
"9-10 l: plmrklsclx",
"4-11 m: mmmtmmmmmmqlm",
"6-7 b: bbbbbbq",
"7-9 g: kcgggklzg",
"3-6 p: ptxpppppppppppp",
"2-4 s: swsss",
"7-10 c: cccccccccpcc",
"5-7 d: dbddlddddcsdd",
"6-7 k: kkkkkkk",
"2-4 h: hwnvcj",
"1-9 g: nggggggggg",
"9-19 b: dzzpzvgwbdbmthmzfbhb",
"1-6 v: vrclmqxpvkhbvrfdmc",
"4-7 k: sgwktwtttmktrfjzn",
"13-16 x: whxxxxxxxxxxxxxsxx",
"4-9 z: zzzpzzzzzz",
"7-9 t: mwvtbhtxt",
"2-3 j: jfjdj",
"3-9 t: nrkffvgmtdstkkhtfpn",
"3-4 g: ggggxxdjsgrbf",
"4-10 x: kgbxbqnqmc",
"7-10 t: ztndqctmtttthxkwtlm",
"8-12 h: fbvccdshdvhhh",
"9-12 f: ffffsfffffjd",
"1-3 q: qkqqrvmmkh",
"13-14 r: rrrqrrrrrrrrrrrrrrr",
"2-3 g: bmdgkz",
"6-7 k: kkkkkkjk",
"4-9 q: qgzrflqqqd",
"12-13 s: brsswsfsvsfssps",
"5-9 q: drjcqnmwqbncmqqvcjgh",
"7-9 x: xxpxxgzxxxnx",
"2-13 s: ssdsssssjshsjsjswn",
"1-11 m: fmmmmmmmmsmmmh",
"3-8 f: zffrqqhflhvl",
"4-11 l: lqfwlxlllnl",
"12-16 k: kkkkkkkkkkkdkkkgkkkk",
"1-5 s: sssslss",
"3-13 n: ntnnnnpnnlndsnn",
"13-16 f: nzpfvhfrxpxjfmcfff",
"2-3 f: ffbf",
"3-7 r: nxrrrqrqnrrlbj",
"1-10 p: mppptppppcpppppppd",
"9-11 l: ftmflbbljjf",
"6-8 r: rrrrrlrrrr",
"10-16 k: zqknmwppdtckmpgk",
"10-13 t: dpqxttttttttb",
"2-4 f: ftff",
"9-14 k: krkkkqkkkkdkkrn",
"12-17 b: mbxbczbbbbdbbbbbpbnb",
"8-9 j: bjjjjjjjqjvxjfjjjjjj",
"1-3 s: lspsh",
"2-7 p: zpqlvwpmdp",
"6-8 q: qqqqqqqtqq",
"15-16 z: cxzmdcdzckrhzxzz",
"13-17 v: vvvvvvvvvvvvvvvvzv",
"6-8 c: ctcccccc",
"6-7 v: xjzvvfvjmnrvtvncjmdv",
"3-8 m: sqmfbqlm",
"3-9 p: bpqpxpfpzqpjjgv",
"8-10 n: clxnnnxvnnxnnnnnn",
"8-9 m: mmmmmmmmm",
"2-6 s: scssss",
"9-10 t: xtttttttftttt",
"3-5 c: rksck",
"16-17 h: ldhchxlhphlnmhvhh",
"3-6 b: bnbvlb",
"4-5 j: mjjjx",
"2-4 j: sklkmtrjpgprqdn",
"3-8 l: nllnlnll",
"1-6 g: jgbgpt",
"14-17 d: cqddcfsjddddxlcdd",
"4-6 h: qcqhwc",
"6-14 j: fzrkcjrqjssjdjjjjj",
"3-4 j: jdfj",
"1-3 k: fkpkkkck",
"11-12 w: wwwwbwwwwwfg",
"6-7 g: wngnhsg",
"4-13 d: bddjdddddddddpdddddd",
"7-8 k: kmkfkkkp",
"5-9 b: btmlgzbbdb",
"4-7 j: dxlzwsjdbjcqjsnwq",
"9-13 p: pppppmfhpptppp",
"16-17 v: vvvvvvvvvvvvvvvxn",
"1-7 m: mmmmmmtmm",
"1-8 v: vvvvvvvbv",
"9-15 t: tttttttttttttttt",
"15-17 r: rwrqrrhdrtvrfszrj",
"4-9 m: mfddknmcmqhglr",
"7-10 k: kkkkkkkkpkklkk",
"18-19 c: hpqwwkgtqbrcjxptwnc",
"2-3 p: xwppdp",
"5-11 r: vnrhrmknrrr",
"10-15 f: ffmfffffcfftfdfff",
"10-13 m: mmtsmmmmmmmmhmm",
"4-16 j: pjlplfvtgrjhvcdjjdmb",
"8-14 v: kxrjvdbbmxvrzdp",
"6-7 w: wwwwwwgwww",
"9-16 f: hhkvlfrvfvpvlzvcfsg",
"1-3 k: kkbk",
"9-12 d: ddddddddtdddd",
"13-16 w: wwwgwwwwwwwxwwwwwww",
"4-5 t: swtttpkkpwdt",
"5-7 z: zzfzzzp",
"1-5 g: grghg",
"3-4 n: wjsnnnwsxrx",
"8-10 x: xxxxxxpqxc",
"4-13 t: tttbtttttttttttt",
"1-3 w: wswwn",
"1-6 d: dddtddddd",
"12-13 x: xxflxxpxxxxxwgrxxx",
"1-2 m: mxghmm",
"6-11 j: jjjjjjjjjjxj",
"1-3 n: nnfnnnnnnnn",
"7-11 w: wxwwwwpwwwww",
"1-2 q: nqmqfxql",
"9-12 m: mmmmvmnmfmmp",
"5-7 m: mmmmmmms",
"3-4 s: ssqsssssssj",
"6-9 c: ccvcmnccccl",
"1-17 b: gbbbbbbbbbbbbbbbqbb",
"7-10 q: vqqqqxqqql",
"9-12 | |
# Create trigger_queue if none exists
if queue is None:
trigger_queue = Queue.Queue()
else:
trigger_queue = queue
# Start triggerListener (for incoming events to trigger actions)
obj_trigger_listener = TriggerListener(listen_port, trigger_queue)
obj_trigger_listener.start()
# Start triggerHandler (handling incoming events)
obj_trigger_queue_handler = TriggerQueueHandler(mode, trigger_queue, **kwargs)
obj_trigger_queue_handler.start()
# Start check of Sensors (checking sensors that trigger events)
if kwargs['sensors'] is not None:
obj_sensor_check = SensorCheck(trigger_queue, kwargs['device_id'], kwargs['sensors'])
obj_sensor_check.start()
print("Client started and monitoring sensors: " + str(kwargs['sensors']))
# Wait for key to abort
print("Press Enter to exit")
raw_input()
obj_trigger_listener.stop()
obj_trigger_queue_handler.stop()
if kwargs['sensors'] is not None:
obj_sensor_check.stop()
# Exit loop
print("Aborted")
def get_enabled_sensors(config_object, log_object=None):
    """ logger_object as argument, starts sensors and returns list of sensor-objects """
    import json

    enabled = list()

    # PIR motion detector
    if config_object.get('sensor_motionpir', 'enable') == 'true':
        motion_pin = config_object.get('sensor_motionpir', 'pin')
        enabled.append(PirMotion(log_object, pin=int(motion_pin)))

    # magnetic door switch
    if config_object.get('sensor_doorswitch', 'enable') == 'true':
        door_pin = config_object.get('sensor_doorswitch', 'pin')
        enabled.append(Switch(log_object, pin=int(door_pin)))

    # DHT11 probes: type=0 reads humidity, type=1 reads temperature
    if config_object.get('sensor_dht11_humid', 'enable') == 'true':
        humid_pin = config_object.get('sensor_dht11_humid', 'pin')
        humid_limit = config_object.get('sensor_dht11_humid', 'limit')
        enabled.append(DHT(log_object, pin=int(humid_pin), type=0, limit=humid_limit))

    if config_object.get('sensor_dht11_temp', 'enable') == 'true':
        temp_pin = config_object.get('sensor_dht11_temp', 'pin')
        temp_limit = config_object.get('sensor_dht11_temp', 'limit')
        enabled.append(DHT(log_object, pin=int(temp_pin), type=1, limit=temp_limit))

    # MQ-2 gas sensor, sampled through the SPI ADC
    if config_object.get('sensor_mq2', 'enable') == 'true':
        mq2_adc_in = int(config_object.get('sensor_mq2', 'adc_in'))
        mq2_clock = int(config_object.get('sensor_mq2', 'clockpin'))
        mq2_mosi = int(config_object.get('sensor_mq2', 'mosipin'))
        mq2_miso = int(config_object.get('sensor_mq2', 'misopin'))
        mq2_cs = int(config_object.get('sensor_mq2', 'cspin'))
        mq2_sleep = float(config_object.get('sensor_mq2', 'sleep_int'))
        mq2_check = int(config_object.get('sensor_mq2', 'check_int'))
        mq2_pause = int(config_object.get('sensor_mq2', 'pause_int'))
        mq2_limit = int(config_object.get('sensor_mq2', 'limit'))
        enabled.append(AdcMeter(mq2_adc_in, clockpin=mq2_clock, mosipin=mq2_mosi,
                                misopin=mq2_miso, cspin=mq2_cs,
                                check_int=mq2_check, sleep_int=mq2_sleep,
                                pause_int=mq2_pause, limit=mq2_limit,
                                logger_object=log_object))

    # ambient light meter
    if config_object.get('sensor_luxmeter', 'enable') == 'true':
        lux_limit = int(config_object.get('sensor_luxmeter', 'limit'))
        lux_check = int(config_object.get('sensor_luxmeter', 'check_int'))
        enabled.append(LuxMeter(limit=lux_limit, check_int=lux_check, logger_object=log_object))

    # power meter, sampled through the SPI ADC
    if config_object.get('sensor_power', 'enable') == 'true':
        pw_adc_in = int(config_object.get('sensor_power', 'adc_in'))
        pw_minref = int(config_object.get('sensor_power', 'minref'))
        pw_clock = int(config_object.get('sensor_power', 'clockpin'))
        pw_mosi = int(config_object.get('sensor_power', 'mosipin'))
        pw_miso = int(config_object.get('sensor_power', 'misopin'))
        pw_cs = int(config_object.get('sensor_power', 'cspin'))
        pw_sleep = float(config_object.get('sensor_power', 'sleep_int'))
        pw_interval = int(config_object.get('sensor_power', 'interval'))
        pw_limit = int(config_object.get('sensor_power', 'limit'))
        # the config stores a JSON boolean ("true"/"false")
        pw_debug = json.loads(config_object.get('sensor_power', 'debug').lower())
        enabled.append(PowerMeter(pw_minref, pw_adc_in, clockpin=pw_clock, mosipin=pw_mosi,
                                  misopin=pw_miso, cspin=pw_cs, check_int=pw_interval,
                                  sleep_int=pw_sleep, limit=pw_limit, debug=pw_debug,
                                  logger_object=log_object))

    return enabled
def check_event_in_trigger(event, trigger_json):
    """ Function to compare attributes with json_list from config """
    # NOTE: the original signature used Python 2 tuple parameter unpacking,
    # removed from the language by PEP 3113. Callers still pass the same
    # (device, attribute, data) tuple as the first argument; it is unpacked
    # here instead, making the function valid on both Python 2 and 3.
    import json
    arg_device, arg_attr, arg_data = event
    trigger_when = json.loads(trigger_json)
    result = []
    for current_cond in trigger_when:
        # the first character of 'data' selects the comparison operator
        op = current_cond['data'][0]
        matches_ids = (int(current_cond['dev']) == int(arg_device) and
                       int(current_cond['attr']) == int(arg_attr))
        if op == '=':
            # exact (string) comparison against the remainder of 'data'
            result.append(matches_ids and current_cond['data'][1:] == arg_data)
        if op == '>':
            result.append(matches_ids and
                          float(arg_data) > float(current_cond['data'][1:]))
        if op == '<':
            result.append(matches_ids and
                          float(arg_data) < float(current_cond['data'][1:]))
    # the event fires if any configured condition matches
    return any(result)
# noinspection PyMethodMayBeStatic
class GmailAlarm(Gmail):
    """ Gmail-backed alarm: inherits Gmail and adds trigger routing fields """

    def __init__(self, *args, **kwargs):
        Gmail.__init__(self, *args, **kwargs)
        # Routing configuration; populated by the caller after construction
        # (see get_enabled_alarms).
        self.trigger_when = None
        self.mail_to = None
        self.mail_from = None

    # noinspection PyArgumentList
    def trigger(self, *args, **kwargs):
        # Firing the alarm simply sends a mail via the Gmail base class.
        self.send(*args, **kwargs)

    def stop(self):
        print get_datetime() + ": Stopping gmail"
class SmsAlarm(ModemDongle):
    """ SMS-backed alarm: inherits ModemDongle and adds trigger routing fields """

    def __init__(self, *args, **kwargs):
        ModemDongle.__init__(self, *args, **kwargs)
        # Routing configuration, taken from the constructor kwargs
        # (unlike GmailAlarm, which is configured after construction).
        self.trigger_when = kwargs.get('trigger_when', None)
        self.number = kwargs.get('number', None)

    def trigger(self, *args, **kwargs):
        # Firing the alarm sends an SMS through the modem dongle.
        self.send(*args, **kwargs)

    def stop(self):
        ModemDongle.stop(self)
        print get_datetime() + ": Stopping SMS"
def get_enabled_alarms(config_object, log_object=None):
    """ logger_object as argument, check enables alarms and returns list of alarm-objects """
    alarms = list()
    # Piezo buzzer: started right away so it can react to triggers.
    if config_object.get('alarm_buzzer', 'enable') == 'true':
        pin = config_object.get('alarm_buzzer', 'pin')
        alarm_buzzer = Buzzer(log_object, pin=int(pin))
        alarm_buzzer.start()
        alarms.append(alarm_buzzer)
    # Gmail alarm: routing fields are assigned after construction.
    if config_object.get('alarm_gmail', 'enable') == 'true':
        gmail_user = config_object.get('alarm_gmail', 'gmail_user')
        gmail_pass = config_object.get('alarm_gmail', 'gmail_pass')
        alarm_gmail = GmailAlarm(gmail_user, gmail_pass, log_object)
        alarm_gmail.trigger_when = config_object.get('alarm_gmail', 'trigger_when')
        alarm_gmail.mail_from = config_object.get('alarm_gmail', 'from')
        alarm_gmail.mail_to = config_object.get('alarm_gmail', 'to')
        alarms.append(alarm_gmail)
    # SMS alarm via the USB modem dongle.
    if config_object.get('alarm_sms', 'enable') == 'true':
        sms_tty = config_object.get('alarm_sms', 'sms_tty')
        ## sms_port = logger_object.get('alarm_sms', 'sms_port')
        sms_number = config_object.get('alarm_sms', 'sms_number')
        sms_check_int = config_object.get('alarm_sms', 'sms_check_int')
        sms_incoming_cmd = config_object.get('alarm_sms', 'sms_incoming_cmd')
        sms_trigger_when = config_object.get('alarm_sms', 'trigger_when')
        # Create SMS Dongle Object
        alarm_sms = SmsAlarm(log_object, tty=sms_tty, incoming_cmd=sms_incoming_cmd, check_int=sms_check_int,
                             number=sms_number, trigger_when=sms_trigger_when,
                             functions={'show_status_short': show_status_short, 'pause': pause})
        alarm_sms.start()
        print "Starting SMS engine thread: %s" % (str(alarm_sms),)
        # Start listening daemon - experimental
        ## objSMSListener = smsListener(sms_port, dongle=alarm_sms)
        ## objSMSListener.start()
        # Append object to alarms list
        alarms.append(alarm_sms)
    return alarms
class Camera:
    """ Define class for Camera """

    def __init__(self, camera_type, camera_name, camera_ftp_server=None, camera_ftp_port=None, camera_ftp_user=None,
                 camera_ftp_pass=None, camera_ftp_dir=None, logger_object=None):
        # Camera identity and FTP upload configuration.
        self.camera_type = camera_type
        self.camera_name = camera_name
        self.camera_ftp_server = camera_ftp_server
        self.camera_ftp_port = camera_ftp_port
        self.camera_ftp_user = camera_ftp_user
        self.camera_ftp_pass = camera_ftp_pass
        self.camera_ftp_dir = camera_ftp_dir
        # Filenames of snapshots captured since the last removal.
        self.cam_shots = []
        self.logger_object = logger_object
        # Background FTP worker used by upload_all().
        self.objFTP = FTP(camera_ftp_server, camera_ftp_user, camera_ftp_pass, self.logger_object, port=camera_ftp_port)
        self.objFTP.start()
        # Busy flag: "capture" while a snapshot is in progress, else None.
        self.status = None

    def trigger(self):
        # Capture a snapshot into /tmp with a timestamped filename.
        self.status = "capture"
        filename = "/tmp/" + get_datetime().replace("-", "").replace(":", "").replace(" ",
                                                                                      "_") + '_' + self.camera_name + ".jpg"
        if self.camera_type == 'pi':
            # NOTE(review): only the 'pi' camera type is handled here; other
            # types record a filename without taking a picture — confirm.
            PiCamera(filename)
        self.cam_shots.append(filename)
        self.status = None

    def upload_all(self, remove=False):
        # Upload every captured snapshot via the FTP worker; optionally
        # schedule the local copies for delayed removal afterwards.
        import thread
        while self.status is not None:
            time.sleep(1)  # wait for an in-progress capture to finish
        self.objFTP.status = "transfer"
        self.objFTP.upload(self.cam_shots, self.camera_ftp_dir)
        if remove is True:
            while self.objFTP.status is not None:
                time.sleep(1)  # wait for the transfer to complete
            # Add delay to removal by thread
            thread.start_new_thread(self.remove_all, ())
            # self.remove_all()

    def remove_all(self):
        # Delete captured files from disk after a 300 s grace period.
        import os
        import time
        filelist = self.cam_shots
        self.cam_shots = []
        time.sleep(300)
        # Clear list of files to prevent same pic being mailed again, delay del
        for current_file in filelist:
            os.remove(current_file)

    def list_all(self):
        # Filenames captured since the last removal.
        return self.cam_shots

    def stop(self):
        print get_datetime() + ": Stopping camera " + self.camera_name
        self.objFTP.stop()
def get_enabled_cameras(config_object, log_object=None):
    """ Scan config sections named camera_<n> and return Camera objects for
    every section whose 'enable' option is 'true'.

    config_object -- ConfigParser-style object (sections()/get())
    log_object    -- optional logger passed through to each Camera

    FIX: FTP options are now read only for enabled camera sections, so a
    disabled section no longer needs to define them (previously every
    camera_<n> section had all options fetched unconditionally).
    """
    import re
    cameras = []
    for current_section in config_object.sections():
        # Only sections named camera_0, camera_1, ... describe cameras
        if re.match(r'camera_([0-9]+)', current_section) is None:
            continue
        if config_object.get(current_section, 'enable') != 'true':
            continue
        cameras.append(Camera(config_object.get(current_section, 'type'),
                              config_object.get(current_section, 'name'),
                              config_object.get(current_section, 'ftp_server'),
                              config_object.get(current_section, 'ftp_port'),
                              config_object.get(current_section, 'ftp_user'),
                              config_object.get(current_section, 'ftp_pass'),
                              config_object.get(current_section, 'ftp_dir'),
                              log_object))
    return cameras
def trigger_all_cameras(camera_list):
    """ Trigger all cameras concurrently and return the combined list of
    captured file names, in camera order. """
    # One worker thread per camera so all shots are taken at the same time
    workers = [threading.Thread(target=cam.trigger) for cam in camera_list]
    for worker in workers:
        worker.start()
    # Wait until every camera has finished capturing
    for worker in workers:
        worker.join()
    # Collect the file names produced by every camera
    camera_files = []
    for cam in camera_list:
        camera_files += cam.cam_shots
    return camera_files
def upload_all_cameras(camera_list, remove=True):
    """ Upload all files from every camera in camera_list.

    remove -- forwarded to Camera.upload_all(); when True the files are
    deleted after upload.  Defaults to True to match the previous
    hard-coded behaviour, but callers can now keep the files by passing
    remove=False (generalization of the old fixed argument).
    """
    for camera in camera_list:
        camera.upload_all(remove)
def get_sensor_status(obj_db):
    """ Generate a list of human-readable status strings for all sensor
    rows currently in the device_attr table. """
    rows = obj_db.select('device_attr', "id > 0")
    if rows is None:
        return []
    # Row layout (by position): [?, device, attribute, status, date]
    return [
        "%s\nDev:%s,Attr:%s\nStatus:\n%s" % (str(row[4]), str(row[1]), str(row[2]), str(row[3]))
        for row in rows
    ]
def display_status_lcd(obj_lcd, *args):
    """ Display status messages on the LCD display.

    Accepted call forms:
      display_status_lcd(lcd, get_sensor_status, obj_db)  -- call the
          sensor-status function with obj_db, display each returned message
      display_status_lcd(lcd, [msg1, msg2, ...])          -- display each item
      display_status_lcd(lcd, msg1, msg2, ...)            -- display each arg

    BUG FIXES: the old code compared __name__ without checking callability
    (AttributeError on plain strings), only matched the obsolete camelCase
    name 'getSensorStatus', and tested `type(args) is list` which is never
    true because *args is always a tuple.
    """
    if callable(args[0]) and args[0].__name__ in ('getSensorStatus', 'get_sensor_status'):
        # Sensor-status function passed: call it with objDB as argument
        messages = args[0](args[1])
    elif type(args[0]) is list:
        messages = args[0]
    else:
        messages = list(args)
    for item in messages:
        obj_lcd.text(item, 8)
def check_device_condition(obj_db, arg_device, arg_attr, arg_data):
    """ Check whether a matching row exists for (arg_device, arg_attr).

    arg_data may be prefixed with '=' (explicit equality, stripped), or
    '>'/'<' (numeric comparison done in SQL).  Returns True when at least
    one matching row exists.

    FIX: the '=' and default branches were duplicated verbatim; merged.
    Also no longer raises IndexError on an empty arg_data string.
    """
    if arg_data[:1] in ('>', '<'):
        # NOTE(review): this builds SQL by string concatenation - callers
        # must pass trusted values only; parameterized queries would be safer
        result = obj_db.select('device_attr',
                               'device_id = ' + arg_device + ' AND attr_id = ' + arg_attr + ' AND data '
                               + arg_data[0] + ' ' + arg_data[1:])
    else:
        if arg_data[:1] == '=':
            # Leading '=' is just an explicit equality marker - strip it
            arg_data = arg_data[1:]
        result = obj_db.select('device_attr', {'device_id': arg_device, 'attr_id': arg_attr, 'data': arg_data})
    return bool(result)
def send_reset_all_clients(alarm_list, conf):
""" Function to send reset_all to all clients """
import json
ips = json.loads(alarm_settings.get('clients_ip', None))
print get_datetime() + ": Send Reset Sensors - " + str(ips)
if len(ips) > 0:
for ip, port in ips:
trigger_event(str(ip), int(port), 'reset_all_sensors')
result = test_socket(ip, port, log_object)
if result == 1:
for AlarmDev in alarm_list:
if AlarmDev.__class__.__name__ is "Buzzer" and conf.get('alarm_buzzer', 'enable') == 'true':
print get_datetime() + ": Client %s could not be reached" % (ip,)
log_object.log("Client %s could not be reached" % (ip,), 'DEBUG')
AlarmDev.buzz_on(5)
class Alarm:
""" Class that handles alarm notifications """
def __init__(self, obj_config, obj_db, alarm_dev_list, obj_led, cameras):
import json
self.objConfig = obj_config
self.objDB = obj_db
self.AlarmDevList | |
<filename>params.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
lists and dictionaries of column names
mappings of new columns to columns of original survey
mappings of column headers to creative habits
renaming dictionaries
etc.
"""
import pathlib as pl
# Filesystem paths: working directory plus its data/ and results/ subfolders
wd = pl.Path.cwd()
datapath = wd/"data"
resultspath = wd/"results"
## attribute lists for cleaning columns
# variables from TopN matches to groupby and keep when summarizing top n matches.
topN_groupVars = ['id', 'Habits_All','Habits_Orig','Top_Habits', 'n_habits_all', 'n_habits_orig', 'Cluster_ID', 'Clus_Top_Habits' ] + ['Name']
# final attributes to keep in the best_match summary
bestMatch_finalCols = ['id', 'Name','Cluster_ID', 'Habits_All', 'Habits_Orig','Top_Habits', 'Clus_Top_Habits',
                       'Habits_unique', 'Habits_Clus_shared',
                       'n_habits_all', 'n_habits_orig', 'frac_top_habits', 'top_habits_sim','habits_sim',
                       'n_unique', 'n_shared', 'x_tsne', 'y_tsne']
# 'sortby_strategy', 'count', 'frac_top_'+str(topN) ]
# Columns of the full per-respondent summary: ids, cluster info, habit
# lists/counts, 2-D plot coordinates, per-species percent/top-habit/affinity
# triples, then the ordinal and categorical question columns below.
# (Misspelled keys such as 'Montasker', 'Pragmastist' and 'Novetly' are
# intentional - they match the keys used throughout this module and are
# fixed for display by ordCol_renameDict.)
finalCols = ['id','Name', 'Cluster_ID', 'Creative_Species', 'Clus_Percent','Cluster_Affinity', 'Clus_Top_Habits',
             'Habits_All', 'Habits_Clus_shared', 'Habits_unique','n_habits_all', 'n_unique', 'n_shared',
             'x', 'y',
             'Mono Routinus_percent', 'Mono Routinus_top_habits', 'Mono Routinus_affinity',
             'Yolo Chaotis_percent', 'Yolo Chaotis_top_habits', 'Yolo Chaotis_affinity',
             'Socialis Adventurous_percent','Socialis Adventurous_top_habits','Socialis Adventurous_affinity',
             'Focus Mononovous_percent', 'Focus Mononovous_top_habits', 'Focus Mononovous_affinity',
             'Novo Gregarious_percent', 'Novo Gregarious_top_habits', 'Novo Gregarious_affinity',
             'Sui Inspira_percent', 'Sui Inspira_top_habits', 'Sui Inspira_affinity',
             'Solo Noctus_percent','Solo Noctus_top_habits','Solo Noctus_affinity',
             'Montasker -- Multitasker','Specialist -- Generalist','Solo Creator -- Collaborator','Self-Critical -- Self-Assured',
             'Distractible -- Focused','Inwardly vs Outwardly Inspired', 'Rational -- Intuitive', 'Internally vs Externally Motivated',
             'NonKinetic -- Kinetic', 'Controlled Chaos -- Organized', 'Slow -- Fast Paced', 'Pragmastist -- Perfectionist',
             'Risk Averse -- Risk Friendly','Make It Happen -- Let It Happen','Tenacious -- Reframer','Private vs Public Workspace',
             'Work in Silence vs Noise/Music', 'Urban -- Nature', 'Novetly Seeker -- Creature of Habit', 'Stifled_By vs Stimulated_By Constraints',
             'Happy -- Tortured', 'Non-Performer -- Performer', 'Solo-Ideator -- Group-Ideator', 'Consistent -- Inconsistent',
             'Creative_Process','Biorhythm']
# list of ordinal columns (endpoint-scale survey questions); the trailing
# comments show the display names later applied by ordCol_renameDict
orig_OrdCols = ['Montasker -- Multitasker', # 'Monotasker -- Multitasker'
                'Specialist -- Generalist',
                'Solo Creator -- Collaborator',
                'Self-Critical -- Self-Assured',
                'Distractible -- Focused', # 'Like Distractions -- Dislike Distractions'
                'Inwardly vs Outwardly Inspired', #'Inwardly -- Outwardly Inspired'
                'Rational -- Intuitive',
                'Internally vs Externally Motivated', # 'Internally -- Externally Motivated'
                'NonKinetic -- Kinetic',
                'Controlled Chaos -- Organized', # Comforting Mess -- Tidy
                'Slow -- Fast Paced', # Slow-Paced -- Fast-Paced',
                'Pragmastist -- Perfectionist',
                'Risk Averse -- Risk Friendly', # Risk-Averse -- Risk-Friendly
                'Make It Happen -- Let It Happen',
                'Tenacious -- Reframer',
                'Private vs Public Workspace', # 'Private Spaces -- Public Spaces'
                'Work in Silence vs Noise/Music', # 'Quiet/Silence -- Noise/Music'
                'Urban -- Nature', # 'Nature-Agnostic -- Nature Lover'
                'Novetly Seeker -- Creature of Habit', # 'Novely-Seeker -- Routine-Seeker'
                'Stifled_By vs Stimulated_By Constraints']
# ordinal columns added after the original survey
new_OrdCols = ['Happy -- Tortured',
               'Non-Performer -- Performer',
               'Solo-Ideator -- Group-Ideator',
               'Consistent -- Inconsistent' ]
# categorical (non-ordinal) columns added after the original survey
new_CatCols = ['Creative_Process',
               'Biorhythm'] # Early Bird vs Night Owl
# raw survey answer text -> short label
BiorhythmResponses = {"Early Morning": "Early Bird",
                      "Late Night" : "Night Owl"}
CreativeProcessResponses = {"Seeing the big picture and defining the problem": "Problem Definer",
                            "Generating lots of ideas or possible solutions": "Ideator",
                            "Picking the winning solutions from the options": "Evaluator",
                            "Executing and getting things done": "Implementer"
                            }
# column name mapping: full survey question text -> short analysis column name
newCol_renameDict = {'#': 'id',
                     'Please leave your name': 'Name',
                     # ordinal questions
                     'What factors are most significant in motivating your creative work?': 'Internally vs Externally Motivated', #'Internally -- Externally Motivated'
                     'When a significant risk is involved in my creative endeavors...': 'Risk Averse -- Risk Friendly', # Risk-Averse -- Risk-Friendly
                     'How easy is it for you to do mediocre work if it seems prudent?': 'Pragmastist -- Perfectionist',
                     'Do you tend be more self-critical or more self-assured in your creative work?':'Self-Critical -- Self-Assured',
                     'What is the breadth of your creative interests?': 'Specialist -- Generalist',
                     'What do you regard as your strongest source of creative inspiration?': 'Inwardly vs Outwardly Inspired', #'Inwardly -- Outwardly Inspired'
                     'How often do you engage in creative collaboration?': 'Solo Creator -- Collaborator',
                     'Compared to others, how rapidly do you tend to generate new ideas or possible solutions?': 'Slow -- Fast Paced', # Slow-Paced -- Fast-Paced',
                     'To what degree does your creative work involve the element of chance?': 'Make It Happen -- Let It Happen',
                     'How do you feel about the role of constraints in your creative process?': 'Stifled_By vs Stimulated_By Constraints',
                     'On average, how many creative projects do you work on simultaneously?': 'Montasker -- Multitasker', # 'Monotasker -- Multitasker'
                     "When your initial plans for creative work don't pan out, how quickly do you typically move to Plan B?": 'Tenacious -- Reframer',
                     'How do you envision and implement your creative projects?': 'Rational -- Intuitive',
                     'Broadly speaking, how do you feel about the role of distractions in your creative process?': 'Distractible -- Focused', # 'Like Distractions -- Dislike Distractions'
                     'How important do you think physical exercise and/or movement is to being creative in your work?': 'NonKinetic -- Kinetic',
                     'How important to your creative process is it that you spend time outdoors in nature?': 'Urban -- Nature', # Nature-Agnostic -- Nature Lover
                     'How compelled would you be to clean a messy workspace before beginning your creative work?': 'Controlled Chaos -- Organized', # Comforting Mess -- Tidy
                     'What kind of space is most productive for you when you are working on a creative project?': 'Private vs Public Workspace', # Public Spaces -- Private Spaces
                     "I'm most creative if my surroundings and routine are...": 'Novetly Seeker -- Creature of Habit', # Novely-Seeker -- Routine-Seeker
                     'What noise level is most comfortable for you when you are working on a creative project?': 'Work in Silence vs Noise/Music', # 'Quiet/Silence -- Noise/Music'
                     # new questions
                     'How do you typically feel while your re creative problem solving or working on a creative project?':'Happy -- Tortured',
                     'Are you able to be creative when performing for others?': 'Non-Performer -- Performer',
                     'How do you feel about group brainstorming sessions?': 'Solo-Ideator -- Group-Ideator',
                     'How much variation is there in the rate at which you do creative work?': 'Consistent -- Inconsistent',
                     # categorical questions
                     'If you had to pick one, in which of the following stages of the creative process do you feel most confident?': 'Creative_Process',
                     'If you could choose, what time of day do you prefer to do creative work?': 'Biorhythm',
                     }
# dictionary of ordinal column names to endpoint habit tuples
# (low-end habit label, high-end habit label) for each ordinal scale
habitDict = {'Montasker -- Multitasker': ("Monotasker", "Multitasker"),
             'Specialist -- Generalist': ("Specialist", "Generalist"),
             'Solo Creator -- Collaborator': ("Solo Creator", "Collaborator"),
             'Self-Critical -- Self-Assured': ("Self-Critical", "Self-Assured"),
             'Distractible -- Focused': ("Love Distractions", "Hate Distractions"), # ('Like Distractions', 'Dislike Distractions'),
             'Inwardly vs Outwardly Inspired': ("Inwardly Inspired", "Outwardly Inspired"), #'Internally -- Externally Inspired'
             'Rational -- Intuitive': ("Rational", "Intuitive"),
             'Internally vs Externally Motivated': ("Internally Motivated", "Externally Motivated"), #'Internally -- Externally Motivated'
             'NonKinetic -- Kinetic': ("NonKinetic", "Kinetic"),
             'Controlled Chaos -- Organized': ("Controlled Chaos", "Organized"), #("Comforting Mess", "Tidy Workspace"),
             'Slow -- Fast Paced': ("Slow Paced", "Fast Paced"),
             'Pragmastist -- Perfectionist': ("Pragmastist", "Perfectionist"),
             'Risk Averse -- Risk Friendly': ("Risk Averse", "Risk Friendly"),
             'Make It Happen -- Let It Happen': ("Make It Happen", "Let It Happen"), #("Make It Happen", "Let It Unfold"),
             'Tenacious -- Reframer': ("Tenacious", "Reframer"),
             'Private vs Public Workspace': ("Private", "Public"), #("Private Spaces", "Public Spaces"),
             'Work in Silence vs Noise/Music': ("Silence", "Noise/Music"), #("Quiet/Silence", "Noise/Music"),
             'Urban -- Nature': ("Urban", "Nature"), #("Nature-Agnostic", "Nature-Lover"),
             'Novetly Seeker -- Creature of Habit': ("Novelty Seeker", "Routine Seeker"), #("Novelty-Seeker", "Routine-Seeker")
             'Stifled_By vs Stimulated_By Constraints': ("Stifled By Constraints", "Stimulated By Constraints"),
             # new questions
             'Happy -- Tortured': ('Happy', 'Tortured') ,
             'Non-Performer -- Performer': ('Non-Performer', 'Performer'),
             'Solo-Ideator -- Group-Ideator': ('Solo-Ideator', 'Group-Ideator'),
             'Consistent -- Inconsistent': ('Consistent', 'Inconsistent'),
             }
#########################
#rename columns with updated habit endpoints
ordCol_renameDict = {'Montasker -- Multitasker': 'Monotasker -- Multitasker',
'Distractible -- Focused': "Like Distractions -- Dislike Distractions",
'Inwardly vs Outwardly Inspired': 'Inwardly -- Outwardly Inspired',
'Internally vs Externally Motivated': 'Internally -- Externally Motivated',
'Controlled Chaos -- Organized': "Comforting Mess -- Tidy",
'Slow -- Fast Paced': "Slow-Paced -- Fast-Paced",
'Risk Averse -- Risk Friendly': "Risk-Averse -- Risk-Friendly",
'Stifled_By vs Stimulated_By Constraints': 'Stifled By -- Stimulated By Constraints',
'Private vs Public Workspace': "Private Spaces -- Public Spaces",
'Work in Silence vs Noise/Music': "Silence -- Noise",
'Urban -- Nature': "Nature-Agnostic -- Nature Lover",
'Novetly Seeker -- Creature of Habit': "Novely-Seeker -- | |
import datetime
import json
import os
import time
import requests
import jsonpickle
# Global Variables:
# Used for Url Requests : Time_Period / Requests is the per-request sleep
# applied by urltojson() to stay under the API rate limit
Time_Period = 5 * 60 # 5 min in seconds
Requests = 2000 # number of requests
# Today's Date subtracting 3 Months (31days in a month):
last_3month = datetime.date.today() - datetime.timedelta(days=93) # looks like: YYYY-MM-DD
date = datetime.datetime.today().strftime('%Y-%m-%d') # Current date : YYYY-MM-DD
# Api Key To work with...
#api_key = r"Please-Enter-API-Key-Here"
# Creates Directory for each Scanning day
#os.chdir(r"C:\...")
#if not os.path.exists(date):
# os.makedirs(date)
# print("Directory Created!")
#else:
# print("Directory Already Exists!")
# Functions:
def api_key_check(apikey):
    """ Validate apikey against the BitSight API, prompting for a new key
    until one is accepted; return the accepted key.

    BUG FIX: the recursive version bound the re-entered key to a local and
    discarded it when the recursion returned, and could exhaust the stack
    after repeated bad input.  A loop fixes both; returning the accepted
    key (the old version returned None) lets callers actually use it.
    """
    url = 'https://api.bitsighttech.com/'
    while True:
        response = requests.get(url, auth=(apikey, ""))
        if response.status_code == 401 and str(response.json()["detail"]) == "Invalid token":
            apikey = input("Invalid Api key Please Insert Valid Api key\n")
        else:
            # Any non-401 response is treated as "key accepted", as before
            print("Api Key is Valid.")
            return apikey
def urltojson(url, apikey):
    """ GET url with HTTP basic auth and return the decoded JSON body.

    Retries indefinitely on any non-200 status (typically 403 when too many
    requests were sent), sleeping the rate-limit delay before each attempt.

    BUG FIX: the retry used unbounded recursion, which raises
    RecursionError under a sustained run of non-200 responses; rewritten
    as a loop with identical behaviour.
    """
    # Limit of Requests is : 5000 requests per 5 min ( if light traffic)
    # or
    # 100 requests per 5 min ( if heavy traffic)
    while True:
        # Spread requests evenly over the rate-limit window
        time.sleep(Time_Period / Requests)
        response = requests.get(url, auth=(apikey, ""))
        if response.status_code == 200:
            return response.json()
def get_companies():
    """ Fetch every company visible to the API key and return them as a
    list of Company objects, printing progress as it goes. """
    companies_list = []
    try:
        url = f"https://api.bitsighttech.com/ratings/v1/companies/"
        json_companies = urltojson(url, api_key)
        records = json_companies["companies"]
        size = len(records)
        for i, company in enumerate(records, start=1):
            comp = Company(company["name"], company["guid"], company["rating"])
            print(f"({i} | {size}) - ", comp.Name, comp.Score)
            companies_list.append(comp)
    except requests.exceptions.RequestException as e:
        print(str(e.response))
    return companies_list
# Classes:
class Company:
    """ A BitSight company: name, guid, rating, plus its Diligence findings
    and discovered Assets (both fetched eagerly in __init__). """
    # Class-level defaults; every instance rebinds all of these in __init__
    Name = None
    Guid = None
    Score = None
    Diligence = None
    Assets = None
    def __init__(self, name, guid, rating):
        self.Name = name
        self.Guid = guid
        # Diligence() performs one API call per category - construction is slow
        self.Diligence = Diligence(self)
        self.Score = rating
        self.Assets = self.get_assets()
    # gets all the assets of a company ip/domain name and a list of the resolved ip address in an object form
    def get_assets(self):
        """ Return the company's assets as Asset objects, following the
        API's links.next paging; None on a request error.

        FIX: the original duplicated the page-processing loop three times
        (first page, middle pages, last page); a single loop that processes
        the current page and then follows links.next is equivalent.
        """
        assets = []
        try:
            url = f"https://api.bitsighttech.com/ratings/v1/companies/{self.Guid}/assets?&limit=1000000"
            json_obj = urltojson(url, api_key)
            while True:
                # "results" may be absent/empty on a page with count == 0
                for line in json_obj.get("results", []):
                    assets.append(Asset(line["asset"], line["ip_addresses"]))
                nexturl = str(json_obj["links"]["next"])
                if nexturl == "None":
                    break
                json_obj = urltojson(nexturl, api_key)
            return assets
        except requests.exceptions.RequestException as e:
            print(e)
            return None
class Asset: # object that represents the assets of a company ( ip/domain name and a list of resolved addresses)
    """ One company asset: an ip/domain name plus its resolved addresses. """
    # FIX: AssetAddress was a class-level mutable default ([]), a shared
    # object across all instances; use None as the inert class default
    AssetName = None
    AssetAddress = None
    def __init__(self, asset_name, address):
        self.AssetName = asset_name
        self.AssetAddress = address
class Diligence: # object to hold all Diligence lists by category of the company for easy access
    Company = None
    # NOTE(review): the lists below are mutable class-level defaults; they
    # are only safe because __init__ rebinds every one of them per instance
    CompromisedSystems = []
    Spf = []
    Dkim = []
    SSLConfiguration = []
    SSLCertificates = []
    OpenPorts = []
    WebApplicationHeaders = []
    PatchingCadence = []
    InsecureSystems = []
    ServerSoftware = []
    DesktopSoftware = []
    DnsSec = []
    UserBehavior = []
    def __init__(self, company):
        """ Eagerly load every diligence category for *company*; each
        getter performs its own API call, so construction is slow """
        self.Company = company
        self.CompromisedSystems = self.get_compromised_systems()
        self.Spf = self.get_spf_records()
        self.Dkim = self.get_dkim()
        self.SSLConfiguration = self.get_ssl_configuration()
        self.SSLCertificates = self.get_ssl_certificates()
        self.OpenPorts = self.get_open_ports()
        self.WebApplicationHeaders = self.get_web_application_headers()
        self.PatchingCadence = self.get_patching_cadence()
        self.InsecureSystems = self.get_insecure_systems()
        self.ServerSoftware = self.get_server_software()
        self.DesktopSoftware = self.get_desktop_software()
        self.DnsSec = self.get_dnssec()
        self.UserBehavior = self.get_user_behavior()
def get_botnet_infection(self):
botnet = []
risk_vector = "botnet_infections"
try:
url = f"https://api.bitsighttech.com/ratings/v1/companies/{self.Company.Guid}/findings?" \
f"risk_vector={risk_vector}" \
f"&affects_rating=true" \
f"&last_seen_gt={last_3month}" \
f"&limit=1000000" # limit 1 million records for minimum request number
r = urltojson(url, api_key)
if int(r["count"]) > 0:
for line in r["results"]:
_type = line["risk_vector_label"]
asset_name = line["evidence_key"]
location = line["details"]["geo_ip_location"]
details = line["details"]["infection"]["family"]
first_seen = line["first_seen"]
last_seen = line["last_seen"]
days = str(line["duration"]).split()[0]
obj = CompromisedSystems(_type, asset_name, location, first_seen, last_seen, days, details)
botnet.append(obj)
except requests.exceptions.RequestException as e:
print(e.response.text)
return botnet
def get_potentially_exploited(self):
potentially_exploited = []
risk_vector = "potentially_exploited"
try:
url = f"https://api.bitsighttech.com/ratings/v1/companies/{self.Company.Guid}/findings?" \
f"risk_vector={risk_vector}" \
f"&affects_rating=true" \
f"&last_seen_gt={last_3month}" \
f"&limit=1000000" # limit 1 million records for minimum request number
r = urltojson(url, api_key)
if int(r["count"]) > 0:
for line in r["results"]:
_type = line["risk_vector_label"]
asset_name = line["evidence_key"]
location = line["details"]["geo_ip_location"]
details = line["details"]["infection"]["family"]
first_seen = line["first_seen"]
last_seen = line["last_seen"]
days = str(line["duration"]).split()[0]
obj = CompromisedSystems(_type, asset_name, location, first_seen, last_seen, days, details)
potentially_exploited.append(obj)
except requests.exceptions.RequestException as e:
print(e.response.text)
return potentially_exploited
def get_compromised_systems(self):
compromisedsystems = []
compromisedsystems.extend(self.get_botnet_infection()) # Adds all Botnet objects into compromised
compromisedsystems.extend(self.get_potentially_exploited())
return compromisedsystems
def get_spf_records(self):
spf = []
risk_vector = "spf"
try:
url = f"https://api.bitsighttech.com/ratings/v1/companies/{self.Company.Guid}/findings?" \
f"risk_vector={risk_vector}" \
f"&grade=BAD" \
f"&affects_rating=true" \
f"&last_seen_gt={last_3month}" \
f"&limit=1000000" # limit 1 million records for minimum request number
r = urltojson(url, api_key)
if int(r["count"]) > 0:
for line in r["results"]:
asset_name = line["evidence_key"]
grade = line["details"]["grade"]
details = []
for message in line["details"]["remediations"]:
detail = message["message"]
details.append(detail)
first_seen = line["first_seen"]
last_seen = line["last_seen"]
obj = Spf(first_seen, last_seen, asset_name, grade, details)
spf.append(obj)
except requests.exceptions.RequestException as e:
print(e.response.text)
return spf
def get_dkim(self):
dkim = []
risk_vector = "dkim"
try:
url = f"https://api.bitsighttech.com/ratings/v1/companies/{self.Company.Guid}/findings?" \
f"risk_vector={risk_vector}" \
f"&grade=BAD" \
f"&affects_rating=true" \
f"&last_seen_gt={last_3month}" \
f"&limit=1000000" # limit 1 million records for minimum request number
r = urltojson(url, api_key)
if int(r["count"]) > 0:
for line in r["results"]:
asset_name = line["evidence_key"]
grade = line["details"]["grade"]
details = []
for message in line["details"]["remediations"]:
detail = message["message"]
details.append(detail)
first_seen = line["first_seen"]
last_seen = line["last_seen"]
obj = Dkim(first_seen, last_seen, asset_name, grade, details)
dkim.append(obj)
except requests.exceptions.RequestException as e:
print(e.response.text)
return dkim
def get_ssl_configuration(self):
sslConfig = []
risk_vector = "ssl_configurations"
try:
url = f"https://api.bitsighttech.com/ratings/v1/companies/{self.Company.Guid}/findings?" \
f"risk_vector={risk_vector}" \
f"&grade=BAD" \
f"&affects_rating=true" \
f"&last_seen_gt={last_3month}" \
f"&limit=1000000" # limit 1 million records for minimum request number
r = urltojson(url, api_key)
if int(r["count"]) > 0:
for line in r["results"]:
asset_name = line["evidence_key"]
grade = line["details"]["grade"]
details = []
for message in line["details"]["remediations"]:
detail = message["message"]
details.append(detail)
first_seen = line["first_seen"]
last_seen = line["last_seen"]
obj = SSLConfiguration(first_seen, last_seen, asset_name, grade, details)
sslConfig.append(obj)
except requests.exceptions.RequestException as e:
print(e.response.text)
return sslConfig
def get_ssl_certificates(self):
sslCert = []
risk_vector = "ssl_certificates"
try:
url = f"https://api.bitsighttech.com/ratings/v1/companies/{self.Company.Guid}/findings?" \
f"risk_vector={risk_vector}" \
f"&grade=BAD" \
f"&affects_rating=true" \
f"&last_seen_gt={last_3month}" \
f"&limit=1000000" # limit 1 million records for minimum request number
r = urltojson(url, api_key)
if int(r["count"]) > 0:
for line in r["results"]:
asset_name = line["evidence_key"]
grade = line["details"]["grade"]
details = []
for message in line["details"]["remediations"]:
detail = message["message"]
details.append(detail)
first_seen = line["first_seen"]
last_seen = line["last_seen"]
obj = SSLCertificates(first_seen, last_seen, asset_name, grade, details)
sslCert.append(obj)
except requests.exceptions.RequestException as e:
print(e.response.text)
return sslCert
def get_open_ports(self):
open_port = []
risk_vector = "open_ports"
try:
url = f"https://api.bitsighttech.com/ratings/v1/companies/{self.Company.Guid}/findings?" \
f"risk_vector={risk_vector}" \
f"&grade=BAD" \
f"&affects_rating=true" \
f"&last_seen_gt={last_3month}" \
f"&limit=1000000" # limit 1 million records for minimum request number
r = urltojson(url, api_key)
if int(r["count"]) > 0:
for line in r["results"]:
portnumber = line["details"]["dest_port"]
asset_name = line["evidence_key"]
grade = line["details"]["grade"]
details = []
for message in line["details"]["remediations"]:
detail = message["message"]
details.append(detail)
first_seen = line["first_seen"]
last_seen = line["last_seen"]
obj = OpenPorts(portnumber, first_seen, last_seen, asset_name, grade, details)
open_port.append(obj)
except requests.exceptions.RequestException as e:
print(e.response.text)
return open_port
def get_web_application_headers(self):
web = []
risk_vector = "application_security"
try:
url = f"https://api.bitsighttech.com/ratings/v1/companies/{self.Company.Guid}/findings?" \
f"risk_vector={risk_vector}" \
f"&grade=BAD" \
f"&affects_rating=true" \
f"&last_seen_gt={last_3month}" \
f"&limit=1000000" # limit 1 million records for minimum request number
r = urltojson(url, api_key)
if int(r["count"]) > 0:
for line in r["results"]:
asset_name = line["evidence_key"]
grade = line["details"]["grade"]
details = []
for message in line["details"]["remediations"]:
detail = message["message"]
details.append(detail)
first_seen = line["first_seen"]
last_seen = line["last_seen"]
obj = WebApplicationHeaders(first_seen, last_seen, asset_name, grade, details)
web.append(obj)
except requests.exceptions.RequestException as e:
print(e.response.text)
return web
def get_patching_cadence(self):
patchingcadence = []
risk_vector = "patching_cadence"
try:
url = f"https://api.bitsighttech.com/ratings/v1/companies/{self.Company.Guid}/findings?" \
f"risk_vector={risk_vector}" \
f"&affects_rating=true" \
f"&last_seen_gt={last_3month}" \
f"&limit=1000000" # limit 1 million records for minimum request number
r = urltojson(url, api_key)
if int(r["count"]) > 0:
for line in r["results"]:
if line["details"]["diligence_annotations"]["is_remediated"] == False:
asset_name = line["evidence_key"]
remediated = line["details"]["diligence_annotations"]["is_remediated"]
details = []
| |
"""
Skrafldb - persistent data management for the Netskrafl application
Copyright (C) 2020 <NAME>.
Author: <NAME>
The GNU General Public License, version 3, applies to this software.
For further information, see https://github.com/mideind/Netskrafl
This module stores data in the Google App Engine NDB
(see https://developers.google.com/appengine/docs/python/ndb/).
The data model is as follows:
UserModel:
nickname : string
inactive : boolean
prefs : dict
timestamp : timestamp
MoveModel:
coord : string
tiles : string # Blanks are denoted by '?' followed by meaning
score : integer
rack : string # Contents of rack after move
timestamp : timestamp
GameModel:
player0 : key into UserModel
player1 : key into UserModel
irack0 : string # Initial rack
irack1 : string
rack0 : string # Current rack
rack1 : string
score0 : integer
score1 : integer
to_move : integer # Whose move is it, 0 or 1
over : boolean # Is the game over?
timestamp : timestamp # Start time of game
ts_last_move : timestamp # Time of last move
moves : array of MoveModel
FavoriteModel:
parent = key into UserModel
destuser: key into UserModel
ChallengeModel:
parent = key into UserModel
destuser : key into UserModel
timestamp : timestamp
prefs : dict
According to the NDB documentation, an ideal index for a query
should contain - in the order given:
1) Properties used in equality filters
2) Property used in an inequality filter (only one allowed)
3) Properties used for ordering
"""
# pylint: disable=too-many-lines
import logging
import uuid
from datetime import datetime
# The following is a hack/workaround for a Google bug
# import six; reload(six)
from google.cloud import ndb
from languages import Alphabet
from cache import memcache
class Client:
    """ Wrapper for the ndb client instance singleton """
    # Created once at import time and shared by every request
    _client = ndb.Client()
    # Cross-instance (global) NDB cache backed by Redis
    _global_cache = ndb.RedisCache(memcache.get_redis_client())
    def __init__(self):
        pass
    @classmethod
    def get_context(cls):
        """ Return a fresh ndb context bound to the client singleton,
        using the Redis-backed global cache """
        return cls._client.context(global_cache=cls._global_cache)
class Context:
    """ Wrapper for NDB context operations """
    def __init__(self):
        pass
    @staticmethod
    def disable_cache():
        """ Disable the NDB in-context cache for the current context """
        ndb.get_context().set_cache_policy(False)
class Unique:
    """ Wrapper for generation of unique id strings for keys """
    def __init__(self):
        pass
    @staticmethod
    def id():
        """ Generates unique id strings """
        # NOTE: uuid1() is time- and node-based (not random); switch to
        # uuid4() if unpredictability is ever required
        return str(uuid.uuid1())
def iter_q(q, chunk_size=50, limit=0, projection=None):
    """ Yield the results of query q one at a time, paging through the
    query with a cursor in chunks of chunk_size.  A nonzero limit caps the
    total number of entities yielded. """
    if 0 < limit < chunk_size:
        # Never fetch more entities than the caller wants
        chunk_size = limit
    batch, cursor, more = q.fetch_page(chunk_size, projection=projection)
    yielded = 0
    while batch:
        for entity in batch:
            yield entity
            yielded += 1
            if limit and yielded >= limit:
                # A limit was set and we've reached it: stop
                return
        if not (more and cursor):
            # The query is exhausted: stop
            return
        # Fetch the next chunk, resuming at the cursor
        batch, cursor, more = q.fetch_page(
            chunk_size,
            start_cursor=cursor,
            projection=projection
        )
class UserModel(ndb.Model):
    """ Models an individual user """
    nickname = ndb.StringProperty()
    email = ndb.StringProperty(required=False, default=None)
    image = ndb.StringProperty(required=False, default=None)
    # Google Account identifier (unfortunately different from GAE user id)
    account = ndb.StringProperty(required=False, default=None)
    # Lower case nickname and full name of user - used for search
    nick_lc = ndb.StringProperty(required=False, default=None)
    name_lc = ndb.StringProperty(required=False, default=None)
    # True when the account has been deactivated
    inactive = ndb.BooleanProperty()
    # Free-form user preference dict
    prefs = ndb.JsonProperty()
    # Set automatically when the entity is first stored
    timestamp = ndb.DateTimeProperty(auto_now_add=True)
    # Ready for challenges?
    ready = ndb.BooleanProperty(required=False, default=False)
    # Ready for timed challenges?
    ready_timed = ndb.BooleanProperty(required=False, default=False)
    # Elo points
    elo = ndb.IntegerProperty(required=False, default=0, indexed=True)
    # Elo points for human-only games
    human_elo = ndb.IntegerProperty(required=False, default=0, indexed=True)
    # Best total score in a game
    highest_score = ndb.IntegerProperty(
        required=False, default=0, indexed=True)
    # Note: indexing of string properties is mandatory
    highest_score_game = ndb.StringProperty(required=False, default=None)
    # Best word laid down
    # Note: indexing of string properties is mandatory
    best_word = ndb.StringProperty(required=False, default=None)
    best_word_score = ndb.IntegerProperty(
        required=False, default=0, indexed=True)
    # Note: indexing of string properties is mandatory
    best_word_game = ndb.StringProperty(required=False, default=None)
    @classmethod
    def create(cls, user_id, account, email, nickname, image, preferences=None):
        """ Create a new user entity and return the id of its datastore key.

        NOTE(review): name_lc is not initialized here - presumably it is
        set later, when the user's full name becomes known; confirm.
        """
        user = cls(id=user_id)
        user.image = image
        user.account = account
        user.email = email
        user.nickname = nickname  # Default to the same nickname
        user.nick_lc = nickname.lower()
        user.inactive = False  # A new user is always active
        user.prefs = preferences or {}  # Default to no preferences
        user.ready = False  # Not ready for new challenges unless explicitly set
        user.ready_timed = False  # Not ready for timed games unless explicitly set
        # put() persists the entity and returns its key
        return user.put().id()
    @classmethod
    def fetch(cls, user_id):
        """ Fetch a user entity by id, bypassing both the in-context and
            the global NDB cache so a fresh copy is read from the datastore """
        return cls.get_by_id(user_id, use_cache=False, use_global_cache=False)
@classmethod
def fetch_account(cls, account):
""" Attempt to fetch a user by Google account id """
q = cls.query(UserModel.account == account)
return q.get()
@classmethod
def fetch_email(cls, email):
""" Attempt to fetch a user by email """
if not email:
return None
q = cls.query(UserModel.email == email.lower())
result = q.fetch()
if not result:
return None
# If multiple user records have the same email, return the newest one
# - but try to keep user records with elo==0 out of the picture
return sorted(result, key=lambda u: (u.elo > 0, u.timestamp), reverse=True)[0]
@classmethod
def fetch_multi(cls, user_ids):
""" Fetch multiple user entities by id list """
# Google NDB/RPC doesn't allow more than 1000 entities per get_multi() call
MAX_CHUNK = 1000
result = []
ix = 0
user_ids = list(user_ids)
end = len(user_ids)
while ix < end:
keys = [ndb.Key(UserModel, uid)
for uid in user_ids[ix: ix + MAX_CHUNK]]
len_keys = len(keys)
if ix == 0 and len_keys == end:
# Most common case: just a single, complete read
return ndb.get_multi(keys)
# Otherwise, accumulate chunks
result.extend(ndb.get_multi(keys))
ix += len_keys
return result
    @staticmethod
    def put_multi(recs):
        """ Insert or update multiple user records in a single batch call """
        ndb.put_multi(recs)
    @classmethod
    def count(cls):
        """ Return a count of user entities """
        # Beware: this seems to be EXTREMELY slow on Google Cloud Datastore
        # NOTE(review): consider maintaining a counter entity instead if
        # this is called on a hot path
        return cls.query().count()
    @classmethod
    def list(cls, nick_from, nick_to, max_len=100):
        """ Query for a list of users within a nickname range.

        Generator yielding dicts describing active users whose nicknames
        fall within [nick_from, nick_to], compared by the custom Alphabet
        collation order. Yields at most max_len results (0/None = no limit).
        """
        nick_from = u"a" if nick_from is None else Alphabet.tolower(nick_from)
        nick_to = u"ö" if nick_to is None else Alphabet.tolower(nick_to)
        # Map the first letter of each boundary to its collation index,
        # falling back to the full range on unknown letters
        try:
            o_from = Alphabet.full_order.index(nick_from[0])
        except ValueError:
            o_from = 0
        try:
            o_to = Alphabet.full_order.index(nick_to[0])
        except ValueError:
            o_to = len(Alphabet.full_order) - 1
        # We do this by issuing a series of queries, each returning
        # nicknames beginning with a particular letter.
        # These shenanigans are necessary because NDB maintains its string
        # indexes by Unicode ordinal index, which is quite different from
        # the actual sort collation order we need. Additionally, the
        # indexes are case-sensitive while our query boundaries are not.
        # Prepare the list of query letters
        q_letters = []
        for i in range(o_from, o_to + 1):
            # Append the lower case letter
            q_letters.append(Alphabet.full_order[i])
            # Append the upper case letter
            q_letters.append(Alphabet.full_upper[i])
        # For aesthetic cleanliness, sort the query letters (in Unicode order)
        q_letters.sort()
        count = 0
        for q_from in q_letters:
            # NOTE(review): unichr is Python 2 only; this module appears
            # to target Python 2 - confirm before porting
            q_to = unichr(ord(q_from) + 1)
            q = cls.query(
                ndb.AND(UserModel.nickname >= q_from,
                        UserModel.nickname < q_to)
            )
            # Individual letters contain >600 users as of 2015-02-12
            CHUNK_SIZE = 1000
            for um in iter_q(q, chunk_size=CHUNK_SIZE):
                if not um.inactive:
                    # This entity matches: return a dict describing it
                    yield dict(
                        id=um.key.id(),
                        nickname=um.nickname,
                        prefs=um.prefs,
                        timestamp=um.timestamp,
                        ready=um.ready,
                        ready_timed=um.ready_timed,
                        human_elo=um.human_elo
                    )
                # Note: inactive users also count towards the limit
                count += 1
                if max_len and count >= max_len:
                    # Reached limit: done
                    return
@classmethod
def list_prefix(cls, prefix, max_len=50):
""" Query for a list of users having a name or nick with the given prefix """
if not prefix:
# No prefix means nothing is returned
return
prefix = prefix.lower()
id_set = set()
def list_q(q, f):
""" Yield the results of a user query """
CHUNK_SIZE = 50
for um in iter_q(q, chunk_size=CHUNK_SIZE):
if not f(um).startswith(prefix):
# Iterated past the prefix
return
if not um.inactive and not um.key.id() in id_set:
# This entity matches and has not already been
# returned: yield a dict describing it
yield dict(
id=um.key.id(),
nickname=um.nickname,
prefs=um.prefs,
timestamp=um.timestamp,
ready=um.ready,
ready_timed=um.ready_timed,
human_elo=um.human_elo,
image=um.image
)
id_set.add(um.key.id())
counter = 0
# Return users with nicknames matching the prefix
q = cls.query(UserModel.nick_lc >= prefix).order(UserModel.nick_lc)
for ud in list_q(q, lambda um: um.nick_lc or ""):
yield ud
counter += | |
# -*- coding: utf-8 -*-
import datetime
import os
import mock
from django.contrib.auth.models import User
from django.test import TestCase
from django_dynamic_fixture import fixture, get
from django.utils import timezone
from allauth.socialaccount.models import SocialAccount
from readthedocs.builds.constants import (
BRANCH,
EXTERNAL,
GITHUB_EXTERNAL_VERSION_NAME,
GENERIC_EXTERNAL_VERSION_NAME
)
from readthedocs.builds.models import Build, Version
from readthedocs.doc_builder.config import load_yaml_config
from readthedocs.doc_builder.environments import LocalBuildEnvironment
from readthedocs.doc_builder.python_environments import Virtualenv
from readthedocs.oauth.models import RemoteRepository
from readthedocs.projects.models import EnvironmentVariable, Project
from readthedocs.projects.tasks import UpdateDocsTaskStep
from readthedocs.rtd_tests.tests.test_config_integration import create_load
from ..mocks.environment import EnvironmentMockGroup
class BuildEnvironmentTests(TestCase):
    """Run the documentation build pipeline (UpdateDocsTaskStep) against
    mocked build environments and assert on the commands that are issued
    and the builders that are invoked."""
    def setUp(self):
        # Patch out subprocess/API interactions for every test
        self.mocks = EnvironmentMockGroup()
        self.mocks.start()
    def tearDown(self):
        # Undo all patches started in setUp
        self.mocks.stop()
    @mock.patch('readthedocs.doc_builder.config.load_config')
    def test_build(self, load_config):
        """Test full build."""
        load_config.side_effect = create_load()
        project = get(
            Project,
            slug='project-1',
            documentation_type='sphinx',
            conf_py_file='test_conf.py',
            versions=[fixture()],
        )
        version = project.versions.all()[0]
        self.mocks.configure_mock('api_versions', {'return_value': [version]})
        self.mocks.configure_mock(
            'api', {
                'get.return_value': {'downloads': 'no_url_here'},
            },
        )
        # Run the real HTML build path (popen itself is still mocked)
        self.mocks.patches['html_build'].stop()
        build_env = LocalBuildEnvironment(project=project, version=version, build={})
        python_env = Virtualenv(version=version, build_env=build_env)
        config = load_yaml_config(version)
        task = UpdateDocsTaskStep(
            build_env=build_env, project=project, python_env=python_env,
            version=version, config=config,
        )
        task.build_docs()
        # Get command and check first part of command list is a call to sphinx
        self.assertEqual(self.mocks.popen.call_count, 3)
        cmd = self.mocks.popen.call_args_list[2][0]
        self.assertRegex(cmd[0][0], r'python')
        self.assertRegex(cmd[0][1], r'sphinx-build')
    @mock.patch('readthedocs.doc_builder.config.load_config')
    def test_build_respects_pdf_flag(self, load_config):
        """Build output format control."""
        load_config.side_effect = create_load()
        project = get(
            Project,
            slug='project-1',
            documentation_type='sphinx',
            conf_py_file='test_conf.py',
            enable_pdf_build=True,
            enable_epub_build=False,
            versions=[fixture()],
        )
        version = project.versions.all()[0]
        build_env = LocalBuildEnvironment(project=project, version=version, build={})
        python_env = Virtualenv(version=version, build_env=build_env)
        config = load_yaml_config(version)
        task = UpdateDocsTaskStep(
            build_env=build_env, project=project, python_env=python_env,
            version=version, config=config,
        )
        task.build_docs()
        # The HTML and the PDF format were built.
        self.mocks.html_build.assert_called_once_with()
        self.mocks.pdf_build.assert_called_once_with()
        # Epub however was disabled and therefore not built.
        self.assertFalse(self.mocks.epub_build.called)
    @mock.patch('readthedocs.doc_builder.config.load_config')
    def test_dont_localmedia_build_pdf_epub_search_in_mkdocs(self, load_config):
        """For mkdocs projects only the mkdocs HTML builder runs, regardless
        of the pdf/epub flags."""
        load_config.side_effect = create_load()
        project = get(
            Project,
            slug='project-1',
            documentation_type='mkdocs',
            enable_pdf_build=True,
            enable_epub_build=True,
            versions=[fixture()],
        )
        version = project.versions.all().first()
        build_env = LocalBuildEnvironment(
            project=project,
            version=version,
            build={},
        )
        python_env = Virtualenv(version=version, build_env=build_env)
        config = load_yaml_config(version)
        task = UpdateDocsTaskStep(
            build_env=build_env, project=project, python_env=python_env,
            version=version, config=config,
        )
        task.build_docs()
        # Only html for mkdocs was built
        self.mocks.html_build_mkdocs.assert_called_once()
        self.mocks.html_build.assert_not_called()
        self.mocks.localmedia_build.assert_not_called()
        self.mocks.pdf_build.assert_not_called()
        self.mocks.epub_build.assert_not_called()
    @mock.patch('readthedocs.doc_builder.config.load_config')
    def test_build_respects_epub_flag(self, load_config):
        """Test build with epub enabled."""
        load_config.side_effect = create_load()
        project = get(
            Project,
            slug='project-1',
            documentation_type='sphinx',
            conf_py_file='test_conf.py',
            enable_pdf_build=False,
            enable_epub_build=True,
            versions=[fixture()],
        )
        version = project.versions.all()[0]
        build_env = LocalBuildEnvironment(project=project, version=version, build={})
        python_env = Virtualenv(version=version, build_env=build_env)
        config = load_yaml_config(version)
        task = UpdateDocsTaskStep(
            build_env=build_env, project=project, python_env=python_env,
            version=version, config=config,
        )
        task.build_docs()
        # The HTML and the Epub format were built.
        self.mocks.html_build.assert_called_once_with()
        self.mocks.epub_build.assert_called_once_with()
        # PDF however was disabled and therefore not built.
        self.assertFalse(self.mocks.pdf_build.called)
    @mock.patch('readthedocs.doc_builder.config.load_config')
    def test_build_respects_yaml(self, load_config):
        """Test YAML build options."""
        # The YAML config requests only the epub format
        load_config.side_effect = create_load({'formats': ['epub']})
        project = get(
            Project,
            slug='project-1',
            documentation_type='sphinx',
            conf_py_file='test_conf.py',
            enable_pdf_build=False,
            enable_epub_build=False,
            versions=[fixture()],
        )
        version = project.versions.all()[0]
        build_env = LocalBuildEnvironment(project=project, version=version, build={})
        python_env = Virtualenv(version=version, build_env=build_env)
        config = load_yaml_config(version)
        task = UpdateDocsTaskStep(
            build_env=build_env, project=project, python_env=python_env,
            version=version, config=config,
        )
        task.build_docs()
        # The HTML and the Epub format were built.
        self.mocks.html_build.assert_called_once_with()
        self.mocks.epub_build.assert_called_once_with()
        # PDF however was disabled and therefore not built.
        self.assertFalse(self.mocks.pdf_build.called)
    @mock.patch('readthedocs.doc_builder.config.load_config')
    def test_build_pdf_latex_failures(self, load_config):
        """Build failure if latex fails."""
        load_config.side_effect = create_load()
        self.mocks.patches['html_build'].stop()
        self.mocks.patches['pdf_build'].stop()
        project = get(
            Project,
            slug='project-1',
            documentation_type='sphinx',
            conf_py_file='test_conf.py',
            enable_pdf_build=True,
            enable_epub_build=False,
            versions=[fixture()],
        )
        version = project.versions.all()[0]
        assert project.conf_dir() == '/tmp/rtd'
        build_env = LocalBuildEnvironment(project=project, version=version, build={})
        python_env = Virtualenv(version=version, build_env=build_env)
        config = load_yaml_config(version)
        task = UpdateDocsTaskStep(
            build_env=build_env, project=project, python_env=python_env,
            version=version, config=config,
        )
        # Mock out the separate calls to Popen using an iterable side_effect
        returns = [
            ((b'', b''), 0),  # sphinx-build html
            ((b'', b''), 0),  # sphinx-build pdf
            ((b'', b''), 1),  # sphinx version check
            ((b'', b''), 1),  # latex (fails, and no "Output written" marker)
            ((b'', b''), 0),  # makeindex
            ((b'', b''), 0),  # latex
        ]
        mock_obj = mock.Mock()
        mock_obj.communicate.side_effect = [
            output for (output, status)
            in returns
        ]
        type(mock_obj).returncode = mock.PropertyMock(
            side_effect=[status for (output, status) in returns],
        )
        self.mocks.popen.return_value = mock_obj
        with build_env:
            task.build_docs()
        self.assertEqual(self.mocks.popen.call_count, 8)
        self.assertTrue(build_env.failed)
    @mock.patch('readthedocs.doc_builder.config.load_config')
    def test_build_pdf_latex_not_failure(self, load_config):
        """Test pass during PDF builds and bad latex failure status code."""
        load_config.side_effect = create_load()
        self.mocks.patches['html_build'].stop()
        self.mocks.patches['pdf_build'].stop()
        project = get(
            Project,
            slug='project-2',
            documentation_type='sphinx',
            conf_py_file='test_conf.py',
            enable_pdf_build=True,
            enable_epub_build=False,
            versions=[fixture()],
        )
        version = project.versions.all()[0]
        assert project.conf_dir() == '/tmp/rtd'
        build_env = LocalBuildEnvironment(project=project, version=version, build={})
        python_env = Virtualenv(version=version, build_env=build_env)
        config = load_yaml_config(version)
        task = UpdateDocsTaskStep(
            build_env=build_env, project=project, python_env=python_env,
            version=version, config=config,
        )
        # Mock out the separate calls to Popen using an iterable side_effect
        returns = [
            ((b'', b''), 0),  # sphinx-build html
            ((b'', b''), 0),  # sphinx-build pdf
            ((b'', b''), 1),  # sphinx version check
            ((b'Output written on foo.pdf', b''), 1),  # latex: non-zero exit but PDF produced
            ((b'', b''), 0),  # makeindex
            ((b'', b''), 0),  # latex
        ]
        mock_obj = mock.Mock()
        mock_obj.communicate.side_effect = [
            output for (output, status)
            in returns
        ]
        type(mock_obj).returncode = mock.PropertyMock(
            side_effect=[status for (output, status) in returns],
        )
        self.mocks.popen.return_value = mock_obj
        with build_env:
            task.build_docs()
        self.assertEqual(self.mocks.popen.call_count, 8)
        self.assertTrue(build_env.successful)
    @mock.patch('readthedocs.projects.tasks.api_v2')
    @mock.patch('readthedocs.doc_builder.config.load_config')
    def test_save_config_in_build_model(self, load_config, api_v2):
        """The resolved config is stored on the build during run_setup."""
        load_config.side_effect = create_load()
        api_v2.build.get.return_value = {}
        project = get(
            Project,
            slug='project',
            documentation_type='sphinx',
        )
        build = get(Build)
        version = get(Version, slug='1.8', project=project)
        task = UpdateDocsTaskStep(
            project=project, version=version, build={'id': build.pk},
        )
        task.setup_vcs = mock.Mock()
        task.run_setup()
        build_config = task.build['config']
        # For patch
        api_v2.build.assert_called_once()
        assert build_config['version'] == '1'
        assert 'sphinx' in build_config
        assert build_config['doctype'] == 'sphinx'
    def test_get_env_vars(self):
        """Environment variables exposed to a build include the project's
        EnvironmentVariable records and conda paths when conda is active."""
        project = get(
            Project,
            slug='project',
            documentation_type='sphinx',
        )
        get(
            EnvironmentVariable,
            name='TOKEN',
            value='<PASSWORD>',
            project=project,
        )
        build = get(Build)
        version = get(Version, slug='1.8', project=project)
        task = UpdateDocsTaskStep(
            project=project, version=version, build={'id': build.pk},
        )
        # mock this object to make sure that we are NOT in a conda env
        task.config = mock.Mock(conda=None)
        env = {
            'READTHEDOCS': True,
            'READTHEDOCS_VERSION': version.slug,
            'READTHEDOCS_PROJECT': project.slug,
            'READTHEDOCS_LANGUAGE': project.language,
            'BIN_PATH': os.path.join(
                project.doc_path,
                'envs',
                version.slug,
                'bin',
            ),
            'TOKEN': '<PASSWORD>',
        }
        self.assertEqual(task.get_env_vars(), env)
        # mock this object to make sure that we are in a conda env
        task.config = mock.Mock(conda=True)
        env.update({
            'CONDA_ENVS_PATH': os.path.join(project.doc_path, 'conda'),
            'CONDA_DEFAULT_ENV': version.slug,
            'BIN_PATH': os.path.join(
                project.doc_path,
                'conda',
                version.slug,
                'bin',
            ),
        })
        self.assertEqual(task.get_env_vars(), env)
class BuildModelTests(TestCase):
    """Unit tests for the Build model: config de-duplication, staleness,
    external (PR) builds and previous-build chaining."""
    fixtures = ['test_data']
    def setUp(self):
        """Create a user, a generic project/version pair, and two versions
        of the fixture 'pip' project: one EXTERNAL (PR) and one BRANCH."""
        self.eric = User(username='eric')
        self.eric.set_password('<PASSWORD>')
        self.eric.save()
        self.project = get(Project)
        self.project.users.add(self.eric)
        self.version = get(Version, project=self.project)
        # 'pip' comes from the test_data fixture
        self.pip = Project.objects.get(slug='pip')
        self.external_version = get(
            Version,
            identifier='9F86D081884C7D659A2FEAA0C55AD015A',
            verbose_name='9999',
            slug='pr-9999',
            project=self.pip,
            active=True,
            type=EXTERNAL
        )
        self.pip_version = get(
            Version,
            identifier='origin/stable',
            verbose_name='stable',
            slug='stable',
            project=self.pip,
            active=True,
            type=BRANCH
        )
    def test_get_previous_build(self):
        """Build.previous walks backwards through builds of the same version."""
        build_one = get(
            Build,
            project=self.project,
            version=self.version,
            config={'version': 1},
        )
        build_two = get(
            Build,
            project=self.project,
            version=self.version,
            config={'version': 2},
        )
        build_three = get(
            Build,
            project=self.project,
            version=self.version,
            config={'version': 3},
            success=False,
        )
        self.assertIsNone(build_one.previous)
        self.assertEqual(build_two.previous, build_one)
        self.assertEqual(build_three.previous, build_two)
        self.assertEqual(build_three.previous.previous, build_one)
    def test_normal_save_config(self):
        """A build's config round-trips through save()."""
        build = get(
            Build,
            project=self.project,
            version=self.version,
        )
        build.config = {'version': 1}
        build.save()
        self.assertEqual(build.config, {'version': 1})
        build.config = {'version': 2}
        build.save()
        self.assertEqual(build.config, {'version': 2})
    def test_save_same_config(self):
        """A second build whose config differs from an earlier empty config
        still reads back its own config."""
        build_one = get(
            Build,
            project=self.project,
            version=self.version,
        )
        build_one.config = {}
        build_one.save()
        build_two = get(
            Build,
            project=self.project,
            version=self.version,
        )
        build_two.config = {'version': 2}
        build_two.save()
        self.assertEqual(build_two.config, {'version': 2})
    def test_save_same_config_previous_empty(self):
        """Re-saving a build after its (empty-config) predecessor keeps the
        stored config readable."""
        build_one = get(
            Build,
            project=self.project,
            version=self.version,
        )
        build_one.config = {}
        build_one.save()
        build_two = get(
            Build,
            project=self.project,
            version=self.version,
        )
        build_two.config = {}
        build_two.save()
        self.assertEqual(build_two.config, {})
        build_two.config = {'version': 2}
        build_two.save()
        self.assertEqual(build_two.config, {'version': 2})
    def test_do_not_save_same_config(self):
        """An identical config is stored as a reference to the earlier build
        (via Build.CONFIG_KEY) instead of being duplicated."""
        build_one = get(
            Build,
            project=self.project,
            version=self.version,
        )
        build_one.config = {'version': 1}
        build_one.save()
        build_two = get(
            Build,
            project=self.project,
            version=self.version,
        )
        build_two.config = {'version': 1}
        build_two.save()
        # _config holds the raw stored value; config resolves the reference
        self.assertEqual(build_two._config, {Build.CONFIG_KEY: build_one.pk})
        self.assertEqual(build_two.config, {'version': 1})
    def test_do_not_save_same_config_nested(self):
        """Chains of identical configs all point at the FIRST build that
        stored the config; a different config breaks the chain."""
        build_one = get(
            Build,
            project=self.project,
            version=self.version,
        )
        build_one.config = {'version': 1}
        build_one.save()
        build_two = get(
            Build,
            project=self.project,
            version=self.version,
        )
        build_two.config = {'version': 1}
        build_two.save()
        build_three = get(
            Build,
            project=self.project,
            version=self.version,
        )
        build_three.config = {'version': 1}
        build_three.save()
        build_four = get(
            Build,
            project=self.project,
            version=self.version,
        )
        build_four.config = {'version': 2}
        build_four.save()
        self.assertEqual(build_one.config, {'version': 1})
        self.assertEqual(build_one._config, {'version': 1})
        # Both duplicates reference build_one, not each other
        self.assertEqual(build_two._config, {Build.CONFIG_KEY: build_one.pk})
        self.assertEqual(build_three._config, {Build.CONFIG_KEY: build_one.pk})
        self.assertEqual(build_two.config, {'version': 1})
        self.assertEqual(build_three.config, {'version': 1})
        self.assertEqual(build_four.config, {'version': 2})
        self.assertEqual(build_four._config, {'version': 2})
    def test_do_not_reference_empty_configs(self):
        """Empty configs are stored as-is, never as references."""
        build_one = get(
            Build,
            project=self.project,
            version=self.version,
        )
        build_one.config = {}
        build_one.save()
        build_two = get(
            Build,
            project=self.project,
            version=self.version,
        )
        build_two.config = {}
        build_two.save()
        self.assertEqual(build_two._config, {})
        self.assertEqual(build_two.config, {})
    def test_build_is_stale(self):
        """A build is stale when it is old AND still in 'triggered' state;
        finished builds and recently triggered builds are not stale."""
        now = timezone.now()
        build_one = get(
            Build,
            project=self.project,
            version=self.version,
            date=now - datetime.timedelta(minutes=8),
            state='finished'
        )
        build_two = get(
            Build,
            project=self.project,
            version=self.version,
            date=now - datetime.timedelta(minutes=6),
            state='triggered'
        )
        build_three = get(
            Build,
            project=self.project,
            version=self.version,
            date=now - datetime.timedelta(minutes=2),
            state='triggered'
        )
        self.assertFalse(build_one.is_stale)
        self.assertTrue(build_two.is_stale)
        self.assertFalse(build_three.is_stale)
    def test_using_latest_config(self):
        """using_latest_config() is true only once a versioned config
        has been saved on the build."""
        now = timezone.now()
        build = get(
            Build,
            project=self.project,
            version=self.version,
            date=now - datetime.timedelta(minutes=8),
            state='finished',
        )
        self.assertFalse(build.using_latest_config())
        build.config = {'version': 2}
        build.save()
        self.assertTrue(build.using_latest_config())
    def test_build_is_external(self):
        """A build of an EXTERNAL (pull request) version reports is_external."""
        # Turn the build version to EXTERNAL type.
        self.version.type = EXTERNAL
        self.version.save()
        external_build = get(
            Build,
            project=self.project,
            version=self.version,
            config={'version': 1},
        )
        self.assertTrue(external_build.is_external)
    def test_build_is_not_external(self):
        """A build of a regular (non-EXTERNAL) version is not external."""
        build = get(
            Build,
            project=self.project,
            version=self.version,
            config={'version': 1},
        )
        self.assertFalse(build.is_external)
    def test_no_external_version_name(self):
        """Non-external builds have no external version name."""
        build = get(
            Build,
            project=self.project,
            version=self.version,
            config={'version': 1},
        )
        self.assertEqual(build.external_version_name, None)
def test_external_version_name_github(self):
social_account = get(SocialAccount, provider='github')
remote_repo = get(
RemoteRepository,
account=social_account,
| |
#!/usr/bin/env python3
#
# Copyright (c) 2020-2021 Couchbase, Inc All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import glob
import os
from os import path
import time
import json
import datetime
import re
import webbrowser
import logging
import sys
import zipfile
import pathlib
# local imports
import util
import templating
import dashboard
PROMETHEUS_BIN = 'prometheus'
PROMTIMER_DIR = '.promtimer'
PROMTIMER_LOGS_DIR = path.join(PROMTIMER_DIR, 'logs')
GRAFANA_BIN = 'grafana-server'
STATS_SNAPSHOT_DIR_NAME = 'stats_snapshot'
COUCHBASE_LOG = 'couchbase.log'
def make_snapshot_dir_path(candidate_cbcollect_dir):
    """
    Build the path of the 'stats_snapshot' directory that would live
    directly inside candidate_cbcollect_dir.
    :type candidate_cbcollect_dir: pathlib.Path
    :rtype: pathlib.Path
    """
    snapshot_name = '{}'.format(STATS_SNAPSHOT_DIR_NAME)
    return candidate_cbcollect_dir / snapshot_name
def snapshot_dir_exists(candidate_cbcollect_dir):
    """
    Returns whether or not the 'stats_snapshot' directory inside
    candidate_cbcollect_dir exists.
    :type candidate_cbcollect_dir: pathlib.Path
    :rtype: bool
    """
    return make_snapshot_dir_path(candidate_cbcollect_dir).exists()
def is_cbcollect_dir(candidate_path):
    """
    Guess whether candidate_path is a cbcollect directory: it must be a
    directory that contains a 'stats_snapshot' subdirectory.
    :type candidate_path: pathlib.Path
    :rtype: bool
    """
    if not candidate_path.is_dir():
        return False
    return snapshot_dir_exists(candidate_path)
def is_executable_file(candidate_file):
    """Return True if candidate_file is a regular file with execute permission."""
    if not os.path.isfile(candidate_file):
        return False
    return os.access(candidate_file, os.X_OK)
def find_cbcollect_dirs():
    """Return the sorted cbcollect_info* directories in the current directory."""
    candidates = sorted(glob.glob('cbcollect_info*'))
    return [c for c in candidates if is_cbcollect_dir(pathlib.Path(c))]
def is_stats_snapshot_file(filename):
    """
    Returns whether filename contains a 'stats_snapshot' path component (and
    thus is a file we probably want to extract from a cbcollect zip).
    :type filename: string
    :rtype: bool
    """
    return '/{}/'.format(STATS_SNAPSHOT_DIR_NAME) in filename
def maybe_extract_from_zipfile(zip_file):
    """
    Extract files needed for Promtimer to run if necessary. Files needed by Promtimer are:
    * everything under the stats_snapshot directory; nothing is extracted if the
      stats_snapshot directory is already present
    * couchbase.log: extracted if not present
    :type zip_file: zipfile.ZipFile
    """
    root = zipfile.Path(zip_file)
    for p in root.iterdir():
        if is_cbcollect_dir(p):
            # Check on disk (not in the zip) whether the snapshot was
            # already extracted by a previous run
            stats_snapshot_exists = snapshot_dir_exists(pathlib.Path(p.name))
            logging.debug("{}/stats_snapshot exists: {}".format(p.name, stats_snapshot_exists))
            extracting = False
            for item in zip_file.infolist():
                # Re-join with the local separator so path.exists works on
                # all platforms
                item_path = path.join(*item.filename.split('/'))
                should_extract = False
                if is_stats_snapshot_file(item.filename):
                    should_extract = not stats_snapshot_exists
                elif item.filename.endswith(COUCHBASE_LOG):
                    should_extract = not path.exists(item_path)
                if should_extract:
                    logging.debug("zipfile item:{}, exists:{}".format(item_path, path.exists(item_path)))
                    if not extracting:
                        # Log once per zip, on the first extracted item
                        extracting = True
                        logging.info('extracting stats, couchbase.log from cbcollect zip:{}'
                                     .format(zip_file.filename))
                    zip_file.extract(item)
def get_cbcollect_dirs():
    """Unzip any cbcollect zips in the current directory (as needed), then
    return the list of cbcollect directories found."""
    for zip_name in sorted(glob.glob('*.zip')):
        with zipfile.ZipFile(zip_name) as zf:
            maybe_extract_from_zipfile(zf)
    return find_cbcollect_dirs()
def get_prometheus_times(cbcollect_dir):
    """Return (earliest minTime, latest maxTime) across the meta.json files
    of every Prometheus snapshot under cbcollect_dir/stats_snapshot."""
    metas = []
    pattern = path.join(cbcollect_dir, 'stats_snapshot', '*', 'meta.json')
    for meta_file in glob.glob(pattern):
        with open(meta_file, 'r') as f:
            metas.append(json.load(f))
    return (min(m['minTime'] for m in metas),
            max(m['maxTime'] for m in metas))
def get_prometheus_min_and_max_times(cbcollects):
    """Return the overall (earliest minTime, latest maxTime) over all
    of the given cbcollect directories."""
    starts, ends = zip(*(get_prometheus_times(c) for c in cbcollects))
    return min(starts), max(ends)
def start_prometheuses(cbcollects, base_port, log_dir):
    """
    Start one Prometheus server per cbcollect directory, serving that
    directory's stats_snapshot on base_port + index.
    :type cbcollects: list of strings (directory names)
    :type base_port: int
    :type log_dir: string
    :rtype: list of started processes (see util.start_process)
    """
    nodes = []
    for i, cbcollect in enumerate(cbcollects):
        log_path = path.join(log_dir, 'prom-{}.log'.format(i))
        listen_addr = '0.0.0.0:{}'.format(base_port + i)
        args = [PROMETHEUS_BIN,
                '--config.file', path.join(util.get_root_dir(), 'noscrape.yml'),
                '--storage.tsdb.path', path.join(cbcollect, 'stats_snapshot'),
                '--storage.tsdb.no-lockfile',
                '--storage.tsdb.retention.time', '10y',
                '--web.listen-address', listen_addr]
        logging.info('starting prometheus server {} (on {} against {}; logging to {})'
                     .format(i, listen_addr, path.join(cbcollect, 'stats_snapshot'),
                             log_path))
        node = util.start_process(args, log_path)
        nodes.append(node)
    return nodes
def get_data_source_template():
    """ Return the contents of the Grafana data-source YAML template """
    with open(path.join(util.get_root_dir(), 'data-source.yaml'), 'r') as file:
        return file.read()
def get_provisioning_dir():
    """ Return the path of the local Grafana provisioning directory """
    return path.join(PROMTIMER_DIR, 'provisioning')
def get_dashboards_dir():
    """ Return the path of the provisioned dashboards directory """
    return path.join(get_provisioning_dir(), 'dashboards')
def get_plugins_dir():
    """ Return the path of the provisioned plugins directory """
    return path.join(get_provisioning_dir(), 'plugins')
def get_notifiers_dir():
    """ Return the path of the provisioned notifiers directory """
    return path.join(get_provisioning_dir(), 'notifiers')
def get_custom_ini_template():
    """ Return the contents of the Grafana custom.ini template """
    with open(path.join(util.get_root_dir(), 'custom.ini'), 'r') as file:
        return file.read()
def get_home_dashboard():
    """ Return the contents of the home dashboard JSON template """
    with open(path.join(util.get_root_dir(), 'home.json'), 'r') as file:
        return file.read()
def make_custom_ini(grafana_http_port):
    """
    Render the Grafana custom.ini from its template, substituting the
    current working directory and the HTTP port, and write it under
    the Promtimer directory.
    :type grafana_http_port: int
    """
    os.makedirs(PROMTIMER_DIR, exist_ok=True)
    replacements = {'absolute-path-to-cwd': os.path.abspath('.'),
                    'grafana-http-port': str(grafana_http_port)}
    template = get_custom_ini_template()
    contents = templating.replace(template, replacements)
    with open(path.join(PROMTIMER_DIR, 'custom.ini'), 'w') as file:
        file.write(contents)
def make_home_dashboard():
    """ Copy the home dashboard JSON into the Promtimer directory """
    dash = get_home_dashboard()
    with open(path.join(PROMTIMER_DIR, 'home.json'), 'w') as file:
        file.write(dash)
def make_dashboards_yaml():
    """
    Render dashboards.yaml from its template (substituting the current
    working directory) into the provisioned dashboards directory.
    """
    os.makedirs(get_dashboards_dir(), exist_ok=True)
    with open(path.join(util.get_root_dir(), 'dashboards.yaml'), 'r') as file:
        replacements = {'absolute-path-to-cwd': os.path.abspath('.')}
        contents = templating.replace(file.read(), replacements)
        with open(path.join(get_dashboards_dir(), 'dashboards.yaml'), 'w') as file_to_write:
            file_to_write.write(contents)
def make_dashboards(data_sources, buckets, times):
    """
    Generate one provisioned dashboard JSON file per dashboard meta file,
    templated over the given data sources and buckets and clamped to the
    [times[0], times[1]] window (epoch milliseconds).
    :type data_sources: list of strings
    :type buckets: list of strings
    :type times: (min_time_ms, max_time_ms) pair
    """
    os.makedirs(get_dashboards_dir(), exist_ok=True)
    # meta.json times are in milliseconds; convert to datetime
    min_time = datetime.datetime.fromtimestamp(times[0] / 1000.0)
    max_time = datetime.datetime.fromtimestamp(times[1] / 1000.0)
    template_params = \
        [{'type': 'data-source-name', 'values': data_sources},
         {'type': 'bucket', 'values': buckets}]
    meta_file_names = glob.glob(path.join(util.get_root_dir(), 'dashboards', '*.json'))
    for meta_file_name in meta_file_names:
        with open(meta_file_name, 'r') as meta_file:
            meta = json.loads(meta_file.read())
            base_file_name = path.basename(meta_file_name)
            dash = dashboard.make_dashboard(meta, template_params, min_time, max_time)
            # Use the file name (sans .json) as the stable dashboard uid
            dash['uid'] = base_file_name[:-len('.json')]
            with open(path.join(get_dashboards_dir(), base_file_name), 'w') as file:
                file.write(json.dumps(dash, indent=2))
def make_data_sources(data_sources_names, base_port):
    """
    Write one Grafana data-source YAML file per data source name; data
    source i points at the Prometheus instance listening on base_port + i.
    :type data_sources_names: list of strings
    :type base_port: int
    """
    datasources_dir = path.join(get_provisioning_dir(), 'datasources')
    os.makedirs(datasources_dir, exist_ok=True)
    template = get_data_source_template()
    # (removed a redundant re-assignment of data_source_name here:
    # enumerate() already yields the element)
    for i, data_source_name in enumerate(data_sources_names):
        replacement_map = {'data-source-name': data_source_name,
                           'data-source-port' : str(base_port + i)}
        filename = path.join(datasources_dir, 'ds-{}.yaml'.format(data_source_name))
        with open(filename, 'w') as file:
            file.write(templating.replace(template, replacement_map))
def try_get_data_source_names(cbcollect_dirs, pattern, name_format):
    """
    Map each cbcollect directory name to a data source name by applying
    name_format to the groups captured by pattern (directories that don't
    match keep their own name). Returns the list only if all resulting
    names are unique, else None.
    """
    names = []
    for dirname in cbcollect_dirs:
        match = re.match(pattern, dirname)
        names.append(name_format.format(*match.groups()) if match else dirname)
    if len(set(names)) != len(names):
        # At least two directories mapped to the same name
        return None
    return names
def get_data_source_names(cbcollect_dirs):
    """
    Choose the shortest naming scheme that yields a unique data source
    name per cbcollect directory, falling back to the directory names
    themselves if no scheme disambiguates.
    """
    regex = re.compile('cbcollect_info_ns_(\d+)\@(.*)_(\d+)-(\d+)')
    candidate_formats = ['{1}', 'ns_{0}@{1}', '{1}-{2}-{3}', 'ns_{0}-{1}-{2}-{3}']
    for candidate in candidate_formats:
        names = try_get_data_source_names(cbcollect_dirs, regex, candidate)
        if names:
            return names
    return cbcollect_dirs
def prepare_grafana(grafana_port, prometheus_base_port, cbcollect_dirs, buckets, times):
    """
    Create the Promtimer working directories and generate all Grafana
    provisioning artifacts: custom.ini, home dashboard, data sources,
    dashboards.yaml and the dashboards themselves.
    :type grafana_port: int
    :type prometheus_base_port: int
    :type cbcollect_dirs: list of strings
    :type buckets: list of strings
    :type times: (min_time_ms, max_time_ms) pair
    """
    os.makedirs(PROMTIMER_DIR, exist_ok=True)
    os.makedirs(PROMTIMER_LOGS_DIR, exist_ok=True)
    os.makedirs(get_dashboards_dir(), exist_ok=True)
    os.makedirs(get_plugins_dir(), exist_ok=True)
    os.makedirs(get_notifiers_dir(), exist_ok=True)
    data_sources = get_data_source_names(cbcollect_dirs)
    make_custom_ini(grafana_port)
    make_home_dashboard()
    make_data_sources(data_sources, prometheus_base_port)
    make_dashboards_yaml()
    make_dashboards(data_sources, buckets, times)
def start_grafana(grafana_home_path, grafana_port):
    """
    Start the grafana-server process using the generated custom.ini.
    :type grafana_home_path: string
    :type grafana_port: int (used for logging only; the port itself is
        configured in custom.ini)
    :rtype: the started process (see util.start_process)
    """
    args = [GRAFANA_BIN,
            '--homepath', grafana_home_path,
            '--config', 'custom.ini']
    # Build the log path portably with path.join and reuse the
    # PROMTIMER_LOGS_DIR constant instead of a hard-coded 'logs/' prefix
    log_path = path.join(PROMTIMER_LOGS_DIR, 'grafana.log')
    logging.info('starting grafana server (on localhost:{}; logging to {})'
                 .format(grafana_port, log_path))
    # Don't specify a log file as it is done within the custom.ini file
    # otherwise the output is duplicated.
    return util.start_process(args, None, PROMTIMER_DIR)
def open_browser(grafana_http_port):
    """Point the default web browser at the Grafana dashboards page."""
    url = 'http://localhost:{}/dashboards'.format(grafana_http_port)
    # Helpful for those who accidentally close the browser
    logging.info('starting browser using {}'.format(url))
    try:
        webbrowser.open_new(url)
    except OSError:
        # For some reason open_new sometimes throws an OSError with no
        # apparent side-effects; probably related to forking processes
        logging.error("Hit `OSError` opening web browser")
def parse_couchbase_ns_config(cbcollect_dir):
    """
    Parse the bucket names out of the 'Couchbase config' section of
    cbcollect_dir/couchbase.log.
    :type cbcollect_dir: string
    :rtype: dict of the form {'buckets': sorted list of bucket names}
    """
    logging.debug('parsing couchbase.log (Couchbase config)')
    in_config = False
    in_buckets = False
    buckets = []
    section_divider_count = 0
    with open(path.join(cbcollect_dir, 'couchbase.log'), "r") as file:
        for full_line in file:
            line = full_line.rstrip()
            config_line = 'Couchbase config'
            if not in_config and line.rstrip() == config_line:
                in_config = True
            elif in_config:
                # The second '====' divider marks the end of the section
                if line.strip().startswith('=================='):
                    section_divider_count += 1
                    if section_divider_count == 2:
                        break
                if not in_buckets and line == ' {buckets,':
                    in_buckets = True
                elif in_buckets:
                    # A new top-level tuple ends the buckets term
                    if re.match('^ \{.*,$', line):
                        break
                    else:
                        # Lines of the form ' [{"<bucket-name>",' carry
                        # one bucket name each
                        m = re.match('^ [ \[]\{\"(.*)\",$', line)
                        if m:
                            bucket = m.groups()[0]
                            logging.debug('found bucket:{}'.format(bucket))
                            buckets.append(bucket)
    return {'buckets': sorted(buckets)}
def parse_couchbase_chronicle_older_version(cbcollect_dir):
    """Parse the 'Chronicle config' section of couchbase.log for bucket names.

    Handles the older Chronicle format where the {bucket_names, [...]} entry
    may be split over one or more lines.

    :param cbcollect_dir: directory containing couchbase.log
    :return: dict with key 'buckets' mapping to a sorted list of bucket names
    """
    logging.debug('parsing couchbase.log (Chronicle config)')
    in_config = False
    in_buckets = False
    bucket_list = ''
    with open(path.join(cbcollect_dir, 'couchbase.log'), 'r') as file:
        for full_line in file:
            line = full_line.rstrip()
            if not in_config:
                in_config = (line == 'Chronicle config')
                continue
            # Names of buckets can be on a single or multiple lines
            chunk = ''
            if not in_buckets:
                if line.startswith(' {bucket_names,'):
                    in_buckets = True
                    chunk = line.replace(' {bucket_names,[', '')
            else:
                chunk = line
            if chunk == '':
                continue
            done = chunk.endswith(']},')
            if done:
                chunk = chunk[:-3]
            bucket_list += chunk
            if done:
                break
    if bucket_list:
        cleaned = bucket_list.replace(' ', '').replace('"', '')
        buckets = cleaned.split(',')
    else:
        buckets = []
    return {'buckets': sorted(buckets)}
def parse_couchbase_chronicle(cbcollect_dir):
    """Parse the 'Chronicle dump' section of couchbase.log for bucket names.

    The {bucket_names,{[...]}} entry may be split over multiple lines; text is
    accumulated until the closing ']' is seen.

    :param cbcollect_dir: directory containing couchbase.log
    :return: dict with key 'buckets' mapping to a sorted list of bucket names
    """
    logging.debug('parsing couchbase.log (Chronicle config)')
    in_config = False
    in_buckets = False
    bucket_list = ''
    # Raw strings: the original non-raw patterns ('\s', '\[') rely on Python
    # passing invalid escapes through and emit warnings on modern Pythons.
    names_re = re.compile(r'(^\s*{bucket_names,{\[)(.*)')
    end_re = re.compile(r'^([^\]]*)\].*')
    with open(path.join(cbcollect_dir, 'couchbase.log'), 'r') as file:
        for full_line in file:
            line = full_line.rstrip()
            if not in_config and line == 'Chronicle dump':
                in_config = True
            elif in_config:
                # Names of bucket can be on a single or multiple lines
                possible_buckets = ''
                if not in_buckets:
                    m = names_re.match(line)
                    if m:
                        in_buckets = True
                        possible_buckets = m.group(2)
                elif in_buckets:
                    possible_buckets = line
                if possible_buckets != '':
                    m = end_re.match(possible_buckets)
                    if m:
                        bucket_list += m.group(1)
                        break
                    # BUG FIX: bucket_list was previously reset to '' on every
                    # iteration, discarding earlier lines of a multi-line
                    # bucket list; accumulate across lines instead.
                    bucket_list += possible_buckets
    buckets = []
    if bucket_list != '':
        for b in bucket_list.replace(' ', '').replace('"', '').split(','):
            buckets.append(b)
    logging.debug('found buckets:{}'.format(buckets))
    return {'buckets': sorted(buckets)}
def parse_couchbase_log(cbcollect_dir):
    """Extract bucket names from couchbase.log.

    Tries the parsers from newest to oldest log format, falling back to the
    next one whenever no buckets were found; returns the last result.

    :param cbcollect_dir: directory containing couchbase.log
    :return: dict with key 'buckets' mapping to a sorted list of bucket names
    """
    parsers = (parse_couchbase_chronicle,
               parse_couchbase_chronicle_older_version,
               parse_couchbase_ns_config)
    for parse in parsers:
        config = parse(cbcollect_dir)
        if config['buckets']:
            break
    return config
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--grafana-home', dest='grafana_home_path', required=True,
help='''
Grafana configuration "homepath"; should be set to the
out-of-the-box Grafana config path. On brew-installed Grafana on
Macs this is something like:
/usr/local/Cellar/grafana/x.y.z/share/grafana
On linux systems the homepath should usually be:
/usr/share/grafana
''')
parser.add_argument('-p', '--prometheus', dest='prom_bin',
help='path to prometheus binary if it\'s not available on $PATH')
parser.add_argument('--grafana-port', dest='grafana_port', type=int,
help='http port on which Grafana should listen (default: 13300)',
default=13300)
parser.add_argument('--buckets', dest='buckets',
help='comma-separated list of buckets to build bucket dashboards '
'for; if this option is provided, auto-detection of the '
'buckets by parsing couchbase.log will be skipped')
parser.add_argument("--verbose", dest='verbose', action='store_true',
default=False, help="verbose output")
args = parser.parse_args()
os.makedirs(PROMTIMER_LOGS_DIR, exist_ok=True)
stream_handler | |
sub.set_ylim(ranges[2])
_plth0, = sub.plot([], [], c='k', ls='--')
sub.legend([_plth0], ['no noise model'], loc='lower right', handletextpad=0.1,
fontsize=20)
bkgd = fig.add_subplot(111, frameon=False)
bkgd.set_xlabel(r'$M_r$ luminosity', labelpad=10, fontsize=25)
bkgd.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
fig.subplots_adjust(wspace=0.1, hspace=0.1)
ffig = os.path.join(fig_dir, '_observables_noise.png')
fig.savefig(ffig, bbox_inches='tight')
plt.close()
return None
def fig_tex(ffig, pdf=False):
    ''' Return a latex-friendly version of the figure filename `ffig`:
    dots inside the base name become underscores, and the extension is
    swapped for 'pdf' when requested.
    '''
    dirname, basename = os.path.split(ffig)
    pieces = basename.rsplit('.', 1)
    suffix = 'pdf' if pdf else pieces[-1]
    stem = pieces[0].replace('.', '_')
    return os.path.join(dirname, stem + '.' + suffix)
def _sdsses():
    ''' Compare the SDSS Tinker group catalogs with different stellar mass
    limits, plotting M_r versus log M* for each.
    '''
    mlims = ['9.7', '10.1', '10.5']
    fig = plt.figure(figsize=(18,6))
    # one panel per group-catalog stellar mass limit
    for i, mlim in enumerate(mlims):
        tinker = Astrologs('tinkergroup', mlim=mlim)
        R_mag = tinker.data['M_r']
        logms = tinker.data['log.M_star']
        iscen = tinker.data['iscen']
        print('%i of %i are centrals' % (np.sum(iscen), len(iscen)))
        sub = fig.add_subplot(1,3,i+1)
        sub.scatter(logms, R_mag, c='k', s=1, label='$M_{*, lim}=%s$' % mlim)
        sub.set_xlabel(r'$\log(\,M_*$ [$M_\odot$]$)$', fontsize=25)
        sub.set_xlim(9.6, 12.)
        sub.set_ylabel(r'$M_r$', fontsize=25)
        sub.set_ylim(-17., -23.4)
    ffig = os.path.join(fig_dir, '_sdsses.png')
    fig.savefig(ffig, bbox_inches='tight')
    plt.close()
    return None
def _observables_sfr0():
    ''' Figure presenting the observables along with simulations without any
    attenuation.

    Writes _observables_sfr0.png (and a latex-friendly pdf) to fig_dir.
    '''
    #########################################################################
    # read in SDSS measurements
    #########################################################################
    r_edges, gr_edges, fn_edges, _ = dustInfer.sumstat_obs(name='sdss',
            statistic='2d', return_bins=True)
    # bin widths; NOTE(review): computed but not used in this function
    dr = r_edges[1] - r_edges[0]
    dgr = gr_edges[1] - gr_edges[0]
    dfn = fn_edges[1] - fn_edges[0]
    # plot ranges for (-M_r, G-R, FUV-NUV)
    ranges = [(r_edges[0], r_edges[-1]), (-0.05, 1.7), (-1., 4.)]
    #########################################################################
    # read in simulations without dust attenuation (all DEM params zero)
    #########################################################################
    x_simba, sfr0_simba = _sim_observables('simba', np.array([0. for i in range(7)]),
            zero_sfr_sample=False)
    x_tng, sfr0_tng = _sim_observables('tng', np.array([0. for i in range(7)]),
            zero_sfr_sample=False)
    x_eag, sfr0_eag = _sim_observables('eagle', np.array([0. for i in range(7)]),
            zero_sfr_sample=False)
    print('--- fraction of galaxies w/ 0 SFR ---')
    print('simba %.2f' % (np.sum(sfr0_simba)/len(sfr0_simba)))
    print('tng %.2f' % (np.sum(sfr0_tng)/len(sfr0_tng)))
    print('eagle %.2f' % (np.sum(sfr0_eag)/len(sfr0_eag)))
    #########################################################################
    # plotting: 2 rows (G-R, FUV-NUV vs -M_r) x one column per simulation
    #########################################################################
    xs = [x_simba, x_tng, x_eag]
    names = ['SIMBA (no dust)', 'TNG (no dust)', 'EAGLE (no dust)']
    clrs = ['C1', 'C0', 'C2']
    sfr0s = [sfr0_simba, sfr0_tng, sfr0_eag]
    fig = plt.figure(figsize=(5*len(xs),10))
    # NOTE(review): `clr` is unused below; the color comes from clrs[i]
    for i, _x, _sfr0, name, clr in zip(range(len(xs)), xs, sfr0s, names, clrs):
        # R vs (G - R): contours for star-forming, black points for SFR==0
        sub = fig.add_subplot(2,len(xs),i+1)
        DFM.hist2d(_x[0][~_sfr0], _x[1][~_sfr0], levels=[0.68, 0.95],
                range=[ranges[0], ranges[1]], bins=20, color=clrs[i],
                contour_kwargs={'linewidths': 0.5},
                plot_datapoints=True, fill_contours=False, plot_density=True, ax=sub)
        sub.scatter(_x[0][_sfr0], _x[1][_sfr0], c='k', s=1)
        sub.text(0.95, 0.95, name, ha='right', va='top', transform=sub.transAxes, fontsize=25)
        sub.set_xlim(20., 23)
        sub.set_xticks([20., 21., 22., 23])
        sub.set_xticklabels([])
        if i == 0:
            sub.set_ylabel(r'$G-R$', fontsize=25)
        else:
            sub.set_yticklabels([])
        sub.set_ylim(ranges[1])
        sub.set_yticks([0., 0.5, 1., 1.5])
        # R vs FUV-NUV
        sub = fig.add_subplot(2,len(xs),i+len(xs)+1)
        DFM.hist2d(_x[0][~_sfr0], _x[2][~_sfr0], levels=[0.68, 0.95],
                range=[ranges[0], ranges[2]], bins=20, color=clrs[i],
                contour_kwargs={'linewidths': 0.5},
                plot_datapoints=True, fill_contours=False, plot_density=True, ax=sub)
        # NOTE(review): handles `sfr0` and `_plth0` are assigned but unused here
        sfr0 = sub.scatter(_x[0][_sfr0], _x[2][_sfr0], c='k', s=1)
        sub.set_xlim(20., 23)
        sub.set_xticks([20., 21., 22., 23])
        # x axis is -M_r; relabel ticks with the (negative) magnitudes
        sub.set_xticklabels([-20, -21, -22, -23])
        if i == 0:
            sub.set_ylabel(r'$FUV - NUV$', fontsize=25)
        else:
            sub.set_yticklabels([])
        sub.set_ylim(ranges[2])
        _plth0, = sub.plot([], [], c='k', ls='--')
    # invisible full-figure axes used only for the shared x label
    bkgd = fig.add_subplot(111, frameon=False)
    bkgd.set_xlabel(r'$M_r$ luminosity', labelpad=10, fontsize=25)
    bkgd.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
    fig.subplots_adjust(wspace=0.1, hspace=0.1)
    ffig = os.path.join(fig_dir, '_observables_sfr0.png')
    fig.savefig(ffig, bbox_inches='tight')
    fig.savefig(fig_tex(ffig, pdf=True), bbox_inches='tight')
    plt.close()
    return None
def _SIMBA_oddities():
    ''' SIMBA has a number of differences compared to TNG and EAGLE. This
    script is to examine some of the oddities:
    * luminous blue galaxies

    Writes _simba_oddities.png and _simba_oddities1.png (plus pdfs) to fig_dir.
    '''
    # read ABC posterior; use the median of the particles as the DEM parameters
    theta_T = np.loadtxt(os.path.join(os.environ['GALPOPFM_DIR'], 'abc',
        'simba.slab_noll_msfr_fixbump.L2.3d', 'theta.t8.dat'))
    theta_simba = np.median(theta_T, axis=0)
    # run through DEM
    _sim_sed = dustInfer._read_sed('simba')
    # restrict to optical/UV wavelengths
    wlim = (_sim_sed['wave'] > 1e3) & (_sim_sed['wave'] < 8e3)
    downsample = np.ones(len(_sim_sed['logmstar'])).astype(bool)
    f_downsample = 1.#0.1
    cens = _sim_sed['censat'].astype(bool)
    mlim = (_sim_sed['logmstar'] > 9.4)
    # -999 flags galaxies with zero instantaneous SFR
    zerosfr = (_sim_sed['logsfr.inst'] == -999)
    # sample cut centrals, mass limit, non 0 SFR
    cuts = cens & mlim & ~zerosfr & downsample
    sim_sed = {}
    sim_sed['sim'] = 'simba'
    sim_sed['logmstar'] = _sim_sed['logmstar'][cuts].copy()
    sim_sed['logsfr.inst'] = _sim_sed['logsfr.inst'][cuts].copy()
    sim_sed['wave'] = _sim_sed['wave'][wlim].copy()
    sim_sed['sed_noneb'] = _sim_sed['sed_noneb'][cuts,:][:,wlim].copy()
    sim_sed['sed_onlyneb'] = _sim_sed['sed_onlyneb'][cuts,:][:,wlim].copy()
    # get observables R, G-R, FUV-NUV
    x_simba = dustInfer.sumstat_model(theta_simba,
            sed=sim_sed,
            dem='slab_noll_msfr_fixbump',
            f_downsample=f_downsample,
            statistic='2d',
            extra_data=None,
            return_datavector=True)
    # galaxies with blue color but high Mr (x_simba[0] is -M_r)
    blue_lum = (x_simba[0] > 21) & (x_simba[1] < 0.75)
    # get observables with no DEM (all dust parameters zero)
    x_nodust = dustInfer.sumstat_model(
            np.array([0. for i in range(7)]),
            sed=sim_sed,
            dem='slab_noll_msfr_fixbump',
            f_downsample=f_downsample,
            statistic='2d',
            extra_data=None,
            return_datavector=True)
    fig = plt.figure(figsize=(15,5))
    # plot R vs (G - R)
    sub = fig.add_subplot(131)
    DFM.hist2d(x_simba[0], x_simba[1], levels=[0.68, 0.95],
            range=[(20., 23.), (-0.05, 1.7)], bins=20, color='C1',
            plot_datapoints=True, fill_contours=False, plot_density=True, ax=sub)
    sub.scatter(x_simba[0][blue_lum], x_simba[1][blue_lum], c='k', s=1)
    sub.set_xlabel(r'$M_r$ luminosity', fontsize=20)
    sub.set_xlim(20., 23)
    sub.set_xticks([20., 21., 22., 23])
    sub.set_ylabel(r'$G-R$', fontsize=20)
    sub.set_ylim((-0.05, 1.7))
    sub.set_yticks([0., 0.5, 1., 1.5])
    sub.set_title('SIMBA + DEM', fontsize=20)
    # plot (G-R)-Mr relation with no dust
    sub = fig.add_subplot(132)
    DFM.hist2d(x_nodust[0], x_nodust[1], levels=[0.68, 0.95],
            range=[(20., 23.), (-0.05, 1.7)], bins=20, color='C1',
            plot_datapoints=True, fill_contours=False, plot_density=True, ax=sub)
    sub.scatter(x_nodust[0][blue_lum], x_nodust[1][blue_lum], c='k', s=1)
    sub.set_xlabel(r'$M_r$ luminosity', fontsize=20)
    sub.set_xlim(20., 23)
    sub.set_xticks([20., 21., 22., 23])
    sub.set_ylim((-0.05, 1.7))
    sub.set_yticks([0., 0.5, 1., 1.5])
    sub.set_yticklabels([])
    sub.set_title('SIMBA + no dust ', fontsize=20)
    # plot where they lie on the M*-SFR relation
    sub = fig.add_subplot(133)
    DFM.hist2d(sim_sed['logmstar'], sim_sed['logsfr.inst'], levels=[0.68, 0.95],
            range=[(9.0, 12.), (-3., 2.)], bins=20, color='C1',
            plot_datapoints=True, fill_contours=False, plot_density=True, ax=sub)
    sub.scatter(sim_sed['logmstar'][blue_lum],
            sim_sed['logsfr.inst'][blue_lum], c='k', s=1)
    sub.set_xlabel(r'$\log M_*$', fontsize=20)
    sub.set_xlim(9.0, 12)
    sub.set_ylabel(r'$\log {\rm SFR}$', fontsize=20)
    sub.set_ylim((-3., 2.))
    fig.subplots_adjust(wspace=0.3)
    ffig = os.path.join(fig_dir, '_simba_oddities.png')
    fig.savefig(ffig, bbox_inches='tight')
    fig.savefig(fig_tex(ffig, pdf=True), bbox_inches='tight')
    plt.close()
    # what happens if we force m_\tau,SFR < 0 like the other simulations?
    # get observables R, G-R, FUV-NUV
    # theta_modified[1] is the m_tau,SFR slope (see plot title below)
    theta_modified = theta_simba.copy()
    theta_modified[1] = -1.
    x_modified = dustInfer.sumstat_model(
            theta_modified,
            sed=sim_sed,
            dem='slab_noll_msfr_fixbump',
            f_downsample=f_downsample,
            statistic='2d',
            extra_data=None,
            return_datavector=True)
    fig = plt.figure(figsize=(20,5))
    # galaxies that are blue and luminous even without dust attenuation
    blue_w_nodust = (x_nodust[0] > 20.2) & (x_nodust[1] < 0.15)
    # plot (G-R)-Mr relation with no dust
    sub = fig.add_subplot(141)
    DFM.hist2d(x_nodust[0], x_nodust[1], levels=[0.68, 0.95],
            range=[(20., 23.), (-0.05, 1.7)], bins=20, color='C1',
            plot_datapoints=True, fill_contours=False, plot_density=True, ax=sub)
    sub.scatter(x_nodust[0][blue_lum], x_nodust[1][blue_lum], c='k', s=1)
    sub.scatter(x_nodust[0][blue_w_nodust], x_nodust[1][blue_w_nodust], c='C0', s=2)
    sub.set_xlabel(r'$M_r$ luminosity', fontsize=20)
    sub.set_xlim(20., 23)
    sub.set_xticks([20., 21., 22., 23])
    sub.set_ylabel(r'$G-R$', fontsize=20)
    sub.set_ylim((-0.05, 1.7))
    sub.set_yticks([0., 0.5, 1., 1.5])
    sub.set_title('SIMBA + no dust ', fontsize=20)
    # plot R vs (G - R)
    sub = fig.add_subplot(142)
    DFM.hist2d(x_simba[0], x_simba[1], levels=[0.68, 0.95],
            range=[(20., 23.), (-0.05, 1.7)], bins=20, color='C1',
            plot_datapoints=True, fill_contours=False, plot_density=True, ax=sub)
    sub.scatter(x_simba[0][blue_lum], x_simba[1][blue_lum], c='k', s=1)
    sub.scatter(x_simba[0][blue_w_nodust], x_simba[1][blue_w_nodust], c='C0', s=2)
    sub.set_xlabel(r'$M_r$ luminosity', fontsize=20)
    sub.set_xlim(20., 23)
    sub.set_xticks([20., 21., 22., 23])
    sub.set_ylim((-0.05, 1.7))
    sub.set_yticks([0., 0.5, 1., 1.5])
    sub.set_title('SIMBA + DEM', fontsize=20)
    # plot color-magnitude relation if we change m_tau,SFR
    sub = fig.add_subplot(143)
    DFM.hist2d(x_modified[0], x_modified[1], levels=[0.68, 0.95],
            range=[(20., 23.), (-0.05, 1.7)], bins=20, color='C1',
            plot_datapoints=True, fill_contours=False, plot_density=True, ax=sub)
    sub.scatter(x_modified[0][blue_lum], x_modified[1][blue_lum], c='k', s=1)
    sub.scatter(x_modified[0][blue_w_nodust], x_modified[1][blue_w_nodust], c='C0', s=2)
    sub.set_xlabel(r'$M_r$ luminosity', fontsize=20)
    sub.set_xlim(20., 23)
    sub.set_xticks([20., 21., 22., 23])
    sub.set_ylim((-0.05, 1.7))
    sub.set_yticks([0., 0.5, 1., 1.5])
    sub.set_yticklabels([])
    sub.set_title(r'SIMBA w/ $m_{\tau, {\rm SFR}} = -1$', fontsize=20)
    # plot where they lie on the M*-SFR relation
    sub = fig.add_subplot(144)
    DFM.hist2d(sim_sed['logmstar'], sim_sed['logsfr.inst'], levels=[0.68, 0.95],
            range=[(9.0, 12.), (-3., 2.)], bins=20, color='C1',
            plot_datapoints=True, fill_contours=False, plot_density=True, ax=sub)
    sub.scatter(sim_sed['logmstar'][blue_lum],
            sim_sed['logsfr.inst'][blue_lum], c='k', s=1)
    sub.scatter(sim_sed['logmstar'][blue_w_nodust],
            sim_sed['logsfr.inst'][blue_w_nodust], c='C0', s=2)
    sub.set_xlabel(r'$\log M_*$', fontsize=20)
    sub.set_xlim(9.0, 12)
    sub.set_ylabel(r'$\log {\rm SFR}$', fontsize=20)
    sub.set_ylim((-3., 2.))
    fig.subplots_adjust(wspace=0.3)
    ffig = os.path.join(fig_dir, '_simba_oddities1.png')
    fig.savefig(ffig, bbox_inches='tight')
    fig.savefig(fig_tex(ffig, pdf=True), bbox_inches='tight')
    plt.close()
    return None
def _simba_close_examination():
''' closer examination of simba. Reproducing some of Romeel's figures
'''
# read in SDSS measurements
r_edges, gr_edges, fn_edges, _ = dustInfer.sumstat_obs(statistic='2d',
return_bins=True)
dr = r_edges[1] - r_edges[0]
dgr = gr_edges[1] - gr_edges[0]
dfn = fn_edges[1] - fn_edges[0]
ranges = [(r_edges[0], r_edges[-1]), (-0.05, 1.7), (-1., 4.)]
sdss = Catalog('tinker')
sdss_M_fuv, sdss_M_nuv, _, sdss_M_g, sdss_M_r, _, _ = sdss.data['NSA_ABSMAG'].T
mr_complete = (sdss_M_r < -20.)
x_obs = [-1.*sdss_M_r[mr_complete],
sdss_M_g[mr_complete] - sdss_M_r[mr_complete],
sdss_M_fuv[mr_complete] - sdss_M_nuv[mr_complete]]
# read in simulations without dust attenuation
nontheta = np.zeros(6)
x_simba, simba, sfr0_simba = _sim_observables('simba', nontheta)
logssfr = simba['logsfr.inst'] | |
0069 0069",
8572: "<compat> 006C",
8573: "<compat> 0063",
8574: "<compat> 0064",
8575: "<compat> 006D",
8585: "<fraction> 0030 2044 0033",
8602: "2190 0338",
8603: "2192 0338",
8622: "2194 0338",
8653: "21D0 0338",
8654: "21D4 0338",
8655: "21D2 0338",
8708: "2203 0338",
8713: "2208 0338",
8716: "220B 0338",
8740: "2223 0338",
8742: "2225 0338",
8748: "<compat> 222B 222B",
8749: "<compat> 222B 222B 222B",
8751: "<compat> 222E 222E",
8752: "<compat> 222E 222E 222E",
8769: "223C 0338",
8772: "2243 0338",
8775: "2245 0338",
8777: "2248 0338",
8800: "003D 0338",
8802: "2261 0338",
8813: "224D 0338",
8814: "003C 0338",
8815: "003E 0338",
8816: "2264 0338",
8817: "2265 0338",
8820: "2272 0338",
8821: "2273 0338",
8824: "2276 0338",
8825: "2277 0338",
8832: "227A 0338",
8833: "227B 0338",
8836: "2282 0338",
8837: "2283 0338",
8840: "2286 0338",
8841: "2287 0338",
8876: "22A2 0338",
8877: "22A8 0338",
8878: "22A9 0338",
8879: "22AB 0338",
8928: "227C 0338",
8929: "227D 0338",
8930: "2291 0338",
8931: "2292 0338",
8938: "22B2 0338",
8939: "22B3 0338",
8940: "22B4 0338",
8941: "22B5 0338",
9001: "3008",
9002: "3009",
9312: "<circle> 0031",
9313: "<circle> 0032",
9314: "<circle> 0033",
9315: "<circle> 0034",
9316: "<circle> 0035",
9317: "<circle> 0036",
9318: "<circle> 0037",
9319: "<circle> 0038",
9320: "<circle> 0039",
9321: "<circle> 0031 0030",
9322: "<circle> 0031 0031",
9323: "<circle> 0031 0032",
9324: "<circle> 0031 0033",
9325: "<circle> 0031 0034",
9326: "<circle> 0031 0035",
9327: "<circle> 0031 0036",
9328: "<circle> 0031 0037",
9329: "<circle> 0031 0038",
9330: "<circle> 0031 0039",
9331: "<circle> 0032 0030",
9332: "<compat> 0028 0031 0029",
9333: "<compat> 0028 0032 0029",
9334: "<compat> 0028 0033 0029",
9335: "<compat> 0028 0034 0029",
9336: "<compat> 0028 0035 0029",
9337: "<compat> 0028 0036 0029",
9338: "<compat> 0028 0037 0029",
9339: "<compat> 0028 0038 0029",
9340: "<compat> 0028 0039 0029",
9341: "<compat> 0028 0031 0030 0029",
9342: "<compat> 0028 0031 0031 0029",
9343: "<compat> 0028 0031 0032 0029",
9344: "<compat> 0028 0031 0033 0029",
9345: "<compat> 0028 0031 0034 0029",
9346: "<compat> 0028 0031 0035 0029",
9347: "<compat> 0028 0031 0036 0029",
9348: "<compat> 0028 0031 0037 0029",
9349: "<compat> 0028 0031 0038 0029",
9350: "<compat> 0028 0031 0039 0029",
9351: "<compat> 0028 0032 0030 0029",
9352: "<compat> 0031 002E",
9353: "<compat> 0032 002E",
9354: "<compat> 0033 002E",
9355: "<compat> 0034 002E",
9356: "<compat> 0035 002E",
9357: "<compat> 0036 002E",
9358: "<compat> 0037 002E",
9359: "<compat> 0038 002E",
9360: "<compat> 0039 002E",
9361: "<compat> 0031 0030 002E",
9362: "<compat> 0031 0031 002E",
9363: "<compat> 0031 0032 002E",
9364: "<compat> 0031 0033 002E",
9365: "<compat> 0031 0034 002E",
9366: "<compat> 0031 0035 002E",
9367: "<compat> 0031 0036 002E",
9368: "<compat> 0031 0037 002E",
9369: "<compat> 0031 0038 002E",
9370: "<compat> 0031 0039 002E",
9371: "<compat> 0032 0030 002E",
9372: "<compat> 0028 0061 0029",
9373: "<compat> 0028 0062 0029",
9374: "<compat> 0028 0063 0029",
9375: "<compat> 0028 0064 0029",
9376: "<compat> 0028 0065 0029",
9377: "<compat> 0028 0066 0029",
9378: "<compat> 0028 0067 0029",
9379: "<compat> 0028 0068 0029",
9380: "<compat> 0028 0069 0029",
9381: "<compat> 0028 006A 0029",
9382: "<compat> 0028 006B 0029",
9383: "<compat> 0028 006C 0029",
9384: "<compat> 0028 006D 0029",
9385: "<compat> 0028 006E 0029",
9386: "<compat> 0028 006F 0029",
9387: "<compat> 0028 0070 0029",
9388: "<compat> 0028 0071 0029",
9389: "<compat> 0028 0072 0029",
9390: "<compat> 0028 0073 0029",
9391: "<compat> 0028 0074 0029",
9392: "<compat> 0028 0075 0029",
9393: "<compat> 0028 0076 0029",
9394: "<compat> 0028 0077 0029",
9395: "<compat> 0028 0078 0029",
9396: "<compat> 0028 0079 0029",
9397: "<compat> 0028 007A 0029",
9398: "<circle> 0041",
9399: "<circle> 0042",
9400: "<circle> 0043",
9401: "<circle> 0044",
9402: "<circle> 0045",
9403: "<circle> 0046",
9404: "<circle> 0047",
9405: "<circle> 0048",
9406: "<circle> 0049",
9407: "<circle> 004A",
9408: "<circle> 004B",
9409: "<circle> 004C",
9410: "<circle> 004D",
9411: "<circle> 004E",
9412: "<circle> 004F",
9413: "<circle> 0050",
9414: "<circle> 0051",
9415: "<circle> 0052",
9416: "<circle> 0053",
9417: "<circle> 0054",
9418: "<circle> 0055",
9419: "<circle> 0056",
9420: "<circle> 0057",
9421: "<circle> 0058",
9422: "<circle> 0059",
9423: "<circle> 005A",
9424: "<circle> 0061",
9425: "<circle> 0062",
9426: "<circle> 0063",
9427: "<circle> 0064",
9428: "<circle> 0065",
9429: "<circle> 0066",
9430: "<circle> 0067",
9431: "<circle> 0068",
9432: "<circle> 0069",
9433: "<circle> 006A",
9434: "<circle> 006B",
9435: "<circle> 006C",
9436: "<circle> 006D",
9437: "<circle> 006E",
9438: "<circle> 006F",
9439: "<circle> 0070",
9440: "<circle> 0071",
9441: "<circle> 0072",
9442: "<circle> 0073",
9443: "<circle> 0074",
9444: "<circle> 0075",
9445: "<circle> 0076",
9446: "<circle> 0077",
9447: "<circle> 0078",
9448: "<circle> 0079",
9449: "<circle> 007A",
9450: "<circle> 0030",
10764: "<compat> 222B 222B 222B 222B",
10868: "<compat> 003A 003A 003D",
10869: "<compat> 003D 003D",
10870: "<compat> 003D 003D 003D",
10972: "2ADD 0338",
11388: "<sub> 006A",
11389: "<super> 0056",
11631: "<super> 2D61",
11935: "<compat> 6BCD",
12019: "<compat> 9F9F",
12032: "<compat> 4E00",
12033: "<compat> 4E28",
12034: "<compat> 4E36",
12035: "<compat> 4E3F",
12036: "<compat> 4E59",
12037: "<compat> 4E85",
12038: "<compat> 4E8C",
12039: "<compat> 4EA0",
12040: "<compat> 4EBA",
12041: "<compat> 513F",
12042: "<compat> 5165",
12043: "<compat> 516B",
12044: "<compat> 5182",
12045: "<compat> 5196",
12046: "<compat> 51AB",
12047: "<compat> 51E0",
12048: "<compat> 51F5",
12049: "<compat> 5200",
12050: "<compat> 529B",
12051: "<compat> 52F9",
12052: "<compat> 5315",
12053: "<compat> 531A",
12054: "<compat> 5338",
12055: "<compat> 5341",
12056: "<compat> 535C",
12057: "<compat> 5369",
12058: "<compat> 5382",
12059: "<compat> 53B6",
12060: "<compat> 53C8",
12061: "<compat> 53E3",
12062: "<compat> 56D7",
12063: "<compat> 571F",
12064: "<compat> 58EB",
12065: "<compat> 5902",
12066: "<compat> 590A",
12067: "<compat> 5915",
12068: "<compat> 5927",
12069: "<compat> 5973",
12070: "<compat> 5B50",
12071: "<compat> 5B80",
12072: "<compat> 5BF8",
12073: "<compat> 5C0F",
12074: "<compat> 5C22",
12075: "<compat> 5C38",
12076: "<compat> 5C6E",
12077: "<compat> 5C71",
12078: "<compat> 5DDB",
12079: "<compat> 5DE5",
12080: "<compat> 5DF1",
12081: "<compat> 5DFE",
12082: "<compat> 5E72",
12083: "<compat> 5E7A",
12084: "<compat> 5E7F",
12085: "<compat> 5EF4",
12086: "<compat> 5EFE",
12087: "<compat> 5F0B",
12088: "<compat> 5F13",
12089: "<compat> 5F50",
12090: "<compat> 5F61",
12091: "<compat> 5F73",
12092: "<compat> 5FC3",
12093: "<compat> 6208",
12094: "<compat> 6236",
12095: "<compat> 624B",
12096: "<compat> 652F",
12097: "<compat> 6534",
12098: "<compat> 6587",
12099: "<compat> 6597",
12100: "<compat> 65A4",
12101: "<compat> 65B9",
12102: "<compat> 65E0",
12103: "<compat> 65E5",
12104: "<compat> 66F0",
12105: "<compat> 6708",
12106: "<compat> 6728",
12107: "<compat> 6B20",
12108: "<compat> 6B62",
12109: "<compat> 6B79",
12110: "<compat> 6BB3",
12111: "<compat> 6BCB",
12112: "<compat> 6BD4",
12113: "<compat> 6BDB",
12114: "<compat> 6C0F",
12115: "<compat> 6C14",
12116: "<compat> 6C34",
12117: "<compat> 706B",
12118: "<compat> 722A",
12119: "<compat> 7236",
12120: "<compat> 723B",
12121: "<compat> 723F",
12122: "<compat> 7247",
12123: "<compat> 7259",
12124: "<compat> 725B",
12125: "<compat> 72AC",
12126: "<compat> 7384",
12127: "<compat> 7389",
12128: "<compat> 74DC",
12129: "<compat> 74E6",
12130: "<compat> 7518",
12131: "<compat> 751F",
12132: "<compat> 7528",
12133: "<compat> 7530",
12134: "<compat> 758B",
12135: "<compat> 7592",
12136: "<compat> 7676",
12137: "<compat> 767D",
12138: "<compat> 76AE",
12139: "<compat> 76BF",
12140: "<compat> 76EE",
12141: "<compat> 77DB",
12142: "<compat> 77E2",
12143: "<compat> 77F3",
12144: "<compat> 793A",
12145: "<compat> 79B8",
12146: "<compat> 79BE",
12147: "<compat> 7A74",
12148: "<compat> 7ACB",
12149: "<compat> 7AF9",
12150: "<compat> 7C73",
12151: "<compat> 7CF8",
12152: "<compat> 7F36",
12153: "<compat> 7F51",
12154: "<compat> 7F8A",
12155: "<compat> 7FBD",
12156: "<compat> 8001",
12157: "<compat> 800C",
12158: "<compat> 8012",
12159: "<compat> 8033",
12160: "<compat> 807F",
12161: "<compat> 8089",
12162: "<compat> 81E3",
12163: "<compat> 81EA",
12164: "<compat> 81F3",
12165: "<compat> 81FC",
12166: "<compat> 820C",
12167: "<compat> 821B",
12168: "<compat> 821F",
12169: "<compat> 826E",
12170: "<compat> 8272",
12171: "<compat> 8278",
12172: "<compat> 864D",
12173: "<compat> 866B",
12174: "<compat> 8840",
12175: "<compat> 884C",
12176: "<compat> 8863",
12177: "<compat> 897E",
12178: "<compat> 898B",
12179: "<compat> 89D2",
12180: "<compat> 8A00",
12181: "<compat> 8C37",
12182: "<compat> 8C46",
12183: "<compat> 8C55",
12184: "<compat> 8C78",
12185: "<compat> 8C9D",
12186: "<compat> 8D64",
12187: "<compat> 8D70",
12188: "<compat> 8DB3",
12189: "<compat> 8EAB",
12190: "<compat> 8ECA",
12191: "<compat> 8F9B",
12192: "<compat> 8FB0",
12193: "<compat> 8FB5",
12194: "<compat> 9091",
12195: "<compat> 9149",
12196: "<compat> 91C6",
12197: "<compat> 91CC",
12198: "<compat> 91D1",
12199: "<compat> 9577",
12200: "<compat> 9580",
| |
from __future__ import annotations
from ..typecheck import *
from ..import core
from ..breakpoints import (
Breakpoints,
SourceBreakpoint,
)
from ..watch import Watch
from . import types as dap
from .variable import (
Variable,
SourceLocation,
ScopeReference,
)
from .configuration import (
AdapterConfiguration,
ConfigurationExpanded,
TaskExpanded
)
from .types import array_from_json, json_from_array
from .transport import TransportProtocol, TransportProtocolListener
class SessionListener (Protocol):
    """Callbacks a Session uses to notify its owner of requests and changes.

    Implemented by whatever component owns the sessions (typically the
    debugger UI layer). The `on_session_updated_*` methods are change
    notifications; the `*_request` coroutines must perform the request.
    """
    async def on_session_task_request(self, session: Session, task: TaskExpanded): ...
    async def on_session_terminal_request(self, session: Session, request: dap.RunInTerminalRequest): ...
    def on_session_state_changed(self, session: Session, state: int): ...
    def on_session_selected_frame(self, session: Session, frame: Optional[dap.StackFrame]): ...
    def on_session_output_event(self, session: Session, event: dap.OutputEvent): ...
    def on_session_updated_modules(self, session: Session): ...
    def on_session_updated_sources(self, session: Session): ...
    def on_session_updated_variables(self, session: Session): ...
    def on_session_updated_threads(self, session: Session): ...
class Session(TransportProtocolListener, core.Logger):
    """A single debug-adapter-protocol session (may have child sessions)."""
    # lifecycle states (see the `state` property)
    stopped = 0
    paused = 1
    running = 2
    starting = 3
    stopping = 4

    # reasons a session ended (passed to stop_forced)
    stopped_reason_build_failed=0
    stopped_reason_launch_error=1
    stopped_reason_dispose=2
    stopped_reason_cancel=3
    stopped_reason_terminated_event=4
    stopped_reason_manual=5
def __init__(self, breakpoints: Breakpoints, watch: Watch, listener: SessionListener, transport_log: core.Logger, parent: Optional[Session] = None) -> None:
    """Create a debug session in the `starting` state.

    Subscribes to breakpoint/watch change events so edits are forwarded to
    the adapter; registers itself as a child of `parent` if given.
    """
    self.listener = listener
    self.children: List[Session] = []
    self.parent = parent
    if parent:
        parent.children.append(self)
    self.transport_log = transport_log
    self.state_changed: core.Event[int] = core.Event()
    # breakpoints shared with the UI; per-id map filled from adapter responses
    self.breakpoints = breakpoints
    self.breakpoints_for_id: Dict[int, SourceBreakpoint] = {}
    self.breakpoints.data.on_send.add(self.on_send_data_breakpoints)
    self.breakpoints.function.on_send.add(self.on_send_function_breakpoints)
    self.breakpoints.filters.on_send.add(self.on_send_filters)
    self.breakpoints.source.on_send.add(self.on_send_source_breakpoint)
    # evaluate newly added watch expressions against the selected frame
    self.watch = watch
    self.watch.on_added.add(lambda expr: self.watch.evaluate_expression(self, self.selected_frame, expr))
    # transport/adapter handles; None until _launch succeeds
    self._transport: Optional[TransportProtocol] = None
    self.adapter_configuration = None
    self.launching_async: Optional[core.future] = None
    self.capabilities = None
    self.stop_requested = False
    self.launch_request = True
    self._state = Session.starting
    self._status = 'Starting'
    self.disposeables: List[Any] = []
    # resolved when the session has fully ended (see `wait`)
    self.complete = core.future()
    # thread/variable/source/module state mirrored from the adapter
    self.threads_for_id: Dict[int, Thread] = {}
    self.all_threads_stopped = False
    self.selected_explicitly = False
    self.selected_thread = None
    self.selected_frame = None
    self.threads: List[Thread] = []
    self.variables: List[Variable] = []
    self.sources: Dict[Union[int, str], dap.Source] = {}
    self.modules: Dict[Union[int, str], dap.Module] = {}
    self.on_threads_selected: core.Event[Optional[Thread], Optional[dap.StackFrame]] = core.Event()
    self.on_threads_selected.add(lambda thread, frame: self.load_frame(frame))
@property
def name(self) -> str:
    """Name of the launch configuration this session was started from."""
    return self.configuration.name
@property
def state(self) -> int:
    """Current lifecycle state (one of the Session state constants)."""
    return self._state

@state.setter
def state(self, state: int) -> None:
    """Change the state and notify the listener; no-op if unchanged."""
    if self._state != state:
        self._state = state
        self.listener.on_session_state_changed(self, state)
@property
def status(self) -> Optional[str]:
    """Short human-readable text describing what the session is doing."""
    return self._status

def _change_status(self, status: str):
    """Set the status text and re-notify the listener of the current state."""
    self._status = status
    self.listener.on_session_state_changed(self, self._state)
async def launch(self, adapter_configuration: AdapterConfiguration, configuration: ConfigurationExpanded, restart: Optional[Any] = None, no_debug: bool = False) -> None:
    """Start the adapter and launch/attach, wrapping _launch with error handling.

    On core.Error the failure is logged and surfaced in the debugger output
    and the session is force-stopped; cancellation is swallowed. In all
    cases `launching_async` is cleared before returning.
    """
    try:
        self.launching_async = core.run(self._launch(adapter_configuration, configuration, restart, no_debug))
        await self.launching_async
    except core.Error as e:
        self.launching_async = None
        core.log_exception(e)
        # fixed typo: "occured" -> "occurred" in the user-facing message
        self.error('... an error occurred, ' + str(e))
        await self.stop_forced(reason=Session.stopped_reason_launch_error)
    except core.CancelledError:
        ...
    self.launching_async = None
async def _launch(self, adapter_configuration: AdapterConfiguration, configuration: ConfigurationExpanded, restart: Optional[Any], no_debug: bool) -> None:
    """Run the full session start sequence.

    Steps: pre-debug task, start the adapter transport, DAP `initialize`,
    then `launch` or `attach` depending on the configuration, and finally
    fetch the baseline threads. Raises core.Error on any failure.
    """
    assert self.state == Session.stopped, 'debugger not in stopped state?'
    self.state = Session.starting
    self.adapter_configuration = adapter_configuration
    # NOTE(review): assigned twice — the raw configuration is set first,
    # presumably so it is available while it is being resolved; confirm.
    self.configuration = configuration
    self.configuration = await adapter_configuration.configuration_resolve(configuration)
    if not adapter_configuration.installed_version:
        raise core.Error('Debug adapter with type name "{}" is not installed. You can install it by running Debugger: Install Adapters'.format(adapter_configuration.type))
    if not await self.run_pre_debug_task():
        self.info('Pre debug command failed, not starting session')
        self.launching_async = None
        await self.stop_forced(reason=Session.stopped_reason_build_failed)
        return
    self._change_status('Starting')
    try:
        transport = await adapter_configuration.start(log=self.transport_log, configuration=self.configuration)
    except Exception as e:
        raise core.Error(f'Unable to start the adapter process: {e}')
    self._transport = TransportProtocol(
        transport,
        self,
        self.transport_log
    )
    # DAP initialize handshake; the response advertises adapter capabilities
    self.capabilities = dap.Capabilities.from_json(
        await self.request('initialize', {
            'clientID': 'sublime',
            'clientName': 'Sublime Text',
            'adapterID': configuration.type,
            'pathFormat': 'path',
            'linesStartAt1': True,
            'columnsStartAt1': True,
            'supportsVariableType': True,
            'supportsVariablePaging': False,
            'supportsRunInTerminalRequest': True,
            'locale': 'en-us'
        })
    )
    # remove/add any exception breakpoint filters
    self.breakpoints.filters.update(self.capabilities.exceptionBreakpointFilters or [])
    if restart:
        configuration['__restart'] = restart
    if no_debug:
        configuration['noDebug'] = True
    if configuration.request == 'launch':
        self.launch_request = True
        await self.request('launch', configuration)
    elif configuration.request == 'attach':
        self.launch_request = False
        await self.request('attach', configuration)
    else:
        raise core.Error('expected configuration to have request of either "launch" or "attach" found {}'.format(configuration.request))
    self.adapter_configuration.did_start_debugging(self)
    # get the baseline threads after launch/attach
    # according to https://microsoft.github.io/debug-adapter-protocol/overview
    self.refresh_threads()
    # At this point we are running?
    self._change_status('Running')
    self.state = Session.running
async def request(self, command: str, arguments: Any) -> Any:
    """Send a DAP request over the transport and await its response.

    Raises core.Error if the session has no active transport.
    """
    transport = self._transport
    if transport is None:
        raise core.Error('debugger not running')
    return await transport.send_request_asyc(command, arguments)
async def wait(self) -> None:
    """Block until the session has fully ended (the `complete` future resolves)."""
    await self.complete
async def run_pre_debug_task(self) -> bool:
    """Run the configuration's pre-debug task, if any.

    Returns True when there is no task or it succeeded, False on failure.
    """
    task = self.configuration.pre_debug_task
    if not task:
        return True
    self._change_status('Running pre debug command')
    return await self.run_task('Pre debug command', task)
async def run_post_debug_task(self) -> bool:
    """Run the configured post-debug task, if any.

    Returns True when there is no task or it succeeded, False on failure.
    """
    command = self.configuration.post_debug_task
    if not command:
        return True
    self._change_status('Running post debug command')
    return await self.run_task('Post debug command', command)
async def run_task(self, name: str, task: TaskExpanded) -> bool:
    """Ask the listener to run *task*; report failures and return success."""
    try:
        await self.listener.on_session_task_request(self, task)
    except core.CancelledError:
        self.error(f'{name}: cancelled')
        return False
    except Exception as e:
        # unexpected failure: log the traceback and surface the message
        core.log_exception()
        self.error(f'{name}: {e}')
        return False
    return True
def _refresh_state(self) -> None:
    """Sync status/state with the current command thread; default to running."""
    try:
        if self.command_thread.stopped:
            self._change_status('Paused')
            self.state = Session.paused
        else:
            self._change_status('Running')
            self.state = Session.running
    except core.Error:
        # no usable command thread — assume the session is running
        self.state = Session.running
async def add_breakpoints(self) -> None:
    """Send all breakpoints (exception filters, function, per-file source and
    data breakpoints) to the adapter, concurrently."""
    assert self._transport
    requests: List[Awaitable[Any]] = [
        self.set_exception_breakpoint_filters(),
        self.set_function_breakpoints(),
    ]
    for file, filebreaks in self.breakpoints.source.breakpoints_per_file().items():
        requests.append(self.set_breakpoints_for_file(file, filebreaks))
    if self.capabilities.supportsDataBreakpoints:
        requests.append(self.set_data_breakpoints())
    if requests:
        await core.wait(requests)
async def set_exception_breakpoint_filters(self) -> None:
    """Send the currently enabled exception breakpoint filters to the adapter."""
    if not self._transport:
        return
    enabled = [f for f in self.breakpoints.filters if f.enabled]
    await self.request('setExceptionBreakpoints', {
        'filters': [f.dap.id for f in enabled],
        'filterOptions': [
            {'filterId': f.dap.id, 'condition': f.condition}
            for f in enabled
        ],
    })
async def set_function_breakpoints(self) -> None:
    """Send enabled function breakpoints to the adapter and record results."""
    if not self._transport:
        return
    enabled = [b for b in self.breakpoints.function if b.enabled]
    if not self.capabilities.supportsFunctionBreakpoints:
        # only show an error if the user actually set a function breakpoint
        if enabled:
            self.error('This debugger does not support function breakpoints')
        return
    response = await self.request('setFunctionBreakpoints', {
        'breakpoints': json_from_array(dap.FunctionBreakpoint.into_json, [b.dap for b in enabled])
    })
    results = array_from_json(dap.BreakpointResult.from_json, response['breakpoints'])
    # results come back in request order, pair them up with our breakpoints
    for result, b in zip(results, enabled):
        self.breakpoints.function.set_result(b, result)
async def set_data_breakpoints(self) -> None:
    """Send enabled data breakpoints to the adapter and record results."""
    if not self._transport:
        return
    enabled = [b for b in self.breakpoints.data if b.enabled]
    response = await self.request('setDataBreakpoints', {
        'breakpoints': json_from_array(dap.DataBreakpoint.into_json, [b.dap for b in enabled])
    })
    results = array_from_json(dap.BreakpointResult.from_json, response['breakpoints'])
    # results come back in request order, pair them up with our breakpoints
    for result, b in zip(results, enabled):
        self.breakpoints.data.set_result(b, result)
async def set_breakpoints_for_file(self, file: str, breakpoints: List[SourceBreakpoint]) -> None:
    """Send the enabled source breakpoints of *file* and record their results."""
    if not self._transport:
        return
    enabled = [b for b in breakpoints if b.enabled]
    try:
        response = await self.request('setBreakpoints', {
            'source': {'path': file},
            'breakpoints': json_from_array(dap.SourceBreakpoint.into_json, [b.dap for b in enabled]),
        })
        results = array_from_json(dap.BreakpointResult.from_json, response['breakpoints'])
        if len(results) != len(enabled):
            raise dap.Error(True, 'expected #breakpoints to match results')
        for result, b in zip(results, enabled):
            self.breakpoints.source.set_result(b, result)
            # remember id-carrying results so later breakpoint events can be matched
            if result.id:
                self.breakpoints_for_id[result.id] = b
    except dap.Error:
        # request failed: mark every breakpoint we tried to set as failed
        for b in enabled:
            self.breakpoints.source.set_result(b, dap.BreakpointResult.failed)
# Breakpoint-changed notifications: re-send the affected breakpoint group to the adapter.
def on_send_data_breakpoints(self, any):
    core.run(self.set_data_breakpoints())

def on_send_function_breakpoints(self, any):
    core.run(self.set_function_breakpoints())

def on_send_filters(self, any):
    core.run(self.set_exception_breakpoint_filters())

def on_send_source_breakpoint(self, breakpoint: SourceBreakpoint) -> None:
    # only the breakpoints of the file that changed need to be re-sent
    file = breakpoint.file
    core.run(self.set_breakpoints_for_file(file, self.breakpoints.source.breakpoints_for_file(file)))
async def stop(self):
    """Stop the debug session, escalating on repeated calls.

    This follows what the spec says to do in the overview:
    https://microsoft.github.io/debug-adapter-protocol/overview
    """
    # haven't started the session yet — nothing to ask the adapter, tear down locally
    if self._transport is None:
        await self.stop_forced(reason=Session.stopped_reason_manual)
        return
    # if stop is called again while a stop is pending we forcefully tear down
    if self.stop_requested:
        await self.stop_forced(reason=Session.stopped_reason_manual)
        return
    self._change_status('Stop requested')
    self.stop_requested = True
    # first try to terminate (graceful shutdown of the debuggee) if we can
    if self.launch_request and self.capabilities and self.capabilities.supportsTerminateRequest:
        try:
            await self.request('terminate', {
                'restart': False
            })
            return
        except dap.Error as e:
            core.log_exception()
    # we couldn't terminate — either not a launch request or the terminate
    # request failed — so we forcefully disconnect
    await self.request('disconnect', {
        'restart': False
    })
def stop_debug_adapter_session(self):
    """Tear down the adapter transport and clear all per-session state."""
    # cancel a launch that is still in flight
    if self.launching_async:
        self.launching_async.cancel()
    self.breakpoints_for_id = {}
    self.watch.clear_session_data(self)
    self.breakpoints.clear_session_data()
    self.stop_requested = False
    # notify the adapter configuration before disposing the transport
    if self._transport:
        self.adapter_configuration.did_stop_debugging(self)
        self._transport.dispose()
        self._transport = None
async def stop_forced(self, reason) -> None:
    """Force the session into the stopped state.

    Tears down the adapter session, runs the post-debug task and resolves
    self.complete so that wait() callers are released. No-op if the session
    is already stopping/stopped.
    """
    if self.state == Session.stopping or self.state == Session.stopped:
        return
    self.stopped_reason = reason
    self.state = Session.stopping
    self.stop_debug_adapter_session()
    await self.run_post_debug_task()
    self._change_status('Debug session has ended')
    self.state = Session.stopped
    # guard against double-set if stop paths race (removed leftover debug print)
    if not self.complete.done():
        self.complete.set_result(None)
def dispose(self) -> None:
    """Release all session resources and detach from the session hierarchy."""
    self.stop_debug_adapter_session()
    for disposable in self.disposeables:
        disposable.dispose()
    # detach children so they don't hold a dangling parent pointer
    for child in self.children:
        child.parent = None
    parent = self.parent
    if parent:
        parent.children.remove(self)
        self.parent = None
async def resume(self):
    """Continue execution of the command thread and emit a continued event."""
    body = await self.request('continue', {
        'threadId': self.command_thread.id
    })
    # some adapters omit the response body here; assume all threads continued
    all_threads_continued = body.get('allThreadsContinued', True) if body else True
    self.on_continued_event(dap.ContinuedEvent(self.command_thread.id, all_threads_continued))
async def pause(self):
    """Ask the adapter to pause the current command thread."""
    await self.request('pause', {
        'threadId': self.command_thread.id
    })
async def step_over(self):
    """Step over the current line ('next' request) on the command thread."""
    # optimistically mark the thread as running; the adapter sends 'stopped' when done
    self.on_continued_event(dap.ContinuedEvent(self.command_thread.id, False))
    await self.request('next', {'threadId': self.command_thread.id})

async def step_in(self):
    """Step into the call at the current line on the command thread."""
    self.on_continued_event(dap.ContinuedEvent(self.command_thread.id, False))
    await self.request('stepIn', {'threadId': self.command_thread.id})

async def step_out(self):
    """Step out of the current function on the command thread."""
    self.on_continued_event(dap.ContinuedEvent(self.command_thread.id, False))
    await self.request('stepOut', {'threadId': self.command_thread.id})
async def evaluate(self, expression: str, context: str = 'repl'):
    """Evaluate *expression* and echo the result to the debugger console.

    Raises dap.Error when no result is produced.
    """
    self.info(expression)
    result = await self.evaluate_expression(expression, context)
    if not result:
        raise dap.Error(True, 'expression did not return a result')
    # NOTE: a stray bare `return` here previously made the output event below
    # unreachable, so evaluation results never reached the console.
    # variablesReference doesn't appear to be optional in the spec... but some adapters treat it as such
    event = dap.OutputEvent('console', result.result, result.variablesReference)
    self.listener.on_session_output_event(self, event)
async def evaluate_expression(self, expression: str, context: Optional[str]) -> dap.EvaluateResponse:
    """Evaluate *expression* in the selected stack frame (if any)."""
    frame = self.selected_frame
    response = await self.request('evaluate', {
        'expression': expression,
        'context': context,
        'frameId': frame.id if frame else None,
    })
    # the spec doesn't say this is optional? But some implementations return no
    # result instead of marking things as not verified
    if response['result'] is None:
        raise dap.Error(True, 'expression did not return a result')
    # variablesReference doesn't appear to be optional in the spec... but some adapters treat it as such
    return dap.EvaluateResponse(response['result'], response.get('variablesReference', 0))
async def stack_trace(self, thread_id: str) -> List[dap.StackFrame]:
    """Request the stack frames of *thread_id* from the adapter."""
    body = await self.request('stackTrace', {'threadId': thread_id})
    return dap.array_from_json(dap.StackFrame.from_json, body['stackFrames'])
async def completions(self, text: str, column: int) -> List[dap.CompletionItem]:
    """Request completion items for *text* at *column*, scoped to the selected frame."""
    frame = self.selected_frame
    response = await self.request('completions', {
        'frameId': frame.id if frame else None,
        'text': text,
        'column': column,
    })
    return array_from_json(dap.CompletionItem.from_json, response['targets'])
async def set_variable(self, variable: dap.Variable, value: str) -> dap.Variable:
    """Ask the adapter to set *variable* to *value*; update and return it."""
    payload = {
        'variablesReference': variable.containerVariablesReference,
        'name': variable.name,
        'value': value,
    }
    response = await self.request('setVariable', payload)
    variable.value = response['value']
    # some adapters omit variablesReference in the response; treat as leaf
    variable.variablesReference = response.get('variablesReference', 0)
    return variable
async def data_breakpoint_info(self, variable: dap.Variable) -> dap.DataBreakpointInfoResponse:
    """Query the adapter for data-breakpoint availability info for *variable*."""
    response = await self.request('dataBreakpointInfo', {
        'variablesReference': variable.containerVariablesReference,
        'name': variable.name,
    })
    return dap.DataBreakpointInfoResponse.from_json(response)

def log_output(self, string: str) -> None:
    """Emit *string* to the debugger output panel (newline appended)."""
    output = dap.OutputEvent('debugger.output', string + '\n', 0)
    self.listener.on_session_output_event(self, output)
def log(self, type: str, value: str) -> None:
    """Route a log message: 'process' goes to the transport log, everything
    else to the debugger console ('error' or info category)."""
    if type == 'process':
        self.transport_log.info(f'⟹ process/stderr :: {value.strip()}')
        return
    category = 'debugger.error' if type == 'error' else 'debugger.info'
    output = dap.OutputEvent(category, value + '\n', 0)
    self.listener.on_session_output_event(self, output)
def load_frame(self, frame: Optional[dap.StackFrame]):
    """Select *frame* and refresh scopes/watches; clear variables if None."""
    self.listener.on_session_selected_frame(self, frame)
    if not frame:
        self.variables.clear()
        self.listener.on_session_updated_variables(self)
        return
    core.run(self.refresh_scopes(frame))
    core.run(self.watch.evaluate(self, self.selected_frame))
async def refresh_scopes(self, frame: dap.StackFrame):
    """Fetch the scopes of *frame* and rebuild the top-level variables list."""
    body = await self.request('scopes', {'frameId': frame.id})
    scopes = dap.array_from_json(dap.Scope.from_json, body['scopes'])
    self.variables = [Variable(self, ScopeReference(s)) for s in scopes]
    self.listener.on_session_updated_variables(self)
async def get_source(self, source: dap.Source) -> str:
    """Fetch the content of *source* from the adapter.

    NOTE(review): 'sourceReference' is sent both inside 'source' and at the
    top level — presumably to satisfy adapters that read only one of the two;
    confirm against the adapters in use.
    """
    body = await self.request('source', {
        'source': {
            'path': source.path,
            'sourceReference': source.sourceReference
        },
        'sourceReference': source.sourceReference
    })
    return body['content']
async def get_variables(self, variablesReference: int, without_names = False) -> List[Variable]:
    """Fetch the child variables of *variablesReference* from the adapter."""
    response = await self.request('variables', {
        'variablesReference': variablesReference
    })
    variables = array_from_json(
        lambda v: dap.Variable.from_json(variablesReference, v),
        response['variables'])
    # vscode seems to remove the names from variables in output events
    if without_names:
        for v in variables:
            v.name = ''
    return [Variable(self, v) for v in variables]
def on_breakpoint_event(self, event: dap.BreakpointEvent):
    """Update the recorded result of a breakpoint reported by the adapter."""
    # only breakpoints the adapter previously assigned an id to can be matched
    b = self.breakpoints_for_id.get(event.result.id)
    if b:
        self.breakpoints.source.set_result(b, event.result)
def on_module_event(self, event: dap.ModuleEvent):
    """Keep the module map in sync with the adapter's module events."""
    reason = event.reason
    if reason in (dap.ModuleEvent.new, dap.ModuleEvent.changed):
        self.modules[event.module.id] = event.module
    elif reason == dap.ModuleEvent.removed:
        # ignore removals of modules we never saw
        self.modules.pop(event.module.id, None)
    self.listener.on_session_updated_modules(self)
def on_loaded_source_event(self, event: dap.LoadedSourceEvent):
    """Keep the source map in sync with the adapter's loaded-source events."""
    reason = event.reason
    if reason in (dap.LoadedSourceEvent.new, dap.LoadedSourceEvent.changed):
        self.sources[event.source.id] = event.source
    elif reason == dap.LoadedSourceEvent.removed:
        # ignore removals of sources we never saw
        self.sources.pop(event.source.id, None)
    self.listener.on_session_updated_sources(self)
# this is a bit of a weird case. Initialized will happen at some point in time
# it depends on when the debug adapter chooses it is ready for configuration information
# when it does happen we can then add all the breakpoints and complete the configuration
# NOTE: some adapters | |
"""Demo Kaplan-Meier surivival analysis.
MPyC demo based on work by <NAME>, partly covered in Section 6.2 of his paper
'Pinocchio-Based Adaptive zk-SNARKs and Secure/Correct Adaptive Function Evaluation',
AFRICACRYPT 2017, LNCS 10239, pp. 21-39, Springer (see https://eprint.iacr.org/2017/013
for the latest version).
The demo implements privacy-preserving survival analysis. The focus is on Kaplan-Meier survival
curves and the accompanying logrank test (see https://en.wikipedia.org/wiki/Logrank_test and
references therein). The Python package lifelines provides extensive support for survival
analysis, and includes several datasets.
The demo uses the following datasets, which are all included in lifelines.datasets, except for
the first one, which is from the R package KMsurv (file btrial.csv included in MPyC GitHub repo).
0=btrial: survival in months in breast cancer study (pos. vs neg. immunohistochemical response)
1=waltons: survival in days of fruit flies (miR-137 vs control group)
2=aml: no recurrence in weeks of acute myelogenous leukemia (maintenance chemo vs no maintenance)
3=lung: survival in days in lung cancer study (male vs female)
4=dd: survival in years of political regimes (democracy vs dictatorship)
5=stanford_heart_transplants: survival in days of heart patients (no transplant vs transplant)
6=kidney_transplant: survival in days after kidney transplant (male vs female)
The numbers 0-6 can be used with the command line option -i of the demo.
Each dataset is essentially a table with timestamped events. For the purpose of the demo, the
selected dataset is split between the m parties running the demo, assigning each ith row (event)
to party i, 0<=i<m. These subsets serve as the private (local) inputs held by each party.
To enable efficient secure union of these private datasets, the datasets are represented as follows.
First the global timeline 1..maxT is determined by securely taking the maximum of all time moments
(timelines are assumed to start at t=1). Then the events are mapped to the timeline 1..maxT by
recording the number of occurrences at each time t=1, ..., t=maxT. This is done separately for
the two types of events (e.g., for dataset 1=waltons, this is done separately for the miR-137
group and the control group).
The parties then secret-share their private datasets with all parties (using the mpc.input()
method). The secure union of the m private datasets is obtained by securely adding m numbers
for each time t=1, ..., t=maxT, thus representing the complete dataset secret-shared between
all parties.
The demo shows two plots to each party: (i) two Kaplan-Meier curves for its private dataset, and
(ii) two Kaplan-Meier curves for the complete dataset, however, aggregated over time intervals of
a given length referred to as the stride (command line option -s). The aggregated plot shows the
rough characteristics of the survival curves without giving away too much information about the
individual events.
The demo also performs a secure logrank test to compare the two (exact) Kaplan-Meier curves. The
secure logrank test is performed in two ways, both relying on MPyC's built-in secure fixed-point
arithmetic (setting the accuracy appropriately for each dataset). The relevant test statistic is
expressed as two sums of maxT terms each, many of which are 0, hence do not contribute to the
final result. To hide which terms are 0, however, we need to spend equal effort for all maxT terms.
Appropriately rewriting the terms, the effort is dominated by a single fixed-point division for
each time t=1, ..., t=maxT.
For most datasets, a much faster way is to exploit the information leaked anyway by the aggregated
plot. Per time interval we get an upper bound on the number of events, which is typically much
smaller than the stride. Therefore, it is favorable to first perform an oblivious compaction of
all the nonzero terms in each time interval. The test statistic is then computed as before,
however, using only one fixed-point division per candidate left.
Finally, the command line option --collapse can be used to aggregate days into weeks, for instance.
The events are collapsed immediately upon loading the dataset, effectively dividing maxT by 7.
The overall processing time is reduced accordingly, in exchange for a coarser result.
"""
import os
import logging
import argparse
from functools import reduce
import pandas as pd
import matplotlib.pyplot as plt
import lifelines.datasets
import lifelines.statistics
import lifelines.plotting
from lifelines import KaplanMeierFitter
from mpyc.runtime import mpc
def fit_plot(T1, T2, E1, E2, title, unit_of_time, label1, label2):
    """Fit and plot two Kaplan-Meier curves (one per group) on a shared axis.

    T1/T2 are event times, E1/E2 the observed-event indicators per group.
    Returns the two fitted KaplanMeierFitter instances.
    """
    kmf1 = KaplanMeierFitter()
    kmf2 = KaplanMeierFitter()
    # alpha=0.05 draws 95% confidence bands; censored events are marked on the curve
    ax = kmf1.fit(T1, E1, label=label1, alpha=0.05).plot(show_censors=True)
    ax = kmf2.fit(T2, E2, label=label2, alpha=0.05).plot(ax=ax, show_censors=True)
    ax.set_title(title)
    if unit_of_time:
        plt.xlabel(f'timeline ({unit_of_time})')
    lifelines.plotting.add_at_risk_counts(kmf1, kmf2, ax=ax, labels=None)
    # prefix the window title with the party id so each party's plots are distinguishable
    # NOTE(review): canvas.get/set_window_title are deprecated in newer matplotlib in
    # favour of canvas.manager.set_window_title — confirm the targeted matplotlib version
    figname = ax.figure.canvas.get_window_title()
    ax.figure.canvas.set_window_title(f'Party {mpc.pid} - {figname}')
    return kmf1, kmf2
def events_to_table(maxT, T, E):
    """Create survival table, one entry for time j=1, ..., j=maxT.

    Returns (d, n) where d[j-1] counts observed events at time j and
    n[j-1] is the number at risk just before time j.
    """
    d = [0] * maxT  # observed events per time
    c = [0] * maxT  # censored events per time
    for t, e in zip(T, E):
        j = round(t) - 1
        d[j] += e
        c[j] += 1 - e
    # number at risk decreases by the events (observed + censored) at each time
    n = []
    at_risk = sum(d) + sum(c)
    for j in range(maxT):
        n.append(at_risk)
        at_risk -= d[j] + c[j]
    return d, n
def events_from_table(d, n):
    """Reconstruct event lists (T, E) from a survival table (d, n).

    Inverse of events_to_table up to the order of events within one time.
    """
    T, E = [], []
    maxT = len(d)
    for j in range(maxT):
        nxt = n[j + 1] if j + 1 < maxT else 0
        total = n[j] - nxt  # events (observed + censored) at time j+1
        T += [j + 1] * total
        E += [True] * d[j] + [False] * (total - d[j])
    return T, E
async def logrank_test(secfxp, d1, d2, n1, n2):
    """Securely compute the logrank test for two secret-shared survival tables.

    d1, d2: observed events per time j=1..maxT; n1, n2: numbers at risk.
    Outputs the (public) chi-squared statistic and returns a lifelines
    StatisticalResult with the corresponding p-value (1 degree of freedom).
    """
    detot = secfxp(0)  # sum_j d1_j - d_j n1_j / n_j
    vtot = secfxp(0)  # sum_j (d_j n1_j / n_j) (n2_j / n_j) (n_j - d_j) / (n_j - 1)
    maxT = len(d1)
    for j in range(maxT):
        print(f'Progress ... {round(100*(j+1)/maxT)}%', end='\r')
        d_j = d1[j] + d2[j]
        n_j = n1[j] + n2[j]
        a = d_j * n1[j]
        b = n_j * (n_j-1)
        c = 1/(n_j * b)  # NB: using only one fixed-point division /
        # a*b*c = d_j n1_j / n_j; a*n2_j*(n_j-d_j)*c is the variance term above
        detot += d1[j] - a * b * c
        vtot += a * n2[j] * (n_j - d_j) * c
        # yield regularly so the MPC runtime stays responsive
        await mpc.throttler(0.01)
    chi = float(await mpc.output(detot**2 / vtot))
    p = lifelines.statistics.chisq_test(chi, 1)
    return lifelines.statistics.StatisticalResult(p_value=p, test_statistic=chi)
def aggregate(d, n, stride):
    """Aggregate the survival table (d, n) over intervals of length *stride*.

    Event counts are summed (securely) per interval; for the at-risk counts
    the value at the start of each interval is kept.
    """
    agg_d = [mpc.sum(d[i:i + stride]) for i in range(0, len(d), stride)]
    return agg_d, n[::stride]
def agg_logrank_test(secfxp, d1, d2, n1, n2, agg_d1, agg_d2, stride):
    """Logrank test sped up via oblivious compaction per time interval.

    The aggregated event counts agg_d1, agg_d2 are public, giving an upper
    bound msn on the number of "active" times (times with observed events)
    per interval of length stride. Only msn oblivious candidate entries per
    interval are kept, hiding which times within the interval are active.
    """
    candidates = []
    maxT = len(d1)
    for start in range(0, maxT, stride):
        group = start // stride
        n_observed_events = agg_d1[group] + agg_d2[group]
        msn = min(stride, n_observed_events)  # upper bound on #active times in this interval
        stop = min(start + stride, maxT)
        logging.info(f'Interval {group + 1} (time {start + 1} to {stop})'
                     f' # observed events = {n_observed_events}')
        if msn == 0:
            continue
        # dummy entries (d1, d2, n1, n2) = (0, 0, 1, 1) contribute 0 to the statistic
        # NOTE(review): [[...]] * msn aliases one inner list msn times; this appears
        # harmless because entries are only replaced wholesale via if_else, never mutated.
        oblivious_table = [[secfxp(0), secfxp(0), secfxp(1), secfxp(1)]] * msn
        # ix is a secret one-hot pointer to the next free slot of the table
        ix = [secfxp(0)] * msn
        for j in range(start, stop):
            is_active = d1[j] + d2[j] != 0
            ix = mpc.if_else(is_active, [1-mpc.sum(ix)] + ix[:-1], ix)
            select = mpc.scalar_mul(is_active, ix)
            new = [d1[j], d2[j], n1[j], n2[j]]
            for i in range(msn):
                oblivious_table[i] = mpc.if_else(select[i], new, oblivious_table[i])
        candidates.extend(oblivious_table)
    # run the (expensive) per-time logrank computation only on the candidates
    return logrank_test(secfxp, *zip(*candidates))
async def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--dataset', type=int, metavar='I',
help=('dataset 0=btrial(default) 1=waltons 2=aml 3=lung 4=dd'
' 5=stanford_heart_transplants 6=kidney_transplant'))
parser.add_argument('-s', '--stride', type=int, metavar='S',
help='interval length for aggregated events')
parser.add_argument('-a', '--accuracy', type=int, metavar='A',
help='number of fractional bits')
parser.add_argument('--collapse', action='store_true',
default=False, help='days->weeks->month->years')
parser.add_argument('--print-tables', action='store_true',
default=False, help='print survival tables')
parser.add_argument('--plot-curves', action='store_true',
default=False, help='plot survival curves')
parser.set_defaults(dataset=0)
args = parser.parse_args()
settings = [('btrial.csv', 12, 28, 'months', 'time', 'death', 'im',
('-ve immunohistochemical response', '+ve immunohistochemical response'), (1, 2)),
('waltons', 10, 32, 'days', 'T', 'E', 'group',
('miR-137', 'control'), ('miR-137', 'control')),
('aml.csv', 16, 32, 'weeks', 'time', 'cens', 'group',
('Maintained', 'Not maintained'), (1, 2)),
('lung', 73, 32, 'days', 'time', 'status', 'sex',
('Male', 'Female'), (1, 2)),
('dd', 3, 48, 'years', 'duration', 'observed', 'democracy',
('Democracy', 'Non-democracy'), ('Democracy', 'Non-democracy')),
('stanford_heart_transplants', 90, 32, 'days', 'time', 'event', 'transplant',
('no transplant', 'transplant'), (0, 1)),
('kidney_transplant', 180, 40, 'days', 'time', 'death', 'sex',
('male', 'female'), (1, 0))]
(name, stride, accuracy, unit_of_time, times, events, groups,
(label1, label2), (value1, value2)) = settings[args.dataset]
if name.endswith('.csv'):
df = pd.read_csv(os.path.join('data', 'surv', name))
name = name[:-4]
else:
df = eval('lifelines.datasets.load_' + name)()
if name == 'lung':
df['status'] = df['status']-1 # 1-2 -> 0-1 = censored-death
elif name == 'stanford_heart_transplants':
df = df[(df['transplant'] == 1) | ~df['id'].isin(set(df[df['transplant'] == 1]['id']))]
df['time'] = round(df['stop'] - df['start'] + 0.5)
elif name == 'kidney_transplant':
df['sex'] = | |
[i["id"] for i in response.json["data"] if i["status"] == "pending"][-1]
response = self.app.patch_json(
"/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, award_id, self.tender_token),
{"data": {"status": "active", "qualified": True, "eligible": True}},
)
self.assertEqual(response.status, "200 OK")
response = self.app.post_json(
"/tenders/{}/awards/{}/complaints?acc_token={}".format(self.tender_id, award_id, self.bid_token),
{"data": test_complaint},
)
self.assertEqual(response.status, "201 Created")
complaint_id = response.json["data"]["id"]
owner_token = response.json["access"]["token"]
response = self.app.patch_json(
"/tenders/{}/awards/{}/complaints/{}?acc_token={}".format(self.tender_id, award_id, complaint_id, owner_token),
{"data": {"status": "pending"}},
status=403,
)
self.assertEqual(response.status, "403 Forbidden")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["status"], "error")
self.assertEqual(
response.json["errors"],
[
{
"description": "Can't update draft complaint into pending status",
"location": "body",
"name": "data",
}
],
)
with change_auth(self.app, ("Basic", ("bot", ""))):
response = self.app.patch_json(
"/tenders/{}/awards/{}/complaints/{}".format(self.tender_id, award_id, complaint_id),
{"data": {"status": "pending"}},
)
self.assertEqual(response.status, "200 OK")
def create_tender_award_complaint(self):
    """Creating award complaints: happy path, draft creation, author
    identifier validation (after COMPLAINT_IDENTIFIER_REQUIRED_FROM), and
    rejection once the tender reaches a terminal status."""
    # bid owner creates a complaint -> 201, author echoed back, Location header set
    response = self.app.post_json(
        "/tenders/{}/awards/{}/complaints?acc_token={}".format(self.tender_id, self.award_id, self.bid_token),
        {
            "data": test_complaint
        },
    )
    self.assertEqual(response.status, "201 Created")
    self.assertEqual(response.content_type, "application/json")
    complaint = response.json["data"]
    self.assertEqual(complaint["author"]["name"], test_author["name"])
    self.assertIn("id", complaint)
    self.assertIn(complaint["id"], response.headers["Location"])
    # a draft complaint can also be created
    complaint_data = deepcopy(test_draft_complaint)
    response = self.app.post_json(
        "/tenders/{}/awards/{}/complaints?acc_token={}".format(self.tender_id, self.award_id, self.bid_token),
        {"data": complaint_data},
        status=201
    )
    self.assertEqual(response.status, "201 Created")
    complaint = response.json["data"]
    self.assertEqual(complaint["status"], "draft")
    # after the identifier-required date, empty identifier fields are rejected with 422
    if get_now() > COMPLAINT_IDENTIFIER_REQUIRED_FROM:
        test_draft_complaint_invalid = deepcopy(test_draft_complaint)
        test_draft_complaint_invalid["author"]["identifier"]["legalName"] = ""
        test_draft_complaint_invalid["author"]["identifier"]["id"] = ""
        response = self.app.post_json(
            "/tenders/{}/awards/{}/complaints?acc_token={}".format(self.tender_id, self.award_id, self.bid_token),
            {"data": test_draft_complaint_invalid},
            status=422
        )
        self.assertEqual(response.status, "422 Unprocessable Entity")
        self.assertEqual(response.content_type, "application/json")
        self.assertEqual(response.json["status"], "error")
        self.assertEqual(
            response.json["errors"],
            [
                {
                    "description": {
                        "identifier": {
                            "id": ["This field is required."],
                            "legalName": ["This field is required."],
                        },
                    },
                    "location": "body",
                    "name": "author",
                }
            ],
        )
    self.set_status("active.awarded")
    response = self.app.get("/tenders/{}".format(self.tender_id))
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["data"]["status"], "active.awarded")
    # complaints are forbidden once the tender is in a terminal status
    self.set_status("unsuccessful")
    response = self.app.post_json(
        "/tenders/{}/awards/{}/complaints?acc_token={}".format(self.tender_id, self.award_id, self.bid_token),
        {"data": test_complaint},
        status=403,
    )
    self.assertEqual(response.status, "403 Forbidden")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(
        response.json["errors"][0]["description"], "Can't add complaint in current (unsuccessful) tender status"
    )
def patch_tender_award_complaint(self):
    """Patching award complaints: allowed/forbidden status transitions for
    the complaint owner, tender owner and the 'bot' user, before and after
    the RELEASE_2020_04_19 rules change, plus 404s for unknown ids."""
    # create a draft complaint as the bid owner
    response = self.app.post_json(
        "/tenders/{}/awards/{}/complaints?acc_token={}".format(self.tender_id, self.award_id, self.bid_token),
        {"data": test_draft_complaint},
    )
    self.assertEqual(response.status, "201 Created")
    self.assertEqual(response.content_type, "application/json")
    complaint = response.json["data"]
    owner_token = response.json["access"]["token"]
    # before the rules change the owner may cancel a draft; after it this is forbidden
    if get_now() < RELEASE_2020_04_19:
        response = self.app.patch_json(
            "/tenders/{}/awards/{}/complaints/{}?acc_token={}".format(
                self.tender_id, self.award_id, complaint["id"], owner_token
            ),
            {"data": {"status": "cancelled", "cancellationReason": "reason"}},
            status=200,
        )
        self.assertEqual(response.status, "200 OK")
        self.assertEqual(response.content_type, "application/json")
        self.assertEqual(response.json["data"]["status"], "cancelled")
        self.assertEqual(response.json["data"]["cancellationReason"], "reason")
    else:
        response = self.app.patch_json(
            "/tenders/{}/awards/{}/complaints/{}?acc_token={}".format(
                self.tender_id, self.award_id, complaint["id"], owner_token
            ),
            {"data": {"status": "cancelled", "cancellationReason": "reason"}},
            status=403,
        )
        self.assertEqual(response.status, "403 Forbidden")
        self.assertEqual(response.content_type, "application/json")
        self.assertEqual(response.json["errors"][0]["description"],
                         "Can't update draft complaint into cancelled status")
    # fresh draft complaint for the remaining scenarios
    response = self.app.post_json(
        "/tenders/{}/awards/{}/complaints?acc_token={}".format(self.tender_id, self.award_id, self.bid_token),
        {"data": test_draft_complaint},
    )
    self.assertEqual(response.status, "201 Created")
    self.assertEqual(response.content_type, "application/json")
    complaint = response.json["data"]
    owner_token = response.json["access"]["token"]
    # the tender owner's token must not be able to patch someone else's complaint
    response = self.app.patch_json(
        "/tenders/{}/awards/{}/complaints/{}?acc_token={}".format(
            self.tender_id, self.award_id, complaint["id"], self.tender_token
        ),
        {"data": {"status": "cancelled", "cancellationReason": "reason"}},
        status=403,
    )
    self.assertEqual(response.status, "403 Forbidden")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["errors"][0]["description"], "Forbidden")
    # the complaint owner may edit plain fields of a draft
    response = self.app.patch_json(
        "/tenders/{}/awards/{}/complaints/{}?acc_token={}".format(
            self.tender_id, self.award_id, complaint["id"], owner_token
        ),
        {"data": {"title": "claim title"}},
    )
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.json["data"]["title"], "claim title")
    # 'claim' is not an allowed status value for this complaint type
    response = self.app.patch_json(
        "/tenders/{}/awards/{}/complaints/{}?acc_token={}".format(
            self.tender_id, self.award_id, complaint["id"], owner_token
        ),
        {"data": {"status": "claim"}},
        status=422,
    )
    self.assertEqual(response.status, "422 Unprocessable Entity")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(
        response.json["errors"][0]["description"],
        ["Value must be one of ['draft', 'pending', 'accepted', 'invalid', 'resolved', 'declined', 'cancelled', 'satisfied', 'stopping', 'stopped', 'mistaken']."]
    )
    # author identifier fields are read-only after the identifier-required date
    if get_now() > COMPLAINT_IDENTIFIER_REQUIRED_FROM:
        denied_patch_fields = {
            "id": "new_id",
            "scheme": "AE-ACCI",
            "legalName": "new_legal_name",
        }
        response = self.app.patch_json(
            "/tenders/{}/awards/{}/complaints/{}?acc_token={}".format(
                self.tender_id, self.award_id, complaint["id"], owner_token
            ),
            {
                "data": {
                    "author": {"identifier": denied_patch_fields},
                    "title": "new_title",
                },
            },
        )
        self.assertEqual(response.status, "200 OK")
        # identifier changes are silently ignored while other fields are applied
        for key, value in denied_patch_fields.items():
            self.assertNotEqual(response.json["data"]["author"]["identifier"].get(key, ""), value)
        self.assertEqual(response.json["data"]["title"], "new_title")
    # draft -> pending: done by the bot after the rules change, by the owner before
    if get_now() > RELEASE_2020_04_19:
        with change_auth(self.app, ("Basic", ("bot", ""))):
            response = self.app.patch_json(
                "/tenders/{}/awards/{}/complaints/{}".format(
                    self.tender_id, self.award_id, complaint["id"]
                ),
                {"data": {"status": "pending"}},
            )
    else:
        response = self.app.patch_json(
            "/tenders/{}/awards/{}/complaints/{}?acc_token={}".format(
                self.tender_id, self.award_id, complaint["id"], owner_token
            ),
            {"data": {"status": "pending"}},
        )
    self.assertEqual(response.status, "200 OK")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["data"]["status"], "pending")
    # unknown tender/award/complaint ids all produce 404 with a precise location
    response = self.app.patch_json(
        "/tenders/some_id/awards/some_id/complaints/some_id",
        {"data": {"status": "resolved", "resolution": "resolution text"}},
        status=404,
    )
    self.assertEqual(response.status, "404 Not Found")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"], [{"description": "Not Found", "location": "url", "name": "tender_id"}]
    )
    response = self.app.patch_json(
        "/tenders/{}/awards/some_id/complaints/some_id".format(self.tender_id),
        {"data": {"status": "resolved", "resolution": "resolution text"}},
        status=404,
    )
    self.assertEqual(response.status, "404 Not Found")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"], [{"description": "Not Found", "location": "url", "name": "award_id"}]
    )
    response = self.app.patch_json(
        "/tenders/{}/awards/{}/complaints/some_id".format(self.tender_id, self.award_id),
        {"data": {"status": "resolved", "resolution": "resolution text"}},
        status=404,
    )
    self.assertEqual(response.status, "404 Not Found")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(response.json["status"], "error")
    self.assertEqual(
        response.json["errors"], [{"description": "Not Found", "location": "url", "name": "complaint_id"}]
    )
    # pending -> stopping: allowed before the rules change, forbidden after
    if RELEASE_2020_04_19 > get_now():
        response = self.app.patch_json(
            "/tenders/{}/awards/{}/complaints/{}?acc_token={}".format(
                self.tender_id, self.award_id, complaint["id"], owner_token
            ),
            {"data": {"status": "stopping", "cancellationReason": "reason"}},
        )
        self.assertEqual(response.status, "200 OK")
        self.assertEqual(response.content_type, "application/json")
        self.assertEqual(response.json["data"]["status"], "stopping")
        self.assertEqual(response.json["data"]["cancellationReason"], "reason")
        # stopping -> cancelled is not a valid owner transition
        response = self.app.patch_json(
            "/tenders/{}/awards/{}/complaints/{}?acc_token={}".format(
                self.tender_id, self.award_id, complaint["id"], owner_token
            ),
            {"data": {"status": "cancelled", "cancellationReason": "reason"}},
            status=403,
        )
        self.assertEqual(response.status, "403 Forbidden")
        self.assertEqual(response.content_type, "application/json")
        self.assertEqual(response.json["errors"][0]["description"],
                         "Can't update complaint from stopping to cancelled status")
        response = self.app.get(
            "/tenders/{}/awards/{}/complaints/{}".format(self.tender_id, self.award_id, complaint["id"])
        )
        self.assertEqual(response.status, "200 OK")
        self.assertEqual(response.content_type, "application/json")
        self.assertEqual(response.json["data"]["status"], "stopping")
        self.assertEqual(response.json["data"]["cancellationReason"], "reason")
    else:
        response = self.app.patch_json(
            "/tenders/{}/awards/{}/complaints/{}?acc_token={}".format(
                self.tender_id, self.award_id, complaint["id"], owner_token
            ),
            {"data": {"status": "stopping", "cancellationReason": "reason"}},
            status=403,
        )
        self.assertEqual(response.status, "403 Forbidden")
        self.assertEqual(response.content_type, "application/json")
        self.assertEqual(
            response.json["errors"][0]["description"], "Can't update complaint from pending to stopping status"
        )
    # once the tender is complete, complaints can no longer be moved to pending
    response = self.app.post_json(
        "/tenders/{}/awards/{}/complaints?acc_token={}".format(self.tender_id, self.award_id, self.bid_token),
        {"data": test_draft_complaint},
    )
    self.assertEqual(response.status, "201 Created")
    self.assertEqual(response.content_type, "application/json")
    complaint = response.json["data"]
    owner_token = response.json["access"]["token"]
    self.set_status("complete")
    if get_now() > RELEASE_2020_04_19:
        with change_auth(self.app, ("Basic", ("bot", ""))):
            response = self.app.patch_json(
                "/tenders/{}/awards/{}/complaints/{}".format(
                    self.tender_id, self.award_id, complaint["id"]
                ),
                {"data": {"status": "pending"}},
                status=403,
            )
    else:
        response = self.app.patch_json(
            "/tenders/{}/awards/{}/complaints/{}?acc_token={}".format(
                self.tender_id, self.award_id, complaint["id"], owner_token
            ),
            {"data": {"status": "pending"}},
            status=403,
        )
    self.assertEqual(response.status, "403 Forbidden")
    self.assertEqual(response.content_type, "application/json")
    self.assertEqual(
        response.json["errors"][0]["description"], "Can't update complaint in current (complete) tender status"
    )
@patch("openprocurement.tender.core.views.complaint.RELEASE_2020_04_19", get_now() - timedelta(days=1))
def bot_patch_tender_award_complaint(self):
complaint_data = deepcopy(test_draft_complaint)
response = self.app.post_json(
"/tenders/{}/awards/{}/complaints?acc_token={}".format(
self.tender_id, self.award_id, list(self.initial_bids_tokens.values())[0]
),
{"data": complaint_data},
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
complaint = response.json["data"]
owner_token = response.json["access"]["token"]
with change_auth(self.app, ("Basic", ("bot", ""))):
response = self.app.patch_json(
"/tenders/{}/awards/{}/complaints/{}?acc_token={}".format(
self.tender_id, self.award_id, complaint["id"], owner_token
),
{"data": {"status": "pending"}},
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"]["status"], "pending")
@patch("openprocurement.tender.core.views.complaint.RELEASE_2020_04_19", get_now() + timedelta(days=1))
def bot_patch_tender_award_complaint_forbidden(self):
complaint_data = deepcopy(test_draft_complaint)
response = self.app.post_json(
"/tenders/{}/awards/{}/complaints?acc_token={}".format(
self.tender_id, self.award_id, list(self.initial_bids_tokens.values())[0]
),
{"data": complaint_data},
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
complaint = response.json["data"]
owner_token = response.json["access"]["token"]
with change_auth(self.app, ("Basic", ("bot", ""))):
response = self.app.patch_json(
"/tenders/{}/awards/{}/complaints/{}?acc_token={}".format(
self.tender_id, self.award_id, complaint["id"], owner_token
),
{"data": {"status": "pending"}},
status=403,
)
self.assertEqual(response.status, "403 Forbidden")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(
response.json["errors"][0]["description"], "Can't update complaint from draft to pending status"
)
    def review_tender_award_complaint(self):
        """Drive an award complaint through the reviewer workflow for every
        terminal status: "invalid", "stopped", "declined" and "satisfied".
        """
        for status in ["invalid", "stopped", "declined", "satisfied"]:
            # Step 1: the bid owner (broker auth) files a complaint.
            self.app.authorization = ("Basic", ("broker", ""))
            response = self.app.post_json(
                "/tenders/{}/awards/{}/complaints?acc_token={}".format(self.tender_id, self.award_id, self.bid_token),
                {
                    "data": test_complaint
                },
            )
            self.assertEqual(response.status, "201 Created")
            self.assertEqual(response.content_type, "application/json")
            complaint = response.json["data"]
            now = get_now()
            # Step 2: under post-2020-04-19 rules the complaint starts in
            # "draft" and only the bot may move it to "pending".
            if RELEASE_2020_04_19 < now:
                self.assertEqual(response.json["data"]["status"], "draft")
                owner_token = response.json["access"]["token"]
                with change_auth(self.app, ("Basic", ("bot", ""))):
                    response = self.app.patch_json(
                        "/tenders/{}/awards/{}/complaints/{}".format(
                            self.tender_id, self.award_id, complaint["id"]),
                        {"data": {"status": "pending"}},
                    )
                self.assertEqual(response.status, "200 OK")
                self.assertEqual(response.content_type, "application/json")
                self.assertEqual(response.json["data"]["status"], "pending")
            # Step 3: the reviewer records a decision text.
            self.app.authorization = ("Basic", ("reviewer", ""))
            response = self.app.patch_json(
                "/tenders/{}/awards/{}/complaints/{}".format(self.tender_id, self.award_id, complaint["id"]),
                {"data": {"decision": "{} complaint".format(status), "rejectReasonDescription": "reject reason"}},
            )
            self.assertEqual(response.status, "200 OK")
            self.assertEqual(response.content_type, "application/json")
            self.assertEqual(response.json["data"]["decision"], "{} complaint".format(status))
            self.assertEqual(response.json["data"]["rejectReasonDescription"], "reject reason")
            # Step 4: declined/satisfied/stopped go through "accepted" first
            # (post-release this update also carries reviewDate/reviewPlace).
            if status in ["declined", "satisfied", "stopped"]:
                data = {"status": "accepted"}
                if RELEASE_2020_04_19 < now:
                    data.update({
                        "reviewDate": now.isoformat(),
                        "reviewPlace": "some",
                    })
                response = self.app.patch_json(
                    "/tenders/{}/awards/{}/complaints/{}".format(self.tender_id, self.award_id, complaint["id"]),
                    {"data": data},
                )
                self.assertEqual(response.status, "200 OK")
                self.assertEqual(response.content_type, "application/json")
                self.assertEqual(response.json["data"]["status"], "accepted")
                if RELEASE_2020_04_19 < now:
                    self.assertEqual(response.json["data"]["reviewPlace"], "some")
                    self.assertEqual(response.json["data"]["reviewDate"], now.isoformat())
                # Pre-release rules attach reviewDate/reviewPlace to the
                # decision update instead of the "accepted" update above.
                now = get_now()
                data = {"decision": "accepted:{} complaint".format(status)}
                if RELEASE_2020_04_19 > now:
                    data.update({
                        "reviewDate": now.isoformat(),
                        "reviewPlace": "some",
                    })
                response = self.app.patch_json(
                    "/tenders/{}/awards/{}/complaints/{}".format(self.tender_id, self.award_id, complaint["id"]),
                    {"data": data},
                )
                self.assertEqual(response.status, "200 OK")
                self.assertEqual(response.content_type, "application/json")
                self.assertEqual(response.json["data"]["decision"], "accepted:{} complaint".format(status))
                if RELEASE_2020_04_19 > now:
                    self.assertEqual(response.json["data"]["reviewPlace"], "some")
                    self.assertEqual(response.json["data"]["reviewDate"], now.isoformat())
                # While a complaint is accepted, the award itself must be
                # locked against updates.
                self.app.authorization = ("Basic", ("token", ""))
                response = self.app.patch_json(
                    "/tenders/{}/awards/{}?acc_token={}".format(self.tender_id, self.award_id, self.tender_token),
                    {"data": {"status": "active", "qualified": True, "eligible": True}},
                    status=403,
                )
                self.assertEqual(response.status, "403 Forbidden")
                self.assertEqual(response.content_type, "application/json")
                self.assertEqual(response.json["errors"][0]["description"], "Can't update award with accepted complaint")
                self.app.authorization = ("Basic", ("reviewer", ""))
            # Step 5: resolve the complaint into the target terminal status
            # (post-release, invalid/stopped also require a rejectReason).
            now = get_now()
            data = {"status": status}
            if RELEASE_2020_04_19 < now:
                if status in ["invalid", "stopped"]:
                    data.update({
                        "rejectReason": "tenderCancelled",
                        "rejectReasonDescription": "reject reason description"
                    })
            response = self.app.patch_json(
                "/tenders/{}/awards/{}/complaints/{}".format(self.tender_id, self.award_id, complaint["id"]),
                {"data": data},
            )
            self.assertEqual(response.status, "200 OK")
            self.assertEqual(response.content_type, "application/json")
            self.assertEqual(response.json["data"]["status"], status)
def review_tender_award_stopping_complaint(self):
if RELEASE_2020_04_19 > get_now():
for status in ["stopped", "declined", "mistaken", "invalid", "satisfied"]:
self.app.authorization = ("Basic", ("broker", ""))
response = self.app.post_json(
"/tenders/{}/awards/{}/complaints?acc_token={}".format(self.tender_id, self.award_id, self.bid_token),
{
"data": test_complaint
},
)
self.assertEqual(response.status, "201 Created")
self.assertEqual(response.content_type, "application/json")
complaint = response.json["data"]
owner_token = response.json["access"]["token"]
url_patch_complaint = "/tenders/{}/awards/{}/complaints/{}".format(
self.tender_id, self.award_id, complaint["id"]
)
response = self.app.patch_json(
"{}?acc_token={}".format(url_patch_complaint, owner_token),
{"data": {"status": "stopping", "cancellationReason": "reason"}},
)
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"]["status"], "stopping")
self.assertEqual(response.json["data"]["cancellationReason"], "reason")
self.app.authorization = ("Basic", ("reviewer", ""))
data = {"decision": "decision", "status": status}
if status in ["invalid", "stopped"]:
data.update({
"rejectReason": "tenderCancelled",
"rejectReasonDescription": "reject reason description"
})
response = self.app.patch_json(url_patch_complaint, {"data": data})
self.assertEqual(response.status, "200 OK")
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response.json["data"]["status"], status)
self.assertEqual(response.json["data"]["decision"], "decision")
else:
pass
# This test exist in patch_tender_complaint method
def review_tender_award_claim(self):
| |
used in combination with crop factors to
provide daily estimates of actual crop evaporation for many crop types.
Parameters:
- airtemp: (array of) daily average air temperatures [Celsius].
- rh: (array of) daily average relative humidity values [%].
- airpress: (array of) daily average air pressure data [Pa].
- Rs: (array of) average daily incoming solar radiation [J m-2 day-1].
Returns:
- Em: (array of) Makkink evaporation values [mm day-1].
Notes
-----
Meteorological measurements standard at 2 m above soil surface.
References
----------
<NAME> (1987). From Penman to Makkink, in Hooghart, C. (Ed.),
Evaporation and Weather, Proceedings and Information. Comm. Hydrological
Research TNO, The Hague. pp. 5-30.
Examples
--------
>>> Em(21.65,67.0,101300.,24200000.)
4.503830479197991
'''
# Test input array/value
airtemp,rh,airpress,Rs = meteolib._arraytest(airtemp,rh,airpress,Rs)
# Calculate Delta and gamma constants
DELTA = meteolib.Delta_calc(airtemp)
gamma = meteolib.gamma_calc(airtemp,rh,airpress)
Lambda = meteolib.L_calc(airtemp)
# calculate Em [mm/day]
Em = 0.65 * DELTA/(DELTA + gamma) * Rs / Lambda
return Em
def hargreaves(tmin, tmax, tmean, et_rad):
    """
    Estimate reference evapotranspiration over grass (ETo) with the
    Hargreaves equation (equation 52 in Allen et al, 1998).

    When solar radiation, relative humidity and/or wind speed data are
    missing it is generally better to estimate them with the other functions
    in this module and use the FAO Penman-Monteith equation; the Hargreaves
    equation is the fallback alternative.

    :param tmin: Minimum daily temperature [deg C]
    :param tmax: Maximum daily temperature [deg C]
    :param tmean: Mean daily temperature [deg C]. If measurements are not
        available it can be estimated as (*tmin* + *tmax*) / 2.
    :param et_rad: Extraterrestrial radiation (Ra) [MJ m-2 day-1]. Can be
        estimated using ``et_rad()``.
    :return: Reference evapotranspiration over grass (ETo) [mm day-1]
    :rtype: float
    """
    # The 0.408 factor converts extraterrestrial radiation given in
    # MJ m-2 day-1 into its equivalent evaporation depth in mm day-1.
    trange = (tmax - tmin) ** 0.5
    eto = 0.0023 * (tmean + 17.8) * trange * 0.408 * et_rad
    return eto
def Ept(airtemp = scipy.array([]),\
        rh = scipy.array([]),\
        airpress = scipy.array([]),\
        Rn = scipy.array([]),\
        G = scipy.array([])):
    '''
    Compute daily Priestley-Taylor evaporation:

    .. math::
        E_{pt} = \\alpha \\frac{R_n - G}{\\lambda} \\cdot \\frac{\\Delta}{\\Delta + \\gamma}

    with the Priestley-Taylor coefficient alpha fixed at 1.26.

    Parameters:
        - airtemp: (array of) daily average air temperatures [Celsius].
        - rh: (array of) daily average relative humidity values [%].
        - airpress: (array of) daily average air pressure data [Pa].
        - Rn: (array of) average daily net radiation [J m-2 day-1].
        - G: (array of) average daily soil heat flux [J m-2 day-1].

    Returns:
        - Ept: (array of) Priestley Taylor evaporation values [mm day-1].

    Notes
    -----
    Meteorological parameters normally measured at 2 m above the surface.

    References
    ----------
    Priestley, C.H.B. and Taylor, R.J., 1972. On the assessment of surface
    heat flux and evaporation using large-scale parameters. Mon. Weather
    Rev. 100:81-82.

    Examples
    --------
    >>> Ept(21.65,67.0,101300.,18200000.,600000.)
    6.349456116128078
    '''
    # Coerce scalar inputs to arrays and validate them.
    airtemp, rh, airpress, Rn, G = meteolib._arraytest(airtemp, rh, airpress, Rn, G)
    # Psychrometric quantities at the given temperature/humidity/pressure.
    slope = meteolib.Delta_calc(airtemp)                  # vapour pressure curve slope
    psychro = meteolib.gamma_calc(airtemp, rh, airpress)  # psychrometric constant
    latent = meteolib.L_calc(airtemp)                     # latent heat of vapourisation
    # Priestley-Taylor evaporation [mm/day] with alpha = 1.26.
    return 1.26 * slope / (slope + psychro) * (Rn - G) / latent
def Epm(airtemp = scipy.array([]),\
        rh = scipy.array([]),\
        airpress = scipy.array([]),\
        Rn = scipy.array([]),\
        G = scipy.array([]),\
        ra = scipy.array([]),\
        rs = scipy.array([])):
    '''
    Compute Penman-Monteith evaporation:

    .. math::
        E_{pm} = \\frac{\\Delta \\cdot (R_n-G)+\\rho \\cdot c_p \\cdot (e_s-e_a)/r_a}{\\lambda \\cdot (\\Delta + \\gamma \\cdot (1+\\frac{r_s}{r_a}))}

    The function works with any time interval (hourly and daily intervals
    are the common choices).  When a plant canopy is wet the surface
    resistance rs becomes zero (stomatal resistance is irrelevant because
    evaporation takes place directly from the wet leaf surface).  The
    aerodynamic resistance ra can be calculated with the ra() function in
    this module from wind speed and height parameters.

    Parameters:
        - airtemp: (array of) daily average air temperatures [Celsius].
        - rh: (array of) daily average relative humidity values [%].
        - airpress: (array of) daily average air pressure data [hPa].
        - Rn: (array of) net radiation input over time interval t [J t-1].
        - G: (array of) soil heat flux input over time interval t [J t-1].
        - ra: aerodynamic resistance [s m-1].
        - rs: surface resistance [s m-1].

    Returns:
        - Epm: (array of) Penman Monteith evaporation values [mm t-1].

    References
    ----------
    Monteith, J.L. (1965). Evaporation and environment. Symp. Soc. Exp. Biol.
    19: 205-224.

    Examples
    --------
    >>> Epm(21.67,67.0,1013.0,14100000.,500000.,104.,70.)
    3.243341146049407
    '''
    # Coerce scalar inputs to arrays and validate them.
    airtemp, rh, airpress, Rn, G, ra, rs = meteolib._arraytest(airtemp, rh, airpress, Rn, G, ra, rs)
    # Thermodynamic quantities; /100. converts Pa to hPa where noted.
    slope = meteolib.Delta_calc(airtemp) / 100.                  # [hPa/K]
    airpress = airpress * 100.                                   # input hPa -> [Pa]
    psychro = meteolib.gamma_calc(airtemp, rh, airpress) / 100.  # [hPa/K]
    latent = meteolib.L_calc(airtemp)                            # [J/kg]
    air_density = meteolib.rho_calc(airtemp, rh, airpress)       # [kg m-3]
    spec_heat = meteolib.cp_calc(airtemp, rh, airpress)          # [J kg-1 K-1]
    # Saturated and actual water vapour pressures, converted to hPa.
    sat_vp = meteolib.es_calc(airtemp) / 100.                    # [hPa]
    act_vp = meteolib.ea_calc(airtemp, rh) / 100.                # [hPa]
    # Penman-Monteith combination equation; result is actual ET in mm t-1.
    Epm = ((slope * (Rn - G) + air_density * spec_heat * (sat_vp - act_vp) / ra)
           / (slope + psychro * (1. + rs / ra))) / latent
    return Epm
def tvardry(rho = scipy.array([]),\
            cp = scipy.array([]),\
            T = scipy.array([]),\
            sigma_t = scipy.array([]),\
            z= float(),\
            d= 0.0,
            C1= 2.9,
            C2= 28.4):
    '''Function to calculate the sensible heat flux from high
    frequency temperature measurements and their standard deviation
    (temperature-variance method):

    .. math::
        H= \\rho c_p \\left(k g (z-d) \\frac{C_2}{C_1^3}\\right)^\\frac{1}{2}\
        \\left( \\frac{\\sigma_T^3}{T}\\right)^\\frac{1}{2}

    Parameters:
        - rho: (array of) air density values [kg m-3].
        - cp: (array of) specific heat at constant temperature values [J kg-1 K-1].
        - T: (array of) temperature data [Celsius].
        - sigma_t: (array of) standard deviation of temperature data [Celsius].
        - z: height [m] above the surface of the temperature measurement.
        - d: displacement height due to vegetation, default set to zero [m].
        - C1: Constant, default set to 2.9 [-] for unstable conditions\
        (de Bruin et al., 1993).
        - C2: Constant, default set to 28.4 [-] for unstable conditions\
        (de Bruin et al., 1993).

    Returns:
        - H: (array of) sensible heat flux [W m-2].

    Notes
    -----
    This function holds only for free convective conditions when C2*z/L >> 1,
    where L is the Obukhov length.

    References
    ----------
    - H.A.R. de Bruin, W. Kohsiek and B.J.J.M. van den Hurk (1993). A \
    verification of some methods to determine the fluxes of momentum, sensible \
    heat and water vapour using standard deviation and structure parameter of \
    scalar meteorological quantities. Boundary-Layer Meteorology 63(3): 231-257.
    - J.E. Tillman (1972), The indirect determination of stability, heat and\
    momentum fluxes in the atmosphere boundary layer from simple scalar\
    variables during dry unstable conditions, Journal of Applied Meteorology\
    11: 783-792.
    - H.F. Vugts, M.J. Waterloo, F.J. Beekman, A.G.C.A. Frumau and L.A.\
    Bruijnzeel. The temperature variance method: a powerful tool in the\
    estimation of actual evaporation rates. In Hydrology of Warm Humid\
    Regions, Proc. of the Yokohama Symp., IAHS\
    Publication No. 216, pages 251-260, July 1993.

    Examples
    --------
    >>> tvardry(1.25,1035.0,25.3,0.25,3.0)
    34.658669290185287
    >>> displ_len=0.25
    >>> tvardry(1.25,1035.0,25.3,0.25,3.0,d=displ_len)
    33.183149497185511
    >>> tvardry(1.25,1035.0,25.3,0.25,3.0,d=displ_len,C2=30)
    34.10507908798597
    '''
    # Test input array/value
    rho,cp,T,sigma_t = meteolib._arraytest(rho,cp,T,sigma_t)
    # Define constants
    k = 0.40 # von Karman constant
    g = 9.81 # acceleration due to gravity [m/s^2]
    #C1 = 2.9 # De Bruin et al., 1992
    #C2 = 28.4 # De Bruin et al., 1992
    # L= Obukhov-length [m]
    # Free-convection limit: sensible heat flux from temperature variance
    # alone; T is converted from Celsius to Kelvin inside the square root.
    H = rho * cp * scipy.sqrt((sigma_t/C1)**3 * k * g * (z-d) / (T+273.15) * C2)
    #else:
    # Legacy (disabled) variant including a stability correction via z/L:
    #zoverL = z/L
    #tvardry = rho * cp * scipy.sqrt((sigma_t/C1)**3 * k*g*(z-d) / (T+273.15) *\
    #  (1-C2*z/L)/(-1*z/L))
    # Legacy (disabled) check removing complex values (square root of a
    # negative argument) from the stability-corrected result:
    #I = find(zoL >= 0 | H.imag != 0);
    #H(I) = scipy.ones(size(I))*NaN;
    return H # sensible heat flux
def gash79(Pg=scipy.array([]),
ER=float,
S=float,
St=float,
p=float,
pt=float):
'''
Function to calculate precipitation interception loss from daily
precipitation values and and vegetation parameters.
Parameters:
- Pg: daily rainfall data [mm].
- ER: evaporation | |
m.name:
new_metal = m
return new_metal
# Loads any custom made metals the user has previously created
# Loads it from data/custom_metals.dat - a binary file
# Parameter: drop - A DropDown object to add the metals to
def load_custom_metals(drop):
    """Load user-created Metal objects from data/custom_metals.dat into the drop-down.

    The file holds a sequence of pickled Metal objects; each one's name is
    appended to Metal.MetalNames and the metal is added to the drop-down via
    add_new_metal() until the end of the file (or a corrupt record) is
    reached.  Returns the (possibly modified) DropDown object.
    """
    try:
        # 'with' guarantees the file is closed on every path, including an
        # unexpected unpickling failure (the original leaked the handle there).
        with open("data/custom_metals.dat", "rb") as f:
            while True:
                try:
                    # Deserialise the next Metal object from the file.
                    # NOTE: pickle.load can execute arbitrary code from the
                    # file - only load files this application wrote itself.
                    new_metal = pickle.load(f)
                except (EOFError, pickle.UnpicklingError):
                    # EOFError: clean end of file; UnpicklingError: corrupt
                    # trailing data - either way, stop reading.
                    break
                # Adds the metal's name to the MetalNames list
                Metal.MetalNames.append(new_metal.name)
                # Adds the custom metal to the drop-down list
                drop = add_new_metal(new_metal, drop)
    except FileNotFoundError:
        print("ERROR: Cannot find data/custom_metals.dat")
    # Returns the modified DropDown item
    return drop
# Adds a new metal object to the MetalList and updates the dropdown box that stores the metals
def add_new_metal(new_metal, drop):
    """Register *new_metal* in Metal.MetalList and refresh *drop*'s options."""
    Metal.MetalList.append(new_metal)
    # Both the drop-down's backing data and its visible options point at the
    # shared Metal.MetalNames list.
    names = Metal.MetalNames
    drop.data = names
    drop.options = names
    return drop
# Calculates the alpha value for the colour of the light
# Takes a wavelength in metres (converted to nm internally; visible range
# fades out between 300-350 nm and 750-800 nm) and an intensity in percent
def set_light_alpha(wavelength, intensity):
    """Return the light's alpha (0-100 int) for a wavelength [m] and intensity [%]."""
    # No light at all: fully transparent.
    if intensity == 0:
        return 0
    nm = wavelength * math.pow(10, 9)  # metres -> nanometres
    # Visibility factor: 1 inside the visible band, fading linearly to 0
    # over the 50 nm edges, and 0 outside 300-800 nm.
    if nm < 350:
        w_mod = 1 - ((350 - nm) / 50) if nm > 300 else 0
    elif nm > 750:
        w_mod = (800 - nm) / 50 if nm < 800 else 0
    else:
        w_mod = 1
    # Alpha scales with intensity and the visibility factor, rounded to int.
    return round(100 * (intensity / 100) * w_mod)
# Used in setting the colour of the light and photons
# wlValues holds the wavelength boundaries between specific colours;
# given a wavelength, this finds the boundary pair that brackets it
def set_min_max(wavelength):
    """Return the (min_wavelength, max_wavelength) boundary pair for *wavelength*.

    Scans every adjacent pair in wlValues; the LAST pair whose first element
    satisfies ``wavelength <= boundary`` wins (same selection rule as the
    original loop).  Falls back to (0, 0) when nothing matches.
    """
    bounds = (0, 0)
    for lower, upper in zip(wlValues, wlValues[1:]):
        # Later matches overwrite earlier ones - the final match is kept.
        if wavelength <= lower:
            bounds = (lower, upper)
    return bounds
# Returns an RGB colour tuple given a wavelength
# The colour band is found from the wavelength's boundaries in wlValues and
# the channel values are interpolated proportionally to how far the
# wavelength sits between those boundaries - e.g. half-way between the
# yellow/red boundary pair gives a colour half-way between yellow and orange
def set_light_colour(wavelength):
    """Return an (r, g, b) tuple for a light of the given wavelength [m]."""
    nm = wavelength * math.pow(10, 9)  # metres -> nanometres
    lower, upper = set_min_max(nm)
    # Fractional position inside the colour band, scaled to a 0-255 channel
    # value (255 when the wavelength sits on the upper boundary).
    mix = round(((nm - lower) / (upper - lower)) * 255)
    # One colour channel is always 0, one is 255 (except in the violet
    # bands) and the remaining channel carries the interpolated value.
    if lower == wlValues[0]:
        # IR edge -> red
        return 255, 0, 0
    if lower == wlValues[1]:
        # red -> yellow
        return 255, mix, 0
    if lower == wlValues[2]:
        # yellow -> green
        return 255 - mix, 255, 0
    if lower == wlValues[3]:
        # green -> cyan
        return 0, 255, mix
    if lower == wlValues[4]:
        # cyan -> blue
        return 0, 255 - mix, 255
    if lower == wlValues[5]:
        # blue -> purple (red channel ramps up to 180)
        return round((mix / 255) * 180), 0, 255
    if lower == wlValues[6]:
        # purple -> UV
        return 180, 0, 255
    # No band matched: black (matches the original fall-through zeros).
    return 0, 0, 0
# Loads from settings.dat
# Reads a boolean from the file that shows individual photons when True
def load_settings():
    """Load the show-individual-photons flag from data/settings.dat.

    Returns the pickled boolean stored in the file.  Falls back to True when
    the file exists but cannot be unpickled, and recreates the file with the
    default True when it is missing entirely.
    """
    # Set to True by default
    checkbox = True
    try:
        # 'with' closes the file even when pickle.load raises (the original
        # skipped f.close() on that path and leaked the handle).
        with open("data/settings.dat", "rb") as f:
            # Deserialises the boolean value saved to the file
            checkbox = pickle.load(f)
    except (EOFError, pickle.UnpicklingError):
        # Corrupt or empty file: keep the default value.
        print("Error reading settings.dat")
    except FileNotFoundError:
        print("settings.dat is missing, creating a new one")
        # Recreate the file with the default value serialised into it.
        with open("data/settings.dat", "wb") as f:
            pickle.dump(True, f)
    return checkbox
# Deletes the contents of file f
def delete_file(f):
    """Erase everything in the already-open file object *f*.

    Shrinks the file to zero bytes and rewinds, leaving it empty with the
    position at 0 and still open for further use.
    """
    f.truncate(0)
    f.seek(0)
# The main game code is run here
def game_loop():
# Creating the loop boolean, this is false until the game exits
game_exit = False
# Starting value definitions
wavelength = 0
intensity = 0
# Appends default metals to the metal list
Metal.MetalList.append(Metal("Sodium", 3.65 * math.pow(10, -19), (100, 100, 100)))
Metal.MetalList.append(Metal("Copper", 7.53 * math.pow(10, -19), (145, 88, 4)))
Metal.MetalList.append(Metal("Zinc", 6.89 * math.pow(10, -19), (185, 195, 185)))
Metal.MetalList.append(Metal("Magnesium", 5.90 * math.pow(10, -19), (205, 205, 205)))
# Sets starting metal to the first one in the list (sodium)
current_metal = Metal.MetalList[0]
# Defines the fonts that the program will use for drawing text
my_font = pygame.font.Font(None, 32)
small_font = pygame.font.Font(None, 25)
# Text objects used to describe the different GUI elements
wave_txt = my_font.render("Wavelength: ", 1, (0, 0, 0))
wave_txt2 = my_font.render("nm", 1, (0, 0, 0))
intensity_txt = my_font.render("Intensity: ", 1, black)
intensity_txt2 = my_font.render("%", 1, black)
metal_txt = my_font.render("Metal: ", 1, black)
stop_txt = my_font.render("Stopping Voltage: ", 1, black)
stop_txt2 = my_font.render("V", 1, black)
# Rectangles on left and right to represent metals
left_rect = MetalRect(10, 400, 50, 150)
right_rect = MetalRect(740, 400, 50, 150)
# Wavelength Slider bar creation
wv_slider = dan_gui.Slider(150, 5, 200, 25, small_font, (100, 850))
# Setting default wavelength
wavelength = wv_slider.get_pos()
# Intensity slider bar creation
int_slider = dan_gui.Slider(150, 40, 200, 25, small_font, (0, 100))
# Setting default intensity
intensity = int_slider.get_pos()
# Stopping voltage slider creation
stop_slider = dan_gui.Slider(300, 550, 200, 25, small_font, (-3, 3), 0.5, 1)
stop_voltage = stop_slider.get_pos()
# Dropdown menu creation
drop = dan_gui.DropDown(90, 90, 120, 25, Metal.MetalNames, my_font)
# Loads custom metals from the file
drop = load_custom_metals(drop)
# 'Create new metal' button creation
btn = dan_gui.Button(250, 90, my_font, "Create New Metal")
# Adding electron speed text to screen
speed_obj = my_font.render("Average speed: 0 ms^-1", 1, (0, 0, 0))
# Settings button
settings_btn = dan_gui.ImageButton(730, 10, my_font, "options")
# Adding buttons to save and load settings
save_button = dan_gui.Button(270, 130, my_font, "Save Values")
load_button = dan_gui.Button(270, 160, my_font, "Load Values")
# Creating surface for transparent light texture
surf = pygame.Surface((display_width, display_height), pygame.SRCALPHA)
surf.set_alpha(set_light_alpha(wavelength, intensity))
# Image for the lamp
lamp_img = pygame.image.load("img/lamp.png")
# Creating menu
cnm_menu = dan_gui.Menu(200, 200, 450, 280, my_font, "Create New Metal")
# Creating text box to add to menu
menu_name_txt = dan_gui.Textbox(80, 5, 200, 25, my_font, ["|"], 15)
# Tuple of values containing each letter of the alphabet
# Used for the 'blocked characters' for a text entry box, also contains symbols
alphabet = ("a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u",
"v", "w", "x", "y", "z", "!", "\"", "£", "$", "%", "^", "&", "*", "(", ")", "_", "-", "+", "=", "?", "\\", "|", "<", ">", "{", "}", "[", | |
#! /use/env/bin python
import os
import copy
from collections import OrderedDict
from CP2K_kit.tools import data_op
from CP2K_kit.tools import log_info
from CP2K_kit.tools import traj_info
def check_step(init_step, end_step, start_frame_id, end_frame_id):

  '''
  check_step: validate the requested frame range against the trajectory.

  Args:
    init_step: int
      init_step is the initial frame id.
    end_step: int
      end_step is the endding frame id.
    start_frame_id: int
      start_frame_id is the starting frame id in the trajectory file.
    end_frame_id: int
      end_frame_id is the endding frame id in the trajectory file.
  Returns:
    none (the process exits with an error message on invalid input)
  '''

  # Each entry pairs a failure condition with the message to report; the
  # checks run in the original order and the process exits on the first hit.
  checks = (
    (init_step > end_step,
     'Input error: the endding step is less than initial step, please check or reset init_step and end_step'),
    (init_step < start_frame_id,
     'Input error: the initial step is less than initial step in trajectory, please check or reset init_step'),
    (end_step > end_frame_id,
     'Input error: the endding step is large than endding step in trajectory, please check or reset end_step'),
  )
  for failed, message in checks:
    if failed:
      log_info.log_error(message)
      exit()
def check_center_inp(center_dic):
  '''
  check_center_inp: check and normalise the input dictionary for center.

  Args:
    center_dic: dictionary
      center_dic contains the parameter for center.
  Returns:
    new_center_dic: dictionary
      new_center_dic is the revised (validated and type-converted) center_dic
  '''
  #As we use pop, so we copy the dic.
  new_center_dic = copy.deepcopy(center_dic)
  # --- center_type: mandatory, must be 'center_box' or 'center_image' ---
  if ( 'center_type' in new_center_dic.keys() ):
    center_type = new_center_dic['center_type']
    if ( center_type == 'center_box' or center_type == 'center_image' ):
      pass
    else:
      log_info.log_error('Input error: only center_box and center_image are supported, please check or set analyze/center/type ')
      exit()
  else:
    log_info.log_error('Input error: no center type, please set analyze/center/type')
    exit()
  # --- center_atom_id: required only for 'center_image'; coerced to int ---
  # NOTE(review): data_op.eval_str appears to return 1 for integer-like
  # strings and 2 for float-like strings (inferred from the error messages
  # in this function) - confirm against data_op.
  if ( new_center_dic['center_type'] == 'center_image' ):
    if ( 'center_atom_id' in new_center_dic.keys() ):
      center_id = new_center_dic['center_atom_id']
      if ( data_op.eval_str(center_id) == 1 ):
        new_center_dic['center_atom_id'] = int(center_id)
      else:
        log_info.log_error('Input error: center atom id should be integer, please check or set analyze/center/center_id')
        exit()
    else:
      log_info.log_error('Input error: no center atom id for center_image, please set analyze/center/center_id')
      exit()
  # --- traj_coord_file: mandatory, normalised to an absolute path ---
  if ( 'traj_coord_file' in new_center_dic.keys() ):
    traj_coord_file = new_center_dic['traj_coord_file']
    if ( os.path.exists(os.path.abspath(traj_coord_file)) ):
      new_center_dic['traj_coord_file'] = os.path.abspath(traj_coord_file)
    else:
      log_info.log_error('%s file does not exist' %(traj_coord_file))
      exit()
  else:
    log_info.log_error('Input error: no coordination trajectory file, please set analyze/center/traj_coord_file')
    exit()
  # --- box: mandatory; needs all three cell vectors A, B and C ---
  if ( 'box' in new_center_dic.keys() ):
    A_exist = 'A' in new_center_dic['box'].keys()
    B_exist = 'B' in new_center_dic['box'].keys()
    C_exist = 'C' in new_center_dic['box'].keys()
  else:
    log_info.log_error('Input error: no box, please set analyze/center/box')
    exit()
  if ( A_exist and B_exist and C_exist ):
    box_A = new_center_dic['box']['A']
    box_B = new_center_dic['box']['B']
    box_C = new_center_dic['box']['C']
  else:
    log_info.log_error('Input error: box setting error, please check analyze/center/box')
    exit()
  # Each cell vector must have exactly 3 numeric components; they are
  # converted from strings to floats in place.
  if ( len(box_A) == 3 and all(data_op.eval_str(i) == 1 or data_op.eval_str(i) == 2 for i in box_A) ):
    new_center_dic['box']['A'] = [float(x) for x in box_A]
  else:
    log_info.log_error('Input error: A vector of box wrong, please check analyze/center/box/A')
    exit()
  if ( len(box_B) == 3 and all(data_op.eval_str(i) == 1 or data_op.eval_str(i) == 2 for i in box_B) ):
    new_center_dic['box']['B'] = [float(x) for x in box_B]
  else:
    log_info.log_error('Input error: B vector of box wrong, please check analyze/center/box/B')
    exit()
  if ( len(box_C) == 3 and all(data_op.eval_str(i) == 1 or data_op.eval_str(i) == 2 for i in box_C) ):
    new_center_dic['box']['C'] = [float(x) for x in box_C]
  else:
    log_info.log_error('Input error: C vector of box wrong, please check analyze/center/box/C')
    exit()
  # --- connect0: optional atom grouping; the per-group sub-dictionaries
  # are validated, then collapsed into two aggregate lists ---
  if ( 'connect0' in new_center_dic.keys() ):
    group_atom = []
    atom_id = []
    group_num = 0
    for i in new_center_dic['connect0'].keys():
      if ( 'group' in i ):
        group_num = group_num+1
        if ( 'atom_id' in new_center_dic['connect0'][i].keys() ):
          atom_id_i = data_op.get_id_list(new_center_dic['connect0'][i]['atom_id'])
          atom_id.append(atom_id_i)
        else:
          log_info.log_error('Input error: no atom id, please set analyze/center/connect/group/atom_id')
          exit()
        if ( 'group_atom' in new_center_dic['connect0'][i].keys() ):
          group_atom_i = new_center_dic['connect0'][i]['group_atom']
          if ( isinstance(group_atom_i, list)):
            # eval_str == 0 here presumably means "plain (non-numeric)
            # string", i.e. an element symbol - TODO confirm in data_op.
            if ( all(data_op.eval_str(x) == 0 for x in group_atom_i) ):
              group_atom.append(group_atom_i)
            else:
              log_info.log_error('Input error: group atoms wrong, please check or reset analyze/center/connect/group/group_atom')
              exit()
          else:
            # A single atom name is wrapped into a one-element list.
            group_atom.append([group_atom_i])
        else:
          log_info.log_error('Input error: no group atoms, please set analyze/center/connect/group/group_atom')
          exit()
    # Replace the original per-group entries with the two aggregate lists.
    for i in center_dic['connect0'].keys():
      new_center_dic['connect0'].pop(i)
    new_center_dic['connect0']['atom_id'] = atom_id
    new_center_dic['connect0']['group_atom'] = group_atom
  return new_center_dic
def check_diffusion_inp(diffusion_dic):
    '''
    check_diffusion_inp: check the input of diffusion.

    Args:
      diffusion_dic: dictionary
        diffusion_dic contains parameters for diffusion
    Returns:
      diffusion_dic: dictionary
        diffusion_dic is the revised diffusion_dic
    '''
    #new_diffusion_dic = copy.deepcopy(diffusion_dic)
    # Validate the diffusion method; default to einstein_sum when absent.
    if ( 'method' in diffusion_dic.keys() ):
        method = diffusion_dic['method']
        if ( method not in ('einstein_sum', 'green_kubo') ):
            log_info.log_error('Input error: only einstein_sum or green_kubo are supported for diffusion calculation')
            exit()
    else:
        diffusion_dic['method'] = 'einstein_sum'
    method = diffusion_dic['method']
    if ( method == 'einstein_sum' ):
        # Einstein summation needs a coordinate trajectory file.
        if ( 'traj_coord_file' in diffusion_dic.keys() ):
            traj_coord_file = diffusion_dic['traj_coord_file']
            if ( os.path.exists(os.path.abspath(traj_coord_file)) ):
                diffusion_dic['traj_coord_file'] = os.path.abspath(traj_coord_file)
                atoms_num, pre_base_block, end_base_block, pre_base, frames_num, each, start_frame_id, end_frame_id, time_step = \
                traj_info.get_traj_info(traj_coord_file, 'coord_xyz')
            else:
                log_info.log_error('Input error: %s file does not exist' %(traj_coord_file))
                exit()
        else:
            log_info.log_error('Input error: no coordination trajectory file, please set analyze/diffusion/traj_coord_file')
            exit()
        if ( 'remove_com' in diffusion_dic.keys() ):
            remove_com = data_op.str_to_bool(diffusion_dic['remove_com'])
            if ( isinstance(remove_com, bool) ):
                diffusion_dic['remove_com'] = remove_com
            else:
                log_info.log_error('Input error: remove_com must be bool, please check or reset analyze/diffusion/remove_com')
                # Bug fix: previously fell through without exit(), keeping the invalid value.
                exit()
        else:
            diffusion_dic['remove_com'] = True
    elif ( method == 'green_kubo' ):
        # Green-Kubo needs a velocity trajectory file.
        if ( 'traj_vel_file' in diffusion_dic.keys() ):
            traj_vel_file = diffusion_dic['traj_vel_file']
            if ( os.path.exists(os.path.abspath(traj_vel_file)) ):
                diffusion_dic['traj_vel_file'] = os.path.abspath(traj_vel_file)
                atoms_num, pre_base_block, end_base_block, pre_base, frames_num, each, start_frame_id, end_frame_id, time_step = \
                traj_info.get_traj_info(traj_vel_file, 'vel')
            else:
                log_info.log_error('Input error: %s file does not exist' %(traj_vel_file))
                exit()
        else:
            log_info.log_error('Input error: no velocity trajectory file, please set analyze/diffusion/traj_vel_file')
            exit()
    # atom_id is mandatory and expanded into an explicit id list.
    if ( 'atom_id' in diffusion_dic.keys() ):
        atom_id = data_op.get_id_list(diffusion_dic['atom_id'])
        diffusion_dic['atom_id'] = atom_id
    else:
        log_info.log_error('Input error: no atom_id, please set analyze/diffusion/atom_id')
        exit()
    # init_step / end_step default to the trajectory's first / last frame ids.
    if ( 'init_step' in diffusion_dic.keys() ):
        init_step = diffusion_dic['init_step']
        if ( data_op.eval_str(init_step) == 1 ):
            diffusion_dic['init_step'] = int(init_step)
        else:
            log_info.log_error('Input error: init_step wrong, please check or set analyze/diffusion/init_step')
            exit()
    else:
        diffusion_dic['init_step'] = start_frame_id
    if ( 'end_step' in diffusion_dic.keys() ):
        end_step = diffusion_dic['end_step']
        if ( data_op.eval_str(end_step) == 1 ):
            diffusion_dic['end_step'] = int(end_step)
        else:
            log_info.log_error('Input error: end_step wrong, please check or set analyze/diffusion/end_step')
            exit()
    else:
        diffusion_dic['end_step'] = end_frame_id
    init_step = diffusion_dic['init_step']
    end_step = diffusion_dic['end_step']
    check_step(init_step, end_step, start_frame_id, end_frame_id)
    # The correlation window may not exceed half of the trajectory length.
    if ( 'max_frame_corr' in diffusion_dic.keys() ):
        max_frame_corr = diffusion_dic['max_frame_corr']
        if ( data_op.eval_str(max_frame_corr) == 1 ):
            if ( int(max_frame_corr) > int(frames_num/2) ):
                log_info.log_error('Input error: max_frame_corr should be less than frames_num/2, please check or reset analyze/diffusion/max_frame_corr')
                exit()
            else:
                diffusion_dic['max_frame_corr'] = int(max_frame_corr)
        else:
            log_info.log_error('Input error: max_frame_corr should be integer, please check or set analyze/diffusion/max_frame_corr')
            exit()
    else:
        diffusion_dic['max_frame_corr'] = int(frames_num/2)
    return diffusion_dic
def check_file_trans_inp(file_trans_dic):
    '''
    check_file_trans_inp: check the input of file_trans.

    Args:
      file_trans_dic: dictionary
        file_trans_dic contains parameters for file_trans.
    Returns:
      file_trans_dic: dictionary
        file_trans_dic is the revised file_trans_dic
    '''
    # The file to be transferred must exist; normalize it to an absolute path.
    if ( 'transd_file' in file_trans_dic.keys() ):
        transd_file = file_trans_dic['transd_file']
        if ( os.path.exists(os.path.abspath(transd_file)) ):
            file_trans_dic['transd_file'] = os.path.abspath(transd_file)
        else:
            log_info.log_error('Input error: %s does not exist' %(transd_file))
            exit()
    else:
        # Message typo fixes: 'transfered'->'transferred', 'analzye'->'analyze'.
        log_info.log_error('Input error: no transferred file, please set analyze/file_trans/transd_file')
        exit()
    # Only the two supported conversion directions are accepted.
    if ( 'trans_type' in file_trans_dic.keys() ):
        trans_type = file_trans_dic['trans_type']
        if ( trans_type not in ('pdb2xyz', 'xyz2pdb') ):
            # Message typo fix: 'pbd2xyz' -> 'pdb2xyz'.
            log_info.log_error('Input error: only pdb2xyz and xyz2pdb are supported, please check or reset analyze/file_trans/trans_type')
            exit()
    else:
        log_info.log_error('Input error: no transfer type, please set analyze/file_trans/trans_type')
        exit()
    return file_trans_dic
def check_geometry_inp(geometry_dic):
'''
check_geometry_inp: check the input of geometry.
Args:
geometry_dic: dictionary
geometry_dic contains parameters for geometry.
Returns:
geometry_dic: dictionary
geometry_dic is the revised geometry_dic
'''
if ( 'coord_num' in geometry_dic ):
coord_num_dic = geometry_dic['coord_num']
if ( 'traj_coord_file' in coord_num_dic.keys() ):
traj_coord_file = coord_num_dic['traj_coord_file']
if ( os.path.exists(os.path.abspath(traj_coord_file)) ):
geometry_dic['coord_num']['traj_coord_file'] = os.path.abspath(traj_coord_file)
atoms_num, pre_base_block, end_base_block, pre_base, frames_num, each, start_frame_id, end_frame_id, time_step = \
traj_info.get_traj_info(os.path.abspath(traj_coord_file), 'coord_xyz')
else:
log_info.log_error('Input error: %s does not exist' %(traj_coord_file))
exit()
else:
log_info.log_error('Input error: no coordination trajectory file, please set analyze/geometry/coord_num/traj_coord_file')
exit()
if ( 'init_step' in coord_num_dic.keys() ):
init_step = coord_num_dic['init_step']
if ( data_op.eval_str(init_step) == 1 ):
geometry_dic['coord_num']['init_step'] = int(init_step)
else:
log_info.log_error('Input error: init_step should be integer, please check or reset analyze/geometry/coord_num/init_step')
exit()
else:
geometry_dic['coord_num']['init_step'] = start_frame_id
if ( 'end_step' in coord_num_dic.keys() ):
end_step = coord_num_dic['end_step']
if ( data_op.eval_str(end_step) == 1 ):
geometry_dic['coord_num']['end_step'] = int(end_step)
else:
log_info.log_error('Input error: end_step should be integer, please check or reset analyze/geometry/coord_num/end_step')
exit()
else:
geometry_dic['coord_num']['end_step'] = end_frame_id
init_step = geometry_dic['coord_num']['init_step']
end_step = geometry_dic['coord_num']['end_step']
check_step(init_step, end_step, start_frame_id, end_frame_id)
if ( 'r_cut' in coord_num_dic.keys() ):
r_cut = coord_num_dic['r_cut']
if ( data_op.eval_str(r_cut) == 1 or data_op.eval_str(r_cut) ==2 ):
geometry_dic['coord_num']['r_cut'] = float(r_cut)
else:
log_info.log_error('Input error: r_cut must be float, please check or reset analyze/geometry/coord_num/r_cut')
else:
geometry_dic['coord_num']['r_cut'] = 6.0
if ( 'box' in coord_num_dic.keys() ):
A_exist = 'A' in | |
of type "logical" in section
"Utilities::oct-conductivity_spectrum"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConductivityFromForces'))
x_octopus_parserlog_ConductivitySpectrumTimeStepFactor = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "ConductivitySpectrumTimeStepFactor" of type "integer" in
section "Utilities::oct-conductivity_spectrum"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConductivitySpectrumTimeStepFactor'))
x_octopus_parserlog_ConvAbsDens = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "ConvAbsDens" of type "float" in section
"SCF::Convergence"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvAbsDens'))
x_octopus_parserlog_ConvAbsEv = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "ConvAbsEv" of type "float" in section "SCF::Convergence"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvAbsEv'))
x_octopus_parserlog_ConvEigenError = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "ConvEigenError" of type "logical" in section
"SCF::Convergence"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvEigenError'))
x_octopus_parserlog_ConvEnergy = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "ConvEnergy" of type "float" in section
"SCF::Convergence"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvEnergy'))
x_octopus_parserlog_ConvertEnd = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "ConvertEnd" of type "integer" in section
"Utilities::oct-convert"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvertEnd'))
x_octopus_parserlog_ConvertEnergyMax = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "ConvertEnergyMax" of type "float" in section
"Utilities::oct-convert"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvertEnergyMax'))
x_octopus_parserlog_ConvertEnergyMin = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "ConvertEnergyMin" of type "float" in section
"Utilities::oct-convert"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvertEnergyMin'))
x_octopus_parserlog_ConvertEnergyStep = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "ConvertEnergyStep" of type "float" in section
"Utilities::oct-convert"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvertEnergyStep'))
x_octopus_parserlog_ConvertFilename = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "ConvertFilename" of type "string" in section
"Utilities::oct-convert"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvertFilename'))
x_octopus_parserlog_ConvertFolder = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "ConvertFolder" of type "string" in section
"Utilities::oct-convert"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvertFolder'))
x_octopus_parserlog_ConvertFTMethod = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "ConvertFTMethod" of type "integer" in section
"Utilities::oct-convert"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvertFTMethod'))
x_octopus_parserlog_ConvertHow = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "ConvertHow" of type "integer" in section
"Utilities::oct-convert"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvertHow'))
x_octopus_parserlog_ConvertIterateFolder = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "ConvertIterateFolder" of type "logical" in section
"Utilities::oct-convert"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvertIterateFolder'))
x_octopus_parserlog_ConvertOutputFilename = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "ConvertOutputFilename" of type "string" in section
"Utilities::oct-convert"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvertOutputFilename'))
x_octopus_parserlog_ConvertOutputFolder = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "ConvertOutputFolder" of type "string" in section
"Utilities::oct-convert"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvertOutputFolder'))
x_octopus_parserlog_ConvertReadSize = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "ConvertReadSize" of type "integer" in section
"Utilities::oct-convert"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvertReadSize'))
x_octopus_parserlog_ConvertScalarOperation = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "ConvertScalarOperation" of type "block" in section
"Utilities::oct-convert"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvertScalarOperation'))
x_octopus_parserlog_ConvertStart = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "ConvertStart" of type "integer" in section
"Utilities::oct-convert"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvertStart'))
x_octopus_parserlog_ConvertStep = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "ConvertStep" of type "integer" in section
"Utilities::oct-convert"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvertStep'))
x_octopus_parserlog_ConvertSubtractFilename = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "ConvertSubtractFilename" of type "string" in section
"Utilities::oct-convert"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvertSubtractFilename'))
x_octopus_parserlog_ConvertSubtractFolder = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "ConvertSubtractFolder" of type "string" in section
"Utilities::oct-convert"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvertSubtractFolder'))
x_octopus_parserlog_ConvertSubtract = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "ConvertSubtract" of type "logical" in section
"Utilities::oct-convert"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvertSubtract'))
x_octopus_parserlog_ConvForce = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "ConvForce" of type "float" in section "SCF::Convergence"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvForce'))
x_octopus_parserlog_ConvRelDens = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "ConvRelDens" of type "float" in section
"SCF::Convergence"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvRelDens'))
x_octopus_parserlog_ConvRelEv = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "ConvRelEv" of type "float" in section "SCF::Convergence"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_ConvRelEv'))
x_octopus_parserlog_Coordinates = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "Coordinates" of type "block" in section
"System::Coordinates"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_Coordinates'))
x_octopus_parserlog_CurrentDensity = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "CurrentDensity" of type "integer" in section
"Hamiltonian"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_CurrentDensity'))
x_octopus_parserlog_CurrentThroughPlane = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "CurrentThroughPlane" of type "block" in section "Output"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_CurrentThroughPlane'))
x_octopus_parserlog_CurvGygiAlpha = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "CurvGygiAlpha" of type "float" in section
"Mesh::Curvilinear::Gygi"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_CurvGygiAlpha'))
x_octopus_parserlog_CurvGygiA = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "CurvGygiA" of type "float" in section
"Mesh::Curvilinear::Gygi"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_CurvGygiA'))
x_octopus_parserlog_CurvGygiBeta = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "CurvGygiBeta" of type "float" in section
"Mesh::Curvilinear::Gygi"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_CurvGygiBeta'))
x_octopus_parserlog_CurvMethod = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "CurvMethod" of type "integer" in section
"Mesh::Curvilinear"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_CurvMethod'))
x_octopus_parserlog_CurvModineJBar = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "CurvModineJBar" of type "float" in section
"Mesh::Curvilinear::Modine"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_CurvModineJBar'))
x_octopus_parserlog_CurvModineJlocal = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "CurvModineJlocal" of type "float" in section
"Mesh::Curvilinear::Modine"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_CurvModineJlocal'))
x_octopus_parserlog_CurvModineJrange = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "CurvModineJrange" of type "float" in section
"Mesh::Curvilinear::Modine"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_CurvModineJrange'))
x_octopus_parserlog_CurvModineXBar = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "CurvModineXBar" of type "float" in section
"Mesh::Curvilinear::Modine"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_CurvModineXBar'))
x_octopus_parserlog_Debug = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "Debug" of type "flag" in section "Execution::Debug"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_Debug'))
x_octopus_parserlog_DegeneracyThreshold = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "DegeneracyThreshold" of type "float" in section "States"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_DegeneracyThreshold'))
x_octopus_parserlog_DeltaEFMM = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "DeltaEFMM" of type "float" in section
"Hamiltonian::Poisson"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_DeltaEFMM'))
x_octopus_parserlog_DensitytoCalc = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "DensitytoCalc" of type "block" in section
"States::ModelMB"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_DensitytoCalc'))
x_octopus_parserlog_DerivativesOrder = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "DerivativesOrder" of type "integer" in section
"Mesh::Derivatives"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_DerivativesOrder'))
x_octopus_parserlog_DerivativesStencil = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "DerivativesStencil" of type "integer" in section
"Mesh::Derivatives"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_DerivativesStencil'))
x_octopus_parserlog_DescribeParticlesModelmb = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "DescribeParticlesModelmb" of type "block" in section
"States::ModelMB"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_DescribeParticlesModelmb'))
x_octopus_parserlog_Dimensions = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "Dimensions" of type "integer" in section "System"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_Dimensions'))
x_octopus_parserlog_DisableOpenCL = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "DisableOpenCL" of type "logical" in section
"Execution::OpenCL"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_DisableOpenCL'))
x_octopus_parserlog_Displacement = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "Displacement" of type "float" in section "Linear
Response::Vibrational Modes"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_Displacement'))
x_octopus_parserlog_DOSEnergyMax = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "DOSEnergyMax" of type "float" in section "Output"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_DOSEnergyMax'))
x_octopus_parserlog_DOSEnergyMin = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "DOSEnergyMin" of type "float" in section "Output"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_DOSEnergyMin'))
x_octopus_parserlog_DOSEnergyPoints = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "DOSEnergyPoints" of type "integer" in section "Output"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_DOSEnergyPoints'))
x_octopus_parserlog_DOSGamma = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "DOSGamma" of type "float" in section "Output"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_DOSGamma'))
x_octopus_parserlog_DoubleFFTParameter = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "DoubleFFTParameter" of type "float" in section
"Mesh::FFTs"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_DoubleFFTParameter'))
x_octopus_parserlog_DoubleGridOrder = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "DoubleGridOrder" of type "integer" in section "Mesh"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_DoubleGridOrder'))
x_octopus_parserlog_DoubleGrid = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "DoubleGrid" of type "logical" in section "Mesh"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_DoubleGrid'))
x_octopus_parserlog_EigensolverArnoldiVectors = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "EigensolverArnoldiVectors" of type "integer" in section
"SCF::Eigensolver::ARPACK"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_EigensolverArnoldiVectors'))
x_octopus_parserlog_EigensolverArpackInitialResid = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "EigensolverArpackInitialResid" of type "integer" in
section "SCF::Eigensolver::ARPACK"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_EigensolverArpackInitialResid'))
x_octopus_parserlog_EigensolverArpackSort = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "EigensolverArpackSort" of type "string" in section
"SCF::Eigensolver::ARPACK"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_EigensolverArpackSort'))
x_octopus_parserlog_EigensolverImaginaryTime = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "EigensolverImaginaryTime" of type "float" in section
"SCF::Eigensolver"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_EigensolverImaginaryTime'))
x_octopus_parserlog_EigensolverMaxIter = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "EigensolverMaxIter" of type "integer" in section
"SCF::Eigensolver"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_EigensolverMaxIter'))
x_octopus_parserlog_EigensolverMinimizationIter = Quantity(
type=str,
shape=[],
description='''
Octopus parser log entry "EigensolverMinimizationIter" of type "integer" in
section "SCF::Eigensolver"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_EigensolverMinimizationIter'))
x_octopus_parserlog_EigensolverParpack = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "EigensolverParpack" of type "logical" in section
"SCF::Eigensolver::ARPACK"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_EigensolverParpack'))
x_octopus_parserlog_EigensolverSaveMemory = Quantity(
type=bool,
shape=[],
description='''
Octopus parser log entry "EigensolverSaveMemory" of type "logical" in section
"SCF::Eigensolver"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_EigensolverSaveMemory'))
x_octopus_parserlog_EigensolverTolerance = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Octopus parser log entry "EigensolverTolerance" of type "float" in section
"SCF::Eigensolver"
''',
categories=[x_octopus_parserlog],
a_legacy=LegacyDefinition(name='x_octopus_parserlog_EigensolverTolerance'))
x_octopus_parserlog_Eigensolver = Quantity(
type=str,
shape=[],
description='''
Octopus parser log | |
dropna = None, prefunc2apply=None, postfunc2apply=None, show_progress=False):
"""
Load the Journal DataFrame from a preprocessed directory, or parse from the raw files.
Parameters
----------
preprocess : bool, default True, Optional
Attempt to load from the preprocessed directory.
columns : list, default None, Optional
Load only this subset of columns
isindict : dict, default None, Optional
Dictionary of format {"ColumnName":"ListofValues"} where "ColumnName" is a data column
and "ListofValues" is a sorted list of valid values. A DataFrame only containing rows that appear in
"ListofValues" will be returned.
duplicate_subset : list, default None, Optional
Drop any duplicate entries as specified by this subset of columns
duplicate_keep : str, default 'last', Optional
If duplicates are being dropped, keep the 'first' or 'last'
(see `pandas.DataFram.drop_duplicates <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop_duplicates.html>`_)
dropna : list, default None, Optional
Drop any NaN entries as specified by this subset of columns
Returns
-------
DataFrame
Journal DataFrame.
"""
if show_progress:
show_progress='Loading Journals'
if preprocess and os.path.exists(os.path.join(self.path2database, 'journal')):
return load_preprocessed_data('journal', path2database=self.path2database, columns=columns,
isindict=isindict, duplicate_subset=duplicate_subset, duplicate_keep=duplicate_keep, dropna=dropna,
prefunc2apply=prefunc2apply, postfunc2apply=postfunc2apply, show_progress=show_progress)
else:
return self.parse_publications()
def load_references(self, preprocess = True, columns = None, isindict = None, duplicate_subset = None,
    duplicate_keep = 'last', noselfcite = False, dropna = None, prefunc2apply=None, postfunc2apply=None, show_progress=False):
    """
    Load the Pub2Ref DataFrame from a preprocessed directory, or parse from the raw files.

    Parameters
    ----------
    preprocess : bool, default True, Optional
        Attempt to load from the preprocessed directory.
    columns : list, default None, Optional
        Load only this subset of columns
    isindict : dict, default None, Optional
        Dictionary of format {"ColumnName":"ListofValues"} where "ColumnName" is a data column
        and "ListofValues" is a sorted list of valid values.  A DataFrame only containing rows that appear in
        "ListofValues" will be returned.
    duplicate_subset : list, default None, Optional
        Drop any duplicate entries as specified by this subset of columns
    duplicate_keep : str, default 'last', Optional
        If duplicates are being dropped, keep the 'first' or 'last'
        (see `pandas.DataFrame.drop_duplicates <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop_duplicates.html>`_)
    dropna : list, default None, Optional
        Drop any NaN entries as specified by this subset of columns
    noselfcite : bool, default False, Optional
        If True, then the preprocessed pub2ref files with self-citations removed will be used.

    Returns
    -------
    DataFrame
        Pub2Ref DataFrame.
    """
    # The self-citation-free citation table lives under a different prefix.
    fileprefix = 'pub2refnoself' if noselfcite else 'pub2ref'
    if show_progress:
        show_progress = 'Loading {}'.format(fileprefix)
    # Fall back to parsing the raw files unless a preprocessed copy is available.
    preprocessed_path = os.path.join(self.path2database, fileprefix)
    if not (preprocess and os.path.exists(preprocessed_path)):
        return self.parse_references()
    return load_preprocessed_data(fileprefix, path2database=self.path2database, columns=columns,
        isindict=isindict, duplicate_subset=duplicate_subset, duplicate_keep=duplicate_keep,
        dropna=dropna, prefunc2apply=prefunc2apply, postfunc2apply=postfunc2apply,
        show_progress=show_progress)
def load_publicationauthoraffiliation(self, preprocess = True, columns = None, isindict = None, duplicate_subset = None,
    duplicate_keep = 'last', dropna = None, prefunc2apply=None, postfunc2apply=None, show_progress=False):
    """
    Load the PublicationAuthorAffiliation DataFrame from a preprocessed directory, or parse from the raw files.

    Parameters
    ----------
    preprocess : bool, default True, Optional
        Attempt to load from the preprocessed directory.
    columns : list, default None, Optional
        Load only this subset of columns
    isindict : dict, default None, Optional
        Dictionary of format {"ColumnName":"ListofValues"} where "ColumnName" is a data column
        and "ListofValues" is a sorted list of valid values.  A DataFrame only containing rows that appear in
        "ListofValues" will be returned.
    duplicate_subset : list, default None, Optional
        Drop any duplicate entries as specified by this subset of columns
    duplicate_keep : str, default 'last', Optional
        If duplicates are being dropped, keep the 'first' or 'last'
        (see `pandas.DataFrame.drop_duplicates <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop_duplicates.html>`_)
    dropna : list, default None, Optional
        Drop any NaN entries as specified by this subset of columns

    Returns
    -------
    DataFrame
        PublicationAuthorAffiliation DataFrame.
    """
    dataname = 'publicationauthoraffiliation'
    if show_progress:
        show_progress = 'Loading Publication Author Affiliation'
    # Prefer the preprocessed table when it exists; otherwise parse the raw files.
    if preprocess and os.path.exists(os.path.join(self.path2database, dataname)):
        return load_preprocessed_data(dataname, path2database=self.path2database, columns=columns,
            isindict=isindict, duplicate_subset=duplicate_subset, duplicate_keep=duplicate_keep,
            dropna=dropna, prefunc2apply=prefunc2apply, postfunc2apply=postfunc2apply,
            show_progress=show_progress)
    return self.parse_publicationauthoraffiliation()
def load_pub2field(self, preprocess = True, columns = None, isindict = None, duplicate_subset = None,
duplicate_keep = 'last', dropna = None, prefunc2apply=None, postfunc2apply=None, show_progress=False):
"""
Load the Pub2Field DataFrame from a preprocessed directory, or parse from the raw files.
Parameters
----------
:param preprocess : bool, default True, Optional
Attempt to load from the preprocessed directory.
:param columns : list, default None, Optional
Load only this subset of columns
:param isindict : dict, default None, Optional
Dictionary of format {"ColumnName":"ListofValues"} where "ColumnName" is a data column
and "ListofValues" is a sorted list of valid values. A DataFrame only containing rows that appear in
"ListofValues" will be returned.
:param duplicate_subset : list, default None, Optional
Drop any duplicate entries as specified by this subset of columns
:param duplicate_keep : str, default 'last', Optional
If duplicates are being dropped, keep the 'first' or 'last'
(see `pandas.DataFram.drop_duplicates <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop_duplicates.html>`_)
:param dropna : list, default None, Optional
Drop any NaN entries as specified by this subset of columns
Returns
-------
DataFrame
Pub2Field DataFrame.
"""
if show_progress:
show_progress='Loading Fields'
if preprocess and os.path.exists(os.path.join(self.path2database, 'pub2field')):
return load_preprocessed_data('pub2field', path2database=self.path2database, columns=columns,
isindict=isindict, duplicate_subset=duplicate_subset, duplicate_keep=duplicate_keep, dropna=dropna,
prefunc2apply=prefunc2apply, postfunc2apply=postfunc2apply, show_progress=show_progress)
else:
return self.parse_fields()
def load_fieldinfo(self, preprocess = True, columns = None, isindict = None, show_progress=False):
"""
Load the Field Information DataFrame from a preprocessed directory, or parse from the raw files.
Parameters
----------
:param preprocess : bool, default True, Optional
Attempt to load from the preprocessed directory.
:param columns : list, default None, Optional
Load only this subset of columns
:param isindict : dict, default None, Optional
Dictionary of format {"ColumnName":"ListofValues"} where "ColumnName" is a data column
and "ListofValues" is a sorted list of valid values. A DataFrame only containing rows that appear in
"ListofValues" will be returned.
:param duplicate_subset : list, default None, Optional
Drop any duplicate entries as specified by this subset of columns
:param duplicate_keep : str, default 'last', Optional
If duplicates are being dropped, keep the 'first' or 'last'
(see `pandas.DataFram.drop_duplicates <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop_duplicates.html>`_)
:param dropna : list, default None, Optional
Drop any NaN entries as specified by this subset of columns
Returns
-------
DataFrame
FieldInformation DataFrame.
"""
if show_progress:
show_progress='Loading Field Info'
if preprocess and os.path.exists(os.path.join(self.path2database, 'fieldinfo')):
return pd.read_hdf(os.path.join(self.path2database, 'fieldinfo', 'fieldnames.hdf'))
else:
return self.parse_fields()
def load_impact(self, preprocess = True, include_yearnormed = True, columns = None, isindict = None, duplicate_subset = None,
duplicate_keep = 'last', dropna = None, prefunc2apply=None, postfunc2apply=None, show_progress=False):
"""
Load the precomputed impact DataFrame from a preprocessed directory.
Parameters
----------
:param preprocess : bool, default True
Attempt to load from the preprocessed directory.
:param include_yearnormed: bool, default True
Normalize all columns by yearly average.
:param columns : list, default None
Load only this subset of columns
:param isindict : dict, default None, Optional
Dictionary of format {"ColumnName":"ListofValues"} where "ColumnName" is a data column
and "ListofValues" is a sorted list of valid values. A DataFrame only containing rows that appear in
"ListofValues" will be returned.
:param duplicate_subset : list, default None, Optional
Drop any duplicate entries as specified by this subset of columns
:param duplicate_keep : str, default 'last', Optional
If duplicates are being dropped, keep the 'first' or 'last'
(see `pandas.DataFram.drop_duplicates <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.drop_duplicates.html>`_)
:param dropna : list, default None, Optional
Drop any NaN entries as specified by this subset of columns
Returns
-------
DataFrame
FieldInformation DataFrame.
"""
if show_progress:
show_progress='Loading Impact'
if include_yearnormed:
def normfunc(impactdf):
impactcolumns = [c for c in list(impactdf) if not c in ['PublicationId', 'Year']]
for c in impactcolumns:
impactdf[c+'_norm'] = impactdf[c]/impactdf[c].mean()
return impactdf
else:
def normfunc(impactdf):
return impactdf
if preprocess and os.path.exists(os.path.join(self.path2database, 'impact')):
return load_preprocessed_data('impact', path2database=self.path2database, columns=columns,
isindict=isindict, duplicate_subset=duplicate_subset, duplicate_keep=duplicate_keep, dropna=dropna,
prefunc2apply=normfunc, show_progress=show_progress)
else:
raise self.compute_impact()
"""
To be rewritten for each specific data source (MAG, WOS, etc.)
"""
def download_from_source(self):
raise NotImplementedError
def parse_affiliations(self, preprocess = False):
raise NotImplementedError
def parse_authors(self, preprocess = False, process_name = True, num_file_lines = 5*10**6):
raise NotImplementedError
def parse_publications(self, preprocess = False, num_file_lines=10**7):
raise NotImplementedError
def parse_references(self, preprocess = False, num_file_lines=10**7):
raise NotImplementedError
def parse_publicationauthoraffiliation(self, preprocess = False, num_file_lines=10**7):
raise NotImplementedError
def parse_fields(self, preprocess = False, num_file_lines=10**7):
raise NotImplementedError
# Analysis
def author_productivity(self, df=None, colgroupby = 'AuthorId', colcountby = 'PublicationId', show_progress=False):
"""
Calculate the total number of publications for each author.
Parameters
----------
:param df : DataFrame, default None, Optional
A DataFrame with the author2publication information. If None then the database 'author2pub_df' is used.
:param colgroupby : str, default 'AuthorId', Optional
| |
from __future__ import division, absolute_import
__copyright__ = "Copyright (C) 2012 <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import six
from loopy.diagnostic import LoopyError, warn
from pytools import ImmutableRecord
import islpy as isl
from pytools.persistent_dict import WriteOncePersistentDict
from loopy.tools import LoopyKeyBuilder
from loopy.version import DATA_MODEL_VERSION
import logging
logger = logging.getLogger(__name__)
# {{{ implemented data info
class ImplementedDataInfo(ImmutableRecord):
"""
.. attribute:: name
The expanded name of the array. Note that, for example
in the case of separate-array-tagged axes, multiple
implemented arrays may correspond to one user-facing
array.
.. attribute:: dtype
.. attribute:: arg_class
.. attribute:: base_name
The user-facing name of the underlying array.
May be *None* for non-array arguments.
.. attribute:: shape
.. attribute:: strides
Strides in multiples of ``dtype.itemsize``.
.. attribute:: unvec_shape
.. attribute:: unvec_strides
Strides in multiples of ``dtype.itemsize`` that accounts for
:class:`loopy.kernel.array.VectorArrayDimTag` in a scalar
manner
.. attribute:: offset_for_name
.. attribute:: stride_for_name_and_axis
A tuple *(name, axis)* indicating the (implementation-facing)
name of the array and axis number for which this argument provides
the strides.
.. attribute:: allows_offset
.. attribute:: is_written
"""
def __init__(self, target, name, dtype, arg_class,
base_name=None,
shape=None, strides=None,
unvec_shape=None, unvec_strides=None,
offset_for_name=None, stride_for_name_and_axis=None,
allows_offset=None,
is_written=None):
from loopy.types import LoopyType
assert isinstance(dtype, LoopyType)
ImmutableRecord.__init__(self,
name=name,
dtype=dtype,
arg_class=arg_class,
base_name=base_name,
shape=shape,
strides=strides,
unvec_shape=unvec_shape,
unvec_strides=unvec_strides,
offset_for_name=offset_for_name,
stride_for_name_and_axis=stride_for_name_and_axis,
allows_offset=allows_offset,
is_written=is_written)
# }}}
# {{{ code generation state
class Unvectorizable(Exception):
pass
class VectorizationInfo(object):
"""
.. attribute:: iname
.. attribute:: length
.. attribute:: space
"""
def __init__(self, iname, length, space):
self.iname = iname
self.length = length
self.space = space
class SeenFunction(ImmutableRecord):
"""
.. attribute:: name
.. attribute:: c_name
.. attribute:: arg_dtypes
a tuple of arg dtypes
"""
def __init__(self, name, c_name, arg_dtypes):
ImmutableRecord.__init__(self,
name=name,
c_name=c_name,
arg_dtypes=arg_dtypes)
class CodeGenerationState(object):
"""
.. attribute:: kernel
.. attribute:: implemented_data_info
a list of :class:`ImplementedDataInfo` objects.
.. attribute:: implemented_domain
The entire implemented domain (as an :class:`islpy.Set`)
i.e. all constraints that have been enforced so far.
.. attribute:: implemented_predicates
A :class:`frozenset` of predicates for which checks have been
implemented.
.. attribute:: seen_dtypes
set of dtypes that were encountered
.. attribute:: seen_functions
set of :class:`SeenFunction` instances
.. attribute:: seen_atomic_dtypes
.. attribute:: var_subst_map
.. attribute:: allow_complex
.. attribute:: vectorization_info
None or an instance of :class:`VectorizationInfo`
.. attribute:: is_generating_device_code
.. attribute:: gen_program_name
None (indicating that host code is being generated)
or the name of the device program currently being
generated.
.. attribute:: schedule_index_end
"""
def __init__(self, kernel,
implemented_data_info, implemented_domain, implemented_predicates,
seen_dtypes, seen_functions, seen_atomic_dtypes, var_subst_map,
allow_complex,
vectorization_info=None, var_name_generator=None,
is_generating_device_code=None,
gen_program_name=None,
schedule_index_end=None):
self.kernel = kernel
self.implemented_data_info = implemented_data_info
self.implemented_domain = implemented_domain
self.implemented_predicates = implemented_predicates
self.seen_dtypes = seen_dtypes
self.seen_functions = seen_functions
self.seen_atomic_dtypes = seen_atomic_dtypes
self.var_subst_map = var_subst_map.copy()
self.allow_complex = allow_complex
self.vectorization_info = vectorization_info
self.var_name_generator = var_name_generator
self.is_generating_device_code = is_generating_device_code
self.gen_program_name = gen_program_name
self.schedule_index_end = schedule_index_end
# {{{ copy helpers
def copy(self, kernel=None, implemented_data_info=None,
implemented_domain=None, implemented_predicates=frozenset(),
var_subst_map=None, vectorization_info=None,
is_generating_device_code=None,
gen_program_name=None,
schedule_index_end=None):
if kernel is None:
kernel = self.kernel
if implemented_data_info is None:
implemented_data_info = self.implemented_data_info
if vectorization_info is False:
vectorization_info = None
elif vectorization_info is None:
vectorization_info = self.vectorization_info
if is_generating_device_code is None:
is_generating_device_code = self.is_generating_device_code
if gen_program_name is None:
gen_program_name = self.gen_program_name
if schedule_index_end is None:
schedule_index_end = self.schedule_index_end
return CodeGenerationState(
kernel=kernel,
implemented_data_info=implemented_data_info,
implemented_domain=implemented_domain or self.implemented_domain,
implemented_predicates=(
implemented_predicates or self.implemented_predicates),
seen_dtypes=self.seen_dtypes,
seen_functions=self.seen_functions,
seen_atomic_dtypes=self.seen_atomic_dtypes,
var_subst_map=var_subst_map or self.var_subst_map,
allow_complex=self.allow_complex,
vectorization_info=vectorization_info,
var_name_generator=self.var_name_generator,
is_generating_device_code=is_generating_device_code,
gen_program_name=gen_program_name,
schedule_index_end=schedule_index_end)
def copy_and_assign(self, name, value):
"""Make a copy of self with variable *name* fixed to *value*."""
var_subst_map = self.var_subst_map.copy()
var_subst_map[name] = value
return self.copy(var_subst_map=var_subst_map)
def copy_and_assign_many(self, assignments):
"""Make a copy of self with *assignments* included."""
var_subst_map = self.var_subst_map.copy()
var_subst_map.update(assignments)
return self.copy(var_subst_map=var_subst_map)
# }}}
@property
def expression_to_code_mapper(self):
return self.ast_builder.get_expression_to_code_mapper(self)
def intersect(self, other):
new_impl, new_other = isl.align_two(self.implemented_domain, other)
return self.copy(implemented_domain=new_impl & new_other)
def fix(self, iname, aff):
new_impl_domain = self.implemented_domain
impl_space = self.implemented_domain.get_space()
if iname not in impl_space.get_var_dict():
new_impl_domain = (new_impl_domain
.add_dims(isl.dim_type.set, 1)
.set_dim_name(
isl.dim_type.set,
new_impl_domain.dim(isl.dim_type.set),
iname))
impl_space = new_impl_domain.get_space()
from loopy.isl_helpers import iname_rel_aff
iname_plus_lb_aff = iname_rel_aff(impl_space, iname, "==", aff)
from loopy.symbolic import pw_aff_to_expr
cns = isl.Constraint.equality_from_aff(iname_plus_lb_aff)
expr = pw_aff_to_expr(aff)
new_impl_domain = new_impl_domain.add_constraint(cns)
return self.copy_and_assign(iname, expr).copy(
implemented_domain=new_impl_domain)
def try_vectorized(self, what, func):
"""If *self* is in a vectorizing state (:attr:`vectorization_info` is
not None), tries to call func (which must be a callable accepting a
single :class:`CodeGenerationState` argument). If this fails with
:exc:`Unvectorizable`, it unrolls the vectorized loop instead.
*func* should return a :class:`GeneratedCode` instance.
:returns: :class:`GeneratedCode`
"""
if self.vectorization_info is None:
return func(self)
try:
return func(self)
except Unvectorizable as e:
warn(self.kernel, "vectorize_failed",
"Vectorization of '%s' failed because '%s'"
% (what, e))
return self.unvectorize(func)
def unvectorize(self, func):
vinf = self.vectorization_info
result = []
novec_self = self.copy(vectorization_info=False)
for i in range(vinf.length):
idx_aff = isl.Aff.zero_on_domain(vinf.space.params()) + i
new_codegen_state = novec_self.fix(vinf.iname, idx_aff)
generated = func(new_codegen_state)
if isinstance(generated, list):
result.extend(generated)
else:
result.append(generated)
from loopy.codegen.result import merge_codegen_results
return merge_codegen_results(self, result)
@property
def ast_builder(self):
if self.is_generating_device_code:
return self.kernel.target.get_device_ast_builder()
else:
return self.kernel.target.get_host_ast_builder()
# }}}
code_gen_cache = WriteOncePersistentDict(
"loopy-code-gen-cache-v3-"+DATA_MODEL_VERSION,
key_builder=LoopyKeyBuilder())
class PreambleInfo(ImmutableRecord):
"""
.. attribute:: kernel
.. attribute:: seen_dtypes
.. attribute:: seen_functions
.. attribute:: seen_atomic_dtypes
.. attribute:: codegen_state
"""
# {{{ main code generation entrypoint
def generate_code_v2(kernel):
"""
:returns: a :class:`CodeGenerationResult`
"""
from loopy.kernel import kernel_state
if kernel.state == kernel_state.INITIAL:
from loopy.preprocess import preprocess_kernel
kernel = preprocess_kernel(kernel)
if kernel.schedule is None:
from loopy.schedule import get_one_scheduled_kernel
kernel = get_one_scheduled_kernel(kernel)
if kernel.state != kernel_state.SCHEDULED:
raise LoopyError("cannot generate code for a kernel that has not been "
"scheduled")
# {{{ cache retrieval
from loopy import CACHING_ENABLED
if CACHING_ENABLED:
input_kernel = kernel
try:
result = code_gen_cache[input_kernel]
logger.debug("%s: code generation cache hit" % kernel.name)
return result
except KeyError:
pass
# }}}
from loopy.type_inference import infer_unknown_types
kernel = infer_unknown_types(kernel, expect_completion=True)
from loopy.check import pre_codegen_checks
pre_codegen_checks(kernel)
logger.info("%s: generate code: start" % kernel.name)
# {{{ examine arg list
from loopy.kernel.data import ValueArg
from loopy.kernel.array import ArrayBase
implemented_data_info = []
for arg in kernel.args:
is_written = arg.name in kernel.get_written_variables()
if isinstance(arg, ArrayBase):
implemented_data_info.extend(
arg.decl_info(
kernel.target,
is_written=is_written,
index_dtype=kernel.index_dtype))
elif isinstance(arg, ValueArg):
implemented_data_info.append(ImplementedDataInfo(
target=kernel.target,
name=arg.name,
dtype=arg.dtype,
arg_class=ValueArg,
is_written=is_written))
else:
raise ValueError("argument type not understood: '%s'" % type(arg))
allow_complex = False
for var in kernel.args + list(six.itervalues(kernel.temporary_variables)):
if var.dtype.involves_complex():
allow_complex = True
# }}}
seen_dtypes = set()
seen_functions = set()
seen_atomic_dtypes = set()
initial_implemented_domain = isl.BasicSet.from_params(kernel.assumptions)
codegen_state = CodeGenerationState(
kernel=kernel,
implemented_data_info=implemented_data_info,
implemented_domain=initial_implemented_domain,
implemented_predicates=frozenset(),
seen_dtypes=seen_dtypes,
seen_functions=seen_functions,
seen_atomic_dtypes=seen_atomic_dtypes,
var_subst_map={},
allow_complex=allow_complex,
var_name_generator=kernel.get_var_name_generator(),
is_generating_device_code=False,
gen_program_name=(
kernel.target.host_program_name_prefix
+ kernel.name
+ kernel.target.host_program_name_suffix),
schedule_index_end=len(kernel.schedule))
from loopy.codegen.result import generate_host_or_device_program
codegen_result = generate_host_or_device_program(
codegen_state,
schedule_index=0)
device_code_str = codegen_result.device_code()
from loopy.check import check_implemented_domains
assert check_implemented_domains(kernel, codegen_result.implemented_domains,
device_code_str)
# {{{ handle preambles
for arg in kernel.args:
seen_dtypes.add(arg.dtype)
for tv in six.itervalues(kernel.temporary_variables):
seen_dtypes.add(tv.dtype)
preambles = kernel.preambles[:]
preamble_info = PreambleInfo(
kernel=kernel,
seen_dtypes=seen_dtypes,
seen_functions=seen_functions,
# a set of LoopyTypes (!)
seen_atomic_dtypes=seen_atomic_dtypes,
codegen_state=codegen_state
)
preamble_generators = (kernel.preamble_generators
+ kernel.target.get_device_ast_builder().preamble_generators())
for prea_gen in preamble_generators:
preambles.extend(prea_gen(preamble_info))
codegen_result = codegen_result.copy(device_preambles=preambles)
# }}}
# For faster unpickling in the common case when implemented_domains isn't needed.
from loopy.tools import LazilyUnpicklingDict
codegen_result = codegen_result.copy(
implemented_domains=LazilyUnpicklingDict(
codegen_result.implemented_domains))
logger.info("%s: generate code: done" % kernel.name)
if CACHING_ENABLED:
code_gen_cache.store_if_not_present(input_kernel, codegen_result)
return codegen_result
def generate_code(kernel, device=None):
if device is not None:
from warnings import warn
warn("passing 'device' to generate_code() is deprecated",
DeprecationWarning, stacklevel=2)
codegen_result = generate_code_v2(kernel)
if len(codegen_result.device_programs) > 1:
raise LoopyError("kernel passed to generate_code yielded multiple "
"device programs. Use generate_code_v2.")
return codegen_result.device_code(), codegen_result.implemented_data_info
# }}}
# {{{ generate function body
def generate_body(kernel):
codegen_result = generate_code_v2(kernel)
if len(codegen_result.device_programs) != 1:
raise LoopyError("generate_body cannot be used on programs "
"that yield more than | |
params["module_name"] = "activation_function_operators"
# params["operator_name"] = "operator_change_activation_function"
#
# return params
#
# def perform_mutation(self, elem):
# res, type, pos = mu.is_activation_assignment(elem)
# #TODO: chto vmesto if?
# if type == 'K':
# for ptn, kwd in enumerate(elem.value.args[0].keywords):
# if kwd.arg == 'activation':
# # kwd.value.s = act.operator_change_activation_function(kwd.value.s)
# del elem.value.args[0].keywords[ptn]
# print("type K")
# elif type == 'L':
# print("type L", elem.value.args[0].args[0].s)
# elem.value.args[0].args[0] = ast.NameConstant(value=None)
# # del elem.value.args[0].args[0]
# # elif type == 'A':
# # # elem.value.args[0].args[1].s = act.operator_change_activation_function(elem.value.args[0].args[1].s)
# # del elem.value.args[0].args[1]
# # print("type A")
# else:
# print("jopa")
# return None
#
# def apply_mutation(self, node, elem, ind, model_params = None):
# # print("be")
# self.perform_mutation(elem)
#########################################
########### Optimiser #################
class ChangeOptimisationFunction(Mutation):
mutationName = "change_optimisation_function"
def dummy(self):
print("lalala")
def is_target_node(self, elem):
return mu.is_specific_call(elem, "compile")
def get_model_params(self, elem):
params = {}
return params
def get_mutation_params(self, optimiser_name = None):
"""Extract a dict of params needed for mutation from a params file
Keyword arguments:
mutation_name -- name of the mutation
Returns: dics (params)
"""
params = {}
params["module_name"] = "optimiser_operators"
params["operator_name"] = "operator_change_optimisation_function"
return params
def perform_mutation(self, elem):
params = self.get_mutation_params()
for keyword in elem.value.keywords:
if keyword.arg == "optimizer":
keyword.value = ast.Call(func=ast.Attribute(value=ast.Name(id=params["module_name"], ctx=ast.Load()),
attr=params["operator_name"], ctx=ast.Load()),
args=[keyword.value,],
keywords=[])
def apply_mutation(self, node, elem, ind, model_params = None):
self.perform_mutation(elem)
class ChangeGradientClip(Mutation):
mutationName = "change_gradient_clip"
# optimiser_definition_type = None
def dummy(self):
print("lalala")
def is_target_node(self, elem):
result, type = mu.is_optimiser_object(elem)
print(result)
return result
def get_model_params(self, elem):
params = {}
return params
def get_mutation_params(self, optimiser_name = None):
"""Extract a dict of params needed for mutation from a params file
Keyword arguments:
mutation_name -- name of the mutation
Returns: dics (params)
"""
params = {}
return params
def perform_mutation(self, elem):
if hasattr(elem.value, 'keywords') and len(elem.value.keywords) > 0:
for k in elem.value.keywords:
if k.arg == 'clipnorm':
k.value = ast.Name(id="properties.change_gradient_clip['clipnorm']", ctx=ast.Load())
if k.arg == 'clipvalue':
k.value = ast.Name(id="properties.change_gradient_clip['clipvalue']", ctx=ast.Load())
else:
# TODO: add errrror
print("we have a problem here")
def apply_mutation(self, node, elem, ind, model_params = None):
self.perform_mutation(elem)
#########################################
########### Validation #################
class RemoveValidationSet(Mutation):
mutationName = "remove_validation_set"
def dummy(self):
print("lalala")
def is_target_node(self, elem):
return mu.is_training_call(elem)
def perform_mutation(self, elem):
if hasattr(elem.value, 'keywords') and len(elem.value.keywords) > 0:
for k in elem.value.keywords:
if k.arg == 'validation_data':
k.value = ast.NameConstant(value=None)
if k.arg == 'validation_split':
k.value = ast.Num(n=0.0)
else:
# TODO: add errrror
print("we have a problem here")
return None
def apply_mutation(self, node, elem, ind):
self.perform_mutation(elem)
#########################################
########### EarlyStopping #################
class ChangeEarlyStoppingPatience(Mutation):
mutationName = "change_earlystopping_patience"
def dummy(self):
print("lalala")
def is_target_node(self, elem):
return mu.is_training_call(elem)
def get_model_params(self, elem):
params = {}
callbacks = None
if hasattr(elem.value, 'keywords') and len(elem.value.keywords) > 0:
for k in elem.value.keywords:
if k.arg == 'callbacks':
callbacks = k.value
params["callbacks"] = callbacks
return params
def get_mutation_params(self):
"""Extract a dict of params needed for mutation from a params file
Keyword arguments:
mutation_name -- name of the mutation
Returns: dics (params)
"""
params = {}
# params["mutation_name"] = mutation_name
# TODO: write the param extraction
# FOR NOW it will be like this, after, we read from the file given the mutation name
params["module_name"] = "training_process_operators"
params["operator_name"] = "operator_change_patience"
return params
def perform_mutation(self, elem):
params = self.get_mutation_params()
for keyword in elem.value.keywords:
if keyword.arg == "callbacks":
keyword.value = ast.Call(func=ast.Attribute(value=ast.Name(id=params["module_name"], ctx=ast.Load()),
attr=params["operator_name"], ctx=ast.Load()),
args=[keyword.value, ],
keywords=[])
def apply_mutation(self, node, elem, ind, model_params = None):
self.perform_mutation(elem)
#########################################
########### Bias #################
class AddBiasMut(Mutation):
mutationName = "add_bias"
def dummy(self):
print("lalala")
def is_target_node(self, elem):
return mu.is_specific_call(elem, 'compile')
def get_model_params(self, elem):
params = {}
if isinstance(elem.value.func, ast.Attribute) \
and hasattr(elem.value.func.value, 'id'):
params["model_name"] = elem.value.func.value.id
else:
print("log, we have a problem")
return params
def get_mutation_params(self):
"""Extract a dict of params needed for mutation from a params file
Keyword arguments:
mutation_name -- name of the mutation
Returns: dics (params)
"""
params = {}
params["module_name"] = "bias_operators"
params["operator_name"] = "operator_add_bias"
return params
def generate_mutation_node(self, elem, model_params_ann = None):
"""Generate a mutation node
Keyword arguments:
mutation_name -- name of a mutation (str)
model_params -- params needed to build a mutation node. depend on the model (list)
Returns: ast node (mutation_node)
"""
model_params = self.get_model_params(elem)
mutation_params = self.get_mutation_params()
mutation_node = ast.Assign(targets=[ast.Name(id=model_params["model_name"], ctx=ast.Store()), ],
value=ast.Call(
func=ast.Attribute(
value=ast.Name(id=mutation_params["module_name"], ctx=ast.Load()),
attr=mutation_params["operator_name"],
ctx=ast.Load()),
args=[ast.Name(id=model_params["model_name"], ctx=ast.Load()), ],
keywords=[]))
return mutation_node
def insert_mutation(self, node, elem, ind, model_params_ann = None):
mutation_node = self.generate_mutation_node(elem, model_params_ann)
node.body.insert(ind, mutation_node)
is_inserted = True
return None
def apply_mutation(self, node, elem, ind, model_params = None):
self.insert_mutation(node, elem, ind)
class RemoveBiasMut(Mutation):
mutationName = "remove_bias"
def dummy(self):
print("lalala")
def is_target_node(self, elem):
return mu.is_specific_call(elem, 'compile')
def get_model_params(self, elem):
params = {}
if isinstance(elem.value.func, ast.Attribute) \
and hasattr(elem.value.func.value, 'id'):
params["model_name"] = elem.value.func.value.id
else:
print("log, we have a problem")
return params
def get_mutation_params(self):
"""Extract a dict of params needed for mutation from a params file
Keyword arguments:
mutation_name -- name of the mutation
Returns: dics (params)
"""
params = {}
params["module_name"] = "bias_operators"
params["operator_name"] = "operator_remove_bias"
return params
def generate_mutation_node(self, elem, model_params_ann = None):
"""Generate a mutation node
Keyword arguments:
mutation_name -- name of a mutation (str)
model_params -- params needed to build a mutation node. depend on the model (list)
Returns: ast node (mutation_node)
"""
model_params = self.get_model_params(elem)
mutation_params = self.get_mutation_params()
mutation_node = ast.Assign(targets=[ast.Name(id=model_params["model_name"], ctx=ast.Store()), ],
value=ast.Call(
func=ast.Attribute(
value=ast.Name(id=mutation_params["module_name"], ctx=ast.Load()),
attr=mutation_params["operator_name"],
ctx=ast.Load()),
args=[ast.Name(id=model_params["model_name"], ctx=ast.Load()), ],
keywords=[]))
return mutation_node
def insert_mutation(self, node, elem, ind, model_params_ann = None):
mutation_node = self.generate_mutation_node(elem, model_params_ann)
node.body.insert(ind, mutation_node)
is_inserted = True
return None
def apply_mutation(self, node, elem, ind, model_params = None):
self.insert_mutation(node, elem, ind)
#########################################
########### Loss #################
# class ChangeLossFunction(Mutation):
# mutationName = "change_loss_function"
#
# def dummy(self):
# print("lalala")
#
# def is_target_node(self, elem):
# return mu.is_specific_call(elem, "compile")
#
# def get_model_params(self, elem):
# params = {}
# return params
#
# def get_mutation_params(self, optimiser_name = None):
# """Extract a dict of params needed for mutation from a params file
#
# Keyword arguments:
# mutation_name -- name of the mutation
#
# Returns: dics (params)
# """
#
# params = {}
# return params
#
# def perform_mutation(self, elem):
# for keyword in elem.value.keywords:
# if keyword.arg == "loss":
# if isinstance(keyword.value, ast.Str):
# old_loss = keyword.value.s
# elif isinstance(keyword.value, ast.Attribute) and hasattr(keyword.value, 'attr'):
# old_loss = keyword.value.attr
# elif isinstance(keyword.value, ast.Call) and hasattr(keyword.value.func, 'attr'):
# old_loss = keyword.value.func.attr
# else:
# old_loss = "custom"
# print("Custom loss detected")
#
# if props.change_loss_function["loss_function_udp"] is not None:
# new_loss_func = props.change_loss_function["loss_function_udp"]
# else:
# loss_functions = copy(const.keras_losses)
# if old_loss.lower() in loss_functions:
# loss_functions.remove(old_loss)
#
# new_loss_func = random.choice(loss_functions)
# print("New Loss Function is:" + str(new_loss_func))
#
# keyword.value = ast.Str(s=new_loss_func)
#
# def apply_mutation(self, node, elem, ind, model_params = None):
# self.perform_mutation(elem)
class ChangeLossFunction(Mutation):
    """Mutation that rewires the ``loss`` argument of a ``compile()`` call.

    The original loss expression is wrapped in a call to
    ``loss_operators.operator_change_loss_function(<original loss>)`` so the
    replacement happens at run time rather than by editing the literal.
    """
    mutationName = "change_loss_function"

    def dummy(self):
        # Placeholder hook kept for interface parity with sibling mutations.
        print("lalala")

    def is_target_node(self, elem):
        """Return True when *elem* is a call to ``compile``."""
        return mu.is_specific_call(elem, "compile")

    def get_model_params(self, elem):
        """This mutation needs no model-level parameters; return an empty dict."""
        return {}

    def get_mutation_params(self, optimiser_name=None):
        """Return the module/operator names used to build the wrapper call.

        Keyword arguments:
        optimiser_name -- unused; kept for signature compatibility

        Returns: dict (params)
        """
        return {
            "module_name": "loss_operators",
            "operator_name": "operator_change_loss_function",
        }

    def perform_mutation(self, elem):
        """Wrap the ``loss`` keyword value of *elem* in the operator call."""
        params = self.get_mutation_params()
        for kw in elem.value.keywords:
            if kw.arg != "loss":
                continue
            # Build a fresh <module>.<operator>(<old value>) call node.
            operator_ref = ast.Attribute(
                value=ast.Name(id=params["module_name"], ctx=ast.Load()),
                attr=params["operator_name"],
                ctx=ast.Load(),
            )
            kw.value = ast.Call(func=operator_ref, args=[kw.value], keywords=[])

    def apply_mutation(self, node, elem, ind, model_params=None):
        self.perform_mutation(elem)
#########################################
########### Dropout #################
class ChangeDropoutRate(Mutation):
    """Mutation that inserts a call rewriting a model's dropout rates.

    Rather than editing layer definitions directly, this mutation inserts
    ``<model> = dropout_operators.operator_change_dropout_rate(<model>)``
    into the enclosing body just before the targeted ``compile()`` call.
    """
    mutationName = "change_dropout_rate"
    def dummy(self):
        # Placeholder hook kept for interface parity with sibling mutations.
        print("lalala")
    def is_target_node(self, elem):
        # NOTE(review): targets the compile() call as the insertion anchor,
        # not the Dropout layers themselves — confirm this is intended.
        return mu.is_specific_call(elem, 'compile')
    def get_model_params(self, elem):
        """Extract the model variable name from the ``compile()`` call node.

        Returns: dict with key "model_name" when the call receiver can be
        identified (e.g. ``model.compile(...)``); empty dict otherwise.
        """
        params = {}
        if isinstance(elem.value.func, ast.Attribute) \
                and hasattr(elem.value.func.value, 'id'):
            params["model_name"] = elem.value.func.value.id
        else:
            print("log, we have a problem")
        return params
    def get_mutation_params(self):
        """Return the module/operator names used to build the mutation node.

        Returns: dict (params)
        """
        params = {}
        params["module_name"] = "dropout_operators"
        params["operator_name"] = "operator_change_dropout_rate"
        return params
    def generate_mutation_node(self, elem, model_params_ann=None):
        """Build the AST assignment implementing the mutation.

        Keyword arguments:
        elem -- the targeted ``compile()`` call node
        model_params_ann -- unused; kept for signature compatibility

        Returns: ast.Assign of the form ``<model> = <module>.<operator>(<model>)``
        """
        model_params = self.get_model_params(elem)
        mutation_params = self.get_mutation_params()
        mutation_node = ast.Assign(
            targets=[ast.Name(id=model_params["model_name"], ctx=ast.Store()), ],
            value=ast.Call(
                func=ast.Attribute(
                    value=ast.Name(id=mutation_params["module_name"], ctx=ast.Load()),
                    attr=mutation_params["operator_name"],
                    ctx=ast.Load()),
                args=[ast.Name(id=model_params["model_name"], ctx=ast.Load()), ],
                keywords=[]))
        return mutation_node
    def insert_mutation(self, node, elem, ind, model_params_ann=None):
        """Insert the generated mutation node into *node*'s body at index *ind*."""
        mutation_node = self.generate_mutation_node(elem, model_params_ann)
        # Unconditional insert; the previous unused `is_inserted` flag was removed.
        node.body.insert(ind, mutation_node)
        return None
    def apply_mutation(self, node, elem, ind, model_params=None):
        self.insert_mutation(node, elem, ind)
#########################################
########### Weights #################
class ChangeWeightsInitialisation(Mutation):
mutationName = "change_weights_initialisation"
# applyOnce = False
def dummy(self):
print("lalala")
def is_target_node(self, elem):
return mu.is_specific_call(elem, 'compile')
def get_model_params(self, elem):
params = {}
if isinstance(elem.value.func, ast.Attribute) \
and hasattr(elem.value.func.value, 'id'):
params["model_name"] = elem.value.func.value.id
else:
print("log, we have a problem")
return params
def get_mutation_params(self):
"""Extract a dict of params needed for mutation from a params file
| |
import array
import copy
import pickle
import numpy as np
import pytest
from mspasspy.ccore.seismic import (_CoreSeismogram,
_CoreTimeSeries,
Seismogram,
SeismogramEnsemble,
SlownessVector,
TimeSeries,
TimeSeriesEnsemble,
TimeReferenceType)
from mspasspy.ccore.utility import (AtomicType,
dmatrix,
ErrorLogger,
ErrorSeverity,
LogData,
Metadata,
MetadataDefinitions,
MsPASSError,
ProcessingHistory,
SphericalCoordinate)
from mspasspy.ccore.algorithms.basic import ExtractComponent
def make_constant_data_ts(d, t0=0.0, dt=0.1, nsamp=5, val=1.0):
    """Fill a TimeSeries (or _CoreTimeSeries) data vector with a constant.

    Sets the start time, sample interval and number of points on *d*, marks
    it live, and writes *val* into the first *nsamp* samples.  Used for
    testing arithmetic operators.

    Parameters
    ----------
    d : TimeSeries or _CoreTimeSeries
        Skeleton object to fill; modified in place.
    t0 : float, optional
        Data start time.  The default is 0.0.
    dt : float, optional
        Sample interval.  The default is 0.1.
    nsamp : int, optional
        Length of the data vector to generate.  The default is 5.
    val : float, optional
        Constant value written to every sample.  The default is 1.0.

    Returns
    -------
    The same object *d*, filled and marked live (also modified in place).
    """
    d.npts = nsamp
    d.t0 = t0
    d.dt = dt
    d.set_live()
    for i in range(nsamp):
        d.data[i] = val
    return d
def make_constant_data_seis(d, t0=0.0, dt=0.1, nsamp=5, val=1.0):
    """Fill a Seismogram (or _CoreSeismogram) data matrix with a constant.

    Sets the start time, sample interval and number of points on *d*, marks
    it live, and writes *val* into all three components of the first
    *nsamp* samples.  Used for testing arithmetic operators.

    Parameters
    ----------
    d : Seismogram or _CoreSeismogram
        Skeleton object to fill; modified in place.
    t0 : float, optional
        Data start time.  The default is 0.0.
    dt : float, optional
        Sample interval.  The default is 0.1.
    nsamp : int, optional
        Length of the data vector to generate.  The default is 5.
    val : float, optional
        Constant value written to every sample.  The default is 1.0.

    Returns
    -------
    The same object *d*, filled and marked live (also modified in place).
    """
    d.npts = nsamp
    d.t0 = t0
    d.dt = dt
    d.set_live()
    for i in range(nsamp):
        for k in range(3):
            d.data[k, i] = val
    return d
def setup_function(function):
    """Per-test setup hook invoked by pytest before each test; intentionally a no-op."""
    pass
def test_dmatrix():
    """Exercise dmatrix construction, arithmetic, indexing and slicing."""
    dm = dmatrix()
    assert dm.rows() == 0
    # Sized construction and basic shape queries.
    dm = dmatrix(9, 4)
    assert dm.rows() == 9
    assert dm.columns() == 4
    assert dm.size == 4*9
    assert len(dm) == 9
    assert dm.shape == (9, 4)
    # Construction from a list of array.array rows.
    md = [array.array('l', (0 for _ in range(5))) for _ in range(3)]
    for i in range(3):
        for j in range(5):
            md[i][j] = i*5+j
    dm = dmatrix(md)
    assert np.equal(dm, md).all()
    # Copy construction and zeroing.
    dm_c = dmatrix(dm)
    assert (dm_c[:] == dm).all()
    dm_c.zero()
    assert not dm_c[:].any()
    # Fortran-ordered double ndarray round-trip and arithmetic operators.
    md = np.zeros((7, 4), dtype=np.double, order='F')
    for i in range(7):
        for j in range(4):
            md[i][j] = i*4+j
    dm = dmatrix(md)
    assert (dm == md).all()
    assert (dm.transpose() == md.transpose()).all()
    assert (dm * 3.14 == md * 3.14).all()
    assert (2.17 * dm == 2.17 * md).all()
    assert (dm * dm.transpose() == np.matmul(md, md.transpose())).all()
    with pytest.raises(MsPASSError, match='size mismatch'):
        dm * dm
    dm_c = dmatrix(dm)
    dm += dm_c
    assert (dm == md+md).all()
    dm += md
    assert (dm == md+md+md).all()
    assert type(dm) == dmatrix
    dm -= dm_c
    dm -= dm_c
    dm -= md
    assert not dm[:].any()
    assert type(dm) == dmatrix
    dm_c = dmatrix(dm)
    # C-ordered single-precision input.
    md = np.zeros((7, 4), dtype=np.single, order='C')
    for i in range(7):
        for j in range(4):
            md[i][j] = i*4+j
    dm = dmatrix(md)
    assert (dm == md).all()
    # Integer input; np.int_ replaces the np.int alias removed in NumPy 1.24.
    md = np.zeros((7, 4), dtype=np.int_, order='F')
    for i in range(7):
        for j in range(4):
            md[i][j] = i*4+j
    dm = dmatrix(md)
    assert (dm == md).all()
    # Unicode input is converted to float; np.str_/np.float64 replace the
    # np.unicode_/np.float_ aliases removed in NumPy 2.0.
    md = np.zeros((7, 4), dtype=np.str_, order='C')
    for i in range(7):
        for j in range(4):
            md[i][j] = i*4+j
    dm = dmatrix(md)
    assert (dm == np.float64(md)).all()
    # Indexing and slicing against an equivalent ndarray.
    md = np.zeros((53, 37), dtype=np.double, order='C')
    for i in range(53):
        for j in range(37):
            md[i][j] = i*37+j
    dm = dmatrix(md)
    assert dm[17, 23] == md[17, 23]
    assert (dm[17] == md[17]).all()
    assert (dm[::] == md[::]).all()
    assert (dm[3::] == md[3::]).all()
    assert (dm[:5:] == md[:5:]).all()
    assert (dm[::7] == md[::7]).all()
    assert (dm[-3::] == md[-3::]).all()
    assert (dm[:-5:] == md[:-5:]).all()
    assert (dm[::-7] == md[::-7]).all()
    assert (dm[11:41:7] == md[11:41:7]).all()
    assert (dm[-11:-41:-7] == md[-11:-41:-7]).all()
    assert (dm[3::, 13] == md[3::, 13]).all()
    assert (dm[19, :5:] == md[19, :5:]).all()
    assert (dm[::-7, ::-11] == md[::-7, ::-11]).all()
    # Out-of-bounds reads and writes must raise IndexError.
    with pytest.raises(IndexError, match='out of bounds for axis 1'):
        dummy = dm[3, 50]
    with pytest.raises(IndexError, match='out of bounds for axis 0'):
        dummy = dm[80]
    with pytest.raises(IndexError, match='out of bounds for axis 1'):
        dm[3, 50] = 1.0
    with pytest.raises(IndexError, match='out of bounds for axis 0'):
        dm[60, 50] = 1
    # Scalar, string-coerced, row, full, column and block assignment.
    dm[7, 17] = 3.14
    assert dm[7, 17] == 3.14
    dm[7, 17] = '6.28'
    assert dm[7, 17] == 6.28
    dm[7] = 10
    assert (dm[7] == 10).all()
    dm[::] = md
    assert (dm == md).all()
    dm[:, -7] = 3.14
    assert (dm[:, -7] == 3.14).all()
    dm[17, :] = 3.14
    assert (dm[17, :] == 3.14).all()
    dm[3:7, -19:-12] = 3.14
    assert (dm[3:7, -19:-12] == 3.14).all()
def test_ErrorLogger():
    """Logging one error creates a single entry carrying all given fields."""
    log = ErrorLogger()
    assert log.log_error('1', '2', ErrorSeverity(3)) == 1
    entry = log[0]
    assert entry.algorithm == '1'
    assert entry.message == '2'
    assert entry.badness == ErrorSeverity.Complaint
    assert entry.job_id == log.get_job_id()
def test_LogData():
    """LogData is built from a dict and round-trips through str/eval."""
    fields = {"job_id": 0, "p_id": 1, "algorithm": "alg",
              "message": "msg", "badness": ErrorSeverity(2)}
    ld = LogData(fields)
    assert ld.job_id == 0
    assert ld.p_id == 1
    assert ld.algorithm == "alg"
    assert ld.message == "msg"
    assert ld.badness == ErrorSeverity.Suspect
    # The string form is a dict literal that reconstructs an equal object.
    assert str(ld) == str(LogData(eval(str(ld))))
def test_Metadata():
    """Exercise Metadata: reference semantics, typed storage, pickling,
    modification tracking, erasure and the += merge operator."""
    md = Metadata()
    assert repr(md) == 'Metadata({})'
    # put/get hand out references: mutations through any alias are visible
    # in the stored dict.
    dic = {1: 1}
    md.put('dict', dic)
    val = md.get('dict')
    val[2] = 2
    del val
    dic[3] = 3
    del dic
    md['dict'][4] = 4
    assert md['dict'] == {1: 1, 2: 2, 3: 3, 4: 4}
    # Store a mix of value types, including keys/values with quotes and
    # control characters, then verify a pickle round-trip preserves them.
    md = Metadata({'array': np.array([3, 4])})
    md['dict'] = {1: 1, 2: 2}
    md['str\'i"ng'] = 'str\'i"ng'
    md["str'ing"] = "str'ing"
    md['double'] = 3.14
    md['bool'] = True
    md['int'] = 7
    # 'string' is deliberately written twice; the second value wins.
    md["string"] = "str\0ing"
    md["string"] = "str\ning"
    md["str\ting"] = "str\ting"
    md["str\0ing"] = "str\0ing"
    md["str\\0ing"] = "str\\0ing"
    md_copy = pickle.loads(pickle.dumps(md))
    for i in md:
        if i == 'array':
            # ndarray equality is elementwise; reduce with .all().
            assert (md[i] == md_copy[i]).all()
        else:
            assert md[i] == md_copy[i]
    # A Metadata built from a plain dict starts unmodified; pickling
    # preserves the modification-tracking state.
    md_copy2 = Metadata(dict(md))
    assert not md_copy2.modified()
    assert md.modified() == md_copy.modified()
    # Keys are chosen to equal the type names that Metadata.type() reports.
    md = Metadata({
        "<class 'numpy.ndarray'>": np.array([3, 4]),
        "<class 'dict'>": {1: 1, 2: 2},
        'string': 'string',
        'double': 3.14,
        'bool': True,
        'long': 7,
        "<class 'bytes'>": b'\xba\xd0\xba\xd0',
        "<class 'NoneType'>": None})
    for i in md:
        assert md.type(i) == i
    md[b'\xba\xd0'] = b'\xba\xd0'
    md_copy = pickle.loads(pickle.dumps(md))
    for i in md:
        if i == "<class 'numpy.ndarray'>":
            assert (md[i] == md_copy[i]).all()
        else:
            assert md[i] == md_copy[i]
    # del and erase() are equivalent ways to remove a key.
    del md["<class 'numpy.ndarray'>"]
    md_copy.erase("<class 'numpy.ndarray'>")
    assert not "<class 'numpy.ndarray'>" in md
    assert not "<class 'numpy.ndarray'>" in md_copy
    assert md.keys() == md_copy.keys()
    # Metadata is not reversible.
    with pytest.raises(TypeError, match='Metadata'):
        reversed(md)
    # += merges; non-string keys are coerced to strings and the right-hand
    # side wins on collision (3 -> 30).
    md = Metadata({1: 1, 3: 3})
    md_copy = Metadata({2: 2, 3: 30})
    md += md_copy
    assert md.__repr__() == "Metadata({'1': 1, '2': 2, '3': 30})"
    # Test with real data
    dic = {'_format': 'MSEED', 'arrival.time': 1356901212.242550, 'calib': 1.000000,
           'chan': 'BHZ', 'delta': 0.025000, 'deltim': -1.000000, 'endtime': 1356904168.544538,
           'iphase': 'P', 'loc': '',
           'mseed': {'dataquality': 'D', 'number_of_records': 36, 'encoding': 'STEIM2',
                     'byteorder': '>', 'record_length': 4096, 'filesize': 726344704},
           'net': 'CI', 'npts': 144000, 'phase': 'P', 'sampling_rate': 40.000000,
           'site.elev': 0.258000, 'site.lat': 35.126900, 'site.lon': -118.830090,
           'site_id': '5fb6a67b37f8eef2f0658e9a', 'sta': 'ARV', 'starttime': 1356900568.569538
           }
    md = Metadata(dic)
    md['mod'] = 'mod'
    md_copy = pickle.loads(pickle.dumps(md))
    for i in md:
        assert md[i] == md_copy[i]
    assert md.modified() == md_copy.modified()
@pytest.fixture(params=[Seismogram, SeismogramEnsemble,
                        TimeSeries, TimeSeriesEnsemble])
def MetadataBase(request):
    """Parametrized fixture yielding each Metadata-derived data class in turn,
    so test_MetadataBase runs once per type."""
    return request.param
def test_MetadataBase(MetadataBase):
    """Run the shared Metadata behavior checks against each data class
    supplied by the MetadataBase fixture."""
    md = MetadataBase()
    assert MetadataBase.__name__ + "({" in repr(md)
    # put/get hand out references: mutations through any alias are visible
    # in the stored dict.
    dic = {1: 1}
    md.put('dict', dic)
    val = md.get('dict')
    val[2] = 2
    del val
    dic[3] = 3
    del dic
    md['dict'][4] = 4
    assert md['dict'] == {1: 1, 2: 2, 3: 3, 4: 4}
    # Store a mix of value types, including keys/values with quotes,
    # newlines, tabs, NUL bytes and raw bytes.
    md = MetadataBase()
    md["<class 'numpy.ndarray'>"] = np.array([3, 4])
    md["<class 'dict'>"] = {1: 1, 2: 2}
    md['string'] = 'str\'i"ng'
    md["str'ing"] = "str'ing"
    md['double'] = 3.14
    md['bool'] = True
    md['long'] = 7
    # "str\ning" is deliberately written twice; the second value wins.
    md["str\ning"] = "str\0ing"
    md["str\ning"] = "str\ning"
    md["str\ting"] = "str\ting"
    md["str\0ing"] = "str\0ing"
    md["str\\0ing"] = "str\\0ing"
    md["<class 'bytes'>"] = b'\xba\xd0\xba\xd0'
    md["<class 'NoneType'>"] = None
    md[b'\xba\xd0'] = b'\xba\xd0'
    md_copy = MetadataBase(md)
    for i in md:
        if i == 'array' or i == "<class 'numpy.ndarray'>":
            # ndarray equality is elementwise; reduce with .all().
            assert (md[i] == md_copy[i]).all()
        else:
            assert md[i] == md_copy[i]
    # Remove the awkward keys; note the bytes key was stored under its repr
    # string "b'\\xba\\xd0'".
    del md["str'ing"], md["str\ning"], md["str\ting"], md["str\0ing"], md["str\\0ing"], md["b'\\xba\\xd0'"]
    # Remaining keys equal their reported type names, except the data-object
    # attributes (delta/npts/starttime) which have fixed numeric types.
    for i in md:
        if i != 'delta' and i != 'npts' and i != 'starttime':
            assert md.type(i) == i
    # del and erase() are equivalent ways to remove a key.
    md_copy = MetadataBase(md)
    del md["<class 'numpy.ndarray'>"]
    md_copy.erase("<class 'numpy.ndarray'>")
    assert not "<class 'numpy.ndarray'>" in md
    assert not "<class 'numpy.ndarray'>" in md_copy
    assert md.keys() == md_copy.keys()
    # None of these types supports reversed().
    with pytest.raises(TypeError, match=MetadataBase.__name__):
        reversed(md)
def test_TimeSeries():
ts = TimeSeries()
ts.npts = 100
ts.t0 = 0.0
ts.dt = 0.001
ts.live = 1
ts.tref = TimeReferenceType.Relative
ts.data.append(1.0)
ts.data.append(2.0)
ts.data.append(3.0)
ts.data.append(4.0)
ts.sync_npts()
assert ts.npts == 104
assert ts.npts == ts['npts']
ts += ts
for i in range(4):
ts.data[i] = i * 0.5
ts_copy = pickle.loads(pickle.dumps(ts))
assert ts.data == ts_copy.data
assert ts.data[3] == 1.5
| |
{
"random_values": "007aaf954de7435d87abe06fb957cbade21732c6ceafb80335e89a5ca322440e",
"initial_state_hash": "c30cc6dddf17d6d9a66b1d662ff8a7960670853b1c6172100e2fd3719c98e8c2",
"final_state_hash": "459beeb952c7dcbe8632a9684684321e1408b5bebdf5a474d9cc85e7d19efdf6",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"output",
"dxsm",
"post",
False,
"dxsm_multiplier",
11400714819323198485,
): {
"random_values": "96ca57f03589459f29b4a68276f18793d3612465767ec9a888770c77cf2cb28c",
"initial_state_hash": "0a6c90201a97fca483be5c3e52322c6a7098872430ea776ea8b4beff483fa0d6",
"final_state_hash": "35f013d09a45fdaddeda8087468d3945cb3fece180ea3372b1072d49dcebb6ef",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"output",
"dxsm",
"post",
False,
"dxsm_multiplier",
15750249268501108917,
): {
"random_values": "9d176eaab6fd671d0be8f9cff0b349c2bb641431efa940836d5b8b7ffb16339f",
"initial_state_hash": "79173a557692071064f5b0c194886d55493edca411e4e2f96e3107093fbe0857",
"final_state_hash": "77411ad01fb648f72232c1bf560f2cf79f1aa73ba8ce9908f2a728e53e46350e",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"xsl-rr",
"post",
True,
"dxsm_multiplier",
11400714819323198485,
): {
"random_values": "78b425d06f86a7f99d2dbd1a37792a4611c246c80aebe49b8071b6002d081e76",
"initial_state_hash": "8c50dcc6bc71efe1af580ec8f139de7f6e6478017f775d25ec4901e321ddf7ba",
"final_state_hash": "275314216146881660a4851271ab303848fb6c1ee281ae436777484bc2c9a548",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"xsl-rr",
"post",
True,
"dxsm_multiplier",
15750249268501108917,
): {
"random_values": "78b425d06f86a7f99d2dbd1a37792a4611c246c80aebe49b8071b6002d081e76",
"initial_state_hash": "8c1ff32d89688ea14e1db0c4623556e7bc7bf34aa27e74e9251fa1170800bc20",
"final_state_hash": "374b2625f6c5dcca495a19ef372031a1f9b7edb6375d945ca9b96d628083ae68",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"xsl-rr",
"post",
False,
"dxsm_multiplier",
11400714819323198485,
): {
"random_values": "cf6b9277a5ee53cf902657c7643b36c1dee8ac99f1e0cdc8772e770f2fcb0db7",
"initial_state_hash": "9e351985349d11cdb6fbb878da2700d7808ab8bac6fd88d2beeb15b7f40729c1",
"final_state_hash": "1fe897f9eb3aa93d1221ecb240749a0345b407dffac48bda1bc0957cf29c8caa",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"xsl-rr",
"post",
False,
"dxsm_multiplier",
15750249268501108917,
): {
"random_values": "cf6b9277a5ee53cf902657c7643b36c1dee8ac99f1e0cdc8772e770f2fcb0db7",
"initial_state_hash": "23d9a1abc4ff389811d5a0dbd7ff4ac2eacea3dce401e93c0f1683335446e47f",
"final_state_hash": "d80562332576fc44ac9dcb507c29c55310b51be1cfb0bc5255fa5daefd316f5f",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"dxsm",
"post",
True,
"dxsm_multiplier",
11400714819323198485,
): {
"random_values": "d8f99b108d5237dcac86ee6a250b3e9ab5fc3930da8eb83f046a3230c19a48aa",
"initial_state_hash": "8c50dcc6bc71efe1af580ec8f139de7f6e6478017f775d25ec4901e321ddf7ba",
"final_state_hash": "275314216146881660a4851271ab303848fb6c1ee281ae436777484bc2c9a548",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"dxsm",
"post",
True,
"dxsm_multiplier",
15750249268501108917,
): {
"random_values": "0e3a373bb1700e5e1111ce1dbadb7c1448b0c51aad2770b838668f7b9d5d36fa",
"initial_state_hash": "8c1ff32d89688ea14e1db0c4623556e7bc7bf34aa27e74e9251fa1170800bc20",
"final_state_hash": "374b2625f6c5dcca495a19ef372031a1f9b7edb6375d945ca9b96d628083ae68",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"dxsm",
"post",
False,
"dxsm_multiplier",
11400714819323198485,
): {
"random_values": "6e511264f1d5e4bb2dea1d2f60b3c9b324152456dd71378a674e8f3d054e6e2e",
"initial_state_hash": "9e351985349d11cdb6fbb878da2700d7808ab8bac6fd88d2beeb15b7f40729c1",
"final_state_hash": "1fe897f9eb3aa93d1221ecb240749a0345b407dffac48bda1bc0957cf29c8caa",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"dxsm",
"post",
False,
"dxsm_multiplier",
15750249268501108917,
): {
"random_values": "2791046aaabba88b68fa95e6d9dadf0cc322d9b902a1801fb2fa2c459ba67b2b",
"initial_state_hash": "23d9a1abc4ff389811d5a0dbd7ff4ac2eacea3dce401e93c0f1683335446e47f",
"final_state_hash": "d80562332576fc44ac9dcb507c29c55310b51be1cfb0bc5255fa5daefd316f5f",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"output",
"xsl-rr",
"post",
True,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "ede41b3536534143acdaaec7534a59ad55449653124ec90ee7d5ce8d7efabcec",
"initial_state_hash": "1d368ef772bde6cefb26bb2f3727bcdd48dd9564c5f008eeda73063af74151b4",
"final_state_hash": "69181a61eb6f9f0af060db3a0f8e4b4227117f61e721a9b4c8f3aeda4baa2748",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"output",
"xsl-rr",
"post",
True,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "4d8bbff64c61c5f3158a043946b78d9c556091df421baf1dd5fbd506d7610dd2",
"initial_state_hash": "c30cc6dddf17d6d9a66b1d662ff8a7960670853b1c6172100e2fd3719c98e8c2",
"final_state_hash": "459beeb952c7dcbe8632a9684684321e1408b5bebdf5a474d9cc85e7d19efdf6",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"output",
"xsl-rr",
"post",
False,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "462e06e6fc463e3fcec72e798a9370b072b858422d82791d9f3340cdefdf08f8",
"initial_state_hash": "af38467b99290f4b7b3e3048202c550976333159b61d8803b79525f1a54de2be",
"final_state_hash": "75a3d23165a86aebfceb12ff94c0642b35bf1e89311a9a624baace34b65e9674",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"output",
"xsl-rr",
"post",
False,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "4b028448a5d5bdc72a80201203ffabd31c9680eb79fce732afd8e29ebc785a42",
"initial_state_hash": "79173a557692071064f5b0c194886d55493edca411e4e2f96e3107093fbe0857",
"final_state_hash": "77411ad01fb648f72232c1bf560f2cf79f1aa73ba8ce9908f2a728e53e46350e",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"output",
"dxsm",
"post",
True,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "77b07de4a4cc4ac74049e30e30a082adac173a707fbcaf86a176982cf16d5cee",
"initial_state_hash": "1d368ef772bde6cefb26bb2f3727bcdd48dd9564c5f008eeda73063af74151b4",
"final_state_hash": "69181a61eb6f9f0af060db3a0f8e4b4227117f61e721a9b4c8f3aeda4baa2748",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"output",
"dxsm",
"post",
True,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "007aaf954de7435d87abe06fb957cbade21732c6ceafb80335e89a5ca322440e",
"initial_state_hash": "c30cc6dddf17d6d9a66b1d662ff8a7960670853b1c6172100e2fd3719c98e8c2",
"final_state_hash": "459beeb952c7dcbe8632a9684684321e1408b5bebdf5a474d9cc85e7d19efdf6",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"output",
"dxsm",
"post",
False,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "6ec85177435066e38d2c9af8774b6f4c2731a452748f2a80e704aaef8575505a",
"initial_state_hash": "af38467b99290f4b7b3e3048202c550976333159b61d8803b79525f1a54de2be",
"final_state_hash": "75a3d23165a86aebfceb12ff94c0642b35bf1e89311a9a624baace34b65e9674",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"output",
"dxsm",
"post",
False,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "9d176eaab6fd671d0be8f9cff0b349c2bb641431efa940836d5b8b7ffb16339f",
"initial_state_hash": "79173a557692071064f5b0c194886d55493edca411e4e2f96e3107093fbe0857",
"final_state_hash": "77411ad01fb648f72232c1bf560f2cf79f1aa73ba8ce9908f2a728e53e46350e",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"xsl-rr",
"post",
True,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "e6571719b3b423787db61e2ce79065f4ce7dbcc00a149207188fb29165bda27a",
"initial_state_hash": "357a515aa5a1da541c19f34bd07642c4780c2360c69324e49219f1070c51d04c",
"final_state_hash": "42284476ec5bc9eded19471847d64d951fbe71ffd69639398b5f616db6982374",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"xsl-rr",
"post",
True,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "78b425d06f86a7f99d2dbd1a37792a4611c246c80aebe49b8071b6002d081e76",
"initial_state_hash": "8c1ff32d89688ea14e1db0c4623556e7bc7bf34aa27e74e9251fa1170800bc20",
"final_state_hash": "374b2625f6c5dcca495a19ef372031a1f9b7edb6375d945ca9b96d628083ae68",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"xsl-rr",
"post",
False,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "87ab6ac622e6f257e4bc75bfb5468d2c71ff8bfa62aa82b2f85a506d67d2a656",
"initial_state_hash": "26ce9c76f2b688eee1c37be3715b7170703fd93d934e253d2c8cd8cb71001ee0",
"final_state_hash": "14cc3923fc978a92b978f814aad03fe0a8027e144e1a64ebeaf4a818f965ce0e",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"xsl-rr",
"post",
False,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "cf6b9277a5ee53cf902657c7643b36c1dee8ac99f1e0cdc8772e770f2fcb0db7",
"initial_state_hash": "23d9a1abc4ff389811d5a0dbd7ff4ac2eacea3dce401e93c0f1683335446e47f",
"final_state_hash": "d80562332576fc44ac9dcb507c29c55310b51be1cfb0bc5255fa5daefd316f5f",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"dxsm",
"post",
True,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "276e7d4dc3aa01b839e87c20b35e65d016532d0a11deddcb45a651a9d43d22f5",
"initial_state_hash": "357a515aa5a1da541c19f34bd07642c4780c2360c69324e49219f1070c51d04c",
"final_state_hash": "42284476ec5bc9eded19471847d64d951fbe71ffd69639398b5f616db6982374",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"dxsm",
"post",
True,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "0e3a373bb1700e5e1111ce1dbadb7c1448b0c51aad2770b838668f7b9d5d36fa",
"initial_state_hash": "8c1ff32d89688ea14e1db0c4623556e7bc7bf34aa27e74e9251fa1170800bc20",
"final_state_hash": "374b2625f6c5dcca495a19ef372031a1f9b7edb6375d945ca9b96d628083ae68",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"dxsm",
"post",
False,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "53c420abe3a0edc40a4cd288e1e197b612e0759c308a692c2c56022001c5e361",
"initial_state_hash": "26ce9c76f2b688eee1c37be3715b7170703fd93d934e253d2c8cd8cb71001ee0",
"final_state_hash": "14cc3923fc978a92b978f814aad03fe0a8027e144e1a64ebeaf4a818f965ce0e",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"dxsm",
"post",
False,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "2791046aaabba88b68fa95e6d9dadf0cc322d9b902a1801fb2fa2c459ba67b2b",
"initial_state_hash": "23d9a1abc4ff389811d5a0dbd7ff4ac2eacea3dce401e93c0f1683335446e47f",
"final_state_hash": "d80562332576fc44ac9dcb507c29c55310b51be1cfb0bc5255fa5daefd316f5f",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"output",
"xsl-rr",
"dxsm_multiplier",
11400714819323198485,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "ede41b3536534143acdaaec7534a59ad55449653124ec90ee7d5ce8d7efabcec",
"initial_state_hash": "6fdce1cee3122ea6cf7c81ff7970359b21fcee0ae3cec5b63d9defc4027bdcda",
"final_state_hash": "808f052d237bee6e7f7d83806876e227293ea9c41e99f7d2a03aed67f8fc1fcb",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"output",
"xsl-rr",
"dxsm_multiplier",
11400714819323198485,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "4d8bbff64c61c5f3158a043946b78d9c556091df421baf1dd5fbd506d7610dd2",
"initial_state_hash": "0a1ab006c181853151094a76e18495865725f89af54fb2d448063aef8a7aea66",
"final_state_hash": "37110aba91957d5a865a3949e6506a223cb8e50655b9081e047a2ce7a1a7db43",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"output",
"xsl-rr",
"dxsm_multiplier",
15750249268501108917,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "ede41b3536534143acdaaec7534a59ad55449653124ec90ee7d5ce8d7efabcec",
"initial_state_hash": "1d368ef772bde6cefb26bb2f3727bcdd48dd9564c5f008eeda73063af74151b4",
"final_state_hash": "69181a61eb6f9f0af060db3a0f8e4b4227117f61e721a9b4c8f3aeda4baa2748",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"output",
"xsl-rr",
"dxsm_multiplier",
15750249268501108917,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "4d8bbff64c61c5f3158a043946b78d9c556091df421baf1dd5fbd506d7610dd2",
"initial_state_hash": "c30cc6dddf17d6d9a66b1d662ff8a7960670853b1c6172100e2fd3719c98e8c2",
"final_state_hash": "459beeb952c7dcbe8632a9684684321e1408b5bebdf5a474d9cc85e7d19efdf6",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"output",
"dxsm",
"dxsm_multiplier",
11400714819323198485,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "f5d2c541bfa044d50b4b9a349fbfa28a7978e38e9f36c08ff4f8dbdb1489be21",
"initial_state_hash": "6fdce1cee3122ea6cf7c81ff7970359b21fcee0ae3cec5b63d9defc4027bdcda",
"final_state_hash": "808f052d237bee6e7f7d83806876e227293ea9c41e99f7d2a03aed67f8fc1fcb",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"output",
"dxsm",
"dxsm_multiplier",
11400714819323198485,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "b46203b843da37d8ea3fe684c825eea2302f19bff2b6171239289c9aae6d8a94",
"initial_state_hash": "0a1ab006c181853151094a76e18495865725f89af54fb2d448063aef8a7aea66",
"final_state_hash": "37110aba91957d5a865a3949e6506a223cb8e50655b9081e047a2ce7a1a7db43",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"output",
"dxsm",
"dxsm_multiplier",
15750249268501108917,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "77b07de4a4cc4ac74049e30e30a082adac173a707fbcaf86a176982cf16d5cee",
"initial_state_hash": "1d368ef772bde6cefb26bb2f3727bcdd48dd9564c5f008eeda73063af74151b4",
"final_state_hash": "69181a61eb6f9f0af060db3a0f8e4b4227117f61e721a9b4c8f3aeda4baa2748",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"output",
"dxsm",
"dxsm_multiplier",
15750249268501108917,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "007aaf954de7435d87abe06fb957cbade21732c6ceafb80335e89a5ca322440e",
"initial_state_hash": "c30cc6dddf17d6d9a66b1d662ff8a7960670853b1c6172100e2fd3719c98e8c2",
"final_state_hash": "459beeb952c7dcbe8632a9684684321e1408b5bebdf5a474d9cc85e7d19efdf6",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"xsl-rr",
"dxsm_multiplier",
11400714819323198485,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "e6571719b3b423787db61e2ce79065f4ce7dbcc00a149207188fb29165bda27a",
"initial_state_hash": "6e9c58608860e843cc701f628d8699870a7701ff8ef71c1143ed23111dcf6439",
"final_state_hash": "087707bacce00c40551708f1dadbf8450e2ddf7b253d06d1898e024eac30b49e",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"xsl-rr",
"dxsm_multiplier",
11400714819323198485,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "78b425d06f86a7f99d2dbd1a37792a4611c246c80aebe49b8071b6002d081e76",
"initial_state_hash": "8c50dcc6bc71efe1af580ec8f139de7f6e6478017f775d25ec4901e321ddf7ba",
"final_state_hash": "275314216146881660a4851271ab303848fb6c1ee281ae436777484bc2c9a548",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"xsl-rr",
"dxsm_multiplier",
15750249268501108917,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "e6571719b3b423787db61e2ce79065f4ce7dbcc00a149207188fb29165bda27a",
"initial_state_hash": "357a515aa5a1da541c19f34bd07642c4780c2360c69324e49219f1070c51d04c",
"final_state_hash": "42284476ec5bc9eded19471847d64d951fbe71ffd69639398b5f616db6982374",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"xsl-rr",
"dxsm_multiplier",
15750249268501108917,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "78b425d06f86a7f99d2dbd1a37792a4611c246c80aebe49b8071b6002d081e76",
"initial_state_hash": "8c1ff32d89688ea14e1db0c4623556e7bc7bf34aa27e74e9251fa1170800bc20",
"final_state_hash": "374b2625f6c5dcca495a19ef372031a1f9b7edb6375d945ca9b96d628083ae68",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"dxsm",
"dxsm_multiplier",
11400714819323198485,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "fc6a8edc61787863c2ba7336bf07f33018e36b8723efa4ab4e6cee48b89be28f",
"initial_state_hash": "6e9c58608860e843cc701f628d8699870a7701ff8ef71c1143ed23111dcf6439",
"final_state_hash": "087707bacce00c40551708f1dadbf8450e2ddf7b253d06d1898e024eac30b49e",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"dxsm",
"dxsm_multiplier",
11400714819323198485,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "d8f99b108d5237dcac86ee6a250b3e9ab5fc3930da8eb83f046a3230c19a48aa",
"initial_state_hash": "8c50dcc6bc71efe1af580ec8f139de7f6e6478017f775d25ec4901e321ddf7ba",
"final_state_hash": "275314216146881660a4851271ab303848fb6c1ee281ae436777484bc2c9a548",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"dxsm",
"dxsm_multiplier",
15750249268501108917,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "276e7d4dc3aa01b839e87c20b35e65d016532d0a11deddcb45a651a9d43d22f5",
"initial_state_hash": "357a515aa5a1da541c19f34bd07642c4780c2360c69324e49219f1070c51d04c",
"final_state_hash": "42284476ec5bc9eded19471847d64d951fbe71ffd69639398b5f616db6982374",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"output",
"dxsm",
"dxsm_multiplier",
15750249268501108917,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "0e3a373bb1700e5e1111ce1dbadb7c1448b0c51aad2770b838668f7b9d5d36fa",
"initial_state_hash": "8c1ff32d89688ea14e1db0c4623556e7bc7bf34aa27e74e9251fa1170800bc20",
"final_state_hash": "374b2625f6c5dcca495a19ef372031a1f9b7edb6375d945ca9b96d628083ae68",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"post",
True,
"dxsm_multiplier",
11400714819323198485,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "ede41b3536534143acdaaec7534a59ad55449653124ec90ee7d5ce8d7efabcec",
"initial_state_hash": "6fdce1cee3122ea6cf7c81ff7970359b21fcee0ae3cec5b63d9defc4027bdcda",
"final_state_hash": "808f052d237bee6e7f7d83806876e227293ea9c41e99f7d2a03aed67f8fc1fcb",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"post",
True,
"dxsm_multiplier",
11400714819323198485,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "4d8bbff64c61c5f3158a043946b78d9c556091df421baf1dd5fbd506d7610dd2",
"initial_state_hash": "0a1ab006c181853151094a76e18495865725f89af54fb2d448063aef8a7aea66",
"final_state_hash": "37110aba91957d5a865a3949e6506a223cb8e50655b9081e047a2ce7a1a7db43",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"post",
True,
"dxsm_multiplier",
15750249268501108917,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "ede41b3536534143acdaaec7534a59ad55449653124ec90ee7d5ce8d7efabcec",
"initial_state_hash": "1d368ef772bde6cefb26bb2f3727bcdd48dd9564c5f008eeda73063af74151b4",
"final_state_hash": "69181a61eb6f9f0af060db3a0f8e4b4227117f61e721a9b4c8f3aeda4baa2748",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"post",
True,
"dxsm_multiplier",
15750249268501108917,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "4d8bbff64c61c5f3158a043946b78d9c556091df421baf1dd5fbd506d7610dd2",
"initial_state_hash": "c30cc6dddf17d6d9a66b1d662ff8a7960670853b1c6172100e2fd3719c98e8c2",
"final_state_hash": "459beeb952c7dcbe8632a9684684321e1408b5bebdf5a474d9cc85e7d19efdf6",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"post",
False,
"dxsm_multiplier",
11400714819323198485,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "462e06e6fc463e3fcec72e798a9370b072b858422d82791d9f3340cdefdf08f8",
"initial_state_hash": "ed34e82e2d30bc8e3e0edd5bbac196e9088c2d042c80aa941c4d5fdfba19af73",
"final_state_hash": "0fe3cc8e1e072afbc3408aeccace40f2b46a7e0d06563f41ce3b56ec01c90f95",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"post",
False,
"dxsm_multiplier",
11400714819323198485,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "4b028448a5d5bdc72a80201203ffabd31c9680eb79fce732afd8e29ebc785a42",
"initial_state_hash": "0a6c90201a97fca483be5c3e52322c6a7098872430ea776ea8b4beff483fa0d6",
"final_state_hash": "35f013d09a45fdaddeda8087468d3945cb3fece180ea3372b1072d49dcebb6ef",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"post",
False,
"dxsm_multiplier",
15750249268501108917,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "462e06e6fc463e3fcec72e798a9370b072b858422d82791d9f3340cdefdf08f8",
"initial_state_hash": "af38467b99290f4b7b3e3048202c550976333159b61d8803b79525f1a54de2be",
"final_state_hash": "75a3d23165a86aebfceb12ff94c0642b35bf1e89311a9a624baace34b65e9674",
},
(
"LCG128Mix",
"seed",
"inc",
0,
"post",
False,
"dxsm_multiplier",
15750249268501108917,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "4b028448a5d5bdc72a80201203ffabd31c9680eb79fce732afd8e29ebc785a42",
"initial_state_hash": "79173a557692071064f5b0c194886d55493edca411e4e2f96e3107093fbe0857",
"final_state_hash": "77411ad01fb648f72232c1bf560f2cf79f1aa73ba8ce9908f2a728e53e46350e",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"post",
True,
"dxsm_multiplier",
11400714819323198485,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "e6571719b3b423787db61e2ce79065f4ce7dbcc00a149207188fb29165bda27a",
"initial_state_hash": "6e9c58608860e843cc701f628d8699870a7701ff8ef71c1143ed23111dcf6439",
"final_state_hash": "087707bacce00c40551708f1dadbf8450e2ddf7b253d06d1898e024eac30b49e",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"post",
True,
"dxsm_multiplier",
11400714819323198485,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "78b425d06f86a7f99d2dbd1a37792a4611c246c80aebe49b8071b6002d081e76",
"initial_state_hash": "8c50dcc6bc71efe1af580ec8f139de7f6e6478017f775d25ec4901e321ddf7ba",
"final_state_hash": "275314216146881660a4851271ab303848fb6c1ee281ae436777484bc2c9a548",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"post",
True,
"dxsm_multiplier",
15750249268501108917,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "e6571719b3b423787db61e2ce79065f4ce7dbcc00a149207188fb29165bda27a",
"initial_state_hash": "357a515aa5a1da541c19f34bd07642c4780c2360c69324e49219f1070c51d04c",
"final_state_hash": "42284476ec5bc9eded19471847d64d951fbe71ffd69639398b5f616db6982374",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"post",
True,
"dxsm_multiplier",
15750249268501108917,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "78b425d06f86a7f99d2dbd1a37792a4611c246c80aebe49b8071b6002d081e76",
"initial_state_hash": "8c1ff32d89688ea14e1db0c4623556e7bc7bf34aa27e74e9251fa1170800bc20",
"final_state_hash": "374b2625f6c5dcca495a19ef372031a1f9b7edb6375d945ca9b96d628083ae68",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"post",
False,
"dxsm_multiplier",
11400714819323198485,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "87ab6ac622e6f257e4bc75bfb5468d2c71ff8bfa62aa82b2f85a506d67d2a656",
"initial_state_hash": "a53bc99840c68cbc5a80bddccabf725b49e3777a217caae8411148811e73d96d",
"final_state_hash": "21e3145806cd18fec247ce8bf632815c56eaf5d1318f9fef6f2c26671a86fbfc",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"post",
False,
"dxsm_multiplier",
11400714819323198485,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "cf6b9277a5ee53cf902657c7643b36c1dee8ac99f1e0cdc8772e770f2fcb0db7",
"initial_state_hash": "9e351985349d11cdb6fbb878da2700d7808ab8bac6fd88d2beeb15b7f40729c1",
"final_state_hash": "1fe897f9eb3aa93d1221ecb240749a0345b407dffac48bda1bc0957cf29c8caa",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"post",
False,
"dxsm_multiplier",
15750249268501108917,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "87ab6ac622e6f257e4bc75bfb5468d2c71ff8bfa62aa82b2f85a506d67d2a656",
"initial_state_hash": "26ce9c76f2b688eee1c37be3715b7170703fd93d934e253d2c8cd8cb71001ee0",
"final_state_hash": "14cc3923fc978a92b978f814aad03fe0a8027e144e1a64ebeaf4a818f965ce0e",
},
(
"LCG128Mix",
"seed",
"inc",
None,
"post",
False,
"dxsm_multiplier",
15750249268501108917,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "cf6b9277a5ee53cf902657c7643b36c1dee8ac99f1e0cdc8772e770f2fcb0db7",
"initial_state_hash": "23d9a1abc4ff389811d5a0dbd7ff4ac2eacea3dce401e93c0f1683335446e47f",
"final_state_hash": "d80562332576fc44ac9dcb507c29c55310b51be1cfb0bc5255fa5daefd316f5f",
},
(
"LCG128Mix",
"seed",
"output",
"xsl-rr",
"post",
True,
"dxsm_multiplier",
11400714819323198485,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "e6571719b3b423787db61e2ce79065f4ce7dbcc00a149207188fb29165bda27a",
"initial_state_hash": "6e9c58608860e843cc701f628d8699870a7701ff8ef71c1143ed23111dcf6439",
"final_state_hash": "087707bacce00c40551708f1dadbf8450e2ddf7b253d06d1898e024eac30b49e",
},
(
"LCG128Mix",
"seed",
"output",
"xsl-rr",
"post",
True,
"dxsm_multiplier",
11400714819323198485,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "78b425d06f86a7f99d2dbd1a37792a4611c246c80aebe49b8071b6002d081e76",
"initial_state_hash": "8c50dcc6bc71efe1af580ec8f139de7f6e6478017f775d25ec4901e321ddf7ba",
"final_state_hash": "275314216146881660a4851271ab303848fb6c1ee281ae436777484bc2c9a548",
},
(
"LCG128Mix",
"seed",
"output",
"xsl-rr",
"post",
True,
"dxsm_multiplier",
15750249268501108917,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "e6571719b3b423787db61e2ce79065f4ce7dbcc00a149207188fb29165bda27a",
"initial_state_hash": "357a515aa5a1da541c19f34bd07642c4780c2360c69324e49219f1070c51d04c",
"final_state_hash": "42284476ec5bc9eded19471847d64d951fbe71ffd69639398b5f616db6982374",
},
(
"LCG128Mix",
"seed",
"output",
"xsl-rr",
"post",
True,
"dxsm_multiplier",
15750249268501108917,
"multiplier",
47026247687942121848144207491837523525,
): {
"random_values": "78b425d06f86a7f99d2dbd1a37792a4611c246c80aebe49b8071b6002d081e76",
"initial_state_hash": "8c1ff32d89688ea14e1db0c4623556e7bc7bf34aa27e74e9251fa1170800bc20",
"final_state_hash": "374b2625f6c5dcca495a19ef372031a1f9b7edb6375d945ca9b96d628083ae68",
},
(
"LCG128Mix",
"seed",
"output",
"xsl-rr",
"post",
False,
"dxsm_multiplier",
11400714819323198485,
"multiplier",
52583122484843402430317208685168068605,
): {
"random_values": "87ab6ac622e6f257e4bc75bfb5468d2c71ff8bfa62aa82b2f85a506d67d2a656",
"initial_state_hash": "a53bc99840c68cbc5a80bddccabf725b49e3777a217caae8411148811e73d96d",
"final_state_hash": "21e3145806cd18fec247ce8bf632815c56eaf5d1318f9fef6f2c26671a86fbfc",
},
(
"LCG128Mix",
"seed",
"output",
"xsl-rr",
"post",
False,
"dxsm_multiplier",
11400714819323198485,
"multiplier",
47026247687942121848144207491837523525,
): | |
# Portfolio_Strategies/backtest_strategies.py (from vhn0912/Finance)
import numpy as np
import pandas as pd
import yfinance as yf
import datetime as dt
import warnings
from yahoo_fin import stock_info as si
import talib
warnings.filterwarnings('ignore')
# --- User input and market-data download -------------------------------------
# Show every DataFrame column when results are printed.
pd.set_option('display.max_columns', None)

# Ask which ticker to back-test and how many years of history to use.
stock = input('Enter a stock ticker: ')
num_of_years = float(input('Enter number of years: '))

# Daily price history covering the requested look-back window.
start = dt.date.today() - dt.timedelta(days=int(365.25 * num_of_years))
end = dt.datetime.now()
current_price = round(si.get_live_price(stock), 2)
df = yf.download(stock, start, end, interval='1d')

# Strategies evaluated below; each branch appends one row to every summary list.
signals = [
    'Moving Average',
    'Relative Strength Index',
    'Bollinger Bands',
    'MACD',
    'Commodity Channel Index',
    'Extended Market Calculator',
    'Red White Blue',
]

# Per-strategy summary statistics, filled in by the back-test loop.
change, num_of_trades = [], []
last_sell, last_buy = [], []
average_gain, average_loss = [], []
max_return, max_loss = [], []
gain_loss, battling_avg = [], []
# Back-test each strategy in turn on the downloaded history. Every branch
# simulates a long-only system (position: 0 = flat, 1 = long), collects the
# per-trade percent returns, prints summary statistics, and appends one row
# of results to the summary lists created above.
for signal in signals:
    # 20/50 simple moving-average crossover: long while SMA20 > SMA50.
    if signal.lower() == 'moving average':
        print ('-'*60)
        print ('Simple Moving Average: ')
        short_sma= 20
        long_sma = 50
        SMAs=[short_sma, long_sma]
        # Column 4 of the yfinance download is 'Adj Close'.
        for i in SMAs:
            df["SMA_"+str(i)]= df.iloc[:,4].rolling(window=i).mean()
        position=0
        counter=0
        percentChange=[]
        for i in df.index:
            SMA_short=df['SMA_20']
            SMA_long =df['SMA_50']
            close=df['Adj Close'][i]
            # Short SMA above long SMA: enter a long position if currently flat.
            if(SMA_short[i] > SMA_long[i]):
                if(position==0):
                    buyP=close
                    position=1
            # Short SMA below long SMA: exit and record the trade's return.
            elif(SMA_short[i] < SMA_long[i]):
                if(position==1):
                    position=0
                    sellP=close
                    perc=(sellP/buyP-1)*100
                    percentChange.append(perc)
            counter+=1
        # Fold the per-trade returns into win/loss totals and a compounded
        # overall return (totReturn is a multiplier until the final round()).
        gains=0
        numGains=0
        losses=0
        numLosses=0
        totReturn=1
        for i in percentChange:
            if(i>0):
                gains+=i
                numGains+=1
            else:
                losses+=i
                numLosses+=1
            totReturn = totReturn*((i/100)+1)
        totReturn=round((totReturn-1)*100,2)
        print("These statistics are from "+str(start)+" up till now with "+str(numGains+numLosses)+" trades:")
        print("SMAs used: "+str(SMAs))
        print("Total return over "+str(numGains+numLosses)+ " trades: "+ str(totReturn)+"%")
        if (numGains>0):
            avgGain=gains/numGains
            maxReturn= str(max(percentChange))
        else:
            avgGain=0
            maxReturn=np.nan
        if(numLosses>0):
            avgLoss=losses/numLosses
            maxLoss=str(min(percentChange))
            ratioRR=str(-avgGain/avgLoss)
        else:
            avgLoss=0
            maxLoss=np.nan
            # No losing trades: report an infinite gain/loss ratio.
            ratioRR='inf'
        # NOTE(review): buy-and-hold return is the SUM of daily pct changes,
        # not compounded -- an approximation kept consistent across branches.
        df['PC'] = df['Close'].pct_change()
        hold = round(df['PC'].sum() * 100, 2)
        print ("Total return for a B&H strategy: " + str(hold)+'%')
        print("Average Gain: "+ str(round(avgGain, 2)))
        print("Average Loss: "+ str(round(avgLoss, 2)))
        print("Max Return: "+ str(maxReturn))
        print("Max Loss: "+ str(maxLoss))
        print("Gain/loss ratio: "+ str(ratioRR))
        if(numGains>0 or numLosses>0):
            batAvg=numGains/(numGains+numLosses)
        else:
            batAvg=0
        print("Batting Avg: "+ str(batAvg))
        # Append this strategy's summary row to the shared result lists.
        # NOTE(review): buyP/sellP are undefined here if no trade ever opened
        # or closed (NameError) -- confirm behaviour on short histories.
        change.append(totReturn)
        trades = numGains+numLosses
        num_of_trades.append(trades)
        last_sell.append(sellP)
        last_buy.append(buyP)
        average_gain.append(avgGain)
        average_loss.append(avgLoss)
        max_return.append(float(maxReturn))
        max_loss.append(float(maxLoss))
        gain_loss.append(float(ratioRR))
        battling_avg.append(batAvg)
    # RSI mean-reversion: buy when RSI <= 30 (oversold), sell when >= 70
    # (overbought). Uses talib's default 14-period RSI.
    elif signal.lower() == 'relative strength index':
        print ('-'*60)
        print ('Relative Strength Index: ')
        df["RSI"] = talib.RSI(df["Close"])
        # Mean RSI over the last 14 bars (computed but only used implicitly
        # for inspection; not part of the trading rule below).
        values = df["RSI"].tail(14)
        value = values.mean()
        position=0
        counter=0
        percentChange=[]
        for i in df.index:
            rsi=df['RSI']
            close=df['Adj Close'][i]
            # Oversold: enter a long position if currently flat.
            if(rsi[i] <= 30):
                if(position==0):
                    buyP=close
                    position=1
            # Overbought: exit and record the trade's return.
            elif(rsi[i] >= 70):
                if(position==1):
                    position=0
                    sellP=close
                    perc=(sellP/buyP-1)*100
                    percentChange.append(perc)
            counter+=1
        # Fold the per-trade returns into win/loss totals and a compounded
        # overall return.
        gains=0
        numGains=0
        losses=0
        numLosses=0
        totReturn=1
        for i in percentChange:
            if(i>0):
                gains+=i
                numGains+=1
            else:
                losses+=i
                numLosses+=1
            totReturn = totReturn*((i/100)+1)
        totReturn=round((totReturn-1)*100,2)
        print("These statistics are from "+str(start)+" up till now with "+str(numGains+numLosses)+" trades:")
        print("Total return over "+str(numGains+numLosses)+ " trades: "+ str(totReturn)+"%")
        if (numGains>0):
            avgGain=gains/numGains
            maxReturn= str(max(percentChange))
        else:
            avgGain=0
            maxReturn=np.nan
        if(numLosses>0):
            avgLoss=losses/numLosses
            maxLoss=str(min(percentChange))
            ratioRR=str(-avgGain/avgLoss)
        else:
            avgLoss=0
            maxLoss=np.nan
            # No losing trades: report an infinite gain/loss ratio.
            ratioRR='inf'
        # Buy-and-hold comparison (sum of daily pct changes, not compounded).
        df['PC'] = df['Close'].pct_change()
        hold = round(df['PC'].sum() * 100, 2)
        print ("Total return for a B&H strategy: " + str(hold)+'%')
        print("Average Gain: "+ str(round(avgGain, 2)))
        print("Average Loss: "+ str(round(avgLoss, 2)))
        print("Max Return: "+ str(maxReturn))
        print("Max Loss: "+ str(maxLoss))
        print("Gain/loss ratio: "+ str(ratioRR))
        if(numGains>0 or numLosses>0):
            batAvg=numGains/(numGains+numLosses)
        else:
            batAvg=0
        print("Batting Avg: "+ str(batAvg))
        # Append this strategy's summary row to the shared result lists.
        change.append(totReturn)
        trades = numGains+numLosses
        num_of_trades.append(trades)
        last_sell.append(sellP)
        last_buy.append(buyP)
        average_gain.append(avgGain)
        average_loss.append(avgLoss)
        max_return.append(float(maxReturn))
        max_loss.append(float(maxLoss))
        gain_loss.append(float(ratioRR))
        battling_avg.append(batAvg)
    # Bollinger Bands mean-reversion (20-period): buy when price closes below
    # the lower band, sell when it closes above the upper band.
    elif signal.lower() == 'bollinger bands':
        print ('-'*60)
        print ('Bollinger Bands: ')
        position=0
        counter=0
        percentChange=[]
        df['upper_band'], df['middle_band'], df['lower_band'] = talib.BBANDS(df['Adj Close'], timeperiod =20)
        for i in df.index:
            BBAND_upper =df['upper_band']
            BBAND_lower =df['lower_band']
            close_price = df['Adj Close']
            close=df['Adj Close'][i]
            # Close below the lower band: enter a long position if flat.
            if(BBAND_lower[i] > close_price[i]):
                if(position==0):
                    buyP=close
                    position=1
            # Close above the upper band: exit and record the trade's return.
            elif(BBAND_upper[i] < close_price[i]):
                if(position==1):
                    position=0
                    sellP=close
                    perc=(sellP/buyP-1)*100
                    percentChange.append(perc)
            counter+=1
        # Fold the per-trade returns into win/loss totals and a compounded
        # overall return.
        gains=0
        numGains=0
        losses=0
        numLosses=0
        totReturn=1
        for i in percentChange:
            if(i>0):
                gains+=i
                numGains+=1
            else:
                losses+=i
                numLosses+=1
            totReturn = totReturn*((i/100)+1)
        totReturn=round((totReturn-1)*100,2)
        print("These statistics are from "+str(start)+" up till now with "+str(numGains+numLosses)+" trades:")
        print("Total return over "+str(numGains+numLosses)+ " trades: "+ str(totReturn)+"%")
        if (numGains>0):
            avgGain=gains/numGains
            maxReturn= str(max(percentChange))
        else:
            avgGain=0
            maxReturn=np.nan
        if(numLosses>0):
            avgLoss=losses/numLosses
            maxLoss=str(min(percentChange))
            ratioRR=str(-avgGain/avgLoss)
        else:
            avgLoss=0
            maxLoss=np.nan
            # No losing trades: report an infinite gain/loss ratio.
            ratioRR='inf'
        # Buy-and-hold comparison (sum of daily pct changes, not compounded).
        df['PC'] = df['Close'].pct_change()
        hold = round(df['PC'].sum() * 100, 2)
        print ("Total return for a B&H strategy: " + str(hold)+'%')
        print("Average Gain: "+ str(round(avgGain, 2)))
        print("Average Loss: "+ str(round(avgLoss, 2)))
        print("Max Return: "+ str(maxReturn))
        print("Max Loss: "+ str(maxLoss))
        print("Gain/loss ratio: "+ str(ratioRR))
        if(numGains>0 or numLosses>0):
            batAvg=numGains/(numGains+numLosses)
        else:
            batAvg=0
        print("Batting Avg: "+ str(batAvg))
        # Append this strategy's summary row to the shared result lists.
        change.append(totReturn)
        trades = numGains+numLosses
        num_of_trades.append(trades)
        last_sell.append(sellP)
        last_buy.append(buyP)
        average_gain.append(avgGain)
        average_loss.append(avgLoss)
        max_return.append(float(maxReturn))
        max_loss.append(float(maxLoss))
        gain_loss.append(float(ratioRR))
        battling_avg.append(batAvg)
    # MACD signal-line crossover (12/26/9): long while the MACD line is above
    # its signal line, flat while below.
    elif signal.lower() == 'macd':
        print ('-'*60)
        print ('MACD: ')
        position=0
        counter=0
        percentChange=[]
        df['macd'], df['macdsignal'], df['macdhist'] = talib.MACD(df['Adj Close'], fastperiod=12, slowperiod=26, signalperiod=9)
        for i in df.index:
            macd = df['macd']
            macdsignal = df['macdsignal']
            close=df['Adj Close'][i]
            # MACD above its signal line: enter a long position if flat.
            if(macd[i] > macdsignal[i]):
                if(position==0):
                    buyP=close
                    position=1
            # MACD below its signal line: exit and record the trade's return.
            elif(macd[i] < macdsignal[i]):
                if(position==1):
                    position=0
                    sellP=close
                    perc=(sellP/buyP-1)*100
                    percentChange.append(perc)
            counter+=1
        # Fold the per-trade returns into win/loss totals and a compounded
        # overall return.
        gains=0
        numGains=0
        losses=0
        numLosses=0
        totReturn=1
        for i in percentChange:
            if(i>0):
                gains+=i
                numGains+=1
            else:
                losses+=i
                numLosses+=1
            totReturn = totReturn*((i/100)+1)
        totReturn=round((totReturn-1)*100,2)
        print("These statistics are from "+str(start)+" up till now with "+str(numGains+numLosses)+" trades:")
        print("Total return over "+str(numGains+numLosses)+ " trades: "+ str(totReturn)+"%")
        if (numGains>0):
            avgGain=gains/numGains
            maxReturn= str(max(percentChange))
        else:
            avgGain=0
            maxReturn=np.nan
        if(numLosses>0):
            avgLoss=losses/numLosses
            maxLoss=str(min(percentChange))
            ratioRR=str(-avgGain/avgLoss)
        else:
            avgLoss=0
            maxLoss=np.nan
            # No losing trades: report an infinite gain/loss ratio.
            ratioRR='inf'
        # Buy-and-hold comparison (sum of daily pct changes, not compounded).
        df['PC'] = df['Close'].pct_change()
        hold = round(df['PC'].sum() * 100, 2)
        print ("Total return for a B&H strategy: " + str(hold)+'%')
        print("Average Gain: "+ str(round(avgGain, 2)))
        print("Average Loss: "+ str(round(avgLoss, 2)))
        print("Max Return: "+ str(maxReturn))
        print("Max Loss: "+ str(maxLoss))
        print("Gain/loss ratio: "+ str(ratioRR))
        if(numGains>0 or numLosses>0):
            batAvg=numGains/(numGains+numLosses)
        else:
            batAvg=0
        print("Batting Avg: "+ str(batAvg))
        # Append this strategy's summary row to the shared result lists.
        change.append(totReturn)
        trades = numGains+numLosses
        num_of_trades.append(trades)
        last_sell.append(sellP)
        last_buy.append(buyP)
        average_gain.append(avgGain)
        average_loss.append(avgLoss)
        max_return.append(float(maxReturn))
        max_loss.append(float(maxLoss))
        gain_loss.append(float(ratioRR))
        battling_avg.append(batAvg)
elif signal.lower() == 'commodity channel index':
print ('-'*60)
print ('Commodity Channel Index: ')
position=0
counter=0
percentChange=[]
cci = talib.CCI(df['High'], df['Low'], df['Close'], timeperiod=14)
for i in df.index:
cci = cci
close=df['Adj Close'][i]
if(cci[i] > 0):
if(position==0):
buyP=close
position=1
elif(cci[i] < 0):
if(position==1):
position=0
sellP=close
perc=(sellP/buyP-1)*100
percentChange.append(perc)
counter+=1
gains=0
numGains=0
losses=0
numLosses=0
totReturn=1
for i in percentChange:
if(i>0):
gains+=i
numGains+=1
else:
losses+=i
numLosses+=1
totReturn = totReturn*((i/100)+1)
totReturn=round((totReturn-1)*100,2)
print("These statistics are from "+str(start)+" up till now with "+str(numGains+numLosses)+" trades:")
print("Total return over "+str(numGains+numLosses)+ " trades: "+ str(totReturn)+"%")
if (numGains>0):
avgGain=gains/numGains
maxReturn= str(max(percentChange))
else:
avgGain=0
maxReturn=np.nan
if(numLosses>0):
avgLoss=losses/numLosses
maxLoss=str(min(percentChange))
ratioRR=str(-avgGain/avgLoss)
else:
avgLoss=0
maxLoss=np.nan
ratioRR='inf'
df['PC'] = df['Close'].pct_change()
hold = round(df['PC'].sum() * 100, 2)
print ("Total return for a B&H strategy: " + str(hold)+'%')
print("Average Gain: "+ str(round(avgGain, 2)))
print("Average Loss: "+ str(round(avgLoss, 2)))
print("Max Return: "+ str(maxReturn))
print("Max Loss: "+ str(maxLoss))
print("Gain/loss ratio: "+ str(ratioRR))
if(numGains>0 or numLosses>0):
batAvg=numGains/(numGains+numLosses)
else:
batAvg=0
print("Batting Avg: "+ str(batAvg))
change.append(totReturn)
trades = numGains+numLosses
num_of_trades.append(trades)
last_sell.append(sellP)
last_buy.append(buyP)
average_gain.append(avgGain)
average_loss.append(avgLoss)
max_return.append(float(maxReturn))
max_loss.append(float(maxLoss))
gain_loss.append(float(ratioRR))
battling_avg.append(batAvg)
elif signal.lower() == 'extended market calculator':
sma = 50
limit = 10
df['SMA'+str(sma)] = df.iloc[:,4].rolling(window=sma).mean()
df['PC'] = ((df["Adj Close"]/df['SMA'+str(sma)])-1)*100
position=0
counter=0
percentChange=[]
n = -1
for i in df.index:
n = n + 1
mean =df["PC"].mean()
stdev=df["PC"].std()
current=df["PC"][n]
close=df['Adj Close'][i]
if(current < -2*stdev+mean):
if(position==0):
buyP=close
position=1
elif(current > 2*stdev+mean):
if(position==1):
position=0
sellP=close
perc=(sellP/buyP-1)*100
percentChange.append(perc)
counter+=1
gains=0
numGains=0
losses=0
numLosses=0
totReturn=1
for i in percentChange:
if(i>0):
gains+=i
numGains+=1
else:
losses+=i
numLosses+=1
totReturn = totReturn*((i/100)+1)
totReturn=round((totReturn-1)*100,2)
print("These statistics are from "+str(start)+" up till now with "+str(numGains+numLosses)+" trades:")
print("Total return over "+str(numGains+numLosses)+ " trades: "+ str(totReturn)+"%")
if (numGains>0):
avgGain=gains/numGains
maxReturn= str(max(percentChange))
else:
avgGain=0
maxReturn=np.nan
if(numLosses>0):
avgLoss=losses/numLosses
maxLoss=str(min(percentChange))
ratioRR=str(-avgGain/avgLoss)
else:
avgLoss=0
maxLoss=np.nan
ratioRR='inf'
df['PC'] = df['Close'].pct_change()
hold = round(df['PC'].sum() * 100, 2)
print ("Total return for a B&H strategy: " + str(hold)+'%')
print("Average Gain: "+ str(round(avgGain, 2)))
print("Average Loss: "+ str(round(avgLoss, 2)))
print("Max Return: "+ str(maxReturn))
print("Max Loss: "+ str(maxLoss))
print("Gain/loss ratio: "+ str(ratioRR))
if(numGains>0 or numLosses>0):
batAvg=numGains/(numGains+numLosses)
else:
batAvg=0
print("Batting Avg: "+ str(batAvg))
change.append(totReturn)
trades = numGains+numLosses
num_of_trades.append(trades)
last_sell.append(sellP)
last_buy.append(buyP)
average_gain.append(avgGain)
average_loss.append(avgLoss)
max_return.append(float(maxReturn))
max_loss.append(float(maxLoss))
gain_loss.append(float(ratioRR))
battling_avg.append(batAvg)
elif signal.lower() == 'red white blue':
print ('-'*60)
print ('Red White Blue: ')
position=0
counter=0
percentChange=[]
emasUsed=[3,5,8,10,12,15,30,35,40,45,50,60]
for x in emasUsed:
ema=x
df["Ema_"+str(ema)]=round(df.iloc[:,4].ewm(span=ema, adjust=False).mean(),2)
df=df.iloc[60:]
for i in df.index:
cmin=min(df["Ema_3"][i],df["Ema_5"][i],df["Ema_8"][i],df["Ema_10"][i],df["Ema_12"][i],df["Ema_15"][i],)
cmax=max(df["Ema_30"][i],df["Ema_35"][i],df["Ema_40"][i],df["Ema_45"][i],df["Ema_50"][i],df["Ema_60"][i],)
close=df["Adj Close"][i]
if(cmin>cmax):
if(position==0):
bp=close
position=1
print("Buying now at "+str(bp))
elif(cmin<cmax):
if(position==1):
position=0
sp=close
print("Selling now at "+str(sp))
pc=(sp/bp-1)*100
percentChange.append(pc)
if(counter==df["Adj Close"].count()-1 and position==1):
position=0
sp=close
print("Selling now at "+str(sp))
pc=(sp/bp-1)*100
percentChange.append(pc)
counter+=1
gains=0
numGains=0
losses=0
numLosses=0
totReturn=1
for i in percentChange:
if(i>0):
gains+=i
numGains+=1
else:
losses+=i
numLosses+=1
totReturn = totReturn*((i/100)+1)
totReturn=round((totReturn-1)*100,2)
print("These statistics are from "+str(start)+" up till now with | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
# Names re-exported by `from <this module> import *`; one entry per generated
# output type defined below.
__all__ = [
    'BudgetActionActionThreshold',
    'BudgetActionDefinition',
    'BudgetActionDefinitionIamActionDefinition',
    'BudgetActionDefinitionScpActionDefinition',
    'BudgetActionDefinitionSsmActionDefinition',
    'BudgetActionSubscriber',
    'BudgetCostTypes',
    'BudgetNotification',
]
@pulumi.output_type
class BudgetActionActionThreshold(dict):
    """The threshold at which a budget action is triggered."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to the snake_case property replacing them.
        lookup = {
            "actionThresholdType": "action_threshold_type",
            "actionThresholdValue": "action_threshold_value",
        }
        suggest = lookup.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BudgetActionActionThreshold. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn about deprecated camelCase access, then delegate to dict.
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn about deprecated camelCase access, then delegate to dict.
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 action_threshold_type: str,
                 action_threshold_value: float):
        """
        :param str action_threshold_type: The type of threshold for a notification. Valid values are `PERCENTAGE` or `ABSOLUTE_VALUE`.
        :param float action_threshold_value: The threshold of a notification.
        """
        pulumi.set(__self__, "action_threshold_type", action_threshold_type)
        pulumi.set(__self__, "action_threshold_value", action_threshold_value)

    @property
    @pulumi.getter(name="actionThresholdType")
    def action_threshold_type(self) -> str:
        """
        Threshold type for the notification: `PERCENTAGE` or `ABSOLUTE_VALUE`.
        """
        return pulumi.get(self, "action_threshold_type")

    @property
    @pulumi.getter(name="actionThresholdValue")
    def action_threshold_value(self) -> float:
        """
        Numeric threshold of the notification.
        """
        return pulumi.get(self, "action_threshold_value")
@pulumi.output_type
class BudgetActionDefinition(dict):
    """Definition of what a budget action does (IAM, SCP, or SSM action)."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to the snake_case property replacing them.
        lookup = {
            "iamActionDefinition": "iam_action_definition",
            "scpActionDefinition": "scp_action_definition",
            "ssmActionDefinition": "ssm_action_definition",
        }
        suggest = lookup.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BudgetActionDefinition. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn about deprecated camelCase access, then delegate to dict.
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn about deprecated camelCase access, then delegate to dict.
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 iam_action_definition: Optional['outputs.BudgetActionDefinitionIamActionDefinition'] = None,
                 scp_action_definition: Optional['outputs.BudgetActionDefinitionScpActionDefinition'] = None,
                 ssm_action_definition: Optional['outputs.BudgetActionDefinitionSsmActionDefinition'] = None):
        """
        :param 'BudgetActionDefinitionIamActionDefinitionArgs' iam_action_definition: The AWS Identity and Access Management (IAM) action definition details. See IAM Action Definition.
        :param 'BudgetActionDefinitionScpActionDefinitionArgs' scp_action_definition: The service control policies (SCPs) action definition details. See SCP Action Definition.
        :param 'BudgetActionDefinitionSsmActionDefinitionArgs' ssm_action_definition: The AWS Systems Manager (SSM) action definition details. See SSM Action Definition.
        """
        # Only set the definitions that were actually provided.
        for prop, value in (
                ("iam_action_definition", iam_action_definition),
                ("scp_action_definition", scp_action_definition),
                ("ssm_action_definition", ssm_action_definition)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="iamActionDefinition")
    def iam_action_definition(self) -> Optional['outputs.BudgetActionDefinitionIamActionDefinition']:
        """
        Details of the AWS Identity and Access Management (IAM) action, if any.
        """
        return pulumi.get(self, "iam_action_definition")

    @property
    @pulumi.getter(name="scpActionDefinition")
    def scp_action_definition(self) -> Optional['outputs.BudgetActionDefinitionScpActionDefinition']:
        """
        Details of the service control policies (SCPs) action, if any.
        """
        return pulumi.get(self, "scp_action_definition")

    @property
    @pulumi.getter(name="ssmActionDefinition")
    def ssm_action_definition(self) -> Optional['outputs.BudgetActionDefinitionSsmActionDefinition']:
        """
        Details of the AWS Systems Manager (SSM) action, if any.
        """
        return pulumi.get(self, "ssm_action_definition")
@pulumi.output_type
class BudgetActionDefinitionIamActionDefinition(dict):
    """IAM policy-attachment details for a budget action."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to the snake_case property replacing them.
        lookup = {
            "policyArn": "policy_arn",
        }
        suggest = lookup.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BudgetActionDefinitionIamActionDefinition. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn about deprecated camelCase access, then delegate to dict.
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn about deprecated camelCase access, then delegate to dict.
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 policy_arn: str,
                 groups: Optional[Sequence[str]] = None,
                 roles: Optional[Sequence[str]] = None,
                 users: Optional[Sequence[str]] = None):
        """
        :param str policy_arn: The Amazon Resource Name (ARN) of the policy to be attached.
        :param Sequence[str] groups: A list of groups to be attached. There must be at least one group.
        :param Sequence[str] roles: A list of roles to be attached. There must be at least one role.
        :param Sequence[str] users: A list of users to be attached. There must be at least one user.
        """
        pulumi.set(__self__, "policy_arn", policy_arn)
        # Only set the principal lists that were actually provided.
        for prop, value in (("groups", groups), ("roles", roles), ("users", users)):
            if value is not None:
                pulumi.set(__self__, prop, value)

    @property
    @pulumi.getter(name="policyArn")
    def policy_arn(self) -> str:
        """
        Amazon Resource Name (ARN) of the policy being attached.
        """
        return pulumi.get(self, "policy_arn")

    @property
    @pulumi.getter
    def groups(self) -> Optional[Sequence[str]]:
        """
        Groups the policy is attached to (at least one group when present).
        """
        return pulumi.get(self, "groups")

    @property
    @pulumi.getter
    def roles(self) -> Optional[Sequence[str]]:
        """
        Roles the policy is attached to (at least one role when present).
        """
        return pulumi.get(self, "roles")

    @property
    @pulumi.getter
    def users(self) -> Optional[Sequence[str]]:
        """
        Users the policy is attached to (at least one user when present).
        """
        return pulumi.get(self, "users")
@pulumi.output_type
class BudgetActionDefinitionScpActionDefinition(dict):
    """Service control policy (SCP) attachment details for a budget action."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to the snake_case property replacing them.
        lookup = {
            "policyId": "policy_id",
            "targetIds": "target_ids",
        }
        suggest = lookup.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BudgetActionDefinitionScpActionDefinition. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn about deprecated camelCase access, then delegate to dict.
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn about deprecated camelCase access, then delegate to dict.
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 policy_id: str,
                 target_ids: Sequence[str]):
        """
        :param str policy_id: The policy ID attached.
        :param Sequence[str] target_ids: A list of target IDs.
        """
        pulumi.set(__self__, "policy_id", policy_id)
        pulumi.set(__self__, "target_ids", target_ids)

    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> str:
        """
        ID of the attached policy.
        """
        return pulumi.get(self, "policy_id")

    @property
    @pulumi.getter(name="targetIds")
    def target_ids(self) -> Sequence[str]:
        """
        IDs of the targets the policy applies to.
        """
        return pulumi.get(self, "target_ids")
@pulumi.output_type
class BudgetActionDefinitionSsmActionDefinition(dict):
    """AWS Systems Manager (SSM) action details for a budget action."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to the snake_case property replacing them.
        lookup = {
            "actionSubType": "action_sub_type",
            "instanceIds": "instance_ids",
        }
        suggest = lookup.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BudgetActionDefinitionSsmActionDefinition. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn about deprecated camelCase access, then delegate to dict.
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn about deprecated camelCase access, then delegate to dict.
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 action_sub_type: str,
                 instance_ids: Sequence[str],
                 region: str):
        """
        :param str action_sub_type: The action subType. Valid values are `STOP_EC2_INSTANCES` or `STOP_RDS_INSTANCES`.
        :param Sequence[str] instance_ids: The EC2 and RDS instance IDs.
        :param str region: The Region to run the SSM document.
        """
        pulumi.set(__self__, "action_sub_type", action_sub_type)
        pulumi.set(__self__, "instance_ids", instance_ids)
        pulumi.set(__self__, "region", region)

    @property
    @pulumi.getter(name="actionSubType")
    def action_sub_type(self) -> str:
        """
        Action subType: `STOP_EC2_INSTANCES` or `STOP_RDS_INSTANCES`.
        """
        return pulumi.get(self, "action_sub_type")

    @property
    @pulumi.getter(name="instanceIds")
    def instance_ids(self) -> Sequence[str]:
        """
        EC2 and RDS instance IDs the action applies to.
        """
        return pulumi.get(self, "instance_ids")

    @property
    @pulumi.getter
    def region(self) -> str:
        """
        Region in which the SSM document runs.
        """
        return pulumi.get(self, "region")
@pulumi.output_type
class BudgetActionSubscriber(dict):
    """A subscriber (SNS topic or email address) notified by a budget action."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys mapped to the snake_case property replacing them.
        lookup = {
            "subscriptionType": "subscription_type",
        }
        suggest = lookup.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BudgetActionSubscriber. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn about deprecated camelCase access, then delegate to dict.
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn about deprecated camelCase access, then delegate to dict.
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 address: str,
                 subscription_type: str):
        """
        :param str address: The address that AWS sends budget notifications to, either an SNS topic or an email.
        :param str subscription_type: The type of notification that AWS sends to a subscriber. Valid values are `SNS` or `EMAIL`.
        """
        pulumi.set(__self__, "address", address)
        pulumi.set(__self__, "subscription_type", subscription_type)

    @property
    @pulumi.getter
    def address(self) -> str:
        """
        Delivery address for budget notifications: an SNS topic ARN or an email.
        """
        return pulumi.get(self, "address")

    @property
    @pulumi.getter(name="subscriptionType")
    def subscription_type(self) -> str:
        """
        Notification channel type: `SNS` or `EMAIL`.
        """
        return pulumi.get(self, "subscription_type")
@pulumi.output_type
class BudgetCostTypes(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "includeCredit":
suggest = "include_credit"
elif key == "includeDiscount":
suggest = "include_discount"
elif key == "includeOtherSubscription":
suggest = "include_other_subscription"
elif key == "includeRecurring":
suggest = "include_recurring"
elif key == "includeRefund":
suggest = "include_refund"
elif key == "includeSubscription":
suggest = "include_subscription"
elif key == "includeSupport":
suggest = "include_support"
elif key == "includeTax":
suggest = "include_tax"
elif key == "includeUpfront":
suggest = "include_upfront"
elif key == "useAmortized":
suggest = "use_amortized"
elif key == "useBlended":
suggest = "use_blended"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in BudgetCostTypes. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
    # Warn when a camelCase wire key is accessed directly instead of
    # through the snake_case property getter, then do normal item access.
    BudgetCostTypes.__key_warning(key)
    return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
    # Same camelCase-key warning as __getitem__, with dict.get semantics
    # (returns `default` instead of raising on a missing key).
    BudgetCostTypes.__key_warning(key)
    return super().get(key, default)
def __init__(__self__, *,
include_credit: Optional[bool] = None,
include_discount: Optional[bool] = None,
include_other_subscription: Optional[bool] = None,
include_recurring: Optional[bool] = None,
include_refund: | |
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
misc.test_procedure()
srv_msg.client_does_include('Client', None, 'empty-client-id')
srv_msg.client_save_option('IA_NA')
srv_msg.client_save_option('server-id')
srv_msg.client_add_saved_option('DONT ')
srv_msg.client_send_msg('RELEASE')
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
misc.test_procedure()
srv_msg.client_add_saved_option(None)
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_send_msg('RELEASE')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'REPLY')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '13')
misc.test_procedure()
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.release
@pytest.mark.CVE2015
def test_v6_CVE_2015_8373_release_with_empty_client_id_ERROR():
    """CVE-2015-8373 regression: a RELEASE carrying an empty client-id must be
    dropped without a reply (server logging at ERROR level) and must not crash
    the server; a correct RELEASE and later SOLICITs must still be served."""
    misc.test_setup()
    # Fix: the pool literal was corrupted ('fc00:...-300fc00:...'); restore the
    # '3000::1-3000::ff' range used by every sibling test in this family.
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.configure_loggers('kea-dhcp6', 'ERROR', 'None')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # Obtain a lease: SOLICIT -> ADVERTISE.
    misc.test_procedure()
    srv_msg.client_requests_option('7')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')

    # REQUEST -> REPLY.
    misc.test_procedure()
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')

    # RELEASE with an empty client-id: the server must stay silent.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'empty-client-id')
    srv_msg.client_save_option('IA_NA')
    srv_msg.client_save_option('server-id')
    srv_msg.client_add_saved_option('DONT ')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()

    # A well-formed RELEASE is answered with a REPLY carrying options 1, 2, 13.
    misc.test_procedure()
    srv_msg.client_add_saved_option(None)
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '13')

    # Server is still alive and advertising afterwards.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.release
@pytest.mark.CVE2015
def test_v6_CVE_2015_8373_release_with_empty_client_id_WARN():
    """CVE-2015-8373 regression: a RELEASE carrying an empty client-id must be
    dropped without a reply (logging at WARN) and must not crash the server."""
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.configure_loggers('kea-dhcp6', 'WARN', 'None')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # Obtain a lease: SOLICIT -> ADVERTISE.
    misc.test_procedure()
    srv_msg.client_requests_option('7')
    for what in ('client-id', 'IA-NA'):
        srv_msg.client_does_include('Client', None, what)
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')

    # REQUEST -> REPLY.
    misc.test_procedure()
    for copied in ('IA_NA', 'server-id'):
        srv_msg.client_copy_option(copied)
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')

    # RELEASE with an empty client-id: the server must stay silent.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'empty-client-id')
    for saved in ('IA_NA', 'server-id'):
        srv_msg.client_save_option(saved)
    srv_msg.client_add_saved_option('DONT ')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()

    # A well-formed RELEASE is answered with a REPLY carrying options 1, 2, 13.
    misc.test_procedure()
    srv_msg.client_add_saved_option(None)
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    for opt in ('1', '2', '13'):
        srv_msg.response_check_include_option('Response', None, opt)

    # Server is still alive and advertising afterwards.
    misc.test_procedure()
    for what in ('client-id', 'IA-NA'):
        srv_msg.client_does_include('Client', None, what)
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.release
@pytest.mark.CVE2015
def test_v6_CVE_2015_8373_release_with_empty_client_id_INFO():
    """CVE-2015-8373 regression: a RELEASE carrying an empty client-id must be
    dropped without a reply (logging at INFO) and must not crash the server."""
    misc.test_setup()
    # Fix: the pool literal was corrupted ('fc00:...-3000::ff'); restore the
    # '3000::1-3000::ff' range used by every sibling test in this family.
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.configure_loggers('kea-dhcp6', 'INFO', 'None')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # Obtain a lease: SOLICIT -> ADVERTISE.
    misc.test_procedure()
    srv_msg.client_requests_option('7')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')

    # REQUEST -> REPLY.
    misc.test_procedure()
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')

    # RELEASE with an empty client-id: the server must stay silent.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'empty-client-id')
    srv_msg.client_save_option('IA_NA')
    srv_msg.client_save_option('server-id')
    srv_msg.client_add_saved_option('DONT ')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()

    # A well-formed RELEASE is answered with a REPLY carrying options 1, 2, 13.
    misc.test_procedure()
    srv_msg.client_add_saved_option(None)
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '13')

    # Server is still alive and advertising afterwards.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.release
@pytest.mark.CVE2015
def test_v6_CVE_2015_8373_release_with_empty_client_id_DEBUG():
    """CVE-2015-8373 regression: a RELEASE carrying an empty client-id must be
    dropped without a reply (logging at DEBUG/99) and must not crash the server."""
    misc.test_setup()
    # Fix: the pool literal was corrupted ('fc00:...-3000::ff'); restore the
    # '3000::1-3000::ff' range used by every sibling test in this family.
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.configure_loggers('kea-dhcp6', 'DEBUG', '99')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # Obtain a lease: SOLICIT -> ADVERTISE.
    misc.test_procedure()
    srv_msg.client_requests_option('7')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')

    # REQUEST -> REPLY.
    misc.test_procedure()
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')

    # RELEASE with an empty client-id: the server must stay silent.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'empty-client-id')
    srv_msg.client_save_option('IA_NA')
    srv_msg.client_save_option('server-id')
    srv_msg.client_add_saved_option('DONT ')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()

    # A well-formed RELEASE is answered with a REPLY carrying options 1, 2, 13.
    misc.test_procedure()
    srv_msg.client_add_saved_option(None)
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '13')

    # Server is still alive and advertising afterwards.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.renew
@pytest.mark.CVE2015
def test_v6_CVE_2015_8373_renew_with_empty_client_id_FATAL():
    """CVE-2015-8373 regression: a RENEW carrying an empty client-id must be
    dropped without a reply (logging at FATAL) and must not crash the server."""
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.configure_loggers('kea-dhcp6', 'FATAL', 'None')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # Obtain a lease: SOLICIT -> ADVERTISE.
    misc.test_procedure()
    srv_msg.client_requests_option('7')
    for what in ('client-id', 'IA-NA'):
        srv_msg.client_does_include('Client', None, what)
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')

    # REQUEST -> REPLY.
    misc.test_procedure()
    for copied in ('IA_NA', 'server-id'):
        srv_msg.client_copy_option(copied)
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')

    # RENEW with an empty client-id: the server must stay silent.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'empty-client-id')
    for saved in ('IA_NA', 'server-id'):
        srv_msg.client_save_option(saved)
    srv_msg.client_add_saved_option('DONT ')
    srv_msg.client_send_msg('RENEW')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()

    # A well-formed RENEW gets a REPLY with options 1, 2, 3 (sub-option 5).
    misc.test_procedure()
    srv_msg.client_add_saved_option(None)
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('RENEW')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    for opt in ('1', '2', '3'):
        srv_msg.response_check_include_option('Response', None, opt)
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Server is still alive and advertising afterwards.
    misc.test_procedure()
    for what in ('client-id', 'IA-NA'):
        srv_msg.client_does_include('Client', None, what)
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.renew
@pytest.mark.CVE2015
def test_v6_CVE_2015_8373_renew_with_empty_client_id_ERROR():
    """CVE-2015-8373 regression: a RENEW carrying an empty client-id must be
    dropped without a reply (logging at ERROR) and must not crash the server."""
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.configure_loggers('kea-dhcp6', 'ERROR', 'None')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # Obtain a lease: SOLICIT -> ADVERTISE.
    misc.test_procedure()
    srv_msg.client_requests_option('7')
    for what in ('client-id', 'IA-NA'):
        srv_msg.client_does_include('Client', None, what)
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')

    # REQUEST -> REPLY.
    misc.test_procedure()
    for copied in ('IA_NA', 'server-id'):
        srv_msg.client_copy_option(copied)
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')

    # RENEW with an empty client-id: the server must stay silent.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'empty-client-id')
    for saved in ('IA_NA', 'server-id'):
        srv_msg.client_save_option(saved)
    srv_msg.client_add_saved_option('DONT ')
    srv_msg.client_send_msg('RENEW')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()

    # A well-formed RENEW gets a REPLY with options 1, 2, 3 (sub-option 5).
    misc.test_procedure()
    srv_msg.client_add_saved_option(None)
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('RENEW')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    for opt in ('1', '2', '3'):
        srv_msg.response_check_include_option('Response', None, opt)
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Server is still alive and advertising afterwards.
    misc.test_procedure()
    for what in ('client-id', 'IA-NA'):
        srv_msg.client_does_include('Client', None, what)
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.renew
@pytest.mark.CVE2015
def test_v6_CVE_2015_8373_renew_with_empty_client_id_WARN():
    """CVE-2015-8373 regression: a RENEW carrying an empty client-id must be
    dropped without a reply (logging at WARN) and must not crash the server."""
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.configure_loggers('kea-dhcp6', 'WARN', 'None')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # Obtain a lease: SOLICIT -> ADVERTISE.
    misc.test_procedure()
    srv_msg.client_requests_option('7')
    for what in ('client-id', 'IA-NA'):
        srv_msg.client_does_include('Client', None, what)
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')

    # REQUEST -> REPLY.
    misc.test_procedure()
    for copied in ('IA_NA', 'server-id'):
        srv_msg.client_copy_option(copied)
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')

    # RENEW with an empty client-id: the server must stay silent.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'empty-client-id')
    for saved in ('IA_NA', 'server-id'):
        srv_msg.client_save_option(saved)
    srv_msg.client_add_saved_option('DONT ')
    srv_msg.client_send_msg('RENEW')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()

    # A well-formed RENEW gets a REPLY with options 1, 2, 3 (sub-option 5).
    misc.test_procedure()
    srv_msg.client_add_saved_option(None)
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('RENEW')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    for opt in ('1', '2', '3'):
        srv_msg.response_check_include_option('Response', None, opt)
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Server is still alive and advertising afterwards.
    misc.test_procedure()
    for what in ('client-id', 'IA-NA'):
        srv_msg.client_does_include('Client', None, what)
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.renew
@pytest.mark.CVE2015
def test_v6_CVE_2015_8373_renew_with_empty_client_id_INFO():
    """CVE-2015-8373 regression: a RENEW carrying an empty client-id must be
    dropped without a reply (logging at INFO) and must not crash the server."""
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.configure_loggers('kea-dhcp6', 'INFO', 'None')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # Obtain a lease: SOLICIT -> ADVERTISE.
    misc.test_procedure()
    srv_msg.client_requests_option('7')
    for what in ('client-id', 'IA-NA'):
        srv_msg.client_does_include('Client', None, what)
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')

    # REQUEST -> REPLY.
    misc.test_procedure()
    for copied in ('IA_NA', 'server-id'):
        srv_msg.client_copy_option(copied)
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')

    # RENEW with an empty client-id: the server must stay silent.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'empty-client-id')
    for saved in ('IA_NA', 'server-id'):
        srv_msg.client_save_option(saved)
    srv_msg.client_add_saved_option('DONT ')
    srv_msg.client_send_msg('RENEW')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()

    # A well-formed RENEW gets a REPLY with options 1, 2, 3 (sub-option 5).
    misc.test_procedure()
    srv_msg.client_add_saved_option(None)
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('RENEW')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    for opt in ('1', '2', '3'):
        srv_msg.response_check_include_option('Response', None, opt)
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Server is still alive and advertising afterwards.
    misc.test_procedure()
    for what in ('client-id', 'IA-NA'):
        srv_msg.client_does_include('Client', None, what)
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.renew
@pytest.mark.CVE2015
def test_v6_CVE_2015_8373_renew_with_empty_client_id_DEBUG():
    """CVE-2015-8373 regression: a RENEW carrying an empty client-id must be
    dropped without a reply (logging at DEBUG/99) and must not crash the server."""
    misc.test_setup()
    # Fix: the pool literal was corrupted ('fc00:...-3000::ff'); restore the
    # '3000::1-3000::ff' range used by every sibling test in this family.
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.configure_loggers('kea-dhcp6', 'DEBUG', '99')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # Obtain a lease: SOLICIT -> ADVERTISE.
    misc.test_procedure()
    srv_msg.client_requests_option('7')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')

    # REQUEST -> REPLY.
    misc.test_procedure()
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')

    # RENEW with an empty client-id: the server must stay silent.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'empty-client-id')
    srv_msg.client_save_option('IA_NA')
    srv_msg.client_save_option('server-id')
    srv_msg.client_add_saved_option('DONT ')
    srv_msg.client_send_msg('RENEW')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()

    # A well-formed RENEW gets a REPLY with options 1, 2, 3 (sub-option 5).
    misc.test_procedure()
    srv_msg.client_add_saved_option(None)
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('RENEW')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Server is still alive and advertising afterwards.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.request
@pytest.mark.CVE2015
def test_v6_CVE_2015_8373_request_with_empty_client_id_FATAL():
    """CVE-2015-8373 regression: a REQUEST carrying an empty client-id must be
    dropped without a reply (logging at FATAL) and must not crash the server."""
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.configure_loggers('kea-dhcp6', 'FATAL', 'None')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # SOLICIT -> ADVERTISE.
    misc.test_procedure()
    srv_msg.client_requests_option('7')
    for what in ('client-id', 'IA-NA'):
        srv_msg.client_does_include('Client', None, what)
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')

    # REQUEST with an empty client-id: the server must stay silent.
    misc.test_procedure()
    for saved in ('server-id', 'IA_NA'):
        srv_msg.client_save_option(saved)
    srv_msg.client_add_saved_option('DONT ')
    srv_msg.client_does_include('Client', None, 'empty-client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()

    # A well-formed REQUEST gets a REPLY with options 1, 2, 3 (sub-option 5).
    misc.test_procedure()
    srv_msg.client_add_saved_option(None)
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    for opt in ('1', '2', '3'):
        srv_msg.response_check_include_option('Response', None, opt)
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Server is still alive and advertising afterwards.
    misc.test_procedure()
    for what in ('client-id', 'IA-NA'):
        srv_msg.client_does_include('Client', None, what)
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.request
@pytest.mark.CVE2015
def test_v6_CVE_2015_8373_request_with_empty_client_id_ERROR():
    """CVE-2015-8373 regression: a REQUEST carrying an empty client-id must be
    dropped without a reply (logging at ERROR) and must not crash the server."""
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.configure_loggers('kea-dhcp6', 'ERROR', 'None')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # SOLICIT -> ADVERTISE.
    misc.test_procedure()
    srv_msg.client_requests_option('7')
    for what in ('client-id', 'IA-NA'):
        srv_msg.client_does_include('Client', None, what)
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')

    # REQUEST with an empty client-id: the server must stay silent.
    misc.test_procedure()
    for saved in ('server-id', 'IA_NA'):
        srv_msg.client_save_option(saved)
    srv_msg.client_add_saved_option('DONT ')
    srv_msg.client_does_include('Client', None, 'empty-client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()

    # A well-formed REQUEST gets a REPLY with options 1, 2, 3 (sub-option 5).
    misc.test_procedure()
    srv_msg.client_add_saved_option(None)
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    for opt in ('1', '2', '3'):
        srv_msg.response_check_include_option('Response', None, opt)
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Server is still alive and advertising afterwards.
    misc.test_procedure()
    for what in ('client-id', 'IA-NA'):
        srv_msg.client_does_include('Client', None, what)
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.request
@pytest.mark.CVE2015
def test_v6_CVE_2015_8373_request_with_empty_client_id_WARN():
    """CVE-2015-8373 regression: a REQUEST carrying an empty client-id must be
    dropped without a reply (logging at WARN) and must not crash the server."""
    misc.test_setup()
    # Fix: the pool literal was corrupted ('fc00:...-3000::ff'); restore the
    # '3000::1-3000::ff' range used by every sibling test in this family.
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.configure_loggers('kea-dhcp6', 'WARN', 'None')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # SOLICIT -> ADVERTISE.
    misc.test_procedure()
    srv_msg.client_requests_option('7')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')

    # REQUEST with an empty client-id: the server must stay silent.
    misc.test_procedure()
    srv_msg.client_save_option('server-id')
    srv_msg.client_save_option('IA_NA')
    srv_msg.client_add_saved_option('DONT ')
    srv_msg.client_does_include('Client', None, 'empty-client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()

    # A well-formed REQUEST gets a REPLY with options 1, 2, 3 (sub-option 5).
    misc.test_procedure()
    srv_msg.client_add_saved_option(None)
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Server is still alive and advertising afterwards.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.request
@pytest.mark.CVE2015
def test_v6_CVE_2015_8373_request_with_empty_client_id_INFO():
    """CVE-2015-8373 regression: a REQUEST carrying an empty client-id must be
    dropped without a reply (logging at INFO) and must not crash the server."""
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.configure_loggers('kea-dhcp6', 'INFO', 'None')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # SOLICIT -> ADVERTISE.
    misc.test_procedure()
    srv_msg.client_requests_option('7')
    for what in ('client-id', 'IA-NA'):
        srv_msg.client_does_include('Client', None, what)
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')

    # REQUEST with an empty client-id: the server must stay silent.
    misc.test_procedure()
    for saved in ('server-id', 'IA_NA'):
        srv_msg.client_save_option(saved)
    srv_msg.client_add_saved_option('DONT ')
    srv_msg.client_does_include('Client', None, 'empty-client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()

    # A well-formed REQUEST gets a REPLY with options 1, 2, 3 (sub-option 5).
    misc.test_procedure()
    srv_msg.client_add_saved_option(None)
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    for opt in ('1', '2', '3'):
        srv_msg.response_check_include_option('Response', None, opt)
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Server is still alive and advertising afterwards.
    misc.test_procedure()
    for what in ('client-id', 'IA-NA'):
        srv_msg.client_does_include('Client', None, what)
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.request
@pytest.mark.CVE2015
def test_v6_CVE_2015_8373_request_with_empty_client_id_DEBUG():
    """CVE-2015-8373 regression: a REQUEST carrying an empty client-id must be
    dropped without a reply (logging at DEBUG/99) and must not crash the server."""
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.configure_loggers('kea-dhcp6', 'DEBUG', '99')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # SOLICIT -> ADVERTISE.
    misc.test_procedure()
    srv_msg.client_requests_option('7')
    for what in ('client-id', 'IA-NA'):
        srv_msg.client_does_include('Client', None, what)
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')

    # REQUEST with an empty client-id: the server must stay silent.
    misc.test_procedure()
    for saved in ('server-id', 'IA_NA'):
        srv_msg.client_save_option(saved)
    srv_msg.client_add_saved_option('DONT ')
    srv_msg.client_does_include('Client', None, 'empty-client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()

    # A well-formed REQUEST gets a REPLY with options 1, 2, 3 (sub-option 5).
    misc.test_procedure()
    srv_msg.client_add_saved_option(None)
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    for opt in ('1', '2', '3'):
        srv_msg.response_check_include_option('Response', None, opt)
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')

    # Server is still alive and advertising afterwards.
    misc.test_procedure()
    for what in ('client-id', 'IA-NA'):
        srv_msg.client_does_include('Client', None, what)
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.solicit
@pytest.mark.CVE2015
def test_v6_CVE_2015_8373_solicit_with_empty_client_id_FATAL():
    """CVE-2015-8373 regression: a SOLICIT carrying an empty client-id must be
    dropped without a reply (logging at FATAL) and must not crash the server."""
    misc.test_setup()
    # Fix: the pool literal was corrupted ('fc00:...-3000::ff'); restore the
    # '3000::1-3000::ff' range used by every sibling test in this family.
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.configure_loggers('kea-dhcp6', 'FATAL', 'None')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # SOLICIT with an empty client-id: the server must stay silent.
    misc.test_procedure()
    srv_msg.client_requests_option('7')
    srv_msg.client_does_include('Client', None, 'empty-client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()

    # A well-formed SOLICIT is answered with ADVERTISE carrying options 1, 2, 3.
    misc.test_procedure()
    srv_msg.client_requests_option('7')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')

    # Server is still alive and advertising afterwards.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.solicit
@pytest.mark.CVE2015
def test_v6_CVE_2015_8373_solicit_with_empty_client_id_ERROR():
    """CVE-2015-8373 regression: a SOLICIT carrying an empty client-id must be
    dropped without a reply (logging at ERROR) and must not crash the server."""
    misc.test_setup()
    # Fix: the pool literal was corrupted ('fc00:...-3000::ff'); restore the
    # '3000::1-3000::ff' range used by every sibling test in this family.
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.configure_loggers('kea-dhcp6', 'ERROR', 'None')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')

    # SOLICIT with an empty client-id: the server must stay silent.
    misc.test_procedure()
    srv_msg.client_requests_option('7')
    srv_msg.client_does_include('Client', None, 'empty-client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_dont_wait_for_message()

    # A well-formed SOLICIT is answered with ADVERTISE carrying options 1, 2, 3.
    misc.test_procedure()
    srv_msg.client_requests_option('7')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '1')
    srv_msg.response_check_include_option('Response', None, '2')
    srv_msg.response_check_include_option('Response', None, '3')

    # Server is still alive and advertising afterwards.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.solicit
@pytest.mark.CVE2015
def test_v6_CVE_2015_8373_solicit_with_empty_client_id_WARN():
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', 'fc00:e968:6179::de52:7100-3000::ff')
srv_control.configure_loggers('kea-dhcp6', 'WARN', 'None')
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option('7')
srv_msg.client_does_include('Client', None, 'empty-client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_dont_wait_for_message()
misc.test_procedure()
srv_msg.client_requests_option('7')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
srv_msg.response_check_include_option('Response', None, '1')
srv_msg.response_check_include_option('Response', None, '2')
srv_msg.response_check_include_option('Response', None, '3')
misc.test_procedure()
srv_msg.client_does_include('Client', None, 'client-id')
| |
import pytest
from rundoc import BadInterpreter, BadEnv, RundocException, CodeFailed
import rundoc.block as rb
import rundoc.commander as rc
import rundoc.parsers as rp
import rundoc.__main__ as rm
from pygments import highlight
from pygments.formatters import Terminal256Formatter
from pygments.lexers import get_lexer_by_name
from pygments.styles.manni import ManniStyle
from pygments.styles.native import NativeStyle
from types import *
import inspect
import io
import json
import os
import re
import stat
import tempfile
import threading
import time
###
# Fixtures
###
@pytest.fixture
def environment():
    """Export a small set of test variables into os.environ and return them."""
    env_vars = {
        'custom_var1': '1',
        'CUSTOM_VAR2': '2',
        'custom_var3': 'some text',
    }
    os.environ.update(env_vars)
    return env_vars
@pytest.fixture
def orderedenv(environment):
    """An OrderedEnv pre-populated with the `environment` fixture variables."""
    oenv = rc.OrderedEnv()
    for name, value in environment.items():
        oenv.append(name, value)
    return oenv
@pytest.fixture
def test_vars():
    """Three (name, value) pairs used as sample variables."""
    return [('test{}'.format(i), 'value{}'.format(111 * i)) for i in (1, 2, 3)]
@pytest.fixture
def sandbox():
    """Yield a temporary directory that is removed after the test.

    Uses a plain fixture with yield: ``pytest.yield_fixture`` is deprecated
    and was removed in pytest 6.2; ``pytest.fixture`` supports yield since 3.0.
    """
    with tempfile.TemporaryDirectory() as directory:
        yield directory
@pytest.fixture
def dummy_file(sandbox, environment):
    """Yield the path of a file pre-populated with placeholder test data.

    ``pytest.yield_fixture`` is deprecated (removed in pytest 6.2); a plain
    fixture with yield is the supported replacement.
    """
    fpath = os.path.join(sandbox, 'dummy_file')
    with open(fpath, 'a+') as f:
        f.write('some {dummy} data\n')
        for key in environment:
            f.write(' abc %:' + key + ':%')
    yield fpath
@pytest.fixture
def docblock_bash():
    """A DocBlock backed by the bash interpreter (dark-background colors)."""
    return rb.DocBlock(
        'echo "it is working"',    # code
        ['bash', 'test', 'main'],  # bash as interpreter
        False,                     # light
    )
@pytest.fixture
def docblock_bash_light():
    """A bash DocBlock with colors optimized for light-background terminals."""
    return rb.DocBlock(
        'echo "it is working"',    # code
        ['bash', 'test', 'main'],  # bash as interpreter
        True,                      # light
    )
@pytest.fixture
def docblock_unknown():
    """A DocBlock whose interpreter is a binary in PATH (cd) without code highlighting."""
    return rb.DocBlock(
        'echo "it is working"',  # code
        ['cd', 'test', 'main'],  # interpreter with no highlighting support
        False,                   # light
    )
@pytest.fixture
def mkd_file():
    """An in-memory markdown sample containing fenced bash code blocks."""
    # NOTE(review): the first block seems to be missing its opening ```
    # fence marker -- confirm whether this is intentional (parser robustness).
    data = b'bash#test\nls\n```\n\n```bash#test\nls -al\n```'
    return io.BytesIO(data)
###
# Tests for block.py
###
REGISTERED_BLOCK_ACTIONS = 5
def test_block_action():
    """Registering a block action adds it to rb.block_actions; deleting restores."""
    assert len(rb.block_actions) == REGISTERED_BLOCK_ACTIONS
    def dummy_block_action(args, contents):
        return 0
    rb.block_action(dummy_block_action)
    try:
        assert len(rb.block_actions) == REGISTERED_BLOCK_ACTIONS + 1
        # isinstance, not `type(...) ==`, for the idiomatic type check
        assert isinstance(rb.block_actions['dummy-block-action'], FunctionType)
        assert rb.block_actions['dummy-block-action'] == dummy_block_action
    finally:
        # clean up even if an assertion fails so later tests see a pristine registry
        del rb.block_actions['dummy-block-action']
    assert len(rb.block_actions) == REGISTERED_BLOCK_ACTIONS
def test_fill_env_placeholders__valid(environment):
    """%:VAR:% placeholders are replaced with the variable values."""
    before = ''.join(' abc %:{}:%'.format(key) for key in environment)
    after = before.replace('%:', '{').replace(':%', '}').format(**environment)
    assert rb.fill_env_placeholders(before) == after
def test_fill_env_placeholders__unclosed(environment):
    """An unterminated %: token is left untouched while valid ones are filled."""
    invalid_env = 'Text %:invalid_var '
    valid = ''.join(' abc %:{}:%'.format(key) for key in environment)
    filled = valid.replace('%:', '{').replace(':%', '}').format(**environment)
    before = invalid_env + valid + invalid_env
    after = invalid_env + filled + invalid_env
    assert rb.fill_env_placeholders(before) == after
def test_fill_env_placeholders__unopened(environment):
    """A :% token without a matching %: opener is left untouched."""
    invalid_env = 'Text invalid_var:% '
    valid = ''.join(' abc %:{}:%'.format(key) for key in environment)
    filled = valid.replace('%:', '{').replace(':%', '}').format(**environment)
    before = invalid_env + valid + invalid_env
    after = invalid_env + filled + invalid_env
    assert rb.fill_env_placeholders(before) == after
def test_write_file_action__no_fill(sandbox):
    """fill=False writes the contents verbatim (plus a trailing newline)."""
    testfile = os.path.join(sandbox, inspect.currentframe().f_code.co_name)
    contents = 'some random text\nmore text'
    rb._write_file_action({0: testfile, 1: '774'}, contents, fill=False)
    with open(testfile, 'r') as f:
        assert f.read() == contents + '\n'
def test_write_file_action__fill(sandbox, environment):
    """fill=True expands %:VAR:% placeholders before writing."""
    testfile = os.path.join(sandbox, inspect.currentframe().f_code.co_name)
    before = 'some random text\nmore text' + ''.join(
        ' abc %:{}:%'.format(key) for key in environment)
    after = before.replace('%:', '{').replace(':%', '}').format(**environment)
    rb._write_file_action({0: testfile, 1: '774'}, before, fill=True)
    with open(testfile, 'r') as f:
        assert f.read() == after + '\n'
def test_create_file__fresh(sandbox, environment):
    """_create_file writes the raw (unfilled) template into a new file."""
    testfile = os.path.join(sandbox, inspect.currentframe().f_code.co_name)
    before = ''.join(' abc %:' + key + ':%' for key in environment)
    # the previous version also computed a placeholder-filled variant that
    # was never used (dead code); create-file does not fill placeholders
    rb._create_file({0: testfile}, before)
    with open(testfile, 'r') as f:
        assert f.read() == before + '\n'
def test_create_file__existing(sandbox, environment, dummy_file):
    """_create_file overwrites an existing file with the raw template."""
    testfile = dummy_file
    before = ''.join(' abc %:' + key + ':%' for key in environment)
    # the previous version computed an unused placeholder-filled variant
    # (dead code); the assertion checks only the raw template, i.e. the
    # pre-existing dummy_file contents must be truncated away
    rb._create_file({0: testfile}, before)
    with open(testfile, 'r') as f:
        assert f.read() == before + '\n'
def test_r_create_file__fresh(sandbox, environment):
    """r-create-file writes the placeholder-filled template into a new file."""
    testfile = os.path.join(sandbox, inspect.currentframe().f_code.co_name)
    template = ''.join(' abc %:{}:%'.format(key) for key in environment)
    filled = template.replace('%:', '{').replace(':%', '}').format(**environment)
    rb._r_create_file({0: testfile}, template)
    with open(testfile, 'r') as f:
        assert f.read() == filled + '\n'
def test_r_create_file__existing(sandbox, environment, dummy_file):
    """r-create-file overwrites an existing file with the filled template."""
    testfile = dummy_file
    template = ''.join(' abc %:{}:%'.format(key) for key in environment)
    filled = template.replace('%:', '{').replace(':%', '}').format(**environment)
    rb._r_create_file({0: testfile}, template)
    with open(testfile, 'r') as f:
        assert f.read() == filled + '\n'
def test_create_file__permissions(sandbox, environment):
    """_create_file applies the requested octal permissions to the new file."""
    testfile = os.path.join(sandbox, inspect.currentframe().f_code.co_name)
    permissions = '777'
    before = ''.join(' abc %:' + key + ':%' for key in environment)
    # removed the unused placeholder-filled variant (dead code)
    rb._create_file({0: testfile, 1: permissions}, before)
    with open(testfile, 'r') as f:
        assert f.read() == before + '\n'
    # oct() already returns str, no extra str() needed; the last three
    # digits are the permission bits
    assert oct(os.stat(testfile)[stat.ST_MODE])[-3:] == permissions
def test_r_create_file__permissions(sandbox, environment):
    """r-create-file fills placeholders and applies the requested permissions."""
    testfile = os.path.join(sandbox, inspect.currentframe().f_code.co_name)
    permissions = '777'
    template = ''.join(' abc %:{}:%'.format(key) for key in environment)
    filled = template.replace('%:', '{').replace(':%', '}').format(**environment)
    rb._r_create_file({0: testfile, 1: permissions}, template)
    with open(testfile, 'r') as f:
        assert f.read() == filled + '\n'
    # last three octal digits are the permission bits
    assert str(oct(os.stat(testfile)[stat.ST_MODE]))[-3:] == permissions
def test_append_file__fresh(sandbox, environment):
    """_append_file creates a new file with the raw (unfilled) template."""
    testfile = os.path.join(sandbox, inspect.currentframe().f_code.co_name)
    before = ''.join(' abc %:' + key + ':%' for key in environment)
    # removed the unused placeholder-filled variant (dead code);
    # append-file does not fill placeholders
    rb._append_file({0: testfile}, before)
    with open(testfile, 'r') as f:
        assert f.read() == before + '\n'
def test_append_file__existing(sandbox, environment, dummy_file):
    """_append_file keeps existing contents and appends the raw template."""
    testfile = dummy_file
    with open(dummy_file, 'r') as f:
        initial_contents = f.read()
    before = ''.join(' abc %:' + key + ':%' for key in environment)
    # removed the unused placeholder-filled variant (dead code)
    rb._append_file({0: testfile}, before)
    with open(testfile, 'r') as f:
        assert f.read() == initial_contents + before + '\n'
def test_r_append_file__fresh(sandbox, environment):
    """r-append-file creates a new file with the placeholder-filled template."""
    testfile = os.path.join(sandbox, inspect.currentframe().f_code.co_name)
    template = ''.join(' abc %:{}:%'.format(key) for key in environment)
    filled = template.replace('%:', '{').replace(':%', '}').format(**environment)
    rb._r_append_file({0: testfile}, template)
    with open(testfile, 'r') as f:
        assert f.read() == filled + '\n'
def test_r_append_file__existing(sandbox, environment, dummy_file):
    """r-append-file appends the filled template after the existing data."""
    testfile = dummy_file
    with open(dummy_file, 'r') as f:
        initial_contents = f.read()
    template = ''.join(' abc %:{}:%'.format(key) for key in environment)
    filled = template.replace('%:', '{').replace(':%', '}').format(**environment)
    rb._r_append_file({0: testfile}, template)
    with open(testfile, 'r') as f:
        assert f.read() == initial_contents + filled + '\n'
def test_append_file__permissions(sandbox, environment):
    """_append_file applies the requested octal permissions to a new file."""
    testfile = os.path.join(sandbox, inspect.currentframe().f_code.co_name)
    permissions = '777'
    before = ''.join(' abc %:' + key + ':%' for key in environment)
    # removed the unused placeholder-filled variant (dead code)
    rb._append_file({0: testfile, 1: permissions}, before)
    with open(testfile, 'r') as f:
        assert f.read() == before + '\n'
    # oct() already returns str; last three digits are the permission bits
    assert oct(os.stat(testfile)[stat.ST_MODE])[-3:] == permissions
def test_r_append_file__permissions(sandbox, environment):
    """r-append-file fills placeholders and applies the requested permissions."""
    testfile = os.path.join(sandbox, inspect.currentframe().f_code.co_name)
    permissions = '777'
    template = ''.join(' abc %:{}:%'.format(key) for key in environment)
    filled = template.replace('%:', '{').replace(':%', '}').format(**environment)
    rb._r_append_file({0: testfile, 1: permissions}, template)
    with open(testfile, 'r') as f:
        assert f.read() == filled + '\n'
    # last three octal digits are the permission bits
    assert str(oct(os.stat(testfile)[stat.ST_MODE]))[-3:] == permissions
def test_docblock_init_with_bad_interpreter():
    """Constructing a DocBlock with an unknown interpreter raises BadInterpreter."""
    with pytest.raises(BadInterpreter):
        rb.DocBlock(code='', tags=['bad_interpreter'])
def test_get_block_action__known_actions():
    """Every known file-action name resolves to a callable (lambda)."""
    known_actions = (
        'create-file',
        'r-create-file',
        'append-file',
        'r-append-file',
    )
    for action in known_actions:
        assert isinstance(rb.get_block_action(action + ':text'), LambdaType)
def test_get_block_action__undefined_action():
    """Unknown action names resolve to None."""
    # `is None`, not `== None` (PEP 8 identity comparison)
    assert rb.get_block_action('unknown:text') is None
def test_docblock__get_lexer__bash(docblock_bash):
    """DocBlock picks the same lexer class that pygments resolves for bash."""
    expected = get_lexer_by_name('bash')
    assert docblock_bash.get_lexer().__class__ == expected.__class__
def test_docblock__get_lexer__unknown(docblock_unknown):
    """Interpreters without highlighting support yield no lexer."""
    # `is None`, not `== None` (PEP 8 identity comparison)
    assert docblock_unknown.get_lexer() is None
def test_docblock__str(docblock_bash):
    """str(DocBlock) is the pygments-highlighted code (dark style)."""
    expected = highlight(
        docblock_bash.code,
        get_lexer_by_name(docblock_bash.interpreter),
        Terminal256Formatter(style=NativeStyle),
    )
    assert str(docblock_bash) == expected
def test_docblock_str__last_run(docblock_bash):
    """After a run, str() renders the last run's code instead of the original."""
    user_code = 'echo "changed"'
    run_record = {
        'user_code': user_code,
        'output': '',
        'retcode': None,
        'time_start': None,
        'time_stop': None,
    }
    docblock_bash.runs.append(run_record)
    docblock_bash.last_run['user_code'] = user_code
    expected = highlight(
        user_code,
        get_lexer_by_name(docblock_bash.interpreter),
        Terminal256Formatter(style=NativeStyle),
    )
    assert str(docblock_bash) == expected
def test_docblock__str__light(docblock_bash_light):
    """str(DocBlock) uses the light-background (Manni) pygments style."""
    expected = highlight(
        docblock_bash_light.code,
        get_lexer_by_name(docblock_bash_light.interpreter),
        Terminal256Formatter(style=ManniStyle),
    )
    assert str(docblock_bash_light) == expected
def test_docblock__get_dict(docblock_bash):
    """get_dict() returns a serializable dict mirroring the block state."""
    # isinstance, not `type(...) == type({})`
    assert isinstance(docblock_bash.get_dict(), dict)
    bash_block_dict = {
        'interpreter': 'bash',
        'code': 'echo "this is a test"',
        'tags': [ 'bash', 'test', 'main' ],
        'runs': []
    }
    docblock = rb.DocBlock(
        bash_block_dict['code'],
        bash_block_dict['tags'],
    )
    assert bash_block_dict == docblock.get_dict()
    docblock.run(prompt=False)
    # bounded wait: the original busy-loop could hang the suite forever if
    # the subprocess never finished
    deadline = time.time() + 10
    while docblock.process:
        assert time.time() < deadline, 'docblock run did not finish within 10s'
        time.sleep(0.1)
    actual_dict = docblock.get_dict()
    for key in ('interpreter', 'code', 'tags'):
        assert bash_block_dict[key] == actual_dict[key]
    run = actual_dict['runs'][0]
    assert run['user_code'] == docblock.code
    assert run['output'] == 'this is a test\n'
    assert run['retcode'] == 0
    assert run['time_start'] > 0
    assert run['time_stop'] > 0
def docblock_worker(docblock):
    # Thread target: run the block non-interactively (no confirmation prompt).
    docblock.run(prompt=False)
def test_docblock__run_and_kill():
    """kill() stops a running block; poll() then reports an exit code.

    kill only sends SIGKILL without any knowledge of how it will be
    handled; what is guaranteed is that process.poll() will then hold
    some exit code.
    """
    docblock = rb.DocBlock(
        'echo "start"\nsleep 2\necho "this is test"',
        ['bash', 'test'],
    )
    assert docblock.process is None
    worker = threading.Thread(target=docblock_worker, args=(docblock,))
    worker.start()
    time.sleep(1)
    # process must be alive mid-run (poll() is None while running)...
    assert docblock.process and docblock.process.poll() is None
    docblock.kill()
    time.sleep(0.1)
    # ...and must report an integer exit status right after the kill
    assert docblock.process and isinstance(docblock.process.poll(), int)
def test_docblock__run_action(dummy_file):
    """A recognized action tag (r-create-file) runs successfully (retcode 0)."""
    docblock = rb.DocBlock(
        'some content',
        ['r-create-file:' + dummy_file, 'test'],
    )
    docblock.run(prompt=False)
    assert docblock.last_run['retcode'] == 0
def test_docblock__run_unknown_action():
with pytest.raises(BadInterpreter):
docblock = rb.DocBlock(
'some content',
['unknown-action:bad-data', | |
from __future__ import print_function
import torch
from scipy.ndimage.filters import gaussian_filter
import numpy as np
from PIL import Image
import math
import cv2
import matplotlib.pyplot as plt
import torch.nn.functional as functional
import os
from torch.autograd import Variable
def load_heatmap(hm_path):
    """Load an .npy heatmap and return its first 18 channels as a tensor.

    The stored array's last axis is moved to the front (e.g. HWC -> CHW;
    axis semantics assumed from usage -- TODO confirm) before slicing
    channels 0..17.
    """
    heat = torch.from_numpy(np.load(hm_path))
    heat = heat.transpose(1, 2).transpose(0, 1)
    return heat[:18, :, :]
def tensor2im(image_tensor, imtype=np.uint8):
    """Convert the first image of a batched CHW tensor in [-1, 1] to an HWC numpy image.

    |imtype| is the desired dtype of the converted numpy array.
    """
    arr = image_tensor[0].cpu().float().numpy()
    if arr.shape[0] == 1:
        # grayscale: replicate the single channel to three channels
        arr = np.tile(arr, (3, 1, 1))
    # CHW -> HWC, then rescale [-1, 1] -> [0, 255]
    arr = (arr.transpose(1, 2, 0) + 1) * (255.0 / 2.0)
    return arr.astype(imtype)
def hmp2pose_by_numpy(hmp_numpy):
all_peaks = []
peak_counter = 0
for part in range(18):
map_ori = hmp_numpy[:, :, part]
map = gaussian_filter(map_ori, sigma=5)
map_left = np.zeros(map.shape)
map_left[1:, :] = map[:-1, :]
map_right = np.zeros(map.shape)
map_right[:-1, :] = map[1:, :]
map_up = np.zeros(map.shape)
map_up[:, 1:] = map[:, :-1]
map_down = np.zeros(map.shape)
map_down[:, :-1] = map[:, 1:]
peaks_binary = np.logical_and.reduce(
(map >= map_left, map >= map_right, map >= map_up, map >= map_down, map > 0.01))
peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse
if len(peaks) > 0:
max = 0
for index, peak in enumerate(peaks):
score = map_ori[peak[1], peak[0]]
current_max_score = map_ori[peaks[max][1], peaks[max][0]]
if score > current_max_score:
max = index
peaks_with_score = [(peaks[max][0], peaks[max][1], map_ori[peaks[max][1], peaks[max][0]], peak_counter)]
all_peaks.append(peaks_with_score)
peak_counter += len(peaks_with_score)
else:
all_peaks.append([])
return all_peaks
def hmp2pose(hmp_tensor):
    """Convert a batched CHW heatmap tensor to per-part peaks (see hmp2pose_by_numpy)."""
    channels_last = hmp_tensor[0].cpu().float().numpy().transpose(1, 2, 0)
    return hmp2pose_by_numpy(channels_last)
def hmp2im(heatmap_tensor):
    """Render a heatmap tensor as a pose image with all joints and limbs."""
    return pose2im_all(hmp2pose(heatmap_tensor))
def pose2im_all(all_peaks):
    """Draw the full skeleton (17 limbs + 18 joint dots) for the given peaks."""
    # 1-based joint index pairs forming the standard OpenPose limb set
    limb_seq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10],
                [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17],
                [1, 16], [16, 18]]
    colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
              [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
              [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
    return pose2im(all_peaks, limb_seq, colors)
def pose2im_limb(all_peaks):
    """Draw only the body limbs (no head links, no joint dots)."""
    limb_seq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10],
                [10, 11], [2, 12], [12, 13], [13, 14]]
    colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
              [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
              [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
    return pose2im(all_peaks, limb_seq, colors, _circle=False)
def pose2im_limb_filter(all_peaks, error, threshold):
    """Draw body limbs only; limbs whose error exceeds `threshold` are painted black."""
    limb_seq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10],
                [10, 11], [2, 12], [12, 13], [13, 14]]
    colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0],
              [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255],
              [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
    # black out the limbs flagged as having too large an error
    for limb_index, limb_error in enumerate(error):
        if limb_error > threshold:
            colors[limb_index] = [0, 0, 0]
    return pose2im(all_peaks, limb_seq, colors, _circle=False)
def pose2im(all_peaks, limbSeq, colors, _circle=True, _limb=True, imtype=np.uint8):
    """Render joints and limbs onto a white 256x256 canvas.

    Parameters
    ----------
    all_peaks : list
        18 per-joint peak lists; each entry is empty or holds (x, y, ...) tuples.
    limbSeq : list
        Pairs of 1-based joint indices to connect with a limb.
    colors : list
        One 3-channel color per joint/limb index.
    _circle : bool
        If True, draw a filled dot at each detected joint.
    _limb : bool
        If True, draw each limb as a filled rotated ellipse, alpha-blended
        onto the canvas.
    imtype : numpy dtype
        Output dtype of the rendered image.
    """
    canvas = np.zeros(shape=(256, 256, 3))
    canvas.fill(255)  # white background
    if _circle:
        for i in range(18):
            for j in range(len(all_peaks[i])):
                cv2.circle(canvas, all_peaks[i][j][0:2], 4, colors[i], thickness=-1)
    if _limb:
        stickwidth = 4
        for i in range(len(limbSeq)):
            limb = limbSeq[i]
            cur_canvas = canvas.copy()
            # limbSeq is 1-based; all_peaks is 0-based
            point1_index = limb[0] - 1
            point2_index = limb[1] - 1
            # draw the limb only when both endpoints were detected
            if len(all_peaks[point1_index]) > 0 and len(all_peaks[point2_index]) > 0:
                point1 = all_peaks[point1_index][0][0:2]
                point2 = all_peaks[point2_index][0][0:2]
                # peaks store (x, y): X holds the y coordinates, Y the x coordinates
                X = [point1[1], point2[1]]
                Y = [point1[0], point2[0]]
                mX = np.mean(X)
                mY = np.mean(Y)
                length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
                angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
                # stick: ellipse centered between the joints, rotated to the limb angle
                polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
                cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
                # 40/60 alpha blend keeps overlapping limbs visible
                canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
    return canvas.astype(imtype)
def pose2limb(pose):
    """Return per-limb (dx, dy) offsets; [] where either endpoint is missing."""
    limb_seq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10],
                [10, 11], [2, 12], [12, 13], [13, 14]]
    limbs = []
    for limb in limb_seq:
        # limb indices are 1-based; pose is 0-based
        start, end = limb[0] - 1, limb[1] - 1
        if len(pose[start]) > 0 and len(pose[end]) > 0:
            dx = pose[end][0][0] - pose[start][0][0]
            dy = pose[end][0][1] - pose[start][0][1]
            limbs.append([dx, dy])
        else:
            limbs.append([])
    return limbs
def distance_limb(limbs1, limbs2):
    """Per-limb euclidean distance between two limb-offset lists.

    Returns an array of len(limbs1) + 1 entries: the distance for each limb
    (None when the limb is missing in either pose) followed by the RMS
    distance over all comparable limbs (None when no limb is comparable).

    Fixes: the original called float(None) for missing limbs (TypeError)
    and divided by zero when no limb was present in both poses.
    """
    assert len(limbs1) == len(limbs2)
    total_sq = 0
    count = 0
    error_list = []
    for limb1, limb2 in zip(limbs1, limbs2):
        if len(limb1) > 1 and len(limb2) > 1:
            sq_dist = (limb1[0] - limb2[0]) ** 2 + (limb1[1] - limb2[1]) ** 2
            total_sq += sq_dist
            count += 1
            error_list.append(math.sqrt(float(sq_dist)))
        else:
            # missing limb in either pose: no distance defined
            error_list.append(None)
    # overall RMS error across the comparable limbs
    error_list.append(math.sqrt(total_sq / count) if count else None)
    return np.array(error_list)
def distance_point(all_peaks, index1, index2):
    """Euclidean distance between two detected joints; 0 if either is missing."""
    try:
        p1 = all_peaks[index1][0]
        p2 = all_peaks[index2][0]
    except IndexError:
        # at least one of the two joints was not detected
        return 0
    return math.sqrt((p1[1] - p2[1]) ** 2 + (p1[0] - p2[0]) ** 2)
def crop_head(original_tensor, heatmap_tensor, length):
    """Crop a square head region from `original_tensor` around the face joints.

    The head center is the mean position of the detected face keypoints
    (parts 0, 14-17), shifted down by length/6. When `length` is None it is
    derived from the nose-ear and nose-neck distances.

    Returns (cropped_region, center).
    """
    ear_offset = 10
    hm = heatmap_tensor[0].cpu().float().numpy()
    hm = np.transpose(hm, (1, 2, 0))
    all_peaks = hmp2pose_by_numpy(hm)
    center = [0, 0]
    count = 0
    for i in [0, 14, 15, 16, 17]:
        if len(all_peaks[i]) > 0:
            center[0] += all_peaks[i][0][1]
            center[1] += all_peaks[i][0][0]
            count += 1
    # NOTE: raises ZeroDivisionError when no face joint is detected --
    # callers are expected to supply heatmaps with a visible head.
    center[0] /= count
    center[1] /= count
    if length is None:
        a = distance_point(all_peaks, 0, 16) + ear_offset
        b = distance_point(all_peaks, 0, 17) + ear_offset
        c = distance_point(all_peaks, 1, 0)
        length = max(int(a), int(b), int(c))
    # Bug fix: the original applied this shift *before* resolving a None
    # length, which crashed on None / 6; shift after length is known.
    center[0] += (length / 6)
    crop_region = crop_patch(original_tensor, center, length)
    return crop_region, center
def crop_patch(I, patch_center, patch_radius):
    # Crop a square window around patch_center from an NCHW tensor I.
    [px, py] = [patch_center[0], patch_center[1]]
    r = patch_radius
    # clamp the window to the image bounds
    up_boundary = int(px - r) if px - r > 0 else 0
    down_boundary = int(px + r + 1) if px + r + 1 < I.size(2) else I.size(2)
    left_boundary = int(py - r) if py - r > 0 else 0
    right_boundary = int(py + r + 1) if py + r + 1 < I.size(3) else I.size(3)
    # NOTE(review): the -1 offsets look suspicious -- when up/left boundary
    # is 0, the slice starts at -1 (the LAST row/column) and yields a nearly
    # empty patch; presumably meant to mirror paste_patch's +1 offsets --
    # confirm against paste_patch before changing.
    return I[:, :, up_boundary-1:down_boundary, left_boundary-1:right_boundary]
def paste_patch(I, patch, patch_center, patch_radius):
    # Paste `patch` back into the NCHW tensor I at the window around patch_center.
    [px, py] = [patch_center[0], patch_center[1]]
    r = patch_radius
    # clamp the target window to the image bounds
    up_boundary = int(px - r) if px - r > 0 else 0
    down_boundary = int(px + r + 1) if px + r + 1 < I.size(2) else I.size(2)
    left_boundary = int(py - r) if py - r > 0 else 0
    right_boundary = int(py + r + 1) if py + r + 1 < I.size(3) else I.size(3)
    # NOTE(review): rows use +1/+2 offsets while columns use -1/0 -- this is
    # asymmetric with crop_patch's -1 offsets; the assignment only works when
    # the window sizes happen to match the patch. Verify against crop_patch.
    I[:, :, up_boundary+1:down_boundary+2, left_boundary-1:right_boundary] = patch[:, :, :, :]
    return I
def padRightDownCorner(img, stride, padValue):
    """Pad an (H, W, C) image on the bottom/right so both dims are stride multiples.

    Returns (padded_image, pad) with pad = [up, left, down, right].
    """
    h, w = img.shape[0], img.shape[1]
    pad = [
        0,                                                  # up
        0,                                                  # left
        0 if h % stride == 0 else stride - (h % stride),    # down
        0 if w % stride == 0 else stride - (w % stride),    # right
    ]
    padded = img
    # pad each side with constant padValue slabs (up/left are always zero-width)
    pad_up = np.tile(padded[0:1, :, :] * 0 + padValue, (pad[0], 1, 1))
    padded = np.concatenate((pad_up, padded), axis=0)
    pad_left = np.tile(padded[:, 0:1, :] * 0 + padValue, (1, pad[1], 1))
    padded = np.concatenate((pad_left, padded), axis=1)
    pad_down = np.tile(padded[-2:-1, :, :] * 0 + padValue, (pad[2], 1, 1))
    padded = np.concatenate((padded, pad_down), axis=0)
    pad_right = np.tile(padded[:, -2:-1, :] * 0 + padValue, (1, pad[3], 1))
    padded = np.concatenate((padded, pad_right), axis=1)
    return padded, pad
def get_height(poses):
    """Estimate body height (with margins) over a list of poses.

    The top is the highest head joint (parts 0, 14-17), the bottom the
    lowest of joints 10 and 13. Returns (height, top, bottom) where the
    height is capped at 255 and top/bottom carry a 20-pixel margin clamped
    to the 0..255 canvas.
    """
    height = 0
    top = 1000
    bottom = 0
    for pose in poses:
        pose_top = 1000
        pose_bottom = 0
        for joint in [0, 14, 15, 16, 17]:
            if len(pose[joint]) > 0 and pose[joint][0][1] < pose_top:
                pose_top = pose[joint][0][1]
        for joint in [10, 13]:
            if len(pose[joint]) > 0 and pose[joint][0][1] > pose_bottom:
                pose_bottom = pose[joint][0][1]
        bottom = max(bottom, pose_bottom)
        pose_height = pose_bottom - pose_top + 40
        if pose_height > height:
            height = pose_height
            # top follows the pose with the largest vertical extent
            top = pose_top
    return min(height, 255), max(0, top - 20), min(bottom + 20, 255)
def get_center_from_all(poses):
center_x = 0
center_y = 0
count = 0
for pose | |
: , optional (default : None)
A palette (list) of colors to use for coloring categorical values. Only
applied if `cmap` is set to 'categorical'.
colorbar : boolean, optional (default : True)
If True, plot the colorbar next to the figure.
ticks : boolean (default: True)
If True, show tickmarks along x and y axes indicated spatial coordinates.
dsize : int (default : 37)
The size of the dots in the scatterplot.
title : string (default : None)
The plot title.
spot_borders : boolean (default : False)
If True, draw a border line around each spot.
border_color : string (default : 'black')
The color of the border line around each spot. Only used if `spot_borders`
is True.
border_size : float (default : 0.3)
The thickness of the border line around each spot. Only used if `spot_borders`
is True.
Returns
-------
None
"""
y = -1 * np.array(df[row_key])
x = df[col_key]
if ax is None:
if colorbar:
width = 7
else:
width = 5
figure, ax = plt.subplots(
1,
1,
figsize=(width,5)
)
#if spot_borders:
# if border_size is None:
# border_size = dsize+5
# _plot_slide_one_color(
# df,
# border_color,
# row_key=row_key,
# col_key=col_key,
# dsize=border_size,
# ax=ax
# )
if cmap == 'categorical':
if cat_palette is None:
pal = PALETTE_MANY
else:
pal = cat_palette
val_to_index = {
val: ind
for ind, val in enumerate(sorted(set(values)))
}
colors = [
pal[val_to_index[val]]
for val in values
]
patches = [
mpatches.Patch(color=pal[val_to_index[val]], label=val)
for val in sorted(set(values))
]
if spot_borders:
ax.scatter(x,y,c=colors, s=dsize, edgecolors=border_color, linewidths=border_size)
else:
ax.scatter(x,y,c=colors, s=dsize)
if colorbar:
ax.legend(handles=patches, bbox_to_anchor=(1.05, 1), loc='upper left',)
else:
if spot_borders:
im = ax.scatter(x,y,c=values, cmap=cmap, s=dsize, vmin=vmin, vmax=vmax, edgecolors=border_color, linewidths=border_size)
else:
im = ax.scatter(x,y,c=values, cmap=cmap, s=dsize, vmin=vmin, vmax=vmax)
if colorbar:
if vmin is None or vmax is None:
figure.colorbar(im, ax=ax, ticks=colorticks)
else:
figure.colorbar(im, ax=ax, boundaries=np.linspace(vmin,vmax,100), ticks=colorticks)
if title is not None:
ax.set_title(title)
if not ticks:
ax.set_xticks([])
ax.set_yticks([])
def plot_neighborhood(
    df,
    sources,
    bc_to_neighbs,
    plot_vals,
    plot=False,
    ax=None,
    keep_inds=None,
    dot_size=30,
    vmin=0,
    vmax=1,
    cmap='RdBu_r',
    ticks=True,
    title=None,
    condition=False,
    region_key=None,
    title_size=12,
    neighb_color='black',
    row_key='row',
    col_key='col'
):
    """Plot `plot_vals` on the slide and highlight the neighborhoods of `sources`.

    Source spots and their neighbors are underlaid with `neighb_color`
    dots and re-plotted smaller on top, producing a colored ring effect.
    Returns the matplotlib Axes that was drawn on.
    """
    # Bug fix: create the axes *before* the first scatter call. The original
    # checked `ax is None` only after `ax.scatter(...)` had already been
    # called, so omitting `ax` raised AttributeError.
    if ax is None:
        figure, ax = plt.subplots(
            1,
            1,
            figsize=(5,5)
        )
    # Get all neighborhood spots
    all_neighbs = set()
    for source in sources:
        neighbs = set(bc_to_neighbs[source])
        if condition:
            # restrict neighbors to the source spot's own histological region
            ct_spots = set(df.loc[df[region_key] == df.loc[source][region_key]].index)
            neighbs = neighbs & ct_spots
        all_neighbs.update(neighbs)
    if keep_inds is not None:
        all_neighbs &= set(keep_inds)
    # Base layer: every spot colored by its value
    y = -1 * np.array(df[row_key])
    x = df[col_key]
    ax.scatter(x, y, c=plot_vals, s=dot_size, cmap=cmap, vmin=vmin, vmax=vmax)
    # Highlight layer: sources and their neighbors in `neighb_color`
    colors = []
    plot_inds = []
    for bc_i, bc in enumerate(df.index):
        if bc in sources or bc in all_neighbs:
            plot_inds.append(bc_i)
            colors.append(neighb_color)
    y = -1 * np.array(df.iloc[plot_inds][row_key])
    x = df.iloc[plot_inds][col_key]
    ax.scatter(x, y, c=colors, s=dot_size)
    # Re-plot the colored dots over the highlighted neighborhood. Make
    # the dots smaller so that the highlights stand out.
    overlay_colors = np.array(plot_vals)[plot_inds]
    ax.scatter(x, y, c=overlay_colors, cmap=cmap, s=dot_size*0.25, vmin=vmin, vmax=vmax)
    if not title:
        # NOTE(review): `source` here is the loop variable left over from the
        # neighborhood loop, i.e. the *last* source -- presumably intended
        # for single-source calls; confirm.
        ax.set_title(
            'Neighborhood around ({}, {})'.format(
                df.loc[source][row_key],
                df.loc[source][col_key]
            ),
            fontsize=title_size
        )
    else:
        ax.set_title(title, fontsize=title_size)
    if not ticks:
        ax.set_xticks([])
        ax.set_yticks([])
    if plot:
        plt.show()
    return ax
def mult_genes_plot_correlation(
    adata,
    plot_genes,
    cond_key,
    estimate='local',
    bandwidth=5,
    kernel_matrix=None,
    contrib_thresh=10,
    row_key='row',
    col_key='col',
    dsize=7,
    fig_path=None,
    fig_format='png',
    fig_dpi=150
):
    """
    Create a grid of plots for displaying the correlations between pairs of genes across all spots.
    That is, each spot in the grid displays the spot-specific correlation between a given pair of
    genes.

    Parameters
    ----------
    adata : AnnData
        Spatial gene expression dataset with spatial coordinates
        stored in `adata.obs`.
    plot_genes : list
        List of gene names or IDs. This function will consider the spot-specific
        correlation for every pair of genes in this list. Genes not present in
        `adata.var.index` are silently dropped.
    cond_key : string
        The name of the column in `adata.obs` storing the histological-region
        annotation to condition on (None disables conditioning).
    estimate : string, optional (default : 'local')
        One of {'local', 'regional', 'local_ci'}. The estimation method used to estimate the
        correlation at each spot. If 'local', use Gaussian kernel estimation. If
        'regional', use all of the spots in the given spot's histological region. If 'local_ci'
        is used, then each spot will be colored based on whether the 95% confidence interval
        of the Gaussian kernel estimate overlaps zero.
    kernel_matrix : ndarray, optional (default : None)
        NxN matrix representing the spatial kernel (i.e., pairwise weights between spatial
        locations). If not provided, one will be computed using the `bandwidth` and
        `contrib_thresh` arguments. Only applied if `estimate` is set to 'local' or 'local_ci'.
    bandwidth : int, optional (default : 5)
        The kernel bandwidth used by the test. Only applied if `estimate` is set to 'local'.
        Only applied if `kernel_matrix` is not provided and `estimate` is set to 'local' or
        'local_ci'.
    contrib_thresh : integer, optional (default: 10)
        Threshold for the total weight of all samples contributing to the correlation estimate
        at each spot. Spots with total weight less than this value will be filtered. Only applied
        if `estimate` is set to 'local'. Only applied if `kernel_matrix` is not provided and
        `estimate` is set to 'local' or 'local_ci'.
    row_key : string, optional (default : 'row')
        The name of the column in `adata.obs` storing the row coordinates of each spot.
    col_key : string, optional (default : 'col')
        The name of the column in `adata.obs` storing the column coordinates of each spot.
    dsize : int, optional (default : 7)
        The size of the dots in each plot.
    fig_path : string, optional (default : None)
        Path to save figure as file. If None, the figure is only shown.
    fig_format : string, optional (default : 'png')
        File format to save figure.
    fig_dpi : string, optional (default : 150)
        Resolution of figure.

    Returns
    -------
    None
    """
    condition = cond_key is not None
    if kernel_matrix is None:
        kernel_matrix = st.compute_kernel_matrix(
            adata.obs,
            bandwidth=bandwidth,
            region_key=cond_key,
            condition_on_region=condition,
            y_col=row_key,
            x_col=col_key
        )
    # Select all genes that are in the data
    plot_genes = [
        gene for gene in plot_genes
        if gene in adata.var.index
    ]
    fig, axarr = plt.subplots(
        len(plot_genes),
        len(plot_genes),
        figsize=(2*len(plot_genes),2*len(plot_genes))
    )
    # Compute kept indices
    # NOTE(review): keep_inds is derived from the *first* gene pair only and
    # then reused for the whole grid -- presumably representative for all
    # pairs; confirm. Also assumes len(plot_genes) >= 2.
    corrs, keep_inds = utils.compute_local_correlation(
        adata,
        plot_genes[0], plot_genes[1],
        kernel_matrix=kernel_matrix,
        row_key=row_key,
        col_key=col_key,
        condition=cond_key,
        bandwidth=bandwidth,
        contrib_thresh=contrib_thresh
    )
    # Filter kernel matrix, if it's provided
    kernel_matrix = kernel_matrix[keep_inds,:]
    kernel_matrix = kernel_matrix[:,keep_inds]
    # Get range of expression values for colormap
    # of expression (shared across all diagonal plots)
    all_expr = []
    for gene in plot_genes:
        expr = adata[keep_inds,:].obs_vector(gene)
        all_expr += list(expr)
    min_expr = min(all_expr)
    max_expr = max(all_expr)
    # Grid layout: diagonal = expression of each gene, upper triangle =
    # pairwise correlation, lower triangle hidden.
    for row, ax_row in enumerate(axarr):
        for col, ax in enumerate(ax_row):
            gene_1 = plot_genes[row]
            gene_2 = plot_genes[col]
            if row == 0:
                title = gene_2
            else:
                title = None
            if col == row:
                plot_slide(
                    adata[keep_inds,:].obs,
                    adata[keep_inds,:].obs_vector(gene_1),
                    cmap='turbo',
                    title=title,
                    dsize=dsize,
                    ax=ax,
                    figure=fig,
                    ticks=False,
                    vmin=min_expr,
                    vmax=max_expr,
                    row_key=row_key,
                    col_key=col_key
                )
                ax.set_ylabel(gene_1, fontsize=13)
            elif col > row:
                if estimate in ['local', 'regional']:
                    corrs, kept_inds, _ = plot_correlation(
                        adata[keep_inds,:],
                        gene_1, gene_2,
                        bandwidth=bandwidth,
                        contrib_thresh=contrib_thresh,
                        kernel_matrix=kernel_matrix,
                        row_key=row_key,
                        col_key=col_key,
                        condition=cond_key,
                        cmap='RdBu_r',
                        colorbar=False,
                        ticks=False,
                        ax=ax,
                        figure=None,
                        estimate=estimate,
                        dsize=dsize,
                        title=title
                    )
                elif estimate == 'local_ci':
                    plot_ci_overlap(
                        adata,
                        gene_1,
                        gene_2,
                        cond_key,
                        kernel_matrix=None,
                        bandwidth=bandwidth,
                        row_key=row_key,
                        col_key=col_key,
                        title=None,
                        ax=ax,
                        figure=None,
                        ticks=False,
                        dsize=dsize,
                        colorticks=None,
                        neigh_thresh=contrib_thresh
                    )
            else:
                ax.set_visible(False)
    if fig_path:
        plt.tight_layout()
        fig.savefig(
            fig_path,
            format=fig_format,
            dpi=fig_dpi
        )
    plt.show()
def _compute_pairwise_corrs(
    gene_pairs,
    adata,
    cond_key,
    bandwidth=5,
    row_key='row',
    col_key='col'
):
    """Compute the local-correlation vector for each (gene, gene) pair.

    Returns a list with one correlation array per pair, in input order.
    (The original also accumulated the pair list into an unused local
    `gps`; that dead code has been removed.)
    """
    all_corrs = []
    for g1, g2 in gene_pairs:
        corrs, _ = utils.compute_local_correlation(
            adata,
            g1, g2,
            kernel_matrix=None,
            row_key=row_key,
            col_key=col_key,
            condition=cond_key,
            bandwidth=bandwidth
        )
        all_corrs.append(corrs)
    return all_corrs
def cluster_pairwise_correlations(
adata,
plot_genes,
cond_key,
bandwidth=5,
row_key='row',
col_key='col',
color_thresh=19,
title=None,
remove_y_ticks=False,
fig_path=None,
fig_size=(6,4),
fig_format='png',
fig_dpi=150
):
"""
Cluster the patterns of correlations across all spots between pairs of genes. Plot a
dendrogram of the clustering. Each leaf in the dendrogram represents a single pair of
genes. Two pairs will cluster together if their pattern of correlation, across all of
the spots, are similar.
Parameters
----------
adata : AnnData
Spatial gene expression dataset with spatial coordinates
stored in `adata.obs`.
plot_genes : list
List of gene names or IDs. This function will consider the spot-specific
correlation for every pair of genes in this list.
color_thresh : float, optional, default: 19
The value along the y-axis of the dendrogram to use as a threshold for coloring
the subclusters. The sub-dendrograms below this threshold will | |
<reponame>S0mbre/proxen
# -*- coding: utf-8 -*-
## @package proxen.gui
# @brief The GUI app main window implementation -- see MainWindow class.
import os, json, struct, webbrowser
import traceback
from qtimports import *
import utils
import sysproxy
# ******************************************************************************** #
## `list` proxy variable names
PROXY_OBJS = ['http_proxy', 'https_proxy', 'ftp_proxy', 'rsync_proxy', 'noproxy']
# ******************************************************************************** #
# ***** QThreadStump
# ******************************************************************************** #
## Customized thread class (based on QThread) that adds
# progress, error etc. signals and mutex locking to avoid thread racing.
class QThreadStump(QtCore.QThread):
    ## Error signal (args are: instance of this thread and the error message)
    sig_error = Signal(QtCore.QThread, str)

    ## @param priority `int` thread default priority (default = normal)
    # @param on_start `callable` callback function called before the main
    # operation is executed (callback has no args or returned result)
    # @param on_finish `callable` callback function called after the main
    # operation completes (callback has no args or returned result)
    # @param on_run `callable` callback function for the main
    # operation (callback has no args or returned result)
    # @param on_error `callable` callback function to handle exceptions
    # raised during the thread operation (see QThreadStump::sig_error)
    # @param start_signal `Signal` signal that can be connected to
    # the `start` slot (if not `None`)
    # @param stop_signal `Signal` signal that can be connected to
    # the `terminate` slot (if not `None`)
    # @param free_on_finish `bool` whether the thread instance will be deleted
    # from memory after it completes its operation (default = `False`)
    # @param can_terminate `bool` whether the thread can be terminated (default = `True`)
    # @param start_now `bool` whether to start the thread upon creation (default = `False`)
    def __init__(self, priority=QtCore.QThread.NormalPriority,
                 on_start=None, on_finish=None, on_run=None, on_error=None,
                 start_signal=None, stop_signal=None,
                 free_on_finish=False, can_terminate=True, start_now=False):
        super().__init__()
        ## `int` thread default priority (default = normal)
        self.priority = priority
        ## `callable` callback function executed before the thread runs
        self.on_start = on_start
        ## `callable` callback function executed after the thread finishes
        # NOTE: on_finish must be assigned before free_on_finish, because the
        # on_finish setter reads `_free_on_finish` via getattr with a default.
        self.on_finish = on_finish
        ## `callable` callback function for the main operation
        self.on_run = on_run
        ## `callable` callback function executed when an exception occurs
        self.on_error = on_error
        ## `bool` whether the thread instance will be deleted from memory after it completes
        self.free_on_finish = free_on_finish
        ## `bool` whether the thread can be terminated
        self.can_terminate = can_terminate
        ## `Signal` signal that can be connected to the `start` slot (if not `None`)
        self.start_signal = start_signal
        ## `Signal` signal that can be connected to the `terminate` slot (if not `None`)
        self.stop_signal = stop_signal
        ## `QtCore.QMutex` mutex lock used by QThreadStump::lock() and QThreadStump::unlock()
        self.mutex = QtCore.QMutex()
        if start_now: self.start()

    ## Destructor: waits for the thread to complete.
    def __del__(self):
        # BUGFIX: the exception handlers in this class were bare 'except:'
        # clauses, which also swallow SystemExit / KeyboardInterrupt;
        # they have been narrowed to 'except Exception:' throughout.
        try:
            self.wait()
        except Exception:
            pass

    ## `int` getter for `QtCore.QThread.default_priority` (thread priority)
    @property
    def priority(self):
        return self.default_priority

    ## sets `QtCore.QThread.default_priority` (thread priority)
    @priority.setter
    def priority(self, _priority):
        try:
            self.default_priority = _priority if _priority != QtCore.QThread.InheritPriority else QtCore.QThread.NormalPriority
        except Exception:
            pass

    ## `callable` getter for QThreadStump::_on_start
    @property
    def on_start(self):
        return self._on_start

    ## setter for QThreadStump::_on_start
    @on_start.setter
    def on_start(self, _on_start):
        try:
            # disconnect any previously connected slots (raises if none)
            self.started.disconnect()
        except Exception:
            pass
        ## `callable` callback function executed before the thread runs
        self._on_start = _on_start
        if self._on_start:
            self.started.connect(self._on_start)

    ## `callable` getter for QThreadStump::_on_finish
    @property
    def on_finish(self):
        return self._on_finish

    ## setter for QThreadStump::_on_finish
    @on_finish.setter
    def on_finish(self, _on_finish):
        try:
            self.finished.disconnect()
        except Exception:
            pass
        ## `callable` callback function executed after the thread finishes
        self._on_finish = _on_finish
        if self._on_finish:
            self.finished.connect(self._on_finish)
        # re-establish the self-deletion connection dropped by disconnect()
        if getattr(self, '_free_on_finish', False):
            self.finished.connect(self.deleteLater)

    ## `bool` getter for QThreadStump::_free_on_finish
    @property
    def free_on_finish(self):
        return self._free_on_finish

    ## setter for QThreadStump::_free_on_finish
    @free_on_finish.setter
    def free_on_finish(self, _free_on_finish):
        try:
            self.finished.disconnect()
        except Exception:
            pass
        ## `bool` whether the thread instance will be deleted from memory after it completes
        self._free_on_finish = _free_on_finish
        # re-establish the on_finish connection dropped by disconnect()
        if getattr(self, '_on_finish', None):
            self.finished.connect(self._on_finish)
        if self._free_on_finish:
            self.finished.connect(self.deleteLater)

    ## `callable` getter for QThreadStump::_on_error
    @property
    def on_error(self):
        return self._on_error

    ## setter for QThreadStump::_on_error
    @on_error.setter
    def on_error(self, _on_error):
        try:
            self.sig_error.disconnect()
        except Exception:
            pass
        ## `callable` callback function executed when an exception occurs
        self._on_error = _on_error
        if self._on_error:
            self.sig_error.connect(self._on_error)

    ## `bool` getter for QThreadStump::_can_terminate
    @property
    def can_terminate(self):
        return self._can_terminate

    ## setter for QThreadStump::_can_terminate
    @can_terminate.setter
    def can_terminate(self, _can_terminate):
        self.setTerminationEnabled(_can_terminate)
        ## `bool` whether the thread can be terminated
        self._can_terminate = _can_terminate

    ## `Signal` getter for QThreadStump::_start_signal
    @property
    def start_signal(self):
        return self._start_signal

    ## setter for QThreadStump::_start_signal
    @start_signal.setter
    def start_signal(self, _start_signal):
        ## `Signal` signal that can be connected to the `start` slot
        self._start_signal = _start_signal
        if self._start_signal:
            self._start_signal.connect(self.start)

    ## `Signal` getter for QThreadStump::_stop_signal
    @property
    def stop_signal(self):
        return self._stop_signal

    ## setter for QThreadStump::_stop_signal
    @stop_signal.setter
    def stop_signal(self, _stop_signal):
        ## `Signal` signal that can be connected to the `terminate` slot
        self._stop_signal = _stop_signal
        if self._stop_signal:
            self._stop_signal.connect(self.terminate)

    ## Locks the internal mutex to preclude data racing.
    def lock(self):
        self.mutex.lock()

    ## Releases the mutex lock.
    def unlock(self):
        self.mutex.unlock()

    ## Executes the worker function pointed to by QThreadStump::on_run.
    def run(self):
        try:
            self.setPriority(self.priority)
        except Exception:
            pass
        if self.on_run and not self.isInterruptionRequested():
            try:
                self.on_run()
            except Exception as err:
                # report the error on stderr and via the sig_error signal
                traceback.print_exc(limit=None)
                self.sig_error.emit(self, str(err))
# ******************************************************************************** #
# ***** BrowseEdit
# ******************************************************************************** #
## @brief Edit field with internal 'Browse' button to file or folder browsing.
# Inherited from `QtWidgets.QLineEdit`
class BrowseEdit(QtWidgets.QLineEdit):
    ## @param text `str` initial text in edit field (default = empty)
    # @param parent `QtWidgets.QWidget` parent widget (default = `None`, i.e. no parent)
    # @param dialogtype `str` path and dialog type:
    # * 'fileopen' = open file browse dialog
    # * 'filesave' = save file browse dialog
    # * 'folder' = folder browse dialog
    # `None` = 'fileopen' (default)
    # @param btnicon `str` icon file name in 'resources' directory
    # `None` = 'resources/folder.png' (default)
    # @param btnposition `int` browse button position:
    # * 0 (`QtWidgets.QLineEdit.LeadingPosition`) = left-aligned
    # * 1 (`QtWidgets.QLineEdit.TrailingPosition`) = right-aligned
    # `None` = `QtWidgets.QLineEdit.TrailingPosition` (default)
    # @param opendialogtitle `str` dialog title (`None` will use a default title)
    # @param filefilters `str` file filters for file browse dialog, e.g.
    # `"Images (*.png *.xpm *.jpg);;Text files (*.txt);;XML files (*.xml)"`\n
    # `None` sets the default filter: `"All files (*.*)"`
    # @param fullpath `bool` whether the full file / folder path will be returned
    def __init__(self, text='', parent=None,
                 dialogtype=None, btnicon=None, btnposition=None,
                 opendialogtitle=None, filefilters=None, fullpath=True):
        super().__init__(text, parent)
        ## `str` path and dialog type ('file' or 'folder')
        self.dialogtype = dialogtype or 'fileopen'
        ## `str` icon file name in 'resources' directory
        self.btnicon = btnicon or 'folder.png'
        ## `int` browse button position (0 or 1)
        # BUGFIX: this was `btnposition or QtWidgets.QLineEdit.TrailingPosition`,
        # which silently replaced LeadingPosition (== 0, falsy) with
        # TrailingPosition; compare against None instead so that a caller
        # passing 0 actually gets a left-aligned button.
        self.btnposition = QtWidgets.QLineEdit.TrailingPosition if btnposition is None else btnposition
        ## `str` dialog title
        self._opendialogtitle = opendialogtitle
        ## `str` file filters for file browse dialog
        self._filefilters = filefilters
        ## `bool` whether the full file / folder path will be returned
        self.fullpath = fullpath
        ## `QtWidgets.QWidget` the component edit delegate
        self.delegate = None
        self._set_title_and_filters()
        self.reset_action()

    ## Updates the dialog's title and file filters.
    def _set_title_and_filters(self):
        self.opendialogtitle = getattr(self, 'opendialogtitle', None) or self._opendialogtitle or \
                               ('Select file' if self.dialogtype.startswith('file') else 'Select folder')
        self.filefilters = getattr(self, 'filefilters', None) or self._filefilters or 'All files (*.*)'

    ## Gets the start directory for the browse dialog.
    def _get_dir(self, text=None):
        if text is None: text = self.text()
        # relative paths are resolved against the current working directory
        if text and not (os.path.isfile(text) or os.path.isdir(text)):
            text = os.path.join(os.getcwd(), text)
        if os.path.isfile(text) or os.path.isdir(text):
            return text #os.path.dirname(text)
        else:
            return os.getcwd()

    ## Clears previous actions from the underlying object.
    def _clear_actions(self):
        for act_ in self.actions():
            self.removeAction(act_)

    ## Resets the browse action (after setting options).
    def reset_action(self):
        self._clear_actions()
        self.btnaction = QAction(QtGui.QIcon(f"resources/{self.btnicon}"), '')
        self.btnaction.setToolTip(self.opendialogtitle)
        self.btnaction.triggered.connect(self.on_btnaction)
        self.addAction(self.btnaction, self.btnposition)

    ## Triggered slot for the browse action: opens dialog and sets the edit text.
    @Slot()
    def on_btnaction(self):
        # block delegate signals while the modal dialog is open
        if self.delegate: self.delegate.blockSignals(True)
        opendialogdir = self._get_dir()
        if self.dialogtype == 'fileopen':
            selected_path = QtWidgets.QFileDialog.getOpenFileName(self.window(), self.opendialogtitle, opendialogdir, self.filefilters)
            selected_path = selected_path[0]
        elif self.dialogtype == 'filesave':
            selected_path = QtWidgets.QFileDialog.getSaveFileName(self.window(), self.opendialogtitle, opendialogdir, self.filefilters)
            selected_path = selected_path[0]
        elif self.dialogtype == 'folder':
            selected_path = QtWidgets.QFileDialog.getExistingDirectory(self.window(), self.opendialogtitle, opendialogdir)
        else:
            # unknown dialog type: restore delegate signals and bail out
            if self.delegate: self.delegate.blockSignals(False)
            return
        if not selected_path:
            # user cancelled the dialog
            if self.delegate: self.delegate.blockSignals(False)
            return
        selected_path = selected_path.replace('/', os.sep)
        if not self.fullpath:
            selected_path = os.path.basename(selected_path)
        self.setText(selected_path)
        if self.delegate: self.delegate.blockSignals(False)
# ******************************************************************************** #
# ***** BasicDialog
# ******************************************************************************** #
## @brief Base class for simple dialog windows.
# Creates the basic layout for controls (leaving the central area free to add controls),
# and declares the validate() method to validate correctness of user input before accepting.
class BasicDialog(QtWidgets.QDialog):
## @param geometry `4-tuple` window geometry data: `(left, top, width, height)`.
# | |
== "NAME" and \
(value == "deployment_settings" or \
value == "settings"):
self.fflag = 1
# Get module name from deployment_setting.modules list
elif self.tflag == 0 and self.func_name == "modules" and \
token.tok_name[id] == "STRING":
if value[1:-1] in modlist:
self.mod_name = value[1:-1]
# If 'T' is encountered, set sflag
elif token.tok_name[id] == "NAME" and value == "T":
self.sflag = 1
# If sflag is set and '(' is found, set tflag
elif self.sflag == 1:
if token.tok_name[id] == "LPAR":
self.tflag = 1
self.bracket = 1
self.sflag = 0
# Check if inside 'T()'
elif self.tflag == 1:
# If '(' is encountered, append it to outstr
if token.tok_name[id] == "LPAR":
self.bracket += 1
if self.bracket > 1:
self.outstr += "("
elif token.tok_name[id] == "RPAR":
self.bracket -= 1
# If it's not the last ')' of 'T()',
# append to outstr
if self.bracket > 0:
self.outstr += ")"
# If it's the last ')', add string to list
else:
if spmod == "core":
if self.func_name != "modules" and \
self.func_name not in modlist:
strings.append((entry[2], self.outstr))
elif (self.func_name == "modules" and \
self.mod_name == spmod) or \
(self.func_name == spmod):
strings.append((entry[2], self.outstr))
self.outstr = ""
self.tflag = 0
# If we are inside 'T()', append value to outstr
elif self.bracket > 0:
self.outstr += value
# ---------------------------------------------------------------------
def parseS3cfg(self, spmod, strings, entry, modlist):
""" Function to extract the strings from s3cfg.py """
if isinstance(entry, list):
id = entry[0]
value = entry[1]
if isinstance(value, list):
parseS3cfg = self.parseS3cfg
for element in entry:
parseS3cfg(spmod, strings, element, modlist)
else:
# If value is a function name, store it in func_name
if self.fflag == 1:
self.func_name = value
self.fflag = 0
# If value is 'def', set fflag to store func_name next
elif token.tok_name[id] == "NAME" and value == "def":
self.fflag = 1
# If 'T' is encountered, set sflag
elif token.tok_name[id] == "NAME" and value == "T":
self.sflag = 1
elif self.sflag == 1:
if token.tok_name[id] == "LPAR":
self.tflag = 1
self.bracket = 1
self.sflag = 0
elif self.tflag == 1:
if token.tok_name[id] == "LPAR":
self.bracket += 1
if self.bracket > 1:
self.outstr += "("
elif token.tok_name[id] == "RPAR":
self.bracket -= 1
if self.bracket > 0:
self.outstr += ")"
else:
# If core module is requested
if spmod == "core":
# If extracted data doesn't belong
# to any other module, append to list
if "_" not in self.func_name or \
self.func_name.split("_")[1] not in modlist:
strings.append((entry[2], self.outstr))
# If 'module' in 'get_module_variable()'
# is the requested module, append to list
elif "_" in self.func_name and \
self.func_name.split("_")[1] == spmod:
strings.append((entry[2], self.outstr))
self.outstr = ""
self.tflag = 0
elif self.bracket > 0:
self.outstr += value
# ---------------------------------------------------------------------
    def parseMenu(self, spmod, strings, entry, level):
        """
        Function to extract the strings from menus.py

        spmod   -> requested module name, or "core" for non-module strings
        strings -> output list; extracted (line_number, text) tuples are
                   appended to it
        entry   -> parse-tree node: [token_id, value] or, for terminals,
                   [token_id, value, line_number]
        level   -> current nesting depth; used to recognise the indentation
                   of the functions defined inside the S3OptionsMenu class
        """
        if isinstance(entry, list):
            id = entry[0]
            value = entry[1]
            if isinstance(value, list):
                # Non-terminal node: recurse into every child element,
                # one level deeper.
                parseMenu = self.parseMenu
                for element in entry:
                    parseMenu(spmod, strings, element, level + 1)
            else:
                # Terminal token: dispatch on the current parser state.
                # If value is a class name, store it in class_name
                if self.cflag == 1:
                    self.class_name = value
                    self.cflag = 0
                # If value is 'class', set cflag to store class name next
                elif token.tok_name[id] == "NAME" and value == "class":
                    self.cflag = 1
                elif self.fflag == 1:
                    # Here func_name is used to store the function names
                    # which are in 'S3OptionsMenu' class
                    self.func_name = value
                    self.fflag = 0
                # If value is "def" and it's the first function in the
                # S3OptionsMenu class or its indentation level is equal
                # to the first function in 'S3OptionsMenu class', then
                # set fflag and store the indentation level in findent
                elif token.tok_name[id] == "NAME" and value == "def" and \
                     (self.findent == -1 or level == self.findent):
                    if self.class_name == "S3OptionsMenu":
                        self.findent = level
                        self.fflag = 1
                    else:
                        # a 'def' outside S3OptionsMenu clears func_name
                        self.func_name = ""
                # If current element is 'T', set sflag
                elif token.tok_name[id] == "NAME" and value == "T":
                    self.sflag = 1
                elif self.sflag == 1:
                    # confirm the T(...) call by its opening parenthesis
                    if token.tok_name[id] == "LPAR":
                        self.tflag = 1
                        self.bracket = 1
                        self.sflag = 0
                # If inside 'T()', extract the data accordingly
                elif self.tflag == 1:
                    if token.tok_name[id] == "LPAR":
                        self.bracket += 1
                        if self.bracket > 1:
                            self.outstr += "("
                    elif token.tok_name[id] == "RPAR":
                        self.bracket -= 1
                        if self.bracket > 0:
                            self.outstr += ")"
                        else:
                            # If the requested module is 'core' and
                            # extracted data doesn't lie inside the
                            # S3OptionsMenu class, append it to list
                            if spmod == "core":
                                if self.func_name == "":
                                    strings.append((entry[2], self.outstr))
                            # If the function name (in S3OptionsMenu class)
                            # is equal to the module requested,
                            # then append it to list
                            elif self.func_name == spmod:
                                strings.append((entry[2], self.outstr))
                            self.outstr = ""
                            self.tflag = 0
                    elif self.bracket > 0:
                        self.outstr += value
                else:
                    # Get strings inside 'M()'
                    # If value is 'M', set mflag
                    if token.tok_name[id] == "NAME" and value == "M":
                        self.mflag = 1
                    elif self.mflag == 1:
                        # If mflag is set and argument inside is a string,
                        # append it to list
                        if token.tok_name[id] == "STRING":
                            if spmod == "core":
                                if self.func_name == "":
                                    strings.append((entry[2], value))
                            elif self.func_name == spmod:
                                strings.append((entry[2], value))
                        # If current argument in 'M()' is of type arg = var
                        # or if ')' is found, unset mflag
                        elif token.tok_name[id] == "EQUAL" or \
                             token.tok_name[id] == "RPAR":
                            self.mflag = 0
# ---------------------------------------------------------------------
def parseAll(self, strings, entry):
""" Function to extract all the strings from a file """
if isinstance(entry, list):
id = entry[0]
value = entry[1]
if isinstance(value, list):
parseAll = self.parseAll
for element in entry:
parseAll(strings, element)
else:
# If current element is 'T', set sflag
if token.tok_name[id] == "NAME" and value == "T":
self.sflag = 1
elif self.sflag == 1:
if token.tok_name[id] == "LPAR":
self.tflag = 1
self.bracket = 1
self.sflag = 0
# If inside 'T', extract data accordingly
elif self.tflag == 1:
if token.tok_name[id] == "LPAR":
self.bracket += 1
if self.bracket > 1:
self.outstr += "("
elif token.tok_name[id] == "RPAR":
self.bracket -= 1
if self.bracket > 0:
self.outstr += ")"
else:
strings.append((entry[2], self.outstr))
self.outstr = ""
self.tflag = 0
elif self.bracket > 0:
self.outstr += value
else:
# If current element is 'M', set mflag
if token.tok_name[id] == "NAME" and value == "M":
self.mflag = 1
elif self.mflag == 1:
# If inside 'M()', extract string accordingly
if token.tok_name[id] == "STRING":
strings.append((entry[2], value))
elif token.tok_name[id] == "EQUAL" or \
token.tok_name[id] == "RPAR":
self.mflag = 0
# =============================================================================
class TranslateReadFiles(object):
""" Class to read code files """
# ---------------------------------------------------------------------
@staticmethod
def findstr(fileName, spmod, modlist):
"""
Using the methods in TranslateParseFiles to extract the strings
fileName -> the file to be used for extraction
spmod -> the required module
modlist -> a list of all modules in Eden
"""
try:
f = open(fileName, "rb")
except:
path = os.path.split(__file__)[0]
fileName = os.path.join(path, fileName)
try:
f = open(fileName, "rb")
except:
return
# Read all contents of file
fileContent = f.read().decode("utf-8")
f.close()
# Remove CL-RF and NOEOL characters
fileContent = "%s\n" % fileContent.replace("\r", "")
try:
st = parser.suite(fileContent)
except:
return []
# Create a parse tree list for traversal
stList = parser.st2list(st, line_info=1)
P = TranslateParseFiles()
# List which holds the extracted strings
strings = []
if spmod == "ALL":
# If all strings are to be extracted, call ParseAll()
parseAll = P.parseAll
for element in stList:
parseAll(strings, element)
else:
# Handle cases for special files which contain
# strings belonging to different modules
fileName = os.path.basename(fileName)
if fileName == "s3menus.py":
parseMenu = P.parseMenu
for element in stList:
parseMenu(spmod, strings, element, 0)
elif fileName == "s3cfg.py":
parseS3cfg = P.parseS3cfg
for element in stList:
parseS3cfg(spmod, strings, element, modlist)
elif fileName in ("000_config.py", "config.py"):
parseConfig = P.parseConfig
for element in stList:
parseConfig(spmod, strings, element, modlist)
# Extract strings from deployment_settings.variable() calls
final_strings = []
fsappend = final_strings.append
settings = current.deployment_settings
for (loc, s) in strings:
if s[0] | |
from __future__ import division
import os
import time
import inspect
import logging
import itertools
import sys
import dolfin as df
import numpy as np
import cProfile
import pstats
from aeon import timer
from finmag.field import Field
from finmag.physics.llg import LLG
from finmag.physics.llg_stt import LLG_STT
from finmag.physics.llb.sllg import SLLG
from finmag.sim import sim_details
from finmag.sim import sim_relax
from finmag.sim import sim_savers
from finmag.util.meshes import mesh_volume, mesh_size_plausible, \
describe_mesh_size, plot_mesh, plot_mesh_with_paraview
from finmag.util.fileio import Tablewriter, FieldSaver
from finmag.util import helpers
from finmag.util.vtk_saver import VTKSaver
from finmag.sim.hysteresis import hysteresis as hyst, hysteresis_loop as hyst_loop
from finmag.sim import sim_helpers, magnetisation_patterns
from finmag.drivers.llg_integrator import llg_integrator
from finmag.drivers.sundials_integrator import SundialsIntegrator
from finmag.scheduler import scheduler
from finmag.util.pbc2d import PeriodicBoundary1D, PeriodicBoundary2D
from finmag.energies import Exchange, Zeeman, TimeZeeman, Demag, UniaxialAnisotropy, DMI, MacroGeometry
# used for parallel testing
#from finmag.native import cvode_petsc, llg_petsc
log = logging.getLogger(name="finmag")
class Simulation(object):
"""
Unified interface to finmag's micromagnetic simulations capabilities.
Attributes:
t the current simulation time
"""
# see comment at end of file on 'INSTANCE'
instance_counter_max = 0
instances = {}
    @timer.method
    def __init__(self, mesh, Ms, unit_length=1, name='unnamed', kernel='llg', integrator_backend="sundials", pbc=None, average=False, parallel=False):
        """Simulation object.
        *Arguments*
          mesh : a dolfin mesh
          Ms : Magnetisation saturation (in A/m) of the material.
          unit_length: the distance (in metres) associated with the
                       distance 1.0 in the mesh object.
          name : the Simulation name (used for writing data files, for examples)
          pbc : Periodic boundary type: None or '2d'
          kernel : 'llg', 'sllg' or 'llg_stt'
          average : take the cell averaged effective field, only for test, will delete it if doesn't work.
          integrator_backend : name of the time-integration backend
                       (default: 'sundials')
          parallel : if True, additionally expose the magnetisation as a
                       PETSc vector (self.m_petsc)
        """
        # Store the simulation name and a 'sanitized' version of it which
        # contains only alphanumeric characters and underscores. The latter
        # will be used as a prefix for .log/.ndt files etc.
        self.name = name
        #log.debug("__init__:sim-object '{}' refcount 1={}".format(self.name, sys.getrefcount(self)))
        self.sanitized_name = helpers.clean_filename(name)
        self.logfilename = self.sanitized_name + '.log'
        self.ndtfilename = self.sanitized_name + '.ndt'
        self.logging_handler = helpers.start_logging_to_file(
            self.logfilename, mode='w', level=logging.DEBUG)
        #log.debug("__init__:sim-object '{}' refcount 30={}".format(self.name, sys.getrefcount(self)))
        # instance booking
        # Register this instance in the class-level registry; shutdown()
        # removes the entry again.
        self.instance_id = Simulation.instance_counter_max
        Simulation.instance_counter_max += 1
        assert self.instance_id not in Simulation.instances.keys()
        Simulation.instances[self.instance_id] = self
        # Create a Tablewriter object for ourselves which will be used
        # by various methods to save the average magnetisation at given
        # timesteps.
        self.tablewriter = Tablewriter(self.ndtfilename, self, override=True)
        # Note that we pass the simulation object ("self") to the Tablewrite in the line above, and
        # that the table writer stores a reference. This is just a cyclic reference. If we want
        # the garbage collection to be able to collect this simulation object, we need to remove
        # that cyclic reference. This is what the 'delete()' method attempts to do.
        #log.debug("__init__:sim-object '{}' refcount 31={}".format(self.name, sys.getrefcount(self)))
        self.tablewriter.add_entity('E_total', {
            'unit': '<J>',
            'get': lambda sim: sim.total_energy(),
            'header': 'E_total'})
        self.tablewriter.add_entity('H_total', {
            'unit': '<A/m>',
            'get': lambda sim: helpers.average_field(sim.effective_field()),
            'header': ('H_total_x', 'H_total_y', 'H_total_z')})
        #log.debug("__init__:sim-object '{}' refcount 32={}".format(self.name, sys.getrefcount(self)))
        log.info("Creating Sim object name='{}', instance_id={} (rank={}/{}).".format(
            self.name, self.instance_id, df.MPI.rank(df.mpi_comm_world()), df.MPI.size(df.mpi_comm_world())))
        log.debug(" Total number of Sim objects in this session: {}".format(self.instances_alive_count()))
        log.info(mesh)
        # Periodic boundary conditions: accept None, '1d' or '2d'.
        self.pbc = pbc
        if pbc == '2d':
            log.debug(
                'Setting 2d periodic boundary conditions (in the xy-plane).')
            self.pbc = PeriodicBoundary2D(mesh)
        elif pbc == '1d':
            log.debug(
                'Setting 1d periodic boundary conditions (along the x-axis)')
            self.pbc = PeriodicBoundary1D(mesh)
        elif pbc != None:
            raise ValueError("Argument 'pbc' must be one of None, '1d', '2d'.")
        #log.debug("__init__:sim-object '{}' refcount 35={}".format(self.name, sys.getrefcount(self)))
        # Sanity-check the mesh dimensions against unit_length and warn if
        # the combination looks implausible.
        if not mesh_size_plausible(mesh, unit_length):
            log.warning(
                "The mesh is {}.".format(describe_mesh_size(mesh, unit_length)))
            log.warning(
                "unit_length is set to {}. Are you sure this is correct?".format(unit_length))
        #log.debug("__init__:sim-object '{}' refcount 50={}".format(self.name, sys.getrefcount(self)))
        self.mesh = mesh
        self.unit_length = unit_length
        self.integrator_backend = integrator_backend
        self._integrator = None
        # Scalar and 3-vector Lagrange function spaces (respecting any PBC).
        self.S1 = df.FunctionSpace(
            mesh, "Lagrange", 1, constrained_domain=self.pbc)
        self.S3 = df.VectorFunctionSpace(
            mesh, "Lagrange", 1, dim=3, constrained_domain=self.pbc)
        #log.debug("__init__:sim-object '{}' refcount 40={}".format(self.name, sys.getrefcount(self)))
        # Choose the physics kernel that drives the dynamics.
        if kernel == 'llg':
            self.llg = LLG(
                self.S1, self.S3, average=average, unit_length=unit_length)
        elif kernel == 'sllg':
            self.llg = SLLG(self.S1, self.S3, unit_length=unit_length)
        elif kernel == 'llg_stt':
            self.llg = LLG_STT(self.S1, self.S3, unit_length=unit_length)
        else:
            raise ValueError("kernel must be one of llg, sllg or llg_stt.")
        #log.debug("__init__:sim-object '{}' refcount 41={}".format(self.name, sys.getrefcount(self)))
        self.Ms = Ms
        self.kernel = kernel
        self.Volume = mesh_volume(mesh)
        self.scheduler = scheduler.Scheduler()
        self.callbacks_at_scheduler_events = []
        self.domains = df.CellFunction("uint", self.mesh)
        self.domains.set_all(0)
        self.region_id = 0
        #log.debug("__init__:sim-object '{}' refcount 80={}".format(self.name, sys.getrefcount(self)))
        # XXX TODO: this separation between vtk_savers and
        # field_savers is artificial and should/will be removed once
        # we have a robust, unified field saving mechanism.
        self.vtk_savers = {}
        self.field_savers = {}
        self._render_scene_indices = {}
        #log.debug("__init__:sim-object '{}' refcount 85={}".format(self.name, sys.getrefcount(self)))
        # Map of names accepted by the scheduler to the callables they run.
        self.scheduler_shortcuts = {
            'eta': sim_helpers.eta,
            'ETA': sim_helpers.eta,
            'plot_relaxation': sim_helpers.plot_relaxation,
            'render_scene': Simulation._render_scene_incremental,
            'save_averages': sim_helpers.save_ndt,
            'save_field': sim_savers._save_field_incremental,
            'save_m': sim_savers._save_m_incremental,
            'save_ndt': sim_helpers.save_ndt,
            'save_restart_data': sim_helpers.save_restart_data,
            'save_vtk': sim_savers.save_vtk,  # <- this line creates a reference to the simulation object. Why?
            'switch_off_H_ext': Simulation.switch_off_H_ext,
        }
        #log.debug("__init__:sim-object '{}' refcount 86={}".format(self.name, sys.getrefcount(self)))
        # At the moment, we can only have cvode as the driver, and thus do
        # time development of a system. We may have energy minimisation at some
        # point (the driver would be an optimiser), or something else.
        self.driver = 'cvode'
        # let's use 1e-6 as default and we can change it later
        self.reltol = 1e-6
        self.abstol = 1e-6
        #log.debug("__init__:sim-object '{}' refcount 88={}".format(self.name, sys.getrefcount(self)))
        self.parallel = parallel
        if self.parallel:
            self.m_petsc = self.llg._m_field.petsc_vector()
        #df.parameters.reorder_dofs_serial = True
        #log.debug("__init__:sim-object '{}' refcount 100={}".format(self.name, sys.getrefcount(self)))
    def shutdown(self):
        """Attempt to clear all cyclic dependencies and close all files.
        The simulation object is unusable after this has been called, but
        should be garbage collected if going out of scope subsequently.
        Returns the number of references to self -- in my tests in March 2015,
        this number was 4 when all cyclic references were removed, and thus
        the next GC did work."""
        log.info("Shutting down Simulation object {}".format(self.__str__()))
        # instance book keeping
        assert self.instance_id in Simulation.instances.keys()
        # remove reference to this simulation object from dictionary
        del Simulation.instances[self.instance_id]
        log.debug("{} other Simulation instances alive.".format(
            self.instances_alive_count()))
        # now start to remove (potential) references to 'self':
        # (each step is logged with the current refcount so that leaks
        # can be tracked down by comparing consecutive numbers)
        log.debug(" shutdown(): 1-refcount {} for {}".format(sys.getrefcount(self), self.name))
        self.tablewriter.delete_entity_get_methods()
        #'del self.tablewriter' would be sufficient?
        log.debug(" shutdown(): 2-refcount {} for {}".format(sys.getrefcount(self), self.name))
        # break the tablewriter <-> simulation cycle created in __init__
        del self.tablewriter.sim
        log.debug(" shutdown(): 3-refcount {} for {}".format(sys.getrefcount(self), self.name))
        self.clear_schedule()
        log.debug(" shutdown(): 4-refcount {} for {}".format(sys.getrefcount(self), self.name))
        del self.scheduler
        log.debug(" shutdown(): 5-refcount {} for {}".format(sys.getrefcount(self), self.name))
        # the shortcut dict holds bound references (e.g. save_vtk) -- drop it
        del self.scheduler_shortcuts
        log.debug(" shutdown(): 6-refcount {} for {}".format(sys.getrefcount(self), self.name))
        self.close_logfile()
        log.debug(" shutdown(): 7-refcount {} for {}".format(sys.getrefcount(self), self.name))
        return sys.getrefcount(self)
def instances_delete_all_others(self):
for id_ in sorted(Simulation.instances.keys()):
if id_ != None: # can happen if instances have been deleted
if id_ != self.instance_id: # do not delete ourselves, here
sim = Simulation.instances[id_]
sim.shutdown()
del sim
@staticmethod
def instances_list_all():
log.info("Showing all Simulation object instances:")
for id_ in sorted(Simulation.instances.keys()):
if id_ != None: # can happen if instances have been deleted
log.info(" sim instance_id={}: name='{}'".format(id_, Simulation.instances[id_].name))
@staticmethod
def instances_delete_all():
log.info("instances_delete_all() starting:")
if len(Simulation.instances) == 0:
log.debug(" no instances found")
return # no objects exist
else:
for id_ in sorted(Simulation.instances.keys()):
sim = Simulation.instances[id_]
sim.shutdown()
del sim
log.debug("instances_delete_all() ending")
def instances_alive_count(self):
return sum([1 for id_ in Simulation.instances.keys() if id_ != None])
def __del__(self):
print "Simulation object about to be destroyed."
def __str__(self):
"""String briefly describing simulation object"""
return "finmag.Simulation(name='%s', instance_id=%s) with %s" % (self.name, self.instance_id, self.mesh)
    def __get_m(self):
        """The unit magnetisation"""
        # Private getter backing the 'm' property; delegates to the LLG
        # object's magnetisation field (returned as a numpy array).
        return self.llg._m_field.get_numpy_array_debug()
    def set_m(self, value, normalise=True, **kwargs):
        """
        Set the magnetisation (if `normalise` is True, it is automatically
        normalised to unit length).
        `value` can have any of the forms accepted by the function
        'finmag.util.helpers.vector_valued_function' (see its
        docstring for details).
        You can call this method anytime during the simulation. However, when
        providing a numpy array during time integration, the use of
        the attribute m instead of this method is advised for performance
        reasons and because the attribute m doesn't normalise the vector.
        """
        # TODO: Remove debug flag again once we are sure that re-initialising
        # the integrator doesn't cause a performance overhead.
        debug = kwargs.pop('debug', True)
        self.llg.set_m(value, normalise=normalise, **kwargs)
        # Re-initialise an existing integrator so it picks up the new state.
        if self.has_integrator():
            self.reinit_integrator(debug=debug)
    # 'm' property: read via __get_m, write via set_m (see above).
    m = property(__get_m, set_m)
    @property
    def Ms(self):
        # Saturation magnetisation; stored as a Field on a piecewise-constant
        # ('DG', 0) function space (assigned in the Ms setter).
        return self._Ms
    @Ms.setter
    def Ms(self, value):
        # Wrap the given value in a piecewise-constant ('DG', 0) Field on the
        # mesh and propagate it to the LLG object.
        self._Ms = Field(df.FunctionSpace(self.mesh, 'DG', 0), value)
        self.llg.Ms = self._Ms
        # XXX TODO: Do we also need to reset Ms in the interactions or is this
        # automatically done by the llg or effective field?!?
    @property
    def m_field(self):
        # The magnetisation as a Field object (delegates to the LLG object).
        return self.llg.m_field
@property
def m_average(self):
"""
Compute and return the average magnetisation over the entire
mesh, | |
[](http://rpi.analyticsdojo.com)
<center><h1>Boston Housing</h1></center>
<center><h3><a href = 'http://rpi.analyticsdojo.com'>rpi.analyticsdojo.com</a></h3></center>
#This uses the same mechanisms.
%matplotlib inline
# Boston Housing
- Getting the Data
- Reviewing Data
- Modeling
- Model Evaluation
- Using Model
- Storing Model
## Getting Data
- Available in the [sklearn package](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.html) as a Bunch object (dictionary).
- From FAQ: ["Don’t make a bunch object! They are not part of the scikit-learn API. Bunch objects are just a way to package some numpy arrays. As a scikit-learn user you only ever need numpy arrays to feed your model with data."](http://scikit-learn.org/stable/faq.html)
- Available in the UCI data repository.
- Better to convert to Pandas dataframe.
#From sklearn tutorial.
from sklearn.datasets import load_boston
boston = load_boston()
print( "Type of boston dataset:", type(boston))
#A bunch, as you remember, is a dictionary-based dataset. Dictionaries are addressed by keys.
#Let's look at the keys.
print(boston.keys())
#DESCR sounds like it could be useful. Let's print the description.
print(boston['DESCR'])
# Let's change the data to a Panda's Dataframe
import pandas as pd
boston_df = pd.DataFrame(boston['data'] )
boston_df.head()
#Now add the column names.
boston_df.columns = boston['feature_names']
boston_df.head()
#Add the target as PRICE.
boston_df['PRICE']= boston['target']
boston_df.head()
## Attribute Information (in order):
Looks like they are all continuous IV and continuous DV.
- CRIM per capita crime rate by town
- ZN proportion of residential land zoned for lots over 25,000 sq.ft.
- INDUS proportion of non-retail business acres per town
- CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
- NOX nitric oxides concentration (parts per 10 million)
- RM average number of rooms per dwelling
- AGE proportion of owner-occupied units built prior to 1940
- DIS weighted distances to five Boston employment centres
- RAD index of accessibility to radial highways
- TAX full-value property-tax rate per 10,000
- PTRATIO pupil-teacher ratio by town
- B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
- LSTAT % lower status of the population
- MEDV Median value of owner-occupied homes in 1000's
Let's check for missing values.
import numpy as np
#check for missing values
print(np.sum(np.isnan(boston_df)))
## What type of data are there?
- First let's focus on the dependent variable, as the nature of the DV is critical to selection of model.
- *Median value of owner-occupied homes in $1000's* is the Dependent Variable (continuous variable).
- It is relevant to look at the distribution of the dependent variable, so let's do that first.
- Here there is a normal distribution for the most part, with some at the top end of the distribution we could explore later.
#Let's us seaborn, because it is pretty. ;)
#See more here. http://seaborn.pydata.org/tutorial/distributions.html
import seaborn as sns
sns.distplot(boston_df['PRICE']);
#We can quickly look at other data.
#Look at the bottom row to see thinks likely coorelated with price.
#Look along the diagonal to see histograms of each.
sns.pairplot(boston_df);
## Preparing to Model
- It is common to separate `y` as the dependent variable and `X` as the matrix of independent variables.
- Here we are using `train_test_split` to split the test and train.
- This creates 4 subsets, with IV and DV separted: `X_train, X_test, y_train, y_test`
#This will throw and error at import if haven't upgraded.
# from sklearn.cross_validation import train_test_split
from sklearn.model_selection import train_test_split
#y is the dependent variable.
y = boston_df['PRICE']
#As we know, iloc is used to slice the array by index number. Here this is the matrix of
#independent variables.
X = boston_df.iloc[:,0:13]
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
## Modeling
- First import the package: `from sklearn.linear_model import LinearRegression`
- Then create the model object.
- Then fit the data.
- This creates a trained model (an object) of class regression.
- The variety of methods and attributes available for regression are shown [here](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
from sklearn.linear_model import LinearRegression
lm = LinearRegression()
lm.fit( X_train, y_train )
## Evaluating the Model Results
- You have fit a model.
- You can now store this model, save the object to disk, or evaluate it with different outcomes.
- Trained regression objects have coefficients (`coef_`) and intercepts (`intercept_`) as attributes.
- R-Squared is determined from the `score` method of the regression object.
- For Regression, we are going to use the coefficient of determination as our way of evaluating the results, [also referred to as R-Squared](https://en.wikipedia.org/wiki/Coefficient_of_determination)
print('labels\n',X.columns)
print('Coefficients: \n', lm.coef_)
print('Intercept: \n', lm.intercept_)
print('R2 for Train)', lm.score( X_train, y_train ))
print('R2 for Test (cross validation)', lm.score(X_test, y_test))
#Alternately, we can show the results in a dataframe using the zip command.
pd.DataFrame( list(zip(X.columns, lm.coef_)),
columns=['features', 'estimatedCoeffs'])
## Cross Validation and Hyperparameter Tuning
- The basic way of having a train and a test set can result in overfitting if there are parameters within the model that are being optimized. [Further described here](http://scikit-learn.org/stable/modules/cross_validation.html#cross-validation).
- Because of this, a third validation set can be partitioned, but at times there isn't enough data.
- So Cross validation can split the data into (`cv`) different datasets and check results.
- Returning MSE rather than R2.
from sklearn.model_selection import cross_val_score
scores = cross_val_score(lm, X_train, y_train, cv=8)
print("R2:", scores, "\n R2_avg: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
## Calculation of Null Model
- We also want to compare a null model (baseline model) with our result.
- To do this, we have to generate an array of equal size to the train and test set.
#Here we need to construct our Base model: it predicts the training-set mean for every observation.
#This syntax multiplies a list by a number, generating a list of length equal to that number.
#Then we can cast it as a Pandas series.
#Note both baselines use the TRAINING mean -- the test set must not leak into the model.
y_train_base = pd.Series([np.mean(y_train)] * y_train.size)
y_test_base = pd.Series([np.mean(y_train)] * y_test.size)
print(y_train_base.head(), '\n Size:', y_train_base.size)
print(y_test_base.head(), '\n Size:', y_test_base.size)
## Scoring of Null Model
- While previously we generated the R2 score from the `score` method, passing X and Y, we can also compute it using the `r2_score` function, which is imported from sklearn.metrics.
- The `r2_score` function accepts the true value and the predicted value.
from sklearn.metrics import r2_score
r2_train_base= r2_score(y_train, y_train_base)
r2_train_reg = r2_score(y_train, lm.predict(X_train))
r2_test_base = r2_score(y_test, y_test_base)
r2_test_reg = r2_score(y_test, lm.predict(X_test))
print(r2_train_base, r2_train_reg,r2_test_base,r2_test_reg )
## Scoring of Null Model
- We got a 0 R-squared for our model. Why 0?
- This is where it is important to understand what R-squared is actually measuring.
- On the left side you see the total sum of squared values (ss_tot_train below).
- On the right you see the sum of squares regression (ss_reg_train).
- For the null model, the ss_tot_train = ss_reg_train, so R-squared = 0.
<br>

- By Orzetto (Own work) [CC BY-SA 3.0 (http://creativecommons.org/licenses/by-sa/3.0) or GFDL (http://www.gnu.org/copyleft/fdl.html)], via Wikimedia Commons
#total sum of squares
ss_tot_train=np.sum((y_train-np.mean(y_train))**2)
ss_res_train=np.sum((y_train-lm.predict(X_train))**2)
ss_reg_train=np.sum((lm.predict(X_train)-np.mean(y_train))**2)
r2_train_reg_manual= 1-(ss_res_train/ss_tot_train)
print(r2_train_reg, r2_train_reg_manual, ss_tot_train, ss_res_train, ss_reg_train )
## Predict Outcomes
- The regression predict uses the trained coefficients and accepts input.
- Here, by passing the original columns from boston_df, we can create a new column for the predicted value.
boston_df['PRICE_REG']=lm.predict(boston_df.iloc[:,0:13])
boston_df[['PRICE', 'PRICE_REG']].head()
## Graph Outcomes
- Common to graph predicted vs actual.
- Results should show a randomly distributed error function.
- Note that there seem to be much larger errors on the right side of the graph, suggesting something else might be impacting the highest values.
import matplotlib.pyplot as plt
%matplotlib inline
plt.scatter( boston_df['PRICE'], boston_df['PRICE_REG'], s=5 )
plt.xlabel( "Prices")
plt.ylabel( "Predicted Prices")
plt.title( "Real vs Predicted Housing Prices")
#Let's make it look pretty with seaborn
import seaborn as sns; sns.set(color_codes=True)
ax = sns.regplot(x="PRICE", y="PRICE_REG", data=boston_df[['PRICE','PRICE_REG']])
## Graph Residuals
- Common to graph predicted - actual (error term).
- Results should show a randomly distributed error function.
- Here we are showing train and test as different
#
plt.scatter( lm.predict(X_train), lm.predict(X_train) - y_train,
c ='b', s=30, alpha=0.4 )
plt.scatter( lm.predict(X_test), lm.predict(X_test) - y_test,
c ='g', s=30 )
#The expected error is 0.
plt.hlines( y=0, xmin=-5, xmax=55)
plt.title( "Residuals" )
plt.ylabel( "Residuals" )
## Persistent Models
- It could be that you would want to maintain a trained model beyond the current session.
- The `pickle` package enables storing objects to disk and then retrieving them.
- For example, for a trained model we might want to store it, and then use it to score additional data.
#save the data
boston_df.to_csv('boston.csv')
import pickle
pickle.dump( lm, open( 'lm_reg_boston.p', 'wb' ) )
#Load the pickled object.
lm_pickled = pickle.load( open( "lm_reg_boston.p", "rb" ) )
lm_pickled.score(X_train, y_train)
Copyright [AnalyticsDojo](http://rpi.analyticsdojo.com) 2016.
This work is licensed under the [Creative Commons Attribution 4.0 | |
open('show\\all-time.txt', 'w')
file_all_name = open('show\\all-name.txt', 'w')
file_all_subscribers = open('show\\all-subscribers.txt', 'w')
file_dirty_time = open('show\\dirty-time.txt', 'w')
file_dirty_name = open('show\\dirty-name.txt', 'w')
file_dirty_subscribers = open('show\\dirty-subscribers.txt', 'w')
file_jumble_sfw = open('show\\jumble.txt', 'w')
file_jumble_nsfw = open('show\\jumble-nsfw.txt', 'w')
file_duplicates = open('show\\duplicates.txt', 'w')
file_missing = open('show\\missing.txt', 'w')
file_stats = open('show\\statistics.txt', 'w')
file_readme = open('README.md', 'r')
cur.execute('SELECT COUNT(idint) FROM subreddits WHERE created != 0')
itemcount_valid = cur.fetchone()[0]
itemcount_nsfw = 0
name_lengths = {}
print(itemcount_valid, 'subreddits')
print('Writing time files.')
cur.execute('SELECT * FROM subreddits WHERE created !=0 ORDER BY created ASC')
for item in fetchgenerator(cur):
itemf = memberformat(item)
print(itemf, file=file_all_time)
if int(item[SQL_SUBREDDIT['nsfw']]) == 1:
print(itemf, file=file_dirty_time)
itemcount_nsfw += 1
file_all_time.close()
file_dirty_time.close()
print('Writing name files and duplicates.')
previousitem = None
inprogress = False
cur.execute('SELECT * FROM subreddits WHERE created != 0 ORDER BY LOWER(name) ASC')
for item in fetchgenerator(cur):
if previousitem is not None and item[SQL_SUBREDDIT['name']] == previousitem[SQL_SUBREDDIT['name']]:
print(memberformat(previousitem), file=file_duplicates)
inprogress = True
elif inprogress:
print(memberformat(previousitem), file=file_duplicates)
inprogress = False
previousitem = item
name_length = len(item[SQL_SUBREDDIT['name']])
name_lengths[name_length] = name_lengths.get(name_length, 0) + 1
itemf = memberformat(item)
print(itemf, file=file_all_name)
if int(item[SQL_SUBREDDIT['nsfw']]) == 1:
print(itemf, file=file_dirty_name)
file_duplicates.close()
file_all_name.close()
file_dirty_name.close()
name_lengths = {'%02d'%k: v for (k,v) in name_lengths.items()}
print('Writing subscriber files.')
ranks = {'all':1, 'nsfw':1}
def write_with_rank(itemf, ranktype, filehandle):
    """Print one formatted member line to `filehandle`, appending its
    rank number while the counter for `ranktype` is within RANKS_UP_TO."""
    current_rank = ranks[ranktype]
    line = itemf
    if current_rank <= RANKS_UP_TO:
        line = line + '{:>9,}'.format(current_rank)
    print(line, file=filehandle)
    ranks[ranktype] = current_rank + 1
cur.execute('SELECT * FROM subreddits WHERE created != 0 ORDER BY subscribers DESC')
for item in fetchgenerator(cur):
itemf = memberformat(item)
write_with_rank(itemf, 'all', file_all_subscribers)
if int(item[SQL_SUBREDDIT['nsfw']]) == 1:
write_with_rank(itemf, 'nsfw', file_dirty_subscribers)
file_all_subscribers.close()
file_dirty_subscribers.close()
print('Writing jumble.')
cur.execute('SELECT * FROM subreddits WHERE jumble == 1 ORDER BY subscribers DESC')
for item in fetchgenerator(cur):
    # BUG FIX: the original never formatted `item` inside this loop, so it
    # kept re-printing the stale `itemf` left over from the subscriber loop.
    itemf = memberformat(item)
    if int(item[SQL_SUBREDDIT['nsfw']]) == 0:
        print(itemf, file=file_jumble_sfw)
    else:
        print(itemf, file=file_jumble_nsfw)
file_jumble_sfw.close()
file_jumble_nsfw.close()
print('Writing missing.')
cur.execute('SELECT * FROM subreddits WHERE created == 0 ORDER BY idint ASC')
for item in fetchgenerator(cur):
print(item[SQL_SUBREDDIT['idstr']], file=file_missing)
file_missing.close()
print('Writing statistics.')
headline = 'Collected {0:,} subreddits\n'.format(itemcount_valid)
statisticoutput = headline + '\n\n'
statisticoutput += ' SFW: {0:,}\n'.format(itemcount_valid - itemcount_nsfw)
statisticoutput += 'NSFW: {0:,}\n\n\n'.format(itemcount_nsfw)
statisticoutput += 'Subreddit type:\n'
subreddit_types = list(SUBREDDIT_TYPE_REVERSE.keys())
subreddit_types.sort()
subreddit_types = [SUBREDDIT_TYPE_REVERSE[k] for k in subreddit_types]
for subreddit_type in subreddit_types:
index = SUBREDDIT_TYPE[subreddit_type]
cur.execute('SELECT COUNT(*) FROM subreddits WHERE created != 0 AND subreddit_type == ?', [index])
count = cur.fetchone()[0]
statisticoutput += '{:>16s}: {:,}\n'.format(str(subreddit_type), count)
statisticoutput += '\n'
statisticoutput += 'Submission type (None means approved submitters only or inaccessible):\n'
submission_types = list(SUBMISSION_TYPE_REVERSE.keys())
submission_types.sort()
submission_types = [SUBMISSION_TYPE_REVERSE[k] for k in submission_types]
for submission_type in submission_types:
index = SUBMISSION_TYPE[submission_type]
cur.execute('SELECT COUNT(*) FROM subreddits WHERE created != 0 AND submission_type == ?', [index])
count = cur.fetchone()[0]
statisticoutput += '{:>16s}: {:,}\n'.format(str(submission_type), count)
statisticoutput += '\n\n'
cur.execute('SELECT * FROM subreddits WHERE created != 0 ORDER BY created DESC limit 20000')
last20k = cur.fetchall()
timediff = last20k[0][SQL_SUBREDDIT['created']] - last20k[-1][SQL_SUBREDDIT['created']]
statisticoutput += 'Over the last 20,000 subreddits:\n'
statisticoutput += '%.2f subs are created each hour\n' % (20000 / (timediff/3600))
statisticoutput += '%.2f subs are created each day\n\n\n' % (20000 / (timediff/86400))
################################
# Breakdown by time period
# hour of day, day of week, day of month, month of year, month-year, year
def datetimedict(statsdict, strf):
    # Tally one occurrence of the formatted timestamp key, in place.
    if strf in statsdict:
        statsdict[strf] += 1
    else:
        statsdict[strf] = 1
hoddict = {}
dowdict = {}
domdict = {}
moydict = {}
myrdict = {}
yerdict = {}
print(' performing time breakdown')
cur.execute('SELECT * FROM subreddits WHERE created != 0')
for item in fetchgenerator(cur):
dt = datetime.datetime.utcfromtimestamp(item[SQL_SUBREDDIT['created']])
datetimedict(hoddict, dt.strftime('%H')) # 01
datetimedict(dowdict, dt.strftime('%A')) # Monday
datetimedict(domdict, dt.strftime('%d')) # 01
datetimedict(moydict, dt.strftime('%B')) # January
datetimedict(myrdict, dt.strftime('%b%Y')) # Jan2015
datetimedict(yerdict, dt.strftime('%Y')) # 2015
print(' forming columns')
plotnum = 0
labels = ['hour of day', 'day of week', 'day of month', 'month of year', 'year', 'month-year', 'name length']
modes = [None, 'day', None, 'month', None, 'monthyear', None]
dicts = [hoddict, dowdict, domdict, moydict, yerdict, myrdict, name_lengths]
mapping = [
{'label': 'hour of day', 'specialsort': None, 'dict': hoddict,},
{'label': 'day of week', 'specialsort': 'day', 'dict': dowdict,},
{'label': 'day of month', 'specialsort': None, 'dict': domdict,},
{'label': 'month of year', 'specialsort': 'month', 'dict': moydict,},
{'label': 'year', 'specialsort': None, 'dict': yerdict,},
{'label': 'month-year', 'specialsort': 'monthyear', 'dict': myrdict,},
{'label': 'name length', 'specialsort': None, 'dict': name_lengths,},
]
for collection in mapping:
    d = collection['dict']
    # Two orderings of the same keys: by ascending count (primary) and by
    # the collection's natural order (weekday/month names, secondary).
    dkeys_primary = list(d.keys())
    dkeys_primary.sort(key=d.get)
    dkeys_secondary = specialsort(dkeys_primary, collection['specialsort'])
    dvals = [d[x] for x in dkeys_secondary]
    # BUG FIX: the original wrote `labels[index]`, but `index` is a stale
    # leftover from the submission-type loop above (an unrelated integer).
    # The heading for this table is the collection's own label.
    statisticoutput += collection['label'] + '\n'
    for (keyindex, key) in enumerate(dkeys_primary):
        # Left column: keys sorted by count.
        val = d[key]
        val = '{0:,}'.format(val)
        spacer = 34 - (len(key) + len(val))
        spacer = '.' * spacer
        statisticoutput += key + spacer + val
        statisticoutput += ' ' * 8
        # Right column: keys in natural order.
        key = dkeys_secondary[keyindex]
        val = d[key]
        val = '{0:,}'.format(val)
        spacer = 34 - (len(key) + len(val))
        spacer = '.' * spacer
        statisticoutput += key + spacer + val
        statisticoutput += '\n'
    statisticoutput += '\n'
    if d is name_lengths:
        upperlabel = 'Name Lengths'
    else:
        upperlabel = 'Subreddits created - %s' % collection['label']
    plotbars(
        filename=upperlabel,
        upperlabel=upperlabel,
        inputdata=[dkeys_secondary, dvals],
        colormid='#43443a',
        forcezero=True,
    )
    plotnum += 1
    if d is myrdict:
        # In addition to the total month graph, plot the last 15 months
        plotbars(
            filename=upperlabel + ' short',
            upperlabel=upperlabel + ' short',
            inputdata=[dkeys_secondary[-15:], dvals[-15:]],
            colorbg='#272822',
            colorfg='#000',
            colormid='#43443a',
            forcezero=True,
        )
        plotnum += 1
#
# Breakdown by time period
################################
print(statisticoutput, file=file_stats)
file_stats.close()
print('Updating Readme')
readmelines = file_readme.readlines()
file_readme.close()
readmelines[3] = '#####' + headline
readmelines[5] = '#####[Today\'s jumble](http://reddit.com/r/%s)\n' % jumble(doreturn=True)[0]
file_readme = open('README.md', 'w')
file_readme.write(''.join(readmelines))
file_readme.close()
time.sleep(2)
x = subprocess.call('PNGCREATOR.bat', shell=True, cwd='spooky')
print()
def memberformat(member):
    """Render one subreddit row using the global FORMAT_MEMBER template."""
    fields = {
        column: member[SQL_SUBREDDIT[column]]
        for column in ('idstr', 'human', 'nsfw', 'name', 'subscribers')
    }
    return FORMAT_MEMBER.format(**fields)
def dictadding(targetdict, item):
    """Increment the count for `item` in `targetdict`.

    The dict is mutated in place; the same object is also returned, kept
    for backward compatibility with existing callers.
    """
    # Idiomatic counter increment: dict.get supplies the 0 default.
    targetdict[item] = targetdict.get(item, 0) + 1
    return targetdict
def specialsort(inlist, mode=None):
    """Order `inlist` according to `mode`.

    'month' / 'day' ignore the input and return the canonical calendar
    ordering; 'monthyear' sorts abbreviated month-year keys ("Jan2015")
    chronologically; None sorts alphabetically. Any other mode yields
    None (unchanged historical behaviour).
    """
    if mode == 'month':
        return ['January', 'February', 'March', 'April', 'May', 'June',
                'July', 'August', 'September', 'October', 'November',
                'December']
    if mode == 'day':
        return ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday',
                'Friday', 'Saturday']
    if mode == 'monthyear':
        # Re-code "Jan2015" as "2015<monthnumber>" so lexical order is
        # chronological, then sort the original keys by that recoding.
        sortable = {}
        for entry in inlist:
            recoded = entry.replace(entry[:3], monthnumbers[entry[:3]])
            sortable[entry] = recoded[3:] + recoded[:3]
        return sorted(sortable, key=sortable.get)
    if mode is None:
        return sorted(inlist)
def search(query="", casesense=False, filterout=[], subscribers=0, nsfwmode=2, doreturn=False, sort=None):
"""
Search for a subreddit by name
*str query = The search query
"query" = results where "query" is in the name
"*query" = results where "query" is at the end of the name
"query*" = results where "query" is at the beginning of the name
"*query*" = results where "query" is in the middle of the name
bool casesense = is the search case sensitive
list filterout = [list, of, words] to omit from search. Follows casesense
int subscribers = minimum number of subscribers
int nsfwmode =
0 - Clean only
1 - Dirty only
2 - All
int sort = The integer representing the sql column to sort by. Defaults
to no sort.
"""
querys = ''.join([c for c in query if c in GOODCHARS])
queryx = '%%{term}%%'.format(term=querys)
if '!' in query:
cur.execute('SELECT * FROM subreddits WHERE name LIKE ?', [querys])
return cur.fetchone()
if nsfwmode in [0,1]:
cur.execute('SELECT * FROM subreddits WHERE name LIKE ? AND subscribers > ? AND nsfw=?', [queryx, subscribers, nsfwmode])
else:
cur.execute('SELECT * FROM subreddits WHERE name LIKE ? AND subscribers > ?', [queryx, subscribers])
results = []
if casesense is False:
querys = querys.lower()
filterout = [x.lower() for x in filterout]
if '*' in query:
positional = True
front = query[-1] == '*'
back = query[0] == '*'
if front and back:
mid = True
front = False
back = False
else:
mid = False
else:
positional = False
lenq = len(querys)
for item in fetchgenerator(cur):
name = item[SQL_SUBREDDIT['name']]
if casesense is False:
name = name.lower()
if querys not in name:
#print('%s not in %s' % (querys, name))
continue
if (positional and front) and (name[:lenq] != querys):
#print('%s not front %s (%s)' % (querys, name, name[:lenq]))
continue
if (positional and back) and (name[-lenq:] != querys):
#print('%s not back %s (%s)' % (querys, name, name[-lenq:]))
continue
if (positional and mid) and (querys not in name[1:-1]):
#print('%s not mid %s (%s)' % (querys, name, name[1:-1]))
| |
<filename>ummon/features/portilla_simoncelli_tm/filterbank_simoncelli.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created by <NAME> at 09.08.2018
"""
from __future__ import division
import numpy as np
from scipy.special import factorial
def buildSCFpyr(im, ht=-1, order=3, twidth=1):
    """Build a steerable complex frequency pyramid for image `im`.

    Args:
        im: 2-D numpy array (input image).
        ht: number of pyramid levels; -1 (default) builds the maximum
            height that fits the image size.
        order: angular order of the filters; nbands = order + 1.
        twidth: width of the radial transition region (octaves).

    Returns:
        Nested list: [[highpass residual], level-band lists...,
        [lowpass residual]].
    """
    # Maximum feasible height is limited by the smaller image dimension.
    max_ht = np.floor(np.log2(np.min(im.shape)) + 2)
    if ht == -1:
        ht = max_ht
        print("ht: ", ht)
    else:
        if ht > max_ht:
            # NOTE(review): this only warns; it does not clamp ht.
            print('Cannot build pyramid higher than ', max_ht, ' levels.')
    nbands = order + 1
    # Steering stuff:
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int() is the drop-in replacement.
    if np.mod(nbands, 2) == 0:
        harmonics = np.array([i for i in range(0, int(nbands / 2))]).T * 2 + 1
    else:
        harmonics = np.array([i for i in range(0, int((nbands - 1) / 2 + 1))]).T * 2
    # ----------------------------------------------------------------
    dims = im.shape
    # Centre pixel (MATLAB-style 1-based convention, hence the +0.5).
    ctr = np.ceil(np.array([dims[0] + 0.5, dims[1] + 0.5]) / 2)
    m = np.divide(np.array([i for i in range(1, dims[1] + 1)]) - ctr[1], dims[1] / 2)
    n = np.divide(np.array([i for i in range(1, dims[0] + 1)]) - ctr[0], dims[0] / 2)
    [xramp, yramp] = np.meshgrid(m, n)
    angle = np.arctan2(yramp, xramp)
    log_rad = np.sqrt(xramp ** 2 + yramp ** 2)
    # Avoid log2(0) at the exact centre by borrowing the neighbour value.
    log_rad[int(ctr[0] - 1), int(ctr[1] - 1)] = log_rad[int(ctr[0] - 1), int(ctr[1] - 2)]
    log_rad = np.log2(log_rad)
    # Radial transition function (a raised cosine in log-frequency):
    [Xrcos, Yrcos] = rcosFn(twidth, (-twidth / 2), np.array([0, 1]))
    Yrcos = np.sqrt(Yrcos)
    YIrcos = np.sqrt(1.0 - Yrcos ** 2)
    lo0mask = pointOp(log_rad, YIrcos, Xrcos[0], Xrcos[1] - Xrcos[0])
    imdft = np.fft.fftshift(np.fft.fft2(im))
    lo0dft = np.multiply(imdft, lo0mask)
    # Recursively split the lowpass band into oriented subbands.
    pyr = buildSCFpyrLevs(lo0dft, log_rad, Xrcos, Yrcos, angle, ht, nbands)
    hi0mask = pointOp(log_rad, Yrcos, Xrcos[0], Xrcos[1] - Xrcos[0])
    hi0dft = np.multiply(imdft, hi0mask)
    hi0 = np.fft.ifft2(np.fft.ifftshift(hi0dft))
    # Highpass residual goes first, then the recursive levels.
    ret_pyr = [[hi0.real]]
    for b in pyr:
        ret_pyr.append(b)
    return ret_pyr  # , steermtx, harmonics
def buildSCFpyrLevs(lodft, log_rad, Xrcos, Yrcos, angle, ht, nbands):
    """Recursively build the levels of a steerable complex pyramid.

    Port of the MATLAB routine:
        [pyr,pind] = buildSCFpyrLevs(lodft,log_rad,Xrcos,Yrcos,angle,ht,nbands)

    Args:
        lodft: fftshifted DFT of the current lowpass band.
        log_rad: log2 radial frequency grid.
        Xrcos, Yrcos: raised-cosine transition lookup table.
        angle: orientation grid (radians).
        ht: number of remaining levels to build.
        nbands: number of oriented bands per level.

    Returns:
        Nested list of bands: one list of complex oriented bands per
        level, ending with [lowpass residual].
    """
    if ht <= 0:
        # Recursion base case: return the (real) lowpass residual.
        lo0 = np.fft.ifft2(np.fft.ifftshift(lodft))
        pyr = [[lo0.real]]
    else:
        orients = []
        # Shift the radial axis one octave up for this level.
        log_rad = log_rad + 1
        lutsize = 1024
        Xcosn = np.pi * np.array([i for i in range(-(2 * lutsize + 1), (lutsize + 1) + 1)]) / lutsize  # [-2*pi:pi]
        order = nbands - 1
        # divide by sqrt(sum_(n=0)^(N-1) cos(pi*n/N)^(2(N-1)) )
        const = np.divide(np.multiply(2 ** (2 * order), factorial(order) ** 2), nbands * factorial(2 * order))
        # Ycosn = sqrt(const) * (cos(Xcosn)).^order;
        # analytic version: only take one lobe of the angular response
        alfa = np.mod(np.pi + Xcosn, 2 * np.pi) - np.pi
        Ycosn = 2 * np.sqrt(const) * np.multiply((np.cos(Xcosn) ** order), (np.absolute(alfa) < np.pi / 2))
        himask = pointOp(log_rad, Yrcos, Xrcos[0], Xrcos[1] - Xrcos[0])
        for b in range(0, nbands):
            # Angular mask selects one orientation lobe per band.
            anglemask = pointOp(angle, Ycosn, Xcosn[0] + np.pi * (b) / nbands, Xcosn[1] - Xcosn[0])
            banddft = np.multiply(np.multiply(np.multiply(((-1j) ** (nbands - 1)), lodft), anglemask), himask)
            band = np.fft.ifft2(np.fft.ifftshift(banddft))
            # analytic version: keep the full complex value
            orients.append(band)
        # Crop the frequency grids to the central lowpass quadrant for
        # the next (half-resolution) recursion level.
        dims = lodft.shape
        ctr = np.ceil(np.array([dims[0] + 0.5, dims[1] + 0.5]) / 2)
        lodims = np.ceil(np.array([dims[0] - 0.5, dims[1] - 0.5]) / 2)
        loctr = np.ceil((lodims + 0.5) / 2)
        lostart = ctr - loctr
        loend = lostart + lodims
        log_rad = log_rad[int(lostart[0]):int(loend[0]), int(lostart[1]):int(loend[1])]
        angle = angle[int(lostart[0]):int(loend[0]), int(lostart[1]):int(loend[1])]
        lodft = lodft[int(lostart[0]):int(loend[0]), int(lostart[1]):int(loend[1])]
        YIrcos = np.absolute(np.sqrt(1.0 - Yrcos ** 2))
        lomask = pointOp(log_rad, YIrcos, Xrcos[0], Xrcos[1] - Xrcos[0])
        lodft = np.multiply(lomask, lodft)
        npyr = buildSCFpyrLevs(lodft, log_rad, Xrcos, Yrcos, angle, ht - 1, nbands)
        # This level's oriented bands first, then the deeper levels.
        pyr = []
        pyr.append(orients)
        for b in npyr:
            pyr.append(b)
    return pyr
def reconSFpyr(pyr, levs):
    """Reconstruct an image from a steerable frequency pyramid.

    Args:
        pyr: pyramid as produced by buildSCFpyr
             ([[highpass], oriented-band lists..., [lowpass]]).
        levs: array of level indices to include (0 = highpass residual).

    Returns:
        res: the reconstructed (real) image.
    """
    nbands = len(pyr[1])  # number orientations
    dims = pyr[0][0].shape
    # Centre pixel (MATLAB-style 1-based convention, hence the +0.5).
    ctr = np.ceil(np.array([dims[0] + 0.5, dims[1] + 0.5]) / 2)
    m = np.divide(np.array([i for i in range(1, dims[1] + 1)]) - ctr[1], dims[1] / 2)
    n = np.divide(np.array([i for i in range(1, dims[0] + 1)]) - ctr[0], dims[0] / 2)
    [xramp, yramp] = np.meshgrid(m, n)
    angle = np.arctan2(yramp, xramp)
    log_rad = np.sqrt(xramp ** 2 + yramp ** 2)
    # Avoid log2(0) at the exact centre by borrowing the neighbour value.
    log_rad[int(ctr[0] - 1), int(ctr[1] - 1)] = log_rad[int(ctr[0] - 1), int(ctr[1] - 2)]
    log_rad = np.log2(log_rad)
    # Radial transition function (a raised cosine in log-frequency):
    [Xrcos, Yrcos] = rcosFn(1, (-1 / 2), np.array([0, 1]))
    Yrcos = np.sqrt(Yrcos)
    YIrcos = np.sqrt(np.absolute(1.0 - Yrcos ** 2))
    # Count the total number of bands in the pyramid; z == 2 means only
    # the highpass and lowpass residuals remain (no oriented levels).
    # NOTE(review): indentation was lost in this copy -- the count loop is
    # assumed to complete before the branch below; confirm against the
    # original MATLAB/Python source.
    z = 0
    for subband in pyr:
        for band in subband:
            z += 1
    if (z == 2):
        if ((levs == 1).any()):
            resdft = np.fft.fftshift(np.fft.fft2(pyr[1][0]))
        else:
            resdft = np.zeros((pyr[1][0].shape))
    else:
        resdft = reconSFpyrLevs(pyr[1:].copy(), log_rad, Xrcos, Yrcos, angle, levs, nbands)  # levs / [1]
    lo0mask = pointOp(log_rad, YIrcos, Xrcos[0], Xrcos[1] - Xrcos[0])
    resdft = np.multiply(resdft, lo0mask)
    # residual highpass subband
    if (np.array(levs) == 0).any():
        hi0mask = pointOp(log_rad, Yrcos, Xrcos[0], Xrcos[1] - Xrcos[0])
        hidft = np.fft.fftshift(np.fft.fft2(pyr[0][0].copy()))
        resdft = resdft + np.multiply(hidft, hi0mask)
    res = np.real(np.fft.ifft2(np.fft.ifftshift(resdft)))
    return res
def reconSFpyrLevs(pyr, log_rad, Xrcos, Yrcos, angle, levs, nbands):
    """Recursively reconstruct the DFT of the levels of a steerable pyramid.

    Args:
        pyr: remaining pyramid levels (list of lists of bands).
        log_rad, angle: log-frequency radius / orientation grids.
        Xrcos, Yrcos: raised-cosine transition lookup table.
        levs: level indices to include in the reconstruction.
        nbands: number of oriented bands per level.

    Returns:
        resdft: complex DFT of the reconstruction at this level.
    """
    lo_ind = nbands + 1
    dims = pyr[0][0].shape
    ctr = np.ceil(np.array([dims[0] + 0.5, dims[1] + 0.5]) / 2)
    # log_rad = log_rad + 1;
    Xrcos = Xrcos - np.log2(2)  # shift origin of lut by 1 octave.
    if (np.array(levs) > 1).any():
        lodims = np.ceil(np.array([dims[0] - 0.5, dims[1] - 0.5]) / 2)
        loctr = np.ceil((lodims + 0.5) / 2)
        lostart = ctr - loctr + 1
        loend = lostart + lodims - 1
        # FIX: np.ceil yields floats, and NumPy requires integer slice
        # indices (matches the int() casts already used in
        # buildSCFpyrLevs).
        row0, row1 = int(lostart[0]) - 1, int(loend[0])
        col0, col1 = int(lostart[1]) - 1, int(loend[1])
        nlog_rad = log_rad[row0:row1, col0:col1]
        nangle = angle[row0:row1, col0:col1]
        # Count remaining bands; more than nbands+1 means deeper levels exist.
        z = 0
        for band in pyr:
            for subband in band:
                z += 1
        if z > lo_ind:
            nresdft = reconSFpyrLevs(pyr[1:].copy(), nlog_rad, Xrcos, Yrcos, nangle, np.array(levs) - 1, nbands)
        else:
            nresdft = np.fft.fftshift(np.fft.fft2(pyr[1][0].copy()))
        YIrcos = np.sqrt(np.absolute(1.0 - Yrcos ** 2))
        lomask = pointOp(nlog_rad, YIrcos, Xrcos[0], Xrcos[1] - Xrcos[0])
        # FIX: resdft accumulates complex band DFTs below; a float zeros
        # array would reject (or silently truncate) the complex assignment.
        resdft = np.zeros(dims, dtype=complex)
        resdft[row0:row1, col0:col1] = np.multiply(nresdft, lomask)
    else:
        resdft = np.zeros(dims, dtype=complex)
    if (np.array(levs) == 1).any():
        lutsize = 1024
        Xcosn = np.pi * np.array([i for i in range(-(2 * lutsize + 1), (lutsize + 2))]) / lutsize  # [-2*pi:pi]
        order = nbands - 1
        # divide by sqrt(sum_(n=0)^(N-1) cos(pi*n/N)^(2(N-1)) )
        const = np.multiply((2 ** (2 * order)), np.divide((factorial(order) ** 2), (nbands * factorial(2 * order))))
        Ycosn = np.sqrt(const) * (np.cos(Xcosn)) ** order
        himask = pointOp(log_rad, Yrcos, Xrcos[0], Xrcos[1] - Xrcos[0])
        for b in range(0, nbands):
            anglemask = pointOp(angle, Ycosn, Xcosn[0] + np.pi * (b) / nbands, Xcosn[1] - Xcosn[0])
            banddft = np.fft.fftshift(np.fft.fft2(pyr[0][b].copy()))
            resdft = resdft + (np.sqrt(complex(-1))) ** (nbands - 1) * np.multiply(np.multiply(banddft, anglemask),
                                                                                  himask)
    return resdft
def steer2HarmMtx(harmonics, angles=-1, evenorodd='even'):
    """
    mtx = steer2HarmMtx(harmonics, angles, evenorodd)

    Map Fourier harmonic components onto a steerable-filter basis: build
    the harmonic matrix for the given angles and return its pseudo-inverse.
    """
    # Make HARMONICS a row vector
    numh = 2 * np.atleast_2d(harmonics).shape[1] - (harmonics == 0).any()
    # NOTE(review): the sentinel default angles=-1 would crash below
    # (angles.shape on an int); the commented-out default computation was
    # never re-enabled, so callers must pass an ndarray of angles.
    # if angles == -1:
    #     angles = np.pi * np.array([i for i in range(0,numh)]).T/numh
    # =================================================================
    if evenorodd == 'even':
        evenorodd = 0
    elif evenorodd == 'odd':
        evenorodd = 1
    else:
        # NOTE(review): only warns; execution continues with the string.
        print('EVEN_OR_ODD should be the string EVEN or ODD')
    # Compute inverse matrix, which maps Fourier components onto
    # steerable basis.
    imtx = np.zeros((angles.shape[0], numh))
    col = 0
    for h in harmonics:
        args = h * angles
        if h == 0:
            # DC harmonic contributes a single constant column.
            imtx[:, col] = np.ones((angles.shape))
            col = col + 1
        elif evenorodd:
            imtx[:, col] = np.sin(args)
            imtx[:, col + 1] = -np.cos(args)
            col = col + 2
        else:
            imtx[:, col] = np.cos(args)
            imtx[:, col + 1] = np.sin(args)
            col = col + 2
    # `rank` is the SVD-based helper defined below in this module.
    r = rank(imtx)
    if (r != numh) and (r != angles.shape[0]):
        print('WARNING: matrix is not full rank')
    mtx = np.linalg.pinv(imtx)
    return mtx
def rank(A, tol=-1):
    """Numerical matrix rank: the number of singular values above `tol`.

    With the sentinel default tol=-1, the tolerance is derived from the
    largest singular value and the matrix size (MATLAB convention).
    """
    _, singular_values, _ = np.linalg.svd(A)
    if tol == -1:
        largest = np.max(singular_values, axis=0)
        tol = np.multiply(np.max(A.shape), np.spacing(largest))
    return np.sum(singular_values > tol)
def rcosFn(width=1, position=0, values=[0, 1]):
"""
Args:
width:
position:
values:
Returns:
X:
Y:
"""
sz = 256 # arbitrary!
X = np.pi * np.array([i for i in range(-sz - 1, 2)]) / (2 * sz)
Y = values[0] | |
(move.uid, move.parent):
self.add_option(
req,
'move here',
'here',
hint='move page %s here' % move.uid)
else:
if req.user.can('admin page'):
self.add_option(
req,
'move/copy',
'move',
hint='mark for moving or copying')
# temporarily disable Export/Imprt until it can be fully tested... (IHM Dec 2015)
# if self.stage!='draft':
# self.add_option(req,'export','export')
# self.add_option(req,'import','import_eve')
# remove single tabs
if len(req.pageoptions) == 1:
req.pageoptions = []
# pass back the result
return req.pageoptions
############### actions ######################
def add_act(self,
            req,
            label,
            method="",
            confirm="",
            url="",
            hint="",
            hilite=False,
            key=""):
    """adds act if it is permitted (but if url is used in place of method, it is not checked for permission)
    url will override method, but method can still be given to check permits
    """
    # Permission gate: an empty method is always allowed; otherwise the
    # user must hold the permit of the method (any '#suffix' is ignored
    # when looking the method attribute up).
    permitted = (not method) or req.user.can(
        getattr(self, method.split('#', 1)[0]))
    if not permitted:
        return
    # url overrides method (historic and/or idiom kept deliberately).
    url = url and self.abs_url(url) or self.url(method)
    onclick = confirm and (
        "return confirm('are you sure you wish to %s?')" % confirm) or ""
    act = [label, url, hint or confirm or method, onclick, hilite, key]
    if 'actions' in req:
        req.actions.append(act)
    else:
        req.actions = [act]
def add_delete(self, req):
    # Standard "delete" action; permission for the 'kill' method is
    # checked inside add_act.
    self.add_act(req, 'delete', 'kill', 'delete this %s' % self.kind)
# def set_listing_actions(self,req):
# ""
    def get_actions(self, req):
        "actions - note that action button forms should use method='get', as action parameters are passed in the URL"
        # stage changes
        if self.stage == 'posted':
            self.add_act(req, 'withdraw', 'withdraw',
                         'withdraw this %s and all its contents' % self.kind)
        elif self.stage == 'draft':
            # offer 'post' only when there is something to post and we are not mid-edit
            if (self.text or self.get_images() or req.pages
                    or req.contents) and not req.edit:
                self.add_act(
                    req,
                    'post',
                    'post',
                    hint='make this %s public' % self.kind,
                    hilite=True)
            self.add_delete(req)
        # NOTE: this early return makes everything below unreachable -
        # the move/copy/export/import actions are deliberately switched off
        return req.actions  # TEMPORARY DISABLING OF MOVE/COPY/EXPORT/IMPORT
        # move, copy, export, import
        move = self.get_move(req)
        if move:
            self.add_act(
                req, 'cancel move', 'cancel_move', hint='cancel page move')
            if self.can_move_here(req):
                self.add_act(req, 'copy here', 'copy',
                             'copy page %s here' % move.uid)
                # cannot move a page onto itself or onto its own parent
                if self.uid not in (move.uid, move.parent):
                    self.add_act(req, 'move here', 'here',
                                 'move page %s here' % move.uid)
        else:
            if req.user.can('admin page'):
                self.add_act(
                    req,
                    'move/copy',
                    'move',
                    hint='mark for moving or copying')
        # temporarily disable Export/Import until it can be fully tested... (IHM Dec 2015)
        # if self.stage!='draft':
        #  self.add_act(req,'export','export')
        #  self.add_act(req,'import','import_eve')
        # and return
        return req.actions
    def can_move_here(self, req):
        """is it okay to move or copy the move object here?

        - this is a hook for override by inheriting classes
        - default: can move anything here, provided we have a valid move uid
        """
        # truthy when a valid marked-for-move object exists, falsy otherwise
        return self.get_move(req)
    def _posted(self, req):
        """post a draft (inner workings)

        Returns True if the stage was actually changed to 'posted',
        False if it was already posted (nothing done).
        """
        if self.stage != 'posted':  #safety valve
            self.stage = 'posted'
            self.stamp()  # update the timestamp
            # store it all
            self.flush()
            req.message = 'your %s is posted' % (self.kind, )
            return True
        return False
    # 'NOWAY' presumably blocks this from being requested directly over the web - confirm
    _posted.permit = 'NOWAY'
    def post(self, req):
        """post a draft (requestable)

        On success, show the parent page; otherwise re-show this page.
        """
        if self._posted(req):
            # return the parent page
            return self.context(req)
        #else
        return self.view(req)
    post.permit = 'create page'
    def withdraw(self, req):
        "remove from posted: reset self back to draft"
        # NOTE(review): an earlier docstring claimed posted descendants are
        # also reset, but only self.stage is changed here - verify intent
        if self.stage == 'posted':
            self.stage = 'draft'
            self.flush()
            #set message
            req.message = 'this %s is now draft' % self.kind
        return self.view(req)
    withdraw.permit = "admin page"
    def kill(self, req):
        "delete self and all children!"
        if (self.stage == 'draft'):  #safety first: only drafts can be deleted
            self.delete_branch()
            message = '%s "%s" has been deleted' % (self.kind, self.name)
        else:
            message = 'deletion denied'
        # redirect to the parent object's view, passing the outcome message in the URL
        return req.redirect(
            self.get_pob().url('view?message=%s' % url_safe(message)))
    kill.permit = "create page"  #creator can kill a page, but not if it has been posted (as she can't withdraw it without admin permit)
    def delete_branch(self):
        "branch deletion - self and ALL child pages of any kind (the whole branch!) are deleted"
        for p in self.get_branch():
            if p.kind == 'image':
                # images use a dedicated delete (presumably to remove file data too) - confirm
                self.get(p.uid).delete_image()
            else:
                p.delete()
    def manage(self, req):
        "link to user edit"
        # assumes exactly one user record is linked to this page - TODO confirm
        user = self.User.list(page=self.uid)[0]
        req.page = 'manage'  # tabs need this
        return user.edit(req)
    manage.permit = 'edit user'
def details(self, req):
"link to edit of own details"
req.page = 'details'
return req.redirect(req.user.url("edit"))
###################### ratings / enable / disable ###################
ratedkinds=("page","image")
downratings=(-4,-4,-3,-2,-4,0,1)
upratings=(0,-2,-1,-1,1,2,2)
# non glyphicon version
# access these via rating_symbol()
rating_symbols=('×','?','√','♥','?','√','♥')
def rating_symbol(self,rating=None):
"give symbol for rating"
# rating should be in (-4,-3,-2,-1,0,1,2)
r=min(6,max(0,(rating if rating is not None else self.rating)+4))
return self.rating_symbols[r]
# glyphicon version
# access these via rating_class()
rating_classes=('remove-sign','question-sign','ok-sign','heart','question-sign','ok-sign','heart')
def rating_class(self,rating=None):
"give class for rating"
# rating should be in (-4,-3,-2,-1,0,1,2)
r=min(6,max(0,(rating if rating is not None else self.rating)+4))
return "glyphicon glyphicon-%s" % self.rating_classes[r]
# generic
def set_rating(self,rating):
"sets self.rating to rating"
self.rating=rating
self.flush()
def minrating(self):
"returns (cached) minimum rating accepted by global filter"
if not hasattr(self, "_v_minrating"):
self._v_minrating = self.list_int(item='rating',uid=1)[0]
return self._v_minrating
    def set_global_filter(self,req):
        "sets root rating (used as a global filter) to req.rating"
        # the root object has uid 1; its rating acts as the site-wide filter
        self.get(1).set_rating(req.rating)
        return req.redirect(self.url())
def rate_up(self,req):
"increase rating"
try:
self.rating=self.upratings[self.rating+4]
self.flush()
except:
pass
return req.redirect(self.url())
def rate_down(self,req):
"decrease rating"
try:
self.rating=self.downratings[self.rating+4]
self.flush()
except:
pass
return req.redirect(self.url())
def toggle_disable(self,req):
"disable / enable"
try:
self.rating=(0,0,1,2,-3,-2,-1)[self.rating+4]
self.flush()
except:
pass
return req.redirect(self.url())
###################### emails ##########################
def email_enabled(self):
""
return self.Config.mailfrom and self.Config.SMTPhost and True or False
    def email(self, TO, subject, text='', html=''):
        """convenient wrapper for library email function, supplying the configuration defaults

        Note that if self.Config.mailfrom has a False value, or no SMTPhost is set, no attempt will be made to send any email
        """
        if self.email_enabled():
            # calls the module-level email() helper (this method shadows that name)
            email(
                FROM=self.Config.mailfrom,
                TO=TO,
                subject=subject,
                text=text,
                html=html,
                SMTP=self.Config.SMTPhost,
                LOGIN=self.Config.SMTPlogin)
######################preferences ########################
# O/S : prefs should be stored in a separate table (rather than a column), for more efficient access
# as currently every single pref can require multiple page fetches (up the lineage) to find its value
# Alternatively, in get_pref(), lineage objects containing prefs should be cached when first accessed
# CONTAINER code elsewhere should be replaced with same LINEAGE approach as in get_pref()
page_default_prefs = {
'order_by': ('latest', 'order items by', ('date', 'latest', 'name',
'seq')),
#'show_time': ('Y', 'show dates and times', 'checkbox'),
# 'in_menu':('','in menu?','checkbox'),
'show_descendants': ('', 'show all descendants?', 'checkbox')
}
default_prefs = {
# {kind:{name:(default,display-name,display-type/size/options),},}
'root': copy(page_default_prefs),
'admin': copy(page_default_prefs),
'page': copy(page_default_prefs),
}
    def get_prefs(self):
        "returns dictionary of page preferences, from cache if possible - will use defaults if no prefs have yet been set"
        #
        # BUG! - THIS SHOULD TRAVERSE THE PREF HIERARCHY WHEN LOCAL PREF IS NOT YET CREATED, i.e. AS PER get_pref()
        #
        # preferences code NEEDS REDESIGN, to recognise use of empty strings
        # currently, only checkboxes can have an empty string as a valid override preference
        # PREFERENCES SHOULD BE TOTALLY AMALGAMATED WITH Config
        #
        if not hasattr(self, '_prefs'):
            self._prefs = {}
            if self.kind in self.default_prefs:
                defs = self.default_prefs[self.kind]
                if self.prefs:
                    # self.prefs is stored as newline-separated 'key=value' pairs
                    for i in self.prefs.split('\n'):
                        if i:
                            k, v = i.split('=')
                            if k in defs:  # check to skip old preferences that have been removed from defs
                                if not v and (
                                        defs[k][2] != 'checkbox'
                                ):  # non-checkboxes require a value
                                    # None makes get_pref() fall back to the lineage/Config
                                    v = None
                                self._prefs[k] = v
                else:  #prefs not yet created, so use defaults
                    for k, v in list(defs.items()):
                        self._prefs[k] = v[0]
        return self._prefs
    def get_pref(self, pref):
        "returns relevant pref from self.prefs, or container prefs, or Config"
        p = None
        # print "getting pref: ",pref, " for " ,self.kind,self.uid
        if self.kind in self.default_prefs:  # check own prefs
            p = self.get_prefs().get(pref)
            # print "checking self: ",repr(p)
        if p is None:  # check up along the lineage
            # lineage is a dot-separated string of ancestor uids; walk it
            # from the nearest ancestor outwards
            lineage = reversed(self.lineage.strip(".").split("."))
            # print ">>> lineage = ",list(lineage)
            for l in lineage:
                if l:
                    container = self.get(safeint(l))
                    if container.kind in self.default_prefs:  # check container's prefs
                        p = container.get_prefs().get(pref)
                        # print "checking lineage: ",container.uid, container.name,"=>", repr(p)
                        if not p is None:
                            break
        if p is None:  # check config
            # final fallback: global Config attribute, or '' if absent
            p = getattr(self.Config, pref, '')
            # print "checking config: ",repr(p)
        # print "GOT ",repr(p)
        return p
    @html
    def preferences(self, req):
        "preferences page - @html presumably renders the matching template; confirm"
        req.page = 'preferences'  # tabs need this
    preferences.permit = 'admin page'
    def update_prefs(self, req):
        "called by Page_preferences.evo: updates self.prefs"
        # keep the old values so we can detect changes below
        xprefs = self.get_prefs()
        # rebuild the stored newline-separated 'key=value' string from scratch
        self.prefs = ''
        for name, defn in list(self.default_prefs[self.kind].items()):
            default, displayname, typ = defn
            value = req.get(name, '').strip()
            # print "======",name,':',value,' ( ',req.get(name,''),' )'
            self.prefs += '%s=%s\n' % (name, value)
            # make any changes necessary - see change_theme() in music app as an example
            if (xprefs.get(name) != value) and hasattr(self,
                                                       "change_%s" % name):
                getattr(self, "change_%s" % name)(req)
        self.flush()
        del self._prefs  # clear cache
        return req.redirect(self.url())
    update_prefs.permit = 'create page'
def set_pref(self, | |
# repository: busyyang/torch_ecg
"""
"""
import os, sys, re, logging
import time, datetime
from functools import reduce
from copy import deepcopy
from itertools import repeat
from numbers import Real, Number
from typing import Union, Optional, List, Tuple, Dict, Sequence, NoReturn
import numpy as np
import pandas as pd
# public API of this utility module
__all__ = [
    "dict_to_str",
    "str2bool",
    "get_date_str",
    "mask_to_intervals",
    "list_sum",
    "gen_gaussian_noise", "gen_sinusoidal_noise", "gen_baseline_wander",
    "get_record_list_recursive3",
    "init_logger",
]
def dict_to_str(d:Union[dict, list, tuple], current_depth:int=1, indent_spaces:int=4) -> str:
    """ finished, checked,

    convert a (possibly) nested dict into a `str` of json-like formatted form,
    this nested dict might also contain lists or tuples of dict (and of str, int, etc.)

    Parameters:
    -----------
    d: dict, or list, or tuple,
        a (possibly) nested `dict`, or a list of `dict`
    current_depth: int, default 1,
        depth of `d` in the (possible) parent `dict` or `list`
    indent_spaces: int, default 4,
        the indent spaces of each depth

    Returns:
    --------
    s: str,
        the formatted string
    """
    assert isinstance(d, (dict, list, tuple))
    if len(d) == 0:
        s = f"{{}}" if isinstance(d, dict) else f"[]"
        return s
    # flat_types = (Number, bool, str,)
    # values of these types are packed several-per-line; everything else
    # gets one line each (or a recursive call)
    flat_types = (Number, bool,)
    flat_sep = ", "
    s = "\n"
    unit_indent = " "*indent_spaces
    prefix = unit_indent*current_depth
    if isinstance(d, (list, tuple)):
        if all([isinstance(v, flat_types) for v in d]):
            # wrap flat sequences at ~110 characters per line
            len_per_line = 110
            current_len = len(prefix) + 1  # + 1 for a comma
            val = []
            for idx, v in enumerate(d):
                # \042 is the octal escape for the double-quote character
                add_v = f"\042{v}\042" if isinstance(v, str) else str(v)
                add_len = len(add_v) + len(flat_sep)
                if current_len + add_len > len_per_line:
                    # flush the current line and start a new one with this item
                    val = ", ".join([item for item in val])
                    s += f"{prefix}{val},\n"
                    val = [add_v]
                    current_len = len(prefix) + 1 + len(add_v)
                else:
                    val.append(add_v)
                    current_len += add_len
            if len(val) > 0:
                # flush the remaining items
                val = ", ".join([item for item in val])
                s += f"{prefix}{val}\n"
        else:
            for v in d:
                if isinstance(v, (dict, list, tuple)):
                    s += f"{prefix}{dict_to_str(v, current_depth+1)}\n"
                else:
                    val = f"\042{v}\042" if isinstance(v, str) else v
                    s += f"{prefix}{val}\n"
    elif isinstance(d, dict):
        for k, v in d.items():
            key = f"\042{k}\042" if isinstance(k, str) else k
            if isinstance(v, (dict, list, tuple)):
                s += f"{prefix}{key}: {dict_to_str(v, current_depth+1)}\n"
            else:
                val = f"\042{v}\042" if isinstance(v, str) else v
                s += f"{prefix}{key}: {val}\n"
    # closing bracket is indented one level less than the contents
    s += unit_indent*(current_depth-1)
    s = f"{{{s}}}" if isinstance(d, dict) else f"[{s}]"
    return s
def str2bool(v:Union[str, bool]) -> bool:
    """ finished, checked,

    converts a "boolean" value possibly in the format of str to bool

    Parameters:
    -----------
    v: str or bool,
        the "boolean" value

    Returns:
    --------
    bool: `v` in the format of bool

    References:
    -----------
    https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0"):
        return False
    raise ValueError("Boolean value expected.")
def get_date_str(fmt:Optional[str]=None):
    """Format the current local time as a string.

    `fmt` is a strftime pattern; a falsy value (None or "") selects the
    default pattern "%Y-%m-%d-%H-%M-%S".
    """
    pattern = fmt or "%Y-%m-%d-%H-%M-%S"
    return datetime.datetime.now().strftime(pattern)
def mask_to_intervals(mask:np.ndarray, vals:Optional[Union[int,Sequence[int]]]=None) -> Union[list, dict]:
    """ finished, checked,

    Parameters:
    -----------
    mask: ndarray,
        1d mask
    vals: int or sequence of int, optional,
        values in `mask` to obtain intervals

    Returns:
    --------
    intervals: dict or list,
        the intervals corr. to each value in `vals` if `vals` is `None` or `Sequence`;
        or the intervals corr. to `vals` if `vals` is int.
        each interval is of the form `[a,b]`, left inclusive, right exclusive
    """
    if vals is None:
        _vals = list(set(mask))
    elif isinstance(vals, int):
        _vals = [vals]
    else:
        _vals = vals
    # assert set(_vals) & set(mask) == set(_vals)
    intervals = {v:[] for v in _vals}
    for v in _vals:
        # indices where the mask equals this value
        valid_inds = np.where(np.array(mask)==v)[0]
        if len(valid_inds) == 0:
            continue
        # positions where consecutive indices are NOT adjacent mark run breaks;
        # each break contributes both the end of one run and the start of the next
        split_indices = np.where(np.diff(valid_inds)>1)[0]
        split_indices = split_indices.tolist() + (split_indices+1).tolist()
        # include the overall first and last positions, then pair them up
        split_indices = sorted([0] + split_indices + [len(valid_inds)-1])
        for idx in range(len(split_indices)//2):
            # +1 on the right end makes the interval right-exclusive
            intervals[v].append(
                [valid_inds[split_indices[2*idx]], valid_inds[split_indices[2*idx+1]]+1]
            )
    if isinstance(vals, int):
        intervals = intervals[vals]
    return intervals
def list_sum(l:Sequence[list]) -> list:
    """ finished, checked,

    concatenate a sequence of lists into one flat list

    Parameters:
    -----------
    l: sequence of list,
        the lists to concatenate

    Returns:
    --------
    list: the concatenation in order; [] for empty input
    """
    # linear-time concatenation; the previous
    # reduce(lambda a,b: a+b, l, []) re-copied the accumulator each step (O(n^2))
    result = []
    for part in l:
        result.extend(part)
    return result
def gen_gaussian_noise(siglen:int, mean:Real=0, std:Real=0) -> np.ndarray:
    """ finished, checked,

    generate 1d Gaussian noise of given length, mean, and standard deviation

    Parameters:
    -----------
    siglen: int,
        length of the noise signal
    mean: real number, default 0,
        mean of the noise
    std: real number, default 0,
        standard deviation of the noise

    Returns:
    --------
    ndarray: the gaussian noise of given length, mean, and standard deviation
    """
    return np.random.normal(loc=mean, scale=std, size=siglen)
def gen_sinusoidal_noise(siglen:int, start_phase:Real, end_phase:Real, amplitude:Real, amplitude_mean:Real=0, amplitude_std:Real=0) -> np.ndarray:
    """ finished, checked,

    generate 1d sinusoidal noise of given length, amplitude, start phase, and end phase

    Parameters:
    -----------
    siglen: int,
        length of the (noise) signal
    start_phase: real number,
        start phase, with units in degrees
    end_phase: real number,
        end phase, with units in degrees
    amplitude: real number,
        amplitude of the sinusoidal curve
    amplitude_mean: real number,
        mean amplitude of an extra Gaussian noise
    amplitude_std: real number, default 0,
        standard deviation of an extra Gaussian noise

    Returns:
    --------
    ndarray: the sinusoidal noise of given length, amplitude, start phase, and end phase
    """
    # phases in degrees, evenly spaced over the signal, converted to radians
    phases = np.linspace(start_phase, end_phase, siglen)
    sn = amplitude * np.sin(np.deg2rad(phases))
    # add the optional Gaussian perturbation on top
    sn = sn + np.random.normal(amplitude_mean, amplitude_std, siglen)
    return sn
def gen_baseline_wander(siglen:int, fs:Real, bw_fs:Union[Real,Sequence[Real]], amplitude:Union[Real,Sequence[Real]], amplitude_mean:Real=0, amplitude_std:Real=0) -> np.ndarray:
    """ finished, checked,

    generate 1d baseline wander of given length, amplitude, and frequency

    Parameters:
    -----------
    siglen: int,
        length of the (noise) signal
    fs: real number,
        sampling frequency of the original signal
    bw_fs: real number, or list of real numbers,
        frequency (frequencies) of the baseline wander
    amplitude: real number, or list of real numbers,
        amplitude of the baseline wander (corr. to each frequency band)
    amplitude_mean: real number, default 0,
        mean amplitude of an extra Gaussian noise
    amplitude_std: real number, default 0,
        standard deviation of an extra Gaussian noise

    Returns:
    --------
    bw: ndarray,
        the baseline wander of given length, amplitude, frequency

    Example:
    --------
    >>> gen_baseline_wander(4000, 400, [0.4,0.1,0.05], [0.1,0.2,0.4])
    """
    # start from the optional Gaussian component
    bw = gen_gaussian_noise(siglen, amplitude_mean, amplitude_std)
    # normalize scalar arguments to per-band lists
    if isinstance(bw_fs, Real):
        _bw_fs = [bw_fs]
    else:
        _bw_fs = bw_fs
    if isinstance(amplitude, Real):
        _amplitude = list(repeat(amplitude, len(_bw_fs)))
    else:
        _amplitude = amplitude
    assert len(_bw_fs) == len(_amplitude)
    duration = (siglen / fs)
    # one sinusoid per frequency band, each with a random start phase;
    # the end phase spans `duration * bf` full cycles (360 deg each)
    for bf, a in zip(_bw_fs, _amplitude):
        start_phase = np.random.randint(0,360)
        end_phase = duration * bf * 360 + start_phase
        bw += gen_sinusoidal_noise(siglen, start_phase, end_phase, a, 0, 0)
    return bw
def get_record_list_recursive3(db_dir:str, rec_patterns:Union[str,Dict[str,str]]) -> Union[List[str], Dict[str, List[str]]]:
    """ finished, checked,

    get the list of records in `db_dir` recursively,
    for example, there are two folders "patient1", "patient2" in `db_dir`,
    and there are records "A0001", "A0002", ... in "patient1"; "B0001", "B0002", ... in "patient2",
    then the output would be "patient1{sep}A0001", ..., "patient2{sep}B0001", ...,
    sep is determined by the system

    Parameters:
    -----------
    db_dir: str,
        the parent (root) path of the whole database
    rec_patterns: str or dict,
        pattern of the record filenames, e.g. "A(?:\d+).mat",
        or patterns of several subsets, e.g. `{"A": "A(?:\d+).mat"}`

    Returns:
    --------
    res: list of str,
        list of records, in lexicographical order
    """
    # NOTE(review): `res` is never initialized when rec_patterns is neither
    # str nor dict - such input would raise NameError below
    if isinstance(rec_patterns, str):
        res = []
    elif isinstance(rec_patterns, dict):
        res = {k:[] for k in rec_patterns.keys()}
    db_dir = os.path.join(db_dir, "tmp").replace("tmp", "")  # make sure `db_dir` ends with a sep
    # breadth-first walk of the directory tree
    roots = [db_dir]
    while len(roots) > 0:
        new_roots = []
        for r in roots:
            tmp = [os.path.join(r, item) for item in os.listdir(r)]
            # res += [item for item in tmp if os.path.isfile(item)]
            if isinstance(rec_patterns, str):
                res += list(filter(re.compile(rec_patterns).search, tmp))
            elif isinstance(rec_patterns, dict):
                for k in rec_patterns.keys():
                    res[k] += list(filter(re.compile(rec_patterns[k]).search, tmp))
            new_roots += [item for item in tmp if os.path.isdir(item)]
        roots = deepcopy(new_roots)
    # strip the root prefix and the file extension, then sort
    if isinstance(rec_patterns, str):
        res = [os.path.splitext(item)[0].replace(db_dir, "") for item in res]
        res = sorted(res)
    elif isinstance(rec_patterns, dict):
        for k in rec_patterns.keys():
            res[k] = [os.path.splitext(item)[0].replace(db_dir, "") for item in res[k]]
            res[k] = sorted(res[k])
    return res
def init_logger(log_dir:str, log_file:Optional[str]=None, mode:str="a", verbose:int=0) -> logging.Logger:
""" finished, checked,
Parameters:
-----------
log_dir: str,
directory of the log file
log_file: str, optional,
name of the log file
mode: str, default "a",
mode of writing the log file, can be one of "a", "w"
verbose: int, default 0,
log verbosity
Returns:
--------
logger: Logger
"""
if log_file is None:
log_file = f"log_{get_date_str()}.txt"
if not os.path.exists(log_dir):
os.makedirs(log_dir)
log_file = os.path.join(log_dir, log_file)
print(f"log file path: {log_file}")
logger = logging.getLogger("ECG-UNET")
c_handler = logging.StreamHandler(sys.stdout)
f_handler = logging.FileHandler(log_file)
if verbose >= 2:
print("levels of c_handler and f_handler | |
--------------------------------------------------------
    def _copy(self, deep=False, rows=None, cols=None, base_index=0, cls=None):
        """
        Bracket indexing that returns a dataset will funnel into this routine.

        deep : if True, perform a deep copy on column array
        rows : row mask
        cols : column mask
        base_index : used for head/tail slicing
        cls  : class of return type, for subclass super() calls

        First argument must be deep. Deep cannnot be set to None. It must be True or False.
        """
        if cls is None:
            # preserve the (possibly subclassed) runtime type
            cls = type(self)
        newcols = self._as_itemcontainer(deep=deep, rows=rows, cols=cols, base_index=base_index)
        # newcols is either an ItemContainer or a dictionary
        ds = cls(newcols, base_index=base_index)
        # carry over non-column attributes (deep-copied when requested)
        ds = self._copy_attributes(ds, deep=deep)
        ## # ! TO DO fixup sortkeys, this block would change type of self._col_sortlist from [] to {}.
        ## if self._col_sortlist is not None:
        ##    # copy the dictionary
        ##    # TODO: turn these keys into new_sort or active sort if there wasn't one
        ##    keylist = {_k: _v for _k, _v in self._col_sortlist.items()}
        ##    # also copy keylist here
        ##    keylist = self._copy_from_dict(keylist, copy=deep, rows=rows, cols=cols)
        ##    ds._col_sortlist = keylist
        return ds
# --------------------------------------------------------
    def _as_itemcontainer(self, deep=False, rows=None, cols=None, base_index=0):
        """
        Returns an ItemContainer object for quick reconstruction or slicing/indexing of a dataset.
        Will perform a deep copy if requested and necessary.
        """
        def apply_rowmask(arr, mask):
            # callback for applying mask/slice to columns;
            # preserve the column's name across the indexing operation
            name = arr.get_name()
            arr = arr[mask]
            arr.set_name(name)
            return arr

        if rows is None:
            # item container copy, with or without a column selection
            newcols = self._all_items.copy(cols=cols)
        else:
            # get array data, slice, send back to item container for copy
            # slice will take a view of array (same memory)
            # boolean/fancy index will always make copy
            # will also slice/restore FastArray subclasses
            newcols = self._all_items.copy_apply(apply_rowmask, rows, cols=cols)

        # only slices, full arrays need a deep copy
        # (boolean/fancy indexing above already produced fresh copies)
        if deep and (isinstance(rows, slice) or rows is None):
            for v in newcols.iter_values():
                name = v[0].get_name()
                v[0] = v[0].copy()
                v[0].set_name(name)
                # deep copy item_attributes
                for i, vn in enumerate(v[1:]):
                    v[i+1] = vn.copy() if hasattr(vn, 'copy') else vn
        return newcols
# --------------------------------------------------------
def _autocomplete(self) -> str:
return f'Dataset{self.shape}'
# --------------------------------------------------------
def copy(self, deep=True):
"""
Make a copy of the Dataset.
Parameters
----------
deep : bool
Indicates whether the underlying data should be copied too. Defaults to True.
Returns
-------
Dataset
Examples
--------
>>> ds = rt.Dataset({'a': np.arange(-3,3), 'b':3*['A', 'B'], 'c':3*[True, False]})
>>> ds
# a b c
- -- - -----
0 -3 A True
1 -2 B False
2 -1 A True
3 0 B False
4 1 A True
5 2 B False
>>> ds1 = ds.copy()
>>> ds.a = ds.a + 1
>>> ds1
# a b c
- -- - -----
0 -3 A True
1 -2 B False
2 -1 A True
3 0 B False
4 1 A True
5 2 B False
Even though we have changed ds, ds1 remains unchanged.
"""
return self._copy(deep)
# --------------------------------------------------------
    def filter(self, rowfilter: np.ndarray, inplace:bool=False) -> 'Dataset':
        """
        Use a row filter to make a copy of the Dataset.

        Parameters
        ----------
        rowfilter: array, fancy index or boolean mask
        inplace : bool
            When set to True will reduce memory overhead. Defaults to False.

        Examples
        --------
        Filter a Dataset using the least memory possible

        >>> ds = rt.Dataset({'a': rt.arange(10_000_000), 'b': rt.arange(10_000_000.0)})
        >>> f = rt.logical(rt.arange(10_000_000) % 2)
        >>> ds.filter(f, inplace=True)
                #        a         b
        ------- ------- ---------
              0       1      1.00
              1       3      3.00
              2       5      5.00
            ...     ...       ...
        4999997 9999995 1.000e+07
        4999998 9999997 1.000e+07
        4999999 9999999 1.000e+07
        <BLANKLINE>
        [5000000 rows x 2 columns] total bytes: 57.2 MB
        """
        if inplace:
            # normalize rowfilter to an ndarray so dtype checks below are valid
            if np.isscalar(rowfilter):
                rowfilter=np.asanyarray([rowfilter])
            elif not isinstance(rowfilter, np.ndarray):
                rowfilter=np.asanyarray(rowfilter)
            self._all_items.copy_inplace(rowfilter)
            # check for boolean array: row count is the number of True entries;
            # for a fancy index it is simply the index length
            if rowfilter.dtype.char == '?':
                newlen = np.sum(rowfilter)
            else:
                newlen = len(rowfilter)
            self._nrows = newlen
            return self
        else:
            # shallow copy with the row mask applied
            return self._copy(False, rowfilter)
def get_nrows(self):
"""
Get the number of elements in each column of the Dataset.
Returns
-------
int
The number of elements in each column of the Dataset.
"""
return self._nrows
## -------------------------------------------------------
#def save_uncompressed(self, path, name):
# """
# *not implemented*
# """
# self.save(self, path, name, compress=False)
# -------------------------------------------------------
    def save(self, path: Union[str, os.PathLike] = '', share: Optional[str] = None, compress:bool=True, overwrite:bool=True, name: Optional[str] = None, onefile:bool=False,
             bandsize: Optional[int] = None, append: Optional[str] = None, complevel: Optional[int] = None):
        """
        Save a dataset to a single .sds file or shared memory.

        Parameters
        ----------
        path : str or os.PathLike
            full path to save location + file name (if no .sds extension is included, it will be added)
        share : str, optional
            Shared memory name. If set, dataset will be saved to shared memory and NOT to disk
            when shared memory is specified, a filename must be included in path. only this will be used,
            the rest of the path will be discarded.
        compress : bool
            Use compression when saving the file. Shared memory is always saved uncompressed.
        overwrite : bool
            Defaults to True. If False, prompt the user when overwriting an existing .sds file;
            mainly useful for Struct.save(), which may call Dataset.save() multiple times.
        name : str, optional
        bandsize : int, optional
            If set to an integer > 10000 it will compress column data every bandsize rows
        append : str, optional
            If set to a string it will append to the file with the section name.
        complevel : int, optional
            Compression level from 0 to 9. 2 (default) is average. 1 is faster, less compressed, 3 is slower, more compressed.

        Examples
        --------
        >>> ds = rt.Dataset({'col_'+str(i): rt.arange(5) for i in range(3)})
        >>> ds.save('my_data')
        >>> os.path.exists('my_data.sds')
        True

        >>> ds.save('my_data', overwrite=False)
        my_data.sds already exists and is a file. Overwrite? (y/n) n
        No file was saved.

        >>> ds.save('my_data', overwrite=True)
        Overwriting file with my_data.sds

        >>> ds.save('shareds1', share='sharename')
        >>> os.path.exists('shareds1.sds')
        False

        See Also
        --------
        Dataset.load(), Struct.save(), Struct.load(), load_sds(), load_h5()
        """
        # shared-memory saves require an explicit file name in `path`
        if share is not None:
            if path=='':
                raise ValueError(f'Must provide single .sds file name for item with share name {share}. e.g. my_ds.save("dataset1.sds", share="{share}")')
        # the actual work is delegated to the module-level save_sds()
        save_sds(path, self, share=share, compress=compress, overwrite=overwrite, name=name, onefile=onefile, bandsize=bandsize, append=append, complevel=complevel)
# -------------------------------------------------------
    @classmethod
    def load(cls, path: Union[str, os.PathLike] = '', share=None, decompress:bool=True, info:bool=False, include: Optional[Sequence[str]] = None,
             filter: Optional[np.ndarray] = None, sections: Optional[Sequence[str]] = None, threads: Optional[int] = None):
        """
        Load dataset from .sds file or shared memory.

        Parameters
        ----------
        path : str
            full path to load location + file name (if no .sds extension is included, it will be added)
        share : str, optional
            shared memory name. loader will check for dataset in shared memory first. if it's not there, the
            data (if file found on disk) will be loaded into the user's workspace AND shared memory. a sharename
            must be accompanied by a file name. (the rest of a full path will be trimmed off internally)
        decompress : bool
            **not implemented. the internal .sds loader will detect if the file is compressed
        info : bool
            Defaults to False. If True, load information about the contained arrays instead of loading them from file.
        include : sequence of str, optional
            Defaults to None. If provided, only load certain columns from the dataset.
        filter : np.ndarray of int or np.ndarray of bool, optional
        sections : sequence of str, optional
        threads : int, optional
            Defaults to None. Request certain number of threads during load.

        Examples
        --------
        >>> ds = rt.Dataset({'col_'+str(i):np.random.rand(5) for i in range(3)})
        >>> ds.save('my_data')
        >>> rt.Dataset.load('my_data')
        #   col_0   col_1   col_2
        -   -----   -----   -----
        0    0.94    0.88    0.87
        1    0.95    0.93    0.16
        2    0.18    0.94    0.95
        3    0.41    0.60    0.05
        4    0.53    0.23    0.71

        >>> ds = rt.Dataset.load('my_data', share='sharename')
        >>> os.remove('my_data.sds')
        >>> os.path.exists('my_data.sds')
        False

        >>> rt.Dataset.load('my_data', share='sharename')
        #   col_0   col_1   col_2
        -   -----   -----   -----
        0    0.94    0.88    0.87
        1    0.95    0.93    0.16
        2    0.18    0.94    0.95
        3    0.41    0.60    0.05
        4    0.53    0.23    0.71
        """
        # delegates to the module-level load_sds(); note `decompress` is
        # accepted but not forwarded (see the docstring)
        return load_sds(path, share=share, info=info, include=include, filter=filter, sections=sections, threads=threads)
# -------------------------------------------------------
@property
def size(self) -> int:
"""
| |
# escriptcore/py_src/faultsystems.py
##############################################################################
#
# Copyright (c) 2003-2020 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
# Development from 2019 by School of Earth and Environmental Sciences
#
##############################################################################
from __future__ import print_function, division
__copyright__="""Copyright (c) 2003-2020 by The University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Apache License, version 2.0
http://www.apache.org/licenses/LICENSE-2.0"""
__url__="https://launchpad.net/escript-finley"
#from esys.escript import sqrt, EPSILON, cos, sin, Lsup, atan, length, matrixmult, wherePositive, matrix_mult, inner, Scalar, whereNonNegative, whereNonPositive, maximum, minimum, sign, whereNegative, whereZero
import esys.escriptcore.pdetools as pdt
#from .util import *
from . import util as es
import numpy
import math
__all__= ['FaultSystem']
class FaultSystem(object):
"""
The FaultSystem class defines a system of faults in the Earth's crust.
A fault system is defined by set of faults index by a tag. Each fault is defined by a starting point V0 and a list of
strikes ``strikes`` and length ``l``. The strikes and the length is used to define a polyline with points ``V[i]`` such that
- ``V[0]=V0``
- ``V[i]=V[i]+ls[i]*array(cos(strikes[i]),sin(strikes[i]),0)``
So ``strikes`` defines the angle between the direction of the fault segment and the x0 axis. ls[i]==0 is allowed.
In case of a 3D model a fault plane is defined through a dip and depth.
The class provides a mechanism to parametrise each fault with the domain [0,w0_max] x [0, w1_max] (to [0,w0_max] in the 2D case).
"""
NOTAG="__NOTAG__"
MIN_DEPTH_ANGLE=0.1
def __init__(self,dim=3):
"""
Sets up the fault system
:param dim: spatial dimension
:type dim: ``int`` of value 2 or 3
"""
if not (dim == 2 or dim == 3):
raise ValueError("only dimension2 2 and 3 are supported.")
self.__dim=dim
self.__top={}
self.__ls={}
self.__strikes={}
self.__strike_vectors={}
self.__medDepth={}
self.__total_length={}
if dim ==2:
self.__depths=None
self.__depth_vectors=None
self.__dips=None
self.__bottom=None
self.__normals=None
else:
self.__depths={}
self.__depth_vectors={}
self.__dips={}
self.__bottom={}
self.__normals={}
self.__offsets={}
self.__w1_max={}
self.__w0_max={}
self.__center=None
self.__orientation = None
def getStart(self,tag=None):
"""
returns the starting point of fault ``tag``
:rtype: ``numpy.array``.
"""
return self.getTopPolyline(tag)[0]
def getTags(self):
"""
returns a list of the tags used by the fault system
:rtype: ``list``
"""
return list(self.__top.keys())
def getDim(self):
"""
returns the spatial dimension
:rtype: ``int``
"""
return self.__dim
def getTopPolyline(self, tag=None):
"""
returns the polyline used to describe fault tagged by ``tag``
:param tag: the tag of the fault
:type tag: ``float`` or ``str``
:return: the list of vertices defining the top of the fault. The coordinates are ``numpy.array``.
"""
if tag is None: tag=self.NOTAG
return self.__top[tag]
def getStrikes(self, tag=None):
"""
:return: the strike of the segements in fault ``tag``
:rtype: ``list`` of ``float``
"""
if tag is None: tag=self.NOTAG
return self.__strikes[tag]
def getStrikeVectors(self, tag=None):
"""
:return: the strike vectors of fault ``tag``
:rtype: ``list`` of ``numpy.array``.
"""
if tag is None: tag=self.NOTAG
return self.__strike_vectors[tag]
def getLengths(self, tag=None):
"""
:return: the lengths of segments in fault ``tag``
:rtype: ``list`` of ``float``
"""
if tag is None: tag=self.NOTAG
return self.__ls[tag]
def getTotalLength(self, tag=None):
"""
:return: the total unrolled length of fault ``tag``
:rtype: ``float``
"""
if tag is None: tag=self.NOTAG
return self.__total_length[tag]
def getMediumDepth(self,tag=None):
    """
    returns the medium depth of fault ``tag``

    :rtype: ``float``
    """
    key = self.NOTAG if tag is None else tag
    return self.__medDepth[key]
def getDips(self, tag=None):
    """
    returns the list of the dips of the segments in fault ``tag``

    :param tag: the tag of the fault
    :type tag: ``float`` or ``str``
    :return: the list of segment dips. In the 2D case None is returned.
    """
    # dips are only defined for 3D fault systems
    if self.getDim() != 3:
        return None
    key = self.NOTAG if tag is None else tag
    return self.__dips[key]
def getBottomPolyline(self, tag=None):
    """
    returns the list of the vertices defining the bottom of the fault ``tag``

    :param tag: the tag of the fault
    :type tag: ``float`` or ``str``
    :return: the list of vertices. In the 2D case None is returned.
    """
    # the bottom polyline only exists for 3D fault systems
    if self.getDim() != 3:
        return None
    key = self.NOTAG if tag is None else tag
    return self.__bottom[key]
def getSegmentNormals(self, tag=None):
    """
    returns the list of the normals of the segments in fault ``tag``

    :param tag: the tag of the fault
    :type tag: ``float`` or ``str``
    :return: the list of vectors normal to the segments. In the 2D case None is returned.
    """
    # segment normals only exist for 3D fault systems
    if self.getDim() != 3:
        return None
    key = self.NOTAG if tag is None else tag
    return self.__normals[key]
def getDepthVectors(self, tag=None):
    """
    returns the list of the depth vector at top vertices in fault ``tag``.

    :param tag: the tag of the fault
    :type tag: ``float`` or ``str``
    :return: the list of depth vectors. In the 2D case None is returned.
    """
    # depth vectors only exist for 3D fault systems
    if self.getDim() != 3:
        return None
    key = self.NOTAG if tag is None else tag
    return self.__depth_vectors[key]
def getDepths(self, tag=None):
    """
    returns the list of the depths of the segments in fault ``tag``.

    :param tag: the tag of the fault
    :type tag: ``float`` or ``str``
    :return: the list of segment depths. In the 2D case None is returned.
    """
    # segment depths only exist for 3D fault systems
    if self.getDim() != 3:
        return None
    key = self.NOTAG if tag is None else tag
    return self.__depths[key]
def getW0Range(self,tag=None):
    """
    returns the range of the parameterization in ``w0``

    :rtype: two ``float``
    """
    # fetch the offsets once; the range runs from the first to the last
    offsets = self.getW0Offsets(tag)
    return offsets[0], offsets[-1]
def getW1Range(self,tag=None):
    """
    returns the range of the parameterization in ``w1``

    :rtype: two ``float``
    """
    key = self.NOTAG if tag is None else tag
    # w1 runs from -w1_max (bottom) up to 0 (surface)
    return -self.__w1_max[key], 0
def getW0Offsets(self, tag=None):
    """
    returns the offsets for the parametrization of fault ``tag``.

    :return: the offsets in the parametrization
    :rtype: ``list`` of ``float``
    """
    key = self.NOTAG if tag is None else tag
    return self.__offsets[key]
def getCenterOnSurface(self):
    """
    returns the center point of the fault system at the surface
    (the mean of all top-polyline vertices, restricted to the x0x1 plane)

    :rtype: ``numpy.array``
    """
    if self.__center is None:
        # FIX: ``numpy.float`` was deprecated in NumPy 1.20 and removed in
        # 1.24; use the explicit ``numpy.float64`` dtype instead.
        self.__center = numpy.zeros((3,), numpy.float64)
        counter = 0
        for t in self.getTags():
            for s in self.getTopPolyline(t):
                # only the surface components (x0, x1) contribute
                self.__center[:2] += s[:2]
                counter += 1
        self.__center /= counter
    # cached; return only as many components as the spatial dimension
    return self.__center[:self.getDim()]
def getOrientationOnSurface(self):
    """
    returns the orientation of the fault system in RAD on the surface around the fault system center

    :rtype: ``float``
    """
    # cached after the first call
    if self.__orientation is None:
        center=self.getCenterOnSurface()
        # build the 2x2 covariance matrix of all top vertices about the center
        covariant=numpy.zeros((2,2))
        for t in self.getTags():
            for s in self.getTopPolyline(t):
                covariant[0,0]+=(center[0]-s[0])**2
                covariant[0,1]+=(center[1]-s[1])*(center[0]-s[0])
                covariant[1,1]+=(center[1]-s[1])**2
                covariant[1,0]+=(center[1]-s[1])*(center[0]-s[0])
        # the eigenvector belonging to the larger eigenvalue points along
        # the dominant direction of the fault system
        e, V=numpy.linalg.eigh(covariant)
        if e[0]>e[1]:
            d=V[:,0]
        else:
            d=V[:,1]
        if abs(d[0])>0.:
            self.__orientation=es.atan(d[1]/d[0])
        else:
            # dominant direction is parallel to the x1 axis
            self.__orientation=math.pi/2
    return self.__orientation
def transform(self, rot=0, shift=None):
    """
    applies a shift and a consecutive rotation in the x0x1 plane.

    :param rot: rotation angle in RAD
    :type rot: ``float``
    :param shift: shift vector to be applied before rotation; if ``None``
                  a zero vector of the spatial dimension is used.
    :type shift: ``numpy.array`` of size 2 or 3
    """
    # FIX: avoid the shared mutable default argument; also size the default
    # shift by the spatial dimension — the old ``numpy.zeros((3,))`` default
    # could not be broadcast against 2D start points.
    if shift is None:
        shift = numpy.zeros((self.getDim(),))
    # rotation matrix in the x0x1 plane (identity in x2 for the 3D case)
    if self.getDim() == 2:
        mat = numpy.array([[es.cos(rot), -es.sin(rot)],
                           [es.sin(rot), es.cos(rot)]])
    else:
        mat = numpy.array([[es.cos(rot), -es.sin(rot), 0.],
                           [es.sin(rot), es.cos(rot), 0.],
                           [0., 0., 1.]])
    for t in self.getTags():
        # re-register each fault with rotated strikes and a shifted, rotated
        # start point; all other geometry is carried over unchanged.
        # (FIX: removed the two unused local computations of strikes/V0.)
        self.addFault(strikes=[s + rot for s in self.getStrikes(t)],
                      ls=self.getLengths(t),
                      V0=numpy.dot(mat, self.getStart(t) + shift),
                      tag=t,
                      dips=self.getDips(t),
                      depths=self.getDepths(t),
                      w0_offsets=self.getW0Offsets(t),
                      w1_max=-self.getW1Range(t)[0])
def addFault(self, strikes, ls, V0=[0.,0.,0.],tag=None, dips=None, depths= None, w0_offsets=None, w1_max=None):
"""
adds a new fault to the fault system. The fault is named by ``tag``.
The fault is defined by a starting point V0 and a list of ``strikes`` and length ``ls``. The strikes and the length
is used to define a polyline with points ``V[i]`` such that
- ``V[0]=V0``
- ``V[i]=V[i]+ls[i]*array(cos(strikes[i]),sin(strikes[i]),0)``
So ``strikes`` defines the angle between the direction of the fault segment and the x0 axis. In 3D ``ls[i]`` ==0 is allowed.
In case of a 3D model a fault plane is defined through a dip ``dips`` and depth ``depths``.
From the dip and the depth the polyline ``bottom`` of the bottom of the fault is computed.
Each segment in the fault is decribed by the for vertices ``v0=top[i]``, ``v1==top[i+1]``, ``v2=bottom[i]`` and ``v3=bottom[i+1]``
The segment is parametrized by ``w0`` and ``w1`` with ``w0_offsets[i]<=w0<=w0_offsets[i+1]`` and ``-w1_max<=w1<=0``. Moreover
- ``(w0,w1)=(w0_offsets[i] , 0)->v0``
- ``(w0,w1)=(w0_offsets[i+1], 0)->v1``
- ``(w0,w1)=(w0_offsets[i] , -w1_max)->v2``
- ``(w0,w1)=(w0_offsets[i+1], -w1_max)->v3``
If no ``w0_offsets`` is given,
- ``w0_offsets[0]=0``
- ``w0_offsets[i]=w0_offsets[i-1]+L[i]``
where ``L[i]`` is the length of the segments on the top in 2D and in the middle of the segment in 3D.
If no ``w1_max`` is given, the average fault depth is used.
:param strikes: list of strikes. This is the angle of the | |
Resume a SQL pool.
examples:
- name: Resume a SQL pool.
text: |-
az synapse sql pool resume --name sqlpool --workspace-name testsynapseworkspace --resource-group rg
"""
helps['synapse sql pool delete'] = """
type: command
short-summary: Delete a SQL pool.
examples:
- name: Delete a SQL pool.
text: |-
az synapse sql pool delete --name sqlpool --workspace-name testsynapseworkspace --resource-group rg
"""
helps['synapse sql pool restore'] = """
type: command
short-summary: Create a new SQL pool by restoring from a backup.
examples:
- name: Create a new SQL pool by restoring an existing SQL pool's restore point.
text: |-
az synapse sql pool restore --name sqlpool --workspace-name testsynapseworkspace --resource-group rg \\
--dest-name newsqlpool --time 2020-11-25T02:47:37
"""
helps['synapse sql pool show-connection-string'] = """
type: command
short-summary: Generate a connection string to a SQL pool.
examples:
- name: Generate connection string for ado.net
text: |-
az synapse sql pool show-connection-string --name sqlpool --workspace-name testsynapseworkspace -c ado.net
"""
helps['synapse sql pool list-deleted'] = """
type: command
short-summary: List all deleted SQL pools.
examples:
- name: List deleted SQL pools.
text: |-
az synapse sql pool list-deleted --workspace-name testsynapseworkspace --resource-group rg
"""
helps['synapse sql pool wait'] = """
type: command
short-summary: Place the CLI in a waiting state until a condition of a SQL pool is met.
"""
helps['synapse sql pool classification'] = """
type: group
short-summary: Manage sensitivity classifications.
"""
helps['synapse sql pool classification create'] = """
type: command
short-summary: Create a column's sensitivity classification.
examples:
- name: Create sensitivity classification for a given column.
text: |-
az synapse sql pool classification create --name sqlpool --workspace-name testsynapseworkspace \\
--resource-group rg --schema dbo --table mytable --column mycolumn \\
--information-type Name --label "Confidential - GDPR"
"""
helps['synapse sql pool classification update'] = """
type: command
short-summary: Update a column's sensitivity classification.
examples:
- name: Update sensitivity classification for a given column.
text: |-
az synapse sql pool classification update --name sqlpool --workspace-name testsynapseworkspace \\
--resource-group rg --schema dbo --table mytable --column mycolumn \\
--information-type Name --label "Confidential - GDPR"
"""
helps['synapse sql pool classification list'] = """
type: command
short-summary: Get the sensitivity classifications of a given SQL pool.
examples:
- name: List the sensitivity classification of a given SQL pool.
text: |-
az synapse sql pool classification list --name sqlpool --workspace-name testsynapseworkspace --resource-group rg
"""
helps['synapse sql pool classification show'] = """
type: command
short-summary: Get the sensitivity classification of a given column.
examples:
- name: Get the sensitivity classification of a given column.
text: |-
az synapse sql pool classification show --name sqlpool --workspace-name testsynapseworkspace --resource-group rg \\
--schema dbo --table mytable --column mycolumn
"""
helps['synapse sql pool classification delete'] = """
type: command
short-summary: Delete the sensitivity classification of a given column.
examples:
- name: Delete the sensitivity classification of a given column.
text: |-
az synapse sql pool classification delete --name sqlpool --workspace-name testsynapseworkspace --resource-group rg \\
--schema dbo --table mytable --column mycolumn
"""
helps['synapse sql pool classification recommendation'] = """
type: group
short-summary: Manage sensitivity classification recommendations.
"""
helps['synapse sql pool classification recommendation list'] = """
type: command
short-summary: List the recommended sensitivity classifications of a given SQL pool.
examples:
- name: List the recommended sensitivity classifications of a given SQL pool.
text: |-
az synapse sql pool classification recommendation list --name sqlpool --workspace-name testsynapseworkspace \\
--resource-group rg
"""
helps['synapse sql pool classification recommendation enable'] = """
type: command
short-summary: Enable sensitivity recommendations for a given column(recommendations are enabled by default on all columns).
examples:
- name: Enable sensitivity recommendations for a given column.
text: |-
az synapse sql pool classification recommendation enable --name sqlpool --workspace-name testsynapseworkspace \\
--resource-group rg --schema dbo --table mytable --column mycolumn
"""
helps['synapse sql pool classification recommendation disable'] = """
type: command
short-summary: Disable sensitivity recommendations for a given column(recommendations are enabled by default on all columns).
examples:
- name: Disable sensitivity recommendations for a given column.
text: |-
az synapse sql pool classification recommendation disable --name sqlpool --workspace-name testsynapseworkspace \\
--resource-group rg --schema dbo --table mytable --column mycolumn
"""
helps['synapse sql pool tde'] = """
type: group
short-summary: Manage a SQL pool's transparent data encryption.
"""
helps['synapse sql pool tde set'] = """
type: command
short-summary: Set a SQL pool's transparent data encryption configuration.
examples:
- name: Set a SQL pool's transparent data encryption configuration. (autogenerated)
text: |-
az synapse sql pool tde set --name sqlpool --workspace-name testsynapseworkspace --resource-group rg \\
--status Enabled --transparent-data-encryption-name tdename
"""
helps['synapse sql pool tde show'] = """
type: command
short-summary: Get a SQL pool's transparent data encryption configuration.
examples:
- name: Get a SQL pool's transparent data encryption configuration. (autogenerated)
text: |-
az synapse sql pool tde show --name sqlpool --workspace-name testsynapseworkspace --resource-group rg \\
--transparent-data-encryption-name tdename
"""
helps['synapse sql pool threat-policy'] = """
type: group
short-summary: Manage a SQL pool's threat detection policies.
"""
helps['synapse sql pool threat-policy show'] = """
type: command
short-summary: Get a SQL pool's threat detection policy.
examples:
- name: Get a SQL pool's threat detection policy.
text: |-
az synapse sql pool threat-policy show --name sqlpool --workspace-name testsynapseworkspace \\
--resource-group rg --security-alert-policy-name threatpolicy
"""
helps['synapse sql pool threat-policy update'] = """
type: command
short-summary: Update a SQL pool's threat detection policy.
long-summary: If the policy is being enabled, storage_account or both storage_endpoint and storage_account_access_key must be specified.
examples:
- name: Enable by storage account name.
text: |-
az synapse sql pool threat-policy update --name sqlpool --workspace-name testsynapseworkspace --resource-group rg \\
--state Enabled --storage-account mystorageaccount --security-alert-policy-name threatpolicy
- name: Enable by storage endpoint and key.
text: |-
az synapse sql pool threat-policy update --name sqlpool --workspace-name testsynapseworkspace --resource-group rg \\
--state Enabled --storage-endpoint https://mystorage.blob.core.windows.net --storage-key MYKEY== \\
--security-alert-policy-name threatpolicy
- name: Disable a subset of alert types.
text: |-
az synapse sql pool threat-policy update --name sqlpool --workspace-name testsynapseworkspace --resource-group rg \\
--disabled-alerts Sql_Injection_Vulnerability Access_Anomaly --security-alert-policy-name threatpolicy
- name: Configure email recipients for a policy.
text: |-
az synapse sql pool threat-policy update --name sqlpool --workspace-name testsynapseworkspace --resource-group rg \\
--email-addresses <EMAIL> <EMAIL> --email-account-admins true \\
--security-alert-policy-name threatpolicy
- name: Disable a threat policy.
text: |-
az synapse sql pool threat-policy update --name sqlpool --workspace-name testsynapseworkspace --resource-group rg \\
--state Disabled --security-alert-policy-name threatpolicy
"""
helps['synapse sql pool audit-policy'] = """
type: group
short-summary: Manage a SQL pool's auditing policy.
"""
helps['synapse sql pool audit-policy show'] = """
type: command
short-summary: Get a SQL pool's auditing policy.
examples:
- name: Get a SQL pool's auditing policy.
text: |-
az synapse sql pool audit-policy show --name sqlpool --workspace-name testsynapseworkspace --resource-group rg
"""
helps['synapse sql pool audit-policy update'] = """
type: command
short-summary: Update a SQL pool's auditing policy.
long-summary: If the policy is being enabled, `--storage-account` or both `--storage-endpoint` and `--storage-key` must be specified.
examples:
- name: Enable by storage account name.
text: |
az synapse sql pool audit-policy update --name sqlpool --workspace-name testsynapseworkspace \\
--resource-group rg --state Enabled --blob-storage-target-state Enabled --storage-account mystorage \\
--blob-auditing-policy-name bapname
- name: Enable by storage endpoint and key.
text: |
az synapse sql pool audit-policy update --name sqlpool --workspace-name testsynapseworkspace \\
--resource-group rg --state Enabled --blob-storage-target-state Enabled \\
--storage-endpoint https://mystorage.blob.core.windows.net --storage-key MYKEY== \\
--blob-auditing-policy-name bapname
- name: Set the list of audit actions.
text: |
az synapse sql pool audit-policy update --name sqlpool --workspace-name testsynapseworkspace \\
--resource-group rg --actions SUCCESSFUL_DATABASE_AUTHENTICATION_GROUP 'UPDATE on database::mydb by public' \\
--blob-auditing-policy-name bapname
- name: Disable an auditing policy.
text: |-
az synapse sql pool audit-policy update --name sqlpool --workspace-name testsynapseworkspace \\
--resource-group rg --state Disabled --blob-auditing-policy-name bapname
- name: Disable a blob storage auditing policy.
text: |-
az synapse sql pool audit-policy update --name sqlpool --workspace-name testsynapseworkspace \\
--resource-group rg --blob-storage-target-state Disabled --blob-auditing-policy-name bapname
- name: Enable a log analytics auditing policy.
text: |
az synapse sql pool audit-policy update --name sqlpool --workspace-name testsynapseworkspace \\
--resource-group rg --state Enabled --log-analytics-target-state Enabled \\
--log-analytics-workspace-resource-id myworkspaceresourceid --blob-auditing-policy-name bapname
- name: Disable a log analytics auditing policy.
text: |
az synapse sql pool audit-policy update --name sqlpool --workspace-name testsynapseworkspace \\
--resource-group rg --log-analytics-target-state Disabled --blob-auditing-policy-name bapname
- name: Enable an event hub auditing policy.
text: |
az synapse sql pool audit-policy update --name sqlpool --workspace-name testsynapseworkspace \\
--resource-group rg --state Enabled --event-hub-target-state Enabled \\
--event-hub-authorization-rule-id eventhubauthorizationruleid --event-hub eventhubname \\
--blob-auditing-policy-name bapname
- name: Enable an event hub auditing policy for default event hub.
text: |
az synapse sql pool audit-policy update --name sqlpool --workspace-name testsynapseworkspace \\
--resource-group rg --state Enabled --event-hub-target-state Enabled \\
--event-hub-authorization-rule-id eventhubauthorizationruleid --blob-auditing-policy-name bapname
- name: Disable an event hub auditing policy.
text: | |
*(string) --*
The version ID of the Amazon Redshift engine that is running on the cluster.
- **AllowVersionUpgrade** *(boolean) --*
A boolean value that, if ``true`` , indicates that major version upgrades will be applied automatically to the cluster during the maintenance window.
- **NumberOfNodes** *(integer) --*
The number of compute nodes in the cluster.
- **PubliclyAccessible** *(boolean) --*
A boolean value that, if ``true`` , indicates that the cluster can be accessed from a public network.
- **Encrypted** *(boolean) --*
A boolean value that, if ``true`` , indicates that data in the cluster is encrypted at rest.
- **RestoreStatus** *(dict) --*
A value that describes the status of a cluster restore action. This parameter returns null if the cluster was not created by restoring a snapshot.
- **Status** *(string) --*
The status of the restore action. Returns starting, restoring, completed, or failed.
- **CurrentRestoreRateInMegaBytesPerSecond** *(float) --*
The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup.
- **SnapshotSizeInMegaBytes** *(integer) --*
The size of the set of snapshot data used to restore the cluster.
- **ProgressInMegaBytes** *(integer) --*
The number of megabytes that have been transferred from snapshot storage.
- **ElapsedTimeInSeconds** *(integer) --*
The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish.
- **EstimatedTimeToCompletionInSeconds** *(integer) --*
The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore.
- **DataTransferProgress** *(dict) --*
- **Status** *(string) --*
Describes the status of the cluster. While the transfer is in progress the status is ``transferringdata`` .
- **CurrentRateInMegaBytesPerSecond** *(float) --*
Describes the data transfer rate in MB per second.
- **TotalDataInMegaBytes** *(integer) --*
Describes the total amount of data to be transferred in megabytes.
- **DataTransferredInMegaBytes** *(integer) --*
Describes the total amount of data that has been transferred in MB.
- **EstimatedTimeToCompletionInSeconds** *(integer) --*
Describes the estimated number of seconds remaining to complete the transfer.
- **ElapsedTimeInSeconds** *(integer) --*
Describes the number of seconds that have elapsed during the data transfer.
- **HsmStatus** *(dict) --*
A value that reports whether the Amazon Redshift cluster has finished applying any hardware security module (HSM) settings changes specified in a modify cluster command.
Values: active, applying
- **HsmClientCertificateIdentifier** *(string) --*
Specifies the name of the HSM client certificate the Amazon Redshift cluster uses to retrieve the data encryption keys stored in an HSM.
- **HsmConfigurationIdentifier** *(string) --*
Specifies the name of the HSM configuration that contains the information the Amazon Redshift cluster can use to retrieve and store keys in an HSM.
- **Status** *(string) --*
Reports whether the Amazon Redshift cluster has finished applying any HSM settings changes specified in a modify cluster command.
Values: active, applying
- **ClusterSnapshotCopyStatus** *(dict) --*
A value that returns the destination region and retention period that are configured for cross-region snapshot copy.
- **DestinationRegion** *(string) --*
The destination region that snapshots are automatically copied to when cross-region snapshot copy is enabled.
- **RetentionPeriod** *(integer) --*
The number of days that automated snapshots are retained in the destination region after they are copied from a source region.
- **ManualSnapshotRetentionPeriod** *(integer) --*
The number of days that automated snapshots are retained in the destination region after they are copied from a source region. If the value is -1, the manual snapshot is retained indefinitely.
The value must be either -1 or an integer between 1 and 3,653.
- **SnapshotCopyGrantName** *(string) --*
The name of the snapshot copy grant.
- **ClusterPublicKey** *(string) --*
The public key for the cluster.
- **ClusterNodes** *(list) --*
The nodes in the cluster.
- *(dict) --*
The identifier of a node in a cluster.
- **NodeRole** *(string) --*
Whether the node is a leader node or a compute node.
- **PrivateIPAddress** *(string) --*
The private IP address of a node within a cluster.
- **PublicIPAddress** *(string) --*
The public IP address of a node within a cluster.
- **ElasticIpStatus** *(dict) --*
The status of the elastic IP (EIP) address.
- **ElasticIp** *(string) --*
The elastic IP (EIP) address for the cluster.
- **Status** *(string) --*
The status of the elastic IP (EIP) address.
- **ClusterRevisionNumber** *(string) --*
The specific revision number of the database in the cluster.
- **Tags** *(list) --*
The list of tags for the cluster.
- *(dict) --*
A tag consisting of a name/value pair for a resource.
- **Key** *(string) --*
The key, or name, for the resource tag.
- **Value** *(string) --*
The value for the resource tag.
- **KmsKeyId** *(string) --*
The AWS Key Management Service (AWS KMS) key ID of the encryption key used to encrypt data in the cluster.
- **EnhancedVpcRouting** *(boolean) --*
An option that specifies whether to create the cluster with enhanced VPC routing enabled. To create a cluster that uses enhanced VPC routing, the cluster must be in a VPC. For more information, see `Enhanced VPC Routing <https://docs.aws.amazon.com/redshift/latest/mgmt/enhanced-vpc-routing.html>`__ in the Amazon Redshift Cluster Management Guide.
If this option is ``true`` , enhanced VPC routing is enabled.
Default: false
- **IamRoles** *(list) --*
A list of AWS Identity and Access Management (IAM) roles that can be used by the cluster to access other AWS services.
- *(dict) --*
An AWS Identity and Access Management (IAM) role that can be used by the associated Amazon Redshift cluster to access other AWS services.
- **IamRoleArn** *(string) --*
The Amazon Resource Name (ARN) of the IAM role, for example, ``arn:aws:iam::123456789012:role/RedshiftCopyUnload`` .
- **ApplyStatus** *(string) --*
A value that describes the status of the IAM role's association with an Amazon Redshift cluster.
The following are possible statuses and descriptions.
* ``in-sync`` : The role is available for use by the cluster.
* ``adding`` : The role is in the process of being associated with the cluster.
* ``removing`` : The role is in the process of being disassociated with the cluster.
- **PendingActions** *(list) --*
Cluster operations that are waiting to be started.
- *(string) --*
- **MaintenanceTrackName** *(string) --*
The name of the maintenance track for the cluster.
- **ElasticResizeNumberOfNodeOptions** *(string) --*
The number of nodes that you can resize the cluster to with the elastic resize method.
- **DeferredMaintenanceWindows** *(list) --*
Describes a group of ``DeferredMaintenanceWindow`` objects.
- *(dict) --*
Describes a deferred maintenance window
- **DeferMaintenanceIdentifier** *(string) --*
A unique identifier for the maintenance window.
- **DeferMaintenanceStartTime** *(datetime) --*
A timestamp for the beginning of the time period when we defer maintenance.
- **DeferMaintenanceEndTime** *(datetime) --*
A timestamp for the end of the time period when we defer maintenance.
- **SnapshotScheduleIdentifier** *(string) --*
A unique identifier for the cluster snapshot schedule.
- **SnapshotScheduleState** *(string) --*
The current state of the cluster snapshot schedule.
- **ResizeInfo** *(dict) --*
Returns the following:
* AllowCancelResize: a boolean value indicating if the resize operation can be cancelled.
* ResizeType: Returns ClassicResize
- **ResizeType** *(string) --*
Returns the value ``ClassicResize`` .
- **AllowCancelResize** *(boolean) --*
A boolean value indicating if the resize operation can be cancelled.
:type DBName: string
:param DBName:
The name of the first database to be created when the cluster is created.
To create additional databases after the cluster is created, connect to the cluster with a SQL client and use SQL commands to create a database. For more information, | |
"""
Copyright (c) 2016, Granular, Inc.
All rights reserved.
License: BSD 3-Clause ("BSD New" or "BSD Simplified")
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions
and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the nor the names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import json
import os
import math
import re
from uuid import uuid4
from six import string_types
#Scipy
import numpy as np
from skimage.transform import downscale_local_mean
#Geo
from osgeo import gdal, osr
from osgeo.gdalconst import GA_ReadOnly
from osgeo.osr import SpatialReference
from shapely import wkb, ops
from shapely.affinity import scale
from shapely.geometry import box
from pyspatial import spatiallib as slib
from skimage.io import imsave
from PIL import Image, ImageDraw
from pyspatial import fileutils
from pyspatial.vector import read_geojson, to_geometry, bounding_box
from pyspatial.vector import VectorLayer
from pyspatial.utils import projection_from_epsg
from pyspatial import globalmaptiles
# Mapping from numpy dtype name to GDAL raster data type code
# (presumably the osgeo.gdalconst GDT_* enum values — confirm against
# the installed GDAL version; note code 8/9 — CInt16/CInt32 — are absent).
NP2GDAL_CONVERSION = {
    "uint8": 1,
    "uint16": 2,
    "int16": 3,
    "uint32": 4,
    "int32": 5,
    "float32": 6,
    "float64": 7,
    "complex64": 10,
    "complex128": 11,
}

# Inverse mapping: GDAL data type code -> numpy dtype name.
GDAL2NP_CONVERSION = {v: k for k, v in NP2GDAL_CONVERSION.items()}
TILE_REGEX = re.compile('([0-9]+)_([0-9]+)\.tif')
def rasterize(shp, ext_outline=False, ext_fill=True, int_outline=False,
              int_fill=False, scale_factor=4):
    """Convert a vector shape to a raster. Assumes the shape has already
    been transformed in to a pixel based coordinate system. The algorithm
    checks for the intersection of each point in the shape with
    a pixel grid created by the bounds of the shape. Partial overlaps
    are estimated by scaling the image in X and Y by the scale factor,
    rasterizing the shape, and downscaling (using mean), back to the
    bounds of the original shape.

    Parameters
    ----------
    shp: shapely.Polygon or Multipolygon
        The shape to rasterize
    ext_outline: boolean (default False)
        Include the outline of the shape in the raster
    ext_fill: boolean (default True)
        Fill the shape in the raster
    int_outline: boolean (default False)
        Include the outline of the interior shapes
    int_fill: boolean (default False):
        Fill the interior shapes
    scale_factor: int (default 4)
        The amount to scale the shape in X, Y before downscaling. The
        higher this number, the more precise the estimate of the overlap.

    Returns
    -------
    np.ndarray representing the rasterized shape.
    """
    sf = int(scale_factor)
    # integer pixel bounds of the shape (int() truncates toward zero)
    minx, miny, maxx, maxy = map(int, shp.bounds)
    # degenerate cases handled analytically: a single pixel, a vertical
    # line and a horizontal line each get a uniform weight raster
    if minx == maxx and miny == maxy:
        return np.array([[1.]])
    elif maxy > miny and minx == maxx:
        n = maxy - miny + 1
        return np.zeros([n, 1]) + 1./n
    elif maxy == miny and minx < maxx:
        n = maxx - minx + 1
        return np.zeros([1, n]) + 1./n
    # very small shapes are not worth supersampling
    if ((maxx - minx + 1) + (maxy - miny + 1)) <= 2*sf:
        sf = 1
    # supersample: scale the shape up, rasterize at the finer grid, then
    # downscale by the mean to estimate partial pixel coverage
    shp = scale(shp, xfact=sf, yfact=sf)
    minx, miny, maxx, maxy = shp.bounds
    width = int(maxx - minx + 1)
    height = int(maxy - miny + 1)
    # 8-bit grayscale canvas, initially all zeros
    img = Image.new('L', (width, height), 0)
    # normalize to a list of polygons (MultiPolygon exposes .geoms)
    _shp = shp.geoms if hasattr(shp, "geoms") else [shp]
    # PIL wants integer outline/fill values
    ext_outline = int(ext_outline)
    ext_fill = int(ext_fill)
    int_outline = int(int_outline)
    int_fill = int(int_fill)
    for pg in _shp:
        # shift exterior ring into the local (0, 0)-anchored canvas
        ext_pg = [(x-minx, y-miny) for x, y in pg.exterior.coords]
        ImageDraw.Draw(img).polygon(ext_pg, outline=ext_outline, fill=ext_fill)
        # draw interior rings (holes) with their own outline/fill flags
        for s in pg.interiors:
            int_pg = [(x-minx, y-miny) for x, y in s.coords]
            ImageDraw.Draw(img).polygon(int_pg, outline=int_outline,
                                        fill=int_fill)
    # mean-downscale back to the original resolution -> fractional coverage
    return downscale_local_mean(np.array(img), (sf, sf))
class RasterBase(object):
"""
Provides methods and attributes common to both RasterBand and
RasterDataset, particularly for converting shapes to pixels
in the raster coordinate space. Stores a coordinate system for a raster.
Parameters
----------
RasterXSize, RasterYSize: int
Number of pixels in the width and height respectively.
geo_transform : list of float
GDAL coefficients for GeoTransform (defines boundaries and pixel size
for a raster in lat/lon space).
proj: osr.SpatialReference
The spatial projection for the raster.
Attributes
----------
xsize, ysize: int
Number of pixels in the width and height respectively.
geo_transform : list of float
GDAL coefficients for GeoTransform (defines boundaries and pixel size
for a raster in lat/lon space).
min_lon: float
The minimum longitude in proj coordinates
min_lat: float
The minimum latitude in proj coordinates
max_lat: float
The maximum latitude in proj coordinates
lon_px_size: float
Horizontal size of the pixel
lat_px_size: float
Vertical size of the pixel
proj: osr.SpatialReference
The spatial projection for the raster.
"""
def __init__(self, RasterXSize, RasterYSize, geo_transform, proj):
    """Store the raster dimensions, GDAL geo-transform and projection,
    and pre-compute the derived lat/lon bounds and pixel sizes."""
    self.geo_transform = geo_transform
    self.proj = proj
    # raster dimensions, kept under both naming conventions
    self.xsize = RasterXSize
    self.ysize = RasterYSize
    self.RasterXSize = self.xsize
    self.RasterYSize = self.ysize
    # geo_transform layout: [origin_x, px_width, rot, origin_y, rot, px_height]
    self.min_lon = self.geo_transform[0]
    self.max_lat = self.geo_transform[3]
    # px_height is typically negative, so min_lat lies below max_lat
    self.min_lat = self.geo_transform[3] + self.geo_transform[5] * self.ysize
    self.lon_px_size = abs(self.geo_transform[1])
    self.lat_px_size = self.geo_transform[5]
    self.pixel_area = abs(self.lon_px_size * self.lat_px_size)
def _to_pixels(self, lon, lat, alt=None):
    """Convert a point from lon/lat to pixel coordinates. Note,
    the altitude is currently ignored.

    Parameters
    ----------
    lon: float
        Longitude of point
    lat: float
        Latitude of point

    Returns
    -------
    tuple of int
        (longitude in pixel space, latitude in pixel space).
        Truncated toward zero via ``int`` (not rounded to nearest).
    """
    # slib.to_pixels performs the affine lon/lat -> pixel transform
    # using the raster origin and pixel sizes
    lon_px, lat_px = slib.to_pixels(lon, lat, self.min_lon,
                                    self.max_lat, self.lon_px_size,
                                    self.lat_px_size)
    return int(lon_px), int(lat_px)
def shape_to_pixel(self, geom):
"""Takes a feature and returns a shapely object transformed into the
pixel coords.
Parameters
----------
feat : osgeo.ogr.Geometry
Feature to be transformed.
Returns
-------
shapely.Polygon
Feature in pixel coordinates.
"""
shp = wkb.loads(geom.ExportToWkb())
return ops.transform(self._to_pixels, shp)
def to_pixels(self, vector_layer):
"""Takes a vector layer and returns list of shapely geometry
transformed in pixel coordinates. If the projection of the
vector_layer is different than the raster band projection, it
transforms the coordinates first to raster projection.
Parameters
----------
vector_layer : VectorLayer
Shapes to be transformed.
Returns
-------
list of shapely.Polygon
Shapes in pixel coordinates.
"""
if self.proj.ExportToProj4() != vector_layer.proj.ExportToProj4():
vector_layer = vector_layer.transform(self.proj)
return [self.shape_to_pixel(geom) for geom in vector_layer]
def to_raster_coord(self, pxx, pxy):
"""Convert pixel corrdinates -> raster coordinates"""
if not (0 <= pxx < self.RasterXSize):
raise ValueError("Invalid x coordinate: %s" % pxx)
if not (0 <= pxy < self.RasterYSize):
raise ValueError("Invalid x coordinate: %s" % pxx)
# urx, ury are the upper right coordinates
# xsize, ysize, are the pixel sizes
urx, xsize, _, ury, _, ysize = self.geo_transform
return (urx + pxx * xsize, ury + ysize * pxy)
def to_geometry_grid(self, minx, miny, maxx, maxy):
"""Convert pixels into a geometry grid. All values should be in
pixel cooridnates.
Returns
-------
VectorLayer with index a tuple of the upper left corner coordinate
of each pixel.
"""
xs = np.arange(minx, maxx+1)
ys = np.arange(miny, maxy+1)
x, y = np.meshgrid(xs, ys)
index = []
boxes = []
for i in range(x.shape[0]):
for j in range(x.shape[1]):
x1, y1 = self.to_raster_coord(x[i, j], y[i, j])
x2, y2 = self.to_raster_coord(x[i, j] + 1, y[i, j] + 1)
boxes.append(bounding_box((x1, x2, y1, y2), self.proj))
index.append((int(x[i, j]), int(y[i, j])))
return VectorLayer(boxes, index=index, proj=self.proj)
def GetGeoTransform(self):
"""Returns affine transform from GDAL for describing the relationship
between raster positions (in pixel/line coordinates) and georeferenced
coordinates.
Returns
-------
min_lon: float
The minimum longitude in raster coordinates.
lon_px_size: float
Horizontal size of each pixel.
geo_transform[2] : float
Not used in our case. In general, this would be used if the
coordinate system had rotation or shearing.
max_lat: float
The maximum latitude in raster coordinates.
lat_px_size: float
Vertical size of the pixel.
geo_transform[5] : float
Not used in our case. In general, this would be used if the
coordinate system had rotation or shearing.
References
----------
http://www.gdal.org/gdal_tutorial.html
"""
return self.geo_transform
def | |
import wx
import numpy as np
from os import remove
from os.path import splitext, exists
from FileHandler import ReadXYZ
from scipy.signal import butter, filtfilt
from sklearn.decomposition import PCA
class Results():
    def __init__(self):
        """Intentionally empty; all attributes are populated later by
        updateAll() / updateEpochs()."""
def updateAll(self, Data):
# Get Specifications
self.sampleRate = Data.Datasets[0].sampleRate
self.removeDC = Data.Specs.CheckboxDC.GetValue()
self.average = Data.Specs.CheckboxAverage.GetValue()
self.newReference = Data.Specs.DropDownNewRef.GetValue()
try:
self.preEpoch = float(Data.Specs.PreEpoch.GetValue())
except ValueError:
self.preEpoch = 100.0
Data.Specs.PreEpoch.SetValue(str(self.preEpoch))
try:
self.postEpoch = float(Data.Specs.PostEpoch.GetValue())
except ValueError:
self.postEpoch = 500.0
Data.Specs.PostEpoch.SetValue(str(self.postEpoch))
self.doPass = Data.Specs.CheckboxPass.GetValue()
try:
self.lowcut = float(Data.Specs.LowPass.GetValue())
# Checks that Lowpass value is below nyquist frequency
nyquistFreq = self.sampleRate * 0.5
if self.lowcut > nyquistFreq:
self.lowcut = nyquistFreq - 0.001
Data.Specs.LowPass.SetValue(str(self.lowcut))
dlg = wx.MessageDialog(
Data.Overview, "Low pass value was above the nyquist " +
"frequency (%s Hz). The value was set to %s Hz." % (
nyquistFreq, self.lowcut),
"Info", wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
except ValueError:
self.lowcut = 0
Data.Specs.LowPass.SetValue(str(self.lowcut))
try:
self.highcut = float(Data.Specs.HighPass.GetValue())
# Checks that Highpass value is above sampling frequency
minFreq = 1. / int(np.round(
(self.preEpoch + self.postEpoch) * self.sampleRate * 0.001))
if self.highcut <= minFreq:
self.highcut = minFreq
Data.Specs.HighPass.SetValue(str(self.highcut))
dlg = wx.MessageDialog(
Data.Overview, "High pass value was below minimum " +
"Frequency and was adjusted to %.4f Hz." % minFreq,
"Info", wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
except ValueError:
self.highcut = 0
Data.Specs.HighPass.SetValue(str(self.highcut))
self.doNotch = Data.Specs.CheckboxNotch.GetValue()
try:
self.notchValue = float(Data.Specs.Notch.GetValue())
except ValueError:
self.notchValue = 50.0
Data.Specs.Notch.SetValue(str(self.notch))
# Calculate number of total iteration steps
iterations = 1
iterations += self.removeDC
iterations += self.average or self.newReference != 'None'
iterations += self.doPass and self.lowcut != 0 and self.highcut != 0
iterations += self.doNotch
# Preprocessing Message
progText = '\n' * ((1 + iterations) * len(Data.Datasets) - 1)
nChannels = Data.Datasets[0].rawdata.shape[0]
progressMax = iterations * len(Data.Datasets) * nChannels
dlg = wx.ProgressDialog(
"Data Preprocessing", progText, progressMax,
style=wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME | wx.PD_SMOOTH)
counter = 0
progText = ''
# Filter Channel Signal
for i, d in enumerate(Data.Datasets):
progFileName = Data.Filenames[i]
progText += 'Preprocessing %s:' % progFileName
# Load Dataset in memmap file
tmpFilename = splitext(d.filename)[0] + '.lineviewerTempData'
tmpDataset = np.memmap(tmpFilename, mode='w+', dtype='float32',
shape=d.rawdata.shape)
for t in range(nChannels):
tmpDataset[t] = d.rawdata[t]
# Update Progress Dialog
progUpdate = '\nRead Data:\t{:>6}%'.format(
np.round(100. * (t + 1) / nChannels, 1))
dlg.Update(counter, progText + progUpdate)
counter += 1
progText += '\nRead Data:\t{:>6}%'.format(100.0)
# 1. Remove DC
if self.removeDC:
dcOffset = np.vstack(tmpDataset.mean(axis=1))
for t in range(nChannels):
tmpDataset[t] -= dcOffset[t]
# Update Progress Dialog
progUpdate = '\nRemove DC:\t{:>6}%'.format(
np.round(100. * (t + 1) / nChannels, 1))
dlg.Update(counter, progText + progUpdate)
counter += 1
progText += '\nRemove DC:\t{:>6}%'.format(100.0)
# 2. Average or specific reference
if self.average or self.newReference != 'None':
if self.average:
refOffset = tmpDataset.mean(axis=0)
elif self.newReference != 'None':
electrodeID = np.where(
d.labelsChannel == self.newReference)[0]
if self.newReference != 'Average':
refOffset = tmpDataset[electrodeID]
for t in range(nChannels):
tmpDataset[t] -= refOffset[t]
# Update Progress Dialog
progUpdate = '\nRereference:\t{:>6}%'.format(
np.round(100. * (t + 1) / nChannels, 1))
dlg.Update(counter, progText + progUpdate)
counter += 1
progText += '\nRereference:\t{:>6}%'.format(100.0)
# 3. Run Butterworth Low-, High- or Bandpassfilter
if self.doPass and self.lowcut != 0 and self.highcut != 0:
b, a = butter_bandpass_param(d.sampleRate,
highcut=self.highcut,
lowcut=self.lowcut)
for t in range(nChannels):
tmpDataset[t] = filtfilt(b, a, tmpDataset[t])
# Update Progress Dialog
progUpdate = '\nFilter Data:\t{:>6}%'.format(
np.round(100. * (t + 1) / nChannels, 1))
dlg.Update(counter, progText + progUpdate)
counter += 1
progText += '\nFilter Data:\t{:>6}%'.format(100.0)
# 4. Notch Filter
if self.doNotch:
b, a = butter_bandpass_param(d.sampleRate,
notch=self.notchValue)
for t in range(nChannels):
tmpDataset[t] = filtfilt(b, a, tmpDataset[t])
# Update Progress Dialog
progUpdate = '\nNotch Filter:\t{:>6}%'.format(
np.round(100. * (t + 1) / nChannels, 1))
dlg.Update(counter, progText + progUpdate)
counter += 1
progText += '\nNotch Filter:\t{:>6}%'.format(100.0)
progText += '\n'
# Create epochs
self.preFrame = int(
np.round(self.preEpoch * self.sampleRate * 0.001))
self.preCut = np.copy(self.preFrame)
self.postFrame = int(
np.round(self.postEpoch * self.sampleRate * 0.001))
self.postCut = np.copy(self.postFrame)
# Drop markers if there's not enough preFrame or postFrame to cut
cutsIO = [True if m > self.preCut and m < tmpDataset.shape[
1] - self.postCut else False for m in d.markerTime]
epochs = np.array([tmpDataset[:, m - self.preCut:m + self.postCut]
for m in d.markerTime[np.where(cutsIO)]])
# Accumulate epoch information
if i == 0:
Data.epochs = epochs
Data.markers = d.markerValue[np.where(cutsIO)]
Data.labelsChannel = d.labelsChannel
else:
Data.epochs = np.vstack((Data.epochs, epochs))
Data.markers = np.hstack(
(Data.markers, d.markerValue[np.where(cutsIO)]))
# Clean up of temporary files and variables
del tmpDataset
if exists(tmpFilename):
remove(tmpFilename)
dlg.Destroy()
self.updateEpochs(Data)
def updateEpochs(self, Data):
# Get Specifications
self.blinkCorr = Data.Specs.CheckboxBlink.GetValue()
self.baselineCorr = Data.Specs.DropDownBase.GetSelection()
self.thresholdCorr = Data.Specs.CheckboxThreshold.GetValue()
try:
self.threshold = float(Data.Specs.ThreshValue.GetValue())
except ValueError:
self.threshold = 80.0
Data.Specs.ThreshValue.SetValue(str(self.threshold))
self.ignoreChannel = Data.Specs.channels2ignore
# Don't check ignored channels for thresholding
channel2Check = [i for i, e in enumerate(Data.labelsChannel)
if e not in self.ignoreChannel]
# Copy epoch values
epochs = np.copy(Data.epochs)
# Baseline Correction
if self.baselineCorr:
for e in epochs:
# if pre2zero is selected
if self.baselineCorr == 1:
baselineAvg = [[c] for c in np.mean(
e[:, self.preCut - self.preFrame:self.preCut], axis=1)]
# if pre2post is selected
elif self.baselineCorr == 2:
baselineAvg = [[c] for c in e.mean(axis=1)]
e -= baselineAvg
# Common parameters
self.matrixThreshold = np.zeros(
(epochs.shape[0], epochs.shape[1])).astype('bool')
self.matrixBlink = np.zeros(
(epochs.shape[0], epochs.shape[2])).astype('bool')
# Check Epochs for Threshold
if self.thresholdCorr:
# Create Progressbar for outlier detection
progressMax = epochs.shape[0]
dlg = wx.ProgressDialog(
"Outlier detection progress: Threshold",
"Time remaining to detect Threshold outliers", progressMax,
style=wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME | wx.PD_SMOOTH)
# Go through all the epochs
for i, e_long in enumerate(epochs):
e_short = epochs[i][:,
self.preCut - self.preFrame:self.preCut +
self.postFrame]
# Check for Threshold outliers
if self.thresholdCorr:
badChannels = np.where(
((e_short > self.threshold) |
(e_short < -self.threshold)).mean(axis=1))[0]
badChannels = [b for b in badChannels
if b in channel2Check]
self.matrixThreshold[i][badChannels] = True
dlg.Update(i)
dlg.Destroy()
# Check Epochs for Blink
if self.blinkCorr:
# Create Progressbar for outlier detection
nChannels = len(Data.Datasets[0].rawdata)
progressMax = len(Data.Datasets) * nChannels
dlg = wx.ProgressDialog(
"Outlier detection progress: Blink",
"Time remaining to detect Blink outliers", progressMax,
style=wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME | wx.PD_SMOOTH)
# Go through all datasets to detect blinks
for i, d in enumerate(Data.Datasets):
# Bandpass filter (1Hz - 10Hz) data to prepare for PCA
b, a = butter_bandpass_param(d.sampleRate,
highcut=1,
lowcut=10)
tmpFilename = splitext(d.filename)[0] + '.lineviewerTempData'
tmpDataset = np.memmap(tmpFilename, mode='w+', dtype='float32',
shape=d.rawdata.shape)
for t in range(nChannels):
tmpDataset[t] = filtfilt(b, a, d.rawdata[t])
dlg.Update(i * nChannels + t)
# Run PCA on first 25 components
pca = PCA(n_components=25)
pca.fit(tmpDataset)
# Detect blink component:
stdThresh = 4
outliersPos = ((np.transpose(pca.components_) -
pca.components_.mean(axis=1)) > stdThresh *
pca.components_.std(axis=1))
outliersNeg = ((np.transpose(pca.components_) -
pca.components_.mean(axis=1)) < -stdThresh *
pca.components_.std(axis=1))
outliersAbs = outliersPos + outliersNeg
outliersPerComp = outliersAbs.sum(axis=0)
blinkCompID = np.where(
outliersPerComp == outliersPerComp.max())[0]
# Check which blinks are in the epochs
blinkTimepoints = outliersAbs[:, blinkCompID].reshape(-1)
cutsIO = [True if m > self.preCut and m < tmpDataset.shape[1] -
self.postCut else False for m in d.markerTime]
blinkArray = np.array(
[blinkTimepoints[m - self.preCut:m + self.postCut]
for m in d.markerTime[np.where(cutsIO)]])
if i == 0:
self.matrixBlink = blinkArray
else:
self.matrixBlink = np.vstack(
(self.matrixBlink, blinkArray))
# Clean up of temporary files and variables
del tmpDataset
if exists(tmpFilename):
remove(tmpFilename)
dlg.Destroy()
# Connect all epochs and markers to self
self.epochs = epochs
self.markers = Data.markers
# Correct for selected outliers
if not hasattr(self, 'matrixSelected'):
self.matrixSelected = np.repeat('ok_normal', self.epochs.shape[0])
self.matrixSelected[np.where(
self.matrixThreshold.sum(axis=1))[0]] = 'threshold'
self.matrixSelected[np.where(
self.matrixBlink.sum(axis=1))[0]] = 'blink'
else:
# Check if new datasets were loaded
if self.matrixSelected.shape[0] < self.markers.shape[0]:
startID = self.matrixSelected.shape[0]
newLength = self.markers.shape[0] - startID
newSelectedMatrix = np.repeat('ok_normal', newLength)
newSelectedMatrix[np.where(self.matrixThreshold[
startID:].sum(axis=1))[0]] = 'threshold'
newSelectedMatrix[np.where(self.matrixBlink[
startID:].sum(axis=1))[0]] = 'blink'
self.matrixSelected = np.hstack([self.matrixSelected,
newSelectedMatrix])
# Correct if correction filters are on
if self.blinkCorr:
self.matrixSelected[
[i for i in np.where(self.matrixBlink.sum(axis=1))[0]
if self.matrixSelected[i] == 'ok_normal' or
self.matrixSelected[i] == 'threshold']] = 'blink'
else:
self.matrixSelected[
[i for i, bl in enumerate(self.matrixSelected)
if 'blink' in bl]] = 'ok_normal'
if self.thresholdCorr:
self.matrixSelected[
[i for i in np.where(self.matrixThreshold.sum(axis=1))[0]
if self.matrixSelected[i] == 'ok_normal']] = 'threshold'
# Make sure that channels are ignored, even in a already loaded dataset
if self.ignoreChannel != []:
id2threshold = np.where(self.matrixThreshold.sum(axis=1))[0]
idSelected = np.any([self.matrixSelected == 'threshold',
self.matrixSelected == 'blink'], axis=0)
id2Clean = [ic for ic in idSelected if ic not in id2threshold]
self.matrixSelected[id2Clean] = 'ok_normal'
# Correct if correction | |
+ 17*
m.b38*m.b89 + 18*m.b38*m.b92 + 19*m.b38*m.b95 + 20*m.b38*m.b98 + 21*m.b38*m.b101 + 22*m.b38*
m.b104 + 23*m.b38*m.b107 + 24*m.b38*m.b110 + 25*m.b38*m.b113 + 26*m.b38*m.b116 + 27*m.b38*m.b119
+ 28*m.b38*m.b122 + 29*m.b38*m.b125 + 30*m.b38*m.b128 + 31*m.b38*m.b131 + 32*m.b38*m.b134 + 33*
m.b38*m.b137 + 34*m.b38*m.b140 + 35*m.b38*m.b143 + 36*m.b38*m.b146 + 37*m.b38*m.b149 + m.b39*
m.b42 + 2*m.b39*m.b45 + 3*m.b39*m.b48 + 4*m.b39*m.b51 + 5*m.b39*m.b54 + 6*m.b39*m.b57 + 7*m.b39*
m.b60 + 8*m.b39*m.b63 + 9*m.b39*m.b66 + 10*m.b39*m.b69 + 11*m.b39*m.b72 + 12*m.b39*m.b75 + 13*
m.b39*m.b78 + 14*m.b39*m.b81 + 15*m.b39*m.b84 + 16*m.b39*m.b87 + 17*m.b39*m.b90 + 18*m.b39*m.b93
+ 19*m.b39*m.b96 + 20*m.b39*m.b99 + 21*m.b39*m.b102 + 22*m.b39*m.b105 + 23*m.b39*m.b108 + 24*
m.b39*m.b111 + 25*m.b39*m.b114 + 26*m.b39*m.b117 + 27*m.b39*m.b120 + 28*m.b39*m.b123 + 29*m.b39*
m.b126 + 30*m.b39*m.b129 + 31*m.b39*m.b132 + 32*m.b39*m.b135 + 33*m.b39*m.b138 + 34*m.b39*m.b141
+ 35*m.b39*m.b144 + 36*m.b39*m.b147 + 37*m.b39*m.b150 + m.b40*m.b43 + 2*m.b40*m.b46 + 3*m.b40*
m.b49 + 4*m.b40*m.b52 + 5*m.b40*m.b55 + 6*m.b40*m.b58 + 7*m.b40*m.b61 + 8*m.b40*m.b64 + 9*m.b40*
m.b67 + 10*m.b40*m.b70 + 11*m.b40*m.b73 + 12*m.b40*m.b76 + 13*m.b40*m.b79 + 14*m.b40*m.b82 + 15*
m.b40*m.b85 + 16*m.b40*m.b88 + 17*m.b40*m.b91 + 18*m.b40*m.b94 + 19*m.b40*m.b97 + 20*m.b40*m.b100
+ 21*m.b40*m.b103 + 22*m.b40*m.b106 + 23*m.b40*m.b109 + 24*m.b40*m.b112 + 25*m.b40*m.b115 + 26*
m.b40*m.b118 + 27*m.b40*m.b121 + 28*m.b40*m.b124 + 29*m.b40*m.b127 + 30*m.b40*m.b130 + 31*m.b40*
m.b133 + 32*m.b40*m.b136 + 33*m.b40*m.b139 + 34*m.b40*m.b142 + 35*m.b40*m.b145 + 36*m.b40*m.b148
+ m.b41*m.b44 + 2*m.b41*m.b47 + 3*m.b41*m.b50 + 4*m.b41*m.b53 + 5*m.b41*m.b56 + 6*m.b41*m.b59 +
7*m.b41*m.b62 + 8*m.b41*m.b65 + 9*m.b41*m.b68 + 10*m.b41*m.b71 + 11*m.b41*m.b74 + 12*m.b41*m.b77
+ 13*m.b41*m.b80 + 14*m.b41*m.b83 + 15*m.b41*m.b86 + 16*m.b41*m.b89 + 17*m.b41*m.b92 + 18*m.b41*
m.b95 + 19*m.b41*m.b98 + 20*m.b41*m.b101 + 21*m.b41*m.b104 + 22*m.b41*m.b107 + 23*m.b41*m.b110 +
24*m.b41*m.b113 + 25*m.b41*m.b116 + 26*m.b41*m.b119 + 27*m.b41*m.b122 + 28*m.b41*m.b125 + 29*
m.b41*m.b128 + 30*m.b41*m.b131 + 31*m.b41*m.b134 + 32*m.b41*m.b137 + 33*m.b41*m.b140 + 34*m.b41*
m.b143 + 35*m.b41*m.b146 + 36*m.b41*m.b149 + m.b42*m.b45 + 2*m.b42*m.b48 + 3*m.b42*m.b51 + 4*
m.b42*m.b54 + 5*m.b42*m.b57 + 6*m.b42*m.b60 + 7*m.b42*m.b63 + 8*m.b42*m.b66 + 9*m.b42*m.b69 + 10*
m.b42*m.b72 + 11*m.b42*m.b75 + 12*m.b42*m.b78 + 13*m.b42*m.b81 + 14*m.b42*m.b84 + 15*m.b42*m.b87
+ 16*m.b42*m.b90 + 17*m.b42*m.b93 + 18*m.b42*m.b96 + 19*m.b42*m.b99 + 20*m.b42*m.b102 + 21*m.b42
*m.b105 + 22*m.b42*m.b108 + 23*m.b42*m.b111 + 24*m.b42*m.b114 + 25*m.b42*m.b117 + 26*m.b42*m.b120
+ 27*m.b42*m.b123 + 28*m.b42*m.b126 + 29*m.b42*m.b129 + 30*m.b42*m.b132 + 31*m.b42*m.b135 + 32*
m.b42*m.b138 + 33*m.b42*m.b141 + 34*m.b42*m.b144 + 35*m.b42*m.b147 + 36*m.b42*m.b150 + m.b43*
m.b46 + 2*m.b43*m.b49 + 3*m.b43*m.b52 + 4*m.b43*m.b55 + 5*m.b43*m.b58 + 6*m.b43*m.b61 + 7*m.b43*
m.b64 + 8*m.b43*m.b67 + 9*m.b43*m.b70 + 10*m.b43*m.b73 + 11*m.b43*m.b76 + 12*m.b43*m.b79 + 13*
m.b43*m.b82 + 14*m.b43*m.b85 + 15*m.b43*m.b88 + 16*m.b43*m.b91 + 17*m.b43*m.b94 + 18*m.b43*m.b97
+ 19*m.b43*m.b100 + 20*m.b43*m.b103 + 21*m.b43*m.b106 + 22*m.b43*m.b109 + 23*m.b43*m.b112 + 24*
m.b43*m.b115 + 25*m.b43*m.b118 + 26*m.b43*m.b121 + 27*m.b43*m.b124 + 28*m.b43*m.b127 + 29*m.b43*
m.b130 + 30*m.b43*m.b133 + 31*m.b43*m.b136 + 32*m.b43*m.b139 + 33*m.b43*m.b142 + 34*m.b43*m.b145
+ 35*m.b43*m.b148 + m.b44*m.b47 + 2*m.b44*m.b50 + 3*m.b44*m.b53 + 4*m.b44*m.b56 + 5*m.b44*m.b59
+ 6*m.b44*m.b62 + 7*m.b44*m.b65 + 8*m.b44*m.b68 + 9*m.b44*m.b71 + 10*m.b44*m.b74 + 11*m.b44*
m.b77 + 12*m.b44*m.b80 + 13*m.b44*m.b83 + 14*m.b44*m.b86 + 15*m.b44*m.b89 + 16*m.b44*m.b92 + 17*
m.b44*m.b95 + 18*m.b44*m.b98 + 19*m.b44*m.b101 + 20*m.b44*m.b104 + 21*m.b44*m.b107 + 22*m.b44*
m.b110 + 23*m.b44*m.b113 + 24*m.b44*m.b116 + 25*m.b44*m.b119 + 26*m.b44*m.b122 + 27*m.b44*m.b125
+ 28*m.b44*m.b128 + 29*m.b44*m.b131 + 30*m.b44*m.b134 + 31*m.b44*m.b137 + 32*m.b44*m.b140 + 33*
m.b44*m.b143 + 34*m.b44*m.b146 + 35*m.b44*m.b149 + m.b45*m.b48 + 2*m.b45*m.b51 + 3*m.b45*m.b54 +
4*m.b45*m.b57 + 5*m.b45*m.b60 + 6*m.b45*m.b63 + 7*m.b45*m.b66 + 8*m.b45*m.b69 + 9*m.b45*m.b72 +
10*m.b45*m.b75 + 11*m.b45*m.b78 + 12*m.b45*m.b81 + 13*m.b45*m.b84 + 14*m.b45*m.b87 + 15*m.b45*
m.b90 + 16*m.b45*m.b93 + 17*m.b45*m.b96 + 18*m.b45*m.b99 + 19*m.b45*m.b102 + 20*m.b45*m.b105 + 21
*m.b45*m.b108 + 22*m.b45*m.b111 + 23*m.b45*m.b114 + 24*m.b45*m.b117 + 25*m.b45*m.b120 + 26*m.b45*
m.b123 + 27*m.b45*m.b126 + 28*m.b45*m.b129 + 29*m.b45*m.b132 + 30*m.b45*m.b135 + 31*m.b45*m.b138
+ 32*m.b45*m.b141 + 33*m.b45*m.b144 + 34*m.b45*m.b147 + 35*m.b45*m.b150 + m.b46*m.b49 + 2*m.b46*
m.b52 + 3*m.b46*m.b55 + 4*m.b46*m.b58 + 5*m.b46*m.b61 + 6*m.b46*m.b64 + 7*m.b46*m.b67 + 8*m.b46*
m.b70 + 9*m.b46*m.b73 + 10*m.b46*m.b76 + 11*m.b46*m.b79 + 12*m.b46*m.b82 + 13*m.b46*m.b85 + 14*
m.b46*m.b88 + 15*m.b46*m.b91 + 16*m.b46*m.b94 + 17*m.b46*m.b97 + 18*m.b46*m.b100 + 19*m.b46*
m.b103 + 20*m.b46*m.b106 + 21*m.b46*m.b109 + 22*m.b46*m.b112 + 23*m.b46*m.b115 + 24*m.b46*m.b118
+ 25*m.b46*m.b121 + 26*m.b46*m.b124 + 27*m.b46*m.b127 + 28*m.b46*m.b130 + 29*m.b46*m.b133 + 30*
m.b46*m.b136 + 31*m.b46*m.b139 + 32*m.b46*m.b142 + 33*m.b46*m.b145 + 34*m.b46*m.b148 + m.b47*
m.b50 + 2*m.b47*m.b53 + 3*m.b47*m.b56 + 4*m.b47*m.b59 + 5*m.b47*m.b62 + 6*m.b47*m.b65 + 7*m.b47*
m.b68 + 8*m.b47*m.b71 + 9*m.b47*m.b74 + 10*m.b47*m.b77 + 11*m.b47*m.b80 + 12*m.b47*m.b83 + 13*
m.b47*m.b86 + 14*m.b47*m.b89 + 15*m.b47*m.b92 + 16*m.b47*m.b95 + 17*m.b47*m.b98 + 18*m.b47*m.b101
+ 19*m.b47*m.b104 + 20*m.b47*m.b107 + 21*m.b47*m.b110 + 22*m.b47*m.b113 + 23*m.b47*m.b116 + 24*
m.b47*m.b119 + 25*m.b47*m.b122 + 26*m.b47*m.b125 + 27*m.b47*m.b128 + 28*m.b47*m.b131 + 29*m.b47*
m.b134 + 30*m.b47*m.b137 + 31*m.b47*m.b140 + 32*m.b47*m.b143 + 33*m.b47*m.b146 + 34*m.b47*m.b149
+ m.b48*m.b51 + 2*m.b48*m.b54 + 3*m.b48*m.b57 + 4*m.b48*m.b60 + 5*m.b48*m.b63 + 6*m.b48*m.b66 +
7*m.b48*m.b69 + 8*m.b48*m.b72 + 9*m.b48*m.b75 + 10*m.b48*m.b78 + 11*m.b48*m.b81 + 12*m.b48*m.b84
+ 13*m.b48*m.b87 + 14*m.b48*m.b90 + 15*m.b48*m.b93 + 16*m.b48*m.b96 + 17*m.b48*m.b99 + 18*m.b48*
m.b102 + 19*m.b48*m.b105 + 20*m.b48*m.b108 + 21*m.b48*m.b111 + 22*m.b48*m.b114 + 23*m.b48*m.b117
+ 24*m.b48*m.b120 + 25*m.b48*m.b123 + 26*m.b48*m.b126 + 27*m.b48*m.b129 + 28*m.b48*m.b132 + 29*
m.b48*m.b135 + 30*m.b48*m.b138 + 31*m.b48*m.b141 + 32*m.b48*m.b144 + 33*m.b48*m.b147 + 34*m.b48*
m.b150 + m.b49*m.b52 + 2*m.b49*m.b55 + 3*m.b49*m.b58 + 4*m.b49*m.b61 + 5*m.b49*m.b64 + 6*m.b49*
m.b67 + 7*m.b49*m.b70 + 8*m.b49*m.b73 + 9*m.b49*m.b76 + 10*m.b49*m.b79 + 11*m.b49*m.b82 + 12*
m.b49*m.b85 + 13*m.b49*m.b88 + 14*m.b49*m.b91 + 15*m.b49*m.b94 + 16*m.b49*m.b97 + 17*m.b49*m.b100
+ 18*m.b49*m.b103 + 19*m.b49*m.b106 + 20*m.b49*m.b109 + 21*m.b49*m.b112 + 22*m.b49*m.b115 + 23*
m.b49*m.b118 + 24*m.b49*m.b121 + 25*m.b49*m.b124 + 26*m.b49*m.b127 + 27*m.b49*m.b130 + 28*m.b49*
m.b133 + 29*m.b49*m.b136 + 30*m.b49*m.b139 + 31*m.b49*m.b142 + 32*m.b49*m.b145 + 33*m.b49*m.b148
+ m.b50*m.b53 + 2*m.b50*m.b56 + 3*m.b50*m.b59 + 4*m.b50*m.b62 + 5*m.b50*m.b65 + 6*m.b50*m.b68 +
7*m.b50*m.b71 + 8*m.b50*m.b74 + 9*m.b50*m.b77 + 10*m.b50*m.b80 + 11*m.b50*m.b83 + 12*m.b50*m.b86
+ 13*m.b50*m.b89 + 14*m.b50*m.b92 + 15*m.b50*m.b95 + 16*m.b50*m.b98 + 17*m.b50*m.b101 + 18*m.b50
*m.b104 + 19*m.b50*m.b107 + 20*m.b50*m.b110 + 21*m.b50*m.b113 + 22*m.b50*m.b116 + 23*m.b50*m.b119
+ 24*m.b50*m.b122 + 25*m.b50*m.b125 + 26*m.b50*m.b128 + 27*m.b50*m.b131 + 28*m.b50*m.b134 + 29*
m.b50*m.b137 + 30*m.b50*m.b140 + 31*m.b50*m.b143 + 32*m.b50*m.b146 + 33*m.b50*m.b149 + m.b51*
m.b54 + 2*m.b51*m.b57 + 3*m.b51*m.b60 + 4*m.b51*m.b63 + 5*m.b51*m.b66 + 6*m.b51*m.b69 + 7*m.b51*
m.b72 + 8*m.b51*m.b75 + 9*m.b51*m.b78 + 10*m.b51*m.b81 + 11*m.b51*m.b84 + 12*m.b51*m.b87 + 13*
m.b51*m.b90 + 14*m.b51*m.b93 + 15*m.b51*m.b96 + 16*m.b51*m.b99 + 17*m.b51*m.b102 + 18*m.b51*
m.b105 + 19*m.b51*m.b108 + 20*m.b51*m.b111 + 21*m.b51*m.b114 + 22*m.b51*m.b117 + 23*m.b51*m.b120
+ 24*m.b51*m.b123 + 25*m.b51*m.b126 + 26*m.b51*m.b129 + 27*m.b51*m.b132 + 28*m.b51*m.b135 + 29*
m.b51*m.b138 + 30*m.b51*m.b141 + 31*m.b51*m.b144 + 32*m.b51*m.b147 + 33*m.b51*m.b150 + m.b52*
m.b55 + 2*m.b52*m.b58 + 3*m.b52*m.b61 + 4*m.b52*m.b64 + 5*m.b52*m.b67 + 6*m.b52*m.b70 + 7*m.b52*
m.b73 + 8*m.b52*m.b76 + 9*m.b52*m.b79 + 10*m.b52*m.b82 + 11*m.b52*m.b85 + 12*m.b52*m.b88 + 13*
m.b52*m.b91 + 14*m.b52*m.b94 + 15*m.b52*m.b97 + 16*m.b52*m.b100 + 17*m.b52*m.b103 + 18*m.b52*
m.b106 + 19*m.b52*m.b109 + 20*m.b52*m.b112 + 21*m.b52*m.b115 + 22*m.b52*m.b118 + 23*m.b52*m.b121
+ 24*m.b52*m.b124 + 25*m.b52*m.b127 + 26*m.b52*m.b130 + 27*m.b52*m.b133 + 28*m.b52*m.b136 + 29*
m.b52*m.b139 + 30*m.b52*m.b142 + 31*m.b52*m.b145 + 32*m.b52*m.b148 + m.b53*m.b56 + 2*m.b53*m.b59
+ 3*m.b53*m.b62 + 4*m.b53*m.b65 + 5*m.b53*m.b68 + 6*m.b53*m.b71 + 7*m.b53*m.b74 + 8*m.b53*m.b77
+ 9*m.b53*m.b80 + 10*m.b53*m.b83 + 11*m.b53*m.b86 + 12*m.b53*m.b89 + 13*m.b53*m.b92 + 14*m.b53*
m.b95 + 15*m.b53*m.b98 + 16*m.b53*m.b101 + 17*m.b53*m.b104 + 18*m.b53*m.b107 + 19*m.b53*m.b110 +
20*m.b53*m.b113 + 21*m.b53*m.b116 + 22*m.b53*m.b119 + 23*m.b53*m.b122 + 24*m.b53*m.b125 + 25*
m.b53*m.b128 + 26*m.b53*m.b131 + 27*m.b53*m.b134 + 28*m.b53*m.b137 + 29*m.b53*m.b140 + 30*m.b53*
m.b143 + 31*m.b53*m.b146 + 32*m.b53*m.b149 + m.b54*m.b57 + 2*m.b54*m.b60 + 3*m.b54*m.b63 + 4*
m.b54*m.b66 + 5*m.b54*m.b69 + 6*m.b54*m.b72 + 7*m.b54*m.b75 + 8*m.b54*m.b78 + 9*m.b54*m.b81 + 10*
m.b54*m.b84 + 11*m.b54*m.b87 + 12*m.b54*m.b90 + 13*m.b54*m.b93 + 14*m.b54*m.b96 + 15*m.b54*m.b99
+ 16*m.b54*m.b102 + 17*m.b54*m.b105 + 18*m.b54*m.b108 + 19*m.b54*m.b111 + 20*m.b54*m.b114 + 21*
m.b54*m.b117 + 22*m.b54*m.b120 + 23*m.b54*m.b123 + 24*m.b54*m.b126 + 25*m.b54*m.b129 + 26*m.b54*
m.b132 + 27*m.b54*m.b135 + 28*m.b54*m.b138 + 29*m.b54*m.b141 + 30*m.b54*m.b144 + 31*m.b54*m.b147
+ 32*m.b54*m.b150 + m.b55*m.b58 + 2*m.b55*m.b61 + 3*m.b55*m.b64 + 4*m.b55*m.b67 + 5*m.b55*m.b70
+ 6*m.b55*m.b73 + 7*m.b55*m.b76 + 8*m.b55*m.b79 + 9*m.b55*m.b82 + 10*m.b55*m.b85 + 11*m.b55*
m.b88 + 12*m.b55*m.b91 + 13*m.b55*m.b94 + 14*m.b55*m.b97 + 15*m.b55*m.b100 + 16*m.b55*m.b103 + 17
*m.b55*m.b106 + 18*m.b55*m.b109 + 19*m.b55*m.b112 + 20*m.b55*m.b115 + 21*m.b55*m.b118 + 22*m.b55*
m.b121 + 23*m.b55*m.b124 + 24*m.b55*m.b127 + 25*m.b55*m.b130 + 26*m.b55*m.b133 + 27*m.b55*m.b136
+ 28*m.b55*m.b139 + 29*m.b55*m.b142 + 30*m.b55*m.b145 + 31*m.b55*m.b148 + m.b56*m.b59 + 2*m.b56*
| |
RDF()
editor = rdf.to_editor("<insert xml here>")
energy_diff = editor.new_energy_diff()
energy_diff \
.about("reaction0000", eUriType.MODEL_URI) \
.has_property(is_version_of="OPB:OPB_00237") \
.add_source("species0000", eUriType.MODEL_URI, 1) \
.add_sink("species0001", eUriType.MODEL_URI, 1)
editor.add_energy_diff(energy_diff)
See Also:
:class:`EnergyDiff`
Returns: None
"""
return _pyom.editor_add_energy_diff(self._obj, energy_diff.get_ptr())
@propagate_omexmeta_error
def add_personal_information(self, personal_information: PersonalInformation) -> None:
"""Adds a PersonalInformation to the relevant RDF graph (the one that created
this :class:`Editor`).
Users do not normally call this method themselves, because the preferred
user interface is to use the context manager for :class:`PersonalInformation`.
If the context manager for :class:`PersonalInformation` is not used to
create the :class:`PersonalInformation` object, then this method must be called
to add the :class:`PersonalInformation` object to the relevant :class:`RDF` object
See Examples.
Args:
personal_information: An instance of :class:`PersonalInformation` to add to the model
.. code-block: python
:linenos:
# Users should do the following. This implicitly calls the
# :meth:`Editor.add_personal_information` after the `with` block has finished.
rdf = RDF()
editor = rdf.to_editor("<insert sbml here>")
with editor.new_personal_information() as personal_information:
personal_information.add_creator("1234-1234-1234-1234") \
.add_name("Ciaran") \
.add_mbox("<EMAIL>") \
.add_account_name("1234-1234-1234-1234") \
.add_account_service_homepage("https://github.com/sys-bio/libomexmeta")
# If the context manager is not used, you must manually call :meth:`Editor.add_personal_information`
rdf = RDF()
editor = rdf.to_editor("<insert xml here>")
personal_information = editor.new_personal_information()
personal_information \
.about("reaction0000", eUriType.MODEL_URI) \
.has_property(is_version_of="OPB:OPB_00237") \
.add_source("species0000", eUriType.MODEL_URI, 1) \
.add_sink("species0001", eUriType.MODEL_URI, 1)
editor.add_personal_information(personal_information)
See Also:
:class:`PersonalInformation`
Returns: None
"""
return _pyom.editor_add_personal_information(self._obj, personal_information.get_ptr())
@propagate_omexmeta_error
def add_physical_property(self, property: PhysicalProperty) -> None:
"""Adds a PhysicalProperty to the relevant RDF graph (the one that created
this :class:`Editor`).
Composite annotations usually create their own :class:`PhysicalProperty` but this method
gives users the option to add one manually.
Users do not normally call this method themselves, because the preferred
user interface is to use the context manager for :class:`PhysicalProperty`.
If the context manager for :class:`PhysicalProperty` is not used to
create the :class:`PhysicalProperty` object, then this method must be called
to add the :class:`PhysicalProperty` object to the relevant :class:`RDF` object
See Examples.
Args:
property: An instance of :class:`PhysicalProperty` to add to the model
.. code-block: python
:linenos:
# Users should do the following. This implicitly calls the
# :meth:`Editor.add_personal_information` after the `with` block has finished.
rdf = RDF()
editor = rdf.to_editor("<insert sbml here>")
property = editor.new_physical_property()
property.about("EntityProperty", eUriType.LOCAL_URI) \
.is_version_of("opb:OPB_12345") \
.is_property_of("species0001", eUriType.MODEL_URI)
with editor.new_physical_entity() as physical_entity:
physical_entity.about("species0001", eUriType.MODEL_URI) \
.identity("uniprot:PD12345") \
.is_part_of("fma:1234") \
.has_property(property=property)
# Or to add the property outside of a composite annotation
editor.add_physical_property(property)
See Also:
:class:`PhysicalProperty`
Returns: None
"""
return _pyom.editor_add_physical_property(self._obj, property.get_ptr())
@propagate_omexmeta_error
def check_valid_metaid(self, id: str) -> None:
"""Convenience method for checking whether the metaid `id` is valid for this RDF graph"""
return _pyom.editor_check_valid_metaid(self._obj, id)
def get_metaids(self) -> List[str]:
"""Return a list of available metaids for current xml model"""
num_ids = _pyom.editor_get_num_metaids(self._obj)
propagate_omexmeta_error(num_ids)
return [_pyom.get_and_free_c_str(
propagate_omexmeta_error(_pyom.editor_get_metaid(self._obj, id))
) for id in range(num_ids)]
@propagate_omexmeta_error
def remove_single_annotation(self, single_annotaiton_ptr: ct.c_int64) -> None:
"""Remove a :class:`SingularAnnotation` from the RDF graph. Does nothing if not exists"""
return _pyom.editor_remove_single_annotation(self._obj, single_annotaiton_ptr)
@propagate_omexmeta_error
def remove_physical_entity(self, physical_entity_ptr: ct.c_int64) -> None:
"""Remove a :class:`PhysicalEntity` from the RDF graph. Does nothing if not exists"""
return _pyom.editor_remove_physical_entity(self._obj, physical_entity_ptr)
@propagate_omexmeta_error
def remove_physical_process(self, physical_process_ptr: ct.c_int64) -> None:
"""Remove a :class:`PhysicalProcess` from the RDF graph. Does nothing if not exists"""
return _pyom.editor_remove_physical_process(self._obj, physical_process_ptr)
@propagate_omexmeta_error
def remove_energy_diff(self, energy_diff_ptr: ct.c_int64) -> None:
"""Remove a :class:`EnergyDiff` from the RDF graph. Does nothing if not exists"""
return _pyom.editor_remove_energy_diff(self._obj, energy_diff_ptr)
@propagate_omexmeta_error
def remove_personal_information(self, personal_information_ptr: ct.c_int64) -> None:
"""Remove a :class:`PersonalInformation` from the RDF graph. Does nothing if not exists"""
return _pyom.editor_remove_personal_information(self._obj, personal_information_ptr)
def get_xml(self) -> str:
"""Returns the xml currently being edited by this :class:`Editor`"""
return _pyom.get_and_free_c_str(
propagate_omexmeta_error(_pyom.editor_get_xml(self._obj))
)
@contextmanager
def new_singular_annotation(self) -> SingularAnnotation:
"""Create a new :class:`SingularAnnotation` object.
This is a context manager, i.e. designed to be used inside a `with` block.
Doing so, automatically adds this :class:`SingularAnnotation` to the relevant
:class:`RDF` object. Use without a context manager requires users to manually
add the :class:`SingularAnnotation` to the :class:`RDF`
using :meth:`add_singular_annotation`
.. code-block: python
:linenos:
rdf = RDF()
editor = rdf.to_editor("insert xml here")
with editor.new_singular_annotation() as singular_annotation:
singular_annotation.about("SmadNuclearTransport") \
.predicate_from_uri("http://CaptainPredicate.org")\
.resource_literal("Literally a resource")
See Also:
:class:`SingularAnnotation`
Returns:
:class:`SingularAnnotation`
"""
obj = _pyom.editor_new_singular_annotation(self._obj)
if obj is None:
raise OmexMetaException(_pyom.get_last_error())
singular_annotation = SingularAnnotation(obj)
try:
yield singular_annotation
finally:
self.add_singular_annotation(singular_annotation)
@contextmanager
def new_personal_information(self) -> PersonalInformation:
    """Create a new :class:`PersonalInformation` object.

    Designed for use as a context manager: on exit of the `with` block the
    object is automatically registered with the owning :class:`RDF`.
    Callers that do not use a `with` block must register it themselves via
    :meth:`add_personal_information`.

    .. code-block: python
        :linenos:
        rdf = RDF()
        editor = rdf.to_editor("<insert sbml here>")
        with editor.new_personal_information() as personal_information:
            personal_information.add_creator("1234-1234-1234-1234") \
                .add_name("Ciaran") \
                .add_mbox("<EMAIL>") \
                .add_account_name("1234-1234-1234-1234") \
                .add_account_service_homepage("https://github.com/sys-bio/libomexmeta")

    See Also:
        :class:`PersonalInformation`

    Returns:
        :class:`PersonalInformation`
    """
    handle = _pyom.editor_new_personal_information(self._obj)
    if handle is None:
        raise OmexMetaException(_pyom.get_last_error())
    info = PersonalInformation(handle)
    try:
        yield info
    finally:
        # Always attach to the RDF graph, even if the body raised.
        self.add_personal_information(info)
@contextmanager
def new_physical_entity(self) -> PhysicalEntity:
    """Create a new :class:`PhysicalEntity` object.

    Designed for use as a context manager: on exit of the `with` block the
    entity is automatically registered with the owning :class:`RDF`.
    Callers that do not use a `with` block must register it themselves via
    :meth:`add_physical_entity`.

    .. code-block: python
        :linenos:
        rdf = RDF()
        editor = rdf.to_editor("<insert sbml here>")
        with editor.new_physical_entity() as physical_entity:
            physical_entity \
                .about("species0000", eUriType.MODEL_URI) \
                .has_property(is_version_of="OPB:OPB_00340") \
                .identity("uniprot:P84022") \
                .is_part_of("obo/FMA_7163") \
                .is_part_of("obo/FMA_264020")

    See Also:
        :class:`PhysicalEntity`

    Returns:
        :class:`PhysicalEntity`
    """
    handle = _pyom.editor_new_physical_entity(self._obj)
    if handle is None:
        raise OmexMetaException(_pyom.get_last_error())
    entity = PhysicalEntity(handle)
    try:
        yield entity
    finally:
        # Always attach to the RDF graph, even if the body raised.
        self.add_physical_entity(entity)
@contextmanager
def new_physical_process(self) -> PhysicalProcess:
    """Create a new :class:`PhysicalProcess` object.

    Designed for use as a context manager: on exit of the `with` block the
    process is automatically registered with the owning :class:`RDF`.
    Callers that do not use a `with` block must register it themselves via
    :meth:`add_physical_process`.

    .. code-block: python
        :linenos:
        rdf = RDF()
        editor = rdf.to_editor("<insert sbml here>")
        with editor.new_physical_process() as physical_process:
            physical_process \
                .about("reaction0000", eUriType.MODEL_URI) \
                .has_property(is_version_of="OPB:OPB_00237") \
                .add_source("species0000", eUriType.MODEL_URI, 1) \
                .add_sink("species0001", eUriType.MODEL_URI, 1)

    See Also:
        :class:`PhysicalProcess`

    Returns:
        :class:`PhysicalProcess`
    """
    handle = _pyom.editor_new_physical_process(self._obj)
    if handle is None:
        raise OmexMetaException(_pyom.get_last_error())
    process = PhysicalProcess(handle)
    try:
        yield process
    finally:
        # Always attach to the RDF graph, even if the body raised.
        self.add_physical_process(process)
@contextmanager
def new_energy_diff(self) -> EnergyDiff:
    """Create a new :class:`EnergyDiff` object.

    Designed for use as a context manager: on exit of the `with` block the
    energy differential is automatically registered with the owning
    :class:`RDF`. Callers that do not use a `with` block must register it
    themselves via :meth:`add_energy_diff`.

    .. code-block: python
        :linenos:
        rdf = RDF()
        editor = rdf.to_editor("<insert sbml here>")
        with editor.new_energy_diff() as energy_diff:
            energy_diff.about("reaction0000", eUriType.MODEL_URI) \
                .add_source("species0000", eUriType.MODEL_URI) \
                .add_sink("species0001", eUriType.MODEL_URI) \
                .has_property("localParameter0000", eUriType.LOCAL_URI, "opb:OPB_01058")

    See Also:
        :class:`EnergyDiff`

    Returns:
        :class:`EnergyDiff`
    """
    handle = _pyom.editor_new_energy_diff(self._obj)
    if handle is None:
        raise OmexMetaException(_pyom.get_last_error())
    diff = EnergyDiff(handle)
    try:
        yield diff
    finally:
        # Always attach to the RDF graph, even if the body raised.
        self.add_energy_diff(diff)
def new_physical_property(self) -> PhysicalProperty:
    """Create a new :class:`PhysicalProperty` object.

    (Fixed docstring: it previously said :class:`EnergyDiff`, a copy-paste
    error — this factory returns a :class:`PhysicalProperty`.)

    Unlike the other ``new_*`` factories this is not a context manager; the
    returned object is used directly.

    Raises:
        OmexMetaException: when the underlying C call returns no handle.

    Returns:
        :class:`PhysicalProperty`
    """
    obj = _pyom.editor_new_physical_property(self._obj)
    if obj is None:
        raise OmexMetaException(_pyom.get_last_error())
    return PhysicalProperty(obj)
def delete(self):
    """Release the native resources held by this :class:`Editor` object."""
    status = _pyom.editor_delete(self._obj)
    return status
# NOTE(review): the @propagate_omexmeta_error decorator that previously sat on
# this method was removed: the explicit propagate_omexmeta_error(self._obj)
# call below already performs the error check, and none of the sibling
# add_* methods (add_curator, add_taxon, ...) carry the decorator.
def add_creator(self, creator) -> Editor:
    """Add model level annotation "creator" to the rdf graph.

    Args:
        creator: identifier string for the model creator; it is encoded to
            bytes before being passed to the C API.

    Returns:
        Editor: ``self``, enabling fluent chaining.
    """
    self._obj = _pyom.editor_add_creator(self._obj, creator.encode())
    propagate_omexmeta_error(self._obj)
    return self
def add_curator(self, curator) -> Editor:
    """Add model level annotation "curator" to the rdf graph."""
    # The underlying C entry point is named editor_add_contributor.
    handle = _pyom.editor_add_contributor(self._obj, curator.encode())
    self._obj = handle
    propagate_omexmeta_error(handle)
    return self
def add_taxon(self, taxon) -> Editor:
    """Add model level annotation "taxon" to the rdf graph."""
    handle = _pyom.editor_add_taxon(self._obj, taxon.encode())
    self._obj = handle
    propagate_omexmeta_error(handle)
    return self
def add_pubmed(self, pubmed) -> Editor:
    """Add model level annotation "pubmed" to the rdf graph."""
    handle = _pyom.editor_add_pubmed(self._obj, pubmed.encode())
    self._obj = handle
    propagate_omexmeta_error(handle)
    return self
def add_description(self, description) -> Editor:
    """Add model level annotation "description" to the rdf graph."""
    handle = _pyom.editor_add_description(self._obj, description.encode())
    self._obj = handle
    propagate_omexmeta_error(handle)
    return self
def add_date_created(self, date_created) -> Editor:
    """Add model level annotation "date created" to the rdf graph."""
    handle = _pyom.editor_add_date_created(self._obj, date_created.encode())
    self._obj = handle
    propagate_omexmeta_error(handle)
    return self
def add_parent_model(self, parent_model) -> Editor:
    """Add model level annotation "parent model" to the rdf graph."""
    handle = _pyom.editor_add_parent_model(self._obj, parent_model.encode())
    self._obj = handle
    propagate_omexmeta_error(handle)
    return self
def strip_annotations(self, annotationElementName: str = "annotation") -> str:
    """Remove elements named *annotationElementName* from the xml and return
    the stripped document as a string.

    Bug fix: the error check previously inspected ``self._obj`` (the editor
    handle, unrelated to this call's outcome) and only AFTER the returned C
    string had already been freed. Mirror :meth:`get_xml` instead: validate
    the call's own return value, then convert and free it.

    Args:
        annotationElementName: tag name of the elements to strip
            (default ``"annotation"``); encoded to bytes for the C API.

    Returns:
        str: the xml document with the matching elements removed.
    """
    result = propagate_omexmeta_error(
        _pyom.editor_strip_annotations(self._obj, annotationElementName.encode())
    )
    return _pyom.get_and_free_c_str(result)
class SingularAnnotation:
"""Interface for handling | |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE(review): importlib is presumably used by a module-level __getattr__
# (PEP 562 lazy loading) driven by _lazy_type_to_package_map below — confirm
# against the rest of the module. PEP 562 requires Python 3.7+, hence the
# explicit version guard here.
import importlib
import sys
if sys.version_info < (3, 7):
    raise ImportError("This module requires Python 3.7 or later.")
_lazy_type_to_package_map = {
# Message types
"AdAssetPolicySummary": "google.ads.googleads.v7.common.types.asset_policy",
"AddressInfo": "google.ads.googleads.v7.common.types.criteria",
"AdImageAsset": "google.ads.googleads.v7.common.types.ad_asset",
"AdMediaBundleAsset": "google.ads.googleads.v7.common.types.ad_asset",
"AdScheduleInfo": "google.ads.googleads.v7.common.types.criteria",
"AdTextAsset": "google.ads.googleads.v7.common.types.ad_asset",
"AdVideoAsset": "google.ads.googleads.v7.common.types.ad_asset",
"AffiliateLocationFeedItem": "google.ads.googleads.v7.common.types.extensions",
"AgeRangeInfo": "google.ads.googleads.v7.common.types.criteria",
"AppAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"AppEngagementAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"AppFeedItem": "google.ads.googleads.v7.common.types.extensions",
"AppPaymentModelInfo": "google.ads.googleads.v7.common.types.criteria",
"BasicUserListInfo": "google.ads.googleads.v7.common.types.user_lists",
"BidModifierSimulationPoint": "google.ads.googleads.v7.common.types.simulation",
"BidModifierSimulationPointList": "google.ads.googleads.v7.common.types.simulation",
"BookOnGoogleAsset": "google.ads.googleads.v7.common.types.asset_types",
"BudgetCampaignAssociationStatus": "google.ads.googleads.v7.common.types.segments",
"BudgetSimulationPoint": "google.ads.googleads.v7.common.types.simulation",
"BudgetSimulationPointList": "google.ads.googleads.v7.common.types.simulation",
"BusinessNameFilter": "google.ads.googleads.v7.common.types.feed_item_set_filter_type_infos",
"CallFeedItem": "google.ads.googleads.v7.common.types.extensions",
"CallOnlyAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"CalloutAsset": "google.ads.googleads.v7.common.types.asset_types",
"CalloutFeedItem": "google.ads.googleads.v7.common.types.extensions",
"CarrierInfo": "google.ads.googleads.v7.common.types.criteria",
"ClickLocation": "google.ads.googleads.v7.common.types.click_location",
"CombinedAudienceInfo": "google.ads.googleads.v7.common.types.criteria",
"CombinedRuleUserListInfo": "google.ads.googleads.v7.common.types.user_lists",
"Commission": "google.ads.googleads.v7.common.types.bidding",
"ConceptGroup": "google.ads.googleads.v7.common.types.keyword_plan_common",
"ContentLabelInfo": "google.ads.googleads.v7.common.types.criteria",
"CpcBidSimulationPoint": "google.ads.googleads.v7.common.types.simulation",
"CpcBidSimulationPointList": "google.ads.googleads.v7.common.types.simulation",
"CpvBidSimulationPoint": "google.ads.googleads.v7.common.types.simulation",
"CpvBidSimulationPointList": "google.ads.googleads.v7.common.types.simulation",
"CriterionCategoryAvailability": "google.ads.googleads.v7.common.types.criterion_category_availability",
"CriterionCategoryChannelAvailability": "google.ads.googleads.v7.common.types.criterion_category_availability",
"CriterionCategoryLocaleAvailability": "google.ads.googleads.v7.common.types.criterion_category_availability",
"CrmBasedUserListInfo": "google.ads.googleads.v7.common.types.user_lists",
"CustomAffinityInfo": "google.ads.googleads.v7.common.types.criteria",
"CustomAudienceInfo": "google.ads.googleads.v7.common.types.criteria",
"CustomerMatchUserListMetadata": "google.ads.googleads.v7.common.types.offline_user_data",
"CustomIntentInfo": "google.ads.googleads.v7.common.types.criteria",
"CustomParameter": "google.ads.googleads.v7.common.types.custom_parameter",
"DateRange": "google.ads.googleads.v7.common.types.dates",
"DateSpecificRuleUserListInfo": "google.ads.googleads.v7.common.types.user_lists",
"DeviceInfo": "google.ads.googleads.v7.common.types.criteria",
"DisplayCallToAction": "google.ads.googleads.v7.common.types.ad_type_infos",
"DisplayUploadAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"DynamicAffiliateLocationSetFilter": "google.ads.googleads.v7.common.types.feed_item_set_filter_type_infos",
"DynamicLocationSetFilter": "google.ads.googleads.v7.common.types.feed_item_set_filter_type_infos",
"EnhancedCpc": "google.ads.googleads.v7.common.types.bidding",
"ExpandedDynamicSearchAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"ExpandedTextAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"ExplorerAutoOptimizerSetting": "google.ads.googleads.v7.common.types.explorer_auto_optimizer_setting",
"ExpressionRuleUserListInfo": "google.ads.googleads.v7.common.types.user_lists",
"FinalAppUrl": "google.ads.googleads.v7.common.types.final_app_url",
"FrequencyCapEntry": "google.ads.googleads.v7.common.types.frequency_cap",
"FrequencyCapKey": "google.ads.googleads.v7.common.types.frequency_cap",
"GenderInfo": "google.ads.googleads.v7.common.types.criteria",
"GeoPointInfo": "google.ads.googleads.v7.common.types.criteria",
"GmailAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"GmailTeaser": "google.ads.googleads.v7.common.types.ad_type_infos",
"HistoricalMetricsOptions": "google.ads.googleads.v7.common.types.keyword_plan_common",
"HotelAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"HotelAdvanceBookingWindowInfo": "google.ads.googleads.v7.common.types.criteria",
"HotelCalloutFeedItem": "google.ads.googleads.v7.common.types.extensions",
"HotelCheckInDateRangeInfo": "google.ads.googleads.v7.common.types.criteria",
"HotelCheckInDayInfo": "google.ads.googleads.v7.common.types.criteria",
"HotelCityInfo": "google.ads.googleads.v7.common.types.criteria",
"HotelClassInfo": "google.ads.googleads.v7.common.types.criteria",
"HotelCountryRegionInfo": "google.ads.googleads.v7.common.types.criteria",
"HotelDateSelectionTypeInfo": "google.ads.googleads.v7.common.types.criteria",
"HotelIdInfo": "google.ads.googleads.v7.common.types.criteria",
"HotelLengthOfStayInfo": "google.ads.googleads.v7.common.types.criteria",
"HotelStateInfo": "google.ads.googleads.v7.common.types.criteria",
"ImageAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"ImageAsset": "google.ads.googleads.v7.common.types.asset_types",
"ImageDimension": "google.ads.googleads.v7.common.types.asset_types",
"ImageFeedItem": "google.ads.googleads.v7.common.types.extensions",
"IncomeRangeInfo": "google.ads.googleads.v7.common.types.criteria",
"InteractionTypeInfo": "google.ads.googleads.v7.common.types.criteria",
"IpBlockInfo": "google.ads.googleads.v7.common.types.criteria",
"Keyword": "google.ads.googleads.v7.common.types.segments",
"KeywordAnnotations": "google.ads.googleads.v7.common.types.keyword_plan_common",
"KeywordConcept": "google.ads.googleads.v7.common.types.keyword_plan_common",
"KeywordInfo": "google.ads.googleads.v7.common.types.criteria",
"KeywordPlanAggregateMetricResults": "google.ads.googleads.v7.common.types.keyword_plan_common",
"KeywordPlanAggregateMetrics": "google.ads.googleads.v7.common.types.keyword_plan_common",
"KeywordPlanDeviceSearches": "google.ads.googleads.v7.common.types.keyword_plan_common",
"KeywordPlanHistoricalMetrics": "google.ads.googleads.v7.common.types.keyword_plan_common",
"LanguageInfo": "google.ads.googleads.v7.common.types.criteria",
"LeadFormAsset": "google.ads.googleads.v7.common.types.asset_types",
"LeadFormDeliveryMethod": "google.ads.googleads.v7.common.types.asset_types",
"LeadFormField": "google.ads.googleads.v7.common.types.asset_types",
"LeadFormSingleChoiceAnswers": "google.ads.googleads.v7.common.types.asset_types",
"LegacyAppInstallAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"LegacyResponsiveDisplayAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"ListingDimensionInfo": "google.ads.googleads.v7.common.types.criteria",
"ListingGroupInfo": "google.ads.googleads.v7.common.types.criteria",
"ListingScopeInfo": "google.ads.googleads.v7.common.types.criteria",
"LocalAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"LocationFeedItem": "google.ads.googleads.v7.common.types.extensions",
"LocationGroupInfo": "google.ads.googleads.v7.common.types.criteria",
"LocationInfo": "google.ads.googleads.v7.common.types.criteria",
"LogicalUserListInfo": "google.ads.googleads.v7.common.types.user_lists",
"LogicalUserListOperandInfo": "google.ads.googleads.v7.common.types.user_lists",
"ManualCpc": "google.ads.googleads.v7.common.types.bidding",
"ManualCpm": "google.ads.googleads.v7.common.types.bidding",
"ManualCpv": "google.ads.googleads.v7.common.types.bidding",
"MatchingFunction": "google.ads.googleads.v7.common.types.matching_function",
"MaximizeConversions": "google.ads.googleads.v7.common.types.bidding",
"MaximizeConversionValue": "google.ads.googleads.v7.common.types.bidding",
"MediaBundleAsset": "google.ads.googleads.v7.common.types.asset_types",
"Metrics": "google.ads.googleads.v7.common.types.metrics",
"MobileAppCategoryInfo": "google.ads.googleads.v7.common.types.criteria",
"MobileApplicationInfo": "google.ads.googleads.v7.common.types.criteria",
"MobileDeviceInfo": "google.ads.googleads.v7.common.types.criteria",
"Money": "google.ads.googleads.v7.common.types.feed_common",
"MonthlySearchVolume": "google.ads.googleads.v7.common.types.keyword_plan_common",
"OfflineUserAddressInfo": "google.ads.googleads.v7.common.types.offline_user_data",
"Operand": "google.ads.googleads.v7.common.types.matching_function",
"OperatingSystemVersionInfo": "google.ads.googleads.v7.common.types.criteria",
"ParentalStatusInfo": "google.ads.googleads.v7.common.types.criteria",
"PercentCpc": "google.ads.googleads.v7.common.types.bidding",
"PercentCpcBidSimulationPoint": "google.ads.googleads.v7.common.types.simulation",
"PercentCpcBidSimulationPointList": "google.ads.googleads.v7.common.types.simulation",
"PlacementInfo": "google.ads.googleads.v7.common.types.criteria",
"PolicyTopicConstraint": "google.ads.googleads.v7.common.types.policy",
"PolicyTopicEntry": "google.ads.googleads.v7.common.types.policy",
"PolicyTopicEvidence": "google.ads.googleads.v7.common.types.policy",
"PolicyValidationParameter": "google.ads.googleads.v7.common.types.policy",
"PolicyViolationKey": "google.ads.googleads.v7.common.types.policy",
"PreferredContentInfo": "google.ads.googleads.v7.common.types.criteria",
"PriceFeedItem": "google.ads.googleads.v7.common.types.extensions",
"PriceOffer": "google.ads.googleads.v7.common.types.extensions",
"ProductBiddingCategoryInfo": "google.ads.googleads.v7.common.types.criteria",
"ProductBrandInfo": "google.ads.googleads.v7.common.types.criteria",
"ProductChannelExclusivityInfo": "google.ads.googleads.v7.common.types.criteria",
"ProductChannelInfo": "google.ads.googleads.v7.common.types.criteria",
"ProductConditionInfo": "google.ads.googleads.v7.common.types.criteria",
"ProductCustomAttributeInfo": "google.ads.googleads.v7.common.types.criteria",
"ProductImage": "google.ads.googleads.v7.common.types.ad_type_infos",
"ProductItemIdInfo": "google.ads.googleads.v7.common.types.criteria",
"ProductTypeInfo": "google.ads.googleads.v7.common.types.criteria",
"ProductVideo": "google.ads.googleads.v7.common.types.ad_type_infos",
"PromotionAsset": "google.ads.googleads.v7.common.types.asset_types",
"PromotionFeedItem": "google.ads.googleads.v7.common.types.extensions",
"ProximityInfo": "google.ads.googleads.v7.common.types.criteria",
"RealTimeBiddingSetting": "google.ads.googleads.v7.common.types.real_time_bidding_setting",
"ResponsiveDisplayAdControlSpec": "google.ads.googleads.v7.common.types.ad_type_infos",
"ResponsiveDisplayAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"ResponsiveSearchAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"RuleBasedUserListInfo": "google.ads.googleads.v7.common.types.user_lists",
"Segments": "google.ads.googleads.v7.common.types.segments",
"ShoppingComparisonListingAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"ShoppingProductAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"ShoppingSmartAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"SimilarUserListInfo": "google.ads.googleads.v7.common.types.user_lists",
"SitelinkAsset": "google.ads.googleads.v7.common.types.asset_types",
"SitelinkFeedItem": "google.ads.googleads.v7.common.types.extensions",
"StoreAttribute": "google.ads.googleads.v7.common.types.offline_user_data",
"StoreSalesMetadata": "google.ads.googleads.v7.common.types.offline_user_data",
"StoreSalesThirdPartyMetadata": "google.ads.googleads.v7.common.types.offline_user_data",
"StructuredSnippetAsset": "google.ads.googleads.v7.common.types.asset_types",
"StructuredSnippetFeedItem": "google.ads.googleads.v7.common.types.extensions",
"TagSnippet": "google.ads.googleads.v7.common.types.tag_snippet",
"TargetCpa": "google.ads.googleads.v7.common.types.bidding",
"TargetCpaSimulationPoint": "google.ads.googleads.v7.common.types.simulation",
"TargetCpaSimulationPointList": "google.ads.googleads.v7.common.types.simulation",
"TargetCpm": "google.ads.googleads.v7.common.types.bidding",
"TargetImpressionShare": "google.ads.googleads.v7.common.types.bidding",
"TargetImpressionShareSimulationPoint": "google.ads.googleads.v7.common.types.simulation",
"TargetImpressionShareSimulationPointList": "google.ads.googleads.v7.common.types.simulation",
"TargetingSetting": "google.ads.googleads.v7.common.types.targeting_setting",
"TargetRestriction": "google.ads.googleads.v7.common.types.targeting_setting",
"TargetRestrictionOperation": "google.ads.googleads.v7.common.types.targeting_setting",
"TargetRoas": "google.ads.googleads.v7.common.types.bidding",
"TargetRoasSimulationPoint": "google.ads.googleads.v7.common.types.simulation",
"TargetRoasSimulationPointList": "google.ads.googleads.v7.common.types.simulation",
"TargetSpend": "google.ads.googleads.v7.common.types.bidding",
"TextAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"TextAsset": "google.ads.googleads.v7.common.types.asset_types",
"TextLabel": "google.ads.googleads.v7.common.types.text_label",
"TextMessageFeedItem": "google.ads.googleads.v7.common.types.extensions",
"TopicInfo": "google.ads.googleads.v7.common.types.criteria",
"TransactionAttribute": "google.ads.googleads.v7.common.types.offline_user_data",
"UnknownListingDimensionInfo": "google.ads.googleads.v7.common.types.criteria",
"UrlCollection": "google.ads.googleads.v7.common.types.url_collection",
"UserAttribute": "google.ads.googleads.v7.common.types.offline_user_data",
"UserData": "google.ads.googleads.v7.common.types.offline_user_data",
"UserIdentifier": "google.ads.googleads.v7.common.types.offline_user_data",
"UserInterestInfo": "google.ads.googleads.v7.common.types.criteria",
"UserListActionInfo": "google.ads.googleads.v7.common.types.user_lists",
"UserListDateRuleItemInfo": "google.ads.googleads.v7.common.types.user_lists",
"UserListInfo": "google.ads.googleads.v7.common.types.criteria",
"UserListLogicalRuleInfo": "google.ads.googleads.v7.common.types.user_lists",
"UserListNumberRuleItemInfo": "google.ads.googleads.v7.common.types.user_lists",
"UserListRuleInfo": "google.ads.googleads.v7.common.types.user_lists",
"UserListRuleItemGroupInfo": "google.ads.googleads.v7.common.types.user_lists",
"UserListRuleItemInfo": "google.ads.googleads.v7.common.types.user_lists",
"UserListStringRuleItemInfo": "google.ads.googleads.v7.common.types.user_lists",
"Value": "google.ads.googleads.v7.common.types.value",
"VideoAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"VideoBumperInStreamAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"VideoNonSkippableInStreamAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"VideoOutstreamAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"VideoResponsiveAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"VideoTrueViewDiscoveryAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"VideoTrueViewInStreamAdInfo": "google.ads.googleads.v7.common.types.ad_type_infos",
"WebhookDelivery": "google.ads.googleads.v7.common.types.asset_types",
"WebpageConditionInfo": "google.ads.googleads.v7.common.types.criteria",
"WebpageInfo": "google.ads.googleads.v7.common.types.criteria",
"WebpageSampleInfo": "google.ads.googleads.v7.common.types.criteria",
"YearMonth": "google.ads.googleads.v7.common.types.dates",
"YearMonthRange": "google.ads.googleads.v7.common.types.dates",
"YouTubeChannelInfo": "google.ads.googleads.v7.common.types.criteria",
"YoutubeVideoAsset": "google.ads.googleads.v7.common.types.asset_types",
"YouTubeVideoInfo": "google.ads.googleads.v7.common.types.criteria",
"AccessInvitationStatusEnum": "google.ads.googleads.v7.enums.types.access_invitation_status",
"AccessReasonEnum": "google.ads.googleads.v7.enums.types.access_reason",
"AccessRoleEnum": "google.ads.googleads.v7.enums.types.access_role",
"AccountBudgetProposalStatusEnum": "google.ads.googleads.v7.enums.types.account_budget_proposal_status",
"AccountBudgetProposalTypeEnum": "google.ads.googleads.v7.enums.types.account_budget_proposal_type",
"AccountBudgetStatusEnum": "google.ads.googleads.v7.enums.types.account_budget_status",
"AccountLinkStatusEnum": "google.ads.googleads.v7.enums.types.account_link_status",
"AdCustomizerPlaceholderFieldEnum": "google.ads.googleads.v7.enums.types.ad_customizer_placeholder_field",
"AdDestinationTypeEnum": "google.ads.googleads.v7.enums.types.ad_destination_type",
"AdGroupAdRotationModeEnum": "google.ads.googleads.v7.enums.types.ad_group_ad_rotation_mode",
"AdGroupAdStatusEnum": "google.ads.googleads.v7.enums.types.ad_group_ad_status",
"AdGroupCriterionApprovalStatusEnum": "google.ads.googleads.v7.enums.types.ad_group_criterion_approval_status",
"AdGroupCriterionStatusEnum": "google.ads.googleads.v7.enums.types.ad_group_criterion_status",
"AdGroupStatusEnum": "google.ads.googleads.v7.enums.types.ad_group_status",
"AdGroupTypeEnum": "google.ads.googleads.v7.enums.types.ad_group_type",
"AdNetworkTypeEnum": "google.ads.googleads.v7.enums.types.ad_network_type",
"AdServingOptimizationStatusEnum": "google.ads.googleads.v7.enums.types.ad_serving_optimization_status",
"AdStrengthEnum": "google.ads.googleads.v7.enums.types.ad_strength",
"AdTypeEnum": "google.ads.googleads.v7.enums.types.ad_type",
"AdvertisingChannelSubTypeEnum": "google.ads.googleads.v7.enums.types.advertising_channel_sub_type",
"AdvertisingChannelTypeEnum": "google.ads.googleads.v7.enums.types.advertising_channel_type",
"AffiliateLocationFeedRelationshipTypeEnum": "google.ads.googleads.v7.enums.types.affiliate_location_feed_relationship_type",
"AffiliateLocationPlaceholderFieldEnum": "google.ads.googleads.v7.enums.types.affiliate_location_placeholder_field",
"AgeRangeTypeEnum": "google.ads.googleads.v7.enums.types.age_range_type",
"AppCampaignAppStoreEnum": "google.ads.googleads.v7.enums.types.app_campaign_app_store",
"AppCampaignBiddingStrategyGoalTypeEnum": "google.ads.googleads.v7.enums.types.app_campaign_bidding_strategy_goal_type",
"AppPaymentModelTypeEnum": "google.ads.googleads.v7.enums.types.app_payment_model_type",
"AppPlaceholderFieldEnum": "google.ads.googleads.v7.enums.types.app_placeholder_field",
"AppStoreEnum": "google.ads.googleads.v7.enums.types.app_store",
"AppUrlOperatingSystemTypeEnum": "google.ads.googleads.v7.enums.types.app_url_operating_system_type",
"AssetFieldTypeEnum": "google.ads.googleads.v7.enums.types.asset_field_type",
"AssetLinkStatusEnum": "google.ads.googleads.v7.enums.types.asset_link_status",
"AssetPerformanceLabelEnum": "google.ads.googleads.v7.enums.types.asset_performance_label",
"AssetTypeEnum": "google.ads.googleads.v7.enums.types.asset_type",
"AttributionModelEnum": "google.ads.googleads.v7.enums.types.attribution_model",
"BatchJobStatusEnum": "google.ads.googleads.v7.enums.types.batch_job_status",
"BiddingSourceEnum": "google.ads.googleads.v7.enums.types.bidding_source",
"BiddingStrategyStatusEnum": "google.ads.googleads.v7.enums.types.bidding_strategy_status",
"BiddingStrategyTypeEnum": "google.ads.googleads.v7.enums.types.bidding_strategy_type",
"BidModifierSourceEnum": "google.ads.googleads.v7.enums.types.bid_modifier_source",
"BillingSetupStatusEnum": "google.ads.googleads.v7.enums.types.billing_setup_status",
"BrandSafetySuitabilityEnum": "google.ads.googleads.v7.enums.types.brand_safety_suitability",
"BudgetCampaignAssociationStatusEnum": "google.ads.googleads.v7.enums.types.budget_campaign_association_status",
"BudgetDeliveryMethodEnum": "google.ads.googleads.v7.enums.types.budget_delivery_method",
"BudgetPeriodEnum": "google.ads.googleads.v7.enums.types.budget_period",
"BudgetStatusEnum": "google.ads.googleads.v7.enums.types.budget_status",
"BudgetTypeEnum": "google.ads.googleads.v7.enums.types.budget_type",
"CallConversionReportingStateEnum": "google.ads.googleads.v7.enums.types.call_conversion_reporting_state",
"CalloutPlaceholderFieldEnum": "google.ads.googleads.v7.enums.types.callout_placeholder_field",
"CallPlaceholderFieldEnum": "google.ads.googleads.v7.enums.types.call_placeholder_field",
"CallTrackingDisplayLocationEnum": "google.ads.googleads.v7.enums.types.call_tracking_display_location",
"CallTypeEnum": "google.ads.googleads.v7.enums.types.call_type",
"CampaignCriterionStatusEnum": "google.ads.googleads.v7.enums.types.campaign_criterion_status",
"CampaignDraftStatusEnum": "google.ads.googleads.v7.enums.types.campaign_draft_status",
"CampaignExperimentStatusEnum": "google.ads.googleads.v7.enums.types.campaign_experiment_status",
"CampaignExperimentTrafficSplitTypeEnum": "google.ads.googleads.v7.enums.types.campaign_experiment_traffic_split_type",
"CampaignExperimentTypeEnum": "google.ads.googleads.v7.enums.types.campaign_experiment_type",
"CampaignServingStatusEnum": "google.ads.googleads.v7.enums.types.campaign_serving_status",
"CampaignSharedSetStatusEnum": "google.ads.googleads.v7.enums.types.campaign_shared_set_status",
"CampaignStatusEnum": "google.ads.googleads.v7.enums.types.campaign_status",
"ChangeClientTypeEnum": "google.ads.googleads.v7.enums.types.change_client_type",
"ChangeEventResourceTypeEnum": "google.ads.googleads.v7.enums.types.change_event_resource_type",
"ChangeStatusOperationEnum": "google.ads.googleads.v7.enums.types.change_status_operation",
"ChangeStatusResourceTypeEnum": "google.ads.googleads.v7.enums.types.change_status_resource_type",
"ClickTypeEnum": "google.ads.googleads.v7.enums.types.click_type",
"CombinedAudienceStatusEnum": "google.ads.googleads.v7.enums.types.combined_audience_status",
"ContentLabelTypeEnum": "google.ads.googleads.v7.enums.types.content_label_type",
"ConversionActionCategoryEnum": "google.ads.googleads.v7.enums.types.conversion_action_category",
"ConversionActionCountingTypeEnum": "google.ads.googleads.v7.enums.types.conversion_action_counting_type",
"ConversionActionStatusEnum": "google.ads.googleads.v7.enums.types.conversion_action_status",
"ConversionActionTypeEnum": "google.ads.googleads.v7.enums.types.conversion_action_type",
"ConversionAdjustmentTypeEnum": "google.ads.googleads.v7.enums.types.conversion_adjustment_type",
"ConversionAttributionEventTypeEnum": "google.ads.googleads.v7.enums.types.conversion_attribution_event_type",
"ConversionCustomVariableStatusEnum": "google.ads.googleads.v7.enums.types.conversion_custom_variable_status",
"ConversionLagBucketEnum": "google.ads.googleads.v7.enums.types.conversion_lag_bucket",
"ConversionOrAdjustmentLagBucketEnum": "google.ads.googleads.v7.enums.types.conversion_or_adjustment_lag_bucket",
"CriterionCategoryChannelAvailabilityModeEnum": "google.ads.googleads.v7.enums.types.criterion_category_channel_availability_mode",
"CriterionCategoryLocaleAvailabilityModeEnum": "google.ads.googleads.v7.enums.types.criterion_category_locale_availability_mode",
"CriterionSystemServingStatusEnum": "google.ads.googleads.v7.enums.types.criterion_system_serving_status",
"CriterionTypeEnum": "google.ads.googleads.v7.enums.types.criterion_type",
"CustomAudienceMemberTypeEnum": "google.ads.googleads.v7.enums.types.custom_audience_member_type",
"CustomAudienceStatusEnum": "google.ads.googleads.v7.enums.types.custom_audience_status",
"CustomAudienceTypeEnum": "google.ads.googleads.v7.enums.types.custom_audience_type",
"CustomerMatchUploadKeyTypeEnum": "google.ads.googleads.v7.enums.types.customer_match_upload_key_type",
"CustomerPayPerConversionEligibilityFailureReasonEnum": "google.ads.googleads.v7.enums.types.customer_pay_per_conversion_eligibility_failure_reason",
"CustomInterestMemberTypeEnum": "google.ads.googleads.v7.enums.types.custom_interest_member_type",
"CustomInterestStatusEnum": "google.ads.googleads.v7.enums.types.custom_interest_status",
"CustomInterestTypeEnum": "google.ads.googleads.v7.enums.types.custom_interest_type",
"CustomPlaceholderFieldEnum": "google.ads.googleads.v7.enums.types.custom_placeholder_field",
"DataDrivenModelStatusEnum": "google.ads.googleads.v7.enums.types.data_driven_model_status",
"DayOfWeekEnum": "google.ads.googleads.v7.enums.types.day_of_week",
"DeviceEnum": "google.ads.googleads.v7.enums.types.device",
"DisplayAdFormatSettingEnum": "google.ads.googleads.v7.enums.types.display_ad_format_setting",
"DisplayUploadProductTypeEnum": "google.ads.googleads.v7.enums.types.display_upload_product_type",
"DistanceBucketEnum": "google.ads.googleads.v7.enums.types.distance_bucket",
"DsaPageFeedCriterionFieldEnum": "google.ads.googleads.v7.enums.types.dsa_page_feed_criterion_field",
"EducationPlaceholderFieldEnum": "google.ads.googleads.v7.enums.types.education_placeholder_field",
"ExtensionSettingDeviceEnum": "google.ads.googleads.v7.enums.types.extension_setting_device",
"ExtensionTypeEnum": "google.ads.googleads.v7.enums.types.extension_type",
"ExternalConversionSourceEnum": "google.ads.googleads.v7.enums.types.external_conversion_source",
"FeedAttributeTypeEnum": "google.ads.googleads.v7.enums.types.feed_attribute_type",
"FeedItemQualityApprovalStatusEnum": "google.ads.googleads.v7.enums.types.feed_item_quality_approval_status",
"FeedItemQualityDisapprovalReasonEnum": "google.ads.googleads.v7.enums.types.feed_item_quality_disapproval_reason",
"FeedItemSetStatusEnum": "google.ads.googleads.v7.enums.types.feed_item_set_status",
"FeedItemSetStringFilterTypeEnum": "google.ads.googleads.v7.enums.types.feed_item_set_string_filter_type",
"FeedItemStatusEnum": "google.ads.googleads.v7.enums.types.feed_item_status",
"FeedItemTargetDeviceEnum": "google.ads.googleads.v7.enums.types.feed_item_target_device",
"FeedItemTargetStatusEnum": "google.ads.googleads.v7.enums.types.feed_item_target_status",
"FeedItemTargetTypeEnum": "google.ads.googleads.v7.enums.types.feed_item_target_type",
"FeedItemValidationStatusEnum": "google.ads.googleads.v7.enums.types.feed_item_validation_status",
"FeedLinkStatusEnum": "google.ads.googleads.v7.enums.types.feed_link_status",
"FeedMappingCriterionTypeEnum": "google.ads.googleads.v7.enums.types.feed_mapping_criterion_type",
"FeedMappingStatusEnum": "google.ads.googleads.v7.enums.types.feed_mapping_status",
"FeedOriginEnum": "google.ads.googleads.v7.enums.types.feed_origin",
"FeedStatusEnum": "google.ads.googleads.v7.enums.types.feed_status",
"FlightPlaceholderFieldEnum": "google.ads.googleads.v7.enums.types.flight_placeholder_field",
"FrequencyCapEventTypeEnum": "google.ads.googleads.v7.enums.types.frequency_cap_event_type",
"FrequencyCapLevelEnum": "google.ads.googleads.v7.enums.types.frequency_cap_level",
"FrequencyCapTimeUnitEnum": "google.ads.googleads.v7.enums.types.frequency_cap_time_unit",
"GenderTypeEnum": "google.ads.googleads.v7.enums.types.gender_type",
"GeoTargetConstantStatusEnum": "google.ads.googleads.v7.enums.types.geo_target_constant_status",
"GeoTargetingRestrictionEnum": "google.ads.googleads.v7.enums.types.geo_targeting_restriction",
"GeoTargetingTypeEnum": "google.ads.googleads.v7.enums.types.geo_targeting_type",
"GoogleAdsFieldCategoryEnum": "google.ads.googleads.v7.enums.types.google_ads_field_category",
"GoogleAdsFieldDataTypeEnum": "google.ads.googleads.v7.enums.types.google_ads_field_data_type",
"GoogleVoiceCallStatusEnum": "google.ads.googleads.v7.enums.types.google_voice_call_status",
"HotelDateSelectionTypeEnum": "google.ads.googleads.v7.enums.types.hotel_date_selection_type",
"HotelPlaceholderFieldEnum": "google.ads.googleads.v7.enums.types.hotel_placeholder_field",
"HotelPriceBucketEnum": "google.ads.googleads.v7.enums.types.hotel_price_bucket",
"HotelRateTypeEnum": "google.ads.googleads.v7.enums.types.hotel_rate_type",
"ImagePlaceholderFieldEnum": "google.ads.googleads.v7.enums.types.image_placeholder_field",
"IncomeRangeTypeEnum": "google.ads.googleads.v7.enums.types.income_range_type",
"InteractionEventTypeEnum": "google.ads.googleads.v7.enums.types.interaction_event_type",
"InteractionTypeEnum": "google.ads.googleads.v7.enums.types.interaction_type",
"InvoiceTypeEnum": "google.ads.googleads.v7.enums.types.invoice_type",
"JobPlaceholderFieldEnum": "google.ads.googleads.v7.enums.types.job_placeholder_field",
"KeywordMatchTypeEnum": "google.ads.googleads.v7.enums.types.keyword_match_type",
"KeywordPlanAggregateMetricTypeEnum": "google.ads.googleads.v7.enums.types.keyword_plan_aggregate_metric_type",
"KeywordPlanCompetitionLevelEnum": "google.ads.googleads.v7.enums.types.keyword_plan_competition_level",
"KeywordPlanConceptGroupTypeEnum": "google.ads.googleads.v7.enums.types.keyword_plan_concept_group_type",
"KeywordPlanForecastIntervalEnum": "google.ads.googleads.v7.enums.types.keyword_plan_forecast_interval",
"KeywordPlanKeywordAnnotationEnum": "google.ads.googleads.v7.enums.types.keyword_plan_keyword_annotation",
"KeywordPlanNetworkEnum": "google.ads.googleads.v7.enums.types.keyword_plan_network",
"LabelStatusEnum": "google.ads.googleads.v7.enums.types.label_status",
"LeadFormCallToActionTypeEnum": "google.ads.googleads.v7.enums.types.lead_form_call_to_action_type",
"LeadFormDesiredIntentEnum": "google.ads.googleads.v7.enums.types.lead_form_desired_intent",
"LeadFormFieldUserInputTypeEnum": "google.ads.googleads.v7.enums.types.lead_form_field_user_input_type",
"LeadFormPostSubmitCallToActionTypeEnum": "google.ads.googleads.v7.enums.types.lead_form_post_submit_call_to_action_type",
"LegacyAppInstallAdAppStoreEnum": "google.ads.googleads.v7.enums.types.legacy_app_install_ad_app_store",
"LinkedAccountTypeEnum": "google.ads.googleads.v7.enums.types.linked_account_type",
"ListingGroupTypeEnum": "google.ads.googleads.v7.enums.types.listing_group_type",
"LocalPlaceholderFieldEnum": "google.ads.googleads.v7.enums.types.local_placeholder_field",
"LocationExtensionTargetingCriterionFieldEnum": "google.ads.googleads.v7.enums.types.location_extension_targeting_criterion_field",
"LocationGroupRadiusUnitsEnum": "google.ads.googleads.v7.enums.types.location_group_radius_units",
"LocationPlaceholderFieldEnum": "google.ads.googleads.v7.enums.types.location_placeholder_field",
"LocationSourceTypeEnum": "google.ads.googleads.v7.enums.types.location_source_type",
"ManagerLinkStatusEnum": "google.ads.googleads.v7.enums.types.manager_link_status",
"MatchingFunctionContextTypeEnum": "google.ads.googleads.v7.enums.types.matching_function_context_type",
"MatchingFunctionOperatorEnum": "google.ads.googleads.v7.enums.types.matching_function_operator",
"MediaTypeEnum": "google.ads.googleads.v7.enums.types.media_type",
"MerchantCenterLinkStatusEnum": "google.ads.googleads.v7.enums.types.merchant_center_link_status",
"MessagePlaceholderFieldEnum": "google.ads.googleads.v7.enums.types.message_placeholder_field",
"MimeTypeEnum": "google.ads.googleads.v7.enums.types.mime_type",
"MinuteOfHourEnum": "google.ads.googleads.v7.enums.types.minute_of_hour",
"MobileAppVendorEnum": "google.ads.googleads.v7.enums.types.mobile_app_vendor",
"MobileDeviceTypeEnum": "google.ads.googleads.v7.enums.types.mobile_device_type",
"MonthOfYearEnum": "google.ads.googleads.v7.enums.types.month_of_year",
"NegativeGeoTargetTypeEnum": "google.ads.googleads.v7.enums.types.negative_geo_target_type",
"OfflineUserDataJobFailureReasonEnum": "google.ads.googleads.v7.enums.types.offline_user_data_job_failure_reason",
"OfflineUserDataJobStatusEnum": "google.ads.googleads.v7.enums.types.offline_user_data_job_status",
"OfflineUserDataJobTypeEnum": "google.ads.googleads.v7.enums.types.offline_user_data_job_type",
"OperatingSystemVersionOperatorTypeEnum": "google.ads.googleads.v7.enums.types.operating_system_version_operator_type",
"OptimizationGoalTypeEnum": "google.ads.googleads.v7.enums.types.optimization_goal_type",
"ParentalStatusTypeEnum": "google.ads.googleads.v7.enums.types.parental_status_type",
"PaymentModeEnum": "google.ads.googleads.v7.enums.types.payment_mode",
"PlaceholderTypeEnum": "google.ads.googleads.v7.enums.types.placeholder_type",
"PlacementTypeEnum": "google.ads.googleads.v7.enums.types.placement_type",
"PolicyApprovalStatusEnum": "google.ads.googleads.v7.enums.types.policy_approval_status",
"PolicyReviewStatusEnum": "google.ads.googleads.v7.enums.types.policy_review_status",
"PolicyTopicEntryTypeEnum": "google.ads.googleads.v7.enums.types.policy_topic_entry_type",
"PolicyTopicEvidenceDestinationMismatchUrlTypeEnum": "google.ads.googleads.v7.enums.types.policy_topic_evidence_destination_mismatch_url_type",
"PolicyTopicEvidenceDestinationNotWorkingDeviceEnum": "google.ads.googleads.v7.enums.types.policy_topic_evidence_destination_not_working_device",
"PolicyTopicEvidenceDestinationNotWorkingDnsErrorTypeEnum": "google.ads.googleads.v7.enums.types.policy_topic_evidence_destination_not_working_dns_error_type",
"PositiveGeoTargetTypeEnum": "google.ads.googleads.v7.enums.types.positive_geo_target_type",
"PreferredContentTypeEnum": "google.ads.googleads.v7.enums.types.preferred_content_type",
"PriceExtensionPriceQualifierEnum": "google.ads.googleads.v7.enums.types.price_extension_price_qualifier",
"PriceExtensionPriceUnitEnum": "google.ads.googleads.v7.enums.types.price_extension_price_unit",
"PriceExtensionTypeEnum": "google.ads.googleads.v7.enums.types.price_extension_type",
"PricePlaceholderFieldEnum": "google.ads.googleads.v7.enums.types.price_placeholder_field",
"ProductBiddingCategoryLevelEnum": "google.ads.googleads.v7.enums.types.product_bidding_category_level",
"ProductBiddingCategoryStatusEnum": "google.ads.googleads.v7.enums.types.product_bidding_category_status",
"ProductChannelEnum": "google.ads.googleads.v7.enums.types.product_channel",
"ProductChannelExclusivityEnum": "google.ads.googleads.v7.enums.types.product_channel_exclusivity",
"ProductConditionEnum": "google.ads.googleads.v7.enums.types.product_condition",
"ProductCustomAttributeIndexEnum": "google.ads.googleads.v7.enums.types.product_custom_attribute_index",
"ProductTypeLevelEnum": "google.ads.googleads.v7.enums.types.product_type_level",
"PromotionExtensionDiscountModifierEnum": "google.ads.googleads.v7.enums.types.promotion_extension_discount_modifier",
"PromotionExtensionOccasionEnum": "google.ads.googleads.v7.enums.types.promotion_extension_occasion",
"PromotionPlaceholderFieldEnum": "google.ads.googleads.v7.enums.types.promotion_placeholder_field",
"ProximityRadiusUnitsEnum": "google.ads.googleads.v7.enums.types.proximity_radius_units",
"QualityScoreBucketEnum": "google.ads.googleads.v7.enums.types.quality_score_bucket",
"ReachPlanAdLengthEnum": "google.ads.googleads.v7.enums.types.reach_plan_ad_length",
"ReachPlanAgeRangeEnum": "google.ads.googleads.v7.enums.types.reach_plan_age_range",
"ReachPlanNetworkEnum": "google.ads.googleads.v7.enums.types.reach_plan_network",
"RealEstatePlaceholderFieldEnum": "google.ads.googleads.v7.enums.types.real_estate_placeholder_field",
"RecommendationTypeEnum": "google.ads.googleads.v7.enums.types.recommendation_type",
"ResourceChangeOperationEnum": "google.ads.googleads.v7.enums.types.resource_change_operation",
"ResourceLimitTypeEnum": "google.ads.googleads.v7.enums.types.resource_limit_type",
"ResponseContentTypeEnum": "google.ads.googleads.v7.enums.types.response_content_type",
"SearchEngineResultsPageTypeEnum": "google.ads.googleads.v7.enums.types.search_engine_results_page_type",
"SearchTermMatchTypeEnum": "google.ads.googleads.v7.enums.types.search_term_match_type",
"SearchTermTargetingStatusEnum": "google.ads.googleads.v7.enums.types.search_term_targeting_status",
"ServedAssetFieldTypeEnum": "google.ads.googleads.v7.enums.types.served_asset_field_type",
"SharedSetStatusEnum": "google.ads.googleads.v7.enums.types.shared_set_status",
"SharedSetTypeEnum": "google.ads.googleads.v7.enums.types.shared_set_type",
"SimulationModificationMethodEnum": "google.ads.googleads.v7.enums.types.simulation_modification_method",
"SimulationTypeEnum": "google.ads.googleads.v7.enums.types.simulation_type",
"SitelinkPlaceholderFieldEnum": "google.ads.googleads.v7.enums.types.sitelink_placeholder_field",
"SlotEnum": "google.ads.googleads.v7.enums.types.slot",
"SpendingLimitTypeEnum": "google.ads.googleads.v7.enums.types.spending_limit_type",
"StructuredSnippetPlaceholderFieldEnum": "google.ads.googleads.v7.enums.types.structured_snippet_placeholder_field",
"SummaryRowSettingEnum": "google.ads.googleads.v7.enums.types.summary_row_setting",
"SystemManagedResourceSourceEnum": "google.ads.googleads.v7.enums.types.system_managed_entity_source",
"TargetCpaOptInRecommendationGoalEnum": "google.ads.googleads.v7.enums.types.target_cpa_opt_in_recommendation_goal",
"TargetImpressionShareLocationEnum": "google.ads.googleads.v7.enums.types.target_impression_share_location",
"TargetingDimensionEnum": "google.ads.googleads.v7.enums.types.targeting_dimension",
"TimeTypeEnum": "google.ads.googleads.v7.enums.types.time_type",
"TrackingCodePageFormatEnum": "google.ads.googleads.v7.enums.types.tracking_code_page_format",
"TrackingCodeTypeEnum": "google.ads.googleads.v7.enums.types.tracking_code_type",
"TravelPlaceholderFieldEnum": "google.ads.googleads.v7.enums.types.travel_placeholder_field",
"UserIdentifierSourceEnum": "google.ads.googleads.v7.enums.types.user_identifier_source",
"UserInterestTaxonomyTypeEnum": "google.ads.googleads.v7.enums.types.user_interest_taxonomy_type",
"UserListAccessStatusEnum": "google.ads.googleads.v7.enums.types.user_list_access_status",
"UserListClosingReasonEnum": "google.ads.googleads.v7.enums.types.user_list_closing_reason",
"UserListCombinedRuleOperatorEnum": "google.ads.googleads.v7.enums.types.user_list_combined_rule_operator",
"UserListCrmDataSourceTypeEnum": "google.ads.googleads.v7.enums.types.user_list_crm_data_source_type",
"UserListDateRuleItemOperatorEnum": "google.ads.googleads.v7.enums.types.user_list_date_rule_item_operator",
"UserListLogicalRuleOperatorEnum": "google.ads.googleads.v7.enums.types.user_list_logical_rule_operator",
"UserListMembershipStatusEnum": "google.ads.googleads.v7.enums.types.user_list_membership_status",
"UserListNumberRuleItemOperatorEnum": "google.ads.googleads.v7.enums.types.user_list_number_rule_item_operator",
"UserListPrepopulationStatusEnum": "google.ads.googleads.v7.enums.types.user_list_prepopulation_status",
"UserListRuleTypeEnum": "google.ads.googleads.v7.enums.types.user_list_rule_type",
"UserListSizeRangeEnum": "google.ads.googleads.v7.enums.types.user_list_size_range",
"UserListStringRuleItemOperatorEnum": "google.ads.googleads.v7.enums.types.user_list_string_rule_item_operator",
"UserListTypeEnum": "google.ads.googleads.v7.enums.types.user_list_type",
"VanityPharmaDisplayUrlModeEnum": "google.ads.googleads.v7.enums.types.vanity_pharma_display_url_mode",
"VanityPharmaTextEnum": "google.ads.googleads.v7.enums.types.vanity_pharma_text",
"WebpageConditionOperandEnum": "google.ads.googleads.v7.enums.types.webpage_condition_operand",
"WebpageConditionOperatorEnum": "google.ads.googleads.v7.enums.types.webpage_condition_operator",
"AccessInvitationErrorEnum": "google.ads.googleads.v7.errors.types.access_invitation_error",
"AccountBudgetProposalErrorEnum": "google.ads.googleads.v7.errors.types.account_budget_proposal_error",
"AccountLinkErrorEnum": "google.ads.googleads.v7.errors.types.account_link_error",
"AdCustomizerErrorEnum": "google.ads.googleads.v7.errors.types.ad_customizer_error",
"AdErrorEnum": "google.ads.googleads.v7.errors.types.ad_error",
"AdGroupAdErrorEnum": "google.ads.googleads.v7.errors.types.ad_group_ad_error",
"AdGroupBidModifierErrorEnum": "google.ads.googleads.v7.errors.types.ad_group_bid_modifier_error",
"AdGroupCriterionErrorEnum": "google.ads.googleads.v7.errors.types.ad_group_criterion_error",
"AdGroupErrorEnum": "google.ads.googleads.v7.errors.types.ad_group_error",
"AdGroupFeedErrorEnum": "google.ads.googleads.v7.errors.types.ad_group_feed_error",
"AdParameterErrorEnum": "google.ads.googleads.v7.errors.types.ad_parameter_error",
"AdSharingErrorEnum": "google.ads.googleads.v7.errors.types.ad_sharing_error",
"AdxErrorEnum": "google.ads.googleads.v7.errors.types.adx_error",
"AssetErrorEnum": "google.ads.googleads.v7.errors.types.asset_error",
"AssetLinkErrorEnum": "google.ads.googleads.v7.errors.types.asset_link_error",
"AuthenticationErrorEnum": "google.ads.googleads.v7.errors.types.authentication_error",
"AuthorizationErrorEnum": "google.ads.googleads.v7.errors.types.authorization_error",
"BatchJobErrorEnum": "google.ads.googleads.v7.errors.types.batch_job_error",
"BiddingErrorEnum": "google.ads.googleads.v7.errors.types.bidding_error",
"BiddingStrategyErrorEnum": "google.ads.googleads.v7.errors.types.bidding_strategy_error",
"BillingSetupErrorEnum": "google.ads.googleads.v7.errors.types.billing_setup_error",
"CampaignBudgetErrorEnum": "google.ads.googleads.v7.errors.types.campaign_budget_error",
"CampaignCriterionErrorEnum": "google.ads.googleads.v7.errors.types.campaign_criterion_error",
"CampaignDraftErrorEnum": "google.ads.googleads.v7.errors.types.campaign_draft_error",
"CampaignErrorEnum": "google.ads.googleads.v7.errors.types.campaign_error",
"CampaignExperimentErrorEnum": "google.ads.googleads.v7.errors.types.campaign_experiment_error",
"CampaignFeedErrorEnum": "google.ads.googleads.v7.errors.types.campaign_feed_error",
"CampaignSharedSetErrorEnum": "google.ads.googleads.v7.errors.types.campaign_shared_set_error",
"ChangeEventErrorEnum": "google.ads.googleads.v7.errors.types.change_event_error",
"ChangeStatusErrorEnum": "google.ads.googleads.v7.errors.types.change_status_error",
"CollectionSizeErrorEnum": "google.ads.googleads.v7.errors.types.collection_size_error",
"ContextErrorEnum": "google.ads.googleads.v7.errors.types.context_error",
"ConversionActionErrorEnum": "google.ads.googleads.v7.errors.types.conversion_action_error",
"ConversionAdjustmentUploadErrorEnum": "google.ads.googleads.v7.errors.types.conversion_adjustment_upload_error",
"ConversionCustomVariableErrorEnum": "google.ads.googleads.v7.errors.types.conversion_custom_variable_error",
"ConversionUploadErrorEnum": "google.ads.googleads.v7.errors.types.conversion_upload_error",
"CountryCodeErrorEnum": "google.ads.googleads.v7.errors.types.country_code_error",
"CriterionErrorEnum": "google.ads.googleads.v7.errors.types.criterion_error",
"CurrencyCodeErrorEnum": "google.ads.googleads.v7.errors.types.currency_code_error",
"CustomAudienceErrorEnum": "google.ads.googleads.v7.errors.types.custom_audience_error",
"CustomerClientLinkErrorEnum": "google.ads.googleads.v7.errors.types.customer_client_link_error",
"CustomerErrorEnum": "google.ads.googleads.v7.errors.types.customer_error",
"CustomerFeedErrorEnum": "google.ads.googleads.v7.errors.types.customer_feed_error",
"CustomerManagerLinkErrorEnum": "google.ads.googleads.v7.errors.types.customer_manager_link_error",
"CustomerUserAccessErrorEnum": "google.ads.googleads.v7.errors.types.customer_user_access_error",
"CustomInterestErrorEnum": "google.ads.googleads.v7.errors.types.custom_interest_error",
"DatabaseErrorEnum": "google.ads.googleads.v7.errors.types.database_error",
"DateErrorEnum": "google.ads.googleads.v7.errors.types.date_error",
"DateRangeErrorEnum": "google.ads.googleads.v7.errors.types.date_range_error",
"DistinctErrorEnum": "google.ads.googleads.v7.errors.types.distinct_error",
"EnumErrorEnum": "google.ads.googleads.v7.errors.types.enum_error",
"ErrorCode": "google.ads.googleads.v7.errors.types.errors",
"ErrorDetails": "google.ads.googleads.v7.errors.types.errors",
"ErrorLocation": "google.ads.googleads.v7.errors.types.errors",
"ExtensionFeedItemErrorEnum": "google.ads.googleads.v7.errors.types.extension_feed_item_error",
"ExtensionSettingErrorEnum": "google.ads.googleads.v7.errors.types.extension_setting_error",
"FeedAttributeReferenceErrorEnum": "google.ads.googleads.v7.errors.types.feed_attribute_reference_error",
"FeedErrorEnum": "google.ads.googleads.v7.errors.types.feed_error",
"FeedItemErrorEnum": "google.ads.googleads.v7.errors.types.feed_item_error",
"FeedItemSetErrorEnum": "google.ads.googleads.v7.errors.types.feed_item_set_error",
"FeedItemSetLinkErrorEnum": "google.ads.googleads.v7.errors.types.feed_item_set_link_error",
"FeedItemTargetErrorEnum": "google.ads.googleads.v7.errors.types.feed_item_target_error",
"FeedItemValidationErrorEnum": "google.ads.googleads.v7.errors.types.feed_item_validation_error",
"FeedMappingErrorEnum": "google.ads.googleads.v7.errors.types.feed_mapping_error",
"FieldErrorEnum": "google.ads.googleads.v7.errors.types.field_error",
"FieldMaskErrorEnum": "google.ads.googleads.v7.errors.types.field_mask_error",
"FunctionErrorEnum": "google.ads.googleads.v7.errors.types.function_error",
"FunctionParsingErrorEnum": "google.ads.googleads.v7.errors.types.function_parsing_error",
"GeoTargetConstantSuggestionErrorEnum": "google.ads.googleads.v7.errors.types.geo_target_constant_suggestion_error",
"GoogleAdsError": "google.ads.googleads.v7.errors.types.errors",
"GoogleAdsFailure": "google.ads.googleads.v7.errors.types.errors",
"HeaderErrorEnum": "google.ads.googleads.v7.errors.types.header_error",
"IdErrorEnum": "google.ads.googleads.v7.errors.types.id_error",
"ImageErrorEnum": "google.ads.googleads.v7.errors.types.image_error",
"InternalErrorEnum": "google.ads.googleads.v7.errors.types.internal_error",
"InvoiceErrorEnum": "google.ads.googleads.v7.errors.types.invoice_error",
"KeywordPlanAdGroupErrorEnum": "google.ads.googleads.v7.errors.types.keyword_plan_ad_group_error",
"KeywordPlanAdGroupKeywordErrorEnum": "google.ads.googleads.v7.errors.types.keyword_plan_ad_group_keyword_error",
"KeywordPlanCampaignErrorEnum": "google.ads.googleads.v7.errors.types.keyword_plan_campaign_error",
"KeywordPlanCampaignKeywordErrorEnum": "google.ads.googleads.v7.errors.types.keyword_plan_campaign_keyword_error",
"KeywordPlanErrorEnum": "google.ads.googleads.v7.errors.types.keyword_plan_error",
"KeywordPlanIdeaErrorEnum": "google.ads.googleads.v7.errors.types.keyword_plan_idea_error",
"LabelErrorEnum": "google.ads.googleads.v7.errors.types.label_error",
"LanguageCodeErrorEnum": "google.ads.googleads.v7.errors.types.language_code_error",
"ListOperationErrorEnum": "google.ads.googleads.v7.errors.types.list_operation_error",
"ManagerLinkErrorEnum": "google.ads.googleads.v7.errors.types.manager_link_error",
"MediaBundleErrorEnum": "google.ads.googleads.v7.errors.types.media_bundle_error",
"MediaFileErrorEnum": "google.ads.googleads.v7.errors.types.media_file_error",
"MediaUploadErrorEnum": "google.ads.googleads.v7.errors.types.media_upload_error",
"MultiplierErrorEnum": "google.ads.googleads.v7.errors.types.multiplier_error",
"MutateErrorEnum": "google.ads.googleads.v7.errors.types.mutate_error",
"NewResourceCreationErrorEnum": "google.ads.googleads.v7.errors.types.new_resource_creation_error",
"NotAllowlistedErrorEnum": "google.ads.googleads.v7.errors.types.not_allowlisted_error",
"NotEmptyErrorEnum": "google.ads.googleads.v7.errors.types.not_empty_error",
"NullErrorEnum": "google.ads.googleads.v7.errors.types.null_error",
"OfflineUserDataJobErrorEnum": "google.ads.googleads.v7.errors.types.offline_user_data_job_error",
"OperationAccessDeniedErrorEnum": "google.ads.googleads.v7.errors.types.operation_access_denied_error",
"OperatorErrorEnum": "google.ads.googleads.v7.errors.types.operator_error",
"PartialFailureErrorEnum": "google.ads.googleads.v7.errors.types.partial_failure_error",
"PaymentsAccountErrorEnum": "google.ads.googleads.v7.errors.types.payments_account_error",
"PolicyFindingDetails": "google.ads.googleads.v7.errors.types.errors",
"PolicyFindingErrorEnum": "google.ads.googleads.v7.errors.types.policy_finding_error",
"PolicyValidationParameterErrorEnum": "google.ads.googleads.v7.errors.types.policy_validation_parameter_error",
"PolicyViolationDetails": "google.ads.googleads.v7.errors.types.errors",
"PolicyViolationErrorEnum": "google.ads.googleads.v7.errors.types.policy_violation_error",
"QueryErrorEnum": "google.ads.googleads.v7.errors.types.query_error",
"QuotaErrorDetails": "google.ads.googleads.v7.errors.types.errors",
"QuotaErrorEnum": "google.ads.googleads.v7.errors.types.quota_error",
"RangeErrorEnum": "google.ads.googleads.v7.errors.types.range_error",
"ReachPlanErrorEnum": "google.ads.googleads.v7.errors.types.reach_plan_error",
"RecommendationErrorEnum": "google.ads.googleads.v7.errors.types.recommendation_error",
"RegionCodeErrorEnum": "google.ads.googleads.v7.errors.types.region_code_error",
"RequestErrorEnum": "google.ads.googleads.v7.errors.types.request_error",
"ResourceAccessDeniedErrorEnum": "google.ads.googleads.v7.errors.types.resource_access_denied_error",
"ResourceCountDetails": "google.ads.googleads.v7.errors.types.errors",
"ResourceCountLimitExceededErrorEnum": "google.ads.googleads.v7.errors.types.resource_count_limit_exceeded_error",
"SettingErrorEnum": "google.ads.googleads.v7.errors.types.setting_error",
"SharedCriterionErrorEnum": "google.ads.googleads.v7.errors.types.shared_criterion_error",
| |
import numpy as np
import random
import math
import time
from collections import Counter
class StringKernel:
    """Abstract interface for string kernels over character n-grams.

    Concrete subclasses (intersection, presence, ...) implement the
    actual similarity computations; every method here is a stub that
    intentionally does nothing and returns None.
    """

    def __init__(self):
        """No shared state; exists so subclasses can delegate here."""

    @staticmethod
    def compute_kernel_two_strings(string1, string2, ngram_range_min, ngram_range_max, clusters=None):
        """Return the kernel value between two strings (abstract stub)."""

    @staticmethod
    def compute_kernel_string_listofstrings(string1, strings, ngram_range_min, ngram_range_max, clusters=None):
        """Return kernel values between ``string1`` and each of ``strings`` (abstract stub)."""

    @staticmethod
    def compute_kernel_listofstrings(strings, ngram_range_min, ngram_range_max, normalize=False):
        """Return the full kernel matrix over ``strings`` (abstract stub)."""

    @staticmethod
    def run(dataset, ngram_range_min, ngram_range_max, normalize=False, clusters=None):
        """Evaluate the kernel on ``dataset`` and return a score (abstract stub)."""
class IntersectionStringKernel(StringKernel):
    """Histogram-intersection kernel over character n-grams.

    K(s1, s2) = sum over n-grams g of min(count(g, s1), count(g, s2)),
    with n-gram lengths in [ngram_range_min, ngram_range_max].  When
    ``clusters`` (an iterable of n-gram groups) is supplied, n-grams in
    the same cluster pool their counts into a single feature.
    """

    def __init__(self):
        super().__init__()

    @staticmethod
    def compute_kernel_two_strings(string1, string2, ngram_range_min, ngram_range_max, clusters=None):
        """Return the min-count n-gram overlap of two strings as a float.

        Dispatches to the cluster-aware variant when clusters are given.
        """
        if clusters is not None and len(clusters) > 0:
            return IntersectionStringKernel.compute_kernel_two_strings_clusters2(
                string1, string2, ngram_range_min, ngram_range_max, clusters)
        # n-gram -> remaining matchable count contributed by string1.
        ngrams = {}
        for char_index in range(len(string1)):
            for d in range(ngram_range_min, ngram_range_max + 1):
                if char_index + d <= len(string1):
                    ngram = string1[char_index:char_index + d]
                    ngrams[ngram] = ngrams.get(ngram, 0) + 1
        kernel = 0
        for char_index in range(len(string2)):
            for d in range(ngram_range_min, ngram_range_max + 1):
                if char_index + d <= len(string2):
                    ngram = string2[char_index:char_index + d]
                    # Consume one unit of the shared budget, if any remains.
                    if ngrams.get(ngram, 0) > 0:
                        kernel += 1
                        ngrams[ngram] -= 1
        return 1.0 * kernel

    @staticmethod
    def compute_kernel_two_strings_clusters(string1, string2, ngram_range_min, ngram_range_max, clusters):
        """Cluster-aware intersection kernel (legacy implementation).

        Non-cluster n-grams contribute min-count overlap individually;
        each cluster contributes min of the summed occurrence counts of
        its member n-grams in the two strings.

        Bug fix: the original added *every* n-gram of string1 to the
        per-n-gram histogram (the cluster check ended in a no-op
        ``x = False``), so cluster n-grams were counted twice — once
        individually and once through the cluster sums.  Cluster members
        are now excluded from the individual histogram, matching
        PresenceStringKernel.compute_kernel_two_strings_clusters.
        """
        ngrams = {}
        # Memoized "does this n-gram belong to any cluster" lookup.
        ngrams_in_clusters = {}
        for char_index in range(len(string1)):
            for d in range(ngram_range_min, ngram_range_max + 1):
                if char_index + d <= len(string1):
                    ngram = string1[char_index:char_index + d]
                    if ngram not in ngrams_in_clusters:
                        ngrams_in_clusters[ngram] = any(ngram in cluster for cluster in clusters)
                    if not ngrams_in_clusters[ngram]:
                        ngrams[ngram] = ngrams.get(ngram, 0) + 1
        kernel = 0
        for char_index in range(len(string2)):
            for d in range(ngram_range_min, ngram_range_max + 1):
                if char_index + d <= len(string2):
                    ngram = string2[char_index:char_index + d]
                    if ngrams.get(ngram, 0) > 0:
                        kernel += 1
                        ngrams[ngram] -= 1
        # Per-cluster totals of member n-gram occurrences in each string
        # (only members whose length falls in the configured range).
        d1 = {cl: 0 for cl in range(len(clusters))}
        d2 = {cl: 0 for cl in range(len(clusters))}
        for cl_index, cl in enumerate(clusters):
            for ngram in cl:
                if ngram_range_min <= len(ngram) <= ngram_range_max:
                    d1[cl_index] += string1.count(ngram)
                    d2[cl_index] += string2.count(ngram)
        for cl in range(len(clusters)):
            kernel += min(d1[cl], d2[cl])
        return kernel

    @staticmethod
    def compute_kernel_two_strings_clusters2(string1, string2, ngram_range_min, ngram_range_max, clusters):
        """Cluster-aware intersection kernel via feature remapping.

        Every n-gram maps to an integer feature id; n-grams in the same
        cluster share one id so their counts pool together.  The kernel
        is the histogram intersection over feature ids.
        """
        index = 0
        clusters_dict = {}
        for cluster in clusters:
            for ngram in cluster:
                clusters_dict[ngram] = index
            index += 1
        s1_ngram = Counter()
        s2_ngram = Counter()
        for source, histogram in ((string1, s1_ngram), (string2, s2_ngram)):
            for char_index in range(len(source)):
                for d in range(ngram_range_min, ngram_range_max + 1):
                    if char_index + d <= len(source):
                        ngram = source[char_index:char_index + d]
                        if ngram not in clusters_dict:
                            # Unclustered n-gram gets its own fresh feature id.
                            clusters_dict[ngram] = index
                            index += 1
                        histogram[clusters_dict[ngram]] += 1
        # Counter returns 0 for missing features, so plain iteration suffices.
        return sum(min(count, s2_ngram[feature]) for feature, count in s1_ngram.items())

    @staticmethod
    def compute_kernel_string_listofstrings(string1, strings, ngram_range_min, ngram_range_max, normalize=False, clusters=None):
        """Return an np.array of kernel values between ``string1`` and each entry of ``strings``.

        With ``normalize`` truthy, values are cosine-normalized by the
        self-kernels; a zero self-kernel yields 0.0 rather than dividing
        by zero.  (The original ``== True`` / ``== False`` chain silently
        dropped entries for truthy non-bool values; the self-kernel of
        ``string1`` is also hoisted out of the loop.)
        """
        kernels = []
        if normalize:
            i = IntersectionStringKernel.compute_kernel_two_strings(
                string1, string1, ngram_range_min, ngram_range_max, clusters)
        for string2 in strings:
            pr = IntersectionStringKernel.compute_kernel_two_strings(
                string1, string2, ngram_range_min, ngram_range_max, clusters)
            if normalize:
                j = IntersectionStringKernel.compute_kernel_two_strings(
                    string2, string2, ngram_range_min, ngram_range_max, clusters)
                if i == 0.0 or j == 0.0:
                    kernels.append(0.0)
                else:
                    kernels.append(pr / math.sqrt(i * j))
            else:
                kernels.append(pr)
        return np.array(kernels)

    @staticmethod
    def compute_kernel_listofstrings(strings, ngram_range_min, ngram_range_max, normalize=False):
        """Return the full len(strings) x len(strings) kernel matrix.

        Bug fixes relative to the original: normalization now divides by
        the *original* diagonal (the old in-place loop set kernels[i][i]
        to 1.0 and then reused that normalized value for later entries),
        zero self-kernels map to 0.0 instead of raising, and the leftover
        debug prints are gone.
        """
        kernels = [
            IntersectionStringKernel.compute_kernel_string_listofstrings(
                string1, strings, ngram_range_min, ngram_range_max)
            for string1 in strings
        ]
        if normalize:
            diagonal = [kernels[i][i] for i in range(len(kernels))]
            for i, row in enumerate(kernels):
                for j in range(len(row)):
                    denom = math.sqrt(diagonal[i] * diagonal[j])
                    kernels[i][j] = kernels[i][j] / denom if denom != 0.0 else 0.0
        return np.array(kernels)

    @staticmethod
    def run(dataset, ngram_range_min, ngram_range_max, normalize=False, clusters=None):
        """Answer-selection accuracy of the kernel on ``dataset``.

        For each entry, the pool answer with the highest kernel score
        against the question is chosen (ties broken uniformly at random)
        and checked for membership in the gold answers.  Entries with an
        empty answer pool count as wrong.  Returns correct/total as a
        float (0.0 for an empty dataset instead of dividing by zero).
        """
        correct = 0
        total = 0
        for entry in dataset.data:
            if len(entry.answer_pool) == 0:
                total += 1
                continue
            question = entry.question
            similarity_scores = IntersectionStringKernel.compute_kernel_string_listofstrings(
                question, entry.answer_pool, ngram_range_min, ngram_range_max, normalize, clusters)
            max_indexes = np.argwhere(similarity_scores == np.max(similarity_scores)).flatten().tolist()
            random_max_index = random.choice(max_indexes)
            if entry.answer_pool[random_max_index] in entry.correct_answer:
                correct += 1
            total += 1
        return 1.0 * correct / total if total else 0.0
class PresenceStringKernel(StringKernel):
    def __init__(self):
        """Stateless; delegate to the StringKernel base initializer."""
        super().__init__()
@staticmethod
def compute_kernel_two_strings(string1, string2, ngram_range_min, ngram_range_max, clusters=None):
if clusters != None and len(clusters) > 0:
return PresenceStringKernel.compute_kernel_two_strings_clusters2(string1, string2, ngram_range_min, ngram_range_max, clusters)
ngrams = {}
for char_index, char in enumerate(string1):
for d in range(ngram_range_min, ngram_range_max+1):
if char_index + d <= len(string1):
ngram = string1[char_index:char_index+d]
ngrams[ngram] = 1
kernel = 0
for char_index, char in enumerate(string2):
for d in range(ngram_range_min, ngram_range_max+1):
if char_index + d <= len(string2):
ngram = string2[char_index:char_index+d]
if (ngram in ngrams):
kernel += 1
ngrams.pop(ngram)
return 1.0*kernel
@staticmethod
def compute_kernel_two_strings_clusters(string1, string2, ngram_range_min, ngram_range_max, clusters):
ngrams = {}
ngrams_in_clusters = {}
for char_index, char in enumerate(string1):
for d in range(ngram_range_min, ngram_range_max + 1):
if char_index + d <= len(string1):
ngram = string1[char_index:char_index + d]
if ngram not in ngrams_in_clusters:
found = False
for cluster in clusters:
if ngram in cluster:
found = True
break
ngrams_in_clusters[ngram] = found
if ngrams_in_clusters[ngram] == False:
if ngram not in ngrams:
ngrams[ngram] = 1
else:
ngrams[ngram] = ngrams[ngram] + 1
kernel = 0
for char_index, char in enumerate(string2):
for d in range(ngram_range_min, ngram_range_max + 1):
if char_index + d <= len(string2):
ngram = string2[char_index:char_index + d]
if (ngram in ngrams):
kernel += 1
ngrams.pop(ngram)
d1 = {}
d2 = {}
for cl, _ in enumerate(clusters):
d1[cl] = 0
d2[cl] = 0
for cl_index, cl in enumerate(clusters):
for ngram in cl:
if len(ngram) >= ngram_range_min and len(ngram) <= ngram_range_max:
d1[cl_index] = d1[cl_index] + string1.count(ngram)
d2[cl_index] = d2[cl_index] + string2.count(ngram)
for cl, _ in enumerate(clusters):
if d1[cl] * d2[cl] == 0:
continue
kernel += 1
return kernel
@staticmethod
def compute_kernel_two_strings_clusters2(string1, string2, ngram_range_min, ngram_range_max, clusters):
index = 0
clusters_dict = {}
for cluster in clusters:
for ngram in cluster:
clusters_dict[ngram] = index
index += 1
s1_ngram = Counter()
s2_ngram = Counter()
for char_index, char in enumerate(string1):
for d in range(ngram_range_min, ngram_range_max + 1):
if char_index + d <= len(string1):
ngram = string1[char_index:char_index + d]
if ngram not in clusters_dict:
clusters_dict[ngram] = index
index += 1
s1_ngram[clusters_dict[ngram]] += 1
for char_index, char in enumerate(string2):
for d in range(ngram_range_min, ngram_range_max + 1):
if char_index + d <= len(string2):
ngram = string2[char_index:char_index + d]
if ngram not in clusters_dict:
clusters_dict[ngram] = index
index += 1
s2_ngram[clusters_dict[ngram]] += 1
kernel = 0
for c in s1_ngram:
if s1_ngram[c] * s2_ngram[c] >= 1:
kernel += 1
return kernel
@staticmethod
def compute_kernel_string_listofstrings(string1, strings, ngram_range_min, ngram_range_max, normalize=False, clusters=None):
kernels = []
for string2 in strings:
pr = PresenceStringKernel.compute_kernel_two_strings(string1, string2, ngram_range_min, ngram_range_max, clusters)
if normalize == True:
i = PresenceStringKernel.compute_kernel_two_strings(string1, string1, ngram_range_min, ngram_range_max, clusters)
j = PresenceStringKernel.compute_kernel_two_strings(string2, string2, ngram_range_min, ngram_range_max, clusters)
if i == 0.0 or j == 0.0:
kernels.append(0.0)
else:
kernels.append(pr / math.sqrt(i*j))
elif normalize == False:
kernels.append(pr)
kernels = np.array(kernels)
return kernels
@staticmethod
def compute_kernel_listofstrings(strings, ngram_range_min, ngram_range_max, normalize=False):
kernels = []
for string1 in strings:
kernels.append(PresenceStringKernel.compute_kernel_string_listofstrings(string1, strings, ngram_range_min, ngram_range_max))
if normalize == True:
for i, aux in enumerate(kernels):
for j, _ in enumerate(aux):
print (i,j, kernels[i][j], kernels[i][i], kernels[j][j], math.sqrt(kernels[i][i] * kernels[j][j]))
kernels[i][j] = kernels[i][j] / math.sqrt(kernels[i][i] * kernels[j][j])
print (kernels[i][j])
kernels = np.array(kernels)
return kernels
@staticmethod
def run(dataset, ngram_range_min, ngram_range_max, normalize=False, clusters=None):
correct = 0
total = 0
for entry_index, entry in enumerate(dataset.data):
if len(entry.answer_pool) == 0:
total += 1
continue
question = entry.question
similarity_scores = PresenceStringKernel.compute_kernel_string_listofstrings(question, entry.answer_pool, ngram_range_min, ngram_range_max, normalize, clusters)
max_indexes = np.argwhere(similarity_scores == np.max(similarity_scores)).flatten().tolist()
random_max_index = random.choice(max_indexes)
if entry.answer_pool[random_max_index] in entry.correct_answer:
correct += 1
total += 1
# print("Presence kernel (", ngram_range_min, ngram_range_max, normalize, ") =", 1.0 * correct / total, flush=True)
return 1.0* correct / total
class SpectrumStringKernel(StringKernel):
def __init__(self):
super().__init__()
@staticmethod
def compute_kernel_two_strings(string1, string2, ngram_range_min, ngram_range_max, clusters=None):
if clusters != None and len(clusters) > 0:
return SpectrumStringKernel.compute_kernel_two_strings_clusters2(string1, string2, ngram_range_min, ngram_range_max, clusters)
ngrams = {}
for char_index, char in enumerate(string1):
for d in range(ngram_range_min, ngram_range_max+1):
if char_index + d <= len(string1):
ngram = string1[char_index:char_index+d]
if ngram not in ngrams:
ngrams[ngram] = 1
else:
ngrams[ngram] = ngrams[ngram] + 1
kernel = 0
for char_index, char in enumerate(string2):
for d in range(ngram_range_min, ngram_range_max+1):
if char_index | |
<gh_stars>0
import ply.lex as lex
import ply.yacc as yacc
import sys
from typeValidation import validType, isBool
from exec import execute
NUM_TEMP_VARIABLES = 50
quadruplets = []
quadrupletIndex = 1
operandsStack = []
operatorsStack = []
typesStack = []
jumpsStack = []
ifsStack = []
dosStack = []
exitsStack = []
readWriteVars = []
available = []
def peek(stack):
    """Return the top element of *stack* without removing it, or None if empty.

    (Parameter renamed from ``list`` to stop shadowing the builtin; negative
    indexing replaces the manual ``len() - 1`` arithmetic.)
    """
    if not stack:
        return None
    return stack[-1]
available = []  # pool of free temporary variable names ('#0'..'#49')
for i in range(NUM_TEMP_VARIABLES):
    available.append('#' + str(i))
# User symbols are allocated directions after the temporary pool.
symbolsTableIndex = NUM_TEMP_VARIABLES
symbols = {}  # symbol table: name -> {type, value, direction, ...}
# Token names for ply.lex.  Reserved words are listed both here and in the
# `reserved` dict below, so t_id can re-classify matched identifiers.
tokens = [
    'id',
    'semicolon',
    'openBracket',
    'closeBracket',
    'openParentheses',
    'closeParentheses',
    'doubleEqual',
    'notEqual',
    'biggerOrEqualThan',
    'smallerOrEqualThan',
    'biggerThan',
    'smallerThan',
    'equal',
    'coma',
    'string',
    'comment',
    'plusSign',
    'minusSign',
    'multSign',
    'divSign',
    #Reserved Tokens
    'program',
    'end',
    'read',
    'write',
    'if',
    'then',
    'else',
    'elif',
    'do',
    'exit',
    'integer',
    'int',
    'real',
    'subroutine',
    'call',
    'or',
    'and',
    'not',
]
# Keyword -> token-type map consulted by t_id.
reserved = {
    'program' : 'program',
    'end' : 'end',
    'read' : 'read',
    'write' : 'write',
    'if' : 'if',
    'then' : 'then',
    'else' : 'else',
    'elif' : 'elif',
    'do' : 'do',
    'exit' : 'exit',
    'integer' : 'integer',
    'real' : 'real',
    'subroutine' : 'subroutine',
    'call' : 'call',
    'or' : 'or',
    'and' : 'and',
    'not' : 'not',
}
# Simple (string) token rules.
# NOTE(review): in ply, function rules such as t_id are tried before string
# rules, so t_or/t_and/t_not below are likely never reached -- the
# reserved-word lookup inside t_id handles those keywords.  Confirm intended.
t_semicolon = r';'
t_openBracket = r'\['
t_closeBracket = r'\]'
t_or = r'or'
t_and = r'and'
t_not = r'not'
t_openParentheses = r'\('
t_closeParentheses = r'\)'
t_doubleEqual = r'\=\='
t_notEqual = r'\!\='
t_biggerOrEqualThan = r'\>\='
t_smallerOrEqualThan = r'\<\='
t_biggerThan = r'\>'
t_smallerThan = r'\<'
t_equal = r'\='
t_coma = r','
t_comment = r'\$[a-zA-Z0-9_ ]*'  # '$'-prefixed line comments
t_string = r'\'[a-zA-Z0-9 \.\?\:\t\r\n\f()\[\]\&\!\@\#\$\%\^\-\=\+\/\,]*\''
t_plusSign = r'\+'
t_minusSign = r'-'
t_multSign = r'\*'
t_divSign = r'\/'
t_ignore = ' \t\r\n\f\v'  # whitespace is skipped
# NOTE: in each t_* function the docstring IS the ply token regex -- it must
# not be edited or reformatted.
def t_real(t):
    r'\d+\.\d+'
    # Floating-point literal (must be defined before t_int so '1.5' is not
    # split into two ints).
    t.value = float(t.value)
    return t
def t_int(t):
    r'\d+'
    # Integer literal.
    t.value = int(t.value)
    return t
def t_id(t):
    r'[a-zA-Z_][a-zA-Z_0-9]*'
    # Identifier; re-classified as a keyword token when in `reserved`.
    if t.value in reserved:
        t.type = reserved[ t.value ]
    else:
        t.type = 'id'
    return t
def t_error(t):
    # Report and skip a single illegal character, then keep lexing.
    print("Illegal character!", t)
    t.lexer.skip(1)
lexer = lex.lex()  # build the lexer from the rules above
# Start symbol: a program is declarations, subroutines, then the main
# statement list.  The ACTION_* nonterminals are empty rules whose handlers
# emit intermediate code (quadruples) as the input is parsed.
def p_P(p):
    '''
    P : program id VARIABLES ACTION_QUADRUPLE_GOTOMAIN SUBROUTINES ACTION_FILL_GOTO_MAIN STATEMENTS end program
    '''
def addSymbol(name, type, dimensions):
    """Register *name* in the global symbol table.

    Subroutines record only their start quadruple; variables get a type, an
    initial value and a memory direction ('#N').  Arrays/matrices also
    reserve their cells plus two scratch directions used for computed
    (indexed) addresses.  Raises if *name* was already declared.
    """
    global symbolsTableIndex
    if name in symbols:
        raise Exception(f'{name} already declared')
    if type == "subroutine":
        symbols[name] = {
            "startDirection" : quadrupletIndex
        }
    else:
        symbols[name] = {
            "type" : type,
            "value" : 0 if type == 'integer' else 0.0,
            "direction" : "#" + str(symbolsTableIndex)
        }
        if dimensions != None:
            if isinstance(dimensions, int):
                # One-dimensional array of `dimensions` cells.
                symbols[name]["rows"] = dimensions
                symbols[name]["reserved"] = "#" + str(int(symbols[name]["direction"][1:]) + dimensions)
                symbols[name]["reserved2"] = "#" + str(int(symbols[name]["direction"][1:]) + dimensions + 1)
                symbols[name]["usingReserved"] = False
                # Advance past the cells and the scratch slots.
                # NOTE(review): +3 leaves one extra unused slot beyond
                # reserved/reserved2 -- confirm this padding is intentional.
                symbolsTableIndex += dimensions + 3
            elif len(dimensions) == 2:
                # Two-dimensional matrix stored row-major in rows*columns cells.
                symbols[name]["rows"] = dimensions[0]
                symbols[name]["columns"] = dimensions[1]
                symbols[name]["reserved"] = "#" + str(int(symbols[name]["direction"][1:]) + dimensions[0] * dimensions[1])
                symbols[name]["reserved2"] = "#" + str(int(symbols[name]["direction"][1:]) + dimensions[0] * dimensions[1] + 1)
                symbols[name]["usingReserved"] = False
                symbolsTableIndex += dimensions[0] * dimensions[1] + 3
        else:
            # Scalar: occupies a single direction.
            symbolsTableIndex += 1
# --- Grammar rules.  Each docstring IS the ply grammar production and must
# --- not be edited; the ACTION_* nonterminals trigger code-emitting handlers.
def p_variables(p):
    '''
    VARIABLES : TYPE id ARRAY semicolon VARIABLES
              |
    '''
    # Declare the variable (ARRAY yields None, an int, or a [rows, cols] pair).
    if len(p) == 6:
        addSymbol(p[2], p[1], p[3])
def p_type(p):
    '''
    TYPE : integer
         | real
    '''
    p[0] = p[1]
def p_array(p):
    '''
    ARRAY : openBracket ARITEXP closeBracket openBracket ARITEXP closeBracket
          | openBracket ARITEXP closeBracket
          |
    '''
    # Pop the index-expression results left on the operand/type stacks.
    # For the 2-D case the pops are in reverse push order (last index first).
    if len(p) == 7:
        p[0] = [operandsStack.pop(), operandsStack.pop()]
        typesStack.pop()
        typesStack.pop()
    elif len(p) == 4:
        p[0] = operandsStack.pop()
        typesStack.pop()
def p_subroutines(p):
    '''
    SUBROUTINES : subroutine id ACTION_ADD_TO_TABLE STATEMENTS ACTION_QUADRUPLE_GOBACK end subroutine SUBROUTINES
                |
    '''
def p_statements(p):
    '''
    STATEMENTS : if LOGEXP ACTION_QUADRUPLE_EMPTY_JUMP then STATEMENTS ACTION_NEW_IF ACTION_QUADRUPLE_GOTO_ENDIF ELIF ELSE end if ACTION_FILL_GOTO_ENDIF STATEMENTS
               | DO
               | VAR equal ARITEXP ACTION_QUADRUPLET_SET STATEMENTS
               | call id ACTION_QUADRUPLE_CALL STATEMENTS
               | read READVAR ACTION_QUADRUPLE_READ STATEMENTS
               | write WRITEVAR ACTION_QUADRUPLE_WRITE STATEMENTS
               | exit ACTION_QUADRUPLE_EXITSSTACK STATEMENTS
               | comment STATEMENTS
               |
    '''
def p_(p):
    '''
    DO : do ACTION_PUSH_FLAG_EXITSSTACK then ACTION_PUSH_DOSSTACK STATEMENTS ACTION_GOTO_DO end do ACTION_FILL_EXITS_JUMPS STATEMENTS
       | do id equal ARITEXP ACTION_QUADRUPLET_SET coma ACTION_PUSH_DOSSTACK LOGEXP ACTION_QUADRUPLE_EMPTY_JUMP then STATEMENTS ACTION_QUADRUPLE_ADD_TO_COUNTER ACTION_GOTO_DO end do ACTION_FILL_JUMP STATEMENTS
    '''
    # First alternative: infinite do-loop terminated only by 'exit'.
    # Second alternative: counted do-loop with condition and counter update.
def p_elif(p):
    '''
    ELIF : elif ACTION_FILL_JUMP LOGEXP ACTION_QUADRUPLE_EMPTY_JUMP then STATEMENTS ACTION_QUADRUPLE_GOTO_ENDIF ELIF
         |
    '''
def p_else(p):
    '''
    ELSE : else ACTION_FILL_JUMP STATEMENTS ACTION_QUADRUPLE_GOTO_ENDIF
         | ACTION_FILL_JUMP
    '''
def p_logexp(p):
    '''
    LOGEXP : LOGEXP or ACTION_OR_LOGEXP ANDEXP ACTION_CREATE_QUADRUPLE_LOGEXP
           | ANDEXP
    '''
def p_andexp(p):
    '''
    ANDEXP : ANDEXP and ACTION_AND_ANDEXP COMPARISON ACTION_QUADRUPLE_ANDEXP
           | COMPARISON
    '''
def p_comparison(p):
    '''
    COMPARISON : openParentheses LOGEXP closeParentheses
               | ARITEXP COMP ARITEXP ACTION_QUADRUPLE_COMP_COMPARISON
               | not LOGEXP ACTION_QUADRUPLE_NOT_COMPARISON
    '''
def p_comp(p):
    '''
    COMP : doubleEqual
         | notEqual
         | biggerOrEqualThan
         | smallerOrEqualThan
         | biggerThan
         | smallerThan
    '''
    # Push the comparison operator for the later quadruple emission.
    operatorsStack.append(p[1])
    p[0] = p[1]
def p_readvar(p):
    '''
    READVAR : VAR READV
    '''
    # Collect the variables of a 'read' statement for ACTION_QUADRUPLE_READ.
    readWriteVars.append(p[1])
def p_readv(p):
    '''
    READV : coma VAR READV
          |
    '''
    if len(p) == 4:
        readWriteVars.append(p[2])
def p_writevar(p):
    '''
    WRITEVAR : VAR WRITEV
             | string WRITEV
    '''
    # Collect variables/string literals of a 'write' statement.
    readWriteVars.append(p[1])
def p_writev(p):
    '''
    WRITEV : coma VAR WRITEV
           | coma string WRITEV
           |
    '''
    if len(p) == 4:
        readWriteVars.append(p[2])
def p_aritexp(p):
    '''
    ARITEXP : MULDIV
            | ARITEXP plusSign ACTION_PLUSSIGN_ARITEXP MULDIV ACTION_QUADRUPLET_ARITEXP
            | ARITEXP minusSign ACTION_MINUSSIGN_ARITEXP MULDIV ACTION_QUADRUPLET_ARITEXP
    '''
def p_muldiv(p):
    '''
    MULDIV : VALUE
           | MULDIV multSign ACTION_MULTSIGN_MULDIV VALUE ACTION_QUADRUPLET_MULDIV
           | MULDIV divSign ACTION_DIVSIGN_MULDIV VALUE ACTION_QUADRUPLET_MULDIV
    '''
    p[0] = p[1]
def p_value(p):
    '''
    VALUE : VAL
          | openParentheses ARITEXP closeParentheses
    '''
def p_val(p):
    '''
    VAL : VAR ACTION_VAR_VAL
        | int ACTION_INT_VAL
        | real ACTION_REAL_VAL
    '''
def p_var(p):
    '''
    VAR : id ARRAY ACTION_QUADRUPLE_ARRAY
    '''
    # Scalar reference yields the plain name; indexed reference yields
    # [name, computed_direction] (direction produced by the ARRAY action).
    if p[2] == None:
        p[0] = p[1]
    else:
        p[0] = [p[1], p[3]]
def p_action_var_val(p):
    "ACTION_VAR_VAL :"
    # Push a variable reference onto the operand/type stacks.  For indexed
    # references that landed in a scratch ('reserved') direction, the '*'
    # prefix marks an indirect (pointer-like) access.
    if isinstance(p[-1], list):
        # print(p[-1])
        if symbols[p[-1][0]]["reserved"] == p[-1][1] or symbols[p[-1][0]]["reserved2"] == p[-1][1]:
            operandsStack.append("*" + p[-1][1][1:])
        else:
            operandsStack.append(p[-1][1])
        typesStack.append(symbols[p[-1][0]]["type"])
    else:
        operandsStack.append(symbols[p[-1]]["direction"])
        typesStack.append(symbols[p[-1]]["type"])
def p_action_int_val(p):
    "ACTION_INT_VAL :"
    # Push an integer literal.
    # print("int_val", p[-1])
    operandsStack.append(p[-1])
    typesStack.append("integer")
def p_action_real_val(p):
    "ACTION_REAL_VAL :"
    # Push a real literal.
    # print("real_val", p[-1])
    operandsStack.append(p[-1])
    typesStack.append("real")
def p_action_plussign_aritexp(p):
    "ACTION_PLUSSIGN_ARITEXP :"
    # print("plusSign", p[-1])
    operatorsStack.append(p[-1])
def p_action_minussign_aritexp(p):
    "ACTION_MINUSSIGN_ARITEXP :"
    # print("minusSign", p[-1])
    operatorsStack.append(p[-1])
def p_action_quadruplet_set(p):
    "ACTION_QUADRUPLET_SET :"
    # Emit the assignment quadruple '<op> <value> <direction>' after
    # type-checking the assigned expression against the target variable.
    operator = p[-2]
    variable = p[-3]
    variableType = ""
    variableDirection = ""
    if isinstance(variable, list):
        # Indexed target: resolve to the computed (possibly indirect) address.
        variableType = symbols[variable[0]]["type"]
        if symbols[variable[0]]["reserved"] == variable[1] or symbols[variable[0]]["reserved2"] == variable[1]:
            variableDirection = "*" + variable[1][1:]
        else:
            variableDirection = variable[1]
    else:
        variableType = symbols[variable]["type"]
        variableDirection = symbols[variable]["direction"]
    value = operandsStack.pop()
    valueType = typesStack.pop()
    # print(p[-1])
    validType(operator, variableType, valueType)
    quadruplets.append(str(operator) + ' ' + str(value) + ' ' + str(variableDirection))
    global quadrupletIndex
    quadrupletIndex += 1
def p_action_multsign_muldiv(p):
    "ACTION_MULTSIGN_MULDIV :"
    operatorsStack.append(p[-1])
def p_action_divsign_muldiv(p):
    "ACTION_DIVSIGN_MULDIV :"
    operatorsStack.append(p[-1])
def addQuadruplet():
    """Pop one operator and two operands, type-check, emit the quadruple,
    and push the resulting temporary back onto the operand stack."""
    operator = operatorsStack.pop()
    rightOperand = operandsStack.pop()
    rightOperandType = typesStack.pop()
    leftOperand = operandsStack.pop()
    leftOperandType = typesStack.pop()
    typesStack.append(validType(operator, leftOperandType, rightOperandType))
    temp = available.pop(0)  # grab a free temporary direction
    quadruplets.append(str(operator) + ' ' + str(leftOperand) + ' ' + str(rightOperand) + ' ' + str(temp))
    global quadrupletIndex
    quadrupletIndex += 1
    operandsStack.append(temp)
def p_action_quadruplet_aritexp(p):
    "ACTION_QUADRUPLET_ARITEXP :"
    # Emit pending additive operation, if one is on top of the stack.
    operator = peek(operatorsStack)
    # print("quadruplet aritexpt operator list", operatorsStack)
    if operator == "+" or operator == "-":
        addQuadruplet()
def p_action_quadruplet_muldiv(p):
    "ACTION_QUADRUPLET_MULDIV :"
    # Emit pending multiplicative operation, if one is on top of the stack.
    operator = peek(operatorsStack)
    if operator == "*" or operator == "/":
        addQuadruplet()
def p_action_or_logexp(p):
    "ACTION_OR_LOGEXP :"
    operatorsStack.append(p[-1])
def p_action_and_andexp(p):
    "ACTION_AND_ANDEXP :"
    operatorsStack.append(p[-1])
def p_action_create_quadruple_logexp(p):
    "ACTION_CREATE_QUADRUPLE_LOGEXP :"
    operator = peek(operatorsStack)
    if operator == "or":
        addQuadruplet()
def p_action_quadruple_andexp(p):
    "ACTION_QUADRUPLE_ANDEXP :"
    operator = peek(operatorsStack)
    if operator == "and":
        addQuadruplet()
def p_action_quadruple_comp_comparison(p):
    "ACTION_QUADRUPLE_COMP_COMPARISON :"
    # The comparison operator was pushed by p_comp; emit it unconditionally.
    addQuadruplet()
def p_action_quadruple_not_comparison(p):
    "ACTION_QUADRUPLE_NOT_COMPARISON :"
    # Emit 'not <value> <temp>' after checking the operand is boolean.
    # NOTE(review): unlike addQuadruplet, the temp result is NOT pushed back
    # onto operandsStack/typesStack here -- confirm nested 'not' expressions
    # (e.g. inside and/or) keep the stacks balanced.
    value = operandsStack.pop()
    valueType = typesStack.pop()
    isBool(valueType)
    temp = available.pop(0)
    quadruplets.append("not " + str(value) + ' ' + str(temp))
    global quadrupletIndex
    quadrupletIndex += 1
def p_action_quadruple_empty_jump(p):
    "ACTION_QUADRUPLE_EMPTY_JUMP :"
    # Emit a conditional jump 'gotoF <cond> ' with the target left blank;
    # the condition is the result field of the previously emitted quadruple.
    # The jump's index is remembered on jumpsStack for later back-patching.
    global quadrupletIndex
    value = quadruplets[quadrupletIndex - 2].split()
    # print("ACTION_QUADRUPLE_EMPTY_JUMP", value[len(value) - 1])
    quadruplets.append("gotoF " + str(value[len(value) - 1]) + ' ')
    jumpsStack.append(quadrupletIndex)
    quadrupletIndex += 1
def fillJump(quadrupletsIndex, goto):
    """Back-patch the quadruple at *quadrupletsIndex* (0-based) by appending
    the jump target *goto* to its text."""
    # print("fillJump", quadrupletsIndex, goto)
    quadruplets[quadrupletsIndex] = quadruplets[quadrupletsIndex] + str(goto)
def p_action_fill_jump(p):
    "ACTION_FILL_JUMP :"
    # Resolve the most recent pending jump to the current quadruple index
    # (jumpsStack holds 1-based indexes, hence the -1).
    # print("jumpsStack", jumpsStack)
    fillJump(jumpsStack.pop()- 1, quadrupletIndex)
def p_action_quadruple_goto_endif(p):
    "ACTION_QUADRUPLE_GOTO_ENDIF :"
    # Emit an unconditional 'goto ' out of an if/elif/else branch; its
    # target is patched once the whole if-statement has been parsed.
    global quadrupletIndex
    ifsStack[len(ifsStack) - 1].append(quadrupletIndex)
    # print(ifsStack)
    quadruplets.append("goto ")
    quadrupletIndex += 1
def p_new_if(p):
    "ACTION_NEW_IF :"
    # Open a fresh collection of end-if jumps for the current if-statement.
    ifsStack.append([])
def p_action_fill_goto_endif(p):
    "ACTION_FILL_GOTO_ENDIF :"
    # Patch every branch-exit jump of the current if-statement to here.
    for goto in ifsStack[len(ifsStack) - 1]:
        fillJump(goto - 1, quadrupletIndex)
    ifsStack.pop()
def p_action_push_dosstack(p):
    "ACTION_PUSH_DOSSTACK :"
    # Remember where the do-loop body/condition starts.
    dosStack.append(quadrupletIndex)
def p_action_goto_do(p):
    "ACTION_GOTO_DO :"
    # Jump back to the start of the enclosing do-loop.
    quadruplets.append("goto" + ' ' + str(dosStack.pop()))
    global quadrupletIndex
    quadrupletIndex += 1
def p_action_quadruple_add_to_counter(p):
    "ACTION_QUADRUPLE_ADD_TO_COUNTER :"
    # Increment the counted-do loop variable (the 'id' at position -10 in
    # the DO production) by 1 before jumping back.
    quadruplets.append("+ 1 " + str(symbols[p[-10]]["direction"]) + ' ' + str(symbols[p[-10]]["direction"]))
    global quadrupletIndex
    quadrupletIndex += 1
def p_action_push_flag_exitsstack(p):
    "ACTION_PUSH_FLAG_EXITSSTACK :"
    # '-' marks the boundary of a new loop's exit jumps.
    exitsStack.append('-')
def p_action_quadruple_exitsstack(p):
    "ACTION_QUADRUPLE_EXITSSTACK :"
    # Emit an 'exit' jump with a blank target; patched at loop end.
    quadruplets.append("goto ")
    global quadrupletIndex
    exitsStack.append(quadrupletIndex)
    quadrupletIndex += 1
def p_action_fill_exits_jumps(p):
    "ACTION_FILL_EXITS_JUMPS :"
    # Patch every 'exit' jump of the current loop (down to the '-' marker)
    # to point just past the loop.
    index = exitsStack.pop()
    while index != '-':
        fillJump(index - 1, quadrupletIndex)
        index = exitsStack.pop()
def p_action_quadruple_array(p):
"ACTION_QUADRUPLE_ARRAY :"
global quadrupletIndex
if p[-1] != None:
if "reserved" not in symbols[p[-2]] or (isinstance(p[-1], int) and "columns" in symbols[p[-2]]):
raise Exception(f"{p[-2]} is not an array or matrix")
if isinstance(p[-1], int):
p[0] = "#" + str(p[-1] + int(symbols[p[-2]]["direction"][1:]))
elif isinstance(p[-1], list):
quadruplets.append("* " + str(p[-1][0]) + " " + str(symbols[p[-2]]["columns"]) + " " + str(symbols[p[-2]]["reserved"]))
quadruplets.append("+ " + str(p[-1][1]) + " " + str(symbols[p[-2]]["reserved"]) + " " + str(symbols[p[-2]]["reserved"]))
quadruplets.append("+ " + str(symbols[p[-2]]["direction"][1:]) + " " + str(symbols[p[-2]]["reserved"]) + " " + str(symbols[p[-2]]["reserved"]))
quadrupletIndex += 3
p[0] = | |
<filename>FwXG/sophoslib.py
#!/usr/bin/env python
"""
Introduction:
Library to control a Sophos Firewall XG via API.
The idea here is to construct an HTTP GET carrying form-based XML, as described in the Sophos Firewall API documentation:
https://docs.sophos.com/nsg/sophos-firewall/18.0/API/index.html
Usage:
Declare user, pass and IP to connect a Firewall XG.
By default, it uses IP address 172.16.16.16 and port 4444.
sophosxg('user','pass')
There are three major method groups here: GET, SET, DEL.
set_xxx(arguments) : Method to set information on Firewall XG
get_xxx() : Method to obtain information from Firewall XG.
del_xxx(argument) : Method to delete information on Firewall XG
Examples:
from sophoslib import sophosxg
fw = sophosxg('apiadmin','SYNCORP_Passw0rd')
fw.set_iphost('Test1','5.5.5.5')
fw.get_iphost()
fw.del_iphost('Test1')
For more information in how to activate the API for Sophos XG Firewall, check:
https://support.sophos.com/support/s/article/KB-000038263?language=en_US
"""
import xml.etree.ElementTree as ET
import xmltodict
import copy
import requests
from json import loads, dumps
requests.packages.urllib3.disable_warnings()
__author__ = "<NAME>"
__copyright__ = "Copyleft 2020, The SYNCORP Project."
__credits__ = ["<NAME>"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
__GITHUB__ = "https://github.com/kdemenx"
__webpage__ = "https://www.syncorpgroup.com"
class sophosxg(object):
"""
Parameters
----------
username : str
username with API profile permission
password : str
password with API profile permission
ip : str
IP Address or DNS of Firewall XG. Default: 172.16.16.16
port : str
TCP Port to connect to Sophos XG. Default: 4444
Examples:
fw = sophosxg('apiadmin','SYNCORP_Passw0rd')
fw = sophosxg('apiadmin','SYNCORP_Passw0rd','192.168.10.1')
fw = sophosxg('apiadmin','SYNCORP_Passw0rd','192.168.10.1','443')
"""
def __init__(self, username, password, ip='172.16.16.16', port='4444'):
self.username = username
self.password = password
self.apiurl = 'https://{0}:{1}/webconsole/APIController?reqxml='.format(
ip, port)
self.xml_auth = ET.Element('Request')
xml_login = ET.SubElement(self.xml_auth, 'Login')
ET.SubElement(xml_login, 'Username').text = self.username
ET.SubElement(xml_login, 'Password').text = self.password
    ##########################################################################
    # Get methods: each builds a 'Get <Entity>' request and returns the
    # firewall's parsed response.
    ##########################################################################
    def get_localserviceacl(self):
        """Return the Local Service ACL configuration."""
        self.make_xml('Get', 'LocalServiceACL')
        return self.send()
    def get_adminsettings(self):
        """Return the administrator settings."""
        self.make_xml('Get', 'AdminSettings')
        return self.send()
    def get_services(self):
        """Return the configured service objects."""
        self.make_xml('Get', 'Services')
        return self.send()
    def get_iphost(self):
        """Return the IP Host objects."""
        self.make_xml('Get', 'IPHost')
        return self.send()
    def get_iphostgroup(self):
        """Return the IP Host group objects."""
        self.make_xml('Get', 'IPHostGroup')
        return self.send()
    def get_network_interface(self):
        """Return the network interfaces."""
        self.make_xml('Get', 'Interface')
        return self.send()
    def get_network_vlan(self):
        """Return the VLAN interfaces."""
        self.make_xml('Get', 'VLAN')
        return self.send()
    def get_network_lag(self):
        """Return the LAG (link aggregation) interfaces."""
        self.make_xml('Get', 'LAG')
        return self.send()
    def get_network_bridge(self):
        """Return the bridge-pair interfaces."""
        self.make_xml('Get', 'BridgePair')
        return self.send()
    def get_network_zone(self):
        """Return the security zones."""
        self.make_xml('Get', 'Zone')
        return self.send()
    def get_ips_policy(self):
        """Return the IPS policies."""
        self.make_xml('Get', 'IPSPolicy')
        return self.send()
    def get_firewallrule(self):
        """Return the firewall rules."""
        self.make_xml('Get', 'FirewallRule')
        return self.send()
    def get_routing_unicast(self):
        """Return the unicast (static) routes."""
        self.make_xml('Get', 'UnicastRoute')
        return self.send()
    def get_sys_services(self):
        """Return the system services status."""
        self.make_xml('Get', 'SystemServices')
        return self.send()
    def get_sys_centralmgmt(self):
        """Return the central-management configuration."""
        self.make_xml('Get', 'CentralManagement')
        return self.send()
    def get_sys_notification(self):
        """Return the notification settings."""
        self.make_xml('Get', 'Notification')
        return self.send()
    def get_conf_log(self):
        """Return the configured syslog servers."""
        self.make_xml('Get', 'SyslogServers')
        return self.send()
    def get_custom(self,custom):
        """Return any entity by name, for endpoints without a wrapper here."""
        self.make_xml('Get', custom)
        return self.send()
##########################################################################
# Set subclasses
##########################################################################
def set_iphost(self, name, ipaddress, subnet='', hosttype='IP', ipfamily='IPv4'):
"""
GUI path:
SYSTEM - Host and Services - IP Host
Parameters
----------
name : str
name of object host
ipaddress : str
It usages depend of hosttype variable:
Hostype = IP (Default)
Value = IP Address
Hostype = Network
Value = IP Network.
Note: Please keep in mind the CIDR format here.
Note: As elaborate of this code (Version 1800.2).
Note: There is no verification by Sophos XG for CIDR format via API (GUI does)
Hostype = IP Range
Value = Start IP Address
Note: This value is usage in conjunction with variable 'subnet' as End IP Address.
Hostype = IP List
Value = List Of IP Addresses
Note: This is a unique string with all ip address with
NO SPACES, divided by commas (,). See examples.
hosttype : str
There are 4 values here:
- IP, Network, IPRange, IPList
Its values modify ipaddress and subnet variable usages. See more info on variables.
ipfamily : str
Declare IP Family: IPv4 or IPv6. Default: IPv4
----------
Examples:
fw.set_iphost('SYNCORP1','5.5.5.5')
fw.set_iphost('SYNCORP2','172.16.58.3','255.255.255.128','Network')
fw.set_iphost('SYNCORP3','192.168.10.10','192.168.10.253','IPRange')
fw.set_iphost('SYNCORP4','4.4.4.4,5.5.5.5,6.6.6.6',hosttype='IPList')
"""
xml_child = self.make_xml('Set', 'IPHost')
ET.SubElement(xml_child, 'Name').text = name
ET.SubElement(xml_child, 'IPFamily').text = ipfamily
ET.SubElement(xml_child, 'HostType').text = hosttype
if hosttype == 'IP':
ET.SubElement(xml_child, 'IPAddress').text = ipaddress
elif hosttype == 'Network':
ET.SubElement(xml_child, 'IPAddress').text = ipaddress
ET.SubElement(xml_child, 'Subnet').text = subnet
elif hosttype == 'IPRange':
ET.SubElement(xml_child, 'StartIPAddress').text = ipaddress
ET.SubElement(xml_child, 'EndIPAddress').text = subnet
elif hosttype == 'IPList':
ET.SubElement(xml_child, 'ListOfIPAddresses').text = ipaddress
# print(ET.tostring(self.xml_request).decode('utf-8'))
return self.send()
def set_iphostgroup(self, name, hosts, description='', ipfamily='IPv4'):
"""
GUI path:
SYSTEM - Host and Services - IP Host group
Parameters
----------
name : str
name of object host group
hosts : list
group different host in Python list. Please make sure that
host was made previously by method set_iphost(arguments)
description : str
Description in string format.
ipfamily : str
Declare IP Family: IPv4 or IPv6. Default: IPv4
----------
Examples:
fw.set_iphostgroup('GROUP1',['SYNCORP1','SYNCORP2','SYNCORP3'])
fw.get_iphostgroup()
fw.del_iphostgroup('GROUP1')
"""
xml_child = self.make_xml('Set', 'IPHostGroup')
ET.SubElement(xml_child, 'Name').text = name
ET.SubElement(xml_child, 'IPFamily').text = ipfamily
ET.SubElement(xml_child, 'Description').text = description
xml_child2 = ET.SubElement(xml_child, 'HostList')
for i in hosts:
ET.SubElement(xml_child2, 'Host').text = i
# print(ET.tostring(self.xml_request).decode('utf-8'))
return self.send()
def set_network_vlan(self, interface, vlan, zone, ipaddress, netmask, ipv4configuration='Enable', ipv4assignment='Static'):
"""
GUI path:
CONFIGURE - Network - Interfaces - VLAN
Parameters
----------
interface : str
Name of Physical or Virtual interface to create a VLAN.
vlan : str
Vlan value number.
zone : str
Security Zone that you want to assign VLAN.
ipaddress : str
IP Address to assign subinterface VLAN.
netmask : str
Network mask for IP Address variable.
ipv4configuration : str
Active IPv4 configuration. Default = 'Enable'.
It's required at least one IP Family (IPv4/IPv6)
ipv4assignment : str
Only 'Static', 'PPPoe', 'DHCP' are allowed.
Default = 'Static'
----------
Examples:
fw.set_network_vlan('PortD','1004','LAN','1.1.1.3','255.255.255.255')
"""
xml_child = self.make_xml('Set', 'VLAN')
ET.SubElement(xml_child, 'Name').text = interface + '.' + vlan
ET.SubElement(xml_child, 'Hardware').text = interface + '.' + vlan
ET.SubElement(xml_child, 'Interface').text = interface
ET.SubElement(xml_child, 'Zone').text = zone
ET.SubElement(xml_child, 'VLANID').text = vlan
ET.SubElement(xml_child, 'IPv4Configuration').text = ipv4configuration
ET.SubElement(xml_child, 'IPv4Assignment').text = ipv4assignment
ET.SubElement(xml_child, 'IPAddress').text = ipaddress
ET.SubElement(xml_child, 'Netmask').text = netmask
return self.send()
def set_network_lag(self, name, interfaces, zone, ipaddress, netmask,
mode='802.3ad(LACP)', ipassignment='Static', ipv4configuration='Enable',
xmithashpolicy='Layer2', mtu='1500', mac='Default'):
"""
GUI path:
CONFIGURE - Network - Interfaces - LAG
Parameters
----------
name : str
Name for LAG Virtual interface.
interfaces : list
group different physical interfaces in Python list.
zone : str
Security Zone that you want to assign LAG.
ipaddress : str
IP Address to assign LAG interface.
netmask : str
Network mask for IP Address variable.
mode : str
modes available: '802.3ad(LACP)' 'ActiveBackup'
Default = '802.3ad(LACP)'
xmithashpolicy : str
Load balancing method available:
'Layer2', 'Layer2+3', 'Layer3+4'.
Default = 'Layer2'
mtu : str
Specify Maximum Transmission Unit(MTU)value.
Range 576 to 9000 is allowed.
Default = '1500'
mac : str
Select to use default MAC Address.
Maximum characters allowed are 17.
Default = 'Default'
ipv4configuration : str
Active IPv4 configuration. Default = 'Enable'.
It's required at least one IP Family (IPv4/IPv6)
ipv4assignment : str
Only 'Static', 'DHCP' are allowed.
Default = 'Static'
----------
Examples:
portsgroup= ['PortF','PortG','PortH']
fw.set_network_lag('LAG1',portsgroup,'LAN','192.168.127.12','255.255.255.255')
fw.set_network_lag('LAG1',portsgroup,'LAN','192.168.127.12','255.255.255.255',mode='ActiveBackup')
"""
xml_child = self.make_xml('Set', 'LAG')
ET.SubElement(xml_child, 'Name').text = name
ET.SubElement(xml_child, 'Hardware').text = name
xml_child2 = ET.SubElement(xml_child, 'MemberInterface')
for i in interfaces:
ET.SubElement(xml_child2, 'Interface').text = i
ET.SubElement(xml_child, 'Mode').text = mode
ET.SubElement(xml_child, 'NetworkZone').text = zone
ET.SubElement(xml_child, 'IPv4Configuration').text = ipv4configuration
ET.SubElement(xml_child, 'IPAssignment').text = ipassignment
ET.SubElement(xml_child, 'IPv4Address').text = ipaddress
ET.SubElement(xml_child, 'Netmask').text = netmask
ET.SubElement(xml_child, 'MTU').text = mtu
ET.SubElement(xml_child, 'MACAddress').text = mac
if mode == '802.3ad(LACP)':
ET.SubElement(xml_child, 'XmitHashPolicy').text = xmithashpolicy
# ET.SubElement(xml_child,'InterfaceSpeed').text = 'Auto Negotiate'
# xml_child2= ET.SubElement(xml_child,'MSS')
# ET.SubElement(xml_child2,'OverrideMSS').text = 'Enable'
# ET.SubElement(xml_child2,'MSSValue').text = '1460'
return self.send()
def set_network_bridge(self, name, interfaces, ipaddress='', netmask='', gw='',
routingonbridge='Disable', ipassignment='Static',
ipv4configuration='Enable', mtu='1500'):
"""
GUI path:
CONFIGURE - Network - Interfaces - Bridge
Parameters
----------
name : str
Name for Bridge Virtual interface.
interfaces : dict
group different physical or Virtual interfaces in Python dictionary.
It require at least 2 Interfaces/Zone.
Keys dictionary represent Port.
Values dictionary represent Zone.
Example: ['PortA': 'LAN', 'PortB': 'WAN']
routingonbridge : str
Used to enable routing on bridge-pair.
Default = 'Disable'
ipaddress : str (Optional)
IP Address to assign Bridge interface.
Default = ''
netmask : str (Optional)
Network mask for IP Address variable.
Default = ''
gw : str (Optional)
Specify Gateway IP Address for IPv4 Configuration.
Default = ''
mtu : str
Specify Maximum Transmission Unit(MTU)value.
Range 576 to 9000 is allowed.
Default = '1500'
ipv4configuration : str
Active IPv4 configuration. Default = 'Enable'.
It's required at least one IP Family (IPv4/IPv6)
ipv4assignment : str
Only 'Static', 'DHCP' are allowed.
Default = 'Static'
----------
Examples:
bridge1={ 'PortG': 'LAN', 'PortH': 'WAN' }
bridge2={ 'PortE': 'DMZ', 'PortF': 'LAN' }
fw.set_network_bridge('Bridge100',bridge1,'3.3.3.3','255.255.255.0','172.16.58.3')
fw.set_network_bridge('Bridge101',bridge2)
"""
xml_child = self.make_xml('Set', 'BridgePair')
ET.SubElement(xml_child, 'Name').text = name
ET.SubElement(xml_child, 'Hardware').text = name
ET.SubElement(xml_child, 'Description').text = str(
len(interfaces.keys())) + ' Bridges'
ET.SubElement(xml_child, 'RoutingOnBridgePair').text = routingonbridge
xml_child2 = ET.SubElement(xml_child, 'BridgeMembers')
| |
node selection base on in/out degree.
Args:
filter_name (str): Name for new filter.
criterion (list): A two-element vector of numbers, example: [1,5].
predicate (str): BETWEEN (default) or IS_NOT_BETWEEN
edgeType (str): Type of edges to consider in degree count: ANY (default), UNDIRECTED, INCOMING, OUTGOING, DIRECTED
hide (bool): Whether to hide filtered out nodes and edges. Default is FALSE.
Ignored if all nodes or edges are filtered out. This is an alternative to filtering for node and edge selection.
network (SUID or str or None): Name or SUID of the network. Default is the "current" network active in Cytoscape.
base_url (str): Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://localhost:1234
and the latest version of the CyREST API supported by this version of py4cytoscape.
apply (bool): True to execute filter immediately; False to define filter but not execute it
Returns:
dict: {'nodes': <node list>, 'edges': <edge list>} returns list of nodes and edges selected after filter executes; None if filter wasn't applied
Raises:
CyError: if criterion is not list of two values or filter couldn't be applied
requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error
Examples:
>>> create_degree_filter('myFilter', [2, 5]) # filter on any nodes having between 2 and 5 edges
{'nodes': ['YDR395W', 'YLR362W', 'YPL248C', 'YGL035C'], 'edges': None}
>>> create_degree_filter('myFilter', [2, 5], predicate='IS_NOT_BETWEEN') # filter for edges < 2 or > 5
{'nodes': ['YDR395W', 'YLR362W', 'YPL248C', 'YGL035C'], 'edges': None}
        >>> create_degree_filter('myFilter', [2, 5], edge_type='INCOMING') # filter for between 2 and 5 incoming edges
        {'nodes': ['YDR395W', 'YLR362W', 'YPL248C', 'YGL035C'], 'edges': None}
        >>> create_degree_filter('myFilter', [2, 5], hide=True) # filter for between 2 and 5 edges, and hide them
        {'nodes': ['YDR395W', 'YLR362W', 'YPL248C', 'YGL035C'], 'edges': None}
        >>> create_degree_filter('myFilter', [2, 5], apply=False) # define filter for between 2 and 5 edges, but do not execute it
        {'nodes': ['YDR395W', 'YLR362W', 'YPL248C', 'YGL035C'], 'edges': None}
"""
networks.set_current_network(network, base_url=base_url)
if not isinstance(criterion, list) or len(criterion) != 2:
raise CyError(f'Criterion "{criterion}" must be a list of two numeric values, e.g., [0.5, 2.0]')
cmd_json = {'id': 'DegreeFilter',
'parameters': {'criterion': criterion, 'predicate': predicate, 'edgeType': edge_type}}
cmd_body = {'name': filter_name, 'json': json.dumps(cmd_json)}
return _create_filter_and_finish('commands/filter/create', cmd_body, hide, apply, network, base_url)
@cy_log
def create_composite_filter(filter_name, filter_list, type='ALL', hide=False, network=None, base_url=DEFAULT_BASE_URL, *, apply=True):
    """Combine previously created filters to control node and edge selection.

    Args:
        filter_name (str): Name for new filter.
        filter_list (list): List of names of filters to combine.
        type (str): Type of composition, requiring ALL (default) or ANY filters to pass.
        hide (bool): Whether to hide filtered out nodes and edges. Default is False.
        network (SUID or str or None): Name or SUID of the network. Default is the "current" network.
        base_url (str): Ignore unless you need to specify a custom domain,
            port or version to connect to the CyREST API. Default is http://localhost:1234
            and the latest version of the CyREST API supported by this version of py4cytoscape.
        apply (bool): True to execute filter immediately; False to define filter but not execute it

    Returns:
        dict: {'nodes': <node list>, 'edges': <edge list>} of selection after the filter executes; None if not applied

    Raises:
        CyError: if the filter list names fewer than two filters, names a filter that doesn't exist,
            or the filter couldn't be applied
        requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error

    Examples:
        >>> create_composite_filter('New Filter', ['degree filter 1x', 'degree filter 2x'])
        {'nodes': ['YDR395W', 'YLR362W', 'YPL248C', 'YGL035C'], 'edges': None}
        >>> create_composite_filter('New Filter', ['degree filter 1x', 'column filter 10x'], type='ANY')
        {'nodes': ['YDR395W', 'YLR362W', 'YPL248C', 'YGL035C'], 'edges': [{'YPR119W (pd) YMR043W'}]}
    """
    networks.set_current_network(network, base_url=base_url)
    if len(filter_list) < 2:
        raise CyError(f'Filter list "{filter_list}" is invalid. Must provide a list of two or more filter names, e.g., ["filter1", "filter2"]')
    # Resolve each named filter into its first transformer definition; a name
    # that Cytoscape doesn't know yields None, which we report below.
    trans_list = []
    for name in filter_list:
        found = commands.commands_post('filter get name="' + name + '"', base_url=base_url)
        trans_list.append(found[0]['transformers'][0] if found else None)
    if None in trans_list:
        raise CyError('Filter name "%s" does not exist' % (filter_list[trans_list.index(None)]))
    cmd_json = {'id': 'CompositeFilter', 'parameters': {'type': type}, 'transformers': trans_list}
    cmd_body = {'name': filter_name, 'json': json.dumps(cmd_json)}
    return _create_filter_and_finish('commands/filter/create', cmd_body, hide, apply, network, base_url)
@cy_log
def get_filter_list(base_url=DEFAULT_BASE_URL):
    """Retrieve the list of named filters in the current session.

    Args:
        base_url (str): Ignore unless you need to specify a custom domain,
            port or version to connect to the CyREST API. Default is http://localhost:1234
            and the latest version of the CyREST API supported by this version of py4cytoscape.

    Returns:
        list: available filter names

    Raises:
        requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error

    Examples:
        >>> get_filter_list()
        ['degree filter 1x', 'degree filter 2x']
    """
    return commands.commands_post('filter list', base_url=base_url)
@cy_log
def export_filters(filename='filters.json', base_url=DEFAULT_BASE_URL, *, overwrite_file=True):
    """Saves filters to file in JSON format.

    Args:
        filename (str): Full path or path relative to current working directory, in addition to the name of the file. Default is "filters.json".
        base_url (str): Ignore unless you need to specify a custom domain,
            port or version to connect to the CyREST API. Default is http://localhost:1234
            and the latest version of the CyREST API supported by this version of py4cytoscape.
        overwrite_file (bool): False allows an error to be generated if the file already exists;
            True allows Cytoscape to overwrite it without asking

    Returns:
        list: []

    Raises:
        CyError: if the file already exists and overwrite_file is False
        requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error

    Examples:
        >>> export_filters() # Saves all filters in file 'filters.json'
        []
        >>> export_filters('test.json') # Saves all filters in file 'test.json'
        []
        >>> export_filters('test') # Saves all filters in file 'test.json'
        []
        >>> export_filters('test', overwrite_file=False) # Save filters only if test.json doesn't already exist
        []
    """
    ext = '.json'
    # Append the .json extension if the caller didn't supply one.
    if re.search(ext + '$', filename) is None:
        filename += ext
    file_info = sandbox.sandbox_get_file_info(filename, base_url=base_url)
    if len(file_info['modifiedTime']) and file_info['isFile']:
        if overwrite_file:
            narrate('This file has been overwritten.')
        else:
            # Bug fix: the message previously contained no placeholder, so the
            # offending file was never named in the error.
            raise CyError(f'File "{filename}" already exists ... filters not saved.')
    full_filename = file_info['filePath']
    res = commands.commands_get(f'filter export file="{full_filename}"', base_url=base_url)
    return res
@cy_log
def import_filters(filename, base_url=DEFAULT_BASE_URL):
    """Load filters from a file in JSON format.

    Adds filters to whatever filters already exist, renaming on name collisions,
    and executes each filter.

    Note:
        To load a filter file from cloud storage, use the file's URL and the ``sandbox_url_to``
        function to download the file to a sandbox, then ``import_filters`` from there.

    Args:
        filename (str): Path and name of the filters file to load.
        base_url (str): Ignore unless you need to specify a custom domain,
            port or version to connect to the CyREST API. Default is http://localhost:1234
            and the latest version of the CyREST API supported by this version of py4cytoscape.

    Returns:
        list: []

    Raises:
        requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error

    Examples:
        >>> import_filters('test.json') # Fetches filters in file 'test.json'
        []
        >>> import_filters('test') # Fetches filters in file 'test'
        []
    """
    command = f'filter import file="{get_abs_sandbox_path(filename)}"'
    res = commands.commands_get(command, base_url=base_url)
    # Imported filters execute asynchronously; waiting here works around a
    # race-condition bug in Cytoscape.
    time.sleep(CATCHUP_FILTER_SECS)
    return res
def _create_filter_and_finish(cmd, cmd_body, hide, apply, network, base_url):
AUTO_APPLY_THRESHOLD = 100000
if check_supported_versions(cytoscape='3.9') is None:
cmd_body['apply'] = apply
res = commands.cyrest_post(cmd, body=cmd_body, base_url=base_url)
else:
# Before Cytoscape 3.9, the filter was automatically applied when it was created unless
# the total of nodes and edges was 100,000 or more. So, we create the filter and then
# consider applying it if it wasn't automatically applied already.
res = commands.cyrest_post(cmd, body=cmd_body, base_url=base_url)
if networks.get_node_count(network=network, base_url=base_url) \
+ networks.get_edge_count(network=network, base_url=base_url) > AUTO_APPLY_THRESHOLD:
if apply:
show_error('Warning -- Cytoscape version pre-3.9 in | |
<reponame>adamrfox/rbk_nas_report
#!/usr/bin/python
from __future__ import print_function
import rubrik_cdm
import sys
import os
import getopt
import getpass
import urllib3
urllib3.disable_warnings()
import datetime
import pytz
import time
import threading
try:
import queue
except ImportError:
import Queue as queue
import shutil
from random import randrange
from pprint import pprint
def python_input(message):
    """Prompt the user for a line of input, portable across Python 2 and 3."""
    # Python 3 renamed raw_input() to input(); pick the right builtin lazily so
    # the unused name is never evaluated.
    prompt = input if int(sys.version[0]) > 2 else raw_input
    return (prompt(message))
def walk_tree(rubrik, id, inc_date, delim, path, parent, files_to_restore, outfile):
    # Walk one directory of fileset snapshot `id` via the CDM browse API,
    # writing a "path,size" row for every file modified after inc_date (epoch
    # seconds) into a per-directory '<outfile><mangled_path>.part' file, and
    # queueing a new walk_tree thread (global job_queue) for each subdirectory.
    # Finished part files are pushed onto the global `parts` queue.
    # NOTE(review): depends on module globals rubrik_cluster, VERBOSE, timeout,
    # job_queue, large_trees and parts initialised by the __main__ block.
    offset = 0        # paging cursor into the browse API
    done = False
    file_count = 0    # entries seen in the most recent page
    # Derive a filesystem-safe file name for this directory's .part output.
    if delim == "\\" and path == "/":
        job_path = path.split(path)   # NOTE(review): yields ['', ''] — presumably "no path components"; confirm
    else:
        job_path = path.split(delim)
    job_path_s = '_'.join(job_path)
    job_path_s = job_path_s.replace(':', '_')   # Windows drive letters would break file names
    job_id = str(outfile) + str(job_path_s) + '.part'
    fh = open(job_id, "w")
    while not done:
        # Spread API load by picking a random cluster node for each page.
        job_ptr = randrange(len(rubrik_cluster))
        params = {"path": path, "offset": offset}
        if offset == 0:
            if VERBOSE:
                print("Starting job " + path + " on " + rubrik_cluster[job_ptr]['name'])
            else:
                print(' . ', end='')   # progress dots in quiet mode
        rbk_walk = rubrik_cluster[job_ptr]['session'].get('v1', '/fileset/snapshot/' + str(id) + '/browse',
                                                          params=params, timeout=timeout)
        file_count = 0
        for dir_ent in rbk_walk['data']:
            offset += 1
            file_count += 1
            if dir_ent == parent:
                # NOTE(review): early return leaves fh open and never queues the
                # .part file — confirm this guard is actually reachable.
                return
            if dir_ent['fileMode'] == "file":
                # Timestamps arrive as ISO-8601 with a 5-char suffix stripped here.
                file_date_dt = datetime.datetime.strptime(dir_ent['lastModified'][:-5], "%Y-%m-%dT%H:%M:%S")
                file_date_epoch = (file_date_dt - datetime.datetime(1970, 1, 1)).total_seconds()
                # dprint("FILE: " + str(dir_ent['filename'] + " : " + str(file_date_epoch) + " : " + str(inc_date)))
                if file_date_epoch > inc_date:
                    if path != delim:
                        # files_to_restore.append(path + delim + dir_ent['filename'])
                        oprint(path + delim + str(dir_ent['filename']) + "," + str(dir_ent['size']), fh)
                    else:
                        # At the root, path already ends with the delimiter.
                        # files_to_restore.append(path + dir_ent['filename'])
                        oprint(path + str(dir_ent['filename']) + "," + str(dir_ent['size']), fh)
            elif dir_ent['fileMode'] == "directory" or dir_ent['fileMode'] == "drive":
                # Build the child path; a Windows "drive" entry restarts the path.
                if dir_ent['fileMode'] == "drive":
                    new_path = dir_ent['filename']
                elif delim == "/":
                    if path == "/":
                        new_path = "/" + dir_ent['path']
                    else:
                        new_path = path + "/" + dir_ent['path']
                else:
                    if path == "\\":
                        new_path = "\\" + dir_ent['path']
                    else:
                        new_path = path + "\\" + dir_ent['path']
                # Recurse via the worker pool instead of direct recursion:
                # files_to_restore = walk_tree(rubrik, id, inc_date, delim, new_path, dir_ent, files_to_restore)
                job_queue.put(threading.Thread(name=new_path, target=walk_tree, args=(
                    rubrik, id, inc_date, delim, new_path, dir_ent, files_to_restore, outfile)))
        if not rbk_walk['hasMore']:
            done = True
        if file_count == 200000:
            # presumably the API's maximum page size — flag oversized directories; TODO confirm
            large_trees.put(path)
    fh.close()
    parts.put(job_id)   # hand the finished part file to the consolidator thread
def generate_report(parts, outfile, LOG_FORMAT):
    # Consolidator thread: append finished '.part' files to the final report
    # ('<outfile>.csv' or '<outfile>.log') as they arrive on the `parts` queue.
    # Runs until the queue is drained AND the global exit_event is set.
    if LOG_FORMAT == "log":
        # 'log' reports begin with the job-activity header written by
        # log_job_activity() into '<outfile>.head'.
        ofh = open(outfile + '.' + LOG_FORMAT, 'wb')
        with open(outfile + '.head', 'rb') as hfh:
            shutil.copyfileobj(hfh, ofh)
            hfh.close()   # redundant inside `with`, kept as-is
        ofh.close()
    else:
        # csv: just truncate/create the output file.
        ofh = open(outfile + '.' + LOG_FORMAT, 'w')
        ofh.close()
    while True:
        if parts.empty():
            time.sleep(10)   # idle poll; only exit when idle and told to stop
            if exit_event.is_set():
                break
            else:
                continue
        name = parts.get()
        dprint("CONSOLIDATING " + name)
        with open(name, 'rb') as rfh:
            with open(outfile + '.' + LOG_FORMAT, 'ab') as wfh:
                shutil.copyfileobj(rfh, wfh)
                rfh.close()   # redundant inside `with`, kept as-is
                wfh.close()
        if not DEBUG:
            # Part files are kept for inspection in debug mode.
            # NOTE(review): this dprint can never emit — dprint only logs when DEBUG is set.
            dprint("Deleting " + name)
            os.remove(name)
def get_job_time(snap_list, id):
    """Return the time string recorded for snapshot job *id*, or '' if absent."""
    dprint("JOB=" + id)
    # Each entry is (snapshot_id, time_string); first match wins.
    for entry in snap_list:
        if entry[0] == id:
            return (entry[1])
    return ("")
def dprint(message):
    """Append *message* to the debug log file when the global DEBUG flag is on."""
    if DEBUG:
        with open(debug_log, 'a') as dfh:
            dfh.write(message + "\n")
    return ()
def oprint(message, fh):
    """Write *message* plus a newline to file handle *fh*, or stdout when fh is falsy."""
    if fh:
        fh.write(message + "\n")
    else:
        print(message)
def log_clean(name):
    """Remove leftover '<name>*.part' / '<name>*.head' work files from the cwd."""
    stale = [entry for entry in os.listdir('.')
             if entry.startswith(name) and (entry.endswith('.part') or entry.endswith('.head'))]
    for entry in stale:
        os.remove(entry)
def get_rubrik_nodes(rubrik, user, password, token):
    """Open an API session to every Management interface on the cluster.

    Returns a list of {'session': <Connect>, 'name': <node name>} dicts;
    nodes that refuse a connection are reported to stderr and skipped.
    """
    node_list = []
    cluster_network = rubrik.get('internal', '/cluster/me/network_interface')
    for iface in cluster_network['data']:
        if iface['interfaceType'] != "Management":
            continue
        addr = iface['ipAddresses'][0]
        try:
            if token:
                rbk_session = rubrik_cdm.Connect(addr, api_token=token)
            else:
                rbk_session = rubrik_cdm.Connect(addr, user, password)
        except Exception as e:
            sys.stderr.write("Error on " + addr + ": " + str(e) + ". Skipping\n")
            continue
        # Field name differs across CDM releases: 'nodeName' vs 'node'.
        try:
            node_list.append({'session': rbk_session, 'name': iface['nodeName']})
        except KeyError:
            node_list.append({'session': rbk_session, 'name': iface['node']})
    return (node_list)
def log_job_activity(rubrik, outfile, fs_id, snap_data):
    # Find the Backup event series that produced the snapshot described by
    # snap_data (expects snap_data[1] to be a "YYYY-MM-DD HH:MM:SS" time) and
    # write its summary plus per-event messages to '<outfile>.head', which
    # generate_report() later prepends to 'log'-format reports.
    ev_series_id = ""
    event_series_id_save = ""
    dprint(str(snap_data))
    snap_time_dt = datetime.datetime.strptime(snap_data[1], "%Y-%m-%d %H:%M:%S")
    snap_time_epoch = (snap_time_dt - datetime.datetime(1970, 1, 1)).total_seconds()
    dprint(str(snap_time_epoch))
    events = rubrik.get('v1', '/event/latest?limit=1024&event_type=Backup&object_ids=' + str(fs_id), timeout=timeout)
    # presumably events arrive newest-first: walk until we pass the snapshot
    # time, then take the previously-seen (first at-or-after) series id — TODO confirm
    for ev in events['data']:
        if ev['latestEvent']['eventType'] != "Backup" or ev['eventSeriesStatus'] not in (
                'Success', 'Failure', 'SuccessWithWarnings'):
            continue
        ev_dt = datetime.datetime.strptime(ev['latestEvent']['time'][:-5], "%Y-%m-%dT%H:%M:%S")
        ev_dt_epoch = (ev_dt - datetime.datetime(1970, 1, 1)).total_seconds()
        dprint("EV_DT: " + str(ev_dt_epoch))
        if ev_dt_epoch < snap_time_epoch:
            ev_series_id = event_series_id_save
            dprint("selected")
            break
        else:
            event_series_id_save = ev['latestEvent']['eventSeriesId']
    if not ev_series_id:
        # Never crossed the snapshot time: fall back to the last candidate seen.
        ev_series_id = event_series_id_save
    dprint("EVENT_SERIES_ID: " + ev_series_id)
    if ev_series_id:
        event_series = rubrik.get('v1', '/event_series/' + str(ev_series_id), timeout=timeout)
        hfp = open(outfile + '.head', "w")
        hfp.write('Backup:' + event_series['location'] + '\n')
        hfp.write('Started: ' + event_series['startTime'][:-5] + '\n')
        hfp.write('Ended: ' + event_series['endTime'][:-5] + '\n')
        hfp.write('Duration: ' + event_series['duration'] + '\n')
        hfp.write('Logical Size: ' + str(event_series['logicalSize']) + '\n')
        hfp.write('Throughput: ' + str(event_series['throughput']) + ' Bps\n\n')
        # Detail list is newest-first; reverse for chronological output.
        for e in reversed(event_series['eventDetailList']):
            e_dt = datetime.datetime.strptime(e['time'][:-5], "%Y-%m-%dT%H:%M:%S")
            e_dt_s = datetime.datetime.strftime(e_dt, "%Y-%m-%d %H:%M:%S")
            # eventInfo is a JSON-ish string; field 3 of the quote-split is the
            # human-readable message — assumes that layout is stable; TODO confirm
            message_list = e['eventInfo'].split('"')
            message = message_list[3].replace('\\\\', '\\')
            hfp.write(e_dt_s + ' ' + e['eventSeverity'] + ' ' + message + '\n')
    else:
        hfp = open(outfile + '.head', "w")
        hfp.write("No job activity log found.")
    hfp.write('\n')
    hfp.close()
def job_queue_length(thread_list):
    """Count currently-live threads whose names appear in *thread_list*."""
    live = [t.name for t in threading.enumerate() if t.name in thread_list]
    dprint("JQD returns " + str(len(live)))
    return (len(live))
def usage():
    """Print command-line usage to stderr and exit with status 0."""
    sys.stderr.write(
        "Usage: rbk_nas_report.py [-hDrpasl] [-b backup] [-f fileset] [-c creds] [-t token] [-d date] [-m max_threads | -M thread_factor] -o outfile rubrik\n")
    sys.stderr.write("-h | --help : Prints Usage\n")
    sys.stderr.write("-D | --debug : Debug mode. Prints more information\n")
    # Typo fix: "extention" -> "extension".
    sys.stderr.write("-o | --output : Specify an output file. Don't include an extension. [REQUIRED]\n")
    sys.stderr.write("-b | --backup : Specify backup. Format is server:share for NAS, host for physical\n")
    sys.stderr.write("-f | --fileset : Specify a fileset for the share\n")
    sys.stderr.write("-c | --creds : Specify cluster credentials. Not secure. Format is user:password\n")
    sys.stderr.write("-t | --token : Use an API token instead of credentials\n")
    sys.stderr.write("-M | --thread_factor: Specify the number of threads per node [def:10]\n")
    sys.stderr.write("-m | --max_threads: Specify a maximum number of threads. Overrides thread factor.\n")
    sys.stderr.write("-p | --physical : Specify a physical fileset backup [default: NAS]\n")
    # Typo fix: "clsuter" -> "cluster".
    sys.stderr.write("-s | --single_node : Only use one node of the Rubrik cluster for API calls\n")
    sys.stderr.write("-l | --latest : Use the latest backup of the fileset\n")
    sys.stderr.write("-d | --date : Specify the exact date of the desired backup\n")
    # Typo fix: "backkup" -> "backup".
    sys.stderr.write(
        "-a | --all : Report all files in backup. Default is only files backed up in that specific backup\n")
    sys.stderr.write("rubrik : Name or IP of the Rubrik Cluster\n")
    exit(0)
if __name__ == "__main__":
backup = ""
rubrik = ""
user = ""
password = ""
fileset = ""
date = ""
latest = False
share_id = ""
restore_job = []
physical = False
snap_list = []
restore_location = ""
restore_share_id = ""
restore_host_id = ""
token = ""
DEBUG = False
VERBOSE = False
REPORT_ONLY = True
ALL_FILES = False
outfile = ""
ofh = ""
timeout = 360
rubrik_cluster = []
thread_list = []
job_queue = queue.Queue()
max_threads = 0
thread_factor = 10
debug_log = "debug_log.txt"
large_trees = queue.Queue()
parts = queue.Queue()
SINGLE_NODE = False
LOG_FORMAT = "csv"
optlist, args = getopt.getopt(sys.argv[1:], 'ab:f:c:d:hDst:o:m:M:vplsF:', ["backup=", "fileset=", "creds=", "date=",
"help", "debug", "token=", "output=",
"max_threads=",
"--physical", "--all", "--latest",
'--single_node'])
for opt, a in optlist:
if opt in ("-b", "--backup"):
backup = a
if opt in ("-f", "--fileset"):
fileset = a
if opt in ("-c", "--creds"):
user, password = a.split(":")
if opt in ("-h", "--help"):
usage()
if opt in ("-d", "--date"):
date = a
date_dt = datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S")
date_dt_s = datetime.datetime.strftime(date_dt, "%Y-%m-%d %H:%M:%S")
if opt in ("-D", "--debug"):
VERBOSE = True
DEBUG = True
dfh = open(debug_log, "w")
dfh.close()
if opt in ("-t", "--token"):
token = a
if opt in ("-o", "--outout"):
outfile = a
if opt in ('-s', '--single_node'):
SINGLE_NODE = True
if opt in ('-m', '--max_threads'):
max_threads = int(a)
if opt in ('-M', '--thread_factor'):
thread_factor = int(a)
if opt in ('-v', '--verbose'):
VERBOSE = True
if opt in ('-p', '--physical'):
physical = True
if opt in ('-a', '--all'):
ALL_FILES = True
if opt in ('-l', '--latest'):
latest = True
if opt in ('-s', '--single_node'):
SINGLE_NODE = True
if opt in ('-F', '--format'):
if a.lower() == "csv" or a.lower() == "log":
LOG_FORMAT = a.lower()
else:
sys.stderr.write("Invalid log format. Must be csv,log\n")
exit(3)
try:
rubrik_node = args[0]
except:
usage()
if not outfile:
usage()
log_clean(outfile)
if not backup:
if not physical:
backup = python_input("Backup (host:share): ")
else:
backup = python_input("Backup Host: ")
if not physical:
(host, share) = backup.split(':')
else:
host = backup
if not fileset:
fileset = python_input("Fileset: ")
if not token:
| |
<reponame>legnaleurc/wcpan.telegram
import json
from typing import List, Awaitable, Union
from tornado import httpclient as thc, web as tw, httputil as thu
from . import types, util
# URL template for every Bot API call; filled in with the bot token and method name.
_API_TEMPLATE = 'https://api.telegram.org/bot{api_token}/{api_method}'
# Union of all keyboard/markup objects accepted by the Bot API's
# ``reply_markup`` parameter.
ReplyMarkup = Union[
    types.InlineKeyboardMarkup,
    types.ReplyKeyboardMarkup,
    types.ReplyKeyboardRemove,
    types.ForceReply,
]
class BotClient(object):
def __init__(self, api_token: str) -> None:
self._api_token = api_token
if not self._api_token:
raise BotError('invalid API token')
async def get_updates(self, offset: int = None, limit: int = None,
timeout: int = None, allowed_updates: List[str] = None
) -> Awaitable[List[types.Update]]:
args = {}
if offset is not None:
args['offset'] = offset
if limit is not None:
args['limit'] = limit
if timeout is not None:
args['timeout'] = timeout
if allowed_updates is not None:
args['allowed_updates'] = allowed_updates
data = await self._get('getUpdates', args)
return [types.Update(u) for u in data]
async def set_webhook(self, url: str, certificate: types.InputFile = None,
max_connections: int = None,
allowed_updates: List[str] = None) -> Awaitable[bool]:
args = {
'url': '' if not url else str(url),
}
if certificate is not None:
args['certificate'] = certificate
if max_connections is not None:
args['max_connections'] = max_connections
if allowed_updates is not None:
args['allowed_updates'] = allowed_updates
if isinstance(certificate, types.InputFile):
data = await self._post('setWebhook', args)
else:
data = await self._get('setWebhook', args)
return data
async def delete_webhook(self) -> Awaitable[bool]:
data = await self._get('deleteWebhook')
return data
async def get_webhook_info(self) -> Awaitable[types.WebhookInfo]:
data = await self._get('getWebhookInfo')
return types.WebhookInfo(data)
async def get_me(self) -> Awaitable[types.User]:
data = await self._get('getMe')
return types.User(data)
async def send_message(self, chat_id: Union[int, str], text: str,
parse_mode: str = None,
disable_web_page_preview: bool = None,
disable_notification: bool = None,
reply_to_message_id: int = None,
reply_markup: ReplyMarkup = None
) -> Awaitable[types.Message]:
args = {
'chat_id': chat_id,
'text': text,
}
if parse_mode is not None:
args['parse_mode'] = parse_mode
if disable_web_page_preview is not None:
args['disable_web_page_preview'] = disable_web_page_preview
if disable_notification is not None:
args['disable_notification'] = disable_notification
if reply_to_message_id is not None:
args['reply_to_message_id'] = reply_to_message_id
if reply_markup is not None:
args['reply_markup'] = reply_markup
data = await self._get('sendMessage', args)
return types.Message(data)
async def forward_message(self, chat_id: Union[int, str],
from_chat_id: Union[int, str], message_id: int,
disable_notification: bool = None,
) -> Awaitable[types.Message]:
args = {
'chat_id': chat_id,
'from_chat_id': from_chat_id,
'message_id': message_id,
}
if disable_notification is not None:
args['disable_notification'] = disable_notification
data = await self._get('forwardMessage', args)
return types.Message(data)
async def send_photo(self, chat_id: Union[int, str],
photo: Union[types.InputFile, str],
caption: str = None, disable_notification: bool = None,
reply_to_message_id: int = None,
reply_markup: ReplyMarkup = None
) -> Awaitable[types.Message]:
args = {
'chat_id': chat_id,
'photo': photo,
}
if caption is not None:
args['caption'] = caption
if disable_notification is not None:
args['disable_notification'] = disable_notification
if reply_to_message_id is not None:
args['reply_to_message_id'] = reply_to_message_id
if reply_markup is not None:
args['reply_markup'] = reply_markup
if isinstance(photo, str):
data = await self._get('sendPhoto', args)
else:
data = await self._post('sendPhoto', args)
return types.Message(data)
async def send_audio(self, chat_id: Union[int, str],
audio: Union[types.InputFile, str],
caption: str = None, duration: int = None,
performer: str = None, title: str = None,
disable_notification: bool = None,
reply_to_message_id: int = None,
reply_markup: ReplyMarkup = None
) -> Awaitable[types.Message]:
args = {
'chat_id': chat_id,
'audio': audio,
}
if reply_to_message_id is not None:
args['reply_to_message_id'] = reply_to_message_id
if caption is not None:
args['caption'] = caption
if duration is not None:
args['duration'] = duration
if performer is not None:
args['performer'] = performer
if title is not None:
args['title'] = title
if disable_notification is not None:
args['disable_notification'] = disable_notification
if reply_markup is not None:
args['reply_markup'] = reply_markup
if isinstance(audio, str):
data = await self._get('sendAudio', args)
else:
data = await self._post('sendAudio', args)
return types.Message(data)
async def send_document(self, chat_id: Union[int, str],
document: Union[types.InputFile, str],
caption: str = None,
disable_notification: bool = None,
reply_to_message_id: int = None,
reply_markup: ReplyMarkup = None
) -> Awaitable[types.Message]:
args = {
'chat_id': chat_id,
'document': document,
}
if caption is not None:
args['caption'] = caption
if disable_notification is not None:
args['disable_notification'] = disable_notification
if reply_to_message_id is not None:
args['reply_to_message_id'] = reply_to_message_id
if reply_markup is not None:
args['reply_markup'] = reply_markup
if isinstance(document, str):
data = await self._get('sendDocument', args)
else:
data = await self._post('sendDocument', args)
return types.Message(data)
async def send_video(self, chat_id: Union[int, str],
video: Union[types.InputFile, str],
duration: int = None, width: int = None,
height: int = None, caption: str = None,
disable_notification: bool = None,
reply_to_message_id: int = None,
reply_markup: ReplyMarkup = None
) -> Awaitable[types.Message]:
args = {
'chat_id': chat_id,
'video': video,
}
if duration is not None:
args['duration'] = duration
if width is not None:
args['width'] = width
if height is not None:
args['height'] = height
if caption is not None:
args['caption'] = caption
if disable_notification is not None:
args['disable_notification'] = disable_notification
if reply_to_message_id is not None:
args['reply_to_message_id'] = reply_to_message_id
if reply_markup is not None:
args['reply_markup'] = reply_markup
if isinstance(video, str):
data = await self._get('sendVideo', args)
else:
data = await self._post('sendVideo', args)
return types.Message(data)
async def send_voice(self, chat_id: Union[int, str],
voice: Union[types.InputFile, str],
caption: str = None, duration: int = None,
disable_notification: bool = None,
reply_to_message_id: int = None,
reply_markup: ReplyMarkup = None
) -> Awaitable[types.Message]:
args = {
'chat_id': chat_id,
'voice': voice,
}
if reply_to_message_id is not None:
args['reply_to_message_id'] = reply_to_message_id
if caption is not None:
args['caption'] = caption
if duration is not None:
args['duration'] = duration
if disable_notification is not None:
args['disable_notification'] = disable_notification
if reply_markup is not None:
args['reply_markup'] = reply_markup
if isinstance(voice, str):
data = await self._get('sendVoice', args)
else:
data = await self._post('sendVoice', args)
return types.Message(data)
async def send_video_note(self, chat_id: Union[int, str],
video_note: Union[types.InputFile, str],
duration: int = None, length: int = None,
disable_notification: bool = None,
reply_to_message_id: int = None,
reply_markup: ReplyMarkup = None
) -> Awaitable[types.Message]:
args = {
'chat_id': chat_id,
'video_note': video_note,
}
if duration is not None:
args['duration'] = duration
if length is not None:
args['length'] = length
if disable_notification is not None:
args['disable_notification'] = disable_notification
if reply_to_message_id is not None:
args['reply_to_message_id'] = reply_to_message_id
if reply_markup is not None:
args['reply_markup'] = reply_markup
if isinstance(video_note, str):
data = await self._get('sendVideoNote', args)
else:
data = await self._post('sendVideoNote', args)
return types.Message(data)
async def send_location(self, chat_id: Union[int, str], latitude: float,
longitude: float, disable_notification: bool = None,
reply_to_message_id: int = None,
reply_markup: ReplyMarkup = None
) -> Awaitable[types.Message]:
args = {
'chat_id': chat_id,
'latitude': latitude,
'longitude': longitude,
}
if disable_notification is not None:
args['disable_notification'] = disable_notification
if reply_to_message_id is not None:
args['reply_to_message_id'] = reply_to_message_id
if reply_markup is not None:
args['reply_markup'] = reply_markup
data = await self._get('sendLocation', args)
return types.Message(data)
async def send_venue(self, chat_id: Union[int, str], latitude: float,
longitude: float, title: str, address: str,
foursquare_id: str = None,
disable_notification: bool = None,
reply_to_message_id: int = None,
reply_markup: ReplyMarkup = None
) -> Awaitable[types.Message]:
args = {
'chat_id': chat_id,
'latitude': latitude,
'longitude': longitude,
'title': title,
'address': address,
}
if foursquare_id is not None:
args['foursquare_id'] = foursquare_id
if disable_notification is not None:
args['disable_notification'] = disable_notification
if reply_to_message_id is not None:
args['reply_to_message_id'] = reply_to_message_id
if reply_markup is not None:
args['reply_markup'] = reply_markup
data = await self._get('sendVenue', args)
return types.Message(data)
async def send_contact(self, chat_id: Union[int, str], phone_number: str,
first_name: str, last_name: str = None,
disable_notification: bool = None,
reply_to_message_id: int = None,
reply_markup: ReplyMarkup = None
) -> Awaitable[types.Message]:
args = {
'chat_id': chat_id,
'phone_number': phone_number,
'first_name': first_name,
}
if last_name is not None:
args['last_name'] = last_name
if disable_notification is not None:
args['disable_notification'] = disable_notification
if reply_to_message_id is not None:
args['reply_to_message_id'] = reply_to_message_id
if reply_markup is not None:
args['reply_markup'] = reply_markup
data = await self._get('sendContact', args)
return types.Message(data)
async def send_chat_action(self, chat_id: Union[int, str],
action: str) -> Awaitable[bool]:
args = {
'chat_id': chat_id,
'action': action,
}
data = await self._get('sendChatAction', args)
return data
async def get_user_profile_photos(self, user_id: int, offset: int = None,
limit: int = None
) -> Awaitable[types.UserProfilePhotos]:
args = {
'user_id': user_id,
}
if offset is not None:
args['offset'] = offset
if limit is not None:
args['limit'] = limit
data = await self._get('getUserProfilePhotos', args)
return types.UserProfilePhotos(data)
async def get_file(self, file_id: str) -> Awaitable[types.File]:
args = {
'file_id': file_id,
}
data = await self._get('getFile', args)
return types.File(data)
async def kick_chat_member(self, chat_id: Union[int, str],
user_id: int) -> Awaitable[bool]:
args = {
| |
import math
import torch
from enum import Enum
from torch import Tensor
from typing import List, Tuple, Optional, Dict
from . import functional as F, InterpolationMode
__all__ = ["AutoAugmentPolicy", "AutoAugment", "RandAugment", "TrivialAugmentWide"]
def _apply_op(img: Tensor, op_name: str, magnitude: float,
              interpolation: InterpolationMode, fill: Optional[List[float]]):
    """Apply one named augmentation primitive to ``img`` and return the result.

    ``magnitude`` is interpreted per-op (shear angle in radians, pixel
    translation, rotation degrees, enhancement delta, etc.); unknown names
    raise ``ValueError``.
    """
    ops = {
        # Geometric ops route through F.affine / F.rotate.
        "ShearX": lambda: F.affine(img, angle=0.0, translate=[0, 0], scale=1.0,
                                   shear=[math.degrees(magnitude), 0.0],
                                   interpolation=interpolation, fill=fill),
        "ShearY": lambda: F.affine(img, angle=0.0, translate=[0, 0], scale=1.0,
                                   shear=[0.0, math.degrees(magnitude)],
                                   interpolation=interpolation, fill=fill),
        "TranslateX": lambda: F.affine(img, angle=0.0, translate=[int(magnitude), 0],
                                       scale=1.0, interpolation=interpolation,
                                       shear=[0.0, 0.0], fill=fill),
        "TranslateY": lambda: F.affine(img, angle=0.0, translate=[0, int(magnitude)],
                                       scale=1.0, interpolation=interpolation,
                                       shear=[0.0, 0.0], fill=fill),
        "Rotate": lambda: F.rotate(img, magnitude, interpolation=interpolation, fill=fill),
        # Photometric ops: magnitude is a signed delta around the identity (1.0).
        "Brightness": lambda: F.adjust_brightness(img, 1.0 + magnitude),
        "Color": lambda: F.adjust_saturation(img, 1.0 + magnitude),
        "Contrast": lambda: F.adjust_contrast(img, 1.0 + magnitude),
        "Sharpness": lambda: F.adjust_sharpness(img, 1.0 + magnitude),
        "Posterize": lambda: F.posterize(img, int(magnitude)),
        "Solarize": lambda: F.solarize(img, magnitude),
        # Parameter-free ops ignore magnitude entirely.
        "AutoContrast": lambda: F.autocontrast(img),
        "Equalize": lambda: F.equalize(img),
        "Invert": lambda: F.invert(img),
        "Identity": lambda: img,
    }
    op = ops.get(op_name)
    if op is None:
        raise ValueError("The provided operator {} is not recognized.".format(op_name))
    return op()
class AutoAugmentPolicy(Enum):
    """AutoAugment policies learned on different datasets.

    Available policies are IMAGENET, CIFAR10 and SVHN.
    """

    # Each value is the lowercase name of the dataset the policy was searched on.
    IMAGENET = "imagenet"
    CIFAR10 = "cifar10"
    SVHN = "svhn"
# FIXME: Eliminate copy-pasted code for fill standardization and _augmentation_space() by moving stuff on a base class
class AutoAugment(torch.nn.Module):
    r"""AutoAugment data augmentation method based on
    `"AutoAugment: Learning Augmentation Strategies from Data" <https://arxiv.org/pdf/1805.09501.pdf>`_.
    If the image is torch Tensor, it should be of type torch.uint8, and it is expected
    to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
    If img is PIL Image, it is expected to be in mode "RGB".
    Args:
        policy (AutoAugmentPolicy): Desired policy enum defined by
            :class:`torchvision.transforms.autoaugment.AutoAugmentPolicy`. Default is ``AutoAugmentPolicy.IMAGENET``.
        interpolation (InterpolationMode): Desired interpolation enum defined by
            :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
            If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
        fill (sequence or number, optional): Pixel fill value for the area outside the transformed
            image. If given a number, the value is used for all bands respectively.
    """
    def __init__(
        self,
        policy: AutoAugmentPolicy = AutoAugmentPolicy.IMAGENET,
        interpolation: InterpolationMode = InterpolationMode.NEAREST,
        fill: Optional[List[float]] = None
    ) -> None:
        super().__init__()
        self.policy = policy
        self.interpolation = interpolation
        self.fill = fill
        # Resolve the policy enum to its concrete sub-policy table once, up front.
        self.policies = self._get_policies(policy)
    def _get_policies(
        self,
        policy: AutoAugmentPolicy
    ) -> List[Tuple[Tuple[str, float, Optional[int]], Tuple[str, float, Optional[int]]]]:
        """Return the learned sub-policies for *policy*.
        Each entry is a pair of (op_name, probability, magnitude_id); a
        magnitude_id of None marks ops that take no magnitude (e.g. Equalize).
        Raises ValueError for an unknown policy.
        """
        if policy == AutoAugmentPolicy.IMAGENET:
            return [
                (("Posterize", 0.4, 8), ("Rotate", 0.6, 9)),
                (("Solarize", 0.6, 5), ("AutoContrast", 0.6, None)),
                (("Equalize", 0.8, None), ("Equalize", 0.6, None)),
                (("Posterize", 0.6, 7), ("Posterize", 0.6, 6)),
                (("Equalize", 0.4, None), ("Solarize", 0.2, 4)),
                (("Equalize", 0.4, None), ("Rotate", 0.8, 8)),
                (("Solarize", 0.6, 3), ("Equalize", 0.6, None)),
                (("Posterize", 0.8, 5), ("Equalize", 1.0, None)),
                (("Rotate", 0.2, 3), ("Solarize", 0.6, 8)),
                (("Equalize", 0.6, None), ("Posterize", 0.4, 6)),
                (("Rotate", 0.8, 8), ("Color", 0.4, 0)),
                (("Rotate", 0.4, 9), ("Equalize", 0.6, None)),
                (("Equalize", 0.0, None), ("Equalize", 0.8, None)),
                (("Invert", 0.6, None), ("Equalize", 1.0, None)),
                (("Color", 0.6, 4), ("Contrast", 1.0, 8)),
                (("Rotate", 0.8, 8), ("Color", 1.0, 2)),
                (("Color", 0.8, 8), ("Solarize", 0.8, 7)),
                (("Sharpness", 0.4, 7), ("Invert", 0.6, None)),
                (("ShearX", 0.6, 5), ("Equalize", 1.0, None)),
                (("Color", 0.4, 0), ("Equalize", 0.6, None)),
                (("Equalize", 0.4, None), ("Solarize", 0.2, 4)),
                (("Solarize", 0.6, 5), ("AutoContrast", 0.6, None)),
                (("Invert", 0.6, None), ("Equalize", 1.0, None)),
                (("Color", 0.6, 4), ("Contrast", 1.0, 8)),
                (("Equalize", 0.8, None), ("Equalize", 0.6, None)),
            ]
        elif policy == AutoAugmentPolicy.CIFAR10:
            return [
                (("Invert", 0.1, None), ("Contrast", 0.2, 6)),
                (("Rotate", 0.7, 2), ("TranslateX", 0.3, 9)),
                (("Sharpness", 0.8, 1), ("Sharpness", 0.9, 3)),
                (("ShearY", 0.5, 8), ("TranslateY", 0.7, 9)),
                (("AutoContrast", 0.5, None), ("Equalize", 0.9, None)),
                (("ShearY", 0.2, 7), ("Posterize", 0.3, 7)),
                (("Color", 0.4, 3), ("Brightness", 0.6, 7)),
                (("Sharpness", 0.3, 9), ("Brightness", 0.7, 9)),
                (("Equalize", 0.6, None), ("Equalize", 0.5, None)),
                (("Contrast", 0.6, 7), ("Sharpness", 0.6, 5)),
                (("Color", 0.7, 7), ("TranslateX", 0.5, 8)),
                (("Equalize", 0.3, None), ("AutoContrast", 0.4, None)),
                (("TranslateY", 0.4, 3), ("Sharpness", 0.2, 6)),
                (("Brightness", 0.9, 6), ("Color", 0.2, 8)),
                (("Solarize", 0.5, 2), ("Invert", 0.0, None)),
                (("Equalize", 0.2, None), ("AutoContrast", 0.6, None)),
                (("Equalize", 0.2, None), ("Equalize", 0.6, None)),
                (("Color", 0.9, 9), ("Equalize", 0.6, None)),
                (("AutoContrast", 0.8, None), ("Solarize", 0.2, 8)),
                (("Brightness", 0.1, 3), ("Color", 0.7, 0)),
                (("Solarize", 0.4, 5), ("AutoContrast", 0.9, None)),
                (("TranslateY", 0.9, 9), ("TranslateY", 0.7, 9)),
                (("AutoContrast", 0.9, None), ("Solarize", 0.8, 3)),
                (("Equalize", 0.8, None), ("Invert", 0.1, None)),
                (("TranslateY", 0.7, 9), ("AutoContrast", 0.9, None)),
            ]
        elif policy == AutoAugmentPolicy.SVHN:
            return [
                (("ShearX", 0.9, 4), ("Invert", 0.2, None)),
                (("ShearY", 0.9, 8), ("Invert", 0.7, None)),
                (("Equalize", 0.6, None), ("Solarize", 0.6, 6)),
                (("Invert", 0.9, None), ("Equalize", 0.6, None)),
                (("Equalize", 0.6, None), ("Rotate", 0.9, 3)),
                (("ShearX", 0.9, 4), ("AutoContrast", 0.8, None)),
                (("ShearY", 0.9, 8), ("Invert", 0.4, None)),
                (("ShearY", 0.9, 5), ("Solarize", 0.2, 6)),
                (("Invert", 0.9, None), ("AutoContrast", 0.8, None)),
                (("Equalize", 0.6, None), ("Rotate", 0.9, 3)),
                (("ShearX", 0.9, 4), ("Solarize", 0.3, 3)),
                (("ShearY", 0.8, 8), ("Invert", 0.7, None)),
                (("Equalize", 0.9, None), ("TranslateY", 0.6, 6)),
                (("Invert", 0.9, None), ("Equalize", 0.6, None)),
                (("Contrast", 0.3, 3), ("Rotate", 0.8, 4)),
                (("Invert", 0.8, None), ("TranslateY", 0.0, 2)),
                (("ShearY", 0.7, 6), ("Solarize", 0.4, 8)),
                (("Invert", 0.6, None), ("Rotate", 0.8, 4)),
                (("ShearY", 0.3, 7), ("TranslateX", 0.9, 3)),
                (("ShearX", 0.1, 6), ("Invert", 0.6, None)),
                (("Solarize", 0.7, 2), ("TranslateY", 0.6, 7)),
                (("ShearY", 0.8, 4), ("Invert", 0.8, None)),
                (("ShearX", 0.7, 9), ("TranslateY", 0.8, 3)),
                (("ShearY", 0.8, 5), ("AutoContrast", 0.7, None)),
                (("ShearX", 0.7, 2), ("Invert", 0.1, None)),
            ]
        else:
            raise ValueError("The provided policy {} is not recognized.".format(policy))
    def _augmentation_space(self, num_bins: int, image_size: List[int]) -> Dict[str, Tuple[Tensor, bool]]:
        """Return per-op (magnitudes, signed) pairs.
        *signed* ops may be applied with a negated magnitude (see forward()).
        Translate magnitudes scale with the given image size.
        """
        return {
            # op_name: (magnitudes, signed)
            "ShearX": (torch.linspace(0.0, 0.3, num_bins), True),
            "ShearY": (torch.linspace(0.0, 0.3, num_bins), True),
            "TranslateX": (torch.linspace(0.0, 150.0 / 331.0 * image_size[0], num_bins), True),
            "TranslateY": (torch.linspace(0.0, 150.0 / 331.0 * image_size[1], num_bins), True),
            "Rotate": (torch.linspace(0.0, 30.0, num_bins), True),
            "Brightness": (torch.linspace(0.0, 0.9, num_bins), True),
            "Color": (torch.linspace(0.0, 0.9, num_bins), True),
            "Contrast": (torch.linspace(0.0, 0.9, num_bins), True),
            "Sharpness": (torch.linspace(0.0, 0.9, num_bins), True),
            "Posterize": (8 - (torch.arange(num_bins) / ((num_bins - 1) / 4)).round().int(), False),
            "Solarize": (torch.linspace(256.0, 0.0, num_bins), False),
            "AutoContrast": (torch.tensor(0.0), False),
            "Equalize": (torch.tensor(0.0), False),
            "Invert": (torch.tensor(0.0), False),
        }
    @staticmethod
    def get_params(transform_num: int) -> Tuple[int, Tensor, Tensor]:
        """Get parameters for autoaugment transformation
        Returns:
            params required by the autoaugment transformation
        """
        # A random sub-policy index, plus per-op application probabilities and
        # sign coins (0 -> negate the magnitude for signed ops).
        policy_id = int(torch.randint(transform_num, (1,)).item())
        probs = torch.rand((2,))
        signs = torch.randint(2, (2,))
        return policy_id, probs, signs
    def forward(self, img: Tensor) -> Tensor:
        """
        img (PIL Image or Tensor): Image to be transformed.
        Returns:
            PIL Image or Tensor: AutoAugmented image.
        """
        fill = self.fill
        if isinstance(img, Tensor):
            # Tensor inputs need one fill value per channel.
            if isinstance(fill, (int, float)):
                fill = [float(fill)] * F.get_image_num_channels(img)
            elif fill is not None:
                fill = [float(f) for f in fill]
        transform_id, probs, signs = self.get_params(len(self.policies))
        for i, (op_name, p, magnitude_id) in enumerate(self.policies[transform_id]):
            # Each of the two ops in the sub-policy fires with its own probability.
            if probs[i] <= p:
                op_meta = self._augmentation_space(10, F.get_image_size(img))
                magnitudes, signed = op_meta[op_name]
                magnitude = float(magnitudes[magnitude_id].item()) if magnitude_id is not None else 0.0
                if signed and signs[i] == 0:
                    magnitude *= -1.0
                img = _apply_op(img, op_name, magnitude, interpolation=self.interpolation, fill=fill)
        return img
    def __repr__(self) -> str:
        return self.__class__.__name__ + '(policy={}, fill={})'.format(self.policy, self.fill)
class RandAugment(torch.nn.Module):
r"""RandAugment data augmentation method based on
`"RandAugment: Practical automated data augmentation with a reduced search space"
<https://arxiv.org/abs/1909.13719>`_.
If the image is torch Tensor, it should be of type torch.uint8, and it is expected
to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions.
If img is PIL Image, it is expected to be in mode "RGB".
Args:
num_ops (int): Number of augmentation transformations to apply sequentially.
magnitude (int): Magnitude for all the transformations.
num_magnitude_bins (int): The number of different magnitude values.
interpolation (InterpolationMode): Desired interpolation enum defined by
:class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``.
If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported.
fill (sequence or number, optional): Pixel fill value for the area outside the transformed
image. If given a number, the value is used for all bands respectively.
"""
def __init__(self, num_ops: int = 2, magnitude: int = 9, num_magnitude_bins: int = 31,
interpolation: InterpolationMode = InterpolationMode.NEAREST,
| |
0x1FFFF), (0x2FFFE, 0x2FFFF),
(0x3FFFE, 0x3FFFF), (0x4FFFE, 0x4FFFF),
(0x5FFFE, 0x5FFFF), (0x6FFFE, 0x6FFFF),
(0x7FFFE, 0x7FFFF), (0x8FFFE, 0x8FFFF),
(0x9FFFE, 0x9FFFF), (0xAFFFE, 0xAFFFF),
(0xBFFFE, 0xBFFFF), (0xCFFFE, 0xCFFFF),
(0xDFFFE, 0xDFFFF), (0xEFFFE, 0xEFFFF),
(0xFFFFE, 0xFFFFF), (0x10FFFE, 0x10FFFF)])
# yapf: enable
# Build "low-high" character ranges for every illegal XML codepoint and join
# them into one character-class regex used by escape_illegal_xmlchars().
_illegal_ranges = ["%s-%s" % (unichr(low), unichr(high)) for (low, high) in _illegal_unichrs]
_illegal_xml_chars_RE = re.compile(u'[%s]' % u''.join(_illegal_ranges))
def ill_cp_escaper(m):
    # type: (Match) -> str
    """Return the \\x / \\u / \\U escape for the single character matched by *m*."""
    cp = ord(m.group(0))
    # Choose the shortest escape form that can represent the codepoint.
    if cp >= 0x10000:
        return u"\\U%06x" % cp
    if cp >= 0x100:
        return u"\\u%04x" % cp
    return u"\\x%02x" % cp
def ill_cp_unescaper(m):
    # type: (Match[str]) -> str
    """Map an escape body like ``x41``/``u0101``/``U010001`` (group 1) back to its character."""
    hexdigits = m.group(1)[1:]  # strip the leading x/u/U marker
    return unichr(int(hexdigits, 16))
def escape_illegal_xmlchars(text):
    # type: (str) -> str
    r"""Escape illegal XML characters as \x, \u and \U followed by the hexadecimal codepoint.
    """
    # Escape literal \x, \u, \U sequences up front so that
    # unescape_illegal_xmlchars() can later restore the original text exactly.
    escaped = re.sub(r'\\([xuU])', r'\\x5c\1', text)
    return re.sub(_illegal_xml_chars_RE, ill_cp_escaper, escaped)
def unescape_illegal_xmlchars(text):
    # type: (str) -> str
    r"""Reverse escape_illegal_xmlchars: turn \xNN, \uNNNN and \UNNNNNN escapes
    back into the characters they encode.
    """
    # Match only hexadecimal digits after the x/u/U marker.  The previous
    # class [0-9a-zA-Z] also matched text such as "\xzz" that merely looked
    # like an escape, making ill_cp_unescaper crash with ValueError on
    # int("zz", 16).  The escaper only ever emits hex digits, so no valid
    # escape is lost by tightening the class.
    return re.sub(r'\\(x[0-9a-fA-F]{2}|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{6})', ill_cp_unescaper,
                  text)
def translate_non_sgml_chars(data, enc='utf-8'):
    # type: (bytes, str) -> bytes
    """Replace non-SGML control characters in *data*, preferring the
    windows-1252 interpretation for the 0x7f-0x9f range when it decodes."""
    def replace_non_sgml(m):
        # type: (Match) -> str
        cp = ord(m.group(0))
        if 127 <= cp <= 159:
            # Try the windows-1252 meaning of this byte first.
            try:
                return int2byte(cp).decode('windows-1252')
            except UnicodeDecodeError:
                pass
        # Unicode Character 'REPLACEMENT CHARACTER'
        return u'\ufffd'
    decoded = data.decode(enc, 'replace')
    cleaned = re.sub(unistr(r'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\x9f]'), replace_non_sgml, decoded)
    return cleaned.encode(enc, 'replace')
def surrdecode(s, enc='utf-8'):
    # type: (bytes, str) -> str
    """Decode *s* with *enc*, substituting malformed bytes instead of raising."""
    text = s.decode(enc, 'replace')
    return text
def surrencode(s, enc='utf-8'):
    # type: (str, str) -> bytes
    """Encode *s* with *enc*, substituting unencodable characters instead of raising."""
    return s.encode(enc, 'replace')
def tree_from_html(htmldata, enc='utf-8'):
    # type: (str, str) -> ETree.Element
    """Parse HTML (as produced by difflib) into an ElementTree element,
    escaping characters that are illegal in XML first."""
    text = htmldata
    text = escape_illegal_xmlchars(text)
    if NARROW_BUILD:
        # Remove lonely surrogate halfs
        text = u''.join(iterchars(text))
    # Drop the default namespace once so find()/findall() paths stay short.
    text = re.sub(' xmlns="[^"]+"', '', text, count=1)
    # NOTE(review): the first argument looks like a non-breaking space being
    # normalized to a plain space - confirm it is u'\xa0' in the original file.
    text = text.replace(' ', ' ')
    btext = bytestr('<?xml version="1.0" encoding="%s"?>\n' % enc) + surrencode(text, enc)
    tree = ETree.fromstring(btext)
    return tree
# The LINENRSEP must not be anything that appears in the line number column of the HTML
# generated by difflib which are digits and the line continuation character '>'.
LINENRSEP = '|'
# Cached length, used when slicing the line-number prefix off diff lines.
LINENRSEP_LEN = len(LINENRSEP)
def htmldiff2ansi(htmldata, enc, linenumbers=False, fp=None):
    # type: (str, str, bool, Optional[IO[Any]]) -> None
    """Render difflib's side-by-side HTML diff as ANSI-colored text on *fp*."""
    tree = tree_from_html(htmldata, enc=enc)
    # CSS class of a diff span -> ANSI background color value.
    difftype_colors = {
        'diff_add': GREEN + BACKGROUNDCOLOR_OFFSET,
        'diff_chg': CYAN + BACKGROUNDCOLOR_OFFSET,
        'diff_sub': RED + BACKGROUNDCOLOR_OFFSET,
    }
    def emit(text, colorvalue=None):
        # type: (Union[str, bytes, None], Optional[int]) -> None
        # Append *text* to the current line (colored) and rawline (uncolored).
        if text:
            if isinstance(text, binary_type):
                s = text.decode(enc, 'replace')  # type: str
            else:
                s = text
            # Prevent remaining newlines from messing up the side by side layout.
            s = s.replace('\n', '')
            # raw is only used for counting characters.
            rawline.append(unescape_ill_surrencode(s).decode(enc, 'replace'))
            if colorvalue is not None:
                s = ansicolor(colorvalue, s)
            line.append(s)
    line = []  # type: List[str]
    rawline = []  # type: List[str]
    for table in tree.findall('body/table'):
        if table.attrib.get('summary') == 'Legends':
            continue
        headers = [], []  # type: Tuple[List[str], List[str]]
        for sideidx, th in enumerate(table.findall("thead//th[@class='diff_header']")):
            line = []
            rawline = []
            for t in th.findall(".//*"):
                emit(t.text)
                emit(t.tail)
            headers[sideidx].extend(line)  # type: ignore
        # Equalize number of left and right header rows
        headerline_diff = len(headers[0]) - len(headers[1])
        if headerline_diff < 0:
            headers[0].extend([''] * -headerline_diff)
        elif headerline_diff > 0:
            headers[1].extend([''] * headerline_diff)
        # Display the style differencs before the diff hunks.
        # Every header line gets a LINENRSEP prefix to indicate that there is no line number.
        hunklines = [[(LINENRSEP + ansicolor(YELLOW, l), LINENRSEP + l) for l in side]
                     for side in headers]
        difflines = []
        tbodies = table.findall('tbody')
        for bodyidx, tbody in enumerate(tbodies):
            for tr in tbody.findall('tr'):
                for tdidx, td in enumerate(tr.findall('td')):
                    if td.attrib.get('class') == 'diff_header':
                        line, rawline = [], []
                        lnrcolumn = unistr(td.text or '')  # type: ignore
                        # Always display the line continuation character but the
                        # linenumber only if requested.
                        if lnrcolumn and (linenumbers or not re_number.match(lnrcolumn)):
                            emit(lnrcolumn)
                        # The LINENRSEP marks the end of the line number in the plain text.
                        emit(LINENRSEP)
                    if td.attrib.get('nowrap') == 'nowrap':
                        # Columns 0-2 belong to the left side, 3+ to the right.
                        sideidx = 0 if tdidx < 3 else 1
                        emit(td.text)
                        for t in td.findall('span'):
                            cls = unistr(t.attrib.get('class'))
                            emit(t.text, difftype_colors.get(cls))
                            emit(t.tail)
                        hunklines[sideidx].append((''.join(line), ''.join(rawline)))
            difflines.append(hunklines)
            hunklines = [[], []]
        emit_hunks(difflines, enc, fp)
        outline(end=os.linesep, fp=fp)
def unescape_ill_surrencode(text, enc='utf-8'):
    # type: (str, str) -> bytes
    """Undo the illegal-XML-char escaping in *text* and encode the result with *enc*."""
    unescaped = unescape_illegal_xmlchars(text)
    return surrencode(unescaped, enc=enc)
def soutline(s='', enc='utf-8', fp=None):
    # type: (str, str, Optional[IO[Any]]) -> None
    """Unescape and encode *s*, then write it plus a newline to *fp*."""
    payload = unescape_ill_surrencode(s, enc=enc)
    write(payload + b'\n', fp=fp)
def emit_hunks(all_difflines, enc='utf-8', fp=None):
    # type: (List[List[List[TextPair]]], str, Optional[IO[Any]]) -> None
    """Writes the diff lines to fp.
    all_difflines is a list of hunks.
    Each hunk is a pair (left, right) of lists of linepairs (ansicoloredline, rawline).
    """
    def lineheaderlen(text):
        # type: (str) -> int
        # Length of the line-number prefix, or -1 when no LINENRSEP is present.
        return text.find(LINENRSEP)
    def difflinelen(text):
        # type: (str) -> int
        # Visible length of the diff content excluding the line-number prefix.
        lhlen = lineheaderlen(text)
        if lhlen >= 0:
            return unilen(text) - lhlen - LINENRSEP_LEN
        # Every line should contain a LINENRSEP character after the optional line number.
        # In case there isn't one, the normal length is used.
        return unilen(text)
    # First pass: find the widest line-number column and content column per side.
    len_l = 0
    len_r = 0
    lhlen_l = 0
    lhlen_r = 0
    centerpos = 10
    for difflines in all_difflines:
        fromlines, tolines = difflines
        lhlen_l = max([lhlen_l] + [lineheaderlen(rawline) for line, rawline in fromlines])
        lhlen_r = max([lhlen_r] + [lineheaderlen(rawline) for line, rawline in tolines])
        len_l = max([len_l] + [difflinelen(rawline) for line, rawline in fromlines])
        len_r = max([len_r] + [difflinelen(rawline) for line, rawline in tolines])
    # %-formats that pad a line number to the width of its column.
    lhl_fmt = ' %%%ds ' % lhlen_l
    lhr_fmt = ' %%%ds ' % lhlen_r
    maxlen = max(centerpos, len_l)
    width = maxlen + len_r
    sepcolor = BACKGROUNDCOLOR_OFFSET + BLUE
    sep1 = ansicolor(sepcolor, ' ')
    sep_l = ansicolor(sepcolor, lhl_fmt % '')
    sep_r = ansicolor(sepcolor, lhr_fmt % '')
    diffseparator = ansicolor(sepcolor, ' ' * (width + 1) + lhl_fmt % '' + lhr_fmt % '')
    mgcol = BACKGROUNDCOLOR_OFFSET + MAGENTA
    hunkseparator = (
        sep_l + ansicolor(mgcol, ' ' * maxlen) + sep_r + ansicolor(mgcol, ' ' * len_r) + sep1)
    # Second pass: emit each hunk, padding both sides to the computed widths.
    for hunkidx, difflines in enumerate(all_difflines):
        if hunkidx == 0:
            soutline(diffseparator, enc=enc, fp=fp)
        elif hunkidx >= 1:
            soutline(hunkseparator, enc=enc, fp=fp)
        for idx, ((f, f_raw), (t, t_raw)) in enumerate(izip(*difflines)):
            linelen = difflinelen(f_raw)
            padding_length = maxlen - linelen
            padding = ' ' * max(0, padding_length)
            rpad = ' ' * (width - maxlen - difflinelen(t_raw))
            # Split off the line-number prefix (if any) on each side and
            # render it in the separator color.
            lnrsep_pos = lineheaderlen(f)
            if lnrsep_pos >= 0:
                lnr_l = ansicolor(sepcolor, lhl_fmt % f[:lnrsep_pos])
                f = f[lnrsep_pos + LINENRSEP_LEN:]
            else:
                lnr_l = sep_l
            lnrsep_pos = lineheaderlen(t)
            if lnrsep_pos >= 0:
                lnr_r = ansicolor(sepcolor, lhr_fmt % t[:lnrsep_pos])
                t = t[lnrsep_pos + LINENRSEP_LEN:]
            else:
                lnr_r = sep_r
            soutline('%s%s%s%s%s%s%s' % (lnr_l, f, padding, lnr_r, t, rpad, sep1),
                     enc=enc,
                     fp=fp)
        if hunkidx == len(all_difflines) - 1:
            soutline(diffseparator, enc=enc, fp=fp)
# ----------------------------------------------------------------------
def find_style(params,  # type: ParameterSet
               filenames,  # type: List[str]
               language=None  # type: Optional[str]
               ):
    # type: (...) -> Union[StyleDist, Tuple[StyleDist, StyleDist]]
    """Identify the language of *filenames* and run the style search,
    always removing the formatter's temporary files afterwards."""
    fmt = params.formatter
    fmt.identify_language(filenames, language=language)
    try:
        return find_style_for_mode(params, filenames)
    finally:
        fmt.remove_tempfiles()
def concat_files(filenames, mode, references):
    # type: (Sequence[str], str, bool) -> List[str]
    """Concatenate related input files and return the temporary filenames.
    Files are grouped according to *mode* and *references* (see the inline
    comments); each group is joined with the first file's line terminator and
    written to a content-addressed temporary file via shatempfile().
    """
    if references:
        if mode == MODE_RESILIENT:
            # Transform the files n1, r1, n2, r2, min1, rmin1, min2, rmin2, max1, rmax1,
            # max2, rmax2
            # into n1+n2, r1+r2, min1+min2+max1+max2, rmin1+rmin2+rmax1+rmax2
            # Floor division instead of int(len/3): same result, but avoids the
            # float round-trip of true division.
            numinputfiles = len(filenames) // 3
            normalfiles = concat_files(filenames[:numinputfiles], MODE_NORMAL, references)
            variantfiles = concat_files(filenames[numinputfiles:], MODE_NORMAL, references)
            return normalfiles + variantfiles
        else:
            # Transform the files normal1, ref1, normal2, ref2
            # into normal1+normal2, ref1+ref2
            inputs = [filenames[::2], filenames[1::2]]
    else:
        # Transform the files [normal1, normal2, normal3] into [normal1+normal2+normal3]
        inputs = [filenames]
    concatted_files = []
    for inputfiles in inputs:
        content = [get_cached_file(f) for f in inputfiles]
        # Join with the terminator style of the first file in the group.
        lineterm = lineterminator(content[0])
        concatted = lineterm.join(content)
        tmpfile = shatempfile(inputfiles[0], concatted)
        concatted_files.append(tmpfile)
    return concatted_files
def lineterminator(content):
    # type: (bytes) -> bytes
    """Return the first line terminator found in *content* (CRLF is preferred
    over a lone CR/LF at the same position), falling back to os.linesep."""
    match = re.search(br'(\r\n)|\n|\r', content)
    if match:
        return match.group(0)
    return bytestr(os.linesep)
def shatempfile(filename, content):
    # type: (str, bytes) -> str
    """Writes content to a temporary file whose name contains the basename of filename
    and a sha of content.
    """
    tmpname = 'whatstyle_%s_%s' % (shahex(content), os.path.basename(filename))
    tmpfile = os.path.join(tempfile.gettempdir(), tmpname)
    writebinary(tmpfile, content)
    return tmpfile
def create_variant_files(params, filenames, metric):
# type: (ParameterSet, List[str], int) -> Tuple[List[str], Optional[Style]]
"""Finds the best style for the given parameters, reformats the input
files in this style, writes the results to temporary files and returns the
list of these temporary filenames and the style that was chosen.
"""
style, bestdist = find_best_style(params, filenames, metric, | |
None,
env: Optional[pulumi.Input[Sequence[pulumi.Input['StorageClusterSpecStorkEnvArgs']]]] = None,
image: Optional[pulumi.Input[str]] = None,
lock_image: Optional[pulumi.Input[bool]] = None):
"""
Contains STORK related spec.
:param pulumi.Input[Mapping[str, Any]] args: It is map of arguments given to STORK. Example: driver: pxd
:param pulumi.Input[bool] enabled: Flag indicating whether STORK needs to be enabled.
:param pulumi.Input[Sequence[pulumi.Input['StorageClusterSpecStorkEnvArgs']]] env: List of environment variables used by STORK. This is an array of Kubernetes EnvVar where the value can be given directly or from a source like field, config map or secret.
:param pulumi.Input[str] image: Docker image of the STORK container.
:param pulumi.Input[bool] lock_image: Flag indicating if the STORK image needs to be locked to the given image. If the image is not locked, it can be updated by the storage driver during upgrades.
"""
if args is not None:
pulumi.set(__self__, "args", args)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if env is not None:
pulumi.set(__self__, "env", env)
if image is not None:
pulumi.set(__self__, "image", image)
if lock_image is not None:
pulumi.set(__self__, "lock_image", lock_image)
    # pulumi property accessors for the STORK spec fields of the enclosing
    # input type; values are stored/retrieved through pulumi.get/pulumi.set.
    @property
    @pulumi.getter
    def args(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        It is map of arguments given to STORK. Example: driver: pxd
        """
        return pulumi.get(self, "args")
    @args.setter
    def args(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "args", value)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag indicating whether STORK needs to be enabled.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter
    def env(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['StorageClusterSpecStorkEnvArgs']]]]:
        """
        List of environment variables used by STORK. This is an array of Kubernetes EnvVar where the value can be given directly or from a source like field, config map or secret.
        """
        return pulumi.get(self, "env")
    @env.setter
    def env(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['StorageClusterSpecStorkEnvArgs']]]]):
        pulumi.set(self, "env", value)
    @property
    @pulumi.getter
    def image(self) -> Optional[pulumi.Input[str]]:
        """
        Docker image of the STORK container.
        """
        return pulumi.get(self, "image")
    @image.setter
    def image(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "image", value)
    @property
    @pulumi.getter(name="lockImage")
    def lock_image(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag indicating if the STORK image needs to be locked to the given image. If the image is not locked, it can be updated by the storage driver during upgrades.
        """
        return pulumi.get(self, "lock_image")
    @lock_image.setter
    def lock_image(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "lock_image", value)
@pulumi.input_type
class StorageClusterSpecStorkEnvArgs:
    """A single STORK environment variable: a name plus either a literal value
    or a value_from source (mirrors a Kubernetes EnvVar)."""
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None,
                 value_from: Optional[pulumi.Input['StorageClusterSpecStorkEnvValueFromArgs']] = None):
        # Only set fields that were provided so absent fields stay unset.
        if name is not None:
            pulumi.set(__self__, "name", name)
        if value is not None:
            pulumi.set(__self__, "value", value)
        if value_from is not None:
            pulumi.set(__self__, "value_from", value_from)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "value")
    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
    @property
    @pulumi.getter(name="valueFrom")
    def value_from(self) -> Optional[pulumi.Input['StorageClusterSpecStorkEnvValueFromArgs']]:
        return pulumi.get(self, "value_from")
    @value_from.setter
    def value_from(self, value: Optional[pulumi.Input['StorageClusterSpecStorkEnvValueFromArgs']]):
        pulumi.set(self, "value_from", value)
@pulumi.input_type
class StorageClusterSpecStorkEnvValueFromArgs:
    """Source for an environment variable's value: a config map key, an object
    field, a container resource field or a secret key (mirrors Kubernetes
    EnvVarSource)."""
    def __init__(__self__, *,
                 config_map_key_ref: Optional[pulumi.Input['StorageClusterSpecStorkEnvValueFromConfigMapKeyRefArgs']] = None,
                 field_ref: Optional[pulumi.Input['StorageClusterSpecStorkEnvValueFromFieldRefArgs']] = None,
                 resource_field_ref: Optional[pulumi.Input['StorageClusterSpecStorkEnvValueFromResourceFieldRefArgs']] = None,
                 secret_key_ref: Optional[pulumi.Input['StorageClusterSpecStorkEnvValueFromSecretKeyRefArgs']] = None):
        # Only set fields that were provided so absent fields stay unset.
        if config_map_key_ref is not None:
            pulumi.set(__self__, "config_map_key_ref", config_map_key_ref)
        if field_ref is not None:
            pulumi.set(__self__, "field_ref", field_ref)
        if resource_field_ref is not None:
            pulumi.set(__self__, "resource_field_ref", resource_field_ref)
        if secret_key_ref is not None:
            pulumi.set(__self__, "secret_key_ref", secret_key_ref)
    @property
    @pulumi.getter(name="configMapKeyRef")
    def config_map_key_ref(self) -> Optional[pulumi.Input['StorageClusterSpecStorkEnvValueFromConfigMapKeyRefArgs']]:
        return pulumi.get(self, "config_map_key_ref")
    @config_map_key_ref.setter
    def config_map_key_ref(self, value: Optional[pulumi.Input['StorageClusterSpecStorkEnvValueFromConfigMapKeyRefArgs']]):
        pulumi.set(self, "config_map_key_ref", value)
    @property
    @pulumi.getter(name="fieldRef")
    def field_ref(self) -> Optional[pulumi.Input['StorageClusterSpecStorkEnvValueFromFieldRefArgs']]:
        return pulumi.get(self, "field_ref")
    @field_ref.setter
    def field_ref(self, value: Optional[pulumi.Input['StorageClusterSpecStorkEnvValueFromFieldRefArgs']]):
        pulumi.set(self, "field_ref", value)
    @property
    @pulumi.getter(name="resourceFieldRef")
    def resource_field_ref(self) -> Optional[pulumi.Input['StorageClusterSpecStorkEnvValueFromResourceFieldRefArgs']]:
        return pulumi.get(self, "resource_field_ref")
    @resource_field_ref.setter
    def resource_field_ref(self, value: Optional[pulumi.Input['StorageClusterSpecStorkEnvValueFromResourceFieldRefArgs']]):
        pulumi.set(self, "resource_field_ref", value)
    @property
    @pulumi.getter(name="secretKeyRef")
    def secret_key_ref(self) -> Optional[pulumi.Input['StorageClusterSpecStorkEnvValueFromSecretKeyRefArgs']]:
        return pulumi.get(self, "secret_key_ref")
    @secret_key_ref.setter
    def secret_key_ref(self, value: Optional[pulumi.Input['StorageClusterSpecStorkEnvValueFromSecretKeyRefArgs']]):
        pulumi.set(self, "secret_key_ref", value)
@pulumi.input_type
class StorageClusterSpecStorkEnvValueFromConfigMapKeyRefArgs:
    """Reference to a key in a named ConfigMap; ``optional`` marks whether the
    reference may be absent (mirrors Kubernetes ConfigMapKeySelector)."""
    def __init__(__self__, *,
                 key: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 optional: Optional[pulumi.Input[bool]] = None):
        # Only set fields that were provided so absent fields stay unset.
        if key is not None:
            pulumi.set(__self__, "key", key)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if optional is not None:
            pulumi.set(__self__, "optional", optional)
    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def optional(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "optional")
    @optional.setter
    def optional(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "optional", value)
@pulumi.input_type
class StorageClusterSpecStorkEnvValueFromFieldRefArgs:
    """Reference to an object field by path, with an optional API version
    (mirrors Kubernetes ObjectFieldSelector)."""
    def __init__(__self__, *,
                 api_version: Optional[pulumi.Input[str]] = None,
                 field_path: Optional[pulumi.Input[str]] = None):
        # Only set fields that were provided so absent fields stay unset.
        if api_version is not None:
            pulumi.set(__self__, "api_version", api_version)
        if field_path is not None:
            pulumi.set(__self__, "field_path", field_path)
    @property
    @pulumi.getter(name="apiVersion")
    def api_version(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "api_version")
    @api_version.setter
    def api_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_version", value)
    @property
    @pulumi.getter(name="fieldPath")
    def field_path(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "field_path")
    @field_path.setter
    def field_path(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "field_path", value)
@pulumi.input_type
class StorageClusterSpecStorkEnvValueFromResourceFieldRefArgs:
    """Reference to a container resource (e.g. requests/limits), optionally
    scaled by ``divisor`` (mirrors Kubernetes ResourceFieldSelector)."""
    def __init__(__self__, *,
                 container_name: Optional[pulumi.Input[str]] = None,
                 divisor: Optional[pulumi.Input[str]] = None,
                 resource: Optional[pulumi.Input[str]] = None):
        # Only set fields that were provided so absent fields stay unset.
        if container_name is not None:
            pulumi.set(__self__, "container_name", container_name)
        if divisor is not None:
            pulumi.set(__self__, "divisor", divisor)
        if resource is not None:
            pulumi.set(__self__, "resource", resource)
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "container_name")
    @container_name.setter
    def container_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "container_name", value)
    @property
    @pulumi.getter
    def divisor(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "divisor")
    @divisor.setter
    def divisor(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "divisor", value)
    @property
    @pulumi.getter
    def resource(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "resource")
    @resource.setter
    def resource(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource", value)
@pulumi.input_type
class StorageClusterSpecStorkEnvValueFromSecretKeyRefArgs:
    """Reference to a key in a named Secret; ``optional`` marks whether the
    reference may be absent (mirrors Kubernetes SecretKeySelector)."""
    def __init__(__self__, *,
                 key: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 optional: Optional[pulumi.Input[bool]] = None):
        # Only set fields that were provided so absent fields stay unset.
        if key is not None:
            pulumi.set(__self__, "key", key)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if optional is not None:
            pulumi.set(__self__, "optional", optional)
    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "key")
    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def optional(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "optional")
    @optional.setter
    def optional(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "optional", value)
@pulumi.input_type
class StorageClusterSpecUpdateStrategyArgs:
    def __init__(__self__, *,
                 rolling_update: Optional[pulumi.Input['StorageClusterSpecUpdateStrategyRollingUpdateArgs']] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        An update strategy to replace existing StorageCluster pods with new pods.
        :param pulumi.Input['StorageClusterSpecUpdateStrategyRollingUpdateArgs'] rolling_update: Spec to control the desired behavior of storage cluster rolling update.
        :param pulumi.Input[str] type: Type of storage cluster update. Can be RollingUpdate or OnDelete. Default is RollingUpdate.
        """
        # Only set fields that were provided so absent fields stay unset.
        if rolling_update is not None:
            pulumi.set(__self__, "rolling_update", rolling_update)
        if type is not None:
            pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="rollingUpdate")
    def rolling_update(self) -> Optional[pulumi.Input['StorageClusterSpecUpdateStrategyRollingUpdateArgs']]:
        """
        Spec to control the desired behavior of storage cluster rolling update.
        """
        return pulumi.get(self, "rolling_update")
    @rolling_update.setter
    def rolling_update(self, value: Optional[pulumi.Input['StorageClusterSpecUpdateStrategyRollingUpdateArgs']]):
        pulumi.set(self, "rolling_update", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        Type of storage cluster update. Can be RollingUpdate or OnDelete. Default is RollingUpdate.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class StorageClusterSpecUpdateStrategyRollingUpdateArgs:
def __init__(__self__, *,
max_unavailable: Optional[pulumi.Input[Union[int, str]]] = None):
"""
Spec to control the desired behavior of storage cluster rolling update.
:param pulumi.Input[Union[int, str]] max_unavailable: The maximum number of StorageCluster pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of StorageCluster pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the storage pod can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those StorageCluster pods and then brings up new StorageCluster pods in their place. Once the new pods are available, it then proceeds onto other StorageCluster pods, thus ensuring that at least 70% of original number of StorageCluster pods are available at all times during the update.
"""
if max_unavailable is not None:
pulumi.set(__self__, "max_unavailable", max_unavailable)
@property
@pulumi.getter(name="maxUnavailable")
def max_unavailable(self) -> Optional[pulumi.Input[Union[int, str]]]:
"""
The maximum number of StorageCluster pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of StorageCluster pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of | |
from datetime import datetime
from corehq.apps.sms.models import (CallLog, INCOMING, OUTGOING,
MessagingSubEvent, MessagingEvent)
from corehq.apps.sms.mixin import VerifiedNumber, MobileBackend
from corehq.apps.sms.util import strip_plus
from corehq.apps.smsforms.app import start_session, _get_responses
from corehq.apps.smsforms.models import XFORMS_SESSION_IVR, get_session_by_session_id
from corehq.apps.app_manager.models import Form
from corehq.apps.hqmedia.models import HQMediaMapItem
from django.http import HttpResponse
from django.conf import settings
from dimagi.utils.web import get_url_base
from touchforms.formplayer.api import current_question, TouchformsError
from corehq.apps.smsforms.app import submit_unfinished_form
from corehq.apps.smsforms.util import form_requires_input
# Event types reported by the IVR gateway for a leg of a call:
# call picked up, caller pressed digits, caller hung up.
IVR_EVENT_NEW_CALL = "NEW_CALL"
IVR_EVENT_INPUT = "INPUT"
IVR_EVENT_DISCONNECT = "DISCONNECT"
class GatewayConnectionError(Exception):
    """Raised when communication with the IVR gateway fails."""
class IVRResponseData(object):
    """Container for one precomputed IVR gateway response: the prompts to
    play, the number of digits to collect, and the touchforms session."""

    def __init__(self, ivr_responses, input_length, session):
        self.ivr_responses, self.input_length, self.session = (
            ivr_responses, input_length, session)
def convert_media_path_to_hq_url(path, app):
    """
    Resolve a jr:// multimedia path via the app's multimedia map to a full
    HQ download URL, or return None when the path is not mapped.
    """
    media = app.multimedia_map.get(path, None)
    if media is None:
        return None
    else:
        url_base = get_url_base()
        # NOTE(review): the trailing "foo.wav" gives the URL a .wav filename
        # suffix -- presumably some IVR gateways require an audio file
        # extension on the URL. Confirm before removing.
        return url_base + HQMediaMapItem.format_match_map(path, media_type=media.media_type, media_id=media.multimedia_id)["url"] + "foo.wav"
def validate_answer(answer, question):
    """
    Return True if answer is a valid response to question, False if not.
    (question is expected to be the XFormsResponse object for the question)

    Note: the previous implementation used ``assert`` statements for the
    validation logic, which are stripped when Python runs with ``-O`` --
    that would have crashed on ``int(None)`` and accepted out-of-range
    choices. Plain conditionals are used instead.
    """
    if answer is None:
        return False
    if question.event.datatype == "select":
        # Multiple choice: the answer must be the 1-based index of a choice.
        try:
            choice = int(answer)
        except (ValueError, TypeError):
            # TypeError also covered so non-numeric objects are rejected
            # instead of propagating an exception.
            return False
        return 1 <= choice <= len(question.event.choices)
    # Free-form responses: any non-None value is accepted, except blank strings.
    if isinstance(answer, basestring):
        return len(answer.strip()) > 0
    return True
def format_ivr_response(text, app):
    """Build the dict describing one IVR prompt: the text to speak, plus an
    audio URL when the prompt is a jr:// multimedia reference."""
    audio_url = None
    if text.startswith("jr://"):
        audio_url = convert_media_path_to_hq_url(text, app)
    return {
        "text_to_say": text,
        "audio_file_url": audio_url,
    }
def get_input_length(question):
    """How many digits the gateway should collect for this question:
    1 for a multiple-choice question, otherwise unlimited (None)."""
    event = question.event
    is_select = (event.type == "question" and event.datatype == "select")
    return 1 if is_select else None
def hang_up_response(gateway_session_id, backend_module=None):
    """Build the HTTP response that tells the gateway to hang up the call.

    Without a backend module we cannot speak any gateway's protocol, so an
    empty response is returned instead.
    """
    if not backend_module:
        return HttpResponse("")
    response_text = backend_module.get_http_response_string(
        gateway_session_id,
        [],
        collect_input=False,
        hang_up=True
    )
    return HttpResponse(response_text)
def add_metadata(call_log_entry, duration=None):
    """Store the gateway-reported call duration, rounded to whole seconds,
    on the call log entry. Missing or non-numeric durations are ignored."""
    try:
        call_log_entry.duration = int(round(float(duration)))
        call_log_entry.save()
    except (TypeError, ValueError):
        # duration was None or not parseable as a number; leave entry alone
        pass
def get_app_module_form(call_log_entry, logged_subevent):
    """
    Look up the form, app, and module for this call.

    Returns (app, module, form, error); on any lookup failure the error is
    logged and (None, None, None, True) is returned.
    """
    try:
        form = Form.get_form(call_log_entry.form_unique_id)
        app = form.get_app()
        module = form.get_module()
        return (app, module, form, False)
    except Exception:
        # Was a bare ``except:``, which also swallowed KeyboardInterrupt
        # and SystemExit; only real errors from the lookup are caught now.
        log_error(MessagingEvent.ERROR_CANNOT_FIND_FORM,
            call_log_entry, logged_subevent)
        return (None, None, None, True)
def start_call_session(recipient, call_log_entry, logged_subevent, app, module, form):
    """
    Open a touchforms session for this call.

    Returns (session, responses, error). ``error`` is True when the session
    could not be started or the form produced no questions; in the latter
    case the (empty) session and responses are still returned.
    """
    try:
        session, responses = start_session(recipient.domain, recipient, app,
            module, form, call_log_entry.case_id, yield_responses=True,
            session_type=XFORMS_SESSION_IVR,
            case_for_case_submission=call_log_entry.case_for_case_submission)
    except TouchformsError as e:
        additional_error_text = e.response_data.get('human_readable_message', None)
        log_error(MessagingEvent.ERROR_TOUCHFORMS_ERROR,
            call_log_entry, logged_subevent, additional_error_text=additional_error_text)
        return (None, None, True)
    if logged_subevent:
        logged_subevent.xforms_session = session
        logged_subevent.save()
    if not responses:
        log_error(MessagingEvent.ERROR_FORM_HAS_NO_QUESTIONS,
            call_log_entry, logged_subevent)
        return (session, responses, True)
    return (session, responses, False)
def get_ivr_responses_from_touchforms_responses(call_log_entry, responses, app):
    """
    Translate touchforms responses into IVR prompt dicts.

    ``responses`` is a list of XFormsResponse objects; ``app`` is the app
    the form came from. Returns a tuple
    (ivr_responses, question_constraint_failed, hang_up).
    """
    ivr_responses = []
    question_constraint_failed = False
    hang_up = False
    for response in responses:
        status = response.status
        if status == 'validation-error':
            # Replay the constraint message and count the retry.
            question_constraint_failed = True
            call_log_entry.current_question_retry_count += 1
            ivr_responses.append(format_ivr_response(response.text_prompt, app))
        elif status == 'http-error':
            # Unrecoverable: drop everything queued so far and hang up.
            ivr_responses = []
            hang_up = True
            break
        elif response.event.type == "question":
            ivr_responses.append(format_ivr_response(response.event.caption, app))
        elif response.event.type == "form-complete":
            hang_up = True
    return (ivr_responses, question_constraint_failed, hang_up)
def process_disconnect(call_log_entry):
    """Close out the touchforms session (if any) when the call ends.

    Depending on the call's configuration, either submits the partially
    completed form or marks the session ended without completion.
    """
    if call_log_entry.xforms_session_id is None:
        return
    session = get_session_by_session_id(call_log_entry.xforms_session_id)
    if session.is_open:
        if call_log_entry.submit_partial_form:
            submit_unfinished_form(session.session_id,
                call_log_entry.include_case_side_effects)
        else:
            session.end(completed=False)
            session.save()
def answer_question(call_log_entry, recipient, input_data, logged_subevent=None):
    """
    Submit the caller's input for the current question.

    Returns (responses, answer_is_valid): ``responses`` is the list of
    XFormsResponse objects from touchforms and ``answer_is_valid`` is True
    when input_data passed validation, False when it did not. Returning an
    empty responses list forces a hangup later in the workflow.
    """
    if call_log_entry.xforms_session_id is None:
        return ([], None)
    try:
        current_q = current_question(call_log_entry.xforms_session_id)
    except TouchformsError as e:
        log_touchforms_error(e, call_log_entry, logged_subevent)
        return ([], None)
    if current_q.status == 'http-error':
        log_error(MessagingEvent.ERROR_TOUCHFORMS_ERROR, call_log_entry,
            logged_subevent)
        return ([], None)
    if not validate_answer(input_data, current_q):
        # Invalid answer: re-ask the same question and count the retry.
        call_log_entry.current_question_retry_count += 1
        return ([current_q], False)
    try:
        responses = _get_responses(recipient.domain, recipient._id,
            input_data, yield_responses=True,
            session_id=call_log_entry.xforms_session_id)
    except TouchformsError as e:
        log_touchforms_error(e, call_log_entry, logged_subevent)
        return ([], None)
    return (responses, True)
def handle_known_call_session(call_log_entry, backend_module, ivr_event,
    input_data=None, logged_subevent=None):
    """
    Drive one step of an IVR call whose CallLog entry we already know.

    Dispatches on ivr_event (new call / caller input / disconnect),
    advances or closes the touchforms session accordingly, and returns the
    HttpResponse to send back to the IVR gateway.
    """
    if (ivr_event == IVR_EVENT_NEW_CALL and
        call_log_entry.use_precached_first_response):
        # This means we precached the first IVR response when we
        # initiated the call, so all we need to do is return that
        # response.
        return HttpResponse(call_log_entry.first_response)
    app, module, form, error = get_app_module_form(call_log_entry, logged_subevent)
    if error:
        return hang_up_response(call_log_entry.gateway_session_id,
            backend_module=backend_module)
    recipient = call_log_entry.recipient
    answer_is_valid = True
    if ivr_event == IVR_EVENT_NEW_CALL:
        session, responses, error = start_call_session(recipient,
            call_log_entry, logged_subevent, app, module, form)
        if error:
            return hang_up_response(call_log_entry.gateway_session_id,
                backend_module=backend_module)
        call_log_entry.xforms_session_id = session.session_id
    elif ivr_event == IVR_EVENT_INPUT:
        responses, answer_is_valid = answer_question(call_log_entry, recipient,
            input_data, logged_subevent=logged_subevent)
    else:
        # IVR_EVENT_DISCONNECT (or anything unrecognized): nothing to play.
        responses = []
    ivr_responses, question_constraint_failed, hang_up = \
        get_ivr_responses_from_touchforms_responses(call_log_entry, responses, app)
    if answer_is_valid and not question_constraint_failed:
        # If there were no validation errors (including question constraint errors),
        # then reset the current question retry count to 0.
        call_log_entry.current_question_retry_count = 0
    if (call_log_entry.max_question_retries is not None and
        call_log_entry.current_question_retry_count > call_log_entry.max_question_retries):
        # We have retried to current question too many times without
        # getting a valid answer, so force a hang-up.
        ivr_responses = []
    if len(ivr_responses) == 0:
        hang_up = True
    input_length = None
    if hang_up:
        process_disconnect(call_log_entry)
    else:
        # Set input_length to let the ivr gateway know how many digits we need to collect.
        # If the latest XFormsResponse we have was a response to a constraint error, then
        # it won't have an event, so in that case we have to get the current question again.
        if question_constraint_failed:
            current_q = current_question(call_log_entry.xforms_session_id)
        else:
            current_q = responses[-1]
        input_length = get_input_length(current_q)
    call_log_entry.save()
    return HttpResponse(
        backend_module.get_http_response_string(call_log_entry.gateway_session_id,
            ivr_responses, collect_input=(not hang_up), hang_up=hang_up,
            input_length=input_length))
def log_call(phone_number, gateway_session_id, backend_api=None):
    """Record a CallLog entry for an incoming call we have no session for.

    If the phone number matches a verified number, the call is attributed
    to that number's owner and domain.
    """
    cleaned_number = strip_plus(phone_number)
    matched_number = VerifiedNumber.by_extensive_search(cleaned_number)
    call = CallLog(
        phone_number=cleaned_number,
        direction=INCOMING,
        date=datetime.utcnow(),
        backend_api=backend_api,
        gateway_session_id=gateway_session_id,
    )
    if matched_number:
        call.domain = matched_number.domain
        call.couch_recipient_doc_type = matched_number.owner_doc_type
        call.couch_recipient = matched_number.owner_id
    call.save()
def incoming(phone_number, backend_module, gateway_session_id, ivr_event, input_data=None,
    duration=None):
    """
    The main entry point for all incoming IVR requests.
    """
    call_log_entry = CallLog.get_call_by_gateway_session_id(gateway_session_id)
    logged_subevent = None
    if call_log_entry:
        if call_log_entry.messaging_subevent_id:
            logged_subevent = MessagingSubEvent.objects.get(
                pk=call_log_entry.messaging_subevent_id)
        add_metadata(call_log_entry, duration)
        if call_log_entry.form_unique_id is None:
            # A call with no form attached: short circuit and hang up.
            return hang_up_response(gateway_session_id, backend_module=backend_module)
        if backend_module:
            return handle_known_call_session(call_log_entry, backend_module, ivr_event,
                input_data=input_data, logged_subevent=logged_subevent)
    else:
        # Unknown session: just record that the call happened.
        log_call(phone_number, gateway_session_id,
            backend_api=(backend_module.API_ID if backend_module else None))
    return hang_up_response(gateway_session_id, backend_module=backend_module)
def get_ivr_backend(recipient, verified_number=None, unverified_number=None):
    """Pick the IVR gateway backend for a recipient.

    An explicit backend on the verified number wins; otherwise the longest
    phone-number prefix that matches in settings.IVR_BACKEND_MAP decides.
    Returns None when nothing applies.
    """
    if verified_number and verified_number.ivr_backend_id:
        return MobileBackend.get(verified_number.ivr_backend_id)
    raw_number = (verified_number.phone_number if verified_number
        else unverified_number)
    phone_number = strip_plus(str(raw_number))
    # Longest prefixes first, so the most specific mapping wins.
    for prefix in sorted(settings.IVR_BACKEND_MAP.keys(), key=len, reverse=True):
        if phone_number.startswith(prefix):
            return MobileBackend.get(settings.IVR_BACKEND_MAP[prefix])
    return None
def log_error(error, call_log_entry=None, logged_subevent=None,
        additional_error_text=None):
    """Record an error code (plus optional detail text) on the call log
    entry and/or the messaging subevent, whichever are provided."""
    if call_log_entry:
        call_log_entry.error = True
        # NOTE: .get() returns None for an unknown error code, in which
        # case appending detail text would fail -- same as before.
        message = dict(MessagingEvent.ERROR_MESSAGES).get(error)
        if additional_error_text:
            message += ' %s' % additional_error_text
        call_log_entry.error_message = message
        call_log_entry.save()
    if logged_subevent:
        logged_subevent.error(error, additional_error_text=additional_error_text)
def log_touchforms_error(touchforms_error, call_log_entry=None, logged_subevent=None):
    """Log a TouchformsError, forwarding its human-readable message if any.

    touchforms_error should be an instance of TouchformsError.
    """
    detail = touchforms_error.response_data.get('human_readable_message', None)
    log_error(MessagingEvent.ERROR_TOUCHFORMS_ERROR,
        call_log_entry, logged_subevent, detail)
def get_first_ivr_response_data(recipient, call_log_entry, logged_subevent):
    """
    Precompute the first IVR response for an outbound call.

    If the form has at least one question (i.e., it isn't all labels), we
    can start the touchforms session now and cache the first response, so
    there is less time ringing once the user picks up. Forms consisting
    entirely of labels are skipped here, because starting them would
    submit the form immediately regardless of whether the user actually
    got the call.

    Returns (ivr_data, error) where ivr_data is an IVRResponseData
    instance or None.
    """
    app, module, form, error = get_app_module_form(call_log_entry,
        logged_subevent)
    if error:
        return (None, True)
    if not form_requires_input(form):
        return (None, False)
    session, responses, error = start_call_session(recipient, call_log_entry,
        logged_subevent, app, module, form)
    if error:
        return (None, True)
    ivr_responses = [format_ivr_response(response.event.caption, app)
        for response in responses]
    ivr_data = IVRResponseData(ivr_responses, get_input_length(responses[-1]),
        session)
    return (ivr_data, False)
def set_first_ivr_response(call_log_entry, gateway_session_id, ivr_data, get_response_function):
    """Attach a precached first gateway response (and the touchforms
    session id it belongs to) to the call log entry."""
    call_log_entry.xforms_session_id = ivr_data.session.session_id
    call_log_entry.use_precached_first_response = True
    response_string = get_response_function(
        gateway_session_id, ivr_data.ivr_responses, collect_input=True,
        hang_up=False, input_length=ivr_data.input_length)
    call_log_entry.first_response = response_string
def initiate_outbound_call(recipient, form_unique_id, submit_partial_form,
include_case_side_effects, max_question_retries, messaging_event_id,
verified_number=None, unverified_number=None, case_id=None,
case_for_case_submission=False, timestamp=None):
"""
Returns False if an error occurred and the call should be retried.
Returns True if the call should not be retried (either because it was
queued successfully or because an unrecoverable error occurred).
"""
call_log_entry = None
logged_event = MessagingEvent.objects.get(pk=messaging_event_id)
logged_subevent = logged_event.create_ivr_subevent(recipient,
form_unique_id, case_id=case_id)
if not verified_number and not unverified_number:
log_error(MessagingEvent.ERROR_NO_PHONE_NUMBER,
logged_subevent=logged_subevent)
return True
backend = get_ivr_backend(recipient, verified_number, unverified_number)
if not backend:
log_error(MessagingEvent.ERROR_NO_SUITABLE_GATEWAY,
logged_subevent=logged_subevent)
return True
phone_number = (verified_number.phone_number if verified_number
else unverified_number)
call_log_entry = CallLog(
couch_recipient_doc_type=recipient.doc_type,
couch_recipient=recipient.get_id,
phone_number='+%s' % str(phone_number),
direction=OUTGOING,
date=timestamp or datetime.utcnow(),
domain=recipient.domain,
form_unique_id=form_unique_id,
submit_partial_form=submit_partial_form,
include_case_side_effects=include_case_side_effects,
max_question_retries=max_question_retries,
current_question_retry_count=0,
case_id=case_id,
case_for_case_submission=case_for_case_submission,
messaging_subevent_id=logged_subevent.pk,
)
ivr_data, error = get_first_ivr_response_data(recipient,
call_log_entry, logged_subevent)
if error:
| |
'''
## Aliyun ROS FNF Construct Library
This module is part of the AliCloud ROS Cloud Development Kit (ROS CDK) project.
```typescript
import * as FNF from '@alicloud/ros-cdk-fnf';
```
'''
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from ._jsii import *
import ros_cdk_core
class Flow(
    ros_cdk_core.Resource,
    metaclass=jsii.JSIIMeta,
    jsii_type="@alicloud/ros-cdk-fnf.Flow",
):
    '''A ROS resource type: ``ALIYUN::FNF::Flow``.

    NOTE: auto-generated jsii binding; every member delegates to the
    packaged JavaScript implementation through the jsii kernel, so avoid
    hand-editing behavior here.
    '''
    def __init__(
        self,
        scope: ros_cdk_core.Construct,
        id: builtins.str,
        props: "FlowProps",
        enable_resource_property_constraint: typing.Optional[builtins.bool] = None,
    ) -> None:
        '''Create a new ``ALIYUN::FNF::Flow``.
        Param scope - scope in which this resource is defined
        Param id - scoped id of the resource
        Param props - resource properties
        :param scope: -
        :param id: -
        :param props: -
        :param enable_resource_property_constraint: -
        '''
        # Construction happens on the JavaScript side via the jsii kernel.
        jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrCreatedTime")
    def attr_created_time(self) -> ros_cdk_core.IResolvable:
        '''Attribute CreatedTime: Flow creation time.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrCreatedTime"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrId")
    def attr_id(self) -> ros_cdk_core.IResolvable:
        '''Attribute Id: The unique ID of the flow.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrId"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrLastModifiedTime")
    def attr_last_modified_time(self) -> ros_cdk_core.IResolvable:
        '''Attribute LastModifiedTime: The most recently modified time of the flow.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrLastModifiedTime"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrName")
    def attr_name(self) -> ros_cdk_core.IResolvable:
        '''Attribute Name: The name of the flow created.'''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrName"))
@jsii.data_type(
    jsii_type="@alicloud/ros-cdk-fnf.FlowProps",
    jsii_struct_bases=[],
    # Maps the Python keyword names to the ROS/jsii property names.
    name_mapping={
        "definition": "definition",
        "name": "name",
        "description": "description",
        "request_id": "requestId",
        "role_arn": "roleArn",
    },
)
class FlowProps:
    def __init__(
        self,
        *,
        definition: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        name: typing.Union[builtins.str, ros_cdk_core.IResolvable],
        description: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
        request_id: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
        role_arn: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
    ) -> None:
        '''Properties for defining a ``ALIYUN::FNF::Flow``.

        NOTE: auto-generated jsii struct; optional values are stored only
        when provided so absent keys are omitted from the template.

        :param definition: Property definition: The definition of the created flow following the FDL syntax standard.
        :param name: Property name: The name of the flow created. This name is unique under the account.
        :param description: Property description: Create a description of the flow.
        :param request_id: Property requestId: The specified Request ID for this request. If not specified, our system will help you generate a random one.
        :param role_arn: Property roleArn: Optional parameter, the resource descriptor information required for the execution of the flow, used to perform the assume role during FnF execution.
        '''
        self._values: typing.Dict[str, typing.Any] = {
            "definition": definition,
            "name": name,
        }
        if description is not None:
            self._values["description"] = description
        if request_id is not None:
            self._values["request_id"] = request_id
        if role_arn is not None:
            self._values["role_arn"] = role_arn
    @builtins.property
    def definition(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''Property definition: The definition of the created flow following the FDL syntax standard.'''
        result = self._values.get("definition")
        assert result is not None, "Required property 'definition' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
    @builtins.property
    def name(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''Property name: The name of the flow created.
        This name is unique under the account.
        '''
        result = self._values.get("name")
        assert result is not None, "Required property 'name' is missing"
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
    @builtins.property
    def description(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''Property description: Create a description of the flow.'''
        result = self._values.get("description")
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
    @builtins.property
    def request_id(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''Property requestId: The specified Request ID for this request.
        If not specified, our system will help you generate a random one.
        '''
        result = self._values.get("request_id")
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
    @builtins.property
    def role_arn(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''Property roleArn: Optional parameter, the resource descriptor information required for the execution of the flow, used to perform the assume role during FnF execution.'''
        result = self._values.get("role_arn")
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        return isinstance(rhs, self.__class__) and rhs._values == self._values
    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        return not (rhs == self)
    def __repr__(self) -> str:
        return "FlowProps(%s)" % ", ".join(
            k + "=" + repr(v) for k, v in self._values.items()
        )
class RosFlow(
    ros_cdk_core.RosResource,
    metaclass=jsii.JSIIMeta,
    jsii_type="@alicloud/ros-cdk-fnf.RosFlow",
):
    '''A ROS template type: ``ALIYUN::FNF::Flow``.

    NOTE: auto-generated jsii binding; all getters/setters are proxies
    into the JavaScript implementation via the jsii kernel.
    '''
    def __init__(
        self,
        scope: ros_cdk_core.Construct,
        id: builtins.str,
        props: "RosFlowProps",
        enable_resource_property_constraint: builtins.bool,
    ) -> None:
        '''Create a new ``ALIYUN::FNF::Flow``.
        :param scope: - scope in which this resource is defined.
        :param id: - scoped id of the resource.
        :param props: - resource properties.
        :param enable_resource_property_constraint: -
        '''
        jsii.create(self.__class__, self, [scope, id, props, enable_resource_property_constraint])
    @jsii.member(jsii_name="renderProperties")
    def _render_properties(
        self,
        props: typing.Mapping[builtins.str, typing.Any],
    ) -> typing.Mapping[builtins.str, typing.Any]:
        '''Render the resource properties for template synthesis.

        :param props: -
        '''
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))
    @jsii.python.classproperty # type: ignore[misc]
    @jsii.member(jsii_name="ROS_RESOURCE_TYPE_NAME")
    def ROS_RESOURCE_TYPE_NAME(cls) -> builtins.str:
        '''The resource type name for this resource class.'''
        return typing.cast(builtins.str, jsii.sget(cls, "ROS_RESOURCE_TYPE_NAME"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrCreatedTime")
    def attr_created_time(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: CreatedTime: Flow creation time.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrCreatedTime"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrId")
    def attr_id(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: Id: The unique ID of the flow.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrId"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrLastModifiedTime")
    def attr_last_modified_time(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: LastModifiedTime: The most recently modified time of the flow.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrLastModifiedTime"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrName")
    def attr_name(self) -> ros_cdk_core.IResolvable:
        '''
        :Attribute: Name: The name of the flow created.
        '''
        return typing.cast(ros_cdk_core.IResolvable, jsii.get(self, "attrName"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="rosProperties")
    def _ros_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "rosProperties"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="definition")
    def definition(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: definition: The definition of the created flow following the FDL syntax standard.
        '''
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "definition"))
    @definition.setter
    def definition(
        self,
        value: typing.Union[builtins.str, ros_cdk_core.IResolvable],
    ) -> None:
        jsii.set(self, "definition", value)
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="enableResourcePropertyConstraint")
    def enable_resource_property_constraint(self) -> builtins.bool:
        return typing.cast(builtins.bool, jsii.get(self, "enableResourcePropertyConstraint"))
    @enable_resource_property_constraint.setter
    def enable_resource_property_constraint(self, value: builtins.bool) -> None:
        jsii.set(self, "enableResourcePropertyConstraint", value)
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="name")
    def name(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
        '''
        :Property: name: The name of the flow created. This name is unique under the account.
        '''
        return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], jsii.get(self, "name"))
    @name.setter
    def name(self, value: typing.Union[builtins.str, ros_cdk_core.IResolvable]) -> None:
        jsii.set(self, "name", value)
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="description")
    def description(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: description: Create a description of the flow.
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "description"))
    @description.setter
    def description(
        self,
        value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "description", value)
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="requestId")
    def request_id(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: requestId: The specified Request ID for this request. If not specified, our system will help you generate a random one.
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "requestId"))
    @request_id.setter
    def request_id(
        self,
        value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "requestId", value)
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="roleArn")
    def role_arn(
        self,
    ) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
        '''
        :Property: roleArn: Optional parameter, the resource descriptor information required for the execution of the flow, used to perform the assume role during FnF execution.
        '''
        return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], jsii.get(self, "roleArn"))
    @role_arn.setter
    def role_arn(
        self,
        value: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]],
    ) -> None:
        jsii.set(self, "roleArn", value)
@jsii.data_type(
jsii_type="@alicloud/ros-cdk-fnf.RosFlowProps",
jsii_struct_bases=[],
name_mapping={
"definition": "definition",
"name": "name",
"description": "description",
"request_id": "requestId",
"role_arn": "roleArn",
},
)
class RosFlowProps:
def __init__(
self,
*,
definition: typing.Union[builtins.str, ros_cdk_core.IResolvable],
name: typing.Union[builtins.str, ros_cdk_core.IResolvable],
description: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
request_id: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
role_arn: typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]] = None,
) -> None:
'''Properties for defining a ``ALIYUN::FNF::Flow``.
:param definition:
:param name:
:param description:
:param request_id:
:param role_arn:
'''
self._values: typing.Dict[str, typing.Any] = {
"definition": definition,
"name": name,
}
if description is not None:
self._values["description"] = description
if request_id is not None:
self._values["request_id"] = request_id
if role_arn is not None:
self._values["role_arn"] = role_arn
@builtins.property
def definition(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: definition: The definition of the created flow following the FDL syntax standard.
'''
result = self._values.get("definition")
assert result is not None, "Required property 'definition' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def name(self) -> typing.Union[builtins.str, ros_cdk_core.IResolvable]:
'''
:Property: name: The name of the flow created. This name is unique under the account.
'''
result = self._values.get("name")
assert result is not None, "Required property 'name' is missing"
return typing.cast(typing.Union[builtins.str, ros_cdk_core.IResolvable], result)
@builtins.property
def description(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''
:Property: description: Create a description of the flow.
'''
result = self._values.get("description")
return typing.cast(typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]], result)
@builtins.property
def request_id(
self,
) -> typing.Optional[typing.Union[builtins.str, ros_cdk_core.IResolvable]]:
'''
:Property: requestId: The specified Request ID for this request. If not specified, our system will help you generate a random one.
| |
# TSYGANENKO module __init__.py
"""
*******************************
MODULE: tsyganenko
*******************************
This module contains the following object(s):
tsygTrace: Wraps Fortran subroutines in one convenient class
This module contains the following module(s):
tsygFort: Fortran subroutines
Written by Sebastien 2012-10
*******************************
"""
import tsygFort
class tsygTrace(object):
def __init__(self, lat=None, lon=None, rho=None, filename=None,
coords='geo', datetime=None,
vswgse=[-400.,0.,0.], pdyn=2., dst=-5., byimf=0., bzimf=-5.,
lmax=5000, rmax=60., rmin=1., dsmax=0.01, err=0.000001):
"""
| **PACKAGE**: models.tsyganenko.trace
| **FUNCTION**: trace(lat, lon, rho, coords='geo', datetime=None,
| vswgse=[-400.,0.,0.], Pdyn=2., Dst=-5., ByIMF=0., BzIMF=-5.
| lmax=5000, rmax=60., rmin=1., dsmax=0.01, err=0.000001)
| **PURPOSE**: trace magnetic field line(s) from point(s)
|
| **INPUTS**:
| **lat**: latitude [degrees]
| **lon**: longitude [degrees]
| **rho**: distance from center of the Earth [km]
| **filename**: load a trace object directly from a file
| **[coords]**: coordinates used for start point ['geo']
| **[datetime]**: a python datetime object
| **[vswgse]**: solar wind velocity in GSE coordinates [m/s, m/s, m/s]
| **[pdyn]**: solar wind dynamic pressure [nPa]
| **[dst]**: Dst index [nT]
| **[byimf]**: IMF By [nT]
| **[bzimf]**: IMF Bz [nT]
| **[lmax]**: maximum number of points to trace
| **[rmax]**: upper trace boundary in Re
| **[rmin]**: lower trace boundary in Re
| **[dsmax]**: maximum tracing step size
| **[err]**: tracing step tolerance
|
| **OUTPUTS**:
| Elements of this object:
| **.lat[N/S]H**: latitude of the trace footpoint in Northern/Southern hemispher
| **.lon[N/S]H**: longitude of the trace footpoint in Northern/Southern hemispher
| **.rho[N/S]H**: distance of the trace footpoint in Northern/Southern hemispher
|
| **EXAMPLES**:
from numpy import arange, zeros, ones
import tsyganenko
# trace a series of points
lats = arange(10, 90, 10)
lons = zeros(len(lats))
rhos = 6372.*ones(len(lats))
trace = tsyganenko.tsygTrace(lats, lons, rhos)
# Print the results nicely
print trace
# Plot the traced field lines
ax = trace.plot()
# Or generate a 3d view of the traced field lines
ax = trace.plot3d()
# Save your trace to a file for later use
trace.save('trace.dat')
# And when you want to re-use the saved trace
trace = tsyganenko.tsygTrace(filename='trace.dat')
|
| Written by Sebastien 2012-10
"""
from datetime import datetime as pydt
assert (None not in [lat, lon, rho]) or filename, 'You must provide either (lat, lon, rho) or a filename to read from'
if None not in [lat, lon, rho]:
self.lat = lat
self.lon = lon
self.rho = rho
self.coords = coords
self.vswgse = vswgse
self.pdyn = pdyn
self.dst = dst
self.byimf = byimf
self.bzimf = bzimf
# If no datetime is provided, defaults to today
if datetime==None: datetime = pydt.utcnow()
self.datetime = datetime
iTest = self.__test_valid__()
if not iTest: self.__del__()
self.trace()
elif filename:
self.load(filename)
def __test_valid__(self):
"""
| Test the validity of input arguments to the tsygTrace class and trace method
|
| Written by Sebastien 2012-10
"""
assert (len(self.vswgse) == 3), 'vswgse must have 3 elements'
assert (self.coords.lower() == 'geo'), '{}: this coordinae system is not supported'.format(self.coords.lower())
# A provision for those who want to batch trace
try:
[l for l in self.lat]
except:
self.lat = [self.lat]
try:
[l for l in self.lon]
except:
self.lon = [self.lon]
try:
[r for r in self.rho]
except:
self.rho = [self.rho]
try:
[d for d in self.datetime]
except:
self.datetime = [self.datetime for l in self.lat]
# Make sure they're all the sam elength
assert (len(self.lat) == len(self.lon) == len(self.rho) == len(self.datetime)), \
'lat, lon, rho and datetime must me the same length'
return True
def trace(self, lat=None, lon=None, rho=None, coords=None, datetime=None,
vswgse=None, pdyn=None, dst=None, byimf=None, bzimf=None,
lmax=5000, rmax=60., rmin=1., dsmax=0.01, err=0.000001):
"""
| See tsygTrace for a description of each parameter
| Any unspecified parameter default to the one stored in the object
| Unspecified lmax, rmax, rmin, dsmax, err has a set default value
|
| Written by Sebastien 2012-10
"""
from numpy import radians, degrees, zeros
# Store existing values of class attributes in case something is wrong
# and we need to revert back to them
if lat: _lat = self.lat
if lon: _lon = self.lon
if rho: _rho = self.rho
if coords: _coords = self.coords
if vswgse: _vswgse = self.vswgse
if not datetime==None: _datetime = self.datetime
# Pass position if new
if lat: self.lat = lat
lat = self.lat
if lon: self.lon = lon
lon = self.lon
if rho: self.rho = rho
rho = self.rho
if not datetime==None: self.datetime = datetime
datetime = self.datetime
# Set necessary parameters if new
if coords: self.coords = coords
coords = self.coords
if not datetime==None: self.datetime = datetime
datetime = self.datetime
if vswgse: self.vswgse = vswgse
vswgse = self.vswgse
if pdyn: self.pdyn = pdyn
pdyn = self.pdyn
if dst: self.dst = dst
dst = self.dst
if byimf: self.byimf = byimf
byimf = self.byimf
if bzimf: self.bzimf = bzimf
bzimf = self.bzimf
# Test that everything is in order, if not revert to existing values
iTest = self.__test_valid__()
if not iTest:
if lat: self.lat = _lat
if lon: _self.lon = lon
if rho: self.rho = _rho
if coords: self.coords = _coords
if vswgse: self.vswgse = _vswgse
if not datetime==None: self.datetime = _datetime
# Declare the same Re as used in Tsyganenko models [km]
Re = 6371.2
# Initialize trace array
self.l = zeros(len(lat))
self.xTrace = zeros((len(lat),2*lmax))
self.yTrace = self.xTrace.copy()
self.zTrace = self.xTrace.copy()
self.xGsw = self.l.copy()
self.yGsw = self.l.copy()
self.zGsw = self.l.copy()
self.latNH = self.l.copy()
self.lonNH = self.l.copy()
self.rhoNH = self.l.copy()
self.latSH = self.l.copy()
self.lonSH = self.l.copy()
self.rhoSH = self.l.copy()
# And now iterate through the desired points
for ip in xrange(len(lat)):
# This has to be called first
tsygFort.recalc_08(datetime[ip].year,datetime[ip].timetuple().tm_yday,
datetime[ip].hour,datetime[ip].minute,datetime[ip].second,
vswgse[0],vswgse[1],vswgse[2])
# Convert lat,lon to geographic cartesian and then gsw
r, theta, phi, xgeo, ygeo, zgeo = tsygFort.sphcar_08(
rho[ip]/Re, radians(90.-lat[ip]), radians(lon[ip]),
0., 0., 0.,
1)
if coords.lower() == 'geo':
xgeo, ygeo, zgeo, xgsw, ygsw, zgsw = tsygFort.geogsw_08(
xgeo, ygeo, zgeo,
0. ,0. ,0. ,
1)
self.xGsw[ip] = xgsw
self.yGsw[ip] = ygsw
self.zGsw[ip] = zgsw
# Trace field line
inmod = 'IGRF_GSW_08'
exmod = 'T96_01'
parmod = [pdyn, dst, byimf, bzimf, 0, 0, 0, 0, 0, 0]
# First towards southern hemisphere
maptoL = [-1, 1]
for mapto in maptoL:
xfgsw, yfgsw, zfgsw, xarr, yarr, zarr, l = tsygFort.trace_08( xgsw, ygsw, zgsw,
mapto, dsmax, err, rmax, rmin, 0,
parmod, exmod, inmod,
lmax )
# Convert back to spherical geographic coords
xfgeo, yfgeo, zfgeo, xfgsw, yfgsw, zfgsw = tsygFort.geogsw_08(
0. ,0. ,0. ,
xfgsw, yfgsw, zfgsw,
-1)
geoR, geoColat, geoLon, xgeo, ygeo, zgeo = tsygFort.sphcar_08(
0., 0., 0.,
xfgeo, yfgeo, zfgeo,
-1)
# Get coordinates of traced point
if mapto == 1:
self.latSH[ip] = 90. - degrees(geoColat)
self.lonSH[ip] = degrees(geoLon)
self.rhoSH[ip] = geoR*Re
elif mapto == -1:
self.latNH[ip] = 90. - degrees(geoColat)
self.lonNH[ip] = degrees(geoLon)
self.rhoNH[ip] = geoR*Re
# Store trace
if mapto == -1:
self.xTrace[ip,0:l] = xarr[l-1::-1]
self.yTrace[ip,0:l] = yarr[l-1::-1]
self.zTrace[ip,0:l] = zarr[l-1::-1]
elif mapto == 1:
self.xTrace[ip,self.l[ip]:self.l[ip]+l] = xarr[0:l]
self.yTrace[ip,self.l[ip]:self.l[ip]+l] = yarr[0:l]
self.zTrace[ip,self.l[ip]:self.l[ip]+l] = zarr[0:l]
self.l[ip] += l
# Resize trace output to more minimum possible length
self.xTrace = self.xTrace[:,0:self.l.max()]
self.yTrace = self.yTrace[:,0:self.l.max()]
self.zTrace = self.zTrace[:,0:self.l.max()]
    def __str__(self):
        """
        | Print object information in a nice way
        |
        | Returns a multi-line string: first the solar-wind/IMF inputs, then
        | one entry per traced point showing the start position and the
        | Northern/Southern hemisphere footpoints.
        |
        | Written by Sebastien 2012-10
        """
        # Declare print format (inputs block)
        outstr = '''
vswgse=[{:6.0f},{:6.0f},{:6.0f}] [m/s]
pdyn={:3.0f} [nPa]
dst={:3.0f} [nT]
byimf={:3.0f} [nT]
bzimf={:3.0f} [nT]
'''.format(self.vswgse[0],
           self.vswgse[1],
           self.vswgse[2],
           self.pdyn,
           self.dst,
           self.byimf,
           self.bzimf)
        outstr += '\nCoords: {}\n'.format(self.coords)
        outstr += '(latitude [degrees], longitude [degrees], distance from center of the Earth [km])\n'
        # Print stuff: one (start @ time) --> NH / SH footpoint block per point
        for ip in xrange(len(self.lat)):
            outstr += '''
({:6.3f}, {:6.3f}, {:6.3f}) @ {}
    --> NH({:6.3f}, {:6.3f}, {:6.3f})
    --> SH({:6.3f}, {:6.3f}, {:6.3f})
'''.format(self.lat[ip], self.lon[ip], self.rho[ip],
           self.datetime[ip].strftime('%H:%M UT (%d-%b-%y)'),
           self.latNH[ip], self.lonNH[ip], self.rhoNH[ip],
           self.latSH[ip], self.lonSH[ip], self.rhoSH[ip])
        return outstr
def save(self, filename):
"""
| Save trace information to a file
|
| Written by Sebastien 2012-10
"""
import cPickle as pickle
with open( filename, "wb" ) as fileObj:
pickle.dump(self, fileObj)
def load(self, filename):
"""
| load trace information from a file
|
| Written by Sebastien 2012-10
"""
import cPickle as pickle
with open( filename, "rb" ) as fileObj:
obj = pickle.load(fileObj)
for k, v in obj.__dict__.items():
self.__dict__[k] = v
def plot(self, proj='xz', color='b', onlyPts=None, showPts=False,
showEarth=True, disp=True, **kwargs):
"""
| Generate a 2D plot of the trace projected onto a given plane
| Graphic keywords apply to the plot method for the field lines
|
| **INPUTS**:
| **plane**: the projection plane in GSW coordinates
| **onlyPts**: if the trace countains multiple point, only show | |
= self.assertRaises(dispatcher.ExpectedException,
self.man.create_stack,
ctx_no_pwd, stack_name,
template, params, None, {}, None)
self.assertEqual(ex.exc_info[0], exception.MissingCredentialError)
self.assertEqual(
'Missing required credential: X-Auth-Key',
six.text_type(ex.exc_info[1]))
ex = self.assertRaises(dispatcher.ExpectedException,
self.man.create_stack,
ctx_no_user, stack_name,
template, params, None, {})
self.assertEqual(ex.exc_info[0], exception.MissingCredentialError)
self.assertEqual(
'Missing required credential: X-Auth-User',
six.text_type(ex.exc_info[1]))
    def test_stack_create_total_resources_equals_max(self):
        """Stack creation succeeds when resources exactly equal the limit."""
        stack_name = 'service_create_stack_total_resources_equals_max'
        params = {}
        res._register_class('GenericResourceType',
                            generic_rsrc.GenericResource)
        # Template with exactly 3 resources, matching the limit set below.
        tpl = {'HeatTemplateFormatVersion': '2012-12-12',
               'Resources': {
               'A': {'Type': 'GenericResourceType'},
               'B': {'Type': 'GenericResourceType'},
               'C': {'Type': 'GenericResourceType'}}}
        template = templatem.Template(tpl)
        stack = parser.Stack(self.ctx, stack_name, template,
                             environment.Environment({}))
        self.m.StubOutWithMock(templatem, 'Template')
        self.m.StubOutWithMock(environment, 'Environment')
        self.m.StubOutWithMock(parser, 'Stack')
        templatem.Template(template, files=None).AndReturn(stack.t)
        environment.Environment(params).AndReturn(stack.env)
        parser.Stack(self.ctx, stack.name,
                     stack.t,
                     stack.env,
                     owner_id=None).AndReturn(stack)
        self.m.ReplayAll()
        cfg.CONF.set_override('max_resources_per_stack', 3)
        result = self.man.create_stack(self.ctx, stack_name, template, params,
                                       None, {})
        self.m.VerifyAll()
        self.assertEqual(stack.identifier(), result)
        self.assertEqual(3, stack.total_resources())
        # Wait for the create thread, then clean up.
        self.man.thread_group_mgr.groups[stack.id].wait()
        stack.delete()
    def test_stack_create_total_resources_exceeds_max(self):
        """Creating a stack over max_resources_per_stack raises RequestLimitExceeded."""
        stack_name = 'service_create_stack_total_resources_exceeds_max'
        params = {}
        res._register_class('GenericResourceType',
                            generic_rsrc.GenericResource)
        tpl = {'HeatTemplateFormatVersion': '2012-12-12',
               'Resources': {
               'A': {'Type': 'GenericResourceType'},
               'B': {'Type': 'GenericResourceType'},
               'C': {'Type': 'GenericResourceType'}}}
        # Limit (2) is below the template's resource count (3).
        cfg.CONF.set_override('max_resources_per_stack', 2)
        ex = self.assertRaises(dispatcher.ExpectedException,
                               self.man.create_stack, self.ctx, stack_name,
                               tpl, params, None, {})
        self.assertEqual(ex.exc_info[0], exception.RequestLimitExceeded)
        self.assertIn(exception.StackResourceLimitExceeded.msg_fmt,
                      six.text_type(ex.exc_info[1]))
    def test_stack_validate(self):
        """validate() passes with complete properties and fails without ImageId."""
        stack_name = 'service_create_test_validate'
        stack = get_wordpress_stack(stack_name, self.ctx)
        setup_mocks(self.m, stack, mock_image_constraint=False)
        resource = stack['WebServer']
        setup_mock_for_image_constraint(self.m, 'CentOS 5.2')
        self.m.ReplayAll()
        # Full property set: validation should succeed.
        resource.properties = Properties(
            resource.properties_schema,
            {
                'ImageId': 'CentOS 5.2',
                'KeyName': 'test',
                'InstanceType': 'm1.large'
            },
            context=self.ctx)
        stack.validate()
        # Drop the required ImageId property: validation must now fail.
        resource.properties = Properties(
            resource.properties_schema,
            {
                'KeyName': 'test',
                'InstanceType': 'm1.large'
            },
            context=self.ctx)
        self.assertRaises(exception.StackValidationFailed, stack.validate)
    def test_stack_delete(self):
        """Deleting an existing stack returns None and the delete thread completes."""
        stack_name = 'service_delete_test_stack'
        stack = get_wordpress_stack(stack_name, self.ctx)
        sid = stack.store()
        s = db_api.stack_get(self.ctx, sid)
        self.m.StubOutWithMock(parser.Stack, 'load')
        parser.Stack.load(self.ctx, stack=s).AndReturn(stack)
        self.m.ReplayAll()
        self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier()))
        self.man.thread_group_mgr.groups[sid].wait()
        self.m.VerifyAll()
    def test_stack_delete_nonexist(self):
        """Deleting a stack that was never stored raises StackNotFound."""
        stack_name = 'service_delete_nonexist_test_stack'
        # The stack is built but NOT stored, so the engine cannot find it.
        stack = get_wordpress_stack(stack_name, self.ctx)
        self.m.ReplayAll()
        ex = self.assertRaises(dispatcher.ExpectedException,
                               self.man.delete_stack,
                               self.ctx, stack.identifier())
        self.assertEqual(ex.exc_info[0], exception.StackNotFound)
        self.m.VerifyAll()
    def test_stack_delete_acquired_lock(self):
        """Delete proceeds when the lock is already held by this engine."""
        stack_name = 'service_delete_test_stack'
        stack = get_wordpress_stack(stack_name, self.ctx)
        sid = stack.store()
        st = db_api.stack_get(self.ctx, sid)
        self.m.StubOutWithMock(parser.Stack, 'load')
        parser.Stack.load(self.ctx, stack=st).MultipleTimes().AndReturn(stack)
        # try_acquire reports the current engine as the lock holder.
        self.m.StubOutWithMock(stack_lock.StackLock, 'try_acquire')
        stack_lock.StackLock.try_acquire().AndReturn(self.man.engine_id)
        self.m.ReplayAll()
        self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier()))
        self.man.thread_group_mgr.groups[sid].wait()
        self.m.VerifyAll()
    def test_stack_delete_acquired_lock_stop_timers(self):
        """Delete stops any timers registered for the stack's thread group."""
        stack_name = 'service_delete_test_stack'
        stack = get_wordpress_stack(stack_name, self.ctx)
        sid = stack.store()
        st = db_api.stack_get(self.ctx, sid)
        self.m.StubOutWithMock(parser.Stack, 'load')
        parser.Stack.load(self.ctx, stack=st).MultipleTimes().AndReturn(stack)
        self.m.StubOutWithMock(stack_lock.StackLock, 'try_acquire')
        stack_lock.StackLock.try_acquire().AndReturn(self.man.engine_id)
        self.m.ReplayAll()
        # Register one timer, then check delete_stack removes it.
        self.man.thread_group_mgr.add_timer(stack.id, 'test')
        self.assertEqual(1, len(self.man.thread_group_mgr.groups[sid].timers))
        self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier()))
        self.assertEqual(0, len(self.man.thread_group_mgr.groups[sid].timers))
        self.man.thread_group_mgr.groups[sid].wait()
        self.m.VerifyAll()
    def test_stack_delete_current_engine_active_lock(self):
        """When this engine actively holds the lock, delete stops the local threads."""
        self.man.start()
        stack_name = 'service_delete_test_stack'
        stack = get_wordpress_stack(stack_name, self.ctx)
        sid = stack.store()
        # Insert a fake lock into the db
        db_api.stack_lock_create(stack.id, self.man.engine_id)
        # Create a fake ThreadGroup too
        self.man.thread_group_mgr.groups[stack.id] = DummyThreadGroup()
        st = db_api.stack_get(self.ctx, sid)
        self.m.StubOutWithMock(parser.Stack, 'load')
        parser.Stack.load(self.ctx, stack=st).MultipleTimes().AndReturn(stack)
        self.m.StubOutWithMock(stack_lock.StackLock, 'try_acquire')
        stack_lock.StackLock.try_acquire().AndReturn(self.man.engine_id)
        # this is to simulate lock release on DummyThreadGroup stop
        self.m.StubOutWithMock(stack_lock.StackLock, 'acquire')
        stack_lock.StackLock.acquire().AndReturn(None)
        # The engine must stop its own thread group before deleting.
        self.m.StubOutWithMock(self.man.thread_group_mgr, 'stop')
        self.man.thread_group_mgr.stop(stack.id).AndReturn(None)
        self.m.ReplayAll()
        self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier()))
        self.m.VerifyAll()
    def test_stack_delete_other_engine_active_lock_failed(self):
        """If the remote stop_stack call fails, delete raises StopActionFailed."""
        self.man.start()
        stack_name = 'service_delete_test_stack'
        stack = get_wordpress_stack(stack_name, self.ctx)
        sid = stack.store()
        # Insert a fake lock into the db
        db_api.stack_lock_create(stack.id, "other-engine-fake-uuid")
        st = db_api.stack_get(self.ctx, sid)
        self.m.StubOutWithMock(parser.Stack, 'load')
        parser.Stack.load(self.ctx, stack=st).AndReturn(stack)
        self.m.StubOutWithMock(stack_lock.StackLock, 'try_acquire')
        stack_lock.StackLock.try_acquire().AndReturn("other-engine-fake-uuid")
        # The other engine is reported alive, so we must ask it to stop...
        self.m.StubOutWithMock(stack_lock.StackLock, 'engine_alive')
        stack_lock.StackLock.engine_alive(self.ctx, "other-engine-fake-uuid")\
            .AndReturn(True)
        # ...but the remote call fails.
        self.m.StubOutWithMock(self.man, '_remote_call')
        self.man._remote_call(
            self.ctx, 'other-engine-fake-uuid', 'stop_stack',
            stack_identity=mox.IgnoreArg()
        ).AndReturn(False)
        self.m.ReplayAll()
        ex = self.assertRaises(dispatcher.ExpectedException,
                               self.man.delete_stack,
                               self.ctx, stack.identifier())
        self.assertEqual(ex.exc_info[0], exception.StopActionFailed)
        self.m.VerifyAll()
    def test_stack_delete_other_engine_active_lock_succeeded(self):
        """If the other engine stops its work on request, delete proceeds."""
        self.man.start()
        stack_name = 'service_delete_test_stack'
        stack = get_wordpress_stack(stack_name, self.ctx)
        sid = stack.store()
        # Insert a fake lock into the db
        db_api.stack_lock_create(stack.id, "other-engine-fake-uuid")
        st = db_api.stack_get(self.ctx, sid)
        self.m.StubOutWithMock(parser.Stack, 'load')
        parser.Stack.load(self.ctx, stack=st).MultipleTimes().AndReturn(stack)
        self.m.StubOutWithMock(stack_lock.StackLock, 'try_acquire')
        stack_lock.StackLock.try_acquire().AndReturn("other-engine-fake-uuid")
        self.m.StubOutWithMock(stack_lock.StackLock, 'engine_alive')
        stack_lock.StackLock.engine_alive(self.ctx, "other-engine-fake-uuid")\
            .AndReturn(True)
        # Remote stop_stack succeeds (returns None), so the lock can be taken.
        self.m.StubOutWithMock(self.man, '_remote_call')
        self.man._remote_call(
            self.ctx, 'other-engine-fake-uuid', 'stop_stack',
            stack_identity=mox.IgnoreArg()).AndReturn(None)
        self.m.StubOutWithMock(stack_lock.StackLock, 'acquire')
        stack_lock.StackLock.acquire().AndReturn(None)
        self.m.ReplayAll()
        self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier()))
        self.man.thread_group_mgr.groups[sid].wait()
        self.m.VerifyAll()
    def test_stack_delete_other_dead_engine_active_lock(self):
        """If the lock-holding engine is dead, the lock is stolen and delete proceeds."""
        stack_name = 'service_delete_test_stack'
        stack = get_wordpress_stack(stack_name, self.ctx)
        sid = stack.store()
        # Insert a fake lock into the db
        db_api.stack_lock_create(stack.id, "other-engine-fake-uuid")
        st = db_api.stack_get(self.ctx, sid)
        self.m.StubOutWithMock(parser.Stack, 'load')
        parser.Stack.load(self.ctx, stack=st).MultipleTimes().AndReturn(stack)
        self.m.StubOutWithMock(stack_lock.StackLock, 'try_acquire')
        stack_lock.StackLock.try_acquire().AndReturn("other-engine-fake-uuid")
        # The other engine is reported dead, so no remote call is attempted.
        self.m.StubOutWithMock(stack_lock.StackLock, 'engine_alive')
        stack_lock.StackLock.engine_alive(self.ctx, "other-engine-fake-uuid")\
            .AndReturn(False)
        self.m.StubOutWithMock(stack_lock.StackLock, 'acquire')
        stack_lock.StackLock.acquire().AndReturn(None)
        self.m.ReplayAll()
        self.assertIsNone(self.man.delete_stack(self.ctx, stack.identifier()))
        self.man.thread_group_mgr.groups[sid].wait()
        self.m.VerifyAll()
    def _stub_update_mocks(self, stack_to_load, stack_to_return):
        """Stub Stack/Template/Environment so update_stack loads *stack_to_return*.

        The caller records further expectations on templatem.Template,
        environment.Environment and parser.Stack before ReplayAll().
        """
        self.m.StubOutWithMock(parser, 'Stack')
        self.m.StubOutWithMock(parser.Stack, 'load')
        parser.Stack.load(self.ctx, stack=stack_to_load
                          ).AndReturn(stack_to_return)
        self.m.StubOutWithMock(templatem, 'Template')
        self.m.StubOutWithMock(environment, 'Environment')
    def test_stack_update(self):
        """Happy-path update: returns the identifier and registers a cancel event."""
        stack_name = 'service_update_test_stack'
        params = {'foo': 'bar'}
        template = '{ "Template": "data" }'
        old_stack = get_wordpress_stack(stack_name, self.ctx)
        sid = old_stack.store()
        s = db_api.stack_get(self.ctx, sid)
        stack = get_wordpress_stack(stack_name, self.ctx)
        self._stub_update_mocks(s, old_stack)
        templatem.Template(template, files=None).AndReturn(stack.t)
        environment.Environment(params).AndReturn(stack.env)
        parser.Stack(self.ctx, stack.name,
                     stack.t, stack.env,
                     timeout_mins=60, disable_rollback=True).AndReturn(stack)
        self.m.StubOutWithMock(stack, 'validate')
        stack.validate().AndReturn(None)
        # The update registers a greenlet Event used to cancel the update.
        evt_mock = self.m.CreateMockAnything()
        self.m.StubOutWithMock(grevent, 'Event')
        grevent.Event().AndReturn(evt_mock)
        self.m.StubOutWithMock(threadgroup, 'ThreadGroup')
        threadgroup.ThreadGroup().AndReturn(DummyThreadGroup())
        self.m.ReplayAll()
        api_args = {'timeout_mins': 60}
        result = self.man.update_stack(self.ctx, old_stack.identifier(),
                                       template, params, None, api_args)
        self.assertEqual(old_stack.identifier(), result)
        self.assertIsInstance(result, dict)
        self.assertTrue(result['stack_id'])
        self.assertEqual(self.man.thread_group_mgr.events[sid], [evt_mock])
        self.m.VerifyAll()
    def test_stack_update_existing_parameters(self):
        '''Use a template with default parameter and no input parameter
        then update with a template without default and no input
        parameter, using the existing parameter
        (i.e. PARAM_EXISTING makes the update reuse the stored environment).
        '''
        stack_name = 'service_update_test_stack_existing_parameters'
        no_params = {}
        with_params = {'KeyName': 'foo'}
        old_stack = get_wordpress_stack_no_params(stack_name, self.ctx)
        sid = old_stack.store()
        s = db_api.stack_get(self.ctx, sid)
        t = template_format.parse(wp_template_no_default)
        template = parser.Template(t)
        env = environment.Environment({'parameters': with_params,
                                       'resource_registry': {'rsc': 'test'}})
        stack = parser.Stack(self.ctx, stack_name, template, env)
        self._stub_update_mocks(s, old_stack)
        templatem.Template(wp_template_no_default,
                           files=None).AndReturn(stack.t)
        # Note: the stack is rebuilt with the OLD environment, not no_params.
        environment.Environment(no_params).AndReturn(old_stack.env)
        parser.Stack(self.ctx, stack.name,
                     stack.t, old_stack.env,
                     timeout_mins=60, disable_rollback=True).AndReturn(stack)
        self.m.StubOutWithMock(stack, 'validate')
        stack.validate().AndReturn(None)
        evt_mock = self.m.CreateMockAnything()
        self.m.StubOutWithMock(grevent, 'Event')
        grevent.Event().AndReturn(evt_mock)
        self.m.StubOutWithMock(threadgroup, 'ThreadGroup')
        threadgroup.ThreadGroup().AndReturn(DummyThreadGroup())
        self.m.ReplayAll()
        api_args = {engine_api.PARAM_TIMEOUT: 60,
                    engine_api.PARAM_EXISTING: True}
        result = self.man.update_stack(self.ctx, old_stack.identifier(),
                                       wp_template_no_default, no_params,
                                       None, api_args)
        self.assertEqual(old_stack.identifier(), result)
        self.assertIsInstance(result, dict)
        self.assertTrue(result['stack_id'])
        self.assertEqual(self.man.thread_group_mgr.events[sid], [evt_mock])
        self.m.VerifyAll()
    def test_stack_update_reuses_api_params(self):
        """Empty api_args reuse the stored timeout_mins/disable_rollback values."""
        stack_name = 'service_update_test_stack'
        params = {'foo': 'bar'}
        template = '{ "Template": "data" }'
        old_stack = get_wordpress_stack(stack_name, self.ctx)
        # Non-default stored values which the update must pick up.
        old_stack.timeout_mins = 1
        old_stack.disable_rollback = False
        sid = old_stack.store()
        s = db_api.stack_get(self.ctx, sid)
        stack = get_wordpress_stack(stack_name, self.ctx)
        self._stub_update_mocks(s, old_stack)
        templatem.Template(template, files=None).AndReturn(stack.t)
        environment.Environment(params).AndReturn(stack.env)
        parser.Stack(self.ctx, stack.name,
                     stack.t, stack.env,
                     timeout_mins=1, disable_rollback=False).AndReturn(stack)
        self.m.StubOutWithMock(stack, 'validate')
        stack.validate().AndReturn(None)
        self.m.StubOutWithMock(threadgroup, 'ThreadGroup')
        threadgroup.ThreadGroup().AndReturn(DummyThreadGroup())
        self.m.ReplayAll()
        api_args = {}
        result = self.man.update_stack(self.ctx, old_stack.identifier(),
                                       template, params, None, api_args)
        self.assertEqual(old_stack.identifier(), result)
        self.assertIsInstance(result, dict)
        self.assertTrue(result['stack_id'])
        self.m.VerifyAll()
    def test_stack_cancel_update_same_engine(self):
        """When this engine holds the lock, cancel sends 'cancel' to the worker thread."""
        stack_name = 'service_update_cancel_test_stack'
        old_stack = get_wordpress_stack(stack_name, self.ctx)
        # Cancel is only allowed while an update is in progress.
        old_stack.state_set(old_stack.UPDATE, old_stack.IN_PROGRESS,
                            'test_override')
        old_stack.disable_rollback = False
        old_stack.store()
        load_mock = self.patchobject(parser.Stack, 'load')
        load_mock.return_value = old_stack
        lock_mock = self.patchobject(stack_lock.StackLock, 'try_acquire')
        lock_mock.return_value = self.man.engine_id
        self.patchobject(self.man.thread_group_mgr, 'send')
        self.man.stack_cancel_update(self.ctx, old_stack.identifier())
        self.man.thread_group_mgr.send.assert_called_once_with(old_stack.id,
                                                               'cancel')
def test_stack_cancel_update_different_engine(self):
stack_name = 'service_update_cancel_test_stack'
old_stack = get_wordpress_stack(stack_name, self.ctx)
old_stack.state_set(old_stack.UPDATE, old_stack.IN_PROGRESS,
'test_override')
old_stack.disable_rollback = False
old_stack.store()
load_mock = self.patchobject(parser.Stack, 'load')
load_mock.return_value = old_stack
lock_mock = self.patchobject(stack_lock.StackLock, 'try_acquire')
another_engine_has_lock = str(uuid.uuid4())
lock_mock.return_value = another_engine_has_lock
self.patchobject(stack_lock.StackLock,
'engine_alive').return_value(True)
self.man.listener = mock.Mock()
self.man.listener.SEND = 'send'
self.man._client = messaging.get_rpc_client(
version=self.man.RPC_API_VERSION)
# In fact the another engine is not alive, so the call will timeout
self.assertRaises(dispatcher.ExpectedException,
self.man.stack_cancel_update,
self.ctx, old_stack.identifier())
    def test_stack_cancel_update_wrong_state_fails(self):
        """Cancel is NotSupported unless the stack is in UPDATE IN_PROGRESS."""
        stack_name = 'service_update_cancel_test_stack'
        old_stack = get_wordpress_stack(stack_name, self.ctx)
        # UPDATE COMPLETE: there is nothing in flight to cancel.
        old_stack.state_set(old_stack.UPDATE, old_stack.COMPLETE,
                            'test_override')
        old_stack.store()
        load_mock = self.patchobject(parser.Stack, 'load')
        load_mock.return_value = old_stack
        ex = self.assertRaises(
            dispatcher.ExpectedException,
            self.man.stack_cancel_update, self.ctx, old_stack.identifier())
        self.assertEqual(ex.exc_info[0], exception.NotSupported)
        self.assertIn("Cancelling update when stack is "
                      "('UPDATE', 'COMPLETE')",
                      six.text_type(ex.exc_info[1]))
    def test_stack_update_equals(self):
        """Updating a stack whose resource count equals the limit succeeds."""
        stack_name = 'test_stack_update_equals_resource_limit'
        params = {}
        res._register_class('GenericResourceType',
                            generic_rsrc.GenericResource)
        tpl = {'HeatTemplateFormatVersion': '2012-12-12',
               'Resources': {
               'A': {'Type': 'GenericResourceType'},
               'B': {'Type': 'GenericResourceType'},
               'C': {'Type': 'GenericResourceType'}}}
        template = templatem.Template(tpl)
        old_stack = parser.Stack(self.ctx, stack_name, template)
        sid = old_stack.store()
        s = db_api.stack_get(self.ctx, sid)
        stack = parser.Stack(self.ctx, stack_name, template)
        self._stub_update_mocks(s, old_stack)
        templatem.Template(template, files=None).AndReturn(stack.t)
        environment.Environment(params).AndReturn(stack.env)
        parser.Stack(self.ctx, stack.name,
                     stack.t, stack.env,
                     timeout_mins=60, disable_rollback=True).AndReturn(stack)
        self.m.StubOutWithMock(stack, 'validate')
        stack.validate().AndReturn(None)
        self.m.StubOutWithMock(threadgroup, 'ThreadGroup')
        threadgroup.ThreadGroup().AndReturn(DummyThreadGroup())
        self.m.ReplayAll()
        # Limit exactly matches the template's 3 resources.
        cfg.CONF.set_override('max_resources_per_stack', 3)
        api_args = {'timeout_mins': 60}
        result = self.man.update_stack(self.ctx, old_stack.identifier(),
                                       template, params, None, api_args)
        self.assertEqual(old_stack.identifier(), result)
        self.assertIsInstance(result, dict)
        self.assertTrue(result['stack_id'])
        self.assertEqual(3, old_stack.root_stack.total_resources())
        self.m.VerifyAll()
    def test_stack_update_stack_id_equal(self):
        """An update keeps the stack id, so AWS::StackId references stay valid."""
        stack_name = 'test_stack_update_stack_id_equal'
        res._register_class('ResourceWithPropsType',
                            generic_rsrc.ResourceWithProps)
        # The resource's Foo property references the stack's own ARN.
        tpl = {
            'HeatTemplateFormatVersion': '2012-12-12',
            'Resources': {
                'A': {
                    'Type': 'ResourceWithPropsType',
                    'Properties': {
                        'Foo': {'Ref': 'AWS::StackId'}
                    }
                }
            }
        }
        template = templatem.Template(tpl)
        create_stack = parser.Stack(self.ctx, stack_name, template)
        sid = create_stack.store()
        create_stack.create()
        self.assertEqual((create_stack.CREATE, create_stack.COMPLETE),
                         create_stack.state)
        s = db_api.stack_get(self.ctx, sid)
        old_stack = parser.Stack.load(self.ctx, stack=s)
        self.assertEqual((old_stack.CREATE, old_stack.COMPLETE),
                         old_stack.state)
        self.assertEqual(create_stack.identifier().arn(),
                         old_stack['A'].properties['Foo'])
        self.m.StubOutWithMock(parser.Stack, 'load')
        parser.Stack.load(self.ctx, stack=s).AndReturn(old_stack)
        self.m.ReplayAll()
        result = self.man.update_stack(self.ctx, create_stack.identifier(),
                                       tpl, {}, None, {})
        self.man.thread_group_mgr.groups[sid].wait()
        self.assertEqual((old_stack.UPDATE, old_stack.COMPLETE),
                         old_stack.state)
        # Identifier, ARN reference and the resource id are all unchanged.
        self.assertEqual(create_stack.identifier(), result)
        self.assertIsNotNone(create_stack.identifier().stack_id)
        self.assertEqual(create_stack.identifier().arn(),
                         old_stack['A'].properties['Foo'])
        self.assertEqual(create_stack['A'].id, old_stack['A'].id)
        self.man.thread_group_mgr.groups[sid].wait()
        self.m.VerifyAll()
    def test_nested_stack_update_stack_id_equal(self):
        """Updating a parent stack updates the nested stack in place (same ids)."""
        stack_name = 'test_stack_update_stack_id_equal'
        res._register_class('ResourceWithPropsType',
                            generic_rsrc.ResourceWithProps)
        # Parent template with a nested AWS::CloudFormation::Stack resource.
        tpl = {
            'HeatTemplateFormatVersion': '2012-12-12',
            'Parameters': {
                'some_param': {'Type': 'String'}
            },
            'Resources': {
                'nested': {
                    'Type': 'AWS::CloudFormation::Stack',
                    'Properties': {
                        'TemplateURL': 'https://server.test/nested_tpl',
                        'Parameters': {'some_param': {'Ref': 'some_param'}}
                    }
                }
            }
        }
        # Nested template whose resource references its own stack ARN.
        nested_tpl = {
            'HeatTemplateFormatVersion': '2012-12-12',
            'Parameters': {
                'some_param': {'Type': 'String'}
            },
            'Resources': {
                'A': {
                    'Type': 'ResourceWithPropsType',
                    'Properties': {
                        'Foo': {'Ref': 'AWS::StackId'}
                    }
                }
            }
        }
        # The nested template is fetched over HTTP; stub out urlfetch.
        self.m.StubOutWithMock(urlfetch, 'get')
        urlfetch.get('https://server.test/nested_tpl').MultipleTimes().\
            AndReturn(json.dumps(nested_tpl))
        mox.Replay(urlfetch.get)
        template = templatem.Template(tpl)
        create_env = environment.Environment({'some_param': 'foo'})
        create_stack = parser.Stack(self.ctx, stack_name, template, create_env)
        sid = create_stack.store()
        create_stack.create()
        self.assertEqual((create_stack.CREATE, create_stack.COMPLETE),
                         create_stack.state)
        s = db_api.stack_get(self.ctx, sid)
        old_stack = parser.Stack.load(self.ctx, stack=s)
        self.assertEqual((old_stack.CREATE, old_stack.COMPLETE),
                         old_stack.state)
        old_nested = old_stack['nested'].nested()
        self.m.StubOutWithMock(parser.Stack, 'load')
        parser.Stack.load(self.ctx, stack=s).AndReturn(old_stack)
        self.m.ReplayAll()
        # Change the parameter value so the nested stack must be updated.
        result = self.man.update_stack(self.ctx, create_stack.identifier(),
                                       tpl, {'some_param': 'bar'}, None, {})
        self.man.thread_group_mgr.groups[sid].wait()
        create_nested = create_stack['nested'].nested()
        self.assertEqual((old_nested.UPDATE, old_nested.COMPLETE),
                         old_nested.state)
        self.assertEqual(create_stack.identifier(), result)
        self.assertIsNotNone(create_stack.identifier().stack_id)
        # The nested stack's ARN and resource id are preserved by the update.
        self.assertEqual(create_nested.identifier().arn(),
                         old_nested['A'].properties['Foo'])
        self.assertEqual(create_nested['A'].id, old_nested['A'].id)
        self.m.VerifyAll()
    def test_stack_update_exceeds_resource_limit(self):
        """Updating to a template over the resource limit raises RequestLimitExceeded."""
        stack_name = 'test_stack_update_exceeds_resource_limit'
        params = {}
        res._register_class('GenericResourceType',
                            generic_rsrc.GenericResource)
        tpl = {'HeatTemplateFormatVersion': '2012-12-12',
               'Resources': {
               'A': {'Type': 'GenericResourceType'},
               'B': {'Type': 'GenericResourceType'},
               'C': {'Type': 'GenericResourceType'}}}
        template = templatem.Template(tpl)
        old_stack = parser.Stack(self.ctx, stack_name, template)
        sid = old_stack.store()
        self.assertIsNotNone(sid)
        # Limit (2) is below the new template's resource count (3).
        cfg.CONF.set_override('max_resources_per_stack', 2)
        ex = self.assertRaises(dispatcher.ExpectedException,
                               self.man.update_stack, self.ctx,
                               old_stack.identifier(), tpl, params,
                               None, {})
        self.assertEqual(ex.exc_info[0], exception.RequestLimitExceeded)
        self.assertIn(exception.StackResourceLimitExceeded.msg_fmt,
                      six.text_type(ex.exc_info[1]))
def test_stack_update_verify_err(self):
stack_name = | |
grep_next_subtree(main_clause_mp, 'VP')
main_clause_vp = grep_next_subtree(main_clause_vp, 'VP') # do this twice because of how the german grammar is set up
main_clause_vp = grep_next_subtree(main_clause_vp, '(IVP|TVP(Masc|Fem|Neut)?)')
main_clause_v = grep_next_subtree(main_clause_vp, '(IV|TV)$')
metadata.update({'v_trans': 'intransitive' if main_clause_v.label().symbol() == 'IV' else 'transitive'})
# definiteness of main clause subject
main_clause_subj = grep_next_subtree(main_clause, 'NP(Sg|Pl)Nom')
main_clause_subj = grep_next_subtree(main_clause_subj, '(NP(Masc|Fem)(Sg|Pl)Nom|PN)')
if main_clause_subj.label().symbol() == 'PN':
metadata.update({'subj_def': 'definite'})
else:
main_clause_subj_det = grep_next_subtree(main_clause, 'Det')
if main_clause_subj_det[0] in ['der', 'die', 'das']:
metadata.update({'subj_def': 'definite'})
else:
metadata.update({'subj_def': 'indefinite'})
# definiteness of main clause object
if main_clause_v.label().symbol() == 'TV':
main_clause_obj = grep_next_subtree(main_clause_vp, 'NP(Masc|Fem|Neut)(Sg|Pl)Acc')
main_clause_obj_det = grep_next_subtree(main_clause_obj, 'Det')
if main_clause_obj_det[0] in ['den', 'die', 'das']:
metadata.update({'obj_def': 'definite'})
else:
metadata.update({'obj_def': 'indefinite'})
else:
metadata.update({'obj_def': None})
# number of main clause subject
if main_clause_subj.label().symbol() == 'PN':
metadata.update({'subj_num': 'sg'})
else:
metadata.update({
'subj_num': 'sg'
if any(
l for l in get_pos_labels(main_clause_subj)
if re.match('N(Masc|Fem|Neut)SgNom', l)
)
else 'pl'
})
# number of main clause object
if main_clause_v.label().symbol() == 'TV':
if 'Pl' in main_clause_obj_det.label().symbol():
metadata.update({'obj_num': 'pl'})
else:
metadata.update({'obj_num': 'sg'})
else:
metadata.update({'obj_num': None})
# main auxiliary
main_clause_m = grep_next_subtree(main_clause_mp, 'M(?!P)')
metadata.update({'main_aux': GERMAN_MODAL_MAP.get(main_clause_m[0], main_clause_m[0])})
# is the main clause subject initial?
labels = get_labels(source)
metadata.update({'main_clause_subj_initial': not 'SInv2' in labels})
# number of AdvPs before and after main clause
main_clause_start = labels.index('S2') if 'S2' in labels else labels.index('SInv2')
pre_main_advps = len([l for i, l in enumerate(labels) if l == 'AdvP' and i < main_clause_start])
post_main_advps = len([l for i, l in enumerate(labels) if l == 'AdvP' and i > main_clause_start])
metadata.update({
'pre_main_advps' : pre_main_advps,
'post_main_advps' : post_main_advps,
'total_advps' : pre_main_advps + post_main_advps
})
# get pos seq with details suppressed
source_pos_seq = get_german_pos_seq(source)
metadata.update({'source_pos_seq': source_pos_seq})
if pfx == 'pos':
metadata.update({'target_pos_seq': source_pos_seq})
else:
tgt_main_clause = grep_next_subtree(target, '(S2|SInv2)')
if not 'kein' in ' '.join(tgt_main_clause.leaves()):
tgt_main_clause_mp = grep_next_subtree(tgt_main_clause, 'MP(Sg|Pl)(Inv)?')
tgt_main_clause_vp = grep_next_subtree(tgt_main_clause_mp, 'VP')
tgt_main_clause_vp = grep_next_subtree(tgt_main_clause_vp, 'VP') # do this twice because of how the german grammar is set up
tgt_main_clause_vp = grep_next_subtree(tgt_main_clause_vp, '(IVP|TVP(Masc|Fem|Neut)?)')
tgt_main_clause_v = grep_next_subtree(tgt_main_clause_vp, '(IV|TV)$')
tgt_main_clause_v.set_label(f'Neg {tgt_main_clause_v.label().symbol()}')
else:
main_clause_indef_det = next(
tgt_main_clause.subtrees(
filter = lambda x: len(x.leaves()) == 1 and 'kein' in x.leaves()[0]
)
)
main_clause_indef_det.set_label(f'Neg {main_clause_indef_det.label().symbol()}')
tgt_pos_seq = get_german_pos_seq(target)
metadata.update({'target_pos_seq': tgt_pos_seq})
metadata.update({'polarity': pfx})
return metadata
def get_turkish_pos_seq(t: Tree) -> str:
    '''Remove unwanted info from Turkish pos tags for comparison purposes and return as a string.'''
    # Flatten multi-word tags into individual whitespace-separated tokens first.
    tokens = [part for raw_tag in get_pos_labels(t) for part in raw_tag.split()]
    cleaned = []
    for token in tokens:
        stripped = token.split('_', 1)[0]       # keep only the part before the first underscore
        stripped = re.sub('P$', '', stripped)   # drop a trailing phrase-marker 'P'
        stripped = re.sub('(Tense|Person[1-3])', '', stripped)  # suppress tense/person detail
        # re-split in case a substitution left whitespace-separated pieces,
        # and silently drop tokens that became empty
        cleaned.extend(stripped.split())
    # Wrap each surviving tag in square brackets, e.g. '[N] [V]'.
    return '[' + '] ['.join(tag for tag in cleaned if tag) + ']'
def get_turkish_example_metadata(
    source: Tree,
    pfx: str,
    target: Tree
) -> Dict:
    """
    Gets metadata about the passed example, consisting of a seq2seq mapping with a source, prefix, and target.
    :param source: Tree: the source Tree
    :param pfx: str: the task prefix passed to the model
    :param target: the target Tree
    :returns metadata: a dictionary recording the following properties for the example:
        - transitivity of the main verb (v_trans)
        - definiteness of main clause subject/object (subj_def, obj_def)
        - number of main clause subject/object (subj_num, obj_num)
        - the identity of the main verb stem (main_v)
        - how many adverbial clauses before and after the main clause
        - the total number of adverbial clauses
        - pos tags
        (not all of these are currently in the turkish grammar, so we'll use a default value)
    """
    # work on deep copies so relabeling the negated target verb below
    # does not mutate the caller's trees
    source = source.copy(deep=True)
    target = target.copy(deep=True)
    main_clause = grep_next_subtree(source, 'S')
    main_clause_vp = grep_next_subtree(main_clause, 'VP')
    main_clause_v = grep_next_subtree(main_clause_vp, 'V_(in)?trans')
    is_intransitive = main_clause_v.label().symbol() == 'V_intrans'
    metadata = {
        'v_trans': 'intransitive' if is_intransitive else 'transitive',
        # placeholders: the turkish grammar does not distinguish these yet
        'subj_def': 'definite',
        'obj_def': 'definite',
        'subj_num': 'sg',
        'obj_num': 'sg',
        # no aux in turkish grammar, so use the main clause verb stem instead for now
        'main_v': grep_next_subtree(main_clause_v, 'stem')[0].strip(),
        # currently no adverbial clauses in turkish
        'pre_main_advps': 0,
        'post_main_advps': 0,
        'total_advps': 0,
    }
    source_pos_seq = get_turkish_pos_seq(source)
    metadata['source_pos_seq'] = source_pos_seq
    if pfx == 'pos':
        # positive examples are identity mappings, so the pos seqs match
        metadata['target_pos_seq'] = source_pos_seq
    else:
        # this may need to be changed later based on what they said about turkish negation
        # if those other strategies are added, more will be needed here
        tgt_main_clause = grep_next_subtree(target, 'S')
        tgt_main_clause_vp = grep_next_subtree(tgt_main_clause, 'VP')
        tgt_main_clause_v = grep_next_subtree(tgt_main_clause_vp, 'stem')
        tgt_main_clause_v.set_label(f'{tgt_main_clause_v.label().symbol()} Neg')
        metadata['target_pos_seq'] = get_turkish_pos_seq(target)
    metadata['polarity'] = pfx
    return metadata
def get_example_metadata(
    grammar: PCFG,
    *args, **kwargs,
) -> Dict:
    """
    Gets metadata about the passed example, consisting of a seq2seq mapping with a source, prefix, and target.
    :param grammar: the grammar that generated the example
    :param args: passed to get_lang_example_metadata()
    :param kwargs: passed to get_lang_example_metadata()
    :returns metadata: a dictionary recording language-specific properties for the example,
        or an empty dict if the grammar's language has no metadata function registered
    """
    function_map = {
        'en': get_english_example_metadata,
        'de': get_german_example_metadata,
        'tu': get_turkish_example_metadata
    }
    # Look the handler up explicitly instead of wrapping the *call* in a
    # try/except KeyError: the previous approach also silently swallowed
    # KeyErrors raised inside the metadata functions themselves, masking bugs.
    metadata_fn = function_map.get(grammar.lang)
    if metadata_fn is None:
        return {}
    return metadata_fn(*args, **kwargs)
def create_dataset_json(
    grammar: PCFG,
    ex_generator: Callable,
    file_prefix: str = '',
    overwrite: bool = False,
    **splits: Dict[str,int]
) -> None:
    """
    Create a dataset json file that can be read using the datasets module's dataset loader.
    Also outputs a companion json that records various linguistic properties of each sentence.
    :param grammar: PCFG: a PCFG object
    :param ex_generator: function: a function that creates a pair of sentences and associated tags from the grammar
    :param file_prefix: str: an identifier to add to the beginning of the output file names
    :param overwrite: bool: whether to overwrite existing datasets with matching names
    :param splits: kwargs mapping a set label to the number of examples to generate for that set
        ex: train=10000, dev=1000, test=10000
    """
    # Append a separator to the prefix unless it is empty or already ends in one.
    # (The previous `... else ''` silently DISCARDED a prefix that already ended
    # in '-' or '_' instead of keeping it.)
    if file_prefix and not file_prefix.endswith(('-', '_')):
        file_prefix = file_prefix + '_'
    create_data_path(os.path.join('data', file_prefix))
    for name, n_examples in splits.items():
        dataset_path = os.path.join('data', file_prefix + name + '.json.gz')
        if os.path.exists(dataset_path) and not overwrite:
            print(f'{name} dataset already exists. Skipping. Use overwrite=True to force regeneration.')
            continue
        metadata = []     # per-example linguistic properties (companion file)
        prefixes = {}     # task-prefix -> count, for the proportion report below
        examples = []     # the seq2seq examples themselves
        print(f'Generating {name} examples')
        for n in tqdm(range(n_examples)):
            source, pfx, target = ex_generator(grammar)
            metadata.append(get_example_metadata(grammar, source, pfx, target))
            prefixes[pfx] = prefixes.get(pfx, 0) + 1
            examples.append({
                'translation': {
                    'src' : format_tree_string(source, grammar.lang, pfx),
                    'prefix': pfx,
                    'tgt' : format_tree_string(target, grammar.lang, pfx)
                }
            })
        # report the observed proportion of each task prefix
        for pfx in prefixes:
            print(f'{name} prop {pfx} examples: {prefixes[pfx]/n_examples}')
        if examples:
            # one json object per line (jsonlines), gzipped
            print('Saving examples to data/' + file_prefix + name + '.json.gz')
            with gzip.open(dataset_path, 'wt', encoding='utf-8') as f:
                for ex in tqdm(examples):
                    json.dump(ex, f, ensure_ascii=False)
                    f.write('\n')
            print('Saving metadata to data/' + file_prefix + name + '_metadata.json.gz')
            with gzip.open(os.path.join('data', file_prefix + name + '_metadata.json.gz'), 'wt', encoding='utf-8') as f:
                for ex in tqdm(metadata):
                    json.dump(ex, f, ensure_ascii=False)
                    f.write('\n')
            print('')
def combine_dataset_jsons(
    file_prefix: str = '',
    *files: Tuple[str],
    overwrite: bool = False,
) -> None:
    '''
    Combines dataset jsons.
    :param file_prefix: str: a prefix (without extension) to give to the combined file
    :param *files: Tuple[str]: tuple of strings containing the files to combine
        (in the order they should be put into the resulting file)
    :param overwrite: bool: whether to overwrite existing files
    '''
    out_path = os.path.join('data', file_prefix + '.json.gz')
    # Do nothing if the combined file already exists and we may not overwrite it.
    if os.path.exists(out_path) and not overwrite:
        return
    create_data_path(os.path.join('data', file_prefix))
    chunks = []
    for filename in files:
        # tolerate filenames given with or without the .json.gz extension
        in_name = filename if filename.endswith('.json.gz') else filename + '.json.gz'
        with gzip.open(os.path.join('data', in_name), 'rt', encoding='utf-8') as in_file:
            chunks.append(in_file.read())
    with gzip.open(out_path, 'wt', encoding='utf-8') as out_file:
        out_file.write(''.join(chunks))
def create_negation_datasets(
configs: Dict[str,List] = None,
**kwargs
) -> None:
'''
Create json datasets according to the passed configs.
:param configs: (List[Dict]): This should be in the following format:
A dict mapping a language id to a List of arguments.
Each list of arguments consists of a Dict mapping str to floats, a PCFG, and an example generator function.
The dict maps strings to a list containing a float and a dictionary containing splits.
Each float is passed to the ex_generator function, with splits mapping strings to numbers that define how many examples to create for each split
when that float is passed to ex_generator.
The PCFG is the grammar from which to generate examples.
The example generator function should take the grammar and the probability of generating a negative example as argument.
example:
configs = {
'en': [
{
'neg': [
0.5,
{
'train': 100000,
'dev': 1000,
'test': 10000
}
],
'pos': [
0.,
{
'train': 500
}
]
},
english_grammar.not_grammar,
english_grammar.neg_or_pos
],
'de': [
{
'neg': [
0.5,
{
'train': 100000,
'dev': 1000,
'test': 10000
}
],
'pos': [
0.,
{
'train': 500
}
]
},
german_grammar.nicht_grammar,
german_grammar.neg_or_pos
]
}
This config will create for each split neg_en dataset consisting of approximately 50% positive-negative/positive-positive examples,
and a pos_en dataset consisting of 100% positive-positive examples, and likewise for german.
:param kwargs: passed to create_dataset_json
If no argument is passed, attempt to load the configs from a file ./data/config.json
'''
configs = load_configs(configs) if configs is None or isinstance(configs,str) else configs
for lang in configs:
print(f'Creating datasets for {lang}')
prob_map = configs[lang][0]
# if we're loading from a file, we have to store these as strings,
# so we need to import the actual objects
if isinstance(configs[lang][1],str) and isinstance(configs[lang][2],str):
module1 = configs[lang][1].split('.')[0]
module2 = configs[lang][2].split('.')[0]
exec(f'import {module1}, {module2}')
grammar = eval(configs[lang][1])
ex_generator = eval(configs[lang][2])
else:
grammar = configs[lang][1]
ex_generator = configs[lang][2]
for dataset_type in prob_map:
p = prob_map[dataset_type][0]
splits = prob_map[dataset_type][1]
file_prefix | |
"y":
default_x = stat
self._add_axis_labels(ax, default_x, default_y)
if "hue" in self.variables and legend:
artist = partial(mpl.lines.Line2D, [], [])
alpha = plot_kws.get("alpha", 1)
self._add_legend(
ax, artist, False, False, None, alpha, plot_kws, {},
)
def plot_rug(self, height, expand_margins, legend, ax, kws):
kws = _normalize_kwargs(kws, mpl.lines.Line2D)
# TODO we need to abstract this logic
scout, = ax.plot([], [], **kws)
kws["color"] = kws.pop("color", scout.get_color())
scout.remove()
kws.setdefault("linewidth", 1)
if expand_margins:
xmarg, ymarg = ax.margins()
if "x" in self.variables:
ymarg += height * 2
if "y" in self.variables:
xmarg += height * 2
ax.margins(x=xmarg, y=ymarg)
if "hue" in self.variables:
kws.pop("c", None)
kws.pop("color", None)
if "x" in self.variables:
self._plot_single_rug("x", height, ax, kws)
if "y" in self.variables:
self._plot_single_rug("y", height, ax, kws)
# --- Finalize the plot
self._add_axis_labels(ax)
if "hue" in self.variables and legend:
# TODO ideally i'd like the legend artist to look like a rug
legend_artist = partial(mpl.lines.Line2D, [], [])
self._add_legend(
ax, legend_artist, False, False, None, 1, {}, {},
)
def _plot_single_rug(self, var, height, ax, kws):
"""Draw a rugplot along one axis of the plot."""
vector = self.plot_data[var]
n = len(vector)
# We'll always add a single collection with varying colors
if "hue" in self.variables:
colors = self._hue_map(self.plot_data["hue"])
else:
colors = None
# Build the array of values for the LineCollection
if var == "x":
trans = tx.blended_transform_factory(ax.transData, ax.transAxes)
xy_pairs = np.column_stack([
np.repeat(vector, 2), np.tile([0, height], n)
])
if var == "y":
trans = tx.blended_transform_factory(ax.transAxes, ax.transData)
xy_pairs = np.column_stack([
np.tile([0, height], n), np.repeat(vector, 2)
])
# Draw the lines on the plot
line_segs = xy_pairs.reshape([n, 2, 2])
ax.add_collection(LineCollection(
line_segs, transform=trans, colors=colors, **kws
))
ax.autoscale_view(scalex=var == "x", scaley=var == "y")
# ==================================================================================== #
# External API
# ==================================================================================== #
def histplot(
    data=None, *,
    # Vector variables
    x=None, y=None, hue=None, weights=None,
    # Histogram computation parameters
    stat="count", bins="auto", binwidth=None, binrange=None,
    discrete=None, cumulative=False, common_bins=True, common_norm=True,
    # Histogram appearance parameters
    multiple="layer", element="bars", fill=True, shrink=1,
    # Histogram smoothing with a kernel density estimate
    kde=False, kde_kws=None, line_kws=None,
    # Bivariate histogram parameters
    thresh=0, pthresh=None, pmax=None, cbar=False, cbar_ax=None, cbar_kws=None,
    # Hue mapping parameters
    palette=None, hue_order=None, hue_norm=None, color=None,
    # Axes information
    log_scale=None, legend=True, ax=None,
    # Other appearance keywords
    **kwargs,
):
    """Plot univariate or bivariate histograms (full docstring assigned below)."""
    # NOTE: locals() is captured before any new local is bound, so
    # get_semantics sees exactly the user-supplied parameters.
    p = _DistributionPlotter(
        data=data,
        variables=_DistributionPlotter.get_semantics(locals()),
    )
    p.map_hue(palette=palette, order=hue_order, norm=hue_norm)
    if ax is None:
        ax = plt.gca()
    # TODO move these defaults inside the plot functions
    kde_kws = {} if kde_kws is None else kde_kws
    line_kws = {} if line_kws is None else line_kws
    cbar_kws = {} if cbar_kws is None else cbar_kws
    # Check for a specification that lacks x/y data and return early
    if not p.has_xy_data:
        return ax
    # Attach the axes to the plotter, setting up unit conversions
    p._attach(ax, log_scale=log_scale)
    # Default to discrete bins for categorical variables
    # Note that having this logic here may constrain plans for distplot
    # It can move inside the plot_ functions, it will just need to modify
    # the estimate_kws dictionary (I am not sure how we feel about that)
    if discrete is None:
        if p.univariate:
            discrete = p.var_types[p.data_variable] == "categorical"
        else:
            discrete = (
                p.var_types["x"] == "categorical",
                p.var_types["y"] == "categorical",
            )
    estimate_kws = {
        "stat": stat,
        "bins": bins,
        "binwidth": binwidth,
        "binrange": binrange,
        "discrete": discrete,
        "cumulative": cumulative,
    }
    if p.univariate:
        if "hue" not in p.variables:
            kwargs["color"] = color
        p.plot_univariate_histogram(
            multiple=multiple,
            element=element,
            fill=fill,
            shrink=shrink,
            common_norm=common_norm,
            common_bins=common_bins,
            kde=kde,
            kde_kws=kde_kws.copy(),
            color=color,
            legend=legend,
            estimate_kws=estimate_kws.copy(),
            line_kws=line_kws.copy(),
            plot_kws=kwargs,
            ax=ax,
        )
    else:
        p.plot_bivariate_histogram(
            common_bins=common_bins,
            common_norm=common_norm,
            thresh=thresh,
            pthresh=pthresh,
            pmax=pmax,
            color=color,
            legend=legend,
            cbar=cbar,
            cbar_ax=cbar_ax,
            cbar_kws=cbar_kws,
            estimate_kws=estimate_kws,
            plot_kws=kwargs,
            ax=ax,
        )
    return ax
histplot.__doc__ = """\
Plot univariate or bivariate histograms to show distributions of datasets.

A histogram is a classic visualization tool that represents the distribution
of one or more variables by counting the number of observations that fall within
discrete bins.

This function can normalize the statistic computed within each bin to estimate
frequency, density or probability mass, and it can add a smooth curve obtained
using a kernel density estimate, similar to :func:`kdeplot`.

More information is provided in the :ref:`user guide <userguide_hist>`.

Parameters
----------
{params.core.data}
{params.core.xy}
{params.core.hue}
weights : vector or key in ``data``
    If provided, weight the contribution of the corresponding data points
    towards the count in each bin by these factors.
{params.hist.stat}
{params.hist.bins}
{params.hist.binwidth}
{params.hist.binrange}
discrete : bool
    If True, default to ``binwidth=1`` and draw the bars so that they are
    centered on their corresponding data points. This avoids "gaps" that may
    otherwise appear when using discrete (integer) data.
cumulative : bool
    If True, plot the cumulative counts as bins increase.
common_bins : bool
    If True, use the same bins when semantic variables produce multiple
    plots. If using a reference rule to determine the bins, it will be computed
    with the full dataset.
common_norm : bool
    If True and using a normalized statistic, the normalization will apply over
    the full dataset. Otherwise, normalize each histogram independently.
multiple : {{"layer", "dodge", "stack", "fill"}}
    Approach to resolving multiple elements when semantic mapping creates subsets.
    Only relevant with univariate data.
element : {{"bars", "step", "poly"}}
    Visual representation of the histogram statistic.
    Only relevant with univariate data.
fill : bool
    If True, fill in the space under the histogram.
    Only relevant with univariate data.
shrink : number
    Scale the width of each bar relative to the binwidth by this factor.
    Only relevant with univariate data.
kde : bool
    If True, compute a kernel density estimate to smooth the distribution
    and show on the plot as (one or more) line(s).
    Only relevant with univariate data.
kde_kws : dict
    Parameters that control the KDE computation, as in :func:`kdeplot`.
line_kws : dict
    Parameters that control the KDE visualization, passed to
    :meth:`matplotlib.axes.Axes.plot`.
thresh : number or None
    Cells with a statistic less than or equal to this value will be transparent.
    Only relevant with bivariate data.
pthresh : number or None
    Like ``thresh``, but a value in [0, 1] such that cells with aggregate counts
    (or other statistics, when used) up to this proportion of the total will be
    transparent.
pmax : number or None
    A value in [0, 1] that sets the saturation point for the colormap at a value
    such that cells below it constitute this proportion of the total count (or
    other statistic, when used).
{params.dist.cbar}
{params.dist.cbar_ax}
{params.dist.cbar_kws}
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
{params.core.color}
{params.dist.log_scale}
{params.dist.legend}
{params.core.ax}
kwargs
    Other keyword arguments are passed to one of the following matplotlib
    functions:

    - :meth:`matplotlib.axes.Axes.bar` (univariate, element="bars")
    - :meth:`matplotlib.axes.Axes.fill_between` (univariate, other element, fill=True)
    - :meth:`matplotlib.axes.Axes.plot` (univariate, other element, fill=False)
    - :meth:`matplotlib.axes.Axes.pcolormesh` (bivariate)

Returns
-------
{returns.ax}

See Also
--------
{seealso.kdeplot}
{seealso.rugplot}
{seealso.ecdfplot}
{seealso.jointplot}
distplot

Notes
-----
The choice of bins for computing and plotting a histogram can exert
substantial influence on the insights that one is able to draw from the
visualization. If the bins are too large, they may erase important features.
On the other hand, bins that are too small may be dominated by random
variability, obscuring the shape of the true underlying distribution. The
default bin size is determined using a reference rule that depends on the
sample size and variance. This works well in many cases (i.e., with
"well-behaved" data) but it fails in others. It is always a good idea to try
different bin sizes to be sure that you are not missing something important.
This function allows you to specify bins in several different ways, such as
by setting the total number of bins to use, the width of each bin, or the
specific locations where the bins should break.

Examples
--------
.. include:: ../docstrings/histplot.rst

""".format(
    params=_param_docs,
    returns=_core_docs["returns"],
    seealso=_core_docs["seealso"],
)
@_deprecate_positional_args
def kdeplot(
x=None, # Allow positional x, because behavior will not change with reorg
*,
y=None,
shade=None, # Note "soft" deprecation, explained below
vertical=False, # Deprecated
kernel=None, # Deprecated
bw=None, # Deprecated
gridsize=200, # TODO maybe depend on uni/bivariate?
cut=3, clip=None, legend=True, cumulative=False,
shade_lowest=None, # Deprecated, controlled with levels now
cbar=False, cbar_ax=None, cbar_kws=None,
ax=None,
# New params
weights=None, # TODO note that weights is grouped with semantics
hue=None, palette=None, hue_order=None, hue_norm=None,
multiple="layer", common_norm=True, common_grid=False,
levels=10, thresh=.05,
bw_method="scott", bw_adjust=1, log_scale=None,
color=None, fill=None,
# Renamed params
data=None, data2=None,
**kwargs,
):
# Handle deprecation of `data2` as name for y variable
if data2 is not None:
y = data2
# If `data2` is present, we need to check for the `data` kwarg being
# used to pass a vector for `x`. We'll reassign the vectors and warn.
# We need this check because just passing a vector to `data` is now
# technically valid.
x_passed_as_data = (
x is None
and | |
logic, but we can deal with this later
if thisUser == 'None':
thisUser = GUESTID
user = int(thisUser)
lp = Listeningpost(name=unicode('New LP'),
userid = user,
created = datetime.now(),
modified = datetime.now()
)
lpid = lp.id
retStr="?extra_lpid=" + str(lpid) + "&userid=" + str(user) + "&status=owner"
raise redirect("/edit_lp/" + retStr)
#view the LPs, allowing for selection, editing, and deletion
    @expose(template="buzzbot.templates.view_lps")
    @expose()
    def view_lps(self, **kwargs):
        """
        List every listening post (LP) the current user may see.

        Builds four parallel lists (LP objects, edit links, delete links, view
        links) plus owner names, in this order: the user's own LPs, public LPs
        owned by members of the same group, then "client-worthy" LPs (e.g.
        added by an admin). Returns the dict consumed by the view_lps template.
        """
        #figure out who this user is and where he belongs
        thisUserIDObj = identity.current.identity()
        msg=""
        #tg's identity module relies on cookies. If the user has these blocked, the user is
        # set to 'None', which confuses the routine. In this case, we'll provide a 'guest' id
        # and a notification
        if thisUserIDObj.user_id == None:
            setUser(GUESTID)
            thisUserIDObj = identity.current.identity()
            msg = "Note: you are logged in as guest because cookies are blocked on your browser."
        thisUser = thisUserIDObj.user_id
        thisUserGroup = thisUserIDObj.groups
        #the only group we really care about at this point is admin (to allow access to the
        # interface)
        isadmin = False
        if 'admin' in thisUserGroup:
            isadmin = True
        #grab all the LPs - this is a bit inefficient, but we won't have millions for a while
        lps = model.Listeningpost.select()
        lpsThisUser=model.Listeningpost.selectBy(userid=thisUser)
        lpsToDisplay = [] #an array of LP objects to pass to the controller
        editURL = [] # ...and a parallel array that flags that editing is allowed
        deleteURL =[] # ...and one to signal that deletion is allowed
        viewURL = [] # ...and one to view the results
        ownerName =[] # ...and one with the display name of each LP's owner
        allUsers = User.select()
        #if we have a first-time user, create a new LP and start again
        # NOTE(review): create_lp() appears to raise a redirect, so control
        # presumably does not return here - confirm
        if lpsThisUser.count() <1 :
            mylpid = self.create_lp()
        #list the LPs in this order: user's, group's, admin's
        for s in lps:
            if s.userid == thisUser: #give the user full run of his own searches
                lpsToDisplay.append(s)
                editURL.append("/edit_lp/" + "?extra_lpid=" + str(s.id) + "&userid=" + str(thisUser) +"&status=owner" )
                deleteURL.append("/verify_delete_lp/" + "?" + "extra_lpid=" + str(s.id) + "&userid=" + str(thisUser) +"&status=owner" )
                #find the "search" associated with this LP (it's an amalgam of all related searches);
                # only link to results if that search has produced content
                if s.searchtorun > 0 and model.Content.selectBy(searchid=s.searchtorun).count()>0:
                    viewURL.append("/view_content/" + "?" + "searchid=" + str(s.searchtorun) + "&userid=" + str(thisUser) +"&status=owner" )
                else: viewURL.append("")
                ownerName.append(getUserName(thisUser))
        #if the LP belongs to someone else and it's public then add it
        for s in lps:
            if s.userid <> thisUser and s.is_public:
                ownerSearchObj = User.selectBy(id=s.userid)
                thisOwner = ownerSearchObj[0]
                thisOwnerName = thisOwner._get_display_name()
                thisOwnerGroup = thisOwner.groups
                #only show public LPs owned by members of this user's group
                if thisOwnerGroup == thisUserGroup:
                    lpsToDisplay.append(s)
                    editURL.append("/edit_lp/" + "?" + "extra_lpid=" + str(s.id) + "&userid=" + str(thisUser) +"&status=nonowner" )
                    deleteURL.append("") #empty link = deletion not allowed
                    #find the "search" associated with this LP (it's an amalgam of all related searches)
                    if s.searchtorun > 0 and model.Content.selectBy(searchid=s.searchtorun).count()>0:
                        viewURL.append("/view_content/" + "?" + "searchid=" + str(s.searchtorun) + "&userid=" + str(thisUser) +"&status=owner" )
                    else: viewURL.append("")
                    ownerName.append(getUserName(s.userid))
        #now find client-worthy LPs (perhaps added by an admin)
        for s in lps:
            if s.is_client_worthy:
                #screen out LPs we've already added
                addMe=True
                for d in lpsToDisplay:
                    if d.id == s.id:
                        addMe=False
                if addMe:
                    lpsToDisplay.append(s)
                    editURL.append("/edit_lp/" + "?"+ "extra_lpid=" + str(s.id) + "&userid=" + str(thisUser)+"&status=nonowner" )
                    deleteURL.append("") #empty link = deletion not allowed
                    #find the "search" associated with this LP (it's an amalgam of all related searches)
                    if s.searchtorun > 0 and model.Content.selectBy(searchid=s.searchtorun).count()>0:
                        viewURL.append("/view_content/" + "?" + "searchid=" + str(s.searchtorun) + "&userid=" + str(thisUser) +"&status=owner" )
                    else: viewURL.append("")
                    ownerName.append(getUserName(s.userid))
        #this directs the returned form to the processSLPInput method
        retStr="&userid=" + str(thisUser)
        submit_action = "/process_view_lp_buttons/?"+retStr
        return dict(form=edit_lp_form, lps=lpsToDisplay, editlink = editURL,
                    owner=ownerName, deletelink = deleteURL, viewlink=viewURL,
                    msg=msg, action = submit_action, isadmin = isadmin)
@expose(template = "buzzbot.templates.edit_lp")
def process_view_lp_buttons(self, **kwargs):
#if a new LP has been requested, call edit_lp to make one
if "new" in kwargs:
self.create_lp()
return
if "admin" in kwargs:
raise redirect("/admin/")
if "home" in kwargs:
raise redirect("/login/" )
@expose(template="buzzbot.templates.edit_lp")
@expose()
def edit_lp(self, tg_error=None, **kwargs):
args=[]
args=kwargs
thisID = identity.current.identity()
thisUser = thisID.user_id
thisUserGroup = thisID.groups
thisLP = -666
#the lpid field will come from the listeningPosts database
# or from the search controller (as extra_lpid); this to differentiate
# status when updating different db tables
if "extra_lpid" in kwargs:
thisLP = int(kwargs.get('extra_lpid'))
if "lpid" in kwargs:
thisLP = int(kwargs.get('lpid'))
#find this lp and determine if the user owns it
try:#this succeeds if the LP exists
lp = model.Listeningpost.get(thisLP)
if lp.userid == thisUser:
thisStatus = 'owner'
else: thisStatus = 'nonowner'
except:#...if it fails, we'll build a new one
lp = model.Listeningpost(userid = thisUser, name = "new LP", status = "owner")
#create a dict of user-specifiable parameters of the listening post
inputDict = {'name' : lp.name,
'description' : lp.description,
'is_client_worthy' : lp.is_client_worthy,
'is_public' : lp.is_public,
'update_nightly': lp.update_nightly,
'userid': lp.userid,
'submit_text': 'OK',
'extra_lpid': thisLP,
'targetword': lp.targetword
}
#get all the searches associated with this LP; we'll inject them into the template
# with as links to the edit search template; each link will carry url tags to identify
# the owner, group, and LPid, and to (dis)allow editing or deletion of others' searches;
# we could just set cookies, but this is a bit cleaner vis-a-vis security
searchesThisLP = model.LPSearch.selectBy(lp_id=thisLP)
searchesThisUser=model.Search.selectBy(userid=thisUser)
allSearches = model.Search.select()
searchesToDisplay = [] #an array of search objects to pass the the controller
editURL = [] # ...and a parallel array that flags that editing is allowed
deleteURL =[] # ...and one to signal that deletion is allowed
ownerName =[]
allUsers = User.select()
#List the searches in this order: this LP's, user's, group's, admin's
# Icons will get the tags to direct user to edit and delete methods; the delete tag
# is null if the user lacks permission to delete (not the owner, for now). We'll refine the
# permissions to define group, sub-group, etc. permissions later.
for s in searchesThisLP:
try:
#add the name of the search, links to edit, delete, and the owner's name to their own lists
thisSearch = model.Search.get(int(s.search_id))
searchesToDisplay.append(thisSearch)
editURL.append("/edit_search/" + "?id=" + str(s.search_id) + "&user=" + str(thisUser) +"&status=owner" + "&lpid=" + str(thisLP))
deleteURL.append("/verify_delete_search/" + "?" + "id=" + str(s.search_id) + "&user=" + str(thisUser) +"&status=owner"+ "&lpid=" + str(thisLP) )
ownerName.append(getUserName(thisUser))
except:
pass
#get this user's searches
for s in searchesThisUser:
if s.userid == thisUser and not s.islp: #give the user full run of his own searches
#screen out searches we've already added to our list
addMe=True
for d in searchesToDisplay:
if d.id == s.id:
addMe=False
if addMe:
searchesToDisplay.append(s)
editURL.append("/edit_search/" + "?id=" + str(s.id) + "&user=" + str(thisUser) +"&status=owner"+ "&lpid=" + str(thisLP) )
deleteURL.append("/verify_delete_search/" + "?" + "id=" + str(s.id) + "&user=" + str(thisUser) +"&status=owner" + "&lpid=" + str(thisLP))
ownerName.append(getUserName(thisUser))
#these searches belong to other users in the same group and are marked public
for s in allSearches:
if s.userid <> thisUser and s.is_public and not s.islp:
#find this search owner's group
thisOwner = User.get(s.userid)
thisSearchOwnersGroup = thisOwner.groups
if thisSearchOwnersGroup == thisUserGroup:
addMe=True
for d in searchesToDisplay:
if d.id == s.id:
addMe=False
if addMe:
searchesToDisplay.append(s)
editURL.append("/edit_search/" + "?" + "id=" + str(s.id) + "&user=" + str(thisUser) +"&status=nonowner"+ "&lpid=" + str(thisLP) )
deleteURL.append("")
ownerName.append(getUserName(s.userid))
#now find client-worthy searches (perhaps added by an admin)
for s in allSearches:
if s.is_client_worthy and not s.islp:
#screen out searches we've already added to our list
addMe=True
for d in searchesToDisplay:
if d.id == s.id:
addMe=False
if addMe:
searchesToDisplay.append(s)
editURL.append("/edit_search/" + "?" + "id=" + str(s.id) + "&user=" + str(thisUser) +"&status=nonowner"+ "&lpid=" + str(thisLP) )
deleteURL.append("")
ownerName.append(getUserName(s.userid))
#make a list of check-box widgets for (de)selecting searches
widgetList=[]
#we'll keep count and check the ones currently associated with this LP
count = 1
currentSearches=searchesThisLP.count()
for s in searchesToDisplay:
#we'll | |
= len(self.samplesList[0])
if (isPreferedDataUsed == True):
mean = preferedMean
standardDeviation = preferedStandardDeviation
else:
mean = []
temporalRow = []
for column in range(0, numberOfColumns):
temporalRow.append(0)
mean.append(temporalRow)
for row in range(0, numberOfSamples):
for column in range(0, numberOfColumns):
mean[0][column] = mean[0][column] + self.samplesList[row][column]
for column in range(0, numberOfColumns):
mean[0][column] = mean[0][column]/numberOfSamples
standardDeviation = []
temporalRow = []
for column in range(0, numberOfColumns):
temporalRow.append(0)
standardDeviation.append(temporalRow)
for row in range(0, numberOfSamples):
for column in range(0, numberOfColumns):
standardDeviation[0][column] = standardDeviation[0][column] + (self.samplesList[row][column]-mean[0][column])**2
for column in range(0, numberOfColumns):
standardDeviation[0][column] = (standardDeviation[0][column]/(numberOfSamples-1))**(0.5)
# Now that we have obtained the data we need for the Normalization
# equation, we now plug in those values in it.
normalizedDataPoints = []
for row in range(0, numberOfSamples):
temporalRow = []
for column in range(0, numberOfColumns):
temporalRow.append((self.samplesList[row][column] - mean[0][column])/standardDeviation[0][column])
normalizedDataPoints.append(temporalRow)
        # We save the current modeling results
normalizedResults = []
normalizedResults.append(mean)
normalizedResults.append(standardDeviation)
normalizedResults.append(normalizedDataPoints)
return normalizedResults
"""
getReverseStandarization("preferedMean=prefered Mean",
preferedStandardDeviation="prefered Standard Deviation value")
This method returns a dataset but with its original datapoint values before
having applied the Standarization Feature Scaling method.
CODE EXAMPLE1:
matrix_x = [
[-1.1902380714238083, -1.422606594884729],
[0.0, -1.422606594884729],
[1.1902380714238083, -1.422606594884729],
[-1.1902380714238083, -0.8535639569308374],
[0.0, -0.8535639569308374],
[1.1902380714238083, -0.8535639569308374],
[-1.1902380714238083, -0.2845213189769458],
[0.0, -0.2845213189769458],
[1.1902380714238083, -0.2845213189769458],
[-1.1902380714238083, 0.2845213189769458],
[0.0, 0.2845213189769458],
[1.1902380714238083, 0.2845213189769458],
[-1.1902380714238083, 0.8535639569308374],
[0.0, 0.8535639569308374],
[1.1902380714238083, 0.8535639569308374],
[-1.1902380714238083, 1.422606594884729],
[0.0, 1.422606594884729],
[1.1902380714238083, 1.422606594884729]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
featureScaling = mSL.FeatureScaling(matrix_x)
mean = [[100, 21.25]]
standardDeviation = [[21.004201260420146, 4.393343895967546]]
deNormalizedResults = featureScaling.getReverseStandarization(preferedMean=mean, preferedStandardDeviation=standardDeviation)
preferedMean = deNormalizedResults[0]
preferedStandardDeviation = deNormalizedResults[1]
deNormalizedDataPoints = deNormalizedResults[2]
EXPECTED CODE1 RESULT:
preferedMean =
[[100.0, 21.25]]
preferedStandardDeviation =
[[21.004201260420146, 4.393343895967546]]
deNormalizedDataPoints =
[[75.0, 15.0],
[100.0, 15.0],
[125.0, 15.0],
[75.0, 17.5],
[100.0, 17.5],
[125.0, 17.5],
[75.0, 20.0],
[100.0, 20.0],
[125.0, 20.0],
[75.0, 22.5],
[100.0, 22.5],
[125.0, 22.5],
[75.0, 25.0],
[100.0, 25.0],
[125.0, 25.0],
[75.0, 27.5],
[100.0, 27.5],
[125.0, 27.5]]
"""
def getReverseStandarization(self, preferedMean, preferedStandardDeviation):
numberOfSamples = len(self.samplesList)
numberOfColumns = len(self.samplesList[0])
deNormalizedDataPoints = []
for row in range(0, numberOfSamples):
temporalRow = []
for column in range(0, numberOfColumns):
temporalRow.append(self.samplesList[row][column]*preferedStandardDeviation[0][column] + preferedMean[0][column])
deNormalizedDataPoints.append(temporalRow)
# We save the current the modeling results
deNormalizedResults = []
deNormalizedResults.append(preferedMean)
deNormalizedResults.append(preferedStandardDeviation)
deNormalizedResults.append(deNormalizedDataPoints)
return deNormalizedResults
"""
setSamplesList(newSamplesList="the new samples list that you wish to work with")
This method sets a new value in the objects local variable "samplesList".
"""
def setSamplesList(self, newSamplesList):
self.samplesList = newSamplesList
"""
    The Regression library gives several different types of coefficients to model
a required data. But notice that the arguments of this class are expected to be
the mean values of both the "x" and the "y" values.
Regression("mean values of the x datapoints to model", "mean values of the y datapoints to model")
"""
class Regression:
    def __init__(self, x_samplesList, y_samplesList):
        # Independent (x) and dependent (y) sample matrices that the
        # regression methods of this class operate on.
        self.y_samplesList = y_samplesList
        self.x_samplesList = x_samplesList
def set_xSamplesList(self, x_samplesList):
self.x_samplesList = x_samplesList
def set_ySamplesList(self, y_samplesList):
self.y_samplesList = y_samplesList
"""
# ----------------------------------- #
# ----------------------------------- #
# ----- STILL UNDER DEVELOPMENT ----- #
# ----------------------------------- #
# ----------------------------------- #
getGaussianRegression()
Returns the best fitting model to predict the behavior of a dataset through
a Gaussian Regression model that may have any number of independent
variables (x).
    Note that if no fitting model is found, then this method will swap the
    dependent variable values in such a way that "0"s will be interpreted as
    "1"s and vice-versa, to then try again to find at least 1 fitting model for
    your dataset. If this still doesn't work, then this method will return
    modeling results with all coefficients equal to zero, predicted
    accuracy equal to zero, and all predicted values equal to zero.
CODE EXAMPLE:
# We will simulate a dataset that you would normally have in its original form
matrix_x = [
[2, 3],
[3, 2],
[4, 3],
[3, 4],
[1, 3],
[3, 1],
[5, 3],
[3, 5]
]
matrix_y = [
[1],
[1],
[1],
[1],
[0],
[0],
[0],
[0]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getGaussianRegression()
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
EXPECTED CODE RESULT:
modelCoefficients =
[[39.139277579342206],
[-13.813509557297337],
[2.302251592882884],
[-13.813509557296968],
[2.302251592882836]]
accuracyFromTraining =
99.94999999999685
predictedData =
[[0.9989999999998915],
[0.9990000000000229],
[0.9989999999999554],
[0.9989999999999234],
[0.0009999999999997621],
[0.0010000000000001175],
[0.00099999999999989],
[0.000999999999999915]]
# NOTE:"predictedData" will try to give "1" for positive values and "0"
# for negative values always, regardless if your negative values
# were originally given to the trained model as "-1"s.
coefficientDistribution =
'Coefficients distribution for the Gaussian function is as follows: Gaussian = exp(-(bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + ... + b_(n-1)*xn + bn*xn^2 ))'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
def getGaussianRegression(self):
from . import MortrackML_Library as mSL
import math
numberOfRows = len(self.y_samplesList)
# We re-adapt the current dependent samples (y) so that we can later
# use them to make the Gaussian function model withouth obtaining
# indeterminate values.
modifiedSamplesList_y = []
for row in range(0, numberOfRows):
temporalRow = []
if ((self.y_samplesList[row][0]!=1) and (self.y_samplesList[row][0]!=-1) and (self.y_samplesList[row][0]!=0) and (self.y_samplesList[row][0]!=0.001) and (self.y_samplesList[row][0]!=0.999)):
raise Exception('ERROR: One of the dependent (y) data points doesnt have the right format values (eg. 1 or a -1; 1 or a 0; 0.999 or a 0.001).')
if ((self.y_samplesList[row][0]==1) or (self.y_samplesList[row][0]==0.999)):
temporalRow.append(0.999)
if ((self.y_samplesList[row][0]==-1) or (self.y_samplesList[row][0]==0) or self.y_samplesList[row][0]==0.001):
temporalRow.append(0.001)
modifiedSamplesList_y.append(temporalRow)
# We modify our current dependent samples (y) to get the dependent
# samples (y) that we will input to make the Gaussian function model
modifiedGaussianSamplesList_y = []
for row in range(0, numberOfRows):
temporalRow = []
#temporalRow.append( -math.log(modifiedSamplesList_y[row][0])*2 )
temporalRow.append( -math.log(modifiedSamplesList_y[row][0]) )
modifiedGaussianSamplesList_y.append(temporalRow)
# We obtain the independent coefficients of the best fitting model
# obtained through the Gaussian function (kernel) that we will use to distort
# the current dimentional spaces that we were originally given by the
# user
regression = mSL.Regression(self.x_samplesList, modifiedGaussianSamplesList_y)
modelingResults = regression.getMultiplePolynomialRegression(orderOfThePolynomial=2, evtfbmip=False)
allModeledAccuracies = modelingResults[4]
# Re-evaluate every obtained model trained through the Multiple
# Polynomial Regression but this time determining the best fitting
# model by recalculating each of their accuracies but this time with
# the right math equation, which would be the gaussian function.
bestModelingResults = []
for currentModelingResults in range(0, len(allModeledAccuracies)):
currentCoefficients = allModeledAccuracies[currentModelingResults][1]
isComplyingWithGaussCoefficientsSigns = True
for currentCoefficient in range(0, len(currentCoefficients)):
if ((currentCoefficients==0) and (currentCoefficients[currentCoefficient][0]<0)):
isComplyingWithGaussCoefficientsSigns = False
else:
#if (((currentCoefficient%2)!=0) and (currentCoefficients[currentCoefficient][0]>0)):
# isComplyingWithGaussCoefficientsSigns = False
if (((currentCoefficient%2)==0) and (currentCoefficients[currentCoefficient][0]<0)):
isComplyingWithGaussCoefficientsSigns = False
if (isComplyingWithGaussCoefficientsSigns == True):
# We determine the accuracy of the obtained coefficients
predictedData = []
orderOfThePolynomial = 2
numberOfIndependentVariables = (len(currentCoefficients)-1)
for row in range(0, numberOfRows):
temporalRow = []
actualIc = currentCoefficients[0][0]
currentOrderOfThePolynomial = 1
currentVariable = 0
for currentIndependentVariable in range(0, numberOfIndependentVariables):
if (currentOrderOfThePolynomial == (orderOfThePolynomial+1)):
currentOrderOfThePolynomial = 1
currentVariable = currentVariable + 1
actualIc = actualIc + currentCoefficients[currentIndependentVariable+1][0]*self.x_samplesList[row][currentVariable]**(currentOrderOfThePolynomial)
currentOrderOfThePolynomial = currentOrderOfThePolynomial + 1
temporalRow.append(math.exp(-(actualIc)))
predictedData.append(temporalRow)
predictionAcurracy = 0
numberOfDataPoints = numberOfRows
for row in range(0, numberOfDataPoints):
n2 = modifiedSamplesList_y[row][0]
n1 = predictedData[row][0]
if ((n1<0.2) and (n2<0.051)):
newAcurracyValueToAdd = 1-n1
else:
newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
if (newAcurracyValueToAdd < 0):
newAcurracyValueToAdd = 0
predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
if (len(bestModelingResults) == 0):
# We save the first best fitting modeling result
bestModelingResults = []
bestModelingResults.append(currentCoefficients)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(predictedData)
bestModelingResults.append("Coefficients distribution for the Gaussian function is as follows: Gaussian = exp(-(bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + ... + b_(n-1)*xn + bn*xn^2 ))")
allAccuracies = []
temporalRow = []
temporalRow.append(bestModelingResults[1])
temporalRow.append(bestModelingResults[0])
temporalRow.append(self.x_samplesList)
allAccuracies.append(temporalRow)
else:
if (predictionAcurracy > bestModelingResults[1]):
bestModelingResults = []
bestModelingResults.append(currentCoefficients)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(predictedData)
bestModelingResults.append("Coefficients distribution for the Gaussian function is as follows: Gaussian = exp(-(bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 | |
""" SNMPv2_MIB
The MIB module for SNMP entities.
Copyright (C) The Internet Society (2002). This
version of this MIB module is part of RFC 3418;
see the RFC itself for full legal notices.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class SNMPv2MIB(Entity):
"""
.. attribute:: system
**type**\: :py:class:`System <ydk.models.cisco_ios_xe.SNMPv2_MIB.SNMPv2MIB.System>`
.. attribute:: snmp
**type**\: :py:class:`Snmp <ydk.models.cisco_ios_xe.SNMPv2_MIB.SNMPv2MIB.Snmp>`
.. attribute:: snmpset
**type**\: :py:class:`SnmpSet <ydk.models.cisco_ios_xe.SNMPv2_MIB.SNMPv2MIB.SnmpSet>`
.. attribute:: sysortable
The (conceptual) table listing the capabilities of the local SNMP application acting as a command responder with respect to various MIB modules. SNMP entities having dynamically\-configurable support of MIB modules will have a dynamically\-varying number of conceptual rows
**type**\: :py:class:`SysORTable <ydk.models.cisco_ios_xe.SNMPv2_MIB.SNMPv2MIB.SysORTable>`
"""
    # YANG module identity for this binding (appears auto-generated by ydk-gen).
    _prefix = 'SNMPv2-MIB'
    _revision = '2002-10-16'
    def __init__(self):
        # Register the child containers (system, snmp, snmpSet, sysORTable)
        # with the ydk Entity framework and wire up parent links.
        super(SNMPv2MIB, self).__init__()
        self._top_entity = None
        self.yang_name = "SNMPv2-MIB"
        self.yang_parent_name = "SNMPv2-MIB"
        self.is_top_level_class = True
        self.has_list_ancestor = False
        self.ylist_key_names = []
        # YANG child name -> (python attribute name, child class).
        self._child_classes = OrderedDict([("system", ("system", SNMPv2MIB.System)), ("snmp", ("snmp", SNMPv2MIB.Snmp)), ("snmpSet", ("snmpset", SNMPv2MIB.SnmpSet)), ("sysORTable", ("sysortable", SNMPv2MIB.SysORTable))])
        # The top-level container has no scalar leafs of its own.
        self._leafs = OrderedDict()
        self.system = SNMPv2MIB.System()
        self.system.parent = self
        self._children_name_map["system"] = "system"
        self.snmp = SNMPv2MIB.Snmp()
        self.snmp.parent = self
        self._children_name_map["snmp"] = "snmp"
        self.snmpset = SNMPv2MIB.SnmpSet()
        self.snmpset.parent = self
        self._children_name_map["snmpset"] = "snmpSet"
        self.sysortable = SNMPv2MIB.SysORTable()
        self.sysortable.parent = self
        self._children_name_map["sysortable"] = "sysORTable"
        self._segment_path = lambda: "SNMPv2-MIB:SNMPv2-MIB"
        # NOTE(review): set last by the generator; presumably locks the
        # attribute set via __setattr__/_perform_setattr -- see ydk Entity.
        self._is_frozen = True
    def __setattr__(self, name, value):
        # Route every attribute write through ydk's Entity machinery
        # (exact validation semantics are defined in the ydk runtime).
        self._perform_setattr(SNMPv2MIB, [], name, value)
    # Binding for the RFC 3418 'system' group scalars (sysDescr, sysUpTime, ...).
    class System(Entity):
        """
        .. attribute:: sysdescr
        	A textual description of the entity. This value should include the full name and version identification of the system's hardware type, software operating\-system, and networking software
        	**type**\: str
        	**length:** 0..255
        .. attribute:: sysobjectid
        	The vendor's authoritative identification of the network management subsystem contained in the entity. This value is allocated within the SMI enterprises subtree (1.3.6.1.4.1) and provides an easy and unambiguous means for determining `what kind of box' is being managed. For example, if vendor `Flintstones, Inc.' was assigned the subtree 1.3.6.1.4.1.424242, it could assign the identifier 1.3.6.1.4.1.424242.1.1 to its `Fred Router'
        	**type**\: str
        	**pattern:** (([0\-1](\\.[1\-3]?[0\-9]))\|(2\\.(0\|([1\-9]\\d\*))))(\\.(0\|([1\-9]\\d\*)))\*
        .. attribute:: sysuptime
        	The time (in hundredths of a second) since the network management portion of the system was last re\-initialized
        	**type**\: int
        	**range:** 0..4294967295
        .. attribute:: syscontact
        	The textual identification of the contact person for this managed node, together with information on how to contact this person. If no contact information is known, the value is the zero\-length string
        	**type**\: str
        	**length:** 0..255
        .. attribute:: sysname
        	An administratively\-assigned name for this managed node. By convention, this is the node's fully\-qualified domain name. If the name is unknown, the value is the zero\-length string
        	**type**\: str
        	**length:** 0..255
        .. attribute:: syslocation
        	The physical location of this node (e.g., 'telephone closet, 3rd floor'). If the location is unknown, the value is the zero\-length string
        	**type**\: str
        	**length:** 0..255
        .. attribute:: sysservices
        	A value which indicates the set of services that this entity may potentially offer. The value is a sum. This sum initially takes the value zero. Then, for each layer, L, in the range 1 through 7, that this node performs transactions for, 2 raised to (L \- 1) is added to the sum. For example, a node which performs only routing functions would have a value of 4 (2^(3\-1)). In contrast, a node which is a host offering application services would have a value of 72 (2^(4\-1) + 2^(7\-1)). Note that in the context of the Internet suite of protocols, values should be calculated accordingly\: layer functionality 1 physical (e.g., repeaters) 2 datalink/subnetwork (e.g., bridges) 3 internet (e.g., supports the IP) 4 end\-to\-end (e.g., supports the TCP) 7 applications (e.g., supports the SMTP) For systems including OSI protocols, layers 5 and 6 may also be counted
        	**type**\: int
        	**range:** 0..127
        .. attribute:: sysorlastchange
        	The value of sysUpTime at the time of the most recent change in state or value of any instance of sysORID
        	**type**\: int
        	**range:** 0..4294967295
        """
        _prefix = 'SNMPv2-MIB'
        _revision = '2002-10-16'
        def __init__(self):
            super(SNMPv2MIB.System, self).__init__()
            self.yang_name = "system"
            self.yang_parent_name = "SNMPv2-MIB"
            self.is_top_level_class = False
            self.has_list_ancestor = False
            self.ylist_key_names = []
            # 'system' is all scalars -- no child containers.
            self._child_classes = OrderedDict([])
            # python attribute -> (YLeaf with the YANG leaf name, accepted python types).
            self._leafs = OrderedDict([
                ('sysdescr', (YLeaf(YType.str, 'sysDescr'), ['str'])),
                ('sysobjectid', (YLeaf(YType.str, 'sysObjectID'), ['str'])),
                ('sysuptime', (YLeaf(YType.uint32, 'sysUpTime'), ['int'])),
                ('syscontact', (YLeaf(YType.str, 'sysContact'), ['str'])),
                ('sysname', (YLeaf(YType.str, 'sysName'), ['str'])),
                ('syslocation', (YLeaf(YType.str, 'sysLocation'), ['str'])),
                ('sysservices', (YLeaf(YType.int32, 'sysServices'), ['int'])),
                ('sysorlastchange', (YLeaf(YType.uint32, 'sysORLastChange'), ['int'])),
            ])
            # Leaf values start unset.
            self.sysdescr = None
            self.sysobjectid = None
            self.sysuptime = None
            self.syscontact = None
            self.sysname = None
            self.syslocation = None
            self.sysservices = None
            self.sysorlastchange = None
            self._segment_path = lambda: "system"
            self._absolute_path = lambda: "SNMPv2-MIB:SNMPv2-MIB/%s" % self._segment_path()
            # NOTE(review): set last by the generator; presumably locks the
            # attribute set via _perform_setattr -- see ydk Entity.
            self._is_frozen = True
        def __setattr__(self, name, value):
            # Delegate attribute writes to the ydk runtime, passing the
            # list of writable leaf names for this class.
            self._perform_setattr(SNMPv2MIB.System, [u'sysdescr', u'sysobjectid', u'sysuptime', u'syscontact', u'sysname', u'syslocation', u'sysservices', u'sysorlastchange'], name, value)
class Snmp(Entity):
"""
.. attribute:: snmpinpkts
The total number of messages delivered to the SNMP entity from the transport service
**type**\: int
**range:** 0..4294967295
.. attribute:: snmpoutpkts
The total number of SNMP Messages which were passed from the SNMP protocol entity to the transport service
**type**\: int
**range:** 0..4294967295
**status**\: obsolete
.. attribute:: snmpinbadversions
The total number of SNMP messages which were delivered to the SNMP entity and were for an unsupported SNMP version
**type**\: int
**range:** 0..4294967295
.. attribute:: snmpinbadcommunitynames
The total number of community\-based SNMP messages (for example, SNMPv1) delivered to the SNMP entity which used an SNMP community name not known to said entity. Also, implementations which authenticate community\-based SNMP messages using check(s) in addition to matching the community name (for example, by also checking whether the message originated from a transport address allowed to use a specified community name) MAY include in this value the number of messages which failed the additional check(s). It is strongly RECOMMENDED that the documentation for any security model which is used to authenticate community\-based SNMP messages specify the precise conditions that contribute to this value
**type**\: int
**range:** 0..4294967295
.. attribute:: snmpinbadcommunityuses
The total number of community\-based SNMP messages (for example, SNMPv1) delivered to the SNMP entity which represented an SNMP operation that was not allowed for the SNMP community named in the message. The precise conditions under which this counter is incremented (if at all) depend on how the SNMP entity implements its access control mechanism and how its applications interact with that access control mechanism. It is strongly RECOMMENDED that the documentation for any access control mechanism which is used to control access to and visibility of MIB instrumentation specify the precise conditions that contribute to this value
**type**\: int
**range:** 0..4294967295
.. attribute:: snmpinasnparseerrs
The total number of ASN.1 or BER errors encountered by the SNMP entity when decoding received SNMP messages
**type**\: int
**range:** 0..4294967295
.. attribute:: snmpintoobigs
The total number of SNMP PDUs which were delivered to the SNMP protocol entity and for which the value of the error\-status field was `tooBig'
**type**\: int
**range:** 0..4294967295
**status**\: obsolete
.. attribute:: snmpinnosuchnames
The total number of SNMP PDUs which were delivered to the SNMP protocol entity and for which the value of the error\-status field was `noSuchName'
**type**\: int
**range:** 0..4294967295
**status**\: obsolete
.. attribute:: snmpinbadvalues
The total number of SNMP PDUs which were delivered to the SNMP protocol entity and for which the value of the error\-status field was `badValue'
**type**\: int
**range:** 0..4294967295
**status**\: obsolete
.. attribute:: snmpinreadonlys
The total number valid SNMP PDUs which were delivered to the SNMP protocol entity and for which the value of the error\-status field was `readOnly'. It should be noted that it is | |
# $Id$
# This class is for processing query to map into concept
# Task:
# 1) Normalize text
# 2) Map into UMLS concept
import re
from pyparsing import *
class QueryNorm:
    '''Normalize free text before mapping it to UMLS concepts.

    norm() applies, in order: camelCase splitting, lowercasing, kinship
    term expansion (mom/dad), hyphen handling, digit/letter splitting,
    gender-term rewrites, and punctuation/stop-token stripping.
    '''
    def __init__(self, text):
        # Raw query text to be normalized.
        self.data = text
    def __repr__(self):
        return "%s" % (self.data)
    def norm(self):
        '''Return the normalized form of self.data as a single string.'''
        # Rule 0: if a word has exactly one uppercase letter occurring after
        # a lowercase first letter, split before it
        # (e.g. "firstSecond" -> "first Second").
        UPPER = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        Strx = ''
        for word in self.data.split():
            uppers = [c for c in word[1:] if c in UPPER]
            if len(uppers) == 1 and word[0] not in UPPER:
                splitAt = word.index(uppers[0])
                # BUG FIX: the original appended the split word without a
                # leading separator, gluing it onto the previous word.
                Strx = Strx + ' ' + word[0:splitAt] + ' ' + word[splitAt:len(word)]
            else:
                Strx = Strx + ' ' + word
        line1 = Strx.lower()
        # Rule 1: expand 'mom' -> 'mother'.
        # NOTE(review): the guard requires ' mom' but the replacement hits
        # 'mom' anywhere, even inside longer words -- confirm intended.
        if line1.find(' mom') >= 0:
            line1 = line1.replace('mom', 'mother')
        # Rule 2: expand ' dad' -> ' father'.
        # BUG FIX: this was chained with 'elif', so a query mentioning both
        # 'mom' and 'dad' never had 'dad' expanded; the rules are independent.
        if line1.find(' dad') >= 0:
            line1 = line1.replace(' dad', ' father')
        # Rule 3: if more than two letters precede a hyphen, split on it;
        # otherwise drop it ("x-ray" -> "xray", "heart-attack" -> "heart attack").
        Str = ''
        for word in line1.split():
            pieces = word.split('-')
            if len(pieces[0]) <= 2:
                Str = Str + ' ' + ''.join(pieces)
            else:
                Str = Str + ' ' + ' '.join(pieces)
        # Rule 4: if a word contains a digit surrounded by word characters,
        # split after its last digit (e.g. "q13age" -> "q13 age").
        Str1 = ''
        for word in Str.split():
            if re.match(r'\w+\d+\w+', word):
                idx = 0
                for i in range(len(word) - 1, 0, -1):
                    if word[i] in "0123456789":
                        idx = i
                        break
                Str1 = Str1 + ' ' + word[0:idx + 1] + ' ' + word[idx + 1:len(word)]
            else:
                Str1 = Str1 + ' ' + word
        # Rule 5: 'sex' -> 'gender'.
        if Str1.find('sex') >= 0:
            Str1 = Str1.replace('sex', 'gender')
        # Rule 6: 'sex [male or female]' -> 'gender'.
        # NOTE(review): unreachable -- Rule 5 above has already rewritten
        # every 'sex'; kept as-is for fidelity, confirm intended order.
        if Str1.find('sex [male or female]') >= 0:
            Str1 = Str1.replace('sex [male or female]', 'gender')
        # Rule 8: 'age parent' -> 'age of parent'.
        if Str1.find('age parent') >= 0:
            Str1 = Str1.replace('age parent', 'age of parent')
        # Rule 9: "baby's sex" -> 'sex of baby'.
        # NOTE(review): also unreachable after Rule 5 ('sex' is gone).
        if Str1.find('baby\'s sex') >= 0:
            Str1 = Str1.replace('baby\'s sex', 'sex of baby')
        # Rule 10: "mother's age" / "father's age" -> 'age of mother/father'.
        if Str1.find('mother\'s age') >= 0:
            Str1 = Str1.replace('mother\'s age', 'age of mother')
        if Str1.find('father\'s age') >= 0:
            Str1 = Str1.replace('father\'s age', 'age of father')
        # Rule 11: 'onset' -> 'started'.
        if Str1.find('onset') >= 0:
            Str1 = Str1.replace('onset', 'started')
        # Strip punctuation ('#' becomes the word 'number') and tokens that
        # are known to confuse MetaMap, in the same order as before.
        for old, new in (('.', ' '), (';', ' '), ('?', ' '), ('!', ' '),
                         ('(', ' '), (')', ' '), (':', ' '), ('#', ' number '),
                         ('/', ' '), ('\'', ' '), ('[', ' '), (']', ' '),
                         ('utterance', ' '), ('phrase', ' '),
                         ('candidates', ' '), ('mappings', ' '), ('\"', '')):
            Str1 = Str1.replace(old, new)
        return Str1.strip()
class QueryMap:
''' This class is to map free text into MetaMap
'''
    def __init__(self,text):
        # Free-text (or pre-normalized) query to be run through MetaMap.
        self.data = text
def wrapper(self):
''' Wrapper for MetaMap, called MetaMap from shell
Input is self.data
'''
from subprocess import Popen, PIPE
#p1 = Popen(["echo", text], stdout=PIPE)
# self.data should be converted into string
p1 = Popen(["echo", str(self.data)], stdout=PIPE)
p2 = Popen(["/data/resources/MetaMap/public_mm/bin/metamap11v2", "-q", "--silent"], stdin=p1.stdout, stdout=PIPE)
p1.stdout.close()
result = p2.communicate()[0]
p1.wait()
return result
    def getMapping(self, Text):
        ''' Get mapped terms from MetaMap.

        Text is the machine-readable ("-q") MetaMap output produced by
        wrapper(). It is scanned positionally (split on the 'phrase',
        'candidates', 'mappings' and 'ev(' markers) and an extended
        boolean query string is built: each phrase is OR-ed with the
        preferred names of its candidate concepts, and bare and/or/not
        phrases become boolean operators.
        '''
        #Text = str(self.data)
        # Each entry is (phrase, [(matched, preferred), ...]) -- or
        # (phrase, '', '') when the phrase has no candidate list.
        QueryStr = []
        ## For debugging
        #print Text
        TermList = {}
        # Text before the first 'phrase' record presumably holds the
        # utterance itself -- confirm against MetaMap's output format.
        Sents = Text.split('phrase')
        UttText = Sents[0].split('",')[0].split('\',')[1].strip('"').lower()
        # Only get mapping string !
        for Sent1 in Sents[1:]:
            Phrase = Sent1.split('candidates')
            PhraseText = Phrase[0].split(',')[0].split('(')[1].lower().strip('\'')
            ## For debugging
            #print "Start to print ========"
            #print Phrase
            #print "PHRASE: " + PhraseText
            for Sent2 in Phrase[1:]:
                Candidate = Sent2.split('mappings')
                CandidateString = Candidate[1].split('\n')[0]
                # Access Mapping
                MappedString = Candidate[1].split('\n')[1]
                # Each candidate concept appears as an 'ev(...)' term.
                CandidateList = CandidateString.split('ev(')
                if len(CandidateList)>=2:
                    Candidate_temp = []
                    for item in CandidateList[1:]:
                        # Positional ev() fields: [1]=CUI, [2]=matched text,
                        # [3]=preferred name, then '[semantic types]'.
                        CandidateCUI = item.split(',')[1]
                        CandidateMatched = item.split(',')[2].lower().strip('\'')
                        if item.find('\',[') >=0:
                            CandidatePreferred = item.split('\',[')[0].split('\'')[-1].lower().strip('\'')
                        else:
                            CandidatePreferred = item.split(',')[3].lower()
                        SemType = item.split('[')[2].strip(',').strip(']')
                        ## For debugging
                        #print item
                        #print "MATCHED : " + CandidateMatched
                        #print "PREFERRED : " + CandidatePreferred
                        # =======================================
                        # REMOVE LOINC code and SNOMED CT, added on Apr 26, 2013
                        # =======================================
                        if CandidatePreferred.find('^')==-1 and CandidatePreferred.find('-')==-1:
                            Candidate_temp.append((CandidateMatched,CandidatePreferred))
                    QueryStr.append((PhraseText,Candidate_temp))
                else:
                    ## For debugging
                    #print "PREFERRED : "
                    # NOTE(review): appends a 3-tuple while the branch above
                    # appends a 2-tuple; downstream code only reads
                    # item[0]/item[1], so the extra '' is ignored.
                    QueryStr.append((PhraseText,'',''))
        OrigQuery = ''
        for item in QueryStr:
            OrigQuery += ' ' + item[0]
        ## For debugging
        #print "==================================="
        #print "Orig Query: "
        #print OrigQuery.strip()
        #print "Extended Query: "
        # --------------------------------
        # Adding original query into query
        ExtendedQuery = ' '
        #ExtendedQuery = self.data + ' OR '
        for item in QueryStr:
            # MOST IMPORTANT FOR DEBUGGING
            #print "======"
            #print item
            if len(item[0].strip())>0:
                temp1 = '("' + item[0].strip('"') + '"'
            else:
                #print item[1][0][1]
                #temp1 = ''
                # Note: Error of MetaMap when parsing phrase such as "(copd and child)"
                # NOTE(review): 'len(item[1]) >= 0' is always true; if
                # item[1] is empty, item[1][0] raises IndexError -- this
                # probably meant '> 0'. Confirm before changing.
                if len(item[1])>=0 and len(item[1][0])>0:
                    temp1 ='("' + item[1][0][1] + '"'
                else:
                    temp1=''
            #print temp1
            # If there is mapping terms
            if len(item[1])>0:
                for item1 in item[1]:
                    # if preferred terms is not matched phrase
                    #if len(temp1)>0 and item1[1].strip('"') !=item[0].strip('"'):
                    if len(temp1)>0 and item1[1].strip('"') !=item[0].strip('"') and item1[1]!=item1[0]:
                        temp1=temp1 + ' OR "' + item1[1].strip('"') + '"'
                    #if len(temp1)>0 and item1[1].strip('"') !=item[0].strip('"') and item1[1].strip('"') !=item[1][0][1].strip('"'):
                    # temp1=temp1 + ' OR "' + item1[1].strip('"') + '"'
                    #if len(temp1)>0 and item1[1].strip('"') !=item[0].strip('"') and item1[1].strip('"') ==item[1][0][1].strip('"'):
                    # temp1=temp1 + ' OR ("' + item1[1].strip('"') + '"'
                ExtendedQuery+= ' ' + temp1
                ExtendedQuery +=')'
            # Add AND or OR or NOT operators
            else:
                # If parse into individual AND,OR,NOT
                if item[0].find('and')==0 or item[0].find('or')==0 or item[0].find('not')==0:
                    ExtendedQuery+=' ' + item[0].upper()
                # If not
                #else:
                #ExtendedQuery+=' "' + item[0].upper() + '"'
                #ExtendedQuery+= item[0].upper()
        #print ExtendedQuery.strip()
        return ExtendedQuery.strip()
def getMappingNew(self,Text):
''' Get mapped terms from MetaMap.
'''
#Text = str(self.data)
QueryStr = []
## For debugging
#print Text
TermList = {}
Sents = Text.split('phrase')
UttText = Sents[0].split('",')[0].split('\',')[1].strip('"').lower()
# Only get mapping string !
for Sent1 in Sents[1:]:
Phrase = Sent1.split('candidates')
PhraseText = Phrase[0].split(',')[0].split('(')[1].lower().strip('\'')
## For debugging
#print "Start to print ========"
#print Phrase
#print "PHRASE: " + PhraseText
for Sent2 in Phrase[1:]:
Candidate = Sent2.split('mappings')
CandidateString = Candidate[1].split('\n')[0]
# Access Mapping
MappedString = Candidate[1].split('\n')[1]
CandidateList = CandidateString.split('ev(')
if len(CandidateList)>=2:
Candidate_temp = []
for item in CandidateList[1:]:
CandidateCUI = item.split(',')[1]
CandidateMatched = item.split(',')[2].lower().strip('\'')
if item.find('\',[') >=0:
CandidatePreferred = item.split('\',[')[0].split('\'')[-1].lower().strip('\'')
else:
CandidatePreferred = item.split(',')[3].lower()
SemType = item.split('[')[2].strip(',').strip(']')
## For debugging
#print | |
continue
OooOo [ o0o0O00 ] = IIiO0Ooo . interface
if 63 - 63: o0oOOo0O0Ooo * iIii1I11I1II1 * II111iiii . OoO0O00 - oO0o / OoOoOO00
if 78 - 78: i11iIiiIii / OoO0O00 / i1IIi . i11iIiiIii
if ( OooOo == { } ) :
lprint ( 'Suppress Info-Request, no "interface = <device>" RLOC ' + "found in any database-mappings" )
if 100 - 100: II111iiii . IiII . I11i
return
if 60 - 60: OoOoOO00 % OOooOOo * i1IIi
if 3 - 3: OoooooooOO
if 75 - 75: OoooooooOO * I1Ii111 * o0oOOo0O0Ooo + I1ii11iIi11i . iIii1I11I1II1 / O0
if 23 - 23: oO0o - O0 * IiII + i11iIiiIii * Ii1I
if 8 - 8: ooOoO0o / II111iiii . I1ii11iIi11i * ooOoO0o % oO0o
if 36 - 36: I1ii11iIi11i % OOooOOo - ooOoO0o - I11i + I1IiiI
for o0o0O00 in OooOo :
I111IIiIII = OooOo [ o0o0O00 ]
oOO0oo = red ( o0o0O00 , False )
lprint ( "Build Info-Request for private address {} ({})" . format ( oOO0oo ,
I111IIiIII ) )
O0OoO0o = I111IIiIII if len ( OooOo ) > 1 else None
for dest in Ii1iII11 :
lisp_send_info_request ( lisp_sockets , dest , port , O0OoO0o )
if 37 - 37: I1ii11iIi11i * IiII
if 65 - 65: OOooOOo / O0 . I1ii11iIi11i % i1IIi % Oo0Ooo
if 36 - 36: i11iIiiIii - OOooOOo + iII111i + iII111i * I11i * oO0o
if 14 - 14: O0 - iII111i * I1Ii111 - I1IiiI + IiII
if 46 - 46: OoooooooOO * OoO0O00 . I1Ii111
if 95 - 95: ooOoO0o . I1ii11iIi11i . ooOoO0o / I1IiiI * OoOoOO00 . O0
if ( OOoo0000 != [ ] ) :
for IIiIII1IIi in lisp_map_resolvers_list . values ( ) :
IIiIII1IIi . resolve_dns_name ( )
if 78 - 78: oO0o
if 33 - 33: oO0o + i1IIi
return
if 32 - 32: iIii1I11I1II1
if 71 - 71: Ii1I * I1IiiI
if 62 - 62: II111iiii / I1IiiI . I1ii11iIi11i
if 49 - 49: IiII / OoOoOO00 / O0 * i11iIiiIii
if 47 - 47: i11iIiiIii + iII111i + i11iIiiIii
if 66 - 66: o0oOOo0O0Ooo . I1IiiI + OoooooooOO . iII111i / OoooooooOO - IiII
if 47 - 47: o0oOOo0O0Ooo / II111iiii * i11iIiiIii * OoO0O00 . iIii1I11I1II1
if 34 - 34: I11i / o0oOOo0O0Ooo * OOooOOo * OOooOOo
def lisp_valid_address_format(kw, value):
    """
    Validate the syntax of an "address" value from a configuration command.

    Returns True when *value* looks like one of the accepted address
    formats:

      * a quoted distinguished-name, e.g. 'hostname'
      * an IPv4 dotted-quad, e.g. 10.0.0.1
      * a geo-coordinate string, e.g. 45-20-10-N-120-10-5-W
      * a MAC address written as three hex groups, e.g. aabb-ccdd-eeff
      * an IPv6 address, e.g. fe80::1
      * an E.164 telephone number, e.g. +14085551212

    Keywords other than "address" are not validated here and always
    return True.  An empty value returns False (previously this raised
    IndexError on the value[0] checks).
    """
    if kw != "address":
        return True

    # Guard: an empty string cannot match any format and would crash the
    # value[0] / value[-1] checks below.
    if value == "":
        return False

    # Quoted distinguished-name.
    if value[0] == "'" and value[-1] == "'":
        return True

    # IPv4 dotted-quad: exactly four decimal fields, each in 0..255.
    if "." in value:
        fields = value.split(".")
        if len(fields) != 4:
            return False
        for field in fields:
            if not field.isdigit():
                return False
            if int(field) > 255:
                return False
        return True

    if "-" in value:
        fields = value.split("-")

        # Geo-coordinates: dash-separated with a compass direction letter
        # and at least 8 fields, e.g. "45-20-10-N-120-10-5-W".
        for direction in ["N", "S", "W", "E"]:
            if direction in fields:
                return len(fields) >= 8

        # MAC address: exactly three dash-separated hex groups.
        if len(fields) != 3:
            return False
        for field in fields:
            try:
                int(field, 16)
            except ValueError:
                return False
        return True

    # IPv6: colon-separated hex groups, at most one "::" run (a trailing
    # empty group after an existing "::" is tolerated, e.g. "1::").
    if ":" in value:
        fields = value.split(":")
        if len(fields) < 2:
            return False
        seen_empty = False
        for position, field in enumerate(fields, start=1):
            if field == "":
                if seen_empty:
                    if position == len(fields):
                        break
                    if position > 2:
                        return False
                seen_empty = True
                continue
            try:
                int(field, 16)
            except ValueError:
                return False
        return True

    # E.164 telephone number: "+" followed by digits only.
    if value[0] == "+":
        for ch in value[1:]:
            if not ch.isdigit():
                return False
        return True

    return False
if 48 - 48: O0
if 89 - 89: i11iIiiIii % OoO0O00 . OoOoOO00 + Oo0Ooo + OoOoOO00
if 53 - 53: Ii1I / OoOoOO00 % iII111i * OoooooooOO + Oo0Ooo
if 70 - 70: OoO0O00 % OoO0O00 * OoooooooOO
if 96 - 96: ooOoO0o * Ii1I + I11i + II111iiii * I1IiiI / iII111i
if 40 - 40: OoooooooOO - I11i % OOooOOo - I1IiiI . I1IiiI + Ii1I
if 97 - 97: OOooOOo . OoooooooOO . OOooOOo . i11iIiiIii
if 71 - 71: oO0o + I1ii11iIi11i * I1ii11iIi11i
if 79 - 79: oO0o
if 47 - 47: OoooooooOO - i1IIi * OOooOOo
if 11 - | |
83
# Generated parser tables (ANTLR 4.8) — do not edit by hand.
# Tail of the rule-index constants; indices into ruleNames below.
RULE_compound_identifier = 84
RULE_literal = 85
RULE_err = 86
# Human-readable rule names, indexed by the RULE_* constants.
ruleNames = [
    "parse",
    "query",
    "select_query",
    "select_query_main",
    "select_with_step",
    "select_select_step",
    "select_from_step",
    "select_array_join_step",
    "select_sample_step",
    "sample_ratio",
    "select_join_step",
    "select_join_right_part",
    "select_prewhere_step",
    "select_where_step",
    "select_groupby_step",
    "select_having_step",
    "select_orderby_step",
    "select_limit_step",
    "select_limitby_step",
    "settings_step",
    "select_format_step",
    "insert_query",
    "create_query",
    "rename_query",
    "drop_query",
    "alter_query",
    "alter_query_element",
    "clickhouse_type",
    "simple_type",
    "enum_entry",
    "use_query",
    "set_query",
    "assignment_list",
    "assignment",
    "kill_query_query",
    "optimize_query",
    "table_properties_query",
    "show_tables_query",
    "show_processlist_query",
    "check_query",
    "full_table_name",
    "partition_name",
    "cluster_name",
    "database_name",
    "table_name",
    "format_name",
    "query_outfile_step",
    "engine",
    "identifier_with_optional_parameters",
    "identifier_with_parameters",
    "order_by_expression_list",
    "order_by_element",
    "table_ttl_list",
    "table_ttl_declaration",
    "nested_table",
    "name_type_pair_list",
    "name_type_pair",
    "compound_name_type_pair",
    "column_declaration_list",
    "column_declaration",
    "column_name",
    "column_type",
    "column_name_list",
    "select_expr_list",
    "select_expr",
    "select_alias",
    "alias",
    "alias_name",
    "table_function",
    "subquery",
    "expression_with_optional_alias",
    "expr",
    "interval_unit",
    "expression_list",
    "not_empty_expression_list",
    "array",
    "function",
    "function_parameters",
    "function_arguments",
    "function_name",
    "functionname",
    "variable",
    "identifier",
    "keyword",
    "compound_identifier",
    "literal",
    "err",
]
# Token-type constants mirroring the lexer vocabulary (generated).
EOF = Token.EOF
LINE_COMMENT = 1
K_ADD = 2
K_AFTER = 3
K_ALL = 4
K_ALIAS = 5
K_ALTER = 6
K_AND = 7
K_ANY = 8
K_ARRAY = 9
K_AS = 10
K_ASCENDING = 11
K_ASC = 12
K_ASYNC = 13
K_ATTACH = 14
K_BETWEEN = 15
K_BY = 16
K_CASE = 17
K_CAST = 18
K_CHECK = 19
K_CLUSTER = 20
K_COLUMN = 21
K_COLLATE = 22
K_CODEC = 23
K_CREATE = 24
K_CROSS = 25
K_DAY = 26
K_DELETE = 27
K_DESCRIBE = 28
K_DESCENDING = 29
K_DESC = 30
K_DATABASE = 31
K_DATABASES = 32
K_DEFAULT = 33
K_DETACH = 34
K_DISK = 35
K_DISTINCT = 36
K_DROP = 37
K_ELSE = 38
K_END = 39
K_ENGINE = 40
K_EXISTS = 41
K_FETCH = 42
K_FINAL = 43
K_FIRST = 44
K_FROM = 45
K_FREEZE = 46
K_FORMAT = 47
K_FULL = 48
K_GLOBAL = 49
K_GROUP = 50
K_HAVING = 51
K_HOUR = 52
K_ID = 53
K_IF = 54
K_INNER = 55
K_INSERT = 56
K_INTERVAL = 57
K_INTO = 58
K_IN = 59
K_IS = 60
K_JOIN = 61
K_KILL = 62
K_LAST = 63
K_LEFT = 64
K_LIKE = 65
K_LIMIT = 66
K_MAIN = 67
K_MATERIALIZED = 68
K_MINUTE = 69
K_MODIFY = 70
K_MONTH = 71
K_NOT = 72
K_NULL = 73
K_NULLS = 74
K_OFFSET = 75
K_ON = 76
K_OPTIMIZE = 77
K_ORDER = 78
K_OR = 79
K_OUTFILE = 80
K_PARTITION = 81
K_POPULATE = 82
K_PREWHERE = 83
K_PROCESSLIST = 84
K_QUERY = 85
K_RENAME = 86
K_RETURN = 87
K_RIGHT = 88
K_SAMPLE = 89
K_SECOND = 90
K_SELECT = 91
K_SET = 92
K_SETTINGS = 93
K_SHOW = 94
K_SYNC = 95
K_TABLE = 96
K_TABLES = 97
K_TEMPORARY = 98
K_TEST = 99
K_THEN = 100
K_TOTALS = 101
K_TO = 102
K_TTL = 103
K_OUTER = 104
K_VALUES = 105
K_VOLUME = 106
K_VIEW = 107
K_UNION = 108
K_USE = 109
K_USING = 110
K_WEEK = 111
K_WHEN = 112
K_WHERE = 113
K_WITH = 114
K_YEAR = 115
COLON = 116
COMMA = 117
SEMI = 118
LPAREN = 119
RPAREN = 120
RARROW = 121
LT = 122
GT = 123
QUESTION = 124
STAR = 125
PLUS = 126
CONCAT = 127
OR = 128
DOLLAR = 129
DOT = 130
PERCENT = 131
MINUS = 132
DIVIDE = 133
EQUALS = 134
ASSIGN = 135
NOT_EQUALS = 136
NOT_EQUALS2 = 137
LE = 138
GE = 139
LBRAKET = 140
RBRAKET = 141
LCURLY = 142
RCURLY = 143
T_ARRAY = 144
T_TUPLE = 145
T_NULLABLE = 146
T_FLOAT32 = 147
T_FLOAT64 = 148
T_UINT8 = 149
T_UINT16 = 150
T_UINT32 = 151
T_UINT64 = 152
T_INT8 = 153
T_INT16 = 154
T_INT32 = 155
T_INT64 = 156
T_ENUM8 = 157
T_ENUM16 = 158
T_UUID = 159
T_DATE = 160
T_DATETIME = 161
T_STRING = 162
T_FIXEDSTRING = 163
T_NULL = 164
T_INTERVAL_YEAR = 165
T_INTERVAL_MONTH = 166
T_INTERVAL_WEEK = 167
T_INTERVAL_DAY = 168
T_INTERVAL_HOUR = 169
T_INTERVAL_MINUTE = 170
T_INTERVAL_SECOND = 171
T_AGGREGATE_FUNCTION = 172
F_COUNT = 173
F_SUM = 174
IDENTIFIER = 175
NUMERIC_LITERAL = 176
STRING_LITERAL = 177
QUOTED_LITERAL = 178
SPACES = 179
UNEXPECTED_CHAR = 180
def __init__(self, input: TokenStream, output: TextIO = sys.stdout):
    # Standard ANTLR-generated parser constructor: checks that the
    # installed runtime matches the generator version, then wires up the
    # ATN simulator used for adaptive prediction.
    super().__init__(input, output)
    self.checkVersion("4.8")
    self._interp = ParserATNSimulator(
        self, self.atn, self.decisionsToDFA, self.sharedContextCache
    )
    self._predicates = None
class ParseContext(ParserRuleContext):
    """Generated rule context for the top-level ``parse`` rule
    (``parse := (query | err) EOF``)."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def EOF(self):
        # Terminal accessor: the end-of-input token.
        return self.getToken(ClickHouseParser.EOF, 0)

    def query(self):
        return self.getTypedRuleContext(ClickHouseParser.QueryContext, 0)

    def err(self):
        return self.getTypedRuleContext(ClickHouseParser.ErrContext, 0)

    def getRuleIndex(self):
        return ClickHouseParser.RULE_parse

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterParse"):
            listener.enterParse(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitParse"):
            listener.exitParse(self)
def parse(self):
    """Parse the top-level ``parse`` rule: a query (or an error token)
    followed by EOF.  Generated code — the numeric ``self.state``
    assignments are ATN state ids and must not be changed by hand."""
    localctx = ClickHouseParser.ParseContext(self, self._ctx, self.state)
    self.enterRule(localctx, 0, self.RULE_parse)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 176
        self._errHandler.sync(self)
        token = self._input.LA(1)
        # One token of lookahead chooses the alternative: any statement
        # keyword starts a query; UNEXPECTED_CHAR routes to the err rule.
        if token in [
            ClickHouseParser.K_ALTER,
            ClickHouseParser.K_ATTACH,
            ClickHouseParser.K_CHECK,
            ClickHouseParser.K_CREATE,
            ClickHouseParser.K_DESCRIBE,
            ClickHouseParser.K_DESC,
            ClickHouseParser.K_DETACH,
            ClickHouseParser.K_DROP,
            ClickHouseParser.K_EXISTS,
            ClickHouseParser.K_INSERT,
            ClickHouseParser.K_KILL,
            ClickHouseParser.K_OPTIMIZE,
            ClickHouseParser.K_RENAME,
            ClickHouseParser.K_SELECT,
            ClickHouseParser.K_SET,
            ClickHouseParser.K_SHOW,
            ClickHouseParser.K_USE,
            ClickHouseParser.K_WITH,
        ]:
            self.state = 174
            self.query()
            pass
        elif token in [ClickHouseParser.UNEXPECTED_CHAR]:
            self.state = 175
            self.err()
            pass
        else:
            raise NoViableAltException(self)
        self.state = 178
        self.match(ClickHouseParser.EOF)
    except RecognitionException as re:
        # Standard generated recovery: record the exception on the
        # context and let the error handler resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class QueryContext(ParserRuleContext):
    """Generated rule context for ``query``: one child accessor per
    statement alternative."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def show_tables_query(self):
        return self.getTypedRuleContext(ClickHouseParser.Show_tables_queryContext, 0)

    def select_query(self):
        return self.getTypedRuleContext(ClickHouseParser.Select_queryContext, 0)

    def insert_query(self):
        return self.getTypedRuleContext(ClickHouseParser.Insert_queryContext, 0)

    def create_query(self):
        return self.getTypedRuleContext(ClickHouseParser.Create_queryContext, 0)

    def rename_query(self):
        return self.getTypedRuleContext(ClickHouseParser.Rename_queryContext, 0)

    def drop_query(self):
        return self.getTypedRuleContext(ClickHouseParser.Drop_queryContext, 0)

    def alter_query(self):
        return self.getTypedRuleContext(ClickHouseParser.Alter_queryContext, 0)

    def use_query(self):
        return self.getTypedRuleContext(ClickHouseParser.Use_queryContext, 0)

    def set_query(self):
        return self.getTypedRuleContext(ClickHouseParser.Set_queryContext, 0)

    def optimize_query(self):
        return self.getTypedRuleContext(ClickHouseParser.Optimize_queryContext, 0)

    def table_properties_query(self):
        return self.getTypedRuleContext(ClickHouseParser.Table_properties_queryContext, 0)

    def show_processlist_query(self):
        return self.getTypedRuleContext(ClickHouseParser.Show_processlist_queryContext, 0)

    def check_query(self):
        return self.getTypedRuleContext(ClickHouseParser.Check_queryContext, 0)

    def kill_query_query(self):
        return self.getTypedRuleContext(ClickHouseParser.Kill_query_queryContext, 0)

    def getRuleIndex(self):
        return ClickHouseParser.RULE_query

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterQuery"):
            listener.enterQuery(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitQuery"):
            listener.exitQuery(self)
def query(self):
    """Parse the ``query`` rule: dispatch to one of 14 statement
    alternatives chosen by adaptive prediction (decision 1).  Generated
    code — state numbers are ATN ids and must not be edited by hand."""
    localctx = ClickHouseParser.QueryContext(self, self._ctx, self.state)
    self.enterRule(localctx, 2, self.RULE_query)
    try:
        self.state = 194
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input, 1, self._ctx)
        if la_ == 1:
            self.enterOuterAlt(localctx, 1)
            self.state = 180
            self.show_tables_query()
            pass
        elif la_ == 2:
            self.enterOuterAlt(localctx, 2)
            self.state = 181
            self.select_query()
            pass
        elif la_ == 3:
            self.enterOuterAlt(localctx, 3)
            self.state = 182
            self.insert_query()
            pass
        elif la_ == 4:
            self.enterOuterAlt(localctx, 4)
            self.state = 183
            self.create_query()
            pass
        elif la_ == 5:
            self.enterOuterAlt(localctx, 5)
            self.state = 184
            self.rename_query()
            pass
        elif la_ == 6:
            self.enterOuterAlt(localctx, 6)
            self.state = 185
            self.drop_query()
            pass
        elif la_ == 7:
            self.enterOuterAlt(localctx, 7)
            self.state = 186
            self.alter_query()
            pass
        elif la_ == 8:
            self.enterOuterAlt(localctx, 8)
            self.state = 187
            self.use_query()
            pass
        elif la_ == 9:
            self.enterOuterAlt(localctx, 9)
            self.state = 188
            self.set_query()
            pass
        elif la_ == 10:
            self.enterOuterAlt(localctx, 10)
            self.state = 189
            self.optimize_query()
            pass
        elif la_ == 11:
            self.enterOuterAlt(localctx, 11)
            self.state = 190
            self.table_properties_query()
            pass
        elif la_ == 12:
            self.enterOuterAlt(localctx, 12)
            self.state = 191
            self.show_processlist_query()
            pass
        elif la_ == 13:
            self.enterOuterAlt(localctx, 13)
            self.state = 192
            self.check_query()
            pass
        elif la_ == 14:
            self.enterOuterAlt(localctx, 14)
            self.state = 193
            self.kill_query_query()
            pass
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class Select_queryContext(ParserRuleContext):
    """Generated rule context for ``select_query``: one or more
    ``select_query_main`` joined by UNION ALL, optionally followed by
    an outfile step and a format step."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def select_query_main(self, i: int = None):
        # With i=None, return every select_query_main child (one per
        # UNION ALL branch); otherwise return the i-th one.
        if i is None:
            return self.getTypedRuleContexts(ClickHouseParser.Select_query_mainContext)
        else:
            return self.getTypedRuleContext(ClickHouseParser.Select_query_mainContext, i)

    def K_UNION(self, i: int = None):
        if i is None:
            return self.getTokens(ClickHouseParser.K_UNION)
        else:
            return self.getToken(ClickHouseParser.K_UNION, i)

    def K_ALL(self, i: int = None):
        if i is None:
            return self.getTokens(ClickHouseParser.K_ALL)
        else:
            return self.getToken(ClickHouseParser.K_ALL, i)

    def query_outfile_step(self):
        return self.getTypedRuleContext(ClickHouseParser.Query_outfile_stepContext, 0)

    def select_format_step(self):
        return self.getTypedRuleContext(ClickHouseParser.Select_format_stepContext, 0)

    def getRuleIndex(self):
        return ClickHouseParser.RULE_select_query

    def enterRule(self, listener: ParseTreeListener):
        if hasattr(listener, "enterSelect_query"):
            listener.enterSelect_query(self)

    def exitRule(self, listener: ParseTreeListener):
        if hasattr(listener, "exitSelect_query"):
            listener.exitSelect_query(self)
def select_query(self):
localctx = ClickHouseParser.Select_queryContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_select_query)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 196
self.select_query_main()
self.state = 202
self._errHandler.sync(self)
_la = self._input.LA(1)
while _la == ClickHouseParser.K_UNION:
self.state = 197
self.match(ClickHouseParser.K_UNION)
self.state = 198
self.match(ClickHouseParser.K_ALL)
self.state = 199
self.select_query_main()
self.state = 204
self._errHandler.sync(self)
_la = self._input.LA(1)
self.state = 206
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == ClickHouseParser.K_INTO:
self.state = 205
self.query_outfile_step()
self.state = 209
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la == ClickHouseParser.K_FORMAT:
self.state = 208
self.select_format_step()
except RecognitionException as re:
localctx.exception = | |
info.name.lower() == 'query_status':
return info
def _findinfos(self, votable):
    # May be overridden to specialize for a particular DAL protocol.
    # Collect INFO name/value pairs from the results resource first and
    # the top-level votable second, so votable-level entries win on a
    # duplicate name (same precedence as the original two loops).
    results_resource = self._findresultsresource(votable)
    return {
        info.name: info.value
        for info_group in (results_resource.infos, votable.infos)
        for info in info_group
    }
def __repr__(self):
    """Delegate the representation to the astropy table view."""
    table = self.to_table()
    return repr(table)
@property
def queryurl(self):
    """
    The URL of the query that produced these results, or None if unknown.
    """
    return self._url
@property
def votable(self):
    """
    The complete VOTable XML document, an
    `astropy.io.votable.tree.VOTableFile` instance.
    """
    return self._votable
@property
def resultstable(self):
    """
    The results table as an `astropy.io.votable.tree.Table` XML element.
    """
    return self._resultstable
def to_table(self):
    """
    Convert these results into an astropy Table.

    Returns
    -------
    `astropy.table.Table`
    """
    # Prefer FIELD names over IDs for the column names.
    table = self.resultstable.to_table(use_names_over_ids=True)
    return table
@property
def table(self):
    """
    Deprecated alias for :py:meth:`to_table`; emits an
    AstropyDeprecationWarning and returns the converted table.
    """
    # Fix: the deprecation message was garbled ("Please use se to_table()").
    warn(AstropyDeprecationWarning(
        'Using the table property is deprecated. '
        'Please use to_table() instead.'
    ))
    return self.to_table()
def __len__(self):
    """Return the number of records (rows) in the result table."""
    rows = self.resultstable.array
    return len(rows)
def __getitem__(self, indx):
    """
    Flexible item access: an integer returns the indx-th record, a
    (name, index) tuple returns a single value, and any other key is
    treated as a column name.
    """
    if isinstance(indx, int):
        return self.getrecord(indx)
    if isinstance(indx, tuple):
        return self.getvalue(*indx)
    return self.getcolumn(indx)
@property
def fieldnames(self):
    """
    The names of the columns.  These are the names that are used
    to access values from the dictionaries returned by getrecord().  They
    correspond to the column name.
    """
    return self._fldnames
@property
def fielddescs(self):
    """
    The full metadata of the columns as a list of Field instances:
    simple objects with attributes corresponding to the VOTable FIELD
    attributes, namely: name, id, type, ucd, utype, arraysize,
    description.
    """
    return self.resultstable.fields
@property
def status(self):
    """
    The query status as a 2-element tuple, e.g. ('OK', 'Everythings fine').
    """
    return self._status
def fieldname_with_ucd(self, ucd):
    """
    Return the name of the first field whose UCD shares at least one
    word with *ucd*, or None when no field matches.
    """
    wanted = set(parse_ucd(ucd, has_colon=True))
    for field in self.fielddescs:
        if not field.ucd:
            continue
        field_words = set(parse_ucd(field.ucd, has_colon=True))
        if wanted & field_words:
            return field.name
    return None
def fieldname_with_utype(self, utype):
    """
    Return the name of the first field carrying the given UType value,
    or None if no field has it.
    """
    for fieldname in self.fieldnames:
        desc = self.getdesc(fieldname)
        if desc.utype == utype:
            return desc.name
    return None
def getcolumn(self, name):
    """
    Return a numpy array containing the values for the column with the
    given name; the name may also be a FIELD ID.
    """
    try:
        resolved = name
        if resolved not in self.fieldnames:
            # Not a column name — try to resolve it as a FIELD ID.
            resolved = self.resultstable.get_field_by_id(name).name
        return self.resultstable.array[resolved]
    except KeyError:
        raise KeyError("No such column: {}".format(name))
def getrecord(self, index):
    """
    Return a representation of a result record that follows dictionary
    semantics.  The keys of the dictionary are those returned by this
    instance's fieldnames attribute.  The returned record may have
    additional accessor methods for getting at standard DAL response
    metadata (e.g. ra, dec).

    Parameters
    ----------
    index : int
        the integer index of the desired record where 0 returns the first
        record

    Returns
    -------
    Record
        a dictionary-like wrapper containing the result record metadata.

    Raises
    ------
    IndexError
        if index is negative or equal or larger than the number of rows in
        the result table.

    See Also
    --------
    Record
    """
    return Record(self, index, session=self._session)
def getvalue(self, name, index):
    """
    Return a single record attribute: the value in column *name* at
    row *index*.

    Parameters
    ----------
    name : str
        the name of the attribute (column)
    index : int
        the zero-based index of the record

    Raises
    ------
    IndexError
        if index is negative or equal or larger than the number of rows
        in the result table.
    KeyError
        if name is not a recognized column name
    """
    record = self.getrecord(index)
    return record[name]
def getdesc(self, name):
    """
    Return the field description for the record attribute (column) with
    the given name.

    Parameters
    ----------
    name : str
        the name of the attribute (column)

    Returns
    -------
    object
        with attributes (name, id, datatype, unit, ucd, utype, arraysize)
        which describe the column

    Raises
    ------
    KeyError
        if name is not a known column name
    """
    if name in self._fldnames:
        return self.resultstable.get_field_by_id_or_name(name)
    raise KeyError(name)
def __iter__(self):
    """
    Yield each record in this result in order, stopping when the
    underlying table runs out of rows.
    """
    index = 0
    while True:
        try:
            record = self.getrecord(index)
        except IndexError:
            return
        yield record
        index += 1
def broadcast_samp(self, client_name=None):
    """
    Broadcast this result table to ``client_name`` over SAMP.
    """
    with samp.connection() as conn:
        table = self.to_table()
        samp.send_table_to(
            conn, table, client_name=client_name, name=self.queryurl)
def cursor(self):
    """
    Return a cursor compliant with the Python Database API's
    :class:`.Cursor` interface (see PEP 249) over these results.
    """
    # Imported lazily to avoid a circular import at module load time.
    from .dbapi2 import Cursor as _Cursor
    return _Cursor(self)
class Record(Mapping):
"""
one record from a DAL query result. The column values are accessible
as dictionary items. It also provides special added functions for
accessing the dataset the record corresponds to. Subclasses may provide
additional functions for access to service type-specific data.
"""
def __init__(self, results, index, session=None):
    # Hold on to the parent results and our row position so column
    # lookups and dataset retrieval can be resolved later.
    self._results = results
    self._index = index
    self._session = use_session(session)
    column_names = results.fieldnames
    row = results.resultstable.array.data[index]
    self._mapping = collections.OrderedDict(zip(column_names, row))
def __getitem__(self, key):
    """Look up a column value by name, falling back to the FIELD ID."""
    mapping = self._mapping
    if key in mapping:
        return mapping[key]
    try:
        resolved = self._results.resultstable.get_field_by_id(key).name
        return mapping[resolved]
    except KeyError:
        raise KeyError("No such column: {}".format(key))
def __iter__(self):
    """Iterate over the column names of this record."""
    return iter(self._mapping.keys())
def __len__(self):
    """Return the number of columns in this record."""
    mapping = self._mapping
    return len(mapping)
def __repr__(self):
    """Show the record as a tuple of its column values."""
    values = tuple(self.values())
    return repr(values)
def get(self, key, default=None, decode=False):
    """
    Mimic dict.get(), with an extra *decode* flag that converts a
    bytes value to an ASCII string before returning it.
    """
    value = self._mapping.get(key, default)
    if decode and isinstance(value, bytes):
        return value.decode('ascii')
    return value
def getbyucd(self, ucd, default=None, decode=False):
    """
    Return the value of the column whose UCD matches *ucd*, or
    *default* when no column carries it.
    """
    fieldname = self._results.fieldname_with_ucd(ucd)
    return self.get(fieldname, default, decode)
def getbyutype(self, utype, default=None, decode=False):
    """
    Return the value of the column with the given utype.  When no
    column carries the utype the lookup falls through to *default*.
    """
    fieldname = self._results.fieldname_with_utype(utype)
    return self.get(fieldname, default, decode)
def getdataformat(self):
    """
    Return the MIME type of the dataset described by this record.
    """
    mimetype = self.getbyucd('meta.code.mime', decode=True)
    return mimetype
def getdataurl(self):
    """
    Return the URL from the access-URL column, usable to retrieve the
    dataset described by this record, or None when no such column
    exists.  A column qualifies when its utype mentions
    "access.reference" or its UCD carries both "meta.dataset" and
    "meta.ref.url".
    """
    for fieldname in self._results.fieldnames:
        field = self._results.getdesc(fieldname)
        utype_matches = bool(field.utype) and \
            "access.reference" in field.utype.lower()
        ucd_matches = bool(field.ucd) and \
            "meta.dataset" in field.ucd and "meta.ref.url" in field.ucd
        if not (utype_matches or ucd_matches):
            continue
        url = self[fieldname]
        if isinstance(url, bytes):
            url = url.decode('utf-8')
        return url
    return None
def getdataobj(self):
    """
    Return the appropriate data object for the data content behind
    this record.
    """
    url = self.getdataurl()
    mimetype = self.getdataformat()
    return mime_object_maker(url, mimetype)
@stream_decode_content
def getdataset(self, timeout=None):
    """
    Fetch the dataset described by this record from the server.

    Parameters
    ----------
    timeout : float
        the time in seconds to allow for a successful
        connection with server before failing with an
        IOError (specifically, socket.timeout) exception

    Returns
    -------
    A file-like object which may be read to retrieve the referenced
    dataset.

    Raises
    ------
    KeyError
        if no dataset access URL is included in the record
    URLError
        if the dataset access URL is invalid (note: subclass of IOError)
    HTTPError
        if an HTTP error occurs while accessing the dataset
        (note: subclass of IOError)
    socket.timeout
        if the timeout is exceeded before a connection is established.
        (note: subclass of IOError)
    IOError
        if some other error occurs while establishing the data stream.
    """
    url = self.getdataurl()
    if not url:
        raise KeyError("no dataset access URL recognized in record")
    request_kwargs = {"stream": True}
    if timeout:
        request_kwargs["timeout"] = timeout
    response = self._session.get(url, **request_kwargs)
    response.raise_for_status()
    return response.raw
def cachedataset(self, filename=None, dir=".", timeout=None, bufsize=None):
"""
retrieve the dataset described by this record and write it out to
a file with the given name. If the file already exists, it will be
over-written.
Parameters
----------
filename : str
the name of the file to write dataset to. If the
value represents a relative path, it will be taken
to be relative to the value of the ``dir``
parameter. If None, a default name is attempted
based on the record title and format.
dir : str
the directory to write the file into. This value
will be ignored if filename is an absolute path.
timeout : int
the time in seconds to allow for a successful
connection with server before failing with an
IOError (specifically, | |
weight = _get_weight_or_default(ifst._weight_factory, weight,
map_type == MapType.TIMES_MAPPER)
ofst = ifst._mutable_fst_type()
ifst._ops.map(ifst, ofst, map_type, delta, weight)
return ofst
def compose(ifst1, ifst2, connect=True, compose_filter="auto"):
    """
    Constructively composes two FSTs.

    If A transduces string x to y with weight a and B transduces y to z
    with weight b, their composition transduces x to z with the semiring
    product of a and b.  The output labels of the first transducer or the
    input labels of the second must be sorted (or otherwise support
    appropriate matchers).

    Args:
      ifst1: The first input FST.
      ifst2: The second input FST.
      connect: Should output be trimmed?
      compose_filter: A string matching a known composition filter; one of:
          "alt_sequence", "auto", "match", "null", "sequence", "trivial".

    Returns:
      A composed FST.

    Raises:
      ValueError: Unknown compose filter.

    See also: `arcsort`.
    """
    try:
        filter_type = _getters.GetComposeFilter(compose_filter)
    except ValueError:
        raise ValueError("Unknown compose filter: {!r}"
                         .format(compose_filter))
    result = ifst1._mutable_fst_type()
    ifst1._ops.compose(ifst1, ifst2, result, connect, filter_type)
    return result
def determinize(ifst, delta=DELTA, weight=None, nstate=NO_STATE_ID,
                subsequential_label=0, det_type="functional",
                increment_subsequential_label=False):
    """
    Constructively determinizes a weighted FST.

    Produces an equivalent FST in which no state has two transitions with
    the same input label.  For this algorithm, epsilon transitions are
    treated as regular symbols (cf. `rmepsilon`).

    Args:
      ifst: The input FST.
      delta: Comparison/quantization delta (default: 0.0009765625).
      weight: A Weight in the FST semiring (or a value convertible to one)
          giving the threshold below which paths are pruned; None disables
          pruning.
      nstate: State number threshold (default: -1).
      subsequential_label: Input label of arc corresponding to residual
          final output when producing a subsequential transducer.
      det_type: Type of determinization; one of: "functional" (input
          transducer is functional), "nonfunctional" (input transducer is
          not functional) and "disambiguate" (input transducer is not
          functional but only keep the min of ambiguous outputs).
      increment_subsequential_label: Increment subsequential when creating
          several arcs for the residual final output at a given state.

    Returns:
      An equivalent deterministic FST.

    Raises:
      ValueError: Unknown determinization type.

    See also: `disambiguate`, `rmepsilon`.
    """
    try:
        dtype = _getters.GetDeterminizeType(det_type)
    except ValueError:
        raise ValueError("Unknown determinization type: {!r}".format(det_type))
    # A weight of None maps to semiring Zero, i.e. no pruning.
    threshold = _get_weight_or_default(ifst._weight_factory, weight, False)
    result = ifst._mutable_fst_type()
    ifst._ops.determinize(ifst, result, delta, threshold, nstate,
                          subsequential_label, dtype,
                          increment_subsequential_label)
    return result
def difference(ifst1, ifst2, connect=True, compose_filter="auto"):
    """
    Constructively computes the difference of two FSTs.

    Only strings accepted by the first automaton but not by the second are
    retained in the result.  The first argument must be an acceptor; the
    second must be an unweighted, epsilon-free, deterministic acceptor.
    The output labels of the first transducer or the input labels of the
    second must be sorted (or otherwise support appropriate matchers).

    Args:
      ifst1: The first input FST.
      ifst2: The second input FST.
      connect: Should the output FST be trimmed?
      compose_filter: A string matching a known composition filter; one of:
          "alt_sequence", "auto", "match", "null", "sequence", "trivial".

    Returns:
      An FST representing the difference of the FSTs.

    Raises:
      ValueError: Unknown compose filter.
    """
    try:
        filter_type = _getters.GetComposeFilter(compose_filter)
    except ValueError:
        raise ValueError("Unknown compose filter: {!r}"
                         .format(compose_filter))
    result = ifst1._mutable_fst_type()
    ifst1._ops.difference(ifst1, ifst2, result, connect, filter_type)
    return result
def disambiguate(ifst, delta=DELTA, weight=None,
                 nstate=NO_STATE_ID, subsequential_label=0):
    """
    Constructively disambiguates a weighted transducer.

    The result is an equivalent FST with the property that no two
    successful paths have the same input labeling.  For this algorithm,
    epsilon transitions are treated as regular symbols (cf. `rmepsilon`).

    Args:
      ifst: The input FST.
      delta: Comparison/quantization delta (default: 0.0009765625).
      weight: A Weight in the FST semiring (or a value convertible to one)
          giving the threshold below which paths are pruned; None disables
          pruning.
      nstate: State number threshold.
      subsequential_label: Input label of arc corresponding to residual
          final output when producing a subsequential transducer.

    Returns:
      An equivalent disambiguated FST.

    See also: `determinize`, `rmepsilon`.
    """
    # A weight of None maps to semiring Zero, i.e. no pruning.
    threshold = _get_weight_or_default(ifst._weight_factory, weight, False)
    result = ifst._mutable_fst_type()
    ifst._ops.disambiguate(ifst, result, delta, threshold, nstate,
                           subsequential_label)
    return result
def epsnormalize(ifst, eps_norm_output=False):
    """
    Constructively epsilon-normalizes an FST.

    Creates an equivalent, epsilon-normalized FST.  An acceptor is
    epsilon-normalized if it is epsilon-removed (cf. `rmepsilon`).  A
    transducer is input epsilon-normalized if, in addition, along any
    path, all arcs with epsilon input labels follow all arcs with
    non-epsilon input labels; output epsilon-normalized is defined
    similarly.  The input FST must be functional.

    Args:
      ifst: The input FST.
      eps_norm_output: Should the FST be output epsilon-normalized?

    Returns:
      An equivalent epsilon-normalized FST.

    See also: `rmepsilon`.
    """
    norm_type = (EpsNormalizeType.EPS_NORM_OUTPUT if eps_norm_output
                 else EpsNormalizeType.EPS_NORM_INPUT)
    result = ifst._mutable_fst_type()
    ifst._ops.epsnormalize(ifst, result, norm_type)
    return result
def equal(ifst1, ifst2, delta=DELTA):
    """
    Are two FSTs equal?

    Tests whether two FSTs have the same states with the same numbering
    and the same transitions with the same labels and weights in the
    same order.

    Args:
      ifst1: The first input FST.
      ifst2: The second input FST.
      delta: Comparison/quantization delta (0.0009765625).

    Returns:
      True if the FSTs satisfy the above condition, else False.

    See also: `equivalent`, `isomorphic`, `randequivalent`.
    """
    are_equal = ifst1._ops.equal(ifst1, ifst2, delta)
    return are_equal
def equivalent(ifst1, ifst2, delta=DELTA):
    """
    Are the two acceptors equivalent?

    Tests whether two epsilon-free deterministic weighted acceptors accept
    exactly the same strings with exactly the same weights.

    Args:
      ifst1: The first input FST.
      ifst2: The second input FST.
      delta: Comparison/quantization delta (default: 0.0009765625).

    Returns:
      True if the FSTs satisfy the above condition, else False.

    Raises:
      RuntimeError: Equivalence test encountered error.

    See also: `equal`, `isomorphic`, `randequivalent`.
    """
    is_equivalent, had_error = ifst1._ops.equivalent(ifst1, ifst2, delta)
    if had_error:
        raise RuntimeError("Equivalence test encountered error")
    return is_equivalent
def intersect(ifst1, ifst2, connect=True, compose_filter="auto"):
    """
    Constructively intersects two FSTs.

    Computes the intersection (Hadamard product) of two acceptors: only
    strings accepted by both automata are retained in the result. Both
    arguments must be acceptors, and one of them must be label-sorted (or
    otherwise support appropriate matchers).

    Args:
      ifst1: The first input FST.
      ifst2: The second input FST.
      connect: Should output be trimmed?
      compose_filter: A string matching a known composition filter; one of:
          "alt_sequence", "auto", "match", "null", "sequence", "trivial".

    Returns:
      An intersected FST.

    Raises:
      ValueError: Unknown compose filter.
    """
    try:
        filter_enum = _getters.GetComposeFilter(compose_filter)
    except ValueError:
        raise ValueError("Unknown compose filter: {!r}"
                         .format(compose_filter))
    result = ifst1._mutable_fst_type()
    ifst1._ops.intersect(ifst1, ifst2, result, connect, filter_enum)
    return result
def isomorphic(ifst1, ifst2, delta=DELTA):
    """
    Are the two acceptors isomorphic?

    Determines whether two transducers with a certain required determinism
    have the same states (irrespective of numbering) and the same
    transitions with the same labels and weights (irrespective of
    ordering); i.e. whether the states of one can be renumbered and the
    transitions leaving each state reordered so that the two automata are
    equal in the sense of `equal`.

    Args:
      ifst1: The first input FST.
      ifst2: The second input FST.
      delta: Comparison/quantization delta (default: 0.0009765625).

    Returns:
      True if the two transducers satisfy the above condition, else False.

    See also: `equal`, `equivalent`, `randequivalent`.
    """
    fst_ops = ifst1._ops
    return fst_ops.isomorphic(ifst1, ifst2, delta)
def prune(ifst, weight=None, nstate=NO_STATE_ID, delta=DELTA):
"""
Constructively removes paths with weights below a certain threshold.
This operation deletes states and arcs in the input FST that do not belong
to a successful path whose weight is no more (w.r.t the natural semiring
order) than the threshold t \otimes-times the weight of the shortest path in
the input FST. Weights must be commutative and have the path property.
Args:
ifst: The input FST.
weight: A Weight in the FST semiring or an object that can be converted
to a Weight in the FST semiring indicating the desired weight
threshold below which paths are pruned; if None, no paths are
pruned.
nstate: State number threshold (default: -1).
delta: Comparison/quantization delta (default: 0.0009765625).
Returns:
A pruned FST.
See also: The destructive variant.
"""
# | |
# -*- coding: utf-8 -*-
# $Id: wuihlpform.py $
"""
Test Manager Web-UI - Form Helpers.
"""
__copyright__ = \
"""
Copyright (C) 2012-2017 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 118412 $"
# Standard python imports.
import copy;
# Validation Kit imports.
from common import utils;
from common.webutils import escapeAttr, escapeElem;
from testmanager import config;
from testmanager.core.schedgroup import SchedGroupMemberData, SchedGroupDataEx;
from testmanager.core.testcaseargs import TestCaseArgsData;
from testmanager.core.testgroup import TestGroupMemberData, TestGroupDataEx;
class WuiHlpForm(object):
"""
Helper for constructing a form.
"""
ksItemsList = 'ksItemsList'
ksOnSubmit_AddReturnToFieldWithCurrentUrl = '+AddReturnToFieldWithCurrentUrl+';
def __init__(self, sId, sAction, dErrors = None, fReadOnly = False, sOnSubmit = None):
self._fFinalized = False;
self._fReadOnly = fReadOnly;
self._dErrors = dErrors if dErrors is not None else dict();
if sOnSubmit == self.ksOnSubmit_AddReturnToFieldWithCurrentUrl:
sOnSubmit = u'return addRedirectToInputFieldWithCurrentUrl(this)';
if sOnSubmit is None: sOnSubmit = u'';
else: sOnSubmit = u' onsubmit=\"%s\"' % (escapeAttr(sOnSubmit),);
self._sBody = u'\n' \
u'<div id="%s" class="tmform">\n' \
u' <form action="%s" method="post"%s>\n' \
u' <ul>\n' \
% (sId, sAction, sOnSubmit);
def _add(self, sText):
"""Internal worker for appending text to the body."""
assert not self._fFinalized;
if not self._fFinalized:
self._sBody += unicode(sText, errors='ignore') if isinstance(sText, str) else sText;
return True;
return False;
def _escapeErrorText(self, sText):
"""Escapes error text, preserving some predefined HTML tags."""
if sText.find('<br>') >= 0:
asParts = sText.split('<br>');
for i, _ in enumerate(asParts):
asParts[i] = escapeElem(asParts[i].strip());
sText = '<br>\n'.join(asParts);
else:
sText = escapeElem(sText);
return sText;
def _addLabel(self, sName, sLabel, sDivSubClass = 'normal'):
"""Internal worker for adding a label."""
if sName in self._dErrors:
sError = self._dErrors[sName];
if utils.isString(sError): # List error trick (it's an associative array).
return self._add(u' <li>\n'
u' <div class="tmform-field"><div class="tmform-field-%s">\n'
u' <label for="%s" class="tmform-error-label">%s\n'
u' <span class="tmform-error-desc">%s</span>\n'
u' </label>\n'
% (escapeAttr(sDivSubClass), escapeAttr(sName), escapeElem(sLabel),
self._escapeErrorText(sError), ) );
return self._add(u' <li>\n'
u' <div class="tmform-field"><div class="tmform-field-%s">\n'
u' <label for="%s">%s</label>\n'
% (escapeAttr(sDivSubClass), escapeAttr(sName), escapeElem(sLabel)) );
def finalize(self):
"""
Finalizes the form and returns the body.
"""
if not self._fFinalized:
self._add(u' </ul>\n'
u' </form>\n'
u'</div>\n'
u'<div class="clear"></div>\n' );
return self._sBody;
def addTextHidden(self, sName, sValue, sExtraAttribs = ''):
"""Adds a hidden text input."""
return self._add(u' <div class="tmform-field-hidden">\n'
u' <input name="%s" id="%s" type="text" hidden%s value="%s" class="tmform-hidden">\n'
u' </div>\n'
u' </li>\n'
% ( escapeAttr(sName), escapeAttr(sName), sExtraAttribs, escapeElem(str(sValue)) ));
#
# Non-input stuff.
#
def addNonText(self, sValue, sLabel, sName = 'non-text', sPostHtml = ''):
"""Adds a read-only text input."""
self._addLabel(sName, sLabel, 'string');
if sValue is None: sValue = '';
return self._add(u' <p>%s%s</p>\n'
u' </div></div>\n'
u' </li>\n'
% (escapeElem(unicode(sValue)), sPostHtml ));
def addRawHtml(self, sRawHtml, sLabel, sName = 'raw-html'):
"""Adds a read-only text input."""
self._addLabel(sName, sLabel, 'string');
self._add(sRawHtml);
return self._add(u' </div></div>\n'
u' </li>\n');
#
# Text input fields.
#
def addText(self, sName, sValue, sLabel, sSubClass = 'string', sExtraAttribs = '', sPostHtml = ''):
"""Adds a text input."""
if self._fReadOnly:
return self.addTextRO(sName, sValue, sLabel, sSubClass, sExtraAttribs);
if sSubClass not in ('int', 'long', 'string', 'uuid', 'timestamp', 'wide'): raise Exception(sSubClass);
self._addLabel(sName, sLabel, sSubClass);
if sValue is None: sValue = '';
return self._add(u' <input name="%s" id="%s" type="text"%s value="%s">%s\n'
u' </div></div>\n'
u' </li>\n'
% ( escapeAttr(sName), escapeAttr(sName), sExtraAttribs, escapeElem(sValue), sPostHtml ));
def addTextRO(self, sName, sValue, sLabel, sSubClass = 'string', sExtraAttribs = '', sPostHtml = ''):
"""Adds a read-only text input."""
if sSubClass not in ('int', 'long', 'string', 'uuid', 'timestamp', 'wide'): raise Exception(sSubClass);
self._addLabel(sName, sLabel, sSubClass);
if sValue is None: sValue = '';
return self._add(u' <input name="%s" id="%s" type="text" readonly%s value="%s" class="tmform-input-readonly">'
u'%s\n'
u' </div></div>\n'
u' </li>\n'
% ( escapeAttr(sName), escapeAttr(sName), sExtraAttribs, escapeElem(unicode(sValue)), sPostHtml ));
    # Convenience wrappers selecting the 'wide' text input sub-class.
    def addWideText(self, sName, sValue, sLabel, sExtraAttribs = '', sPostHtml = ''):
        """Adds a wide text input (delegates to addText with sSubClass='wide')."""
        return self.addText(sName, sValue, sLabel, 'wide', sExtraAttribs, sPostHtml = sPostHtml);
    def addWideTextRO(self, sName, sValue, sLabel, sExtraAttribs = '', sPostHtml = ''):
        """Adds a wide read-only text input (delegates to addTextRO with sSubClass='wide')."""
        return self.addTextRO(sName, sValue, sLabel, 'wide', sExtraAttribs, sPostHtml = sPostHtml);
def _adjustMultilineTextAttribs(self, sExtraAttribs, sValue):
""" Internal helper for setting good default sizes for textarea based on content."""
if sExtraAttribs.find('cols') < 0 and sExtraAttribs.find('width') < 0:
sExtraAttribs = 'cols="96%" ' + sExtraAttribs;
if sExtraAttribs.find('rows') < 0 and sExtraAttribs.find('width') < 0:
if sValue is None: sValue = '';
else: sValue = sValue.strip();
cRows = sValue.count('\n') + (not sValue.endswith('\n'));
if cRows * 80 < len(sValue):
cRows += 2;
cRows = max(min(cRows, 16), 2);
sExtraAttribs = ('rows="%s" ' % (cRows,)) + sExtraAttribs;
return sExtraAttribs;
def addMultilineText(self, sName, sValue, sLabel, sSubClass = 'string', sExtraAttribs = ''):
"""Adds a multiline text input."""
if self._fReadOnly:
return self.addMultilineTextRO(sName, sValue, sLabel, sSubClass, sExtraAttribs);
if sSubClass not in ('int', 'long', 'string', 'uuid', 'timestamp'): raise Exception(sSubClass)
self._addLabel(sName, sLabel, sSubClass)
if sValue is None: sValue = '';
sNewValue = unicode(sValue) if not isinstance(sValue, list) else '\n'.join(sValue)
return self._add(u' <textarea name="%s" id="%s" %s>%s</textarea>\n'
u' </div></div>\n'
u' </li>\n'
% ( escapeAttr(sName), escapeAttr(sName), self._adjustMultilineTextAttribs(sExtraAttribs, sNewValue),
escapeElem(sNewValue)))
def addMultilineTextRO(self, sName, sValue, sLabel, sSubClass = 'string', sExtraAttribs = ''):
"""Adds a multiline read-only text input."""
if sSubClass not in ('int', 'long', 'string', 'uuid', 'timestamp'): raise Exception(sSubClass)
self._addLabel(sName, sLabel, sSubClass)
if sValue is None: sValue = '';
sNewValue = unicode(sValue) if not isinstance(sValue, list) else '\n'.join(sValue)
return self._add(u' <textarea name="%s" id="%s" readonly %s>%s</textarea>\n'
u' </div></div>\n'
u' </li>\n'
% ( escapeAttr(sName), escapeAttr(sName), self._adjustMultilineTextAttribs(sExtraAttribs, sNewValue),
escapeElem(sNewValue)))
    # Typed convenience wrappers around addText/addTextRO.
    def addInt(self, sName, iValue, sLabel, sExtraAttribs = '', sPostHtml = ''):
        """Adds an integer input."""
        return self.addText(sName, unicode(iValue), sLabel, 'int', sExtraAttribs, sPostHtml = sPostHtml);
    def addIntRO(self, sName, iValue, sLabel, sExtraAttribs = '', sPostHtml = ''):
        """Adds a read-only integer input."""
        return self.addTextRO(sName, unicode(iValue), sLabel, 'int', sExtraAttribs, sPostHtml = sPostHtml);
    def addLong(self, sName, lValue, sLabel, sExtraAttribs = '', sPostHtml = ''):
        """Adds a long input."""
        return self.addText(sName, unicode(lValue), sLabel, 'long', sExtraAttribs, sPostHtml = sPostHtml);
    def addLongRO(self, sName, lValue, sLabel, sExtraAttribs = '', sPostHtml = ''):
        """Adds a read-only long input."""
        return self.addTextRO(sName, unicode(lValue), sLabel, 'long', sExtraAttribs, sPostHtml = sPostHtml);
    def addUuid(self, sName, uuidValue, sLabel, sExtraAttribs = '', sPostHtml = ''):
        """Adds a UUID input."""
        return self.addText(sName, unicode(uuidValue), sLabel, 'uuid', sExtraAttribs, sPostHtml = sPostHtml);
    def addUuidRO(self, sName, uuidValue, sLabel, sExtraAttribs = '', sPostHtml = ''):
        """Adds a read-only UUID input."""
        return self.addTextRO(sName, unicode(uuidValue), sLabel, 'uuid', sExtraAttribs, sPostHtml = sPostHtml);
    def addTimestampRO(self, sName, sTimestamp, sLabel, sExtraAttribs = '', sPostHtml = ''):
        """Adds a read-only database string timestamp input."""
        return self.addTextRO(sName, sTimestamp, sLabel, 'timestamp', sExtraAttribs, sPostHtml = sPostHtml);
#
# Text areas.
#
#
# Combo boxes.
#
def addComboBox(self, sName, sSelected, sLabel, aoOptions, sExtraAttribs = '', sPostHtml = ''):
"""Adds a combo box."""
if self._fReadOnly:
return self.addComboBoxRO(sName, sSelected, sLabel, aoOptions, sExtraAttribs, sPostHtml);
self._addLabel(sName, sLabel, 'combobox');
self._add(' <select name="%s" id="%s" class="tmform-combobox"%s>\n'
% (escapeAttr(sName), escapeAttr(sName), sExtraAttribs));
sSelected = unicode(sSelected);
for iValue, sText, _ in aoOptions:
sValue = unicode(iValue);
self._add(' <option value="%s"%s>%s</option>\n'
% (escapeAttr(sValue), ' selected' if sValue == sSelected else '',
escapeElem(sText)));
return self._add(u' </select>' + sPostHtml + '\n'
u' </div></div>\n'
u' </li>\n');
def addComboBoxRO(self, sName, sSelected, sLabel, aoOptions, sExtraAttribs = '', sPostHtml = ''):
"""Adds a read-only combo box."""
self.addTextHidden(sName, sSelected);
self._addLabel(sName, sLabel, 'combobox-readonly');
self._add(u' <select name="%s" id="%s" disabled class="tmform-combobox"%s>\n'
% (escapeAttr(sName), escapeAttr(sName), sExtraAttribs));
sSelected = unicode(sSelected);
for iValue, sText, _ in aoOptions:
sValue = unicode(iValue);
self._add(' <option value="%s"%s>%s</option>\n'
% (escapeAttr(sValue), ' selected' if sValue == sSelected else '',
escapeElem(sText)));
return self._add(u' </select>' + sPostHtml + '\n'
u' </div></div>\n'
u' </li>\n');
#
# Check boxes.
#
@staticmethod
def _reinterpretBool(fValue):
"""Reinterprets a value as a boolean type."""
if fValue is not type(True):
if fValue is None:
| |
import random
import binascii
class DES(object):
# Initial permutation for subkey generation (IPC)
__ipc = [
56, 48, 40, 32, 24, 16, 8,
0, 57, 49, 41, 33, 25, 17,
9, 1, 58, 50, 42, 34, 26,
18, 10, 2, 59, 51, 43, 35,
62, 54, 46, 38, 30, 22, 14,
6, 61, 53, 45, 37, 29, 21,
13, 5, 60, 52, 44, 36, 28,
20, 12, 4, 27, 19, 11, 3
]
# Left rotation for subkey generation (LS)
__ls = [
1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1
]
# Final permutation for subkey generation (FPC)
__fpc = [
13, 16, 10, 23, 0, 4,
2, 27, 14, 5, 20, 9,
22, 18, 11, 3, 25, 7,
15, 6, 26, 19, 12, 1,
40, 51, 30, 36, 46, 54,
29, 39, 50, 44, 32, 47,
43, 48, 38, 55, 33, 52,
45, 41, 49, 35, 28, 31
]
# Initial permutation (IP)
__ip = [
57, 49, 41, 33, 25, 17, 9, 1,
59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5,
63, 55, 47, 39, 31, 23, 15, 7,
56, 48, 40, 32, 24, 16, 8, 0,
58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6
]
# Expansion table (E)
__et = [
31, 0, 1, 2, 3, 4,
3, 4, 5, 6, 7, 8,
7, 8, 9, 10, 11, 12,
11, 12, 13, 14, 15, 16,
15, 16, 17, 18, 19, 20,
19, 20, 21, 22, 23, 24,
23, 24, 25, 26, 27, 28,
27, 28, 29, 30, 31, 0
]
# S-boxes
__sbox = [
# S1
[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7,
0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8,
4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0,
15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13],
# S2
[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10,
3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5,
0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15,
13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9],
# S3
[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8,
13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1,
13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7,
1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12],
# S4
[ 7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15,
13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9,
10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4,
3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14],
# S5
[ 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9,
14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6,
4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14,
11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3],
# S6
[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11,
10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8,
9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6,
4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13],
# S7
[ 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1,
13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6,
1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2,
6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12],
# S8
[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7,
1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2,
7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8,
2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11],
]
# Post S-boxes permutation (P)
__psp = [
15, 6, 19, 20, 28, 11,
27, 16, 0, 14, 22, 25,
4, 17, 30, 9, 1, 7,
23, 13, 31, 26, 2, 8,
18, 12, 29, 5, 21, 10,
3, 24
]
# Final permutation (IP^-1)
__fp = [
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25,
32, 0, 40, 8, 48, 16, 56, 24
]
# Cryption modes
ENC = 0
DEC = 1
# Cryption rounds
ROUNDS = 6
def __init__(self, key):
self.K = [[0] * 48] * DES.ROUNDS
self.L = []
self.R = []
self.C = []
self.set_key(key)
self.set_iv(self.gen_bits(64))
    def get_key(self):
        """Returns the key as a bit list (as converted by set_key)."""
        return self.__key
    def set_key(self, key):
        """Sets the key from a string of '0'/'1' characters and (re)derives the round subkeys."""
        self.__key = self.__string_to_bits(key)
        self.K = [[0] * 48] * DES.ROUNDS  # rows are aliased, but __generate_subkeys replaces each row wholesale
        self.__generate_subkeys()
    def get_iv(self):
        """Returns the IV as a bit list."""
        return self.__iv
    def set_iv(self, iv):
        """Sets the IV from a string of '0'/'1' characters."""
        self.__iv = self.__string_to_bits(iv)
    def __string_to_bits(self, x):
        """Converts a string of digit characters into a list of ints."""
        return map(int, list(x))
    def __bits_to_string(self, x):
        """Converts a list of bits back into a '0101...' string."""
        return ''.join(map(str, x))
    def __listxor(self, a, b):
        """Element-wise XOR of two equal-length bit lists."""
        return map(lambda x, y: x ^ y, a, b)
    def __permutate(self, block, table):
        """Reorders `block` by `table`: output[i] = block[table[i]]."""
        return map(lambda x: block[x], table)
    # NOTE(review): this module relies on Python 2 semantics -- map() returning
    # a list (results are sliced/indexed elsewhere) -- and breaks on Python 3.
# Get the padding char(in binary) according to padding length
def __get_pad(self, plen):
pch = bin(int(binascii.hexlify(chr(plen)), 16))[2:]
return '0' * (8 - len(pch)) + pch
# Recover padding length according to the padding char
    def __read_pad(self, pstr):
        """Decodes an 8-character '0'/'1' string (as produced by __get_pad) back into the pad length."""
        return int('0b' + pstr, 2)
def __pad(self, data):
plen = 8 - (len(data) % 64) / 8
return data + self.__get_pad(plen) * plen
def __unpad(self, data):
plen = self.__read_pad(data[-8:])
return data[:-(plen * 8)]
    def __generate_subkeys(self):
        """Derives the DES.ROUNDS 48-bit round subkeys into self.K.

        Applies the subkey initial permutation (__ipc) to the key, splits the
        result into two 28-bit halves, and for each round left-rotates both
        halves by __ls[i] positions before applying the subkey final
        permutation (__fpc) to obtain that round's subkey.
        """
        key = self.__permutate(self.get_key(), DES.__ipc)
        self.L, self.R = key[:28], key[28:]
        for i in range(DES.ROUNDS):
            for j in range(DES.__ls[i]):
                # Rotate each half left by one bit: move the head to the tail.
                self.L.append(self.L[0])
                del self.L[0]
                self.R.append(self.R[0])
                del self.R[0]
            self.K[i] = self.__permutate(self.L + self.R, DES.__fpc)
# Implementation of single-block crypting
def __crypt(self, block, crypt_type):
block = self.__permutate(block, DES.__ip)
self.L, self.R = block[:32], block[32:]
round_no, round_delta = {DES.ENC: (0, 1), DES.DEC: (DES.ROUNDS-1, -1)}[crypt_type]
for i in range(DES.ROUNDS):
# make a copy of R[i-1] -> L[i]
old_R = self.R[:]
# -> R[i]
self.R = self.__permutate(self.R, self.__et)
# Xor R[i-1] with K[i]
self.R = self.__listxor(self.R, self.K[round_no])
B = [self.R[x*6:(x+1)*6] for x in range(8)]
new_B = [0] * 32
for j in range(8):
# S-box mapping
l = (B[j][0] << 1) + B[j][5]
n = (B[j][1] << 3) + (B[j][2] << 2) + (B[j][3] << 1) + B[j][4]
v = self.__sbox[j][(l << 4) + n]
# Convert to bits
new_B[j*4+0] = (v & 8) >> 3
new_B[j*4+1] | |
as database name.
:param pulumi.Input[str] state: The current state of the Database Home.
:param pulumi.Input[str] tde_wallet_password: The optional password to open the TDE wallet. The password must be at least nine characters and contain at least two uppercase, two lowercase, two numeric, and two special characters. The special characters must be _, \#, or -.
:param pulumi.Input[str] time_created: The date and time the Database Home was created.
:param pulumi.Input[str] time_stamp_for_point_in_time_recovery: The point in time of the original database from which the new database is created. If not specifed, the latest backup is used to create the database.
"""
pulumi.set(__self__, "admin_password", <PASSWORD>)
if backup_id is not None:
pulumi.set(__self__, "backup_id", backup_id)
if backup_tde_password is not None:
pulumi.set(__self__, "backup_tde_password", backup_tde_password)
if character_set is not None:
pulumi.set(__self__, "character_set", character_set)
if connection_strings is not None:
pulumi.set(__self__, "connection_strings", connection_strings)
if database_id is not None:
pulumi.set(__self__, "database_id", database_id)
if database_software_image_id is not None:
pulumi.set(__self__, "database_software_image_id", database_software_image_id)
if db_backup_config is not None:
pulumi.set(__self__, "db_backup_config", db_backup_config)
if db_name is not None:
pulumi.set(__self__, "db_name", db_name)
if db_unique_name is not None:
pulumi.set(__self__, "db_unique_name", db_unique_name)
if db_workload is not None:
pulumi.set(__self__, "db_workload", db_workload)
if defined_tags is not None:
pulumi.set(__self__, "defined_tags", defined_tags)
if freeform_tags is not None:
pulumi.set(__self__, "freeform_tags", freeform_tags)
if id is not None:
pulumi.set(__self__, "id", id)
if lifecycle_details is not None:
pulumi.set(__self__, "lifecycle_details", lifecycle_details)
if ncharacter_set is not None:
pulumi.set(__self__, "ncharacter_set", ncharacter_set)
if one_off_patches is not None:
pulumi.set(__self__, "one_off_patches", one_off_patches)
if pdb_name is not None:
pulumi.set(__self__, "pdb_name", pdb_name)
if state is not None:
pulumi.set(__self__, "state", state)
if tde_wallet_password is not None:
pulumi.set(__self__, "tde_wallet_password", tde_wallet_password)
if time_created is not None:
pulumi.set(__self__, "time_created", time_created)
if time_stamp_for_point_in_time_recovery is not None:
pulumi.set(__self__, "time_stamp_for_point_in_time_recovery", time_stamp_for_point_in_time_recovery)
    @property
    @pulumi.getter(name="adminPassword")
    def admin_password(self) -> pulumi.Input[str]:
        """
        A strong password for SYS, SYSTEM, PDB Admin and TDE Wallet. The password must be at least nine characters and contain at least two uppercase, two lowercase, two numbers, and two special characters. The special characters must be _, #, or -.
        """
        return pulumi.get(self, "admin_password")
    @admin_password.setter
    def admin_password(self, value: pulumi.Input[str]):
        pulumi.set(self, "admin_password", value)
    @property
    @pulumi.getter(name="backupId")
    def backup_id(self) -> Optional[pulumi.Input[str]]:
        """
        The backup [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
        """
        return pulumi.get(self, "backup_id")
    @backup_id.setter
    def backup_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_id", value)
    @property
    @pulumi.getter(name="backupTdePassword")
    def backup_tde_password(self) -> Optional[pulumi.Input[str]]:
        """
        The password to open the TDE wallet.
        """
        return pulumi.get(self, "backup_tde_password")
    @backup_tde_password.setter
    def backup_tde_password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "backup_tde_password", value)
    @property
    @pulumi.getter(name="characterSet")
    def character_set(self) -> Optional[pulumi.Input[str]]:
        """
        The character set for the database. The default is AL32UTF8. Allowed values are:
        """
        return pulumi.get(self, "character_set")
    @character_set.setter
    def character_set(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "character_set", value)
    @property
    @pulumi.getter(name="connectionStrings")
    def connection_strings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DbHomeDatabaseConnectionStringArgs']]]]:
        # NOTE(review): undocumented upstream; presumably the connection
        # string details for the database (see DbHomeDatabaseConnectionStringArgs).
        return pulumi.get(self, "connection_strings")
    @connection_strings.setter
    def connection_strings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DbHomeDatabaseConnectionStringArgs']]]]):
        pulumi.set(self, "connection_strings", value)
    @property
    @pulumi.getter(name="databaseId")
    def database_id(self) -> Optional[pulumi.Input[str]]:
        """
        The database [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
        """
        return pulumi.get(self, "database_id")
    @database_id.setter
    def database_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "database_id", value)
    @property
    @pulumi.getter(name="databaseSoftwareImageId")
    def database_software_image_id(self) -> Optional[pulumi.Input[str]]:
        """
        The database software image [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm)
        """
        return pulumi.get(self, "database_software_image_id")
    @database_software_image_id.setter
    def database_software_image_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "database_software_image_id", value)
    @property
    @pulumi.getter(name="dbBackupConfig")
    def db_backup_config(self) -> Optional[pulumi.Input['DbHomeDatabaseDbBackupConfigArgs']]:
        """
        (Updatable) Backup Options To use any of the API operations, you must be authorized in an IAM policy. If you're not authorized, talk to an administrator. If you're an administrator who needs to write policies to give users access, see [Getting Started with Policies](https://docs.cloud.oracle.com/iaas/Content/Identity/Concepts/policygetstarted.htm).
        """
        return pulumi.get(self, "db_backup_config")
    @db_backup_config.setter
    def db_backup_config(self, value: Optional[pulumi.Input['DbHomeDatabaseDbBackupConfigArgs']]):
        pulumi.set(self, "db_backup_config", value)
    @property
    @pulumi.getter(name="dbName")
    def db_name(self) -> Optional[pulumi.Input[str]]:
        """
        The display name of the database to be created from the backup. It must begin with an alphabetic character and can contain a maximum of eight alphanumeric characters. Special characters are not permitted.
        """
        return pulumi.get(self, "db_name")
    @db_name.setter
    def db_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "db_name", value)
    @property
    @pulumi.getter(name="dbUniqueName")
    def db_unique_name(self) -> Optional[pulumi.Input[str]]:
        # NOTE(review): undocumented upstream; presumably the DB_UNIQUE_NAME
        # of the database -- confirm against the provider documentation.
        return pulumi.get(self, "db_unique_name")
    @db_unique_name.setter
    def db_unique_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "db_unique_name", value)
    @property
    @pulumi.getter(name="dbWorkload")
    def db_workload(self) -> Optional[pulumi.Input[str]]:
        """
        The database workload type.
        """
        return pulumi.get(self, "db_workload")
    @db_workload.setter
    def db_workload(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "db_workload", value)
    @property
    @pulumi.getter(name="definedTags")
    def defined_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm).
        """
        return pulumi.get(self, "defined_tags")
    @defined_tags.setter
    def defined_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "defined_tags", value)
    @property
    @pulumi.getter(name="freeformTags")
    def freeform_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        (Updatable) Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
        """
        return pulumi.get(self, "freeform_tags")
    @freeform_tags.setter
    def freeform_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "freeform_tags", value)
    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the backup destination.
        """
        return pulumi.get(self, "id")
    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)
    # NOTE(review): lifecycle_details (and state/time_created below) look like
    # server-populated status fields mirrored into the args type -- confirm
    # against the provider schema before relying on them as inputs.
    @property
    @pulumi.getter(name="lifecycleDetails")
    def lifecycle_details(self) -> Optional[pulumi.Input[str]]:
        """
        Additional information about the current lifecycle state.
        """
        return pulumi.get(self, "lifecycle_details")
    @lifecycle_details.setter
    def lifecycle_details(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "lifecycle_details", value)
    @property
    @pulumi.getter(name="ncharacterSet")
    def ncharacter_set(self) -> Optional[pulumi.Input[str]]:
        """
        The national character set for the database. The default is AL16UTF16. Allowed values are: AL16UTF16 or UTF8.
        """
        return pulumi.get(self, "ncharacter_set")
    @ncharacter_set.setter
    def ncharacter_set(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ncharacter_set", value)
    @property
    @pulumi.getter(name="oneOffPatches")
    def one_off_patches(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of one-off patches for Database Homes.
        """
        return pulumi.get(self, "one_off_patches")
    @one_off_patches.setter
    def one_off_patches(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "one_off_patches", value)
    @property
    @pulumi.getter(name="pdbName")
    def pdb_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the pluggable database. The name must begin with an alphabetic character and can contain a maximum of thirty alphanumeric characters. Special characters are not permitted. Pluggable database should not be same as database name.
        """
        return pulumi.get(self, "pdb_name")
    @pdb_name.setter
    def pdb_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "pdb_name", value)
    @property
    @pulumi.getter
    def state(self) -> Optional[pulumi.Input[str]]:
        """
        The current state of the Database Home.
        """
        return pulumi.get(self, "state")
    @state.setter
    def state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "state", value)
    @property
    @pulumi.getter(name="tdeWalletPassword")
    def tde_wallet_password(self) -> Optional[pulumi.Input[str]]:
        """
        The optional password to open the TDE wallet. The password must be at least nine characters and contain at least two uppercase, two lowercase, two numeric, and two special characters. The special characters must be _, #, or -.
        """
        return pulumi.get(self, "tde_wallet_password")
    @tde_wallet_password.setter
    def tde_wallet_password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tde_wallet_password", value)
    @property
    @pulumi.getter(name="timeCreated")
    def time_created(self) -> Optional[pulumi.Input[str]]:
        """
        The date and time the Database Home was created.
        """
        return pulumi.get(self, "time_created")
    @time_created.setter
    def time_created(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "time_created", value)
    @property
    @pulumi.getter(name="timeStampForPointInTimeRecovery")
    def time_stamp_for_point_in_time_recovery(self) -> Optional[pulumi.Input[str]]:
        """
        The point in time of the original database from which the new database is created. If not specified, the latest backup is used to create the database.
        """
        return pulumi.get(self, "time_stamp_for_point_in_time_recovery")
    @time_stamp_for_point_in_time_recovery.setter
    def time_stamp_for_point_in_time_recovery(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "time_stamp_for_point_in_time_recovery", value)
@pulumi.input_type
class DbHomeDatabaseConnectionStringArgs:
    """Connection string details for a Database Home database; all fields are optional inputs."""
    def __init__(__self__, *,
                 all_connection_strings: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 cdb_default: Optional[pulumi.Input[str]] = None,
                 cdb_ip_default: Optional[pulumi.Input[str]] = None):
        # Only arguments that were actually supplied are recorded on the
        # input type; omitted (None) values are left unset.
        if all_connection_strings is not None:
            pulumi.set(__self__, "all_connection_strings", all_connection_strings)
        if cdb_default is not None:
            pulumi.set(__self__, "cdb_default", cdb_default)
        if cdb_ip_default is not None:
            pulumi.set(__self__, "cdb_ip_default", cdb_ip_default)
    @property
    @pulumi.getter(name="allConnectionStrings")
    def all_connection_strings(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        # NOTE(review): undocumented upstream; presumably all connection
        # strings keyed by name -- confirm against the provider schema.
        return pulumi.get(self, "all_connection_strings")
    @all_connection_strings.setter
    def all_connection_strings(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "all_connection_strings", value)
    @property
    @pulumi.getter(name="cdbDefault")
    def cdb_default(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "cdb_default")
    @cdb_default.setter
    def cdb_default(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cdb_default", value)
    @property
    @pulumi.getter(name="cdbIpDefault")
    def cdb_ip_default(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "cdb_ip_default")
    @cdb_ip_default.setter
    def cdb_ip_default(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cdb_ip_default", value)
@pulumi.input_type
class DbHomeDatabaseDbBackupConfigArgs:
def __init__(__self__, *,
auto_backup_enabled: Optional[pulumi.Input[bool]] = None,
auto_backup_window: Optional[pulumi.Input[str]] = None,
backup_destination_details: Optional[pulumi.Input[Sequence[pulumi.Input['DbHomeDatabaseDbBackupConfigBackupDestinationDetailArgs']]]] = None,
recovery_window_in_days: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[bool] auto_backup_enabled: (Updatable) If set to true, configures automatic backups. If you previously used RMAN or dbcli to configure backups and then you switch to using the Console or the API for backups, a new backup configuration is created and associated with your database. This means that you can no longer rely on your previously configured unmanaged backups to work.
:param pulumi.Input[str] auto_backup_window: (Updatable) Time window selected for initiating automatic backup for the database system. There are twelve available two-hour time windows. If no option is selected, a start time between 12:00 AM to 7:00 AM in the region of the database is automatically chosen. For example, if the user selects SLOT_TWO from the enum list, the | |
[]
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_SCSSD.append({S[i], C[j], S[k], S[l], D[m]})
STRAIGHT_SCSSD.append({S[9], C[10], S[11], S[12], D[0]})
# Straight lookup tables, one per suit pattern.  The original code brute-forced
# all 13**5 index combinations per table and kept only those where the five
# indices are consecutive (j == i+1, ..., m == i+4); with m < 13 that is
# exactly i in range(9).  The helper below builds the identical lists directly.
def _straights(s1, s2, s3, s4, s5):
    """Return the ten straights whose five cards are drawn, position by
    position, from the suit lists ``s1``..``s5``.

    Nine entries cover the consecutive rank runs i..i+4 for i in 0..8; the
    final entry is the wrap-around run 9,10,11,12,0 (presumably the
    10-J-Q-K-A straight with index 0 as the ace — matches the hand-written
    wrap entries in the original tables; confirm against the card lists).
    """
    runs = [
        {s1[i], s2[i + 1], s3[i + 2], s4[i + 3], s5[i + 4]}
        for i in range(9)
    ]
    runs.append({s1[9], s2[10], s3[11], s4[12], s5[0]})
    return runs
STRAIGHT_SCSCS = _straights(S, C, S, C, S)
STRAIGHT_SCSCC = _straights(S, C, S, C, C)
STRAIGHT_SCSCH = _straights(S, C, S, C, H)
STRAIGHT_SCSCD = _straights(S, C, S, C, D)
STRAIGHT_SCSHS = _straights(S, C, S, H, S)
STRAIGHT_SCSHC = _straights(S, C, S, H, C)
STRAIGHT_SCSHH = _straights(S, C, S, H, H)
STRAIGHT_SCSHD = _straights(S, C, S, H, D)
STRAIGHT_SCSDS = _straights(S, C, S, D, S)
STRAIGHT_SCSDC = _straights(S, C, S, D, C)
STRAIGHT_SCSDH = _straights(S, C, S, D, H)
STRAIGHT_SCSDD = _straights(S, C, S, D, D)
STRAIGHT_SCCSS = _straights(S, C, C, S, S)
STRAIGHT_SCCSC = _straights(S, C, C, S, C)
STRAIGHT_SCCSH = _straights(S, C, C, S, H)
STRAIGHT_SCCSD = _straights(S, C, C, S, D)
STRAIGHT_SCCCS = _straights(S, C, C, C, S)
STRAIGHT_SCCCC = _straights(S, C, C, C, C)
STRAIGHT_SCCCH = _straights(S, C, C, C, H)
STRAIGHT_SCCCD = _straights(S, C, C, C, D)
STRAIGHT_SCCHS = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_SCCHS.append({S[i], C[j], C[k], H[l], S[m]})
STRAIGHT_SCCHS.append({S[9], C[10], C[11], H[12], S[0]})
STRAIGHT_SCCHC = []
for i in range(13):
| |
'short-hm': 'C2cb', 'is_reference': False},
),
'C c 2 a' : (
(('C', 'italic'),
('c', 'italic'),
('2', 'regular'),
('a', 'italic'),
), {'itnumber': 41, 'crystal_system': 'orthorhombic', 'short-hm': 'Cc2a', 'is_reference': False},
),
'A c 2 a' : (
(('A', 'italic'),
('c', 'italic'),
('2', 'regular'),
('a', 'italic'),
), {'itnumber': 41, 'crystal_system': 'orthorhombic', 'short-hm': 'Ac2a', 'is_reference': False},
),
'F m m 2' : (
(('F', 'italic'),
('m', 'italic'),
('m', 'italic'),
('2', 'regular'),
), {'itnumber': 42, 'crystal_system': 'orthorhombic', 'short-hm': 'Fmm2', 'is_reference': True},
),
'F 2 m m' : (
(('F', 'italic'),
('2', 'regular'),
('m', 'italic'),
('m', 'italic'),
), {'itnumber': 42, 'crystal_system': 'orthorhombic', 'short-hm': 'F2mm', 'is_reference': False},
),
'F m 2 m' : (
(('F', 'italic'),
('m', 'italic'),
('2', 'regular'),
('m', 'italic'),
), {'itnumber': 42, 'crystal_system': 'orthorhombic', 'short-hm': 'Fm2m', 'is_reference': False},
),
'F d d 2' : (
(('F', 'italic'),
('d', 'italic'),
('d', 'italic'),
('2', 'regular'),
), {'itnumber': 43, 'crystal_system': 'orthorhombic', 'short-hm': 'Fdd2', 'is_reference': True},
),
'F 2 d d' : (
(('F', 'italic'),
('2', 'regular'),
('d', 'italic'),
('d', 'italic'),
), {'itnumber': 43, 'crystal_system': 'orthorhombic', 'short-hm': 'F2dd', 'is_reference': False},
),
'F d 2 d' : (
(('F', 'italic'),
('d', 'italic'),
('2', 'regular'),
('d', 'italic'),
), {'itnumber': 43, 'crystal_system': 'orthorhombic', 'short-hm': 'Fd2d', 'is_reference': False},
),
'I m m 2' : (
(('I', 'italic'),
('m', 'italic'),
('m', 'italic'),
('2', 'regular'),
), {'itnumber': 44, 'crystal_system': 'orthorhombic', 'short-hm': 'Imm2', 'is_reference': True},
),
'I 2 m m' : (
(('I', 'italic'),
('2', 'regular'),
('m', 'italic'),
('m', 'italic'),
), {'itnumber': 44, 'crystal_system': 'orthorhombic', 'short-hm': 'I2mm', 'is_reference': False},
),
'I m 2 m' : (
(('I', 'italic'),
('m', 'italic'),
('2', 'regular'),
('m', 'italic'),
), {'itnumber': 44, 'crystal_system': 'orthorhombic', 'short-hm': 'Im2m', 'is_reference': False},
),
'I b a 2' : (
(('I', 'italic'),
('b', 'italic'),
('a', 'italic'),
('2', 'regular'),
), {'itnumber': 45, 'crystal_system': 'orthorhombic', 'short-hm': 'Iba2', 'is_reference': True},
),
'I 2 c b' : (
(('I', 'italic'),
('2', 'regular'),
('c', 'italic'),
('b', 'italic'),
), {'itnumber': 45, 'crystal_system': 'orthorhombic', 'short-hm': 'I2cb', 'is_reference': False},
),
'I c 2 a' : (
(('I', 'italic'),
('c', 'italic'),
('2', 'regular'),
('a', 'italic'),
), {'itnumber': 45, 'crystal_system': 'orthorhombic', 'short-hm': 'Ic2a', 'is_reference': False},
),
'I m a 2' : (
(('I', 'italic'),
('m', 'italic'),
('a', 'italic'),
('2', 'regular'),
), {'itnumber': 46, 'crystal_system': 'orthorhombic', 'short-hm': 'Ima2', 'is_reference': True},
),
'I b m 2' : (
(('I', 'italic'),
('b', 'italic'),
('m', 'italic'),
('2', 'regular'),
), {'itnumber': 46, 'crystal_system': 'orthorhombic', 'short-hm': 'Ibm2', 'is_reference': False},
),
'I 2 m b' : (
(('I', 'italic'),
('2', 'regular'),
('m', 'italic'),
('b', 'italic'),
), {'itnumber': 46, 'crystal_system': 'orthorhombic', 'short-hm': 'I2mb', 'is_reference': False},
),
'I 2 c m' : (
(('I', 'italic'),
('2', 'regular'),
('c', 'italic'),
('m', 'italic'),
), {'itnumber': 46, 'crystal_system': 'orthorhombic', 'short-hm': 'I2cm', 'is_reference': False},
),
'I c 2 m' : (
(('I', 'italic'),
('c', 'italic'),
('2', 'regular'),
('m', 'italic'),
), {'itnumber': 46, 'crystal_system': 'orthorhombic', 'short-hm': 'Ic2m', 'is_reference': False},
),
'I m 2 a' : (
(('I', 'italic'),
('m', 'italic'),
('2', 'regular'),
('a', 'italic'),
), {'itnumber': 46, 'crystal_system': 'orthorhombic', 'short-hm': 'Im2a', 'is_reference': False},
),
'P m m m' : (
(('P', 'italic'),
('m', 'italic'),
('m', 'italic'),
('m', 'italic'),
), {'itnumber': 47, 'crystal_system': 'orthorhombic', 'short-hm': 'Pmmm', 'is_reference': True},
),
'P n n n:1' : (
(('P', 'italic'),
('n', 'italic'),
('n', 'italic'),
('n', 'italic'),
), {'itnumber': 48, 'crystal_system': 'orthorhombic', 'short-hm': 'Pnnn', 'is_reference': False},
),
'P n n n:2' : (
(('P', 'italic'),
('n', 'italic'),
('n', 'italic'),
('n', 'italic'),
), {'itnumber': 48, 'crystal_system': 'orthorhombic', 'short-hm': 'Pnnn', 'is_reference': True},
),
'P c c m' : (
(('P', 'italic'),
('c', 'italic'),
('c', 'italic'),
('m', 'italic'),
), {'itnumber': 49, 'crystal_system': 'orthorhombic', 'short-hm': 'Pccm', 'is_reference': True},
),
'P m a a' : (
(('P', 'italic'),
('m', 'italic'),
('a', 'italic'),
('a', 'italic'),
), {'itnumber': 49, 'crystal_system': 'orthorhombic', 'short-hm': 'Pmaa', 'is_reference': False},
),
'P b m b' : (
(('P', 'italic'),
('b', 'italic'),
('m', 'italic'),
('b', 'italic'),
), {'itnumber': 49, 'crystal_system': 'orthorhombic', 'short-hm': 'Pbmb', 'is_reference': False},
),
'P b a n:1' : (
(('P', 'italic'),
('b', 'italic'),
('a', 'italic'),
('n', 'italic'),
), {'itnumber': 50, 'crystal_system': 'orthorhombic', 'short-hm': 'Pban', 'is_reference': False},
),
'P b a n:2' : (
(('P', 'italic'),
('b', 'italic'),
('a', 'italic'),
('n', 'italic'),
), {'itnumber': 50, 'crystal_system': 'orthorhombic', 'short-hm': 'Pban', 'is_reference': True},
),
'P n c b:1' : (
(('P', 'italic'),
('n', 'italic'),
('c', 'italic'),
('b', 'italic'),
), {'itnumber': 50, 'crystal_system': 'orthorhombic', 'short-hm': 'Pncb', 'is_reference': False},
),
'P n c b:2' : (
(('P', 'italic'),
('n', 'italic'),
('c', 'italic'),
('b', 'italic'),
), {'itnumber': 50, 'crystal_system': 'orthorhombic', 'short-hm': 'Pncb', 'is_reference': False},
),
'P c n a:1' : (
(('P', 'italic'),
('c', 'italic'),
('n', 'italic'),
('a', 'italic'),
), {'itnumber': 50, 'crystal_system': 'orthorhombic', 'short-hm': 'Pcna', 'is_reference': False},
),
'P c n a:2' : (
(('P', 'italic'),
('c', 'italic'),
('n', 'italic'),
('a', 'italic'),
), {'itnumber': 50, 'crystal_system': 'orthorhombic', 'short-hm': 'Pcna', 'is_reference': False},
),
'P m m a' : (
(('P', 'italic'),
('m', 'italic'),
('m', 'italic'),
('a', 'italic'),
), {'itnumber': 51, 'crystal_system': 'orthorhombic', 'short-hm': 'Pmma', 'is_reference': True},
),
'P m m b' : (
(('P', 'italic'),
('m', 'italic'),
('m', 'italic'),
('b', 'italic'),
), {'itnumber': 51, 'crystal_system': 'orthorhombic', 'short-hm': 'Pmmb', 'is_reference': False},
),
'P b m m' : (
(('P', 'italic'),
('b', 'italic'),
('m', 'italic'),
('m', 'italic'),
), {'itnumber': 51, 'crystal_system': 'orthorhombic', 'short-hm': 'Pbmm', 'is_reference': False},
),
'P c m m' : (
(('P', 'italic'),
('c', 'italic'),
('m', 'italic'),
('m', 'italic'),
), {'itnumber': 51, 'crystal_system': 'orthorhombic', 'short-hm': 'Pcmm', 'is_reference': False},
),
'P m c m' : (
(('P', 'italic'),
('m', 'italic'),
('c', 'italic'),
('m', 'italic'),
), {'itnumber': 51, 'crystal_system': 'orthorhombic', 'short-hm': 'Pmcm', 'is_reference': False},
),
'P m a m' : (
(('P', 'italic'),
('m', 'italic'),
('a', 'italic'),
('m', 'italic'),
), {'itnumber': 51, 'crystal_system': 'orthorhombic', 'short-hm': 'Pmam', 'is_reference': False},
),
'P n n a' : (
(('P', 'italic'),
('n', 'italic'),
('n', 'italic'),
('a', 'italic'),
), {'itnumber': 52, 'crystal_system': 'orthorhombic', 'short-hm': 'Pnna', 'is_reference': True},
),
'P n n b' : (
(('P', 'italic'),
('n', 'italic'),
('n', 'italic'),
('b', 'italic'),
), {'itnumber': 52, 'crystal_system': 'orthorhombic', 'short-hm': 'Pnnb', 'is_reference': False},
),
'P b n n' : (
(('P', 'italic'),
('b', 'italic'),
('n', 'italic'),
('n', 'italic'),
), {'itnumber': 52, 'crystal_system': 'orthorhombic', 'short-hm': 'Pbnn', 'is_reference': False},
),
'P c n n' : (
(('P', 'italic'),
('c', 'italic'),
('n', 'italic'),
('n', 'italic'),
), {'itnumber': 52, 'crystal_system': 'orthorhombic', 'short-hm': 'Pcnn', 'is_reference': False},
),
'P n c n' : (
(('P', 'italic'),
('n', 'italic'),
('c', 'italic'),
('n', 'italic'),
), {'itnumber': 52, 'crystal_system': 'orthorhombic', 'short-hm': 'Pncn', 'is_reference': False},
),
'P n a n' : (
(('P', 'italic'),
('n', 'italic'),
('a', 'italic'),
('n', 'italic'),
), {'itnumber': 52, 'crystal_system': 'orthorhombic', 'short-hm': 'Pnan', 'is_reference': False},
),
'P m n a' : (
(('P', 'italic'),
('m', 'italic'),
('n', 'italic'),
('a', 'italic'),
), {'itnumber': 53, 'crystal_system': 'orthorhombic', 'short-hm': 'Pmna', 'is_reference': True},
),
'P n m b' : (
(('P', 'italic'),
('n', 'italic'),
('m', 'italic'),
('b', 'italic'),
), {'itnumber': 53, 'crystal_system': 'orthorhombic', 'short-hm': 'Pnmb', 'is_reference': False},
),
'P b m n' : (
(('P', 'italic'),
('b', 'italic'),
('m', 'italic'),
('n', 'italic'),
), {'itnumber': 53, 'crystal_system': 'orthorhombic', 'short-hm': 'Pbmn', 'is_reference': False},
),
'P c n m' : (
(('P', 'italic'),
('c', 'italic'),
('n', 'italic'),
('m', 'italic'),
), {'itnumber': 53, 'crystal_system': 'orthorhombic', 'short-hm': 'Pcnm', 'is_reference': False},
),
'P n c m' : (
(('P', 'italic'),
('n', 'italic'),
('c', 'italic'),
('m', 'italic'),
), {'itnumber': 53, 'crystal_system': 'orthorhombic', 'short-hm': 'Pncm', 'is_reference': False},
),
'P m a n' : (
(('P', 'italic'),
('m', 'italic'),
('a', 'italic'),
('n', 'italic'),
), {'itnumber': 53, 'crystal_system': 'orthorhombic', 'short-hm': 'Pman', 'is_reference': False},
),
'P c c a' : (
(('P', 'italic'),
('c', 'italic'),
('c', 'italic'),
('a', 'italic'),
), {'itnumber': 54, 'crystal_system': 'orthorhombic', 'short-hm': 'Pcca', 'is_reference': True},
),
'P c c b' : (
(('P', 'italic'),
('c', 'italic'),
('c', 'italic'),
('b', 'italic'),
), {'itnumber': 54, 'crystal_system': 'orthorhombic', 'short-hm': 'Pccb', 'is_reference': False},
),
'P b a a' : (
(('P', 'italic'),
('b', 'italic'),
('a', 'italic'),
('a', 'italic'),
), {'itnumber': 54, 'crystal_system': 'orthorhombic', 'short-hm': 'Pbaa', 'is_reference': False},
),
'P c a | |
import warnings
import numpy as np
from joblib import Parallel, delayed
from scipy.stats.distributions import chi2
from scipy.stats.stats import _contains_nan
from sklearn.metrics import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def contains_nan(a):  # from scipy
    """Check ``a`` for NaNs, raising (via scipy's ``nan_policy="raise"``) if any are found."""
    # NOTE(review): ``scipy.stats.stats._contains_nan`` is a private scipy
    # helper whose import path has moved in newer scipy releases — confirm the
    # pinned scipy version supports this import.
    return _contains_nan(a, nan_policy="raise")
def check_ndarray_xy(x, y):
    """Raise ``TypeError`` unless both ``x`` and ``y`` are numpy ndarrays."""
    for arr in (x, y):
        if not isinstance(arr, np.ndarray):
            raise TypeError("x and y must be ndarrays")
def convert_xy_float64(x, y):
    """Return ``x`` and ``y`` as ``float64`` numpy arrays, converting if needed."""
    return (
        np.asarray(x).astype(np.float64),
        np.asarray(y).astype(np.float64),
    )
def check_reps(reps):
    """Validate the permutation replication count.

    Raises ``ValueError`` for non-integer or negative values and emits a
    ``RuntimeWarning`` when fewer than 1000 replications are requested.
    """
    # NOTE(review): the message says "greater than 0" but reps == 0 is
    # accepted by the original condition; behavior preserved here.
    if not isinstance(reps, int) or reps < 0:
        raise ValueError("Number of reps must be an integer greater than 0.")
    # Fewer than 1000 permutations makes the p-value estimate unstable.
    if reps < 1000:
        warnings.warn(
            "The number of replications is low (under 1000), and p-value "
            "calculations may be unreliable. Use the p-value result, with "
            "caution!",
            RuntimeWarning,
        )
def _check_distmat(x, y):
"""Check if x and y are distance matrices."""
if (
not np.allclose(x, x.T)
or not np.allclose(y, y.T)
or not np.all((x.diagonal() == 0))
or not np.all((y.diagonal() == 0))
):
raise ValueError(
"x and y must be distance matrices, {is_sym} symmetric and "
"{zero_diag} zeros along the diagonal".format(
is_sym="x is not"
if not np.array_equal(x, x.T)
else "y is not"
if not np.array_equal(y, y.T)
else "both are",
zero_diag="x doesn't have"
if not np.all((x.diagonal() == 0))
else "y doesn't have"
if not np.all((y.diagonal() == 0))
else "both have",
)
)
def _check_kernmat(x, y):
"""Check if x and y are similarity matrices."""
if (
not np.allclose(x, x.T)
or not np.allclose(y, y.T)
or not np.all((x.diagonal() == 1))
or not np.all((y.diagonal() == 1))
):
raise ValueError(
"x and y must be kernel similarity matrices, "
"{is_sym} symmetric and {one_diag} "
"ones along the diagonal".format(
is_sym="x is not"
if not np.array_equal(x, x.T)
else "y is not"
if not np.array_equal(y, y.T)
else "both are",
one_diag="x doesn't have"
if not np.all((x.diagonal() == 1))
else "y doesn't have"
if not np.all((y.diagonal() == 1))
else "both have",
)
)
def compute_kern(x, y, metric="gaussian", workers=1, **kwargs):
    """
    Kernel similarity matrices for the inputs.
    Parameters
    ----------
    x,y : ndarray
        Input data matrices. ``x`` and ``y`` must have the same number of
        samples. That is, the shapes must be ``(n, p)`` and ``(n, q)`` where
        `n` is the number of samples and `p` and `q` are the number of
        dimensions. Alternatively, ``x`` and ``y`` can be kernel similarity matrices,
        where the shapes must both be ``(n, n)``.
    metric : str, callable, or None, default: "gaussian"
        A function that computes the kernel similarity among the samples within each
        data matrix.
        Valid strings for ``metric`` are, as defined in
        :func:`sklearn.metrics.pairwise.pairwise_kernels`,
        [``"additive_chi2"``, ``"chi2"``, ``"linear"``, ``"poly"``,
        ``"polynomial"``, ``"rbf"``,
        ``"laplacian"``, ``"sigmoid"``, ``"cosine"``]
        Note ``"rbf"`` and ``"gaussian"`` are the same metric.
        Set to ``None`` or ``"precomputed"`` if ``x`` and ``y`` are already similarity
        matrices. To call a custom function, either create the similarity matrix
        before-hand or create a function of the form :func:`metric(x, **kwargs)`
        where ``x`` is the data matrix for which pairwise kernel similarity matrices are
        calculated and kwargs are extra arguments to send to your custom function.
    workers : int, default: 1
        The number of cores to parallelize the p-value computation over.
        Supply ``-1`` to use all cores available to the Process.
    **kwargs
        Arbitrary keyword arguments provided to
        :func:`sklearn.metrics.pairwise.pairwise_kernels`
        or a custom kernel function.
    Returns
    -------
    simx, simy : ndarray
        Similarity matrices based on the metric provided by the user.
    """
    # A falsy metric (None, "") means the inputs are already kernel matrices.
    if not metric:
        metric = "precomputed"
    if metric in ["gaussian", "rbf"]:
        if "gamma" not in kwargs:
            # Median heuristic: gamma = 1 / (2 * median(pairwise L2 dists)**2).
            l2 = pairwise_distances(x, metric="l2", n_jobs=workers)
            n = l2.shape[0]
            # compute median of off diagonal elements
            # (the strided (n-1, n+1) view sliced [:, 1:] enumerates exactly
            # the off-diagonal entries of l2 without copying)
            med = np.median(
                np.lib.stride_tricks.as_strided(
                    l2, (n - 1, n + 1), (l2.itemsize * (n + 1), l2.itemsize)
                )[:, 1:]
            )
            # prevents division by zero when used on label vectors
            med = med if med else 1
            kwargs["gamma"] = 1.0 / (2 * (med ** 2))
        # sklearn's name for the gaussian kernel is "rbf"
        metric = "rbf"
    if callable(metric):
        # Custom kernels are not validated by sklearn, so check them here.
        simx = metric(x, **kwargs)
        simy = metric(y, **kwargs)
        _check_kernmat(
            simx, simy
        )  # verify whether matrix is correct, built into sklearn func
    else:
        simx = pairwise_kernels(x, metric=metric, n_jobs=workers, **kwargs)
        simy = pairwise_kernels(y, metric=metric, n_jobs=workers, **kwargs)
    return simx, simy
def compute_dist(x, y, metric="euclidean", workers=1, **kwargs):
    """Pairwise distance matrices for the inputs.

    Parameters
    ----------
    x,y : ndarray
        Data matrices with the same number of samples: shapes ``(n, p)`` and
        ``(n, q)``.  Alternatively, precomputed ``(n, n)`` distance matrices.
    metric : str, callable, or None, default: "euclidean"
        Either a metric name accepted by
        :func:`sklearn.metrics.pairwise_distances` (scikit-learn metrics such
        as ``"euclidean"``, ``"cityblock"``, ``"cosine"``, ``"l1"``, ``"l2"``,
        ``"manhattan"``, or any :mod:`scipy.spatial.distance` metric), a
        callable of the form ``metric(x, **kwargs)`` returning a distance
        matrix, or ``None``/``"precomputed"`` when ``x`` and ``y`` are already
        distance matrices.
    workers : int, default: 1
        The number of cores to parallelize over; ``-1`` uses all available.
    **kwargs
        Extra keyword arguments forwarded to the metric.

    Returns
    -------
    distx, disty : ndarray
        Distance matrices based on the metric provided by the user.
    """
    # A falsy metric (None, "") means the inputs are already distance matrices.
    metric = metric or "precomputed"
    if callable(metric):
        distx = metric(x, **kwargs)
        disty = metric(y, **kwargs)
        # Custom callables are not validated by sklearn, so check them here.
        _check_distmat(distx, disty)
    else:
        distx = pairwise_distances(x, metric=metric, n_jobs=workers, **kwargs)
        disty = pairwise_distances(y, metric=metric, n_jobs=workers, **kwargs)
    return distx, disty
def check_perm_blocks(perm_blocks):
    """Validate generic properties of ``perm_blocks``.

    Accepts ``None`` (returned unchanged), a list (converted to an ndarray),
    or an ndarray of at most 2 dimensions; 1-D input is reshaped to a single
    column.  Raises ``TypeError``/``ValueError`` otherwise.
    """
    if perm_blocks is None:
        return None
    if isinstance(perm_blocks, list):
        perm_blocks = np.asarray(perm_blocks)
    elif not isinstance(perm_blocks, np.ndarray):
        raise TypeError("perm_blocks must be an ndarray or list")
    if perm_blocks.ndim == 1:
        return perm_blocks[:, np.newaxis]
    if perm_blocks.ndim > 2:
        raise ValueError("perm_blocks must be of at most dimension 2")
    return perm_blocks
def check_perm_blocks_dim(perm_blocks, y):
    """Ensure ``perm_blocks`` has one row per sample of ``y``.

    Raises
    ------
    ValueError
        If the first dimensions of ``perm_blocks`` and ``y`` differ.
    """
    # Fixed the misspelling "perm_bocks" in the original error message.
    if perm_blocks.shape[0] != y.shape[0]:
        raise ValueError("perm_blocks first dimension must be same length as y")
def check_perm_block(perm_block):
    """Check one hierarchy level of perm_blocks for proper exchangeability.

    Every group at this level must have the same size for within-level
    permutation to be exchangeable.  For native-int labels, negative labels
    are excluded from the size check (they mark non-permuted groups); for any
    other label type all groups are counted.

    Returns the input unchanged, or raises ``ValueError`` on unequal sizes.
    """
    # Fixed the "hiearchy" typo and the backslash-continued f-string (which
    # embedded a run of indentation spaces inside the message); also dropped
    # the unused return_inverse output that shadowed a local name.
    if not isinstance(perm_block[0], int):
        # Non-int labels (e.g. numpy scalars, strings): count every group.
        _, counts = np.unique(perm_block, return_counts=True)
        pos_counts = counts
    else:
        # Int labels: only non-negative groups must be equally sized.
        unique, counts = np.unique(perm_block, return_counts=True)
        pos_counts = [c for c, u in zip(counts, unique) if u >= 0]
    if len(set(pos_counts)) > 1:
        raise ValueError(
            f"Exchangeable hierarchy has groups with {min(pos_counts)} to "
            f"{max(pos_counts)} elements"
        )
    return perm_block
class _PermNode(object):
"""Helper class for nodes in _PermTree."""
def __init__(self, parent, label=None, index=None):
self.children = []
self.parent = parent
self.label = label
self.index = index
def get_leaf_indices(self):
if len(self.children) == 0:
return [self.index]
else:
indices = []
for child in self.children:
indices += child.get_leaf_indices()
return indices
def add_child(self, child):
self.children.append(child)
def get_children(self):
return self.children
class _PermTree(object):
"""Tree representation of dependencies for restricted permutations"""
    def __init__(self, perm_blocks):
        """Build the restricted-permutation dependency tree.

        Each column of ``perm_blocks`` becomes one level of the tree; leaves
        are the individual samples.  ``self._index_order`` maps leaf traversal
        order back to the original sample order via argsort.
        """
        # NOTE(review): check_perm_blocks may return None (for None input);
        # this constructor assumes a non-None perm_blocks — confirm callers.
        perm_blocks = check_perm_blocks(perm_blocks)
        self.root = _PermNode(None)
        self._add_levels(self.root, perm_blocks, np.arange(perm_blocks.shape[0]))
        indices = self.root.get_leaf_indices()
        self._index_order = np.argsort(indices)
    def _add_levels(self, root: _PermNode, perm_blocks, indices):
        """Recursively attach one tree level per remaining column of ``perm_blocks``."""
        # Add new child node for each unique label, then recurse or end
        if perm_blocks.shape[1] == 0:
            # No hierarchy columns left: remaining samples become leaf nodes.
            for idx in indices:
                child_node = _PermNode(parent=root, label=1, index=idx)
                root.add_child(child_node)
        else:
            # Validate exchangeability of this level, then recurse into each
            # label group with the remaining columns.
            perm_block = check_perm_block(perm_blocks[:, 0])
            for label in np.unique(perm_block):
                idxs = np.where(perm_block == label)[0]
                child_node = _PermNode(parent=root, label=label)
                root.add_child(child_node)
                self._add_levels(child_node, perm_blocks[idxs, 1:], indices[idxs])
def _permute_level(self, node):
if len(node.get_children()) == 0:
return [node.index]
| |
import os
import sys
import pickle
import signal
import argparse
import traceback
import torch
import numpy as np
import embedding.factory as ebd
import classifier.factory as clf
import dataset.loader as loader
import train.factory as train_utils
def parse_args():
    """Build the command-line parser and parse sys.argv.

    Returns:
        argparse.Namespace with all data, augmentation, task, model and
        training options used across the project.
    """
    parser = argparse.ArgumentParser(
            description="Few Shot Text Classification with Distributional Signatures")

    # data configuration
    parser.add_argument("--data_path", type=str,
                        default="data/reuters.json",
                        help="path to dataset")
    parser.add_argument(
        "--DA_path",
        type=str,
        default="",
        help="Data augmentation file. This argument is for elong_aug and shot_aug.",
    )
    parser.add_argument(
        "--elongation",
        action="store_true",
        default=False,
        help="Add DA sentence behind each sentence.",
    )
    parser.add_argument(
        "--aug_mode",
        choices=["elongation", "shot", "task", "mix"],
        help='Choice for data augmentation method.',
    )
    parser.add_argument(
        "--task_aug_target",
        choices=["train", "train_val", "val"],
        help='Task augmentation on meta-training classes.',
        default="train",
    )
    parser.add_argument(
        "--task_aug_test",
        action="store_true",
        help="Augment test classes during task augmentation.",
        default=False,
    )
    parser.add_argument(
        "--task_aug_exclude_test_query",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--task_aug_exclude_val_query",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--test_new_only",
        action="store_true",
        help="Task augmentation on test classes but remove the old classes.",
        default=False,
    )
    parser.add_argument(
        "--test_DA",
        action="store_true",
        help="DA on test data. This argument is for elong_aug and shot_aug.",
        default=False,
    )
    parser.add_argument(
        "--use_support_DA",
        action="store_true",
        help="DA support sets. This argument is for elong_aug and shot_aug.",
        default=False,
    )
    parser.add_argument(
        "--use_query_DA",
        action="store_true",
        help="DA query sets. This argument is for elong_aug and shot_aug.",
        default=False,
    )
    parser.add_argument(
        "--DA_vocab",
        type=str,
        choices=["", "use_old", "use_DA"],
        help="Determine which vocab used for DA sentences. This argument is for elong_aug and shot_aug.",
        default="use_old",
    )
    parser.add_argument(
        "--fix_conflicts",
        action="store_true",
        help="Fix conflicts of classes during task augmentation.",
        default=False,
    )
    parser.add_argument("--dataset", type=str, default="reuters",
                        help="name of the dataset. "
                        "Options: [20newsgroup, amazon, huffpost, "
                        "reuters, rcv1, fewrel]")
    parser.add_argument("--n_train_class", type=int, default=15,
                        help="number of meta-train classes")
    parser.add_argument("--n_val_class", type=int, default=5,
                        help="number of meta-val classes")
    parser.add_argument("--n_test_class", type=int, default=11,
                        help="number of meta-test classes")

    # load bert embeddings for sent-level datasets (optional)
    parser.add_argument("--n_workers", type=int, default=10,
                        help="Num. of cores used for loading data. Set this "
                        "to zero if you want to use all the cpus.")
    # typo fix: help text previously had an unbalanced "(" -- closed it.
    parser.add_argument("--bert", default=False, action="store_true",
                        help=("set true if use bert embeddings "
                              "(only available for sent-level datasets: "
                              "huffpost, fewrel)"))
    parser.add_argument("--bert_cache_dir", default=None, type=str,
                        help=("path to the cache_dir of transformers"))
    parser.add_argument("--pretrained_bert", default=None, type=str,
                        help=("path to the pre-trained bert embeddings."))

    # task configuration
    parser.add_argument("--way", type=int, default=5,
                        help="#classes for each task")
    parser.add_argument("--shot", type=int, default=5,
                        help="#support examples for each class for each task")
    parser.add_argument("--query", type=int, default=25,
                        help="#query examples for each class for each task")

    # train/test configuration
    parser.add_argument("--train_epochs", type=int, default=1000,
                        help="max num of training epochs")
    parser.add_argument("--train_episodes", type=int, default=100,
                        help="#tasks sampled during each training epoch")
    # typo fix: "#asks" -> "#tasks"
    parser.add_argument("--val_episodes", type=int, default=100,
                        help="#tasks sampled during each validation epoch")
    parser.add_argument("--test_episodes", type=int, default=1000,
                        help="#tasks sampled during each testing epoch")
    parser.add_argument("--test_query_size", type=int, default=-1,
                        help="#query examples for each class for each task")

    # settings for finetuning baseline
    parser.add_argument("--finetune_loss_type", type=str, default="softmax",
                        help="type of loss for finetune top layer"
                        "options: [softmax, dist]")
    parser.add_argument("--finetune_maxepochs", type=int, default=5000,
                        help="number epochs to finetune each task for (inner loop)")
    parser.add_argument("--finetune_episodes", type=int, default=10,
                        help="number tasks to finetune for (outer loop)")
    parser.add_argument("--finetune_split", default=0.8, type=float,
                        help="percent of train data to allocate for val"
                        "when mode is finetune")

    # model options
    parser.add_argument("--embedding", type=str, default="avg",
                        help=("document embedding method. Options: "
                              "[avg, tfidf, meta, oracle, cnn]"))
    parser.add_argument("--classifier", type=str, default="nn",
                        help=("classifier. Options: [nn, proto, r2d2, mlp]"))
    parser.add_argument("--auxiliary", type=str, nargs="*", default=[],
                        help=("auxiliary embeddings (used for fewrel). "
                              "Options: [pos, ent]"))

    # cnn configuration
    parser.add_argument("--cnn_num_filters", type=int, default=50,
                        help="Num of filters per filter size [default: 50]")
    parser.add_argument("--cnn_filter_sizes", type=int, nargs="+",
                        default=[3, 4, 5],
                        help="Filter sizes [default: 3]")

    # nn configuration
    parser.add_argument("--nn_distance", type=str, default="l2",
                        help=("distance for nearest neighbour. "
                              "Options: l2, cos [default: l2]"))

    # proto configuration
    parser.add_argument("--proto_hidden", nargs="+", type=int,
                        default=[300, 300],
                        help=("hidden dimension of the proto-net"))

    # maml configuration
    parser.add_argument("--maml", action="store_true", default=False,
                        help=("Use maml or not. "
                              "Note: maml has to be used with classifier=mlp"))
    parser.add_argument("--mlp_hidden", nargs="+", type=int, default=[300, 5],
                        help=("hidden dimension of the proto-net"))
    parser.add_argument("--maml_innersteps", type=int, default=10)
    parser.add_argument("--maml_batchsize", type=int, default=10)
    parser.add_argument("--maml_stepsize", type=float, default=1e-1)
    parser.add_argument("--maml_firstorder", action="store_true", default=False,
                        help="truncate higher order gradient")

    # lrd2 configuration
    parser.add_argument("--lrd2_num_iters", type=int, default=5,
                        help=("num of Newton steps for LRD2"))

    # induction networks configuration
    parser.add_argument("--induct_rnn_dim", type=int, default=128,
                        help=("Uni LSTM dim of induction network's encoder"))
    parser.add_argument("--induct_hidden_dim", type=int, default=100,
                        help=("tensor layer dim of induction network's relation"))
    parser.add_argument("--induct_iter", type=int, default=3,
                        help=("num of routings"))
    parser.add_argument("--induct_att_dim", type=int, default=64,
                        help=("attention projection dim of induction network"))

    # aux ebd configuration (for fewrel)
    parser.add_argument("--pos_ebd_dim", type=int, default=5,
                        help="Size of position embedding")
    parser.add_argument("--pos_max_len", type=int, default=40,
                        help="Maximum sentence length for position embedding")

    # base word embedding
    parser.add_argument("--wv_path", type=str,
                        default="./",
                        help="path to word vector cache")
    parser.add_argument("--word_vector", type=str, default="wiki.en.vec",
                        help=("Name of pretrained word embeddings."))
    parser.add_argument("--finetune_ebd", action="store_true", default=False,
                        help=("Finetune embedding during meta-training"))

    # options for the distributional signatures
    parser.add_argument("--meta_idf", action="store_true", default=False,
                        help="use idf")
    parser.add_argument("--meta_iwf", action="store_true", default=False,
                        help="use iwf")
    parser.add_argument("--meta_w_target", action="store_true", default=False,
                        help="use target importance score")
    parser.add_argument("--meta_w_target_lam", type=float, default=1,
                        help="lambda for computing w_target")
    parser.add_argument("--meta_target_entropy", action="store_true", default=False,
                        help="use inverse entropy to model task-specific importance")
    parser.add_argument("--meta_ebd", action="store_true", default=False,
                        help="use word embedding into the meta model "
                        "(showing that revealing word identity harm performance)")

    # training options
    parser.add_argument("--seed", type=int, default=330, help="seed")
    parser.add_argument("--dropout", type=float, default=0.1, help="drop rate")
    parser.add_argument("--lr", type=float, default=1e-3, help="learning rate")
    parser.add_argument("--patience", type=int, default=20, help="patience")
    parser.add_argument("--clip_grad", type=float, default=None,
                        help="gradient clipping")
    parser.add_argument("--cuda", type=int, default=-1,
                        help="cuda device, -1 for cpu")
    parser.add_argument("--mode", type=str, default="test",
                        help=("Running mode."
                              "Options: [train, test, finetune]"
                              "[Default: test]"))
    # NOTE(review): help text looks copy-pasted; probably meant "save the best model".
    parser.add_argument("--save", action="store_true", default=False,
                        help="train the model")
    parser.add_argument("--notqdm", action="store_true", default=False,
                        help="disable tqdm")
    parser.add_argument("--result_path", type=str, default="")
    # typo fix: "pretraiend" -> "pretrained"
    parser.add_argument("--snapshot", type=str, default="",
                        help="path to the pretrained weights")

    return parser.parse_args()
def print_args(args):
    """
    Print arguments (only show the relevant arguments).

    Option groups that do not apply to the selected embedding/classifier
    (cnn_*, meta_*, proto_*, nn_*, mlp_*, pos_*, maml_*) are suppressed.
    Fix: the original checked the cnn_/proto_ prefixes twice; the duplicate
    conditions are removed (output is unchanged).
    """
    print("\nParameters:")
    for attr, value in sorted(args.__dict__.items()):
        # embedding-dependent groups
        if args.embedding != "cnn" and attr[:4] == "cnn_":
            continue
        if args.embedding != "meta" and attr[:5] == "meta_":
            continue
        # classifier-dependent groups
        if args.classifier != "proto" and attr[:6] == "proto_":
            continue
        if args.classifier != "nn" and attr[:3] == "nn_":
            continue
        if args.classifier != "mlp" and attr[:4] == "mlp_":
            continue
        # auxiliary/maml-dependent groups
        if "pos" not in args.auxiliary and attr[:4] == "pos_":
            continue
        if not args.maml and attr[:5] == "maml_":
            continue
        print("\t{}={}".format(attr.upper(), value))
    print("""
    (Credit: <NAME>) /
     _,.------....___,.' ',.-.
    ,-' _,.--' |
    ,' _.-' .
    / , ,' `
    . / / ``.
    | | . \.\\
    ____ |___._. | __ \ `.
    .' `---'' ``'-.--''` \ . \\
    . , __ ` | .
    `,' ,-'' . \ | L
    ,' ' _.' -._ / |
    ,`-. ,'. `--' >. ,' |
    . .'\\' `-' __ , ,-. / `.__.- ,'
    ||:, . ,' ; / / \ ` `. . .'/
    j|:D \ `--' ' ,'_ . . `.__, \ , /
    / L:_ | . '' :_; `.'.'
    . ''' '''''' V
    `. . `. _,.. `
    `,_ . . _,-'/ .. `,' __ `
    ) \`._ ___....----'' ,' .' \ | ' \ .
    / `. '`-.--'' _,' ,' `---' | `./ |
    . _ `'''--.._____..--' , ' |
    | .' `. `-. /-. / ,
    | `._.' `,_ ; / ,' .
    .' /| `-. . ,' , ,
    '-.__ __ _,',' '`-..___;-...__ ,.'\ ____.___.'
    `'^--'..' '-`-^-''-- `-^-'`.'''''''`.,^.`.--' mh
    """)
    if args.DA_path != '':
        print("Now using data augmentation.")
        print(f"The vocabulary used: {args.DA_vocab}.")
        if args.test_DA:
            print("Also augmenting test data.")
def set_seed(seed):
    """Seed the torch (CPU and CUDA) and numpy RNGs for reproducibility."""
    for seeder in (torch.manual_seed, torch.cuda.manual_seed, np.random.seed):
        seeder(seed)
def main(seed):
args = parse_args()
args.seed = seed
print_args(args)
set_seed(args.seed)
if args.DA_path == '' or args.DA_vocab == 'use_old':
train_data, val_data, test_data, vocab = loader.load_dataset(args)
DA_data = {"train": None, "val": None, "test": None}
if args.DA_path != '':
if not args.use_support_DA and not args.use_query_DA:
raise ValueError(
'DA should be performed for either support or query sets.'
)
if args.DA_vocab == 'use_old':
train_DA, val_DA, test_DA = loader.load_DA_data(args, vocab)
elif args.DA_vocab == 'use_DA':
train_DA, val_DA, test_DA, vocab = loader.load_DA_data(args)
train_data, val_data, test_data = loader.load_dataset(args, vocab)
DA_data = {"train": train_DA, "val": val_DA, "test": test_DA}
if args.aug_mode == 'task' and args.task_aug_exclude_test_query:
# args.val_episodes *= 2
args.test_episodes *= 2
# initialize model
model = {}
model["ebd"] = ebd.get_embedding(vocab, args)
model["clf"] = clf.get_classifier(model["ebd"].ebd_dim, args)
if args.mode == "train":
# train model on train_data, early stopping based on val_data
train_utils.train(train_data, val_data, model, args, DA_data=DA_data)
elif args.mode == "finetune":
# sample an example from each class during training
way = args.way
query = args.query
shot = args.shot
args.query = 1
args.shot= 1
args.way = args.n_train_class
train_utils.train(train_data, val_data, model, | |
m.x304 >= 1.48160454092422)
# NOTE(review): machine-generated Pyomo model section -- regenerate via the model
# generator rather than hand-editing individual coefficients.
# c425..c480: covering constraints; each requires a triple of x-variables
# (one per group, presumably item/site/resource -- TODO confirm) to exceed a
# data-dependent lower bound.
m.c425 = Constraint(expr= m.x197 + m.x269 + m.x304 >= 0.832909122935104)
m.c426 = Constraint(expr= m.x198 + m.x270 + m.x304 >= 1.16315080980568)
m.c427 = Constraint(expr= m.x199 + m.x271 + m.x304 >= 1.64865862558738)
m.c428 = Constraint(expr= m.x200 + m.x272 + m.x304 >= 0.916290731874155)
m.c429 = Constraint(expr= m.x201 + m.x273 + m.x304 >= 1.48160454092422)
m.c430 = Constraint(expr= m.x202 + m.x274 + m.x304 >= 0.0953101798043249)
m.c431 = Constraint(expr= m.x203 + m.x275 + m.x304 >= 1.50407739677627)
m.c432 = Constraint(expr= m.x204 + m.x276 + m.x304 >= 1.90210752639692)
m.c433 = Constraint(expr= m.x205 + m.x265 + m.x305 >= 0)
m.c434 = Constraint(expr= m.x206 + m.x266 + m.x305 >= 1.84054963339749)
m.c435 = Constraint(expr= m.x207 + m.x267 + m.x305 >= 1.22377543162212)
m.c436 = Constraint(expr= m.x208 + m.x268 + m.x305 >= 1.58923520511658)
m.c437 = Constraint(expr= m.x209 + m.x269 + m.x305 >= 0.993251773010283)
m.c438 = Constraint(expr= m.x210 + m.x270 + m.x305 >= 1.82454929205105)
m.c439 = Constraint(expr= m.x211 + m.x271 + m.x305 >= 1.1314021114911)
m.c440 = Constraint(expr= m.x212 + m.x272 + m.x305 >= 0.182321556793955)
m.c441 = Constraint(expr= m.x213 + m.x273 + m.x305 >= 0.832909122935104)
m.c442 = Constraint(expr= m.x214 + m.x274 + m.x305 >= 1.62924053973028)
m.c443 = Constraint(expr= m.x215 + m.x275 + m.x305 >= 1.30833281965018)
m.c444 = Constraint(expr= m.x216 + m.x276 + m.x305 >= 1.7227665977411)
m.c445 = Constraint(expr= m.x217 + m.x265 + m.x306 >= 1.16315080980568)
m.c446 = Constraint(expr= m.x218 + m.x266 + m.x306 >= 1.09861228866811)
m.c447 = Constraint(expr= m.x219 + m.x267 + m.x306 >= 1.25276296849537)
m.c448 = Constraint(expr= m.x220 + m.x268 + m.x306 >= 1.19392246847243)
m.c449 = Constraint(expr= m.x221 + m.x269 + m.x306 >= 1.02961941718116)
m.c450 = Constraint(expr= m.x222 + m.x270 + m.x306 >= 1.22377543162212)
m.c451 = Constraint(expr= m.x223 + m.x271 + m.x306 >= 1.43508452528932)
m.c452 = Constraint(expr= m.x224 + m.x272 + m.x306 >= 1.06471073699243)
m.c453 = Constraint(expr= m.x225 + m.x273 + m.x306 >= 1.82454929205105)
m.c454 = Constraint(expr= m.x226 + m.x274 + m.x306 >= 0.78845736036427)
m.c455 = Constraint(expr= m.x227 + m.x275 + m.x306 >= 1.75785791755237)
m.c456 = Constraint(expr= m.x228 + m.x276 + m.x306 >= 1.50407739677627)
m.c457 = Constraint(expr= m.x229 + m.x265 + m.x307 >= 0.741937344729377)
m.c458 = Constraint(expr= m.x230 + m.x266 + m.x307 >= 0.916290731874155)
m.c459 = Constraint(expr= m.x231 + m.x267 + m.x307 >= 1.43508452528932)
m.c460 = Constraint(expr= m.x232 + m.x268 + m.x307 >= 1.28093384546206)
m.c461 = Constraint(expr= m.x233 + m.x269 + m.x307 >= 1.30833281965018)
m.c462 = Constraint(expr= m.x234 + m.x270 + m.x307 >= 0.78845736036427)
m.c463 = Constraint(expr= m.x235 + m.x271 + m.x307 >= 1.62924053973028)
m.c464 = Constraint(expr= m.x236 + m.x272 + m.x307 >= -0.916290731874155)
m.c465 = Constraint(expr= m.x237 + m.x273 + m.x307 >= 1.41098697371026)
m.c466 = Constraint(expr= m.x238 + m.x274 + m.x307 >= 0.262364264467491)
m.c467 = Constraint(expr= m.x239 + m.x275 + m.x307 >= 1.88706964903238)
m.c468 = Constraint(expr= m.x240 + m.x276 + m.x307 >= 1.22377543162212)
m.c469 = Constraint(expr= m.x241 + m.x265 + m.x308 >= 1.25276296849537)
m.c470 = Constraint(expr= m.x242 + m.x266 + m.x308 >= 1.41098697371026)
m.c471 = Constraint(expr= m.x243 + m.x267 + m.x308 >= -0.105360515657826)
m.c472 = Constraint(expr= m.x244 + m.x268 + m.x308 >= 0.336472236621213)
m.c473 = Constraint(expr= m.x245 + m.x269 + m.x308 >= 1.28093384546206)
m.c474 = Constraint(expr= m.x246 + m.x270 + m.x308 >= 0.993251773010283)
m.c475 = Constraint(expr= m.x247 + m.x271 + m.x308 >= 1.06471073699243)
m.c476 = Constraint(expr= m.x248 + m.x272 + m.x308 >= 1.30833281965018)
m.c477 = Constraint(expr= m.x249 + m.x273 + m.x308 >= -0.22314355131421)
m.c478 = Constraint(expr= m.x250 + m.x274 + m.x308 >= 0.405465108108164)
m.c479 = Constraint(expr= m.x251 + m.x275 + m.x308 >= 1.52605630349505)
m.c480 = Constraint(expr= m.x252 + m.x276 + m.x308 >= 1.19392246847243)
# c481: single nonlinear coupling constraint -- the cost-weighted sum of
# exp(x289..x308) terms is capped at 6000.
m.c481 = Constraint(expr=250000*exp(m.x289) + 150000*exp(m.x290) + 180000*exp(m.x291) + 160000*exp(m.x292) + 120000*exp(
m.x293) + 130000*exp(m.x294) + 190000*exp(m.x295) + 140000*exp(m.x296) + 175000*exp(m.x297) +
125000*exp(m.x298) + 140000*exp(m.x299) + 220000*exp(m.x300) + 300000*exp(m.x301) + 200000*exp(
m.x302) + 120000*exp(m.x303) + 320000*exp(m.x304) + 400500*exp(m.x305) + 210000*exp(m.x306) +
310000*exp(m.x307) + 70000*exp(m.x308) <= 6000)
# c482 onward: linking constraints of the form -x_i + x_j - M*b_k >= rhs,
# in groups of 11 sharing one (M, rhs) pair; the m.b* terms look like binary
# selectors in a big-M formulation -- TODO confirm against the model generator.
m.c482 = Constraint(expr= - m.x14 + m.x278 - 4.04964438330419*m.b549 >= -1.74705929031015)
m.c483 = Constraint(expr= - m.x15 + m.x279 - 4.04964438330419*m.b550 >= -1.74705929031015)
m.c484 = Constraint(expr= - m.x16 + m.x280 - 4.04964438330419*m.b551 >= -1.74705929031015)
m.c485 = Constraint(expr= - m.x17 + m.x281 - 4.04964438330419*m.b552 >= -1.74705929031015)
m.c486 = Constraint(expr= - m.x18 + m.x282 - 4.04964438330419*m.b553 >= -1.74705929031015)
m.c487 = Constraint(expr= - m.x19 + m.x283 - 4.04964438330419*m.b554 >= -1.74705929031015)
m.c488 = Constraint(expr= - m.x20 + m.x284 - 4.04964438330419*m.b555 >= -1.74705929031015)
m.c489 = Constraint(expr= - m.x21 + m.x285 - 4.04964438330419*m.b556 >= -1.74705929031015)
m.c490 = Constraint(expr= - m.x22 + m.x286 - 4.04964438330419*m.b557 >= -1.74705929031015)
m.c491 = Constraint(expr= - m.x23 + m.x287 - 4.04964438330419*m.b558 >= -1.74705929031015)
m.c492 = Constraint(expr= - m.x24 + m.x288 - 4.04964438330419*m.b559 >= -1.74705929031015)
m.c493 = Constraint(expr= - m.x26 + m.x278 - 4.39931813178394*m.b549 >= -2.0967330387899)
m.c494 = Constraint(expr= - m.x27 + m.x279 - 4.39931813178394*m.b550 >= -2.0967330387899)
m.c495 = Constraint(expr= - m.x28 + m.x280 - 4.39931813178394*m.b551 >= -2.0967330387899)
m.c496 = Constraint(expr= - m.x29 + m.x281 - 4.39931813178394*m.b552 >= -2.0967330387899)
m.c497 = Constraint(expr= - m.x30 + m.x282 - 4.39931813178394*m.b553 >= -2.0967330387899)
m.c498 = Constraint(expr= - m.x31 + m.x283 - 4.39931813178394*m.b554 >= -2.0967330387899)
m.c499 = Constraint(expr= - m.x32 + m.x284 - 4.39931813178394*m.b555 >= -2.0967330387899)
m.c500 = Constraint(expr= - m.x33 + m.x285 - 4.39931813178394*m.b556 >= -2.0967330387899)
m.c501 = Constraint(expr= - m.x34 + m.x286 - 4.39931813178394*m.b557 >= -2.0967330387899)
m.c502 = Constraint(expr= - m.x35 + m.x287 - 4.39931813178394*m.b558 >= -2.0967330387899)
m.c503 = Constraint(expr= - m.x36 + m.x288 - 4.39931813178394*m.b559 >= -2.0967330387899)
m.c504 = Constraint(expr= - m.x38 + m.x278 - 4.19022633392538*m.b549 >= -1.88764124093134)
m.c505 = Constraint(expr= - m.x39 + m.x279 - 4.19022633392538*m.b550 >= -1.88764124093134)
m.c506 = Constraint(expr= - m.x40 + m.x280 - 4.19022633392538*m.b551 >= -1.88764124093134)
m.c507 = Constraint(expr= - m.x41 + m.x281 - 4.19022633392538*m.b552 >= -1.88764124093134)
m.c508 = Constraint(expr= - m.x42 + m.x282 - 4.19022633392538*m.b553 >= -1.88764124093134)
m.c509 = Constraint(expr= - m.x43 + m.x283 - 4.19022633392538*m.b554 >= -1.88764124093134)
m.c510 = Constraint(expr= - m.x44 + m.x284 - 4.19022633392538*m.b555 >= -1.88764124093134)
m.c511 = Constraint(expr= - m.x45 + m.x285 - 4.19022633392538*m.b556 >= -1.88764124093134)
m.c512 = Constraint(expr= - m.x46 + m.x286 - 4.19022633392538*m.b557 >= -1.88764124093134)
m.c513 = Constraint(expr= - m.x47 + m.x287 - 4.19022633392538*m.b558 >= -1.88764124093134)
m.c514 = Constraint(expr= - m.x48 + m.x288 - 4.19022633392538*m.b559 >= -1.88764124093134)
m.c515 = Constraint(expr= - m.x50 + m.x278 - 3.98613097758187*m.b549 >= -1.68354588458782)
m.c516 = Constraint(expr= - m.x51 + m.x279 - 3.98613097758187*m.b550 >= -1.68354588458782)
m.c517 = Constraint(expr= - m.x52 + m.x280 - 3.98613097758187*m.b551 >= -1.68354588458782)
m.c518 = Constraint(expr= - m.x53 + m.x281 - 3.98613097758187*m.b552 >= -1.68354588458782)
m.c519 = Constraint(expr= - m.x54 + m.x282 - 3.98613097758187*m.b553 >= -1.68354588458782)
m.c520 = Constraint(expr= - m.x55 + m.x283 - 3.98613097758187*m.b554 >= -1.68354588458782)
m.c521 = Constraint(expr= - m.x56 + m.x284 - 3.98613097758187*m.b555 >= -1.68354588458782)
m.c522 = Constraint(expr= - m.x57 + m.x285 - 3.98613097758187*m.b556 >= -1.68354588458782)
m.c523 = Constraint(expr= - m.x58 + m.x286 - 3.98613097758187*m.b557 >= -1.68354588458782)
m.c524 = Constraint(expr= - m.x59 + m.x287 - 3.98613097758187*m.b558 >= -1.68354588458782)
m.c525 = Constraint(expr= - m.x60 + m.x288 - 3.98613097758187*m.b559 >= -1.68354588458782)
m.c526 = Constraint(expr= - m.x62 + m.x278 - 3.81671282562382*m.b549 >= -1.51412773262977)
m.c527 = Constraint(expr= - m.x63 + m.x279 - 3.81671282562382*m.b550 >= -1.51412773262977)
m.c528 = Constraint(expr= - m.x64 + m.x280 - 3.81671282562382*m.b551 >= -1.51412773262977)
m.c529 = Constraint(expr= - m.x65 + m.x281 - 3.81671282562382*m.b552 >= -1.51412773262977)
m.c530 = Constraint(expr= - m.x66 + m.x282 - 3.81671282562382*m.b553 >= -1.51412773262977)
m.c531 = Constraint(expr= - m.x67 + m.x283 - 3.81671282562382*m.b554 >= -1.51412773262977)
m.c532 = Constraint(expr= - m.x68 + m.x284 - 3.81671282562382*m.b555 >= -1.51412773262977)
m.c533 = Constraint(expr= - m.x69 + m.x285 - 3.81671282562382*m.b556 >= -1.51412773262977)
m.c534 = Constraint(expr= - m.x70 + m.x286 - 3.81671282562382*m.b557 >= -1.51412773262977)
m.c535 = Constraint(expr= - m.x71 + m.x287 - 3.81671282562382*m.b558 >= -1.51412773262977)
m.c536 = Constraint(expr= - m.x72 + m.x288 - 3.81671282562382*m.b559 >= -1.51412773262977)
m.c537 = Constraint(expr= - m.x74 + m.x278 - 4.35385575770719*m.b549 >= -2.05127066471314)
m.c538 = Constraint(expr= - m.x75 + m.x279 - 4.35385575770719*m.b550 >= -2.05127066471314)
m.c539 = Constraint(expr= - m.x76 + m.x280 - 4.35385575770719*m.b551 >= -2.05127066471314)
m.c540 = Constraint(expr= - m.x77 + m.x281 - 4.35385575770719*m.b552 >= -2.05127066471314)
m.c541 = Constraint(expr= - m.x78 + m.x282 - 4.35385575770719*m.b553 >= -2.05127066471314)
m.c542 = Constraint(expr= - m.x79 + m.x283 - 4.35385575770719*m.b554 >= -2.05127066471314)
m.c543 = Constraint(expr= - m.x80 + m.x284 - 4.35385575770719*m.b555 >= -2.05127066471314)
m.c544 = Constraint(expr= - m.x81 + m.x285 - 4.35385575770719*m.b556 >= -2.05127066471314)
m.c545 = Constraint(expr= - m.x82 + m.x286 - 4.35385575770719*m.b557 >= -2.05127066471314)
m.c546 = Constraint(expr= - m.x83 + m.x287 - 4.35385575770719*m.b558 >= -2.05127066471314)
m.c547 = Constraint(expr= - m.x84 + m.x288 - 4.35385575770719*m.b559 >= -2.05127066471314)
m.c548 = Constraint(expr= - m.x86 + m.x278 - 4.20927452889608*m.b549 >= -1.90668943590203)
m.c549 = Constraint(expr= - m.x87 + m.x279 - 4.20927452889608*m.b550 >= -1.90668943590203)
m.c550 = Constraint(expr= - m.x88 + m.x280 - 4.20927452889608*m.b551 >= -1.90668943590203)
m.c551 = Constraint(expr= - m.x89 + m.x281 - 4.20927452889608*m.b552 >= -1.90668943590203)
m.c552 = Constraint(expr= - m.x90 + m.x282 - 4.20927452889608*m.b553 >= -1.90668943590203)
m.c553 = Constraint(expr= - m.x91 + m.x283 - 4.20927452889608*m.b554 >= -1.90668943590203)
m.c554 = Constraint(expr= - m.x92 + m.x284 - | |
from __future__ import print_function
"""
:py:class:`UtilsCalib`
==============================
Usage::
from Detector.UtilsCalib import proc_block, DarkProc, evaluate_limits
from Detector.UtilsCalib import tstamps_run_and_now, tstamp_for_dataset
gate_lo, gate_hi, arr_med, arr_abs_dev = proc_block(block, **kwa)
lo, hi = evaluate_limits(arr, nneg=5, npos=5, lim_lo=1, lim_hi=1000, cmt='')
ts_run, ts_now = tstamps_run_and_now(env, fmt=TSTAMP_FORMAT)
ts_run = tstamp_for_dataset(dsname, fmt=TSTAMP_FORMAT)
save_log_record_on_start(dirrepo, fname, fac_mode=0o777)
fname = find_file_for_timestamp(dirname, pattern, tstamp)
This software was developed for the SIT project.
If you use all or part of it, please give an appropriate acknowledgment.
Created on 2021-04-05 by <NAME>
"""
import logging
logger = logging.getLogger(__name__)
import os
import numpy as np
from time import time, strftime, localtime
from psana import EventId, DataSource
from PSCalib.GlobalUtils import log_rec_on_start, create_directory, save_textfile, dic_det_type_to_calib_group
from Detector.GlobalUtils import info_ndarr, divide_protected #reshape_to_2d#print_ndarr
from PSCalib.UtilsPanelAlias import alias_for_id #, id_for_alias
from PSCalib.NDArrIO import save_txt
TSTAMP_FORMAT = '%Y%m%d%H%M%S'  # compact timestamp used as the default format throughout this module, e.g. 20210405123456
def str_tstamp(fmt='%Y-%m-%dT%H:%M:%S', time_sec=None):
    """Format *time_sec* (seconds since the epoch; current time when None)
    as a local-time string using strftime pattern *fmt*.
    """
    tstruct = localtime(time_sec)
    return strftime(fmt, tstruct)
def evt_time(evt):
    """Return the time of a psana.Event as float seconds (sec + nsec*1e-9)."""
    tpair = evt.get(EventId).time()
    return float(tpair[0]) + 1e-9*float(tpair[1])
def env_time(env):
    """Return the configStore time of a psana.Env as float seconds (sec + nsec*1e-9)."""
    tpair = env.configStore().get(EventId).time()
    return float(tpair[0]) + 1e-9*float(tpair[1])
def dataset_time(dsname):
    """Return the (float) time for dataset *dsname*, e.g. "exp=xcsx35617:run=6"."""
    return env_time(DataSource(dsname).env())
def tstamps_run_and_now(env, fmt=TSTAMP_FORMAT):
    """Return the pair (run timestamp, current timestamp), both formatted with *fmt*."""
    ts_run = str_tstamp(fmt=fmt, time_sec=env_time(env))
    ts_now = str_tstamp(fmt=fmt, time_sec=None)
    logger.debug('tstamps_run_and_now:'
        + ('\n run time stamp : %s' % ts_run)\
        + ('\n current time stamp : %s' % ts_now))
    return ts_run, ts_now
def tstamp_for_dataset(dsname, fmt=TSTAMP_FORMAT):
    """Return the run timestamp string for dataset *dsname*, e.g. "exp=xcsx35617:run=6"."""
    return str_tstamp(fmt=fmt, time_sec=dataset_time(dsname))
def rundescriptor_in_dsname(dsname):
    """Return the (str) run-descriptor from *dsname*, e.g. "6-12" from
    dsname="exp=xcsx35617:run=6-12"; None when no "run=" field is present.
    """
    for field in dsname.split(':'):
        if field.startswith('run='):
            return field.split('=')[-1]
    return None
def is_single_run_dataset(dsname):
    """Return True if *dsname* specifies exactly one run (e.g. "run=6");
    False for run ranges ("run=6-12") or when no run descriptor is present.
    """
    # Fix: rundescriptor_in_dsname returns None when dsname has no "run="
    # field, and None.isdigit() raised AttributeError here -- guard it.
    rdesc = rundescriptor_in_dsname(dsname)
    return rdesc is not None and rdesc.isdigit()
def evaluate_limits(arr, nneg=5, npos=5, lim_lo=1, lim_hi=16000, cmt=''):
    """Moved from Detector.UtilsEpix10kaCalib.
    Evaluates low and high limits of the array, used to find bad pixels:
    mean -/+ nneg/npos sigmas, clipped to [lim_lo, lim_hi]; a non-positive
    nneg/npos disables the corresponding sigma cut.
    """
    ave = arr.mean()
    std = arr.std()
    lo = lim_lo if nneg <= 0 else ave - nneg*std
    hi = lim_hi if npos <= 0 else ave + npos*std
    lo = max(lo, lim_lo)
    hi = min(hi, lim_hi)
    logger.info('evaluate_limits %s: ave=%.3f std=%.3f limits low=%.3f high=%.3f'%\
                (cmt, ave, std, lo, hi)) # sys._getframe().f_code.co_name
    return lo, hi
def save_log_record_on_start(dirrepo, fname, fac_mode=0o777):
    """Append a start-of-job record to the log file under <dirrepo>/logs/."""
    rec = log_rec_on_start()
    repoman = RepoManager(dirrepo, filemode=fac_mode)
    logfname = repoman.logname_on_start(fname)
    first_write = not os.path.exists(logfname)
    save_textfile(rec, logfname, mode='a')
    # chmod only on first creation so an existing file keeps its mode
    if first_write:
        os.chmod(logfname, fac_mode)
    logger.debug('record on start: %s' % rec)
    logger.info('saved: %s' % logfname)
def save_2darray_in_textfile(nda, fname, fmode, fmt):
    """Save 2-d array *nda* to text file *fname* (np.savetxt);
    permissions *fmode* are applied only when the file is newly created.
    """
    creating = not os.path.exists(fname)
    np.savetxt(fname, nda, fmt=fmt)
    if creating:
        os.chmod(fname, fmode)
    logger.info('saved: %s' % fname)
def save_ndarray_in_textfile(nda, fname, fmode, fmt):
    """Save n-d array *nda* to text file *fname* (NDArrIO.save_txt);
    permissions *fmode* are applied only when the file is newly created.
    """
    creating = not os.path.exists(fname)
    save_txt(fname=fname, arr=nda, fmt=fmt)
    if creating:
        os.chmod(fname, fmode)
    logger.debug('saved: %s fmode: %s fmt: %s' % (fname, oct(fmode), fmt))
def file_name_prefix(panel_type, panel_id, tstamp, exp, irun, fname_aliases):
    """Return (prefix, panel_alias); prefix is '<type>_<alias>_<tstamp>_<exp>_r<run:04d>'."""
    panel_alias = alias_for_id(panel_id, fname=fname_aliases, exp=exp, run=irun)
    prefix = '%s_%s_%s_%s_r%04d' % (panel_type, panel_alias, tstamp, exp, irun)
    return prefix, panel_alias
class RepoManager(object):
    """Implements the naming structure of the calibration repository:

       <dirrepo>/<panel_id>/<constant_type>/<files-with-constants>
       <dirrepo>/logs/<year>/<log-files>
       <dirrepo>/logs/<year>_log_<fname>.txt  # file with log_rec_on_start()

       e.g.: dirrepo = '/reg/g/psdm/detector/gains/epix10k/panels'

       Usage::

         from Detector.UtilsCalib import RepoManager
         repoman = RepoManager(dirrepo)
         d = repoman.dir_logs()
         d = repoman.makedir_logs()
    """
    def __init__(self, dirrepo, **kwa):
        # Repository root (no trailing slash); modes control permissions of
        # directories/files created through this manager.
        self.dirrepo = dirrepo.rstrip('/')
        self.dirmode = kwa.get('dirmode', 0o774)
        self.filemode = kwa.get('filemode', 0o664)
        self.dirname_log = kwa.get('dirname_log', 'logs')
    def makedir(self, d):
        """Create directory *d* with mode self.dirmode and return its path."""
        create_directory(d, self.dirmode)
        return d
    def dir_in_repo(self, name):
        """Path of <dirrepo>/<name> (no creation)."""
        return os.path.join(self.dirrepo, name)
    def makedir_in_repo(self, name):
        """Create and return <dirrepo>/<name>."""
        return self.makedir(self.dir_in_repo(name))
    def dir_logs(self):
        """Path of <dirrepo>/logs (no creation)."""
        return self.dir_in_repo(self.dirname_log)
    def makedir_logs(self):
        """Create and return <dirrepo>/logs."""
        return self.makedir(self.dir_logs())
    def dir_logs_year(self, year=None):
        """Path of <dirrepo>/logs/<year>; current year when *year* is None."""
        yearstr = str_tstamp(fmt='%Y') if year is None else year
        return os.path.join(self.dir_logs(), yearstr)
    def makedir_logs_year(self, year=None):
        """Create and return <dirrepo>/logs/<year>."""
        return self.makedir(self.dir_logs_year(year))
    def dir_merge(self, dname='merge_tmp'):
        """Path of the merge working directory <dirrepo>/<dname>."""
        return self.dir_in_repo(dname)
    def makedir_merge(self, dname='merge_tmp'):
        """Create and return the merge working directory."""
        return self.makedir(self.dir_merge(dname))
    def dir_panel(self, panel_id):
        """Path of the panel directory <dirrepo>/<panel_id>."""
        return os.path.join(self.dirrepo, panel_id)
    def makedir_panel(self, panel_id):
        """Create and return the panel directory <dirrepo>/<panel_id>."""
        return self.makedir(self.dir_panel(panel_id))
    def dir_type(self, panel_id, ctype):
        """Path of <dirrepo>/<panel_id>/<ctype>, e.g. ctype='pedestals'."""
        return '{}/{}'.format(self.dir_panel(panel_id), ctype)
    def makedir_type(self, panel_id, ctype):
        """Create and return <dirrepo>/<panel_id>/<ctype>, e.g. ctype='pedestals'."""
        return self.makedir(self.dir_type(panel_id, ctype))
    def dir_types(self, panel_id, subdirs=('pedestals', 'rms', 'status', 'plots')):
        """List the constant-type subdirectory paths under <dirrepo>/<panel_id>/."""
        base = self.dir_panel(panel_id)
        return ['{}/{}'.format(base, sub) for sub in subdirs]
    def makedir_types(self, panel_id, subdirs=('pedestals', 'rms', 'status', 'plots')):
        """Create and return the constant-type subdirectories under the panel dir."""
        dirs = self.dir_types(panel_id, subdirs=subdirs)
        for d in dirs:
            self.makedir(d)
        return dirs
    def logname_on_start(self, scrname, year=None):
        """Name of the on-start log file <dirrepo>/logs/<year>_log_<scrname>.txt (creates logs dir)."""
        yearstr = str_tstamp(fmt='%Y') if year is None else str(year)
        return '{}/{}_log_{}.txt'.format(self.makedir_logs(), yearstr, scrname)
    def logname(self, scrname):
        """Name of a timestamped log file under <dirrepo>/logs/<year>/ (creates that dir)."""
        ts = str_tstamp(fmt='%Y-%m-%dT%H%M%S')
        return '{}/{}_log_{}.txt'.format(self.makedir_logs_year(), ts, scrname)
def proc_dark_block(block, **kwa):
"""Copied and modified from UtilsEpix10kaCalib
Assumes that ALL dark events are in the block - returns ALL arrays
Returns per-panel (352, 384) arrays of mean, rms, ...
block.shape = (nrecs, 352, 384), where nrecs <= 1024
"""
exp = kwa.get('exp', None)
detname = kwa.get('det', None)
int_lo = kwa.get('int_lo', 1) # lowest intensity accepted for dark evaluation
int_hi = kwa.get('int_hi', 16000) # highest intensity accepted for dark evaluation
intnlo = kwa.get('intnlo', 6.0) # intensity ditribution number-of-sigmas low
intnhi = kwa.get('intnhi', 6.0) # intensity ditribution number-of-sigmas high
rms_lo = kwa.get('rms_lo', 0.001) # rms ditribution low
rms_hi = kwa.get('rms_hi', 16000) # rms ditribution high
rmsnlo = kwa.get('rmsnlo', 6.0) # rms ditribution number-of-sigmas low
rmsnhi = kwa.get('rmsnhi', 6.0) # rms ditribution number-of-sigmas high
fraclm = kwa.get('fraclm', 0.1) # allowed fraction limit
fraclo = kwa.get('fraclo', 0.05) # fraction of statistics below low gate limit
frachi = kwa.get('frachi', 0.95) # fraction of statistics below high gate limit
frac05 = 0.5
nrecs1 = kwa.get('nrecs1', None) # number of records for the 1st stage processing
logger.debug('in proc_dark_block for exp=%s det=%s, block.shape=%s' % (exp, detname, str(block.shape)))
logger.info(info_ndarr(block, 'begin pricessing of the data block:\n ', first=100, last=105))
logger.debug('fraction of statistics for gate limits low: %.3f high: %.3f' % (fraclo, frachi))
t0_sec = time()
nrecs, ny, nx = block.shape
shape = (ny, nx)
if nrecs1 is None or nrecs1>nrecs: nrecs1 = nrecs
arr1_u16 = np.ones(shape, dtype=np.uint16)
arr1 = np.ones(shape, dtype=np.uint64)
t1_sec = time()
"""
NOTE:
- our data is uint16.
- np.median(block, axis=0) or np.quantile(...,interpolation='linear') return result rounded to int
- in order to return interpolated float values apply the trick:
data_block + random [0,1)-0.5
- this would distort data in the range [-0.5,+0.5) ADU, but would allow
to get better interpolation for median and quantile values
- use nrecs1 (< nrecs) due to memory and time consumption
"""
#blockf64 = np.random.random((nrecs1, ny, nx)) - 0.5 + block[:nrecs1,:]
#logger.debug(info_ndarr(blockf64, '1-st stage conversion uint16 to float64,'\
# +' add random [0,1)-0.5 time = %.3f sec '%\
# (time()-t1_sec), first=100, last=105))
blockf64 = block[:nrecs1,:]
#arr_med = np.median(block, axis=0)
arr_med = np.quantile(blockf64, frac05, axis=0, interpolation='linear')
arr_qlo = np.quantile(blockf64, fraclo, axis=0, interpolation='lower')
arr_qhi = np.quantile(blockf64, frachi, axis=0, interpolation='higher')
logger.debug('block array median/quantile(0.5) for med, qlo, qhi time = %.3f sec' % (time()-t1_sec))
med_med = np.median(arr_med)
med_qlo = np.median(arr_qlo)
med_qhi = np.median(arr_qhi)
arr_dev_3d = block[:,] - arr_med # .astype(dtype=np.float64)
arr_abs_dev = np.median(np.abs(arr_dev_3d), axis=0)
med_abs_dev = np.median(arr_abs_dev)
logger.info(info_ndarr(arr_med, ' arr_med[100:105] ', first=100, last=105))
logger.info(info_ndarr(arr_qlo, ' arr_qlo[100:105] ', first=100, last=105))
logger.info(info_ndarr(arr_qhi, ' arr_qhi[100:105] ', first=100, last=105))
logger.info(info_ndarr(arr_abs_dev, ' abs_dev[100:105] ', first=100, last=105))
s = 'data-block pre-processing time %.3f sec' % (time()-t0_sec)\
+ '\nresults for median over pixels intensities:'\
+ '\n %.3f fraction of the event spectrum is below %.3f ADU - pedestal estimator' % (frac05, med_med)\
+ '\n %.3f fraction of the event spectrum is below %.3f ADU - gate low limit' % (fraclo, med_qlo)\
+ '\n %.3f fraction of the event spectrum is below %.3f ADU - gate upper limit' % (frachi, med_qhi)\
+ '\n event spectrum spread median(abs(raw-med)): %.3f ADU - spectral peak width estimator' % med_abs_dev
logger.info(s)
#sys.exit('TEST EXIT')
logger.debug(info_ndarr(arr_med, '1st iteration proc time = %.3f sec arr_av1' % (time()-t0_sec)))
#gate_half = nsigma*rms_ave
#logger.debug('set gate_half=%.3f for intensity gated average, which is %.3f * | |
"""
The value module.
Stores attributes for the value instance and handles value-related
methods.
"""
import logging
import threading
import warnings
from ..connection import message_data
from ..connection import seluxit_rpc
from ..errors import wappsto_errors
def isNaN(num):
    """Return True when *num* is a NaN (NaN is the only value unequal to itself)."""
    return not (num == num)
class Value:
"""
Value instance.
Stores attributes for the value instance and handles value-related
methods.
"""
def __init__(
    self,
    parent,
    uuid,
    name,
    type_of_value,
    data_type,
    permission,
    number_max,
    number_min,
    number_step,
    number_unit,
    string_encoding,
    string_max,
    blob_encoding,
    blob_max,
    period,
    delta
):
    """
    Initialize the Value class.

    Initializes an object of value class by passing required parameters.

    Args:
        parent: Reference to a device object
        uuid: An unique identifier of a device
        name: A name of a device
        type_of_value: Determines a type of value [e.g temperature, CO2]
        data_type: Defines whether a value is string, blob or number
        permission: Defines permission [read, write, read and write]
        (if data_type is number then these parameters are relevant):
        number_max: Maximum number a value can have
        number_min: Minimum number a value can have
        number_step: Number defining a step
        number_unit: Unit in which a value should be read
        (if data_type is string then these parameters are irrelevant):
        string_encoding: A string encoding of a value
        string_max: Maximum length of string
        (if data_type is blob then these parameters are irrelevant):
        blob_encoding: A blob encoding of a value
        blob_max: Maximum length of a blob
        period: defines the time after which a value should send report
            message. Default: {None})
        delta: defines the a difference of value (default: {None})
    """
    self.wapp_log = logging.getLogger(__name__)
    self.wapp_log.addHandler(logging.NullHandler())
    self.parent = parent
    self.uuid = uuid
    self.name = name
    self.type_of_value = type_of_value
    self.data_type = data_type
    self.permission = permission
    # The value shared between state instances.
    self.number_max = number_max
    self.number_min = number_min
    self.number_step = number_step
    self.number_unit = number_unit
    self.string_encoding = string_encoding
    self.string_max = string_max
    self.blob_encoding = blob_encoding
    self.blob_max = blob_max
    # report/control State references are attached later via
    # add_report_state()/add_control_state().
    self.report_state = None
    self.control_state = None
    self.callback = None
    # Placeholder timer; __set_timer() replaces it with a real one when a
    # period is enabled.  NOTE(review): Timer(None, None) is only safe as
    # long as it is never start()ed — confirm.
    self.timer = threading.Timer(None, None)
    self.last_update_of_report = None
    # if self._invalid_step(self.number_max):
    #     msg = "Inconsistent max, min & step provided. "
    #     msg += "'(max-min)/step' do not appear to an integer-like."
    #     self.wapp_log.warning(msg)
    # Only truthy period/delta are stored; otherwise the attributes stay
    # unset and __getattr__ yields None for them.
    if period:
        self.set_period(period)
    if delta:
        self.set_delta(delta)
    msg = "Value {} debug: {}".format(name, str(self.__dict__))
    self.wapp_log.debug(msg)
def __getattr__(self, attr):  # pragma: no cover
    """
    Get attribute value.

    When trying to get value from last_controlled warning is raised about
    it being deprecated and calls get_data instead.

    Returns:
        value of get_data

    NOTE(review): for any other missing attribute this method falls
    through and implicitly returns None instead of raising
    AttributeError.  enable_period()/enable_delta() appear to rely on
    reading e.g. self.period before set_period() ever ran — confirm
    before changing this to raise.
    """
    if attr in ["last_controlled"]:
        warnings.warn("Property {} is deprecated".format(attr))
        return self.get_control_state().data
def set_period(self, period):
    """
    Set the value reporting period.

    Validates *period* (a non-negative integer number of seconds) and
    stores it on the instance; invalid input is logged and ignored.

    Args:
        period: Reporting period.
    """
    if period is None:
        self.wapp_log.warning("Period value is not provided.")
        return
    try:
        seconds = int(period)
    except ValueError:
        self.wapp_log.error("Period value must be a number.")
        return
    if seconds < 0:
        self.wapp_log.warning("Period value must not be lower then 0.")
        return
    self.period = seconds
def enable_period(self):
    """
    Enable the Period handling if period was set.

    Starts the report timer when both a period and a report state are
    present; otherwise only logs why the timer cannot run.
    """
    if self.period is None:
        self.wapp_log.debug("Period was not set.")
        return
    if self.get_report_state() is None:
        self.wapp_log.warning("Cannot set the period for this value.")
        return
    self.__set_timer()
    self.wapp_log.debug("Period successfully set.")
def __set_timer(self):
    """
    Set timer.

    Stop previous timer and sets new one if period value is not None.
    """
    # Cancel whatever timer is pending before scheduling a new one.
    self.timer.cancel()
    if self.period is not None:
        self.timer_elapsed = False
        # One-shot timer; __timer_done re-arms it, producing periodic ticks.
        self.timer = threading.Timer(self.period, self.__timer_done)
        self.timer.start()
def __timer_done(self):
    """Timer callback: re-arm the period timer and flag that it elapsed."""
    # Re-arm first so the period keeps ticking, then mark it elapsed.
    self.__set_timer()
    self.timer_elapsed = True
    # self.handle_refresh() # ERROR: Trickered double sampling. Text needed.
def set_delta(self, delta):
    """
    Set the delta to report between.

    Validates *delta* (a non-negative number) and stores it — but only
    when this value is of number type.  Invalid input is logged and
    ignored.

    Args:
        delta: Range to report between.
    """
    if delta is None:
        self.wapp_log.warning("Delta value is not provided.")
        return
    try:
        rng = float(delta)
    except ValueError:
        self.wapp_log.error("Delta value must be a number")
        return
    if rng < 0:
        self.wapp_log.warning("Delta value must not be lower then 0.")
        return
    if self.__is_number_type():
        self.delta = rng
def enable_delta(self):
    """
    Enable the Delta handling, if delta is set.

    Currently this only reports whether delta handling is able to work
    for this value; it performs no other action.
    """
    if self.delta is None:
        self.wapp_log.debug("Delta was not set.")
        return
    if self.get_report_state():
        self.wapp_log.debug("Delta successfully set.")
    else:
        self.wapp_log.warning("Cannot set the delta for this value.")
def get_parent_device(self):  # pragma: no cover
    """
    Retrieve parent device reference.

    Returns:
        The Device instance that owns this Value.
    """
    return self.parent
def add_report_state(self, state):
    """
    Set report state reference to the value list.

    Stores *state* as this value's report state and (re)enables the
    period and delta handling that depend on it.

    Args:
        state: Reference to instance of State class.
    """
    self.report_state = state
    self.enable_period()
    self.enable_delta()
    self.wapp_log.debug(
        "Report state {} has been added.".format(state.parent.name))
def add_control_state(self, state):
    """
    Set control state reference to the value list.

    Stores *state* as this value's control state.

    Args:
        state: Reference to instance of State class.
    """
    self.control_state = state
    self.wapp_log.debug(
        "Control state {} has been added".format(state.parent.name))
def get_report_state(self):
    """
    Retrieve child report state reference.

    Returns:
        The report State instance, or None (with a warning) when absent.
    """
    state = self.report_state
    if state is None:
        self.wapp_log.warning(
            "Value {} has no report state.".format(self.name))
    return state
def get_control_state(self):
    """
    Retrieve child control state reference.

    Returns:
        The control State instance, or None (with a warning) when absent.
    """
    state = self.control_state
    if state is None:
        self.wapp_log.warning(
            "Value {} has no control state.".format(self.name))
    return state
def set_callback(self, callback):
    """
    Set the callback.

    Args:
        callback: Callback reference.

    Returns:
        True when the callback was stored.

    Raises:
        CallbackNotCallableException: Custom exception to signify invalid
            callback.
    """
    if callable(callback):
        self.callback = callback
        self.wapp_log.debug("Callback {} has been set.".format(callback))
        return True
    msg = "Callback method should be a method"
    self.wapp_log.error("Error setting callback: {}".format(msg))
    raise wappsto_errors.CallbackNotCallableException
def _validate_value_data(self, data_value, err_msg=None):
# TODO(MBK): Need refactoring, so it also nicely can be used for
# control validation, in 'receive_Data/incoming_put'
if err_msg is None:
err_msg = []
if self.__is_number_type():
try:
if self._outside_range(data_value):
msg = "Invalid number. Range: {}-{}. Yours is: {}".format(
self.number_min,
self.number_max,
data_value
)
err_msg.append(msg)
self.wapp_log.warning(msg)
if self._invalid_step(data_value):
msg = "Invalid Step. Step: {}. Min: {}. Value: {}".format(
self.number_step,
self.number_min,
data_value
)
err_msg.append(msg)
self.wapp_log.warning(msg)
return str(data_value)
except ValueError:
msg = "Invalid type of value. Must be a number: {}"
msg = msg.format(data_value)
err_msg.append(msg)
self.wapp_log.error(msg)
return "NA"
elif self.__is_string_type():
if self.string_max is None:
return data_value
if len(str(data_value)) <= int(self.string_max):
return data_value
msg = "Value for '{}' not in correct range: {}."
msg = msg.format(self.name, self.string_max)
err_msg.append(msg)
self.wapp_log.warning(msg)
elif self.__is_blob_type():
if self.blob_max is None:
return data_value
if len(str(data_value)) <= int(self.blob_max):
return data_value
msg = "Value for '{}' not in correct range: {}."
msg = msg.format(self.name, self.blob_max)
err_msg.append(msg)
self.wapp_log.warning(msg)
else:
msg = "Value type '{}' is invalid".format(self.date_type)
err_msg.append(msg)
self.wapp_log.error(msg)
def _outside_range(self, value):
"""
Check weather or not the value are outside range.
Args:
value: The value to be checked.
Returns:
True, if outside range.
False if inside range.
"""
return not (self.number_min <= float(value) <= self.number_max)
def _invalid_step(self, value):
"""
Check weather or not the value are invalid step size.
Args:
value: The value to be checked.
Returns:
True, if invalid step size.
False if valid step size.
"""
x = (float(value) - self.number_min) / self.number_step
return not (abs(round(x) - x) <= 1e-9)
def update(self, data_value, timestamp=None):
    """
    Update value.

    Feeds *data_value* through delta/period bookkeeping, validates it,
    stamps the report state and queues a SEND_REPORT message.

    Args:
        data_value: the new value.
        timestamp: time of action.

    Returns:
        False when the value has no report state (write only); otherwise
        None after the report has been queued.
    """
    self._update_delta_period_values(data_value)
    if timestamp is None:
        timestamp = seluxit_rpc.time_stamp()
    state = self.get_report_state()
    if state is None:
        self.wapp_log.warning("Value is write only.")
        return False
    self._validate_value_data(data_value)
    state.timestamp = timestamp
    report = message_data.MessageData(
        message_data.SEND_REPORT,
        data=str(data_value),
        network_id=state.parent.parent.parent.uuid,
        device_id=state.parent.parent.uuid,
        value_id=state.parent.uuid,
        state_id=state.uuid,
        verb=message_data.PUT
    )
    # self.parent.parent.conn.send_data.send_report(msg)
    self.parent.parent.conn.sending_queue.put(report)
def _update_delta_period_values(self, data_value):
if self.period | |
from PIL import Image
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtGui import QPixmap
from functools import partial
from pdf2image import convert_from_path, pdfinfo_from_path
from scripts.database_stuff import DB, sqlite
from scripts.tricks import tech as t
from scripts.widgets import DevLabel, PDFWidget
from zipfile import BadZipFile, ZipFile
import concurrent.futures
import math
import os
import platform
import psutil
import shutil
import string
import sys
import time
FIGURE_HEIGHT = 300
TITLE = 'PDF to WEBP-compressed CBZ v0.3 build:776'
def pdf_to_jpeg(job):
    """
    thread job that requires a starting and ending index

    :param job: tuple (source_file, output_folder, first_page, last_page,
                output_file, poppler_path)
    :return: list with paths as strings
    """
    source_file, output_folder, first, last, prefix, poppler_path = job
    return convert_from_path(
        source_file,
        dpi=200,
        first_page=first,
        last_page=last,
        fmt='jpeg',
        output_file=prefix,
        output_folder=output_folder,
        paths_only=True,
        jpegopt=dict(quality=100, optimize=True),
        poppler_path=poppler_path,
    )
def convert_files_to_jpeg(joblist, inputpath, tmp_jpeg_folder, poppler_path=None):
    """
    if tmp_folder goes below 100mb False is returned

    :param joblist: dictionary with letters as keys containing list indexes (int)
    :param inputpath: string to pdf file-path
    :param tmp_jpeg_folder: string
    :return: list with image paths, or False if hdd full
    """
    jobs = [
        (inputpath, tmp_jpeg_folder, pages[0], pages[-1], letter, poppler_path,)
        for letter, pages in joblist.items()
    ]
    image_list = []
    with concurrent.futures.ProcessPoolExecutor() as executor:
        for paths in executor.map(pdf_to_jpeg, jobs):
            image_list.extend(paths)
            # Abort if the working drive runs low on space.
            _, __, tmp_free = shutil.disk_usage(tmp_jpeg_folder)
            if (tmp_free / 1000000) < 100:
                return False
    return sorted(image_list)
def jpeg_to_webp(job):
    """
    jpeg to webp

    :param job: tuple -> 0:jpeg_file_path, 1:save_webp_file_path,
                2:(unused), 3:webp_quality, 4:resize_4k flag
    :return: dict with 'source' and 'destination' paths
    """
    source_path, destination_path, _, webp_quality, resize_4k = job
    image = Image.open(source_path)
    if resize_4k and image.size[0] > 3840:
        image_size = 3840, round(image.size[1] * (3840 / image.size[0]))
        # BUGFIX: Image.ANTIALIAS was deprecated and removed in Pillow 10;
        # Image.LANCZOS is the same resampling filter under its real name.
        image.thumbnail(image_size, Image.LANCZOS)
    image.save(destination_path, 'webp', method=6, quality=webp_quality)
    return dict(source=source_path, destination=destination_path)
def convert_files_to_webp(joblist):
    """
    Convert JPEG files to WEBP across worker processes; each JPEG is
    deleted once its WEBP replacement exists and is non-empty.

    :param joblist: list of jpeg_to_webp job tuples
    :return: None
    """
    # The original kept an unused 'count' accumulator and zipped the
    # joblist against its own map results; iterating the results alone
    # is equivalent.
    with concurrent.futures.ProcessPoolExecutor() as executor:
        for rv in executor.map(jpeg_to_webp, joblist):
            # Only remove the source once the output was actually written.
            if rv and os.path.getsize(rv['destination']) > 0:
                os.remove(rv['source'])
def recompress_fucntion(destination_file, tmp_folder):
    """
    compresses the files from tmp_folder into file.cbz

    :param destination_file: string new file.zip
    :param tmp_folder: string
    :return: bool
    """
    def confirm_new_files(ziplocation):
        """
        test if the file.zip/cbz has the same
        amount of files as tmp_folder

        :param ziplocation: string
        :return: bool
        """
        try:
            zf = ZipFile(ziplocation)
            filecontents = list(zf.namelist())
        except BadZipFile:
            # Archive is unreadable: drop it and report failure.
            os.remove(ziplocation)
            print('OUTPUT FILE BROKEN')
            return False
        for walk in os.walk(tmp_folder):
            files = [walk[0] + '/' + x for x in walk[2]]
            if len(filecontents) < len(files):
                os.remove(ziplocation)
                shutil.rmtree(tmp_folder)
                print('FILES MISSING')
                return False
            # NOTE(review): the break makes this check only the top level
            # of tmp_folder — confirm subfolders are never produced.
            break
        return True
    # shutil.make_archive appends '.zip' itself, so strip the '.cbz' suffix.
    zipfile = destination_file[0:-(len('.cbz'))]
    # Flush pending writes to disk before and after archiving (POSIX only).
    if platform.system() != "Windows":
        os.sync()
    shutil.make_archive(zipfile, 'zip', tmp_folder)
    zipfile += '.zip'
    if platform.system() != "Windows":
        os.sync()
    if not confirm_new_files(zipfile):
        return False
    if not os.path.exists(zipfile) or os.path.getsize(zipfile) == 0:
        print('WRITE OUTPUT ERROR')
        if os.path.exists(zipfile):
            os.remove(zipfile)
        return False
    # Rename the verified .zip to the final .cbz destination.
    shutil.move(zipfile, destination_file)
    return True
class PDF2CBZmain(QtWidgets.QMainWindow):
def __init__(self):
    """Build the main window: folder inputs, option checkboxes, canvas and persisted settings."""
    super(PDF2CBZmain, self).__init__()
    self.setStyleSheet('background-color: rgb(20,20,20) ; color: rgb(255,255,255)')
    if 'devmode' in sys.argv:
        self.dev_mode = True
    else:
        self.dev_mode = False
    self.setFixedSize(1800, 1000)
    # Widget registry per category (populated by the drawing methods).
    self.widgets = dict(main=[], pdf=[], cbz=[])
    # wt/ht act as a running layout cursor (x, y offsets).
    self.wt = 3
    self.ht = 3
    self.reset_ht_wt()
    # --- source folder input ---
    self.from_dir = QtWidgets.QPlainTextEdit(self, toolTip='SOURCE FOLDER')
    self.from_dir.setStyleSheet('background-color: rgb(30,30,30) ; color: rgb(235,235,235)')
    self.from_dir.setGeometry(self.wt, self.ht, int(self.width() * 0.4), 30)
    self.ht += self.from_dir.height() + 3
    self.from_dir.textChanged.connect(self.from_dir_changed)
    # --- destination folder input ---
    self.to_dir = QtWidgets.QPlainTextEdit(self, toolTip='DESTINATION FOLDER')
    self.to_dir.setStyleSheet('background-color: rgb(30,30,30) ; color: rgb(235,235,235)')
    self.to_dir.setGeometry(self.wt, self.ht, int(self.width() * 0.4), 30)
    self.ht += self.to_dir.height() + 3
    self.to_dir.textChanged.connect(self.to_dir_changed)
    # --- canvas frame that holds the PDF/CBZ widgets ---
    self.canvas = QtWidgets.QFrame(self)
    self.canvas.setStyleSheet('background-color: rgb(25,25,25)')
    self.canvas.setGeometry(self.wt, self.ht, self.width() - self.wt * 2, self.height() - self.ht - 5)
    # --- webp quality label + slider (value persisted in settings) ---
    self.webp_label = QtWidgets.QLabel(self)
    self.webp_label.setStyleSheet('background-color: rgb(30,30,30) ; color: rgb(235,235,235) ; font: 14pt')
    self.webp_label.move(self.from_dir.geometry().right() + 3, self.from_dir.geometry().top())
    self.webp_label.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
    self.webp_label.setFixedWidth(200)
    DevLabel(self.webp_label, self)
    webp_value = t.retrieve_setting(DB.settings.webp_slider)
    if not webp_value:
        webp_value = 70
    self.webp_slider = QtWidgets.QSlider(self, minimum=0, maximum=100, value=webp_value)
    self.webp_slider.setFixedWidth(self.webp_label.width())
    self.webp_slider.move(self.webp_label.geometry().left(), self.webp_label.geometry().bottom() + 3)
    self.webp_slider.setOrientation(1)
    self.webp_slider.valueChanged.connect(self.slider_changed)
    self.slider_changed()
    # --- option checkboxes (each persisted in settings) ---
    self.continous_convertion = QtWidgets.QCheckBox(self, text='CONTINOUS')
    self.continous_convertion.setToolTip('Continous conversions, start another once current is completed!')
    self.continous_convertion.move(self.webp_label.geometry().right() + 3, 3)
    self.continous_convertion.setStyleSheet('background-color: rgb(30,30,30) ; color: rgb(235,235,235)')
    rv = t.retrieve_setting(DB.settings.continous)
    if rv:
        self.continous_convertion.setChecked(rv)
    self.continous_convertion.stateChanged.connect(partial(
        self.save_setting, self.continous_convertion, 'continous'))
    self.delete_source_pdf = QtWidgets.QCheckBox(self, text='DELETE PDF')
    self.delete_source_pdf.move(self.continous_convertion.geometry().right() + 3, 3)
    self.delete_source_pdf.setToolTip('When jobs complete, the PDF source will be permanently deleted!')
    self.delete_source_pdf.setStyleSheet('background-color: rgb(30,30,30) ; color: rgb(235,235,235)')
    rv = t.retrieve_setting(DB.settings.del_source)
    if rv:
        self.delete_source_pdf.setChecked(rv)
    self.delete_source_pdf.stateChanged.connect(partial(
        self.save_setting, self.delete_source_pdf, 'del_source'))
    self.pdf_threads = QtWidgets.QCheckBox(self, text='PDF THREADS', checked=True)
    self.pdf_threads.setFixedWidth(self.pdf_threads.width() + 10)
    self.pdf_threads.move(self.delete_source_pdf.geometry().right() + 3, 3)
    self.pdf_threads.setToolTip('Checked == FASTER')
    self.pdf_threads.setStyleSheet('background-color: rgb(30,30,30) ; color: rgb(235,235,235)')
    self.wepb_threads = QtWidgets.QCheckBox(self, text='WEBP THREADS', checked=True)
    self.wepb_threads.setFixedWidth(self.wepb_threads.width() + 20)
    self.wepb_threads.move(self.pdf_threads.geometry().right() + 3, 3)
    self.wepb_threads.setToolTip('Checked == FASTER')
    self.wepb_threads.setStyleSheet('background-color: rgb(30,30,30) ; color: rgb(235,235,235)')
    self.check_4k = QtWidgets.QCheckBox(self, text="RESIZE < 4K")
    self.check_4k.setStyleSheet('background-color: rgb(30,30,30) ; color: rgb(235,235,235)')
    self.check_4k.setToolTip('Images wider than 3840 pixels will be shrunk to 3840 pixels')
    self.check_4k.move(self.wepb_threads.geometry().right() + 3, 3)
    rv = t.retrieve_setting(DB.settings.resize_4k)
    if rv:
        self.check_4k.setChecked(rv)
    # BUGFIX: this handler previously passed self.delete_source_pdf, so the
    # 'resize_4k' setting was saved from the DELETE PDF checkbox's state
    # (copy/paste slip); save the RESIZE checkbox's own state instead.
    self.check_4k.stateChanged.connect(partial(
        self.save_setting, self.check_4k, 'resize_4k'))
    # --- action buttons ---
    self.btn_more = QtWidgets.QPushButton(self, text='NEXT')
    self.btn_more.move(self.check_4k.geometry().right() + 3, 3)
    self.btn_more.setFixedWidth(int(self.btn_more.width() * 0.7))
    self.btn_more.clicked.connect(self.draw_more_pdf_files)
    self.btn_refresh = QtWidgets.QPushButton(self, text='REFRESH')
    self.btn_refresh.move(self.btn_more.geometry().right() + 3, 3)
    self.btn_refresh.setFixedWidth(int(self.btn_refresh.width() * 0.7))
    self.btn_refresh.clicked.connect(self.from_dir_changed)
    # --- poppler path input ---
    tt = 'example -> /home/user/poppler-0.68.0/bin\n\nWindows download: http://blog.alivate.com.au/poppler-windows/'
    self.poppler_path = QtWidgets.QPlainTextEdit(self, toolTip=tt)
    self.poppler_path.setStyleSheet('background-color: rgb(30,30,30) ; color: rgb(235,235,235)')
    x = self.webp_slider.geometry().right() + 3
    y = self.webp_slider.geometry().top()
    w = self.btn_refresh.geometry().right() - self.continous_convertion.geometry().left()
    h = self.webp_label.height()
    self.poppler_path.setGeometry(x, y, w, h)
    self.poppler_path.textChanged.connect(self.poppler_path_changed)
    # Background hint labels drawn inside each text field.
    cyd = {
        'PDF SOURCE FOLDER': self.from_dir,
        'CBZ DESTINATION FOLDER': self.to_dir,
        'POPPLER PATH': self.poppler_path,
    }
    for i, j in cyd.items():
        label = QtWidgets.QLabel(j, text=i, alignment=QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        label.setStyleSheet('background-color: rgba(0,0,0,0) ; color: gray ; font: 10pt')
        label.setGeometry(0, 0, j.width() - 20, j.height())
        label.lower()
    self.deside_figure_size()
    # Optional window background image.
    if os.path.exists('background.webp'):
        bg = QtWidgets.QLabel(self)
        bg.setGeometry(0, 0, self.width(), self.height())
        pixmap = QPixmap('background.webp').scaled(bg.width(), bg.height())
        bg.setPixmap(pixmap)
        bg.lower()
    self.show()
    # Restore persisted text fields from settings.
    setting_plaintext_label = {
        DB.settings.source_path: self.from_dir,
        DB.settings.destination_path: self.to_dir,
        DB.settings.poppler_path: self.poppler_path,
    }
    for key, label in setting_plaintext_label.items():
        rv = t.retrieve_setting(key)
        if rv:
            label.setPlainText(rv.rstrip('\n'))
    self.setWindowTitle(TITLE)
def show_hdd_spaces(self):
    """Refresh the window title with working/destination drive usage, at most once per second."""
    now = int(time.time())
    if 'space_timer' not in dir(self):
        # First call: backdate so the update below always runs.
        self.space_timer = now - 100
    if now - self.space_timer < 1:
        return
    self.space_timer = now
    title = TITLE
    base_dir = t.tmp_folder(create_dir=False, return_base=True)
    if os.path.exists(base_dir):
        total, used, free = shutil.disk_usage(base_dir)
        title += f" | WORKING DIR SIZE: {int(total/1000000)}mb | "
        title += f"USED: {int(used/1000000)}mb | FREE: {int(free/1000000)}mb"
    to_dir = self.to_dir.toPlainText().strip()
    if os.path.exists(to_dir):
        total, used, free = shutil.disk_usage(to_dir)
        title += f" | DESTINATION DIR SIZE: {int(total/1000000)}mb | "
        title += f"USED: {int(used/1000000)}mb | FREE: {int(free/1000000)}mb"
    self.setWindowTitle(title)
def get_poppler_path(self):
    """
    Return the poppler binary folder from the UI field, or None when the
    field is empty or the path does not exist.
    """
    poppler_path = self.poppler_path.toPlainText().strip()
    # 'not poppler_path' already covers the empty string; the former extra
    # 'len(poppler_path) < 1' test was redundant and has been dropped.
    if not poppler_path or not os.path.exists(poppler_path):
        return None
    return poppler_path
def convert_pdf_to_images(self, inputpath, outputpath, widget):
    """
    if large pdf job is spread across cpu's else just one cpu-job
    extract jpeg files into a tmp_folder and then convert them to webp

    :param inputpath: string
    :param outputpath: string
    :param widget: widget whose status_label shows per-phase progress
    :return: dictionary, or False when extraction failed
    """
    tmp_jpeg_folder = t.tmp_folder(inputpath, hash=True, delete=True)
    tmp_folder = t.tmp_folder(outputpath, hash=True, delete=True)
    image_list = []
    poppler_path = self.get_poppler_path()
    widget.status_label.setText('EXTRACTING')
    if self.pdf_threads.isChecked():
        rv = self.decide_pages_per_cpu(inputpath)
        if rv:
            image_list = convert_files_to_jpeg(
                rv, inputpath, tmp_jpeg_folder, poppler_path)
    if not image_list:
        # Single-process fallback: extract the whole document at once.
        image_list = pdf_to_jpeg((inputpath, tmp_jpeg_folder, None, None, None, poppler_path,))
    if not image_list:
        return False
    jobs = []
    for count, jpeg_image_path in enumerate(image_list):
        filename = t.zero_prefiller(count, lenght=5)
        # BUGFIX: the zero-padded page name was computed but never used, so
        # every page was written to the same webp path and overwrote the
        # previous one; use the per-page filename instead.
        webp_save_path = f'{tmp_folder}/{filename}.webp'
        webp_save_path = os.path.abspath(os.path.expanduser(webp_save_path))
        jobs.append(
            (jpeg_image_path, webp_save_path, outputpath, self.webp_slider.value(), self.check_4k.isChecked(),)
        )
    widget.status_label.setText('CONVERTING')
    if not self.wepb_threads.isChecked():
        # One job at a time keeps only a single worker process busy.
        for job in jobs:
            convert_files_to_webp([job])
    else:
        convert_files_to_webp(jobs)
    widget.status_label.setText('RECOMPRESSING')
    rv = recompress_fucntion(outputpath, tmp_folder)
    return dict(status=rv, tmp_webp_folder=tmp_folder, tmp_jpeg_folder=tmp_jpeg_folder, outputpath=outputpath)
def decide_pages_per_cpu(self, inputpath):
    """
    counts physical cores and calculates a fair amount of images per core, a
    dictionary is created with letter (key) that will be used to save the temporary
    jpeg files. If the pdf has to less files, then job ignores multiple cpu's

    :param inputpath: string
    :return: dictionary or bool
    """
    def correct_rvdict(rv):
        """
        rv['a'] cannot be less than 2 (begin and end)
        this investegates, interfers and corrects that
        """
        # 'a' is the last bucket to be filled; an empty or single-page
        # bucket cannot describe a (first_page, last_page) range.
        if rv['a'] == []:
            rv.pop('a')
        elif rv['a'] == [0]:
            # Move the lone page into bucket 'b' and drop 'a'.
            rv['b'].append(0)
            rv.pop('a')
        for i in rv:
            rv[i].sort()
    page_count = self.get_page_count_for_pdf(inputpath)
    cpu_count = psutil.cpu_count(logical=False)
    alphabet = list(string.ascii_lowercase)
    # Bucket names are single letters, so cap the worker count below 26.
    if cpu_count >= len(alphabet):
        cpu_count = len(alphabet) - 1
    # Only parallelize when there are at least ~3 pages per core.
    if page_count and page_count / 3 > cpu_count:
        rv = {}
        pages_per_cpu = math.ceil(page_count / cpu_count)
        pages_per_cpu = int(pages_per_cpu)
        # Fill buckets from the last letter down, handing out page numbers
        # from page_count down to 0.
        for c in range(cpu_count - 1, -1, -1):
            letter = alphabet[c]
            rv[letter] = []
            for cc in range(pages_per_cpu):
                if page_count < 0:
                    break
                rv[letter].append(page_count)
                page_count -= 1
        correct_rvdict(rv)
        return rv
    return False
def deside_figure_size(self):
"""
calculates how large widgets should be to fill the self.canvas (frame)
"""
# HEIGHT >
self.figure_height = FIGURE_HEIGHT
av = self.canvas.height() / FIGURE_HEIGHT
left_over = self.canvas.height() - (FIGURE_HEIGHT * math.floor(av))
if left_over > av:
self.figure_height += math.floor(left_over / math.floor(av))
self.figure_height = int(self.figure_height)
self.figure_height -= 3 # gives geometry.height() breathing room
# WIDTH >
self.figure_width = self.figure_height * 0.6
av = math.floor(self.canvas.width() / self.figure_width)
left_over = self.canvas.width() - (self.figure_width * math.floor(av))
if | |
# Repository: kjdoore/spec_map_analysis
def gauss_func(x, a0, a1, a2, a3=None, a4=None, a5=None):
    """
    Defines a function that consists of a Gaussian with the optional
    addition of a polynomial up to degree 2.

    Parameters
    ----------
    x : 1-D array_like
        The independent variable data, of length M, where the function
        is to be evaluated.
    a0 : scalar
        The parameter that gives the height of the Gaussian in the
        function given by the equation:
        f = a0*exp(-((x-a1)/a2)^2/2) + a3 + a4*x + a5*x^2
    a1 : scalar
        The parameter that gives the location of the center of the
        Gaussian in the above equation.
    a2 : scalar
        The parameter that gives the sigma (width) of the Gaussian in
        the above equation.  A non-positive width is treated as
        "no Gaussian" and only the polynomial part is returned.
    a3 : scalar, optional
        The constant polynomial term.  Treated as 0 when omitted but a
        higher-order term is supplied.
    a4 : scalar, optional
        The linear polynomial term.  Treated as 0 when omitted but a
        higher-order term is supplied.
    a5 : scalar, optional
        The quadratic polynomial term.  Omitting it excludes the
        quadratic term from the function.

    Returns
    -------
    fx : 1-D array
        The dependent variable data, of length M, as determined from
        the above function for each value in x
    """
    import numpy as np

    x = np.asarray(x, dtype=float)
    # The highest polynomial term supplied determines how many terms are
    # used, matching the original nterms logic.
    if a5 is not None:
        nterms = 6
    elif a4 is not None:
        nterms = 5
    elif a3 is not None:
        nterms = 4
    else:
        nterms = 3
    # BUGFIX: coerce unspecified lower-order coefficients to 0 so that
    # e.g. passing only a4 no longer raises TypeError (None + array).
    c3 = 0.0 if a3 is None else a3
    c4 = 0.0 if a4 is None else a4
    c5 = 0.0 if a5 is None else a5
    # Check to make sure the width is non-zero and positive.
    # If it is not, then assume there is no Gaussian.
    if a2 > 0:
        z = (x - a1) / a2
        fx = a0 * np.exp(-z ** 2 / 2)
    else:
        # BUGFIX: np.repeat(0, ...) produced int zeros; use float zeros
        # for a dtype consistent with the Gaussian branch.
        fx = np.zeros(x.size)
    # Generate the resulting output
    if nterms >= 4:
        fx = fx + c3
    if nterms >= 5:
        fx = fx + c4 * x
    if nterms == 6:
        fx = fx + c5 * x ** 2
    return fx
def gaussfunc_jacobian(x, a0, a1, a2, a3=None, a4=None, a5=None):
    """
    Defines the Jacobian matrix of a Gaussian with the optional
    addition of a polynomial up to degree 2 with respect to the
    parameters.

    Parameters
    ----------
    x : 1-D array_like
        The independent variable data, of length M, where the Jacobian
        is to be evaluated.
    a0 : scalar
        The parameter that gives the height of the Gaussian in the
        function given by the equation:
        f = a0*exp(-((x-a1)/a2)^2/2) + a3 + a4*x + a5*x^2
    a1 : scalar
        The parameter that gives the location of the center of the
        Gaussian in the above equation.
    a2 : scalar
        The parameter that gives the sigma (width) of the Gaussian
        in the above equation.  A non-positive width zeroes the
        Gaussian partial derivatives.
    a3 : scalar, optional
        The constant polynomial term.  When omitted (and no higher
        term is given) the Jacobian does not include this parameter.
    a4 : scalar, optional
        The linear polynomial term.  When omitted (and a5 is not
        given) the Jacobian does not include this parameter.
    a5 : scalar, optional
        The quadratic polynomial term.  When omitted the Jacobian
        does not include this parameter.

    Returns
    -------
    dfx : 2-D array
        The Jacobian matrix, an (M, k)-shaped array, of the above
        function for each value in x, where k is the number of
        function parameters specified
    """
    import numpy as np

    x = np.asarray(x, dtype=float)
    # Highest supplied polynomial term fixes the column count, matching
    # the original nterms logic.
    if a5 is not None:
        nterms = 6
    elif a4 is not None:
        nterms = 5
    elif a3 is not None:
        nterms = 4
    else:
        nterms = 3
    # Partial derivatives of the Gaussian part; a non-positive width means
    # no Gaussian, so its partials vanish.
    if a2 > 0:
        z = (x - a1) / a2
        d0 = np.exp(-z ** 2 / 2)   # df/da0
        d1 = a0 * z / a2 * d0      # df/da1
        d2 = d1 * z                # df/da2
    else:
        # BUGFIX: np.repeat(0, ...) produced int zeros; use float zeros
        # for a dtype consistent with the Gaussian branch.
        d0 = np.zeros(x.size)
        d1 = np.zeros(x.size)
        d2 = np.zeros(x.size)
    # Build the polynomial columns incrementally instead of duplicating a
    # column_stack call per nterms value.
    columns = [d0, d1, d2]
    if nterms >= 4:
        columns.append(np.ones(x.size))   # df/da3
    if nterms >= 5:
        columns.append(x)                 # df/da4
    if nterms == 6:
        columns.append(x ** 2)            # df/da5
    return np.column_stack(columns)
def gaussfunc_best_guess(x, y, nterms=3, min_bounds=None, max_bounds=None):
"""
    Generates an initial guess of each parameter when fitting
a Gaussian with the optional addition of a polynomial up
to degree 2 using non-linear least squares methods.
Parameters
----------
x : 1-D array_like
The independent variable data, of length M
y : 1-D array_like
The dependent variable data, of length M, which a Gaussian
function with the optional addition of a polynomial up
to degree 2 is to be fit.
nterms : integer, optional
An integer from 3 to 6 specifying the number of terms
to include in the Gaussian function given by:
f = a0*exp(-((x-a1)/a2)^2/2) + a3 + a4*x + a5*x^2
If only a value of 3 is specified, then only estimates for a
Gaussian are returned (a0, a1, a2). If a value > 3 is specified
then a polynomial of degree = nterms - 4 is added to the
Gaussian up to a degree of 2 (a3, a4, a5).
If a value is not specified, then a default value of 3 is used.
min_bounds : 1-D array-like, optional
An array of length nterms giving the minimum bounds for each
parameter to be used in the non-linear least squares fitting.
Guesses are restricted to be larger than these values.
max_bounds : 1-D array-like, optional
An array of length nterms giving the maximum bounds for each
parameter to be used in the non-linear least squares fitting.
Guesses are restricted to be smaller than these values.
Returns
-------
parameter_guess : 1-D array
An array of length nterms giving the initial guesses for each
parameter to be used in the non-linear least squares fitting.
"""
import numpy as np
# Check to make sure inputs are of correct form
# x and y must be 1-D arrays and same size
x = np.array(x)
y = np.array(y)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y must be one dimensional")
if x.size != y.size:
raise ValueError("x and y must be the same length")
# nterms needs to be an integer between 3 and 6. So, check if integer and check range.
nterms = int(nterms)
if not 3 <= nterms <= 6:
raise ValueError(
("nterms must be between 3 and 6; value given: {0}"
).format(nterms)
)
# min_bounds and max_bounds need to be nterms long if specified else give
# them infinite range
if min_bounds is not None:
if len(min_bounds) != nterms:
raise ValueError("min_bounds must have nterms number of elements")
else:
min_bounds = np.repeat(-np.inf, nterms)
if max_bounds is not None:
if len(max_bounds) != nterms:
raise ValueError("max_bounds must have nterms number of elements")
else:
max_bounds = np.repeat(np.inf, nterms)
# min_bounds must be smaller or equal to max_bounds
if any(min_bounds > max_bounds):
raise ValueError("max_bounds must be larger than min_bounds")
# For a Gaussian with a polynomial, subtract off a constant or line
# to get good initial estimate. Use a constant if only a constant
# term is used, and a | |
en-ca)',
'Mozilla/4.0 (compatible; WebCapture 3.0; Macintosh)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5) ',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.8.1.8) Gecko/20071004 Iceweasel/2.0.0.8 (Debian-2.0.0.6+2.0.0.8-Oetch1)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; {1C69E7AA-C14E-200E-5A77-8EAB2D667A07})',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; acc=baadshah; acc=none; freenet DSL 1.1; (none))',
'Mozilla/4.0 (compatible; MSIE 5.5; Windows 98)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 8.51',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; Alexa Toolbar; mxie; .NET CLR 1.1.4322)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; ja-jp) AppleWebKit/417.9 (KHTML, like Gecko) Safari/417.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; de-de; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1 (.NET CLR 3.0.04506.648)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727; .NET4.0C; .NET4.0E',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14912/812; U; ru) Presto/2.4.15',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.57',
'Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95_8GB/31.0.015; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.8.0.5) Gecko/20060706 K-Meleon/1.0',
'Lynx/2.8.6rel.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8g',
'Mozilla/4.76 [en] (PalmOS; U; WebPro/3.0.1a; Palm-Arz1)',
'Mozilla/5.0 (Macintosh; U; PPC Mac OS X; de-de) AppleWebKit/418 (KHTML, like Gecko) Shiira/1.2.2 Safari/125',
'Mozilla/5.0 (X11; U; Linux i686 (x86_64); en-US; rv:1.8.1.6) Gecko/2007072300 Iceweasel/2.0.0.6 (Debian-2.0.0.6-0etch1+lenny1)',
'Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 3.5.30729; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Links (2.2; GNU/kFreeBSD 6.3-1-486 i686; 80x25)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; WOW64; Trident/4.0; SLCC1)',
'Mozilla/1.22 (compatible; Konqueror/4.3; Linux) KHTML/4.3.5 (like Gecko)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.5)',
'Opera/9.80 (Macintosh; U; de-de) Presto/2.8.131 Version/11.10',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.9) Gecko/20100318 Mandriva/2.0.4-69.1mib2010.0 SeaMonkey/2.0.4',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP) Gecko/20060706 IEMobile/7.0',
'Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10',
'Mozilla/5.0 (Macintosh; I; Intel Mac OS X 10_6_7; ru-ru)',
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
'Mozilla/1.22 (compatible; MSIE 6.0; Windows NT 6.1; Trident/4.0; GTB6; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; OfficeLiveConnector.1.4; OfficeLivePatch.1.3)',
'Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)',
'Mozilla/4.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.205 Safari/534.16',
'Mozilla/1.22 (X11; U; Linux x86_64; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.0.30729; InfoPath.2)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'Mozilla/5.0 (compatible; MSIE 2.0; Windows CE; IEMobile 7.0)',
'Mozilla/4.0 (Macintosh; U; PPC Mac OS X; en-US)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en; rv:1.9.1.7) Gecko/20091221 Firefox/3.5.7',
'BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0',
'Mozilla/1.22 (compatible; MSIE 2.0; Windows 3.1)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; Avant Browser [avantbrowser.com]; iOpus-I-M; QXW03416; .NET CLR 1.1.4322)',
'Mozilla/3.0 (Windows NT 6.1; ru-ru; rv:1.9.1.3.) Win32; x86 Firefox/3.5.3 (.NET CLR 2.0.50727)',
'Opera/7.0 (compatible; MSIE 2.0; Windows 3.1)',
'Opera/9.80 (Windows NT 5.1; U; en-US) Presto/2.8.131 Version/11.10',
'Mozilla/4.0 (compatible; MSIE 6.0; America Online Browser 1.1; rev1.5; Windows NT 5.1;)',
'Mozilla/5.0 (Windows; U; Windows CE 4.21; rv:1.8b4) Gecko/20050720 Minimo/0.007',
'BlackBerry9000/5.0.0.93 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/179',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; .NET CLR 1.1.4322)',
'Googlebot/2.1 (http://www.googlebot.com/bot.html)', 'Opera/9.20 (Windows NT 6.0; U; en)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.1) Gecko/20061205 Iceweasel/2.0.0.1 (Debian-2.0.0.1+dfsg-2)',
'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; FDM; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 1.1.4322)',
'Opera/10.00 (X11; Linux i686; U; en) Presto/2.2.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; he-IL) AppleWebKit/528.16 (KHTML, like Gecko) Version/4.0 Safari/528.16',
'Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.13) Gecko/20101209 Firefox/3.6.13',
'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 5.1; Trident/5.0)',
'Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)', 'Mozilla/4.0 (compatible; MSIE 6.0b; Windows 98)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; ru; rv:1.9.2.3) Gecko/20100401 Firefox/4.0 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.8) Gecko/20100804 Gentoo Firefox/3.6.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100809 Fedora/3.6.7-1.fc14 Firefox/3.6.7',
'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
'Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)',
'YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; http://help.yahoo.com/help/us/shop/merchant/)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)',
'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)',
'Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)',
'Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)',
'Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51',
'AppEngine-Google; (+http://code.google.com/appengine; appid: webetrex)',
'Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.27; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.21; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; GTB7.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)',
'Mozilla/4.0 (compatible; MSIE 8.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C; .NET4.0E)',
'Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.7; AOLBuild 4343.19; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648; .NET CLR 3.0.4506.2152; .NET CLR | |
-> Sequence[str]:
"""
A list of pool IDs in failover priority to use for traffic reaching the given PoP.
"""
return pulumi.get(self, "pool_ids")
    @property
    @pulumi.getter
    def region(self) -> str:
        """
        A region code which must be in the list defined [here](https://support.cloudflare.com/hc/en-us/articles/115000540888-Load-Balancing-Geographic-Regions). Multiple entries should not be specified with the same region.
        """
        # Value is read back from the pulumi output wrapper under the
        # "region" key set at construction time.
        return pulumi.get(self, "region")
@pulumi.output_type
class NotificationPolicyEmailIntegration(dict):
    """Output type for an email integration entry on a notification policy."""

    def __init__(self, *,
                 id: str,
                 name: Optional[str] = None):
        """
        :param str id: Identifier of the integration.
        :param str name: The name of the notification policy.
        """
        pulumi.set(self, "id", id)
        if name is not None:
            pulumi.set(self, "name", name)

    @property
    @pulumi.getter
    def id(self) -> str:
        """Identifier of the integration."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the notification policy.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class NotificationPolicyPagerdutyIntegration(dict):
    """Output type for a PagerDuty integration entry on a notification policy."""

    def __init__(self, *,
                 id: str,
                 name: Optional[str] = None):
        """
        :param str id: Identifier of the integration.
        :param str name: The name of the notification policy.
        """
        pulumi.set(self, "id", id)
        if name is not None:
            pulumi.set(self, "name", name)

    @property
    @pulumi.getter
    def id(self) -> str:
        """Identifier of the integration."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the notification policy.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class NotificationPolicyWebhooksIntegration(dict):
    """Output type for a webhooks integration entry on a notification policy."""

    def __init__(self, *,
                 id: str,
                 name: Optional[str] = None):
        """
        :param str id: Identifier of the integration.
        :param str name: The name of the notification policy.
        """
        pulumi.set(self, "id", id)
        if name is not None:
            pulumi.set(self, "name", name)

    @property
    @pulumi.getter
    def id(self) -> str:
        """Identifier of the integration."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the notification policy.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class PageRuleActions(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "alwaysOnline":
suggest = "always_online"
elif key == "alwaysUseHttps":
suggest = "always_use_https"
elif key == "automaticHttpsRewrites":
suggest = "automatic_https_rewrites"
elif key == "browserCacheTtl":
suggest = "browser_cache_ttl"
elif key == "browserCheck":
suggest = "browser_check"
elif key == "bypassCacheOnCookie":
suggest = "bypass_cache_on_cookie"
elif key == "cacheByDeviceType":
suggest = "cache_by_device_type"
elif key == "cacheDeceptionArmor":
suggest = "cache_deception_armor"
elif key == "cacheKeyFields":
suggest = "cache_key_fields"
elif key == "cacheLevel":
suggest = "cache_level"
elif key == "cacheOnCookie":
suggest = "cache_on_cookie"
elif key == "cacheTtlByStatuses":
suggest = "cache_ttl_by_statuses"
elif key == "disableApps":
suggest = "disable_apps"
elif key == "disablePerformance":
suggest = "disable_performance"
elif key == "disableRailgun":
suggest = "disable_railgun"
elif key == "disableSecurity":
suggest = "disable_security"
elif key == "edgeCacheTtl":
suggest = "edge_cache_ttl"
elif key == "emailObfuscation":
suggest = "email_obfuscation"
elif key == "explicitCacheControl":
suggest = "explicit_cache_control"
elif key == "forwardingUrl":
suggest = "forwarding_url"
elif key == "hostHeaderOverride":
suggest = "host_header_override"
elif key == "ipGeolocation":
suggest = "ip_geolocation"
elif key == "opportunisticEncryption":
suggest = "opportunistic_encryption"
elif key == "originErrorPagePassThru":
suggest = "origin_error_page_pass_thru"
elif key == "resolveOverride":
suggest = "resolve_override"
elif key == "respectStrongEtag":
suggest = "respect_strong_etag"
elif key == "responseBuffering":
suggest = "response_buffering"
elif key == "rocketLoader":
suggest = "rocket_loader"
elif key == "securityLevel":
suggest = "security_level"
elif key == "serverSideExclude":
suggest = "server_side_exclude"
elif key == "sortQueryStringForCache":
suggest = "sort_query_string_for_cache"
elif key == "trueClientIpHeader":
suggest = "true_client_ip_header"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in PageRuleActions. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        # Warn if a camelCase key is used (the property getters are the
        # supported access path), then fall through to normal dict access.
        PageRuleActions.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        # Same camelCase-key warning as __getitem__, then defer to
        # dict.get with the caller-supplied default.
        PageRuleActions.__key_warning(key)
        return super().get(key, default)
def __init__(__self__, *,
always_online: Optional[str] = None,
always_use_https: Optional[bool] = None,
automatic_https_rewrites: Optional[str] = None,
browser_cache_ttl: Optional[str] = None,
browser_check: Optional[str] = None,
bypass_cache_on_cookie: Optional[str] = None,
cache_by_device_type: Optional[str] = None,
cache_deception_armor: Optional[str] = None,
cache_key_fields: Optional['outputs.PageRuleActionsCacheKeyFields'] = None,
cache_level: Optional[str] = None,
cache_on_cookie: Optional[str] = None,
cache_ttl_by_statuses: Optional[Sequence['outputs.PageRuleActionsCacheTtlByStatus']] = None,
disable_apps: Optional[bool] = None,
disable_performance: Optional[bool] = None,
disable_railgun: Optional[bool] = None,
disable_security: Optional[bool] = None,
edge_cache_ttl: Optional[int] = None,
email_obfuscation: Optional[str] = None,
explicit_cache_control: Optional[str] = None,
forwarding_url: Optional['outputs.PageRuleActionsForwardingUrl'] = None,
host_header_override: Optional[str] = None,
ip_geolocation: Optional[str] = None,
minifies: Optional[Sequence['outputs.PageRuleActionsMinify']] = None,
mirage: Optional[str] = None,
opportunistic_encryption: Optional[str] = None,
origin_error_page_pass_thru: Optional[str] = None,
polish: Optional[str] = None,
resolve_override: Optional[str] = None,
respect_strong_etag: Optional[str] = None,
response_buffering: Optional[str] = None,
rocket_loader: Optional[str] = None,
security_level: Optional[str] = None,
server_side_exclude: Optional[str] = None,
sort_query_string_for_cache: Optional[str] = None,
ssl: Optional[str] = None,
true_client_ip_header: Optional[str] = None,
waf: Optional[str] = None):
"""
:param str always_online: Whether this action is `"on"` or `"off"`.
:param bool always_use_https: Boolean of whether this action is enabled. Default: false.
:param str automatic_https_rewrites: Whether this action is `"on"` or `"off"`.
:param str browser_cache_ttl: The Time To Live for the browser cache. `0` means 'Respect Existing Headers'
:param str browser_check: Whether this action is `"on"` or `"off"`.
:param str bypass_cache_on_cookie: String value of cookie name to conditionally bypass cache the page.
:param str cache_by_device_type: Whether this action is `"on"` or `"off"`.
:param str cache_deception_armor: Whether this action is `"on"` or `"off"`.
:param 'PageRuleActionsCacheKeyFieldsArgs' cache_key_fields: Controls how Cloudflare creates Cache Keys used to identify files in cache. See below for full description.
:param str cache_level: Whether to set the cache level to `"bypass"`, `"basic"`, `"simplified"`, `"aggressive"`, or `"cache_everything"`.
:param str cache_on_cookie: String value of cookie name to conditionally cache the page.
:param Sequence['PageRuleActionsCacheTtlByStatusArgs'] cache_ttl_by_statuses: Set cache TTL based on the response status from the origin web server. Can be specified multiple times. See below for full description.
:param bool disable_apps: Boolean of whether this action is enabled. Default: false.
:param bool disable_performance: Boolean of whether this action is enabled. Default: false.
:param bool disable_railgun: Boolean of whether this action is enabled. Default: false.
:param bool disable_security: Boolean of whether this action is enabled. Default: false.
:param int edge_cache_ttl: The Time To Live for the edge cache.
:param str email_obfuscation: Whether this action is `"on"` or `"off"`.
:param str explicit_cache_control: Whether origin Cache-Control action is `"on"` or `"off"`.
:param 'PageRuleActionsForwardingUrlArgs' forwarding_url: The URL to forward to, and with what status. See below.
:param str host_header_override: Value of the Host header to send.
:param str ip_geolocation: Whether this action is `"on"` or `"off"`.
:param Sequence['PageRuleActionsMinifyArgs'] minifies: The configuration for HTML, CSS and JS minification. See below for full list of options.
:param str mirage: Whether this action is `"on"` or `"off"`.
:param str opportunistic_encryption: Whether this action is `"on"` or `"off"`.
:param str origin_error_page_pass_thru: Whether this action is `"on"` or `"off"`.
:param str polish: Whether this action is `"off"`, `"lossless"` or `"lossy"`.
:param str resolve_override: Overridden origin server name.
:param str respect_strong_etag: Whether this action is `"on"` or `"off"`.
:param str response_buffering: Whether this action is `"on"` or `"off"`.
:param str rocket_loader: Whether to set the rocket loader to `"on"`, `"off"`.
:param str security_level: Whether to set the security level to `"off"`, `"essentially_off"`, `"low"`, `"medium"`, `"high"`, or `"under_attack"`.
:param str server_side_exclude: Whether this action is `"on"` or `"off"`.
:param str sort_query_string_for_cache: Whether this action is `"on"` or `"off"`.
:param str ssl: Whether to set the SSL mode to `"off"`, `"flexible"`, `"full"`, `"strict"`, or `"origin_pull"`.
:param str true_client_ip_header: Whether this action is `"on"` or `"off"`.
:param str waf: Whether this action is `"on"` or `"off"`.
"""
if always_online is not None:
pulumi.set(__self__, "always_online", always_online)
if always_use_https is not None:
pulumi.set(__self__, "always_use_https", always_use_https)
if automatic_https_rewrites is not None:
pulumi.set(__self__, "automatic_https_rewrites", automatic_https_rewrites)
if browser_cache_ttl is not None:
pulumi.set(__self__, "browser_cache_ttl", browser_cache_ttl)
if browser_check is not None:
pulumi.set(__self__, "browser_check", browser_check)
if bypass_cache_on_cookie is not None:
pulumi.set(__self__, "bypass_cache_on_cookie", bypass_cache_on_cookie)
if cache_by_device_type is not None:
pulumi.set(__self__, "cache_by_device_type", cache_by_device_type)
if cache_deception_armor is not None:
pulumi.set(__self__, "cache_deception_armor", cache_deception_armor)
if cache_key_fields is not None:
pulumi.set(__self__, "cache_key_fields", cache_key_fields)
if cache_level is not None:
pulumi.set(__self__, "cache_level", cache_level)
if cache_on_cookie is not None:
pulumi.set(__self__, "cache_on_cookie", cache_on_cookie)
if cache_ttl_by_statuses is not None:
pulumi.set(__self__, "cache_ttl_by_statuses", cache_ttl_by_statuses)
if disable_apps is not None:
pulumi.set(__self__, "disable_apps", disable_apps)
if disable_performance is not None:
pulumi.set(__self__, "disable_performance", disable_performance)
if disable_railgun is not None:
pulumi.set(__self__, "disable_railgun", disable_railgun)
if disable_security is not None:
pulumi.set(__self__, "disable_security", disable_security)
if edge_cache_ttl is not None:
pulumi.set(__self__, "edge_cache_ttl", edge_cache_ttl)
if email_obfuscation is not None:
pulumi.set(__self__, "email_obfuscation", email_obfuscation)
if explicit_cache_control is not None:
pulumi.set(__self__, "explicit_cache_control", explicit_cache_control)
if forwarding_url is not None:
pulumi.set(__self__, "forwarding_url", forwarding_url)
if host_header_override is not None:
pulumi.set(__self__, "host_header_override", host_header_override)
if ip_geolocation is not None:
pulumi.set(__self__, "ip_geolocation", | |
session or (username and password and dockey):
out("LOGIN ATTEMPT", "2")
sheet = ''
if (username and password and dockey):
gsp = gspread.login(username, password)
gdoc = gsp.open_by_key(dockey)
else:
if 'oa2' in session:
creds = Credentials(access_token=session['oa2'])
out("Credential object created.")
else:
out("Expired login.")
yield "Google Login expired. Log back in.", "Login under the \"burger button\" in the upper-right.", "", ""
yield "spinoff", "", "", ""
try:
gsp = gspread.authorize(creds)
except:
out("Login failed.")
yield "Google Login unsuccessful.", "", "", ""
yield "spinoff", "", "", ""
raise StopIteration
else:
out("Login successful.")
out("Opening Spreadsheet...")
yield("Opening Spreadsheet...", "", "", "")
stop = True
sheet = ''
for x in range(10):
yield lock
try:
gdoc = gsp.open_by_url(globs.PIPURL)
stop = False
break
except gspread.httpsession.HTTPError, e:
out("Login appeared successful, but rejected on document open attempt.")
yme = 'Please <a href="%s">Log In</a> again first.' % getLoginlink()
yield yme, "Login under the \"burger button\" in the upper-right.", "", ""
if session and 'loggedin' in session:
session.pop('loggedin', None)
if 'u' not in session and globs.PIPURL:
session['u'] = globs.PIPURL
Stop()
except gspread.exceptions.NoValidUrlKeyFound:
try:
gdoc = gsp.open("Pipulate")
stop = False
break
except gspread.httpsession.HTTPError, e:
pass
except:
yield "I see you're on a URL that is not a Google Spreadsheet. Would you like to grab links?", "", "", ""
yield "If so, just <a href='https://docs.google.com/spreadsheets/create' target='_new'>create</a> a new Spreadsheet, name it \"Pipulate\" and click Pipulate again.", "Google Spreadsheet Not Found.", "", ""
yield 'New to this odd but awesome approach? Watch the <a target="_blank" href="http://goo.gl/v71kw8">Demo</a> and read the <a target="_blank" href="http://goo.gl/p2zQa4">Docs</a>.', "", "", ""
Stop()
except gspread.exceptions.SpreadsheetNotFound:
yield "Please give the document a name to force first save.", "", "", ""
Stop()
except Exception as e:
yield dontgetfrustrated(x)
out("Retry login %s of %s" % (x, 10))
time.sleep(6)
if stop:
yield "spinoff", "", "", ""
yield badtuple
Stop()
# try:
# sheet = gdoc.id
# sheetlink = '<a target="_blank" href="https://docs.google.com/spreadsheets/d/%s/edit">Click here to open Pipulate Spreadsheet</a>.' % sheet
# yield sheetlink, "", "", ""
# except:
# pass
yield unlock
out("Google Spreadsheet successfully opened.")
if globs.PIPMODE == 'learn':
out("<script>alert('hit');</script>")
if globs.KEYWORDS and globs.KEYWORDS[:1] != '[' and globs.KEYWORDS[-1:] != ']':
# Keywords Tab
yield "Keyword Collection Detected", "Making Keywords Tab If Needed", "", ""
headers = ['Keyword', 'Source']
yield lock
offset = 0
newTab = False
try:
newTab = InitTab(gdoc, 'Keywords', headers)
except:
pass
if newTab:
offset = -1
yield unlock
ksheet = gdoc.worksheet("Keywords")
kcount = ksheet.row_count + offset
kwlist = globs.KEYWORDS.split(',')
kwrows = []
yme = "Collecting %s keywords." % len(kwlist)
yield yme, "Collecting keywords", "", ""
for kw in kwlist:
kwrows.append([kw.strip(), globs.PIPURL])
try:
InsertRows(ksheet, kwrows, kcount)
except:
pass
# _ _ _ _
# ___ ___| |_ _ _ _ __ ___| |__ ___ ___| |_ / |
# / __|/ _ \ __| | | | | '_ \ / __| '_ \ / _ \/ _ \ __| | |
# \__ \ __/ |_ | |_| | |_) | \__ \ | | | __/ __/ |_ | |
# |___/\___|\__| \__,_| .__/ |___/_| |_|\___|\___|\__| |_|
# |_|
# This is where special behavior like crawls get wedged in
anything = re.compile('.+')
initSheet1 = False
cell = None
try:
cell = gdoc.sheet1.find(anything)
except gspread.exceptions.CellNotFound:
# Questionmark replacement tab
initSheet1 = True
if initSheet1:
if globs.PIPMODE == 'clear':
pass
else:
try:
bothrows = sheetinitializer(globs.PIPMODE)
row1 = bothrows[0]
row2 = [bothrows[1]]
yield lock
try:
InitTab(gdoc, "sheet1", row1, row2)
except:
pass
yield unlock
except:
yme = "Action for %s not defined." % globs.PIPMODE
yield yme, "Action not defined.", "", ""
else:
anything = re.compile('.+')
if globs.PIPMODE == 'clear':
out("Clearing Tab 1...")
yield "Clearing Sheet 1. Use revision history if a mistake.", "Clearing Sheet 1", "", ""
try:
CellList = gdoc.sheet1.findall(anything)
for cell in CellList:
cell.value = ''
result = gdoc.sheet1.update_cells(CellList)
yield "Sheet1 Cleared.", "", "", ""
yield "spinoffsuccess", "", "", ""
Stop()
except:
out("Could not clear tap one.")
Stop()
yield "Checking Tabs: Sheet 1", "Then we check for tabs...", "", ""
# How To Tab
yield ", How To", "", "", ""
headers = ['Expand column. Hey, you did it! Good job so far.', 'Welcome to Pipulate!']
InitTab(gdoc, 'How To', headers, documentation())
# Config Tab
yield ", Config", "", "", ""
headers = ['NAME', 'VALUE']
config = []
config.append(['RepeatJobEvery','day'])
config.append(['MaxRowsPerHour','3'])
yield lock
try:
InitTab(gdoc, 'Config', headers, config)
except:
Stop()
yield unlock
# Scrapers Tab
yield ", Scrapers", "", "", ""
headers = ['name', 'type', 'pattern']
InitTab(gdoc, 'Scrapers', headers, scrapes())
sst = None
out("Loading Scrapers.")
stop = True
for x in range(5):
yield lock
try:
sst = gdoc.worksheet("Scrapers")
stop = False
break
except:
yield dontgetfrustrated(x)
out("Retry get Scraper sheet %s of %s" % (x, 5))
time.sleep(3)
if stop:
yield badtuple
Stop()
yield unlock
try:
out("Reading Config tab into globals.")
globs.config = RefreshConfig(gdoc, "Config") #HTTPError
except:
out("Copying Config tag to globals failed.")
else:
out("Config tab copied to globals.")
out("Counting rows in Pipulate tab.")
stop = True
for x in range(5):
yield lock
try:
globs.sheet = gdoc.sheet1
stop = False
break
except:
yield dontgetfrustrated(x)
out("Retry get Pipulate sheet %s of %s" % (x, 10))
time.sleep(5)
if stop:
yield badtuple
Stop()
yield unlock
stop = True
for x in range(5):
yield lock
try:
CellList = globs.sheet.findall("?")
for cell in CellList:
qset.add(cell.row)
stop = False
break
except:
yield dontgetfrustrated(x)
out("Retry get rows with question marks %s of %s" % (x, 10))
time.sleep(5)
if stop:
yield badtuple
Stop()
yield unlock
stop = True
for x in range(10):
yield lock
try:
globs.numrows = len(globs.sheet.col_values(1)) #!!!UnboundLocalError HTTPError OPTIMIZE!
stop = False
break
except:
yield dontgetfrustrated(x)
out("Retry count rows %s of %s" % (x, 10))
time.sleep(10)
if stop == True:
yield badtuple
Stop()
yield unlock
yme = "%s rows found in Pipulate tab." % globs.numrows
out(yme)
yield yme, "", "", ""
if globs.numrows == 0:
yield "spinoff", "", "", ""
Stop()
stop = True
for x in range(5):
try:
lod = sst.get_all_records() #Returns list of dictionaries
stop = False
break
except:
yield dontgetfrustrated(x)
out("Retry count rows %s of %s" % (x, 10))
time.sleep(10)
if stop == True:
yield badtuple
Stop()
yield unlock
pat = [[d['pattern']][0] for d in lod]
typ = [[d['type']][0] for d in lod]
nam = [[d['name']][0] for d in lod]
scrapetypes = ziplckey(nam, typ)
scrapepatterns = ziplckey(nam, pat)
transscrape = ziplckey(nam, nam)
out("Scrapers loaded.")
yield "Analyzing spreadsheet for request...", "Reading spreadsheet...", "", ""
out("Loading row1 into globals.")
stop = True
for x in range(10):
yield lock
try:
globs.row1 = lowercaselist(globs.sheet.row_values(1))
stop = False
break
except:
yield dontgetfrustrated(x)
out("Retry load Row1 %s of %s" % (x, 10))
time.sleep(5)
if stop:
yield badtuple
Stop()
yield unlock
trendlistoflists = []
out("Scanning row 1 for function and scraper names.")
fargs = {}
for coldex2, fname in enumerate(globs.row1):
try:
fname = fname.lower()
except:
pass
if fname in transfuncs.keys():
out("Found function %s in row 1." % fname)
fargs[coldex2] = {}
from inspect import getargspec
argspec = getargspec(eval(fname))
if argspec:
out("%s has arguments." % (fname))
myargs = argspec[0]
mydefs = argspec[3]
offset = 0
if mydefs:
out("%s has defaults," % (fname))
offset = len(myargs) - len(mydefs)
if offset:
for i in range(0, offset-1):
fargs[coldex2][myargs[i]] = None
for i in range(offset, len(myargs)):
fargs[coldex2][myargs[i]] = mydefs[offset-i]
else:
out("%s has no defaults." % (fname))
for anarg in myargs:
fargs[coldex2][anarg] = None
for argdex, anarg in enumerate(myargs): #For each argument of function
fargs[coldex2][anarg] = None
# _ _ _
# __ _ ___| |_ ___ _ __(_)___| | _____
# / _` / __| __/ _ \ '__| / __| |/ / __|
# | (_| \__ \ || __/ | | \__ \ <\__ \
# \__,_|___/\__\___|_| |_|___/_|\_\___/
#
trended = False
out("Scan down Pipulate tab looking for asterisks.", "2")
for rowdex in range(1, globs.numrows+1):
out("Scanning row %s for asterisks." % rowdex) #This can have a pretty long delay
stop = True
for x in range(8):
yield lock
try:
onerow = globs.sheet.row_values(rowdex) #!!! | |
book_stock.groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others = df_others.rename(columns={0:'embedding'})
df_others[['linearFit1_1','linearFit1_2','linearFit1_3','wap_std1_1','wap_std1_2','wap_std1_3']] = pd.DataFrame(df_others.embedding.tolist(), index=df_others.index)
df_others['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others['time_id']]
df_others = df_others.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_others.append(df_others)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_others2 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others2 = df_others2.rename(columns={0:'embedding'})
df_others2[['linearFit2_1','linearFit2_2','linearFit2_3','wap_std2_1','wap_std2_2','wap_std2_3']] = pd.DataFrame(df_others2.embedding.tolist(), index=df_others2.index)
df_others2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others2['time_id']]
df_others2 = df_others2.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit2_1'])
temp2 = pd.DataFrame([0],columns=['linearFit2_2'])
temp3 = pd.DataFrame([0],columns=['linearFit2_3'])
temp4 = pd.DataFrame([0],columns=['wap_std2_1'])
temp5 = pd.DataFrame([0],columns=['wap_std2_2'])
temp6 = pd.DataFrame([0],columns=['wap_std2_3'])
df_others2 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others2.append(df_others2)
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_others3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others3 = df_others3.rename(columns={0:'embedding'})
df_others3[['linearFit3_1','linearFit3_2','linearFit3_3','wap_std3_1','wap_std3_2','wap_std3_3']] = pd.DataFrame(df_others3.embedding.tolist(), index=df_others3.index)
df_others3['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others3['time_id']]
df_others3 = df_others3.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit3_1'])
temp2 = pd.DataFrame([0],columns=['linearFit3_2'])
temp3 = pd.DataFrame([0],columns=['linearFit3_3'])
temp4 = pd.DataFrame([0],columns=['wap_std3_1'])
temp5 = pd.DataFrame([0],columns=['wap_std3_2'])
temp6 = pd.DataFrame([0],columns=['wap_std3_3'])
df_others3 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others3.append(df_others3)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_ent_concat = pd.concat(list_ent)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_others = pd.concat(list_others)
df_others2 = pd.concat(list_others2)
df_others3 = pd.concat(list_others3)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_ent_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others3, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
def _windowed_rv_frame(bucket, times_pd, suffix):
    """Per-time_id realized volatility for one time window of the book.

    Parameters
    ----------
    bucket : DataFrame -- book rows restricted to the window (may be empty).
    times_pd : DataFrame with a single 'row_id' column, one row per time_id.
    suffix : str appended to output column names (e.g. '300' -> 'rv_300').

    Returns a frame with columns row_id, rv_<suffix> ... rv5_<suffix>;
    all-zero volatility columns when the window has no rows.
    """
    renames = {'wap': f'rv_{suffix}', 'wap2': f'rv2_{suffix}', 'wap3': f'rv3_{suffix}',
               'wap4': f'rv4_{suffix}', 'mid_price': f'rv5_{suffix}'}
    if bucket.empty:
        # BUGFIX: the original built (1, n_time_ids)-shaped zero arrays with a
        # single column label, which raises a shape mismatch whenever a stock
        # has more than one time_id. Build a proper (n_time_ids, 5) zero block.
        zeros = pd.DataFrame(np.zeros((times_pd.shape[0], len(renames))),
                             columns=list(renames.values()))
        return pd.concat([times_pd.reset_index(drop=True), zeros], axis=1)
    parts = [times_pd.reset_index(drop=True)]
    for col in ('wap', 'wap2', 'wap3', 'wap4', 'mid_price'):
        agg = (bucket.groupby(['time_id'])[col]
               .agg(calc_rv_from_wap_numba, engine='numba')
               .to_frame().reset_index())
        parts.append(agg[col])
    # NOTE(review): this positional concat assumes the groupby output order
    # matches times_pd's order (unique() order) -- confirm time_ids are sorted
    # upstream, otherwise rows can be misaligned (pre-existing behavior).
    # BUGFIX: the original rename used keys 'wap2_<suffix>'/'wap3_<suffix>' that
    # never existed as columns (and mapped to suffix-less 'rv2'/'rv3'), so the
    # non-empty branch's column names disagreed with the zero branch.
    return pd.concat(parts, axis=1).rename(columns=renames)


def _financial_frame(frame, times_pd, stock_id, cols):
    """Apply financial_metrics per time_id and unpack the result into `cols`.

    Returns an all-zero placeholder keyed like times_pd when `frame` is empty
    (equivalent to the original single-row placeholder after the downstream
    merge + fillna(0)).
    """
    if frame.empty:
        zeros = pd.DataFrame(np.zeros((times_pd.shape[0], len(cols))), columns=cols)
        return pd.concat([times_pd.reset_index(drop=True), zeros], axis=1)
    feats = frame.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
    feats = feats.rename(columns={0: 'embedding'})
    # financial_metrics returns one list-like per time_id; spread it into columns.
    feats[cols] = pd.DataFrame(feats.embedding.tolist(), index=feats.index)
    feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in feats['time_id']]
    return feats.rename(columns={'time_id': 'row_id'}).drop(['embedding'], axis=1)


def computeFeatures_newTest_Laurent(machine, dataset, all_stocks_ids, datapath):
    """Build the per-row_id feature matrix (Laurent's variant) for a test set.

    Parameters
    ----------
    machine : 'local' or 'kaggle' -- selects which book-data loader to use.
    dataset : dataset identifier, passed through to the loader.
    all_stocks_ids : array-like of stock ids; fixes the one-hot encoding order.
    datapath : root path of local data files (used only when machine='local').

    Returns
    -------
    DataFrame with one row per 'row_id' ("<stock_id>-<time_id>") holding
    realized-volatility features (full bucket, >=300s and >=480s windows),
    book-derived financial metrics, and a one-hot stock encoding.
    """
    list_rv, list_rv2, list_rv3 = [], [], []
    list_fin, list_fin2 = [], []
    fin_cols = ['wap_imbalance', 'price_spread', 'bid_spread', 'ask_spread',
                'total_vol', 'vol_imbalance']
    fin_cols_300 = [c + '5' for c in fin_cols]
    for stock_id in range(127):
        start = time.time()
        # Best-effort load: skip stock ids with no data file (as upstream does).
        # BUGFIX: an unknown `machine` previously left book_stock undefined.
        try:
            if machine == 'local':
                book_stock = load_book_data_by_id(stock_id, datapath, dataset)
            elif machine == 'kaggle':
                book_stock = load_book_data_by_id_kaggle(stock_id, dataset)
            else:
                continue
        except Exception:
            continue
        all_time_ids_byStock = book_stock['time_id'].unique()

        # Weighted-average prices over the whole 10-minute bucket.
        book_stock['wap'] = calc_wap(book_stock)
        book_stock['wap2'] = calc_wap2(book_stock)
        book_stock['wap3'] = calc_wap3(book_stock)
        # NOTE(review): 'wap4' reuses calc_wap2 and 'mid_price' reuses calc_wap3,
        # so rv4/rv5 duplicate rv2/rv3 exactly -- confirm whether dedicated
        # calc_wap4 / mid-price formulas were intended (behavior kept as-is).
        book_stock['wap4'] = calc_wap2(book_stock)
        book_stock['mid_price'] = calc_wap3(book_stock)

        # Realized volatility per time_id over the full bucket.
        df_sub = (book_stock.groupby('time_id')['wap']
                  .agg(calc_rv_from_wap_numba, engine='numba')
                  .to_frame().reset_index())
        for col in ('wap2', 'wap3', 'wap4', 'mid_price'):
            rv_col = (book_stock.groupby('time_id')[col]
                      .agg(calc_rv_from_wap_numba, engine='numba')
                      .to_frame().reset_index())
            df_sub[col] = rv_col[col]
        df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
        df_sub = df_sub.rename(columns={'time_id': 'row_id', 'wap': 'rv', 'wap2': 'rv2',
                                        'wap3': 'rv3', 'wap4': 'rv4', 'mid_price': 'rv5'})
        list_rv.append(df_sub)

        # row_id key frame shared by the windowed features below.
        times_pd = pd.DataFrame(all_time_ids_byStock, columns=['time_id'])
        times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
        times_pd = times_pd.rename(columns={'time_id': 'row_id'})

        # Realized volatility over the last 5 minutes (>=300s) and 2 minutes (>=480s).
        bucketQuery300 = book_stock.query('seconds_in_bucket >= 300')
        bucketQuery480 = book_stock.query('seconds_in_bucket >= 480')
        list_rv2.append(_windowed_rv_frame(bucketQuery300, times_pd, '300'))
        list_rv3.append(_windowed_rv_frame(bucketQuery480, times_pd, '480'))

        # Book-derived financial metrics: full bucket, then the >=300s window.
        list_fin.append(_financial_frame(book_stock, times_pd, stock_id, fin_cols))
        list_fin2.append(_financial_frame(bucketQuery300, times_pd, stock_id, fin_cols_300))
        print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)

    # Merge the per-stock frames into one feature matrix keyed by row_id.
    df_book_features = pd.concat(list_rv)
    for frames in (list_rv2, list_rv3, list_fin, list_fin2):
        df_book_features = (df_book_features
                            .merge(pd.concat(frames), on=['row_id'], how='left')
                            .fillna(0))

    # One-hot encode the stock id; column order follows all_stocks_ids.
    encoder = np.eye(len(all_stocks_ids))
    encoded = []
    for row_id in df_book_features['row_id']:
        sid = int(row_id.split('-')[0])
        encoded.append(encoder[np.where(all_stocks_ids == sid)[0], :])
    encoded_pd = pd.DataFrame(np.array(encoded).reshape(
        df_book_features.shape[0], np.array(all_stocks_ids).shape[0]))
    return pd.concat([df_book_features, encoded_pd], axis=1)
def computeFeatures_newTest_Laurent_noCode(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the entire book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
book_stock['wap4'] = calc_wap2(book_stock)
book_stock['mid_price'] = calc_wap3(book_stock)
# Calculate past realized volatility per time_id
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4 = book_stock.groupby('time_id')['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5 = book_stock.groupby('time_id')['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = df_sub.rename(columns={'time_id':'row_id'})
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3'], df_sub4['wap4'], df_sub5['mid_price']],axis=1)
df_sub = df_sub.rename(columns={'wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3', 'wap4':'rv4','mid_price':'rv5'})
list_rv.append(df_sub)
# Query segments
bucketQuery480 = book_stock.query(f'seconds_in_bucket >= 480')
isEmpty480 = bucketQuery480.empty
bucketQuery300 = book_stock.query(f'seconds_in_bucket >= 300')
isEmpty300 = bucketQuery300.empty
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
# Calculate past realized volatility per time_id and query subset
if isEmpty300 == False:
df_sub_300 = bucketQuery300.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_300 = bucketQuery300.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_300 = bucketQuery300.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_300 = bucketQuery300.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_300 = bucketQuery300.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_300 = pd.concat([times_pd,df_sub_300['wap'],df_sub2_300['wap2'],df_sub3_300['wap3'],df_sub4_300['wap4'],df_sub5_300['mid_price']],axis=1)
df_sub_300 = df_sub_300.rename(columns={'wap': 'rv_300', 'wap2_300': 'rv2', 'wap3_300': 'rv3', 'wap4':'rv4_300','mid_price':'rv5_300'})
else: # 0 volatility
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_300'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_300'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_300'])
zero_rv4 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv4_300'])
zero_rv5 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv5_300'])
df_sub_300 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv2.append(df_sub_300)
# Calculate realized volatility last 2 min
if isEmpty480 == False:
df_sub_480 = bucketQuery480.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_480 = bucketQuery480.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_480 = bucketQuery480.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_480 = bucketQuery480.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_480 = bucketQuery480.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_480 = pd.concat([times_pd,df_sub_480['wap'],df_sub2_480['wap2'],df_sub3_480['wap3'],df_sub4_480['wap4'],df_sub5_480['mid_price']],axis=1)
df_sub_480 = df_sub_480.rename(columns={'wap': 'rv_480', 'wap2_480': 'rv2', 'wap3_480': 'rv3', 'wap4':'rv4_480','mid_price':'rv5_480'})
else: # 0 volatility
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_480'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_480'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_480'])
zero_rv4 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv4_480'])
zero_rv5 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv5_480'])
df_sub_480 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv3.append(df_sub_480)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_fin.append(df_sub_book_feats)
if isEmpty300 == False:
df_sub_book_feats_300 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={0:'embedding'})
df_sub_book_feats_300[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats_300.embedding.tolist(), index=df_sub_book_feats_300.index)
df_sub_book_feats_300['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats_300['time_id']]
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats_300 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin2.append(df_sub_book_feats_300)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, | |
<filename>update_leds.py<gh_stars>1-10
#!/usr/bin/python3
"""
# update_leds.py
# Moved all of the airport specific data / metar analysis functions to update_airport.py
# This module creates a class updateLEDs that is specifically focused around
# managing a string of LEDs.
#
# All of the functions to initialise, manipulate, wipe, change the LEDs are
# being included here.
#
# This also includes the wipe patterns from wipes-v4.py
#
# As this transition completes, all older code will be removed from here, so that the focus is only
# on managing an LED strip
#
# metar-v4.py - by <NAME>. Capable of displaying METAR data, TAF or MOS data. Using a rotary switch to select 1 of 12 positions
# Updated to run under Python 3.7
# Added Sleep Timer routine to turn-off map at night if desired.
# Added the ability to display TAF or MOS data along with METAR's
# Note: MOS data is only available for United States, Puerto Rico, and the U.S. Virgin Islands.
# The timeframe of the TAF, MOS data to display can be selected via the rotary switch. A switch with up to 12 positions can be used.
# If no Rotary Switch is used, this script's config will set default data to display.
# Added routine by by <NAME> to decode flight category if flight category is not provided by the FAA.
# Fixed bug that wouldn't allow the last airport to be 'NULL' without causing all LED's to show white.
# Added auto restart when config.py is changed, so settings will be automatically re-loaded.
# Added internet availability check and retry if necessary. This should help when power is disrupted and board reboots before router does.
# Added Logging capabilities which is stored in /NeoSectional/logs/logfile.log with 3 backup files for older logfile data.
# Added ability to specify specific LED pins to reverse the normal rgb_grb setting. For mixing models of LED strings.
# Added a Heat Map of what airports the user has landed at. Not available through Rotary switch. Only Web interface.
# Added new wipes, some based on lat/lon of airports
# Fixed bug where wipes would execute twice on map startup.
# Added admin.py for behinds the scenes variables to be stored. i.e. use_mos=1 to determine if bash files should or should not download MOS data.
# Added ability to detect a Rotary Switch is NOT installed and react accordingly.
# Added logging of Current RPI IP address whenever FAA weather update is retrieved
# Fixed bug where TAF XML reports OVC without a cloud level agl. It uses vert_vis_ft as a backup.
# Fixed bug when debug mode is changed to 'Debug'.
# Switch Version control over to Github at https://github.com/markyharris/livesectional
# Fixed METAR Decode routine to handle FAA results that don't include flight_category and forecast fields.
# Added routine to check time and reboot each night if setting in admin.py are set accordingly.
# Fixed bug that missed lowest sky_condition altitude on METARs not reporting flight categories.
"""
# This version retains the features included in metar-v3.py, including hi-wind blinking and lightning when thunderstorms are reported.
# However, this version adds representations for snow, rain, freezing rain, dust sand ash, and fog when reported in the metar.
# The LED's will show the appropriate color for the reported flight category (vfr, mvfr, ifr, lifr) then blink a specific color for the weather
# For instance, an airport reporting IFR with snow would display Red then blink white for a short period to denote snow. Blue for rain,
# purple for freezing rain, brown for dust sand ash, and silver for fog. This makes for a colorful map when weather is in the area.
# A home airport feature has been added as well. When enabled, the map can be dimmed in relation to the home airport as well as
# have the home alternate between weather color and a user defined marker color(s).
# Most of these features can be disabled to downgrade the map display in the user-defined variables below.
# For detailed instructions on building an Aviation Map, visit http://www.livesectional.com
# Hardware features are further explained on this site as well. However, this software allows for a power-on/update weather switch,
# and Power-off/Reboot switch. The use of a display is handled by metar-display.py and not this script.
# Flight Category Definitions. (https://www.aviationweather.gov/taf/help?page=plot)
# +--------------------------------------+---------------+-------------------------------+-------+----------------------------+
# |Category |Color |Ceiling | |Visibility |
# |--------------------------------------+---------------+-------------------------------+-------+----------------------------+
# |VFR Visual Flight Rules |Green |greater than 3,000 feet AGL |and |greater than 5 miles |
# |MVFR Marginal Visual Flight Rules |Blue |1,000 to 3,000 feet AGL |and/or |3 to 5 miles |
# |IFR Instrument Flight Rules |Red |500 to below 1,000 feet AGL |and/or |1 mile to less than 3 miles |
# |LIFR Low Instrument Flight Rules |Magenta | below 500 feet AGL |and-or |less than 1 mile |
# +--------------------------------------+---------------+-------------------------------+-------+----------------------------+
# AGL = Above Ground Level
# RPI GPIO Pinouts reference
###########################
# 3V3 (1) (2) 5V #
# GPIO2 (3) (4) 5V #
# GPIO3 (5) (6) GND #
# GPIO4 (7) (8) GPIO14 #
# GND (9) (10) GPIO15 #
# GPIO17 (11) (12) GPIO18 #
# GPIO27 (13) (14) GND #
# GPIO22 (15) (16) GPIO23 #
# 3V3 (17) (18) GPIO24 #
# GPIO10 (19) (20) GND #
# GPIO9 (21) (22) GPIO25 #
# GPIO11 (23) (24) GPIO8 #
# GND (25) (26) GPIO7 #
# GPIO0 (27) (28) GPIO1 #
# GPIO5 (29) (30) GND #
# GPIO6 (31) (32) GPIO12 #
# GPIO13 (33) (34) GND #
# GPIO19 (35) (36) GPIO16 #
# GPIO26 (37) (38) GPIO20 #
# GND (39) (40) GPIO21 #
###########################
# Import needed libraries
# Removing URL related actions from update_leds
# import urllib.request
# import urllib.error
# import urllib.parse
# import socket
# import xml.etree.ElementTree as ET
import time
from datetime import datetime
from datetime import timedelta
from datetime import time as time_
import sys
# import os
# from os.path import getmtime
import random
import collections
import re
import ast
import RPi.GPIO as GPIO
from rpi_ws281x import * # works with python 3.7. sudo pip3 install rpi_ws281x
# Moved logging activities to debugging.py
# import logging
# import logzero # had to manually install logzero. https://logzero.readthedocs.io/en/latest/
# from logzero import logger
# import config # Config.py holds user settings used by the various scripts
# import admin
import debugging
import utils
import colors
class UpdateLEDs:
""" Class to manage LSD self.strips """
def __init__(self, conf, airport_database):
# ****************************************************************************
# * User defined items to be set below - Make changes to config.py, not here *
# ****************************************************************************
self.conf = conf
self.airport_database = airport_database
# list of pins that need to reverse the rgb_grb setting. To accommodate two different models of LED's are used.
# self.rev_rgb_grb = self.conf.rev_rgb_grb # [] # ['1', '2', '3', '4', '5', '6', '7', '8']
# Specific Variables to default data to display if Rotary Switch is not installed.
# hour_to_display # Offset in HOURS to choose which TAF/MOS to display
self.hour_to_display = self.conf.get_int("rotaryswitch", "time_sw0")
# metar_taf_mos # 0 = Display TAF, 1 = Display METAR, 2 = Display MOS, 3 = Heat Map (Heat map not controlled by rotary switch)
self.metar_taf_mos = self.conf.get_int("rotaryswitch", "data_sw0")
# Set toggle_sw to an initial value that forces rotary switch to dictate data displayed
self.toggle_sw = -1
# MOS/TAF Config settings
# self.prob = self.conf.prob # probability threshhold in Percent to assume reported weather will be displayed on map or not. MOS Only.
# Heat Map settings
# self.bin_grad = self.conf.bin_grad # 0 = Binary display, 1 = Gradient display
# self.fade_yesno = self.conf.fade_yesno # 0 = No, 1 = Yes, if using gradient display, fade in/out the home airport color. will override use_homeap.
# self.use_homeap = self.conf.use_homeap # 0 = No, 1 = Yes, Use a separate color to denote home airport.
# delay in fading the home airport if used
self.fade_delay = conf.get_float("rotaryswitch", "fade_delay")
# MOS Config settings
# self.prob = self.conf.prob # probability threshhold in Percent to assume reported weather will be displayed on map or not.
# Specific settings for on/off timer. Used to turn off LED's at night if desired.
# Verify Raspberry Pi is set to the correct time zone, otherwise the timer will be off.
# self.usetimer = self.conf.usetimer # 0 = No, 1 = Yes. Turn the timer on or |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.