input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
time):
return len([1 for elem in self._infections_dict.values() if elem.get(CONTRACTION_TIME, np.inf) <= time])
def mean_day_increase_until(self, time):
    """Return the mean of the recorded per-day percentage increases up to *time*.

    :param time: upper bound (inclusive) on the day keys of
        ``self._per_day_increases`` that are averaged.
    :return: arithmetic mean of the qualifying values, or 0.0 when no
        recorded day is <= *time*.
    """
    mean_increase = 0.0
    i = 0
    for day, increase in self._per_day_increases.items():
        if day <= time:
            # Incremental (running) mean over the qualifying days.
            mean_increase = (mean_increase * i + increase) / (i + 1)
            # BUG FIX: the counter was never advanced, so the loop kept
            # i == 0 and the function returned the *last* qualifying value
            # instead of the mean of all of them.
            i += 1
    return mean_increase
def detected_cases(self, df_r1):
    """Return the detection times of validly detected cases, sorted ascending.

    A row counts as detected when ``tdetection`` is present and is not later
    than the recovery time (and, when any deaths occurred, not later than the
    death time either).
    """
    has_detection = ~df_r1.tdetection.isna()
    # Exclude detections that would have happened after recovery.
    after_recovery = np.logical_and(~df_r1.trecovery.isna(),
                                    df_r1.tdetection > df_r1.trecovery)
    valid = np.logical_and(has_detection, ~after_recovery)
    # Only apply the death-time filter when at least one death is recorded.
    if len(df_r1[~df_r1.tdeath.isna()]) > 0:
        after_death = np.logical_and(~df_r1.tdeath.isna(),
                                     df_r1.tdetection > df_r1.tdeath)
        valid = np.logical_and(valid, ~after_death)
    return df_r1[valid].sort_values(by='tdetection').tdetection
@staticmethod
def store_parameter(simulation_output_dir, parameter, filename):
    """Pickle *parameter* into ``simulation_output_dir/filename``."""
    target_path = os.path.join(simulation_output_dir, filename)
    with open(target_path, 'wb') as handle:
        pickle.dump(parameter, handle)
def _save_population_parameters(self, simulation_output_dir):
    """Pickle the per-person simulation state into *simulation_output_dir*.

    Always writes infection/detection/quarantine status; the (larger)
    expected-case-severity mapping is written only when the
    SAVE_EXPECTED_SEVERITY parameter is enabled.
    """
    # BUG FIX (dead code): a run_id string was computed here from
    # time.monotonic() and RANDOM_SEED but never used; removed.
    if self._params[SAVE_EXPECTED_SEVERITY]:
        self.store_parameter(simulation_output_dir, self._expected_case_severity, 'expected_case_severity.pkl')
    self.store_parameter(simulation_output_dir, self._infection_status, 'infection_status.pkl')
    self.store_parameter(simulation_output_dir, self._detection_status, 'detection_status.pkl')
    self.store_parameter(simulation_output_dir, self._quarantine_status, 'quarantine_status.pkl')
def _save_dir(self, prefix=''):
    """Create and return a unique output directory for one run.

    The directory name combines an optional *prefix*, the basename of the
    params file, a monotonic-clock nanosecond stamp and the random seed.
    """
    base_name = os.path.splitext(os.path.basename(self.params_path))[0]
    parts = [base_name] if not prefix else [prefix, base_name]
    parts.append(str(int(time.monotonic() * 1e9)))
    parts.append(f'{self._params[RANDOM_SEED]}')
    run_id = '_'.join(parts)
    simulation_output_dir = os.path.join(self._params[OUTPUT_ROOT_DIR],
                                         self._params[EXPERIMENT_ID],
                                         run_id)
    # Fails loudly if the directory already exists (run ids are unique).
    os.makedirs(simulation_output_dir)
    return simulation_output_dir
def save_serial_interval(self, simulation_output_dir):
    """Summarise and persist the collected serial intervals.

    Writes the raw samples as .npy plus a human-readable stats file, and
    returns the median serial interval (NaN when no samples were collected).
    """
    if not self.serial_intervals:
        return np.nan
    intervals = np.array(self.serial_intervals)
    median_value = np.median(intervals)
    stats = scipy.stats.describe(intervals)
    summary = (f'serial interval: measured from {self._params[SERIAL_INTERVAL][MIN_TIME]}'
               f' to {self._params[SERIAL_INTERVAL][MAX_TIME]};'
               f' median={median_value}, stats describe: {stats}')
    logger.info(summary)
    np.save(os.path.join(simulation_output_dir, 'serial_intervals.npy'), intervals)
    stats_path = os.path.join(simulation_output_dir, 'serial_interval_stats.txt')
    with open(stats_path, "w") as out:
        out.write(summary)
    return median_value
def log_outputs(self, simulation_output_dir):
    """Persist all artefacts of a finished run into *simulation_output_dir*.

    Saves population state, copies the input files for reproducibility,
    records the git state of the working tree, writes serial-interval stats,
    and (optionally) renders the visualization.
    """
    self._save_population_parameters(simulation_output_dir)
    # Keep a copy of the input JSON next to the outputs for reproducibility.
    copyfile(self.params_path, os.path.join(simulation_output_dir,
                                            f'input_{os.path.basename(self.params_path)}'))
    if self._params[SAVE_INPUT_DATA]:
        copyfile(self.df_individuals_path, os.path.join(simulation_output_dir,
                                                        f'input_{os.path.basename(self.df_individuals_path)}'))
        # The households CSV is shared by every run of an experiment,
        # so it is written only once, at the experiment level.
        household_input_path = os.path.join(self._params[OUTPUT_ROOT_DIR], self._params[EXPERIMENT_ID],
                                            'input_df_households.csv')
        if not os.path.exists(household_input_path):
            self._df_households.to_csv(household_input_path)
    # Record the exact git state (branch, reflog, status) used for this run.
    repo = Repo(config.ROOT_DIR)
    git_active_branch_log = os.path.join(simulation_output_dir, 'git_active_branch_log.txt')
    with open(git_active_branch_log, 'w') as f:
        f.write(f'Active branch name {repo.active_branch.name}\n')
        f.write(str(repo.active_branch.log()))
    git_status = os.path.join(simulation_output_dir, 'git_status.txt')
    with open(git_status, 'w') as f:
        f.write(repo.git.status())
    serial_interval = self.save_serial_interval(simulation_output_dir)
    if self._params[ENABLE_VISUALIZATION]:
        self._vis.visualize_simulation(simulation_output_dir, serial_interval, self.fear,
                                       self.active_people, self._max_time_offset, self.detected_cases,
                                       self.df_progression_times,
                                       self.df_infections
                                       )
def update_max_time_offset(self):
    """Anchor the time axis at the moment enough detections have occurred.

    Sets ``_max_time_offset`` (once) to the current global time when the
    configured number of detections is reached, and snapshots the number of
    active people for later statistics.
    """
    if not self._params[MOVE_ZERO_TIME_ACCORDING_TO_DETECTED]:
        return
    if self._max_time_offset != np.inf:
        return  # offset already fixed earlier
    if self._detected_people < self._params[NUMBER_OF_DETECTED_AT_ZERO_TIME]:
        return  # threshold not reached yet
    self._max_time_offset = self._global_time
    self._init_for_stats = self._active_people
def quick_return_condition(self, initiated_through):
    """ Checks if event of type 'initiated_through' should be abandoned given current situation """
    # Household transmissions are never thinned out.
    if initiated_through == HOUSEHOLD:
        return False
    draw = mocos_helper.rand()
    # Inside a scheduled R-override window, the override fraction replaces
    # the fear-based acceptance probability.
    if initiated_through == CONSTANT and len(self._params[R_OUT_SCHEDULE]) > 0:
        shifted_time = self._global_time - self._max_time_offset
        for window in self._params[R_OUT_SCHEDULE]:
            if window[MIN_TIME] <= shifted_time <= window[MAX_TIME]:
                return draw > window[OVERRIDE_R_FRACTION]
    # Default: abandon the event with probability 1 - fear(kernel).
    return draw > self.fear(initiated_through)
def add_new_infection(self, person_id, infection_status,
                      initiated_by, initiated_through):
    """Register a new infection of *person_id* and schedule its progression.

    :param person_id: the newly infected individual.
    :param infection_status: initial InfectionStatus value for the person.
    :param initiated_by: id of the infector, or None for imported cases.
    :param initiated_through: transmission kernel (e.g. HOUSEHOLD, CONSTANT).
    """
    # Newly infected people start out undetected.
    self._detection_status[person_id] = DetectionStatus.NotDetected.value
    # Record the transmission edge; the key is just a running index.
    self._infections_dict[len(self._infections_dict)] = {
        SOURCE: initiated_by,
        TARGET: person_id,
        CONTRACTION_TIME: self.global_time,
        KERNEL: initiated_through
    }
    # Sample the serial interval only inside the configured measurement
    # window and only for infections with a known infector.
    if self.global_time >= self._params[SERIAL_INTERVAL][MIN_TIME]:
        if self.global_time < self._params[SERIAL_INTERVAL][MAX_TIME]:
            if initiated_by is not None:
                # serial interval = infectee contraction time - infector's TMINUS1
                serial_interval = self.global_time - self._progression_times_dict[initiated_by][TMINUS1]
                self.serial_intervals.append(serial_interval)
    self._affected_people += 1
    self.generate_disease_progression(person_id,
                                      self.global_time,
                                      infection_status)
# 'Event', [TIME, PERSON_INDEX, TYPE, INITIATED_BY, INITIATED_THROUGH, ISSUED_TIME])
def process_event(self, event) -> bool:
    """Advance the simulation by one queued event.

    Dispatches on the event type (TMINUS1/T0/T1/T2/TDEATH/TRECOVERY/
    TDETECTION), updating infection, detection and quarantine state.

    :param event: a namedtuple-like Event (see the field list above).
    :return: False when the event lies beyond the simulation horizon
        (signalling the caller to stop), True otherwise.
    :raises ValueError: for an unrecognised event type.
    """
    type_ = getattr(event, TYPE)
    # NOTE: the local name shadows the `time` module inside this function.
    time = getattr(event, TIME)
    # Periodic progress logging: fires whenever the event time crosses a
    # LOG_TIME_FREQ boundary relative to the previous global time.
    if int(time / self._params[LOG_TIME_FREQ]) != int(self._global_time / self._params[LOG_TIME_FREQ]):
        memory_use = ps.memory_info().rss / 1024 / 1024
        fearC = self.fear(CONSTANT)
        fearH = self.fear(HOUSEHOLD)
        per_day_increase = 0
        if self._last_affected:
            per_day_increase = (self.affected_people - self._last_affected)/self._last_affected*100
        self._last_affected = self.affected_people
        self._per_day_increases[int(self._global_time)] = per_day_increase
        logger.info(f'Time: {time:.2f}'
                    f'\tAffected: {self.affected_people}'
                    f'\tDetected: {self.detected_people}'
                    f'\tQuarantined: {self.quarantined_people}'
                    f'\tPer-day-increase: {per_day_increase:.2f} %'
                    f'\tActive: {self.active_people}'
                    f'\tDeaths: {self.deaths}'
                    f'\tFearC: {fearC}'
                    f'\tFearH: {fearH}'
                    f'\tPhysical memory use: {memory_use:.2f} MB')
    self._global_time = time
    # Stop processing once the (offset-shifted) time horizon is exceeded.
    if self._global_time > self._max_time + self._max_time_offset:
        return False
    person_id = getattr(event, PERSON_INDEX)
    initiated_by = getattr(event, INITIATED_BY)
    initiated_through = getattr(event, INITIATED_THROUGH)
    # TODO the remaining attribute will be useful when we will take into account for backtracing
    # issued_time = getattr(event, ISSUED_TIME)
    if initiated_by is None and initiated_through != DISEASE_PROGRESSION:
        # Imported case (no infector): infect only if still healthy.
        if self.get_infection_status(person_id) == InfectionStatus.Healthy:
            if type_ == TMINUS1:
                self.add_new_infection(person_id, InfectionStatus.Contraction.value,
                                       initiated_by, initiated_through)
            elif type_ == T0:
                self.add_new_infection(person_id, InfectionStatus.Infectious.value,
                                       initiated_by, initiated_through)
    elif type_ == TMINUS1:
        # check if this action is still valid first
        try:
            initiated_inf_status = self._infection_status[initiated_by]
        except KeyError:
            logging.error(f'infection status should not be blank for infection! key: {initiated_by}')
        # NOTE(review): if the KeyError above fires, initiated_inf_status is
        # unbound here and the next line raises NameError — confirm intended.
        if initiated_inf_status in active_states:
            # Possibly abandon the transmission (fear / R-override thinning).
            if self.quick_return_condition(initiated_through):
                return True
            current_status = self.get_infection_status(person_id)
            if current_status == InfectionStatus.Healthy:
                new_infection = False
                # TODO below is a spaghetti code that should be sorted out! SORRY!
                if initiated_through != HOUSEHOLD:
                    # Non-household transmission requires the infector to be
                    # out and about, and neither party quarantined.
                    if initiated_inf_status != InfectionStatus.StayHome:
                        new_infection = True
                    if self.get_quarantine_status_(initiated_by) == QuarantineStatus.Quarantine:
                        new_infection = False
                    if self.get_quarantine_status_(person_id) == QuarantineStatus.Quarantine:
                        new_infection = False
                else: # HOUSEHOLD kernel:
                    # Within a household, transmission is always possible.
                    new_infection = True
                if new_infection:
                    self.add_new_infection(person_id, InfectionStatus.Contraction.value,
                                           initiated_by, initiated_through)
    elif type_ == T0:
        # Transition Contraction -> Infectious.
        if self.get_infection_status(person_id) == InfectionStatus.Contraction:
            self.handle_t0(person_id)
    elif type_ == T1:
        # Transition Infectious -> StayHome (symptoms onset).
        if self.get_infection_status(person_id) == InfectionStatus.Infectious:
            self._infection_status[person_id] = InfectionStatus.StayHome.value
    elif type_ == T2:
        # Hospitalisation; critical cases claim an ICU bed.
        if self.get_infection_status(person_id) in [
            InfectionStatus.StayHome,
            InfectionStatus.Infectious
        ]:
            self._infection_status[person_id] = InfectionStatus.Hospital.value
            if self._expected_case_severity[person_id] == ExpectedCaseSeverity.Critical:
                self._icu_needed += 1
    elif type_ == TDEATH:
        if self.get_infection_status(person_id) not in [
            InfectionStatus.Death,
            InfectionStatus.Recovered
        ]:
            self._deaths += 1
            # Free the ICU bed only if the person had already reached T2.
            if self._expected_case_severity[person_id] == ExpectedCaseSeverity.Critical:
                if self._progression_times_dict[person_id][T2] < self.global_time:
                    self._icu_needed -= 1
            self._active_people -= 1
            self._infection_status[person_id] = InfectionStatus.Death.value
    elif type_ == TRECOVERY: # TRECOVERY is exclusive with regards to TDEATH (when this comment was added)
        if self.get_infection_status(person_id) not in [
            InfectionStatus.Recovered,
            InfectionStatus.Death
        ]:
            # People seeded as already-recovered initial conditions were
            # never counted as active, so only decrement for real cases.
            if initiated_through != INITIAL_CONDITIONS:
                self._active_people -= 1
                if self._expected_case_severity[person_id] == ExpectedCaseSeverity.Critical:
                    if self._progression_times_dict[person_id][T2] < self.global_time:
                        self._icu_needed -= 1
            self._infection_status[person_id] = InfectionStatus.Recovered
            self._immune_people += 1
    elif type_ == TDETECTION:
        if self.get_infection_status(person_id) not in [
            InfectionStatus.Recovered,
            InfectionStatus.Healthy
        ]:
            if self.get_detection_status_(person_id) == DetectionStatus.NotDetected:
                self._detection_status[person_id] = DetectionStatus.Detected.value
                self._detected_people += 1
                self.update_max_time_offset()
                # Quarantine the whole household of the detected person.
                household_id = self._individuals_household_id[person_id]
                for inhabitant in self._households_inhabitants[household_id]:
                    if self.get_quarantine_status_(inhabitant) == QuarantineStatus.NoQuarantine:
                        if self.get_infection_status(inhabitant) != InfectionStatus.Death:
                            self._quarantine_status[inhabitant] = QuarantineStatus.Quarantine.value
                            self._quarantined_people += 1
                            if inhabitant not in self._progression_times_dict:
                                self._progression_times_dict[inhabitant] = {}
                            self._progression_times_dict[inhabitant][QUARANTINE] = self.global_time
                            if self.get_infection_status(inhabitant) in [InfectionStatus.Infectious,
                                                                         InfectionStatus.StayHome]:
                                # TODO: this has to be implemented better, just a temporary solution:
                                if self._progression_times_dict[inhabitant].get(TDETECTION, None) is None:
                                    # Quarantined symptomatic members get detected 2 days later.
                                    new_detection_time = self.global_time + 2.0
                                    self._progression_times_dict[inhabitant][TDETECTION] = new_detection_time
                                    ev = Event(new_detection_time, inhabitant, TDETECTION,
                                               person_id, 'quarantine_followed_detection',
                                               self.global_time)
                                    self.append_event(ev)
    else:
        raise ValueError(f'unexpected status of event: {event}')
    return True
def run_simulation(self):
    """Run the full simulation, once per configured random seed.

    Drains the global event queue per seed, accumulates a semicolon-separated
    results log line per run, and finally writes the aggregated results (and
    optionally a scenario visualization) to disk.
    """
    def _inner_loop(iter):
        """Process events until the queue empties or a stop threshold hits.

        Returns True when the run is an 'outbreak' (ICU capacity exceeded or
        the stop-simulation threshold reached), False otherwise.
        NOTE(review): the parameter shadows the builtin `iter` and is unused.
        """
        threshold_type = self._params[STOP_SIMULATION_THRESHOLD_TYPE]
        value_to_be_checked = None
        start = time.time()
        times_mean = 0.0
        i = 0
        while not q.empty():
            event_start = time.time()
            # Pick the metric the stop condition is expressed in.
            if threshold_type == PREVALENCE:
                value_to_be_checked = self.affected_people
            elif threshold_type == DETECTIONS:
                value_to_be_checked = self.detected_people
            if value_to_be_checked is None:
                logging.error(f"we have an error here")
            if value_to_be_checked >= self.stop_simulation_threshold:
                logging.info(
                    f"The outbreak reached a high number {self.stop_simulation_threshold} ({threshold_type})")
                break
            event = q.get()
            # process_event returns False past the simulation horizon.
            if not self.process_event(event):
                logging.info(f"Processing event {event} returned False")
                q.task_done()
                break
            q.task_done()
            event_end = time.time()
            elapsed = event_end - event_start
            # Running mean of the per-event processing time.
            times_mean = ( times_mean * i + elapsed ) / (i + 1)
            i += 1
        end = time.time()
        print(f'Sim runtime {end - start}, event proc. avg time: {times_mean}')
        # cleaning up priority queue:
        while not q.empty():
            q.get_nowait()
            q.task_done()
        simulation_output_dir = self._save_dir()
        self.save_progression_times(os.path.join(simulation_output_dir, 'output_df_progression_times.csv'))
        self.save_potential_contractions(os.path.join(simulation_output_dir, 'output_df_potential_contractions.csv'))
        if self._params[LOG_OUTPUTS]:
            logger.info('Log outputs')
            self.log_outputs(simulation_output_dir)
        # Outbreak classification: ICU saturated or threshold reached.
        if self._icu_needed >= self._params[ICU_AVAILABILITY]:
            return True
        if value_to_be_checked >= self.stop_simulation_threshold:
            return True
        return False
    seeds = None
    if isinstance(self._params[RANDOM_SEED], str):
        # SECURITY: evaluates an arbitrary expression from the config file.
        seeds = eval(self._params[RANDOM_SEED]) # TODO: warning, this is unsafe! not use in production
    elif isinstance(self._params[RANDOM_SEED], int):
        seeds = [self._params[RANDOM_SEED]]
    runs = 0
    # CSV-like header for the aggregated results file (semicolon-separated).
    output_log = 'Last_processed_time;Total_#Affected;Total_#Detected;Total_#Deceased;Total_#Quarantined;'\
                 'c;c_norm;Init_#people;Band_hit_time;Subcritical;runs;fear;detection_rate;'\
                 'incidents_per_last_day;over_icu;hospitalized;zero_time_offset;total_#immune'
    if self._params[ENABLE_ADDITIONAL_LOGS]:
        output_log += ';Prevalence_30days;Prevalence_60days;Prevalence_90days;Prevalence_120days;'\
                      'Prevalence_150days;Prevalence_180days;Prevalence_360days;'\
                      'increase_10;increase_20;increase_30;increase_40;increase_50;increase_100;increase_150'
    output_log += '\n'
    for i, seed in enumerate(seeds):
        runs += 1
        self.parse_random_seed(seed)
        self.setup_simulation()
        logger.info('Filling queue based on initial conditions...')
        self._fill_queue_based_on_initial_conditions()
        logger.info('Filling queue based on auxiliary functions...')
        self._fill_queue_based_on_auxiliary_functions()
        logger.info('Initialization step is done!')
        outbreak = _inner_loop(i + 1)
        last_processed_time = self._global_time
        c = self._params[TRANSMISSION_PROBABILITIES][CONSTANT]
        c_norm = c * self._params[AVERAGE_INFECTIVITY_TIME_CONSTANT_KERNEL]
        subcritical = self._active_people < self._init_for_stats / 2 # at 200 days
        bandtime = self.band_time
        #if bandtime:
        #    return 0
        fear_ = self.fear(CONSTANT)
        detection_rate = self._params[DETECTION_MILD_PROBA]
        affected = self.affected_people
        detected = self.detected_people
        deceased = self.deaths
        quarantined = self.quarantined_people
        # Difference of cumulative prevalence over the last simulated day.
        incidents_per_last_day = self.prevalance_at(self._global_time) - self.prevalance_at(self._global_time - 1)
        hospitalized = self._icu_needed
        zero_time_offset = self._max_time_offset
        immune = self._immune_people
        output_add = f'{last_processed_time };{affected};{detected};{deceased};{quarantined};{c};{c_norm};'\
                     f'{self._init_for_stats};{bandtime};{subcritical};{runs};{fear_};{detection_rate};'\
                     f'{incidents_per_last_day};{outbreak};{hospitalized};{zero_time_offset};{immune}'
        if self._params[ENABLE_ADDITIONAL_LOGS]:
            prev30 = self.prevalance_at(30)
            prev60 = self.prevalance_at(60)
            prev90 = self.prevalance_at(90)
            prev120 = self.prevalance_at(120)
            prev150 = self.prevalance_at(150)
            prev180 = self.prevalance_at(180)
            prev360 = self.prevalance_at(360)
            mean_increase_at_10 = self.mean_day_increase_until(10)
            mean_increase_at_20 = self.mean_day_increase_until(20)
            mean_increase_at_30 = self.mean_day_increase_until(30)
            mean_increase_at_40 = self.mean_day_increase_until(40)
            mean_increase_at_50 = self.mean_day_increase_until(50)
            mean_increase_at_100 = self.mean_day_increase_until(100)
            mean_increase_at_150 = self.mean_day_increase_until(150)
            output_add += f'{prev30};{prev60};{prev90};{prev120};{prev150};{prev180};{prev360};'\
                          f'{mean_increase_at_10};{mean_increase_at_20};{mean_increase_at_30};'\
                          f'{mean_increase_at_40};{mean_increase_at_50};{mean_increase_at_100};'\
                          f'{mean_increase_at_150}'
        output_add += '\n'
        logger.info(output_add)
        output_log = f'{output_log}{output_add}'
    logger.info(output_log)
    simulation_output_dir = self._save_dir('aggregated_results')
    output_log_file = os.path.join(simulation_output_dir, 'results.txt')
    if self._params[ENABLE_VISUALIZATION]:
        self._vis.visualize_scenario(simulation_output_dir)
    with open(output_log_file, "w") as out:
        out.write(output_log)
def setup_simulation(self):
self._init_for_stats = 0 # TODO support different import methods
if isinstance(self._params[INITIAL_CONDITIONS], dict):
cardinalities = self._params[INITIAL_CONDITIONS][CARDINALITIES]
self._init_for_stats = cardinalities.get(CONTRACTION, 0) + cardinalities.get(INFECTIOUS, 0)
# TODO and think how to | |
commandName):
'''
Return the source legend of an @button/@command node.
'G' leoSettings.leo
'M' myLeoSettings.leo
'L' local .leo File
' ' not an @command or @button node
'''
c = ga.c
if commandName.startswith('@'):
d = c.commandsDict
func = d.get(commandName)
if hasattr(func, 'source_c'):
c2 = func.source_c
fn2 = c2.shortFileName().lower()
if fn2.endswith('myleosettings.leo'):
return 'M'
elif fn2.endswith('leosettings.leo'):
return 'G'
else:
return 'L'
else:
return '?'
else:
return ' '
#@-others
#@+node:ekr.20061031131434.74: ** class KeyHandlerClass
class KeyHandlerClass(object):
'''
A class to support emacs-style commands.
c.k is an instance of this class.
'''
#@+others
#@+node:ekr.20061031131434.75: *3* k.Birth
#@+node:ekr.20061031131434.76: *4* k.__init__& helpers
def __init__(self, c):
    '''Create a key handler for c.

    Initialization order matters: ivars are defined first, then settings are
    reloaded, then the key-name tables and command lists are built, and only
    then is the auto-completer created.
    '''
    trace = (False or g.trace_startup) and not g.unitTesting
    if trace: g.es_debug('(k)')
    self.c = c
    self.dispatchEvent = None
    self.fnc = None
        # A singleton defined in k.finishCreate.
    self.getArgInstance = None
        # A singleton defined in k.finishCreate.
    self.inited = False
        # Set at end of finishCreate.
    self.killedBindings = []
        # A list of commands whose bindings have been set to None in the local file.
    self.swap_mac_keys = False
        # How to init this??
    self.w = None
        # Note: will be None for NullGui.
    # Generalize...
    self.x_hasNumeric = ['sort-lines', 'sort-fields']
    self.altX_prompt = 'full-command: '
    # Access to data types defined in leoKeys.py
    self.KeyStroke = g.KeyStroke
    # Define all ivars...
    self.defineExternallyVisibleIvars()
    self.defineInternalIvars()
    self.reloadSettings()
    self.defineTkNames()
    self.defineSpecialKeys()
    self.defineSingleLineCommands()
    self.defineMultiLineCommands()
    self.autoCompleter = AutoCompleterClass(self)
    self.qcompleter = None # Set by AutoCompleter.start.
    self.setDefaultUnboundKeyAction()
    self.setDefaultEditingAction()
#@+node:ekr.20061031131434.78: *5* k.defineExternallyVisibleIvars
def defineExternallyVisibleIvars(self):
    '''Define ivars that are part of k's public interface.'''
    self.abbrevOn = False
        # True: abbreviations are on.
    self.arg = ''
        # The value returned by k.getArg.
    self.argSelectedText = '' # The selected text in state 0.
    self.commandName = None # The name of the command being executed.
    self.funcReturn = None # For k.simulateCommand
    self.functionTail = None # For commands that take minibuffer arguments.
    # These are true globals
    self.getArgEscapes = []
    self.getArgEscapeFlag = False # A signal that the user escaped getArg in an unusual way.
    self.givenArgs = [] # New in Leo 4.4.8: arguments specified after the command name in k.simulateCommand.
    self.inputModeBindings = {}
    self.inputModeName = '' # The name of the input mode, or None.
    self.modePrompt = '' # The mode promopt.
    self.negativeArg = False
    self.newMinibufferWidget = None # Usually the minibuffer restores focus. This overrides this default.
    # self.regx = g.bunch(iter=None,key=None)
    self.repeatCount = None
    self.state = g.bunch(kind=None, n=None, handler=None)
#@+node:ekr.20061031131434.79: *5* k.defineInternalIvars
def defineInternalIvars(self):
    '''Define internal ivars of the KeyHandlerClass class.'''
    self.abbreviationsDict = {}
        # Abbreviations created by @alias nodes.
    # Previously defined bindings...
    self.bindingsDict = {}
        # Keys are Tk key names, values are lists of ShortcutInfo's.
    # Previously defined binding tags.
    self.bindtagsDict = {}
        # Keys are strings (the tag), values are 'True'
    self.commandHistory = []
    self.commandIndex = 0
        # List/stack of previously executed commands.
        # Up arrow will select commandHistory[commandIndex]
    self.masterBindingsDict = {}
        # Keys are scope names: 'all','text',etc. or mode names.
        # Values are dicts: keys are strokes, values are ShortcutInfo's.
    self.masterGuiBindingsDict = {}
        # Keys are strokes; value is True;
    # Special bindings for k.fullCommand...
    self.mb_copyKey = None
    self.mb_pasteKey = None
    self.mb_cutKey = None
    # Keys whose bindings are computed by initSpecialIvars...
    self.abortAllModesKey = None
    self.autoCompleteForceKey = None
    self.demoNextKey = None # New support for the demo.py plugin.
    self.demoPrevKey = None # New support for the demo.py plugin.
    self.fullCommandKey = None
    self.universalArgKey = None
    # Used by k.masterKeyHandler...
    self.stroke = None
    self.mb_event = None
    self.mb_history = []
    self.mb_help = False
    self.mb_helpHandler = None
    # Important: these are defined in k.defineExternallyVisibleIvars...
        # self.getArgEscapes = []
        # self.getArgEscapeFlag
    # For onIdleTime...
    self.idleCount = 0
    # For modes...
    self.modeBindingsDict = {}
    self.modeWidget = None
    self.silentMode = False
#@+node:ekr.20080509064108.7: *5* k.defineMultiLineCommands
def defineMultiLineCommands(self):
    '''Define the list of commands that operate on multi-line text.

    These commands are disabled while the minibuffer (a single-line
    widget) has focus; compare k.singleLineCommandList.
    '''
    k = self
    k.multiLineCommandList = [
        # EditCommandsClass
        'add-space-to-lines',
        'add-tab-to-lines',
        'back-page',
        'back-page-extend-selection',
        'back-paragraph',
        'back-paragraph-extend-selection',
        'back-sentence',
        'back-sentence-extend-selection',
        'backward-kill-paragraph',
        'beginning-of-buffer',
        'beginning-of-buffer-extend-selection',
        'center-line',
        'center-region',
        'clean-all-lines',
        'clean-lines',
        'downcase-region',
        'end-of-buffer',
        'end-of-buffer-extend-selection',
        'extend-to-paragraph',
        'extend-to-sentence',
        'fill-paragraph',
        'fill-region',
        'fill-region-as-paragraph',
        'flush-lines',
        'forward-page',
        'forward-page-extend-selection',
        'forward-paragraph',
        'forward-paragraph-extend-selection',
        'forward-sentence',
        'forward-sentence-extend-selection',
        'indent-relative',
        'indent-rigidly',
        'indent-to-comment-column',
        'move-lines-down',
        'move-lines-up',
        'next-line',
        'next-line-extend-selection',
        'previous-line',
        'previous-line-extend-selection',
        'remove-blank-lines',
        'remove-space-from-lines',
        'remove-tab-from-lines',
        'reverse-region',
        'reverse-sort-lines',
        'reverse-sort-lines-ignoring-case',
        'scroll-down-half-page',
        'scroll-down-line',
        'scroll-down-page',
        'scroll-up-half-page',
        'scroll-up-line',
        'scroll-up-page',
        'simulate-begin-drag',
        'simulate-end-drag',
        'sort-columns',
        'sort-fields',
        'sort-lines',
        'sort-lines-ignoring-case',
        'split-line',
        'tabify',
        'transpose-lines',
        'untabify',
        'upcase-region',
        # KeyHandlerCommandsClass
        'repeat-complex-command',
        # KillBufferCommandsClass
        'backward-kill-sentence',
        'kill-sentence',
        'kill-region',
        'kill-region-save',
        # QueryReplaceCommandsClass
        'query-replace',
        'query-replace-regex',
        # RectangleCommandsClass
        'clear-rectangle',
        'close-rectangle',
        'delete-rectangle',
        'kill-rectangle',
        'open-rectangle',
        'string-rectangle',
        'yank-rectangle',
        # SearchCommandsClass
        'change',
        'change-then-find',
        'find-next',
        'find-prev',
    ]
#@+node:ekr.20080509064108.6: *5* k.defineSingleLineCommands
def defineSingleLineCommands(self):
    '''Define the list of commands that may run in single-line widgets.

    These commands remain enabled while the minibuffer has focus;
    compare k.multiLineCommandList.
    '''
    k = self
    # These commands can be executed in the minibuffer.
    k.singleLineCommandList = [
        # EditCommandsClass
        'back-to-indentation',
        'back-to-home', # 2010/02/01
        'back-char',
        'back-char-extend-selection',
        'back-word',
        'back-word-extend-selection',
        'backward-delete-char',
        'backward-find-character',
        'backward-find-character-extend-selection',
        'beginning-of-line',
        'beginning-of-line-extend-selection',
        'capitalize-word',
        'delete-char',
        'delete-indentation',
        'delete-spaces',
        'downcase-word',
        'end-of-line',
        'end-of-line-extend-selection',
        'escape',
        'exchange-point-mark',
        'extend-to-line',
        'extend-to-word',
        'find-character',
        'find-character-extend-selection',
        'find-word',
        'find-word-in-line',
        'forward-char',
        'forward-char-extend-selection',
        'forward-end-word',
        'forward-end-word-extend-selection',
        'forward-word',
        'forward-word-extend-selection',
        'insert-newline',
        'insert-parentheses',
        'move-past-close',
        'move-past-close-extend-selection',
        'newline-and-indent',
        'select-all',
        'transpose-chars',
        'transpose-words',
        'upcase-word',
        # KeyHandlerCommandsClass
            # 'auto-complete',
            # 'negative-argument',
            # 'number-command',
            # 'number-command-0',
            # 'number-command-1',
            # 'number-command-2',
            # 'number-command-3',
            # 'number-command-4',
            # 'number-command-5',
            # 'number-command-6',
            # 'number-command-7',
            # 'number-command-8',
            # 'universal-argument',
        # KillBufferCommandsClass
        'backward-kill-word',
        'kill-line',
        'kill-word',
        'kill-ws',
        'yank',
        'yank-pop',
        'zap-to-character',
        # leoCommands
        'cut-text',
        'copy-text',
        'paste-text',
        # MacroCommandsClass
        'call-last-kbd-macro',
        # search commands
        # 'replace-string', # A special case so Shift-Ctrl-r will work after Ctrl-f.
        'set-find-everywhere', # 2011/06/07
        'set-find-node-only', # 2011/06/07
        'set-find-suboutline-only', # 2011/06/07
        'toggle-find-collapses_nodes',
        'toggle-find-ignore-case-option',
        'toggle-find-in-body-option',
        'toggle-find-in-headline-option',
        'toggle-find-mark-changes-option',
        'toggle-find-mark-finds-option',
        'toggle-find-regex-option',
        'toggle-find-reverse-option',
        'toggle-find-word-option',
        'toggle-find-wrap-around-option',
    ]
#@+node:ekr.20070123085931: *5* k.defineSpecialKeys
def defineSpecialKeys(self):
    '''Define k.guiBindNamesDict and k.guiBindNamesInverseDict.
    Important: all gui's use these dictionaries because bindings in
    leoSettings.leo use these representations.'''
    k = self
    # These are defined at http://tcl.activestate.com/man/tcl8.4/TkCmd/keysyms.htm.
    # Important: only the inverse dict is actually used in the new key binding scheme.
    # Tk may return the *values* of this dict in event.keysym fields.
    # Leo will warn if it gets a event whose keysym not in values of this table.
    k.guiBindNamesDict = {
        "&": "ampersand",
        "^": "asciicircum",
        "~": "asciitilde",
        "*": "asterisk",
        "@": "at",
        "\\": "backslash",
        "|": "bar",
        "{": "braceleft",
        "}": "braceright",
        "[": "bracketleft",
        "]": "bracketright",
        ":": "colon", # removed from code.
        ",": "comma",
        "$": "dollar",
        "=": "equal",
        "!": "exclam", # removed from code.
        ">": "greater",
        "<": "less",
        "-": "minus",
        "#": "numbersign",
        '"': "quotedbl",
        "'": "quoteright",
        "(": "parenleft",
        ")": "parenright", # removed from code.
        "%": "percent",
        ".": "period", # removed from code.
        "+": "plus",
        "?": "question",
        "`": "quoteleft",
        ";": "semicolon",
        "/": "slash",
        " ": "space", # removed from code.
        "_": "underscore",
    }
    # The Tk key names translate to themselves.
    for tk_name in k.tkNamesList:
        k.guiBindNamesDict[tk_name] = tk_name
    # Invert the mapping: Tk keysym name -> character (or itself).
    k.guiBindNamesInverseDict = {
        keysym: char for char, keysym in k.guiBindNamesDict.items()}
#@+node:ekr.20070123143428: *5* k.defineTkNames
def defineTkNames(self):
    '''Define k.tkNamesList and k.settingsNameDict.

    tkNamesList holds the canonical key names used throughout Leo's core;
    settingsNameDict maps lowercase aliases (usable in leoSettings.leo)
    to those canonical names.
    '''
    k = self
    # These are the key names used in Leo's core *regardless* of the gui actually in effect.
    # The gui is responsible for translating gui-dependent keycodes into these values.
    k.tkNamesList = (
        # Arrow keys.
        'Left', 'Right', 'Up', 'Down',
        # Page up/down keys.
        'Next', 'Prior',
        # Home end keys.
        'Home', 'End',
            # BUG FIX: the comma after 'End' was missing, so implicit string
            # concatenation produced 'EndCaps_Lock' and dropped both names.
        # Modifier keys.
        'Caps_Lock', 'Num_Lock',
        # F-keys.
        'F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12',
        # All others.
        'Begin', 'Break', 'Clear', 'Delete', 'Escape',
        # Dubious: these are ascii characters!
        # But there is no harm in retaining these in Leo's core.
        'BackSpace', 'Linefeed', 'Return', 'Tab',
    )
    # These keys settings that may be specied in leoSettings.leo.
    # Keys are lowercase, so that case is not significant *for these items only* in leoSettings.leo.
    k.settingsNameDict = {
        'bksp': 'BackSpace', # Dubious: should be '\b'
        'dnarrow': 'Down',
        'esc': 'Escape',
        'ltarrow': 'Left',
        'pageup': 'Prior',
        'pagedn': 'Next',
        'rtarrow': 'Right',
        'uparrow': 'Up',
    }
    # Add lowercase version of special keys.
    for s in k.tkNamesList:
        k.settingsNameDict[s.lower()] = s
#@+at
# The following are not translated, so what appears in the menu is the
# same as what is passed to the gui. Case is significant. Note: the Tk
# documentation states that not all of these may be available on all
# platforms.
#
# Num_Lock, Pause, Scroll_Lock, Sys_Req,
# KP_Add, KP_Decimal, KP_Divide, KP_Enter, KP_Equal,
# KP_Multiply, KP_Separator,KP_Space, KP_Subtract, KP_Tab,
# KP_F1,KP_F2,KP_F3,KP_F4,
# KP_0,KP_1,KP_2,KP_3,KP_4,KP_5,KP_6,KP_7,KP_8,KP_9,
# Insert
#@+node:ekr.20150509035028.1: *4* k.cmd (decorator)
def cmd(name):
'''Command decorator for the leoKeys class.'''
| |
################################################################################
#
# Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
################################################################################
"""Parsers for algebraic expressions coming from UFO, outputting into
different languages/frameworks (Fortran and Pythia8). Uses the PLY 3.3
Lex + Yacc framework"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import logging
import numbers
import os
import re
import sys
from six.moves import input
from six.moves import range
root_path = os.path.split(os.path.dirname(os.path.realpath( __file__ )))[0]
sys.path.append(os.path.join(root_path))
from . import aloha_lib
from .aloha_object import *
import vendor.ply.lex as lex
import vendor.ply.yacc as yacc
from aloha.aloha_lib import KERNEL
logger = logging.getLogger('aloha.parsers')
try:
import madgraph.various.misc as misc
except Exception:
import aloha.misc as misc
# PLY lexer class
class UFOExpressionParser(object):
    """A base class for parsers for algebraic expressions coming from UFO.

    PLY conventions used here (do NOT treat these docstrings as prose):
    - t_* method docstrings are the token regular expressions;
    - p_* method docstrings are the grammar productions.
    Subclasses override/add p_* rules to target a specific output language.
    """
    parsed_string = ""

    def __init__(self, **kw):
        """Initialize the lex and yacc engines for this parser instance."""
        modname = self.__class__.__name__
        self.debugfile = os.path.devnull
        self.tabmodule = os.path.join(root_path, "iolibs", modname + "_" + "parsetab.py")
        lex.lex(module=self, debug=0)
        yacc.yacc(module=self, debug=0, debugfile=self.debugfile,
                  tabmodule=self.tabmodule)

    def parse(self, buf):
        """Parse the string buf and return the translated expression."""
        yacc.parse(buf)
        return self.parsed_string

    # List of tokens and literals
    tokens = (
        'POWER', 'CSC', 'SEC', 'ACSC', 'ASEC',
        'SQRT', 'CONJ', 'RE', 'IM', 'PI', 'COMPLEX', 'FUNCTION',
        'VARIABLE', 'NUMBER'
    )
    literals = "=+-*/(),'"

    # Definition of tokens.
    # Each docstring below IS the token regex (PLY convention); the lookbehind
    # (?<!\w) / lookahead (?=\() pairs ensure whole-word, call-like matches.
    def t_CSC(self, t):
        r'(?<!\w)csc(?=\()'
        return t

    def t_SEC(self, t):
        r'(?<!\w)sec(?=\()'
        return t

    def t_ACSC(self, t):
        r'(?<!\w)acsc(?=\()'
        return t

    def t_ASEC(self, t):
        r'(?<!\w)asec(?=\()'
        return t

    def t_SQRT(self, t):
        r'cmath\.sqrt'
        return t

    def t_PI(self, t):
        r'cmath\.pi'
        return t

    def t_CONJ(self, t):
        r'complexconjugate'
        return t

    def t_IM(self, t):
        r'(?<!\w)im(?=\()'
        return t

    def t_RE(self, t):
        r'(?<!\w)re(?=\()'
        return t

    def t_COMPLEX(self, t):
        r'(?<!\w)complex(?=\()'
        return t

    def t_FUNCTION(self, t):
        r'(cmath\.){0,1}[a-zA-Z_][0-9a-zA-Z_]*(?=\()'
        return t

    def t_VARIABLE(self, t):
        r'[a-zA-Z_][0-9a-zA-Z_]*'
        return t

    t_NUMBER = r'([0-9]+\.[0-9]*|\.[0-9]+|[0-9]+)([eE][+-]{0,1}[0-9]+){0,1}'
    t_POWER = r'\*\*'
    t_ignore = " \t"
    # BUG FIX: raw string; the plain "cmath\." literal relied on an invalid
    # escape sequence (DeprecationWarning, error in future Python versions).
    re_cmath_function = re.compile(r"cmath\.(?P<name>[0-9a-zA-Z_]+)")

    def t_newline(self, t):
        r'\n+'
        t.lexer.lineno += t.value.count("\n")

    def t_error(self, t):
        # Report and skip a character the lexer cannot tokenize.
        logger.error("Illegal character '%s'" % t.value[0])
        t.lexer.skip(1)

    # Build the lexer
    def build(self, **kwargs):
        self.lexer = lex.lex(module=self, **kwargs)

    # Definitions for the PLY yacc parser
    # Parsing rules
    precedence = (
        ('left', '='),
        ('left', '+', '-'),
        ('left', '*', '/'),
        ('right', 'UMINUS'),
        ('left', 'POWER'),
        ('right', 'CSC'),
        ('right', 'SEC'),
        ('right', 'ACSC'),
        ('right', 'ASEC'),
        ('right', 'SQRT'),
        ('right', 'CONJ'),
        ('right', 'RE'),
        ('right', 'IM'),
        ('right', 'FUNCTION'),
        ('right', 'COMPLEX')
    )

    # Dictionary of parser expressions.
    # The p_* docstrings are grammar productions (PLY convention).
    def p_statement_expr(self, p):
        'statement : expression'
        self.parsed_string = p[1]

    def p_expression_binop(self, p):
        '''expression : expression '=' expression
                      | expression '+' expression
                      | expression '-' expression
                      | expression '*' expression
                      | expression '/' expression'''
        p[0] = p[1] + p[2] + p[3]

    def p_expression_uminus(self, p):
        "expression : '-' expression %prec UMINUS"
        p[0] = '-' + p[2]

    def p_group_parentheses(self, p):
        "group : '(' expression ')'"
        p[0] = '(' + p[2] + ')'

    def p_expression_group(self, p):
        "expression : group"
        p[0] = p[1]

    def p_error(self, p):
        # PLY error callback: raise for a bad token, log at EOF.
        if p:
            try:
                print(p)
                print(p[:])
                print(p.value)
            except Exception:
                # BUG FIX: was a bare `except:` (also caught KeyboardInterrupt).
                pass
            # BUG FIX: `self.f` is never defined on this class, so formatting
            # the message used to raise AttributeError instead of the intended
            # syntax error; fall back to a placeholder when it is absent.
            raise Exception("Syntax error at '%s' in '%s'"
                            % (p.value, getattr(self, 'f', '<unknown input>')))
        else:
            logger.error("Syntax error at EOF")
        self.parsed_string = "Error"
class ALOHAExpressionParser(UFOExpressionParser):
    """Parser that translates UFO expressions into ALOHA-object source text."""

    # Names that the parser must leave as literal ALOHA object calls instead
    # of evaluating/translating them (see p_expression_power/p_expression_function).
    aloha_object = ['P', 'PBar', 'PVec','Gamma','Gamma5','Sigma','Mass','PSlash',
                    'OverMass2','Width','Scalar','Spinor','Vector',
                    'Spin2','Spin32','C','Epsilon','Metric','Identity', 'IdentityL',
                    'ProjM','ProjP','Coup','Norm', 'EPSL', 'EPST1', 'EPST2', 'PT',
                    'UFP', 'UFM', 'UFPC', 'UFMC',
                    'VFP', 'VFM', 'VFPC', 'VFMC',
                    'Tnorm', 'TnormZ']
    def p_expression_pi(self, p):
        '''expression : PI'''
        # Record globally that pi appears so the kernel can declare the
        # 'PI' parameter, then emit it as a Param reference.
        KERNEL.has_pi = True
        p[0] = 'Param(\'PI\')'
    def p_expression_power(self, p):
        'expression : expression POWER expression'
        # Strip a call's argument list to recover the bare object name,
        # e.g. "P(1,2)" -> "P".
        obj = p[1]
        if '(' in p[1]:
            obj = p[1].split('(',1)[0]
        if obj in self.aloha_object:
            # ALOHA objects stay symbolic: re-emit "base**exponent" verbatim.
            p2 = [x for i,x in enumerate(p) if i>0]
            p[0] = ''.join(p2)
        else:
            # Plain numbers/parameters: register a 'pow' kernel function call.
            new = aloha_lib.KERNEL.add_function_expression('pow', eval(p[1]), eval(p[3]))
            p[0] = str(new)
    def p_expression_variable(self, p):
        "expression : VARIABLE"
        # A bare identifier becomes a Param('name') reference.
        p[0] = 'Param(\'%s\')' % p[1]
    def p_expression_variable2(self, p):
        "expression : '\\'' VARIABLE '\\''"
        # A quoted identifier stays a plain string literal (no Param wrap).
        p[0] = '\'%s\'' % p[2]
    def p_expression_expression(self, p):
        "expression : '\\'' expression '\\''"
        # Re-quote a quoted sub-expression after it has been translated.
        p[0] = '\'%s\'' % p[2]
    def p_expression_complex(self, p):
        "expression : COMPLEX '(' expression ',' expression ')'"
        # complex(re, im) is passed through with translated arguments.
        p[0] = 'complex(' + p[3] + ',' + p[5] + ')'
    def p_expression_number(self, p):
        "expression : NUMBER"
        p[0] = p[1]
        # Normalize small integral literals ("2.0" -> "2"); large values keep
        # their original spelling (e.g. exponent notation).
        if float(p[1]) == int(float(p[1])) and float(p[1]) < 1000:
            p[0] = str(int(float(p[1])))
    def p_expression_func(self, p):
        '''expression : CSC group
                      | SEC group
                      | ACSC group
                      | ASEC group
                      | RE group
                      | IM group
                      | SQRT group
                      | CONJ group'''
        # Register the unary function call with the kernel; eval turns the
        # translated argument text back into an object/number first.
        new = aloha_lib.KERNEL.add_function_expression(p[1], eval(p[2]))
        p[0] = str(new)
    def p_expression_function(self, p):
        """expression : FUNCTION '(' expression ')'
        expression : FUNCTION '(' expression ',' expression ')'
        expression : FUNCTION '(' expression ',' expression ',' expression ')'
        expression : FUNCTION '(' expression ',' expression ',' expression ',' expression ')'
        expression : FUNCTION '(' expression ',' expression ',' expression ',' expression ',' expression ')'
        expression : FUNCTION '(' expression ',' expression ',' expression ',' expression ',' expression ',' expression ')'
        expression : FUNCTION '(' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ')'
        expression : FUNCTION '(' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ')'
        expression : FUNCTION '(' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ')'
        expression : FUNCTION '(' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ')'
        expression : FUNCTION '(' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ')'
        expression : FUNCTION '(' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ')'
        expression : FUNCTION '(' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ')'
        expression : FUNCTION '(' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ')'
        expression : FUNCTION '(' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ')'
        expression : FUNCTION '(' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ')'
        expression : FUNCTION '(' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ')'
        expression : FUNCTION '(' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ')'
        expression : FUNCTION '(' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ')'
        expression : FUNCTION '(' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ',' expression ')'
        """
        # Generic call with 1..20 arguments (one grammar production each,
        # since yacc has no variadic rules).
        if p[1] in self.aloha_object:
            # Known ALOHA objects are re-emitted verbatim, untranslated.
            p[0] = ''.join(p[1:])
            return
        p1 = p[1]
        # Strip a "cmath." prefix so the kernel sees the bare function name.
        re_groups = self.re_cmath_function.match(p1)
        if re_groups:
            p1 = re_groups.group("name")
        # Odd slots of p hold the expressions (even slots are the literals).
        args = [eval(p[2*i+1]) for i in range(1, len(p)//2)]
        new = aloha_lib.KERNEL.add_function_expression(p1, *args)
        p[0] = str(new)
def p_expression_binop(self, p):
'''expression : expression '=' expression
| expression '+' expression
| expression '-' expression
| expression '*' expression
| expression '/' expression'''
if p[2] != '/' or p[3].isdigit() or p[3].endswith('.'):
p[0] = p[1] + p[2] + p[3]
else:
denom = eval(p[3])
if | |
#!/usr/bin/env python
from __future__ import division
"""@package etddf
ROS interface script for delta tiering filter
Filter operates in ENU
"""
from etddf.delta_tier import DeltaTier
import rospy
import threading
from minau.msg import ControlStatus
from etddf.msg import Measurement, MeasurementPackage, NetworkEstimate, AssetEstimate, EtddfStatistics, PositionVelocity
from etddf.srv import GetMeasurementPackage
import numpy as np
import tf
np.set_printoptions(suppress=True)
from copy import deepcopy
from std_msgs.msg import Header, Float64
from geometry_msgs.msg import PoseWithCovariance, Pose, Point, Quaternion, Twist, Vector3, TwistWithCovariance, PoseWithCovarianceStamped
from nav_msgs.msg import Odometry
from minau.msg import SonarTargetList, SonarTarget
from cuprint.cuprint import CUPrint
__author__ = "<NAME>"
__copyright__ = "Copyright 2020, COHRINT Lab"
__email__ = "<EMAIL>"
__status__ = "Development"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__version__ = "3.0"
NUM_OWNSHIP_STATES = 6
class ETDDF_Node:
    """ROS wrapper around the event-triggered delta-tier (ET-DDF) filter.

    Subscribes to depth / sonar / modem measurement topics, runs the
    DeltaTier filter at a fixed rate (optionally fusing an external
    strapdown IMU estimate by covariance intersection), and publishes the
    network estimate, per-asset odometry and filter statistics.
    Filter state is in ENU.
    """

    def __init__(self, my_name, \
                update_rate, \
                delta_tiers, \
                asset2id, \
                delta_codebook_table, \
                buffer_size, \
                meas_space_table, \
                missed_meas_tolerance_table, \
                x0,\
                P0,\
                Q,\
                default_meas_variance,
                use_control_input):
        """Wire up the filter, publishers, subscribers and services.

        Blocks until the first strapdown odometry message arrives when the
        ~strapdown parameter is set.
        """
        self.update_rate = update_rate
        self.asset2id = asset2id
        self.Q = Q
        self.use_control_input = use_control_input
        self.default_meas_variance = default_meas_variance
        self.my_name = my_name
        self.landmark_dict = rospy.get_param("~landmarks", {})
        self.cuprint = CUPrint(rospy.get_name())
        self.filter = DeltaTier(NUM_OWNSHIP_STATES, \
                                x0,\
                                P0,\
                                buffer_size,\
                                meas_space_table,\
                                missed_meas_tolerance_table, \
                                delta_codebook_table,\
                                delta_tiers,\
                                self.asset2id,\
                                my_name)
        self.network_pub = rospy.Publisher("etddf/estimate/network", NetworkEstimate, queue_size=10)
        self.statistics_pub = rospy.Publisher("etddf/statistics", EtddfStatistics, queue_size=10)
        self.statistics = EtddfStatistics(0, rospy.get_rostime(), 0, 0, delta_tiers, [0 for _ in delta_tiers], 0.0, [], False)
        self.asset_pub_dict = {}
        for asset in self.asset2id.keys():
            if "surface" in asset:
                continue
            self.asset_pub_dict[asset] = rospy.Publisher("etddf/estimate/" + asset, Odometry, queue_size=10)
        self.update_seq = 0
        self.last_depth_meas = None
        rospy.sleep(rospy.Duration(1 / self.update_rate))
        self.last_update_time = rospy.get_rostime() - rospy.Duration(1 / self.update_rate)
        self.meas_lock = threading.Lock()
        self.update_lock = threading.Lock()
        self.last_orientation = None
        self.red_asset_found = False
        self.red_asset_names = rospy.get_param("~red_team_names")
        # Depth Sensor
        if rospy.get_param("~measurement_topics/depth") != "None":
            rospy.Subscriber(rospy.get_param("~measurement_topics/depth"), Float64, self.depth_callback, queue_size=1)
        # Modem & Measurement Packages
        rospy.Subscriber("etddf/packages_in", MeasurementPackage, self.meas_pkg_callback, queue_size=1)
        if self.use_control_input:
            self.control_input = None
            rospy.Subscriber("uuv_control/control_status", ControlStatus, self.control_status_callback, queue_size=1)
        if rospy.get_param("~strapdown"):
            rospy.Subscriber(rospy.get_param("~measurement_topics/imu_est"), Odometry, self.orientation_estimate_callback, queue_size=1)
            rospy.wait_for_message(rospy.get_param("~measurement_topics/imu_est"), Odometry)
        # IMU Covariance Intersection
        if rospy.get_param("~strapdown") and rospy.get_param("~measurement_topics/imu_ci") != "None":
            self.cuprint("Intersecting with strapdown")
            self.intersection_pub = rospy.Publisher("strapdown/intersection_result", PositionVelocity, queue_size=1)
            rospy.Subscriber(rospy.get_param("~measurement_topics/imu_ci"), PositionVelocity, self.nav_filter_callback, queue_size=1)
        else:
            self.cuprint("Not intersecting with strapdown filter")
            rospy.Timer(rospy.Duration(1 / self.update_rate), self.no_nav_filter_callback)
        # Sonar Subscription
        if rospy.get_param("~measurement_topics/sonar") != "None":
            rospy.Subscriber(rospy.get_param("~measurement_topics/sonar"), SonarTargetList, self.sonar_callback)
        self.data_x, self.data_y = None, None
        # rospy.Subscriber("pose_gt", Odometry, self.gps_callback, queue_size=1)
        # Initialize Buffer Service
        rospy.Service('etddf/get_measurement_package', GetMeasurementPackage, self.get_meas_pkg_callback)
        self.cuprint("loaded")

    def gps_callback(self, msg):
        """Store a noised ground-truth position as a pseudo-GPS measurement."""
        self.data_x = msg.pose.pose.position.x + np.random.normal(0, scale=0.05)
        self.data_y = msg.pose.pose.position.y + np.random.normal(0, scale=0.05)

    def orientation_estimate_callback(self, odom):
        """Cache the latest strapdown orientation and its covariances."""
        self.meas_lock.acquire()
        self.last_orientation = odom.pose.pose.orientation
        self.last_orientation_cov = np.array(odom.pose.covariance).reshape(6,6)
        self.last_orientation_dot = odom.twist.twist.angular
        self.last_orientation_dot_cov = np.array(odom.twist.covariance).reshape(6,6)
        self.meas_lock.release()

    def sonar_callback(self, sonar_list):
        """Convert sonar detections to world-frame x/y measurements and queue them."""
        for target in sonar_list.targets:
            if self.last_orientation is None: # No orientation, no linearization of the sonar measurement
                return
            if target.id == "detection":
                continue
            # Convert quaternions to Euler angles.
            self.meas_lock.acquire()
            (r, p, y) = tf.transformations.euler_from_quaternion([self.last_orientation.x, \
                self.last_orientation.y, self.last_orientation.z, self.last_orientation.w])
            self.meas_lock.release()
            # y is the vehicle yaw here; it is reused as the y-coordinate below
            bearing_world = y + target.bearing_rad
            z = target.range_m * np.sin(target.elevation_rad)
            xy_dist = target.range_m * np.cos(target.elevation_rad)
            x = xy_dist * np.cos(bearing_world)
            y = xy_dist * np.sin(bearing_world)
            now = rospy.get_rostime()
            sonar_x, sonar_y = None, None
            if "landmark_" in target.id:
                # NOTE(review): the y-measurement uses the "sonar_x" variance
                # here (unlike the non-landmark branch) -- likely should be
                # "sonar_y"; confirm before changing.
                sonar_x = Measurement("sonar_x", now, self.my_name, "", x, self.default_meas_variance["sonar_x"], self.landmark_dict[target.id[len("landmark_"):]])
                sonar_y = Measurement("sonar_y", now, self.my_name, "", y, self.default_meas_variance["sonar_x"], self.landmark_dict[target.id[len("landmark_"):]])
            else:
                sonar_x = Measurement("sonar_x", now, self.my_name, target.id, x, self.default_meas_variance["sonar_x"], [])
                sonar_y = Measurement("sonar_y", now, self.my_name, target.id, y, self.default_meas_variance["sonar_y"], [])
                if target.id in self.red_asset_names and not self.red_asset_found:
                    self.cuprint("Red Asset detected!")
                    self.red_asset_found = True
            self.filter.add_meas(sonar_x)
            self.filter.add_meas(sonar_y)

    def publish_stats(self, last_update_time):
        """Publish the running filter statistics and current lowest buffer."""
        self.statistics.seq = self.update_seq
        self.statistics.stamp = last_update_time
        self.statistics.overflown, delta, buf = self.filter.peek_buffer()
        self.statistics.current_lowest_multiplier = delta
        meas_name_list = [x.meas_type for x in buf]
        self.statistics.current_lowest_buffer = meas_name_list
        self.statistics_pub.publish(self.statistics)

    def no_nav_filter_callback(self, event):
        """Timer-driven predict/correct cycle when no strapdown CI is used."""
        t_now = rospy.get_rostime()
        delta_t_ros = t_now - self.last_update_time
        self.update_lock.acquire()
        ### Run Prediction ###
        if self.use_control_input and self.control_input is not None:
            self.filter.predict(self.control_input, self.Q, delta_t_ros.to_sec(), False)
        else:
            self.filter.predict(np.zeros((3,1)), self.Q, delta_t_ros.to_sec(), False)
        ### Run Correction ###
        # Construct depth measurement
        z_r = self.default_meas_variance["depth"]
        z_data = self.last_depth_meas
        if z_data is not None:
            z = Measurement("depth", t_now, self.my_name,"", z_data, z_r, [])
            self.filter.add_meas(z)
            self.last_depth_meas = None
        # correction
        self.filter.correct(t_now)
        self.publish_estimates(t_now)
        self.last_update_time = t_now
        self.update_seq += 1
        self.update_lock.release()
        self.publish_stats(t_now)

    def nav_filter_callback(self, pv_msg):
        """Predict/correct cycle plus covariance intersection with strapdown."""
        # Update at specified rate
        t_now = rospy.get_rostime()
        delta_t_ros = t_now - self.last_update_time
        if delta_t_ros < rospy.Duration(1/self.update_rate):
            return
        self.update_lock.acquire()
        ### Run Prediction ###
        if self.use_control_input and self.control_input is not None:
            self.filter.predict(self.control_input, self.Q, delta_t_ros.to_sec(), False)
        else:
            self.filter.predict(np.zeros((3,1)), self.Q, delta_t_ros.to_sec(), False)
        ### Run Correction ###
        # Construct depth measurement
        z_r = self.default_meas_variance["depth"]
        z_data = self.last_depth_meas
        if z_data is not None:
            z = Measurement("depth", t_now, self.my_name,"", z_data, z_r, []) # Flip z data to transform enu -> NED
            self.filter.add_meas(z)
            self.last_depth_meas = None
        if self.data_x is not None:
            x = Measurement("gps_x", t_now, self.my_name,"", self.data_x, 0.1, [])
            self.filter.add_meas(x)
            self.data_x = None
        if self.data_y is not None:
            y = Measurement("gps_y", t_now, self.my_name,"", self.data_y, 0.1, [])
            self.filter.add_meas(y)
            self.data_y = None
        # correction
        self.filter.correct(t_now)
        ### Covariance Intersect ###
        # Turn odom estimate into numpy
        mean = np.array([[pv_msg.position.x, pv_msg.position.y, pv_msg.position.z, \
                        pv_msg.velocity.x, pv_msg.velocity.y, pv_msg.velocity.z]]).T
        cov = np.array(pv_msg.covariance).reshape(6,6)
        # Run covariance intersection and feed the result back to the strapdown
        c_bar, Pcc = self.filter.intersect(mean, cov)
        position = Vector3(c_bar[0,0], c_bar[1,0], c_bar[2,0])
        velocity = Vector3(c_bar[3,0], c_bar[4,0], c_bar[5,0])
        covariance = list(Pcc.flatten())
        new_pv_msg = PositionVelocity(position, velocity, covariance)
        self.intersection_pub.publish(new_pv_msg)
        self.publish_estimates(t_now)
        self.last_update_time = t_now
        self.update_seq += 1
        self.update_lock.release()
        self.publish_stats(t_now)

    def control_status_callback(self, msg):
        """Cache the current control setpoint as the filter's control input."""
        self.update_lock.acquire()
        if msg.is_setpoint_active and msg.is_heading_velocity_setpoint_active:
            # NOTE(review): the mapping (y, z, -z) looks suspicious -- an
            # (x, y, -z) ENU->NED style mapping would be expected; confirm.
            self.control_input = np.array([[msg.setpoint_velocity.y, msg.setpoint_velocity.z, -msg.setpoint_velocity.z]]).T
        else:
            self.control_input = None
        self.update_lock.release()

    def depth_callback(self, msg):
        """Cache the most recent depth reading."""
        self.meas_lock.acquire()
        self.last_depth_meas = msg.data
        self.meas_lock.release()

    def publish_estimates(self, timestamp):
        """Publish per-asset Odometry messages and the combined NetworkEstimate."""
        ne = NetworkEstimate()
        for asset in self.asset2id.keys():
            if "surface" in asset:
                continue
            if "red" in asset and not self.red_asset_found:
                continue
            # Construct Odometry Msg for Asset
            mean, cov = self.filter.get_asset_estimate(asset)
            pose_cov = np.zeros((6,6))
            pose_cov[:3,:3] = cov[:3,:3]
            if asset == self.my_name:
                # Ownship: use the strapdown orientation and its covariance.
                pose = Pose(Point(mean[0],mean[1],mean[2]), \
                            self.last_orientation)
                pose_cov[3:,3:] = self.last_orientation_cov[3:,3:]
            else:
                pose = Pose(Point(mean[0],mean[1],mean[2]), \
                            Quaternion(0,0,0,1))
                pose_cov[3:,3:] = np.eye(3) * 3
            pwc = PoseWithCovariance(pose, list(pose_cov.flatten()))
            twist_cov = np.zeros((6,6))
            twist_cov[:3,:3] = cov[3:6,3:6]
            if asset == self.my_name:
                tw = Twist(Vector3(mean[3],mean[4],mean[5]), self.last_orientation_dot)
                twist_cov[3:, 3:] = self.last_orientation_dot_cov[3:,3:]
            else:
                tw = Twist(Vector3(mean[3],mean[4],mean[5]), Vector3(0,0,0))
                twist_cov[3:, 3:] = np.eye(3) * -1  # -1 marks angular rate as unknown
            twc = TwistWithCovariance(tw, list(twist_cov.flatten()))
            h = Header(self.update_seq, timestamp, "map")
            o = Odometry(h, "map", pwc, twc)
            ae = AssetEstimate(o, asset)
            ne.assets.append(ae)
            self.asset_pub_dict[asset].publish(o)
        self.network_pub.publish(ne)

    def meas_pkg_callback(self, msg):
        """Fuse an incoming measurement package (surface modem, own modem, or a shared buffer)."""
        # Modem Meas taken by surface
        if msg.src_asset == "surface":
            self.cuprint("Receiving Surface Modem Measurements")
            for meas in msg.measurements:
                # Approximate the fuse on the next update, so we can get other asset's position immediately
                if meas.meas_type == "modem_elevation":
                    rospy.logerr("Ignoring Modem Elevation Measurement since we have depth measurements")
                    continue
                elif meas.meas_type == "modem_azimuth":
                    meas.global_pose = list(meas.global_pose)
                    meas.data = (meas.data * np.pi) / 180
                    meas.variance = self.default_meas_variance["modem_azimuth"]
                elif meas.meas_type == "modem_range":
                    meas.global_pose = list(meas.global_pose)
                    meas.variance = self.default_meas_variance["modem_range"]
                self.filter.add_meas(meas, force_fuse=True)
        # Modem Meas taken by me
        elif msg.src_asset == self.my_name:
            for meas in msg.measurements:
                # Approximate the fuse on the next update, so we can get other asset's position immediately
                if meas.meas_type == "modem_elevation":
                    rospy.logerr("Ignoring Modem Elevation Measurement since we have depth measurements")
                    continue
                elif meas.meas_type == "modem_azimuth":
                    meas.global_pose = list(meas.global_pose)
                    meas.data = (meas.data * np.pi) / 180
                    meas.variance = self.default_meas_variance["modem_azimuth"]
                elif meas.meas_type == "modem_range":
                    meas.global_pose = list(meas.global_pose)
                    meas.variance = self.default_meas_variance["modem_range"]
                self.filter.add_meas(meas, force_fuse=True)
        # Buffer
        else:
            self.cuprint("receiving buffer")
            self.update_lock.acquire()
            # Loop through buffer and see if we've found the red agent
            for m in msg.measurements:
                if m.measured_asset in self.red_asset_names and not self.red_asset_found:
                    self.red_asset_found = True
                    self.cuprint("Red asset measurement received!")
            implicit_cnt, explicit_cnt = self.filter.catch_up(msg.delta_multiplier, msg.measurements)
            self.cuprint("...caught up")
            self.update_lock.release()
            self.statistics.implicit_count += implicit_cnt
            self.statistics.explicit_count += explicit_cnt

    def get_meas_pkg_callback(self, req):
        """Service handler: pull the current best buffer and return it as a package."""
        self.cuprint("pulling buffer")
        delta, buffer = self.filter.pull_buffer()
        ind = self.statistics.delta_tiers.index(delta)
        self.statistics.buffer_counts[ind] += 1
        mp = MeasurementPackage(buffer, self.my_name, delta)
        print(mp)  # NOTE(review): debug print left in; consider cuprint/logdebug
        return mp
################################
### Initialization Functions ###
################################
def get_indices_from_asset_names(blue_team):
    """Map asset names to filter indices: ownship gets 0, then the other
    blue-team assets, then the red team; 'surface' gets a sentinel -1."""
    my_name = rospy.get_param("~my_name")
    red_team = rospy.get_param("~red_team_names")
    asset2id = {my_name: 0}
    next_index = 1
    # Remaining blue assets first, skipping ourselves.
    for asset in blue_team:
        if asset == my_name:
            continue
        asset2id[asset] = next_index
        next_index += 1
    # Red assets follow the blue team.
    for asset in red_team:
        asset2id[asset] = next_index
        next_index += 1
    if my_name != "surface":
        asset2id["surface"] = -1  # arbitrary negative number
    return asset2id
def get_delta_codebook_table():
delta_codebook = {}
meas_info = rospy.get_param("~measurements")
for meas in meas_info.keys():
base_et_delta = meas_info[meas]["base_et_delta"]
delta_codebook[meas] = base_et_delta
return | |
with PSF.
Returns
-------
image : SyntheticImage
'''
stage = 'SyntheticImage: convolve_PSF'
# debugging comments
if isinstance(psf, GaussianPSF):
logger.debug('-' * 70)
logger.debug(stage + 'with GaussianPSF')
logger.debug('-' * 70)
# convolve val with classes GaussianPSF, FilePSF and FunctionPSF
val = psf.convolve(wav=self.wav, array=self.val, resolution=self.resolution)
# return SyntheticImage
i = SyntheticImage(self)
i.stage = stage
i.log.append(i.stage)
i.val = np.array(val)
return i
def add_noise(self, mu_noise, sigma_noise, seed=None, diagnostics=None):
'''
Adds normal distributed noise to the val image of SyntheticImage.
Parameters
----------
mu_noise : float
Mean of the normal distribution.
Good choice: mu_noise = 0.
sigma_noise : float
Standard deviation of the normal distribution.
Good choice arround:
* ``'ergs/cm^2/s'`` : sigma_noise = 10.**(-13)
* ``'ergs/cm^2/s/Hz'`` : sigma_noise = 10.**(-26)
* ``'Jy'`` : sigma_noise = 10.**(-3)
* ``'mJy'`` : sigma_noise = 10.**(-1)
* ``'MJy/sr'`` : sigma_noise = 10.**(-10)
seed : float, ``None``
When float seed fixes the random numbers to a certain sequence in order to create reproducible results.
Default is ``None``.
diagnostics : truetype
When ``True`` noise array is stored in a fits file.
Returns
-------
image : SyntheticImage
'''
stage = 'SyntheticImage: add_noise'
if sigma_noise != 0. and sigma_noise != 0:
if seed is not None:
np.random.seed(seed=seed)
noise = normal(mu_noise, sigma_noise, self.pixel)
if sigma_noise == 0. or sigma_noise == 0:
noise = np.zeros(self.pixel)
# Get noise.fits file
if diagnostics is True:
fits.writeto(self.name + '_' + 'process-output_SI-noise.fits', noise, clobber=True)
# add noise if val is already collapsed (x, y)
val = self.val.copy() + noise
# return SyntheticImage
i = SyntheticImage(self)
i.stage = stage
i.log.append(i.stage)
i.val = np.array(val)
return i
def get_total_val(self):
'''
Collapses the val image of SyntheticImage into a 0D val array.
Returns
-------
flux : SyntheticFlux
'''
stage = 'SyntheticImage: get_total_val'
if self.unit_out == 'MJy/sr' or self.unit_out == 'Jy/arcsec^2':
s = ConvertUnits(wav=self.wav, val=self.val)
val = s.get_unit(in_units=self.units, out_units='Jy', input_resolution=self.resolution['arcsec'])
else: val = self.val
# collapse 2D image to a single scalar val
total_val = np.sum(val)
if self.unit_out == 'MJy/sr' or self.unit_out == 'Jy/arcsec^2':
s = ConvertUnits(wav=self.wav, val=total_val)
total_val = s.get_unit(in_units='Jy', out_units=self.unit_out, input_resolution=self.resolution['arcsec'] * self.pixel[0])
# return SyntheticFlux
from .flux import SyntheticFlux
f = SyntheticFlux(self)
f.log.append(stage)
f.stage = 'SyntheticFlux: initial'
f.log.append(f.stage)
f.val = np.array(total_val)
return f
def plot_image(self, prefix=None, name=None, multi_cut=None, single_cut=None, set_cut=None, dpi=None):
'''
Plots the val image of SyntheticImage. The wavelength interval
around the central wavelength labels the plot.
Parameters
----------
prefix : str
Name of the image. Default naming chain is switched off.
name : str
Name of image within the default naming chain to distinguish the
plot files. E. g. 'PSF_gaussian'
mulit_cut : ``True``, ``None``
* ``True`` : plots chosen image slice at cuts of [100, 99, 95, 90]%.
* ``None`` : no mulit-plot is returned.
Default is ``None``.
single_cut : float [0,100], ``None``
* float : cut level for single plot of image slice.
* ``None`` : no single plot is returned.
set_cut : tuple, ``None``
* tuple : set_cut(v_min, v_max)
Minimal and maximal physical val presented in the colorbars.
* ``None`` : no plot with minimal and maximal cut is returned.
Default is ``None``.
dpi : ``None``, scalar > 0
The resolution in dots per inch.
``None`` is default and will use the val savefig.dpi
in the matplotlibrc file.
Returns
-------
image : SyntheticImage
'''
stage = 'SyntheticImage: plot_image'
if prefix is None and name is None:
raise Exception('If prefix name is not given, you need to give the a name to enable the default naming chain.')
if prefix is not None:
if multi_cut is True and (single_cut is not None or set_cut is not None):
raise Exception('If prefix naming is enabled only one plotting option can be chosen.')
elif multi_cut is None and (single_cut is not None and set_cut is not None):
raise Exception('If prefix naming is enabled only one plotting option can be chosen.')
plot = MakePlots(prefix=prefix, name=name, input_array=SyntheticImage(self), multi_cut=multi_cut, single_cut=single_cut, set_cut=set_cut, dpi=dpi)
# return SyntheticImage
i = SyntheticImage(self)
i.stage = stage
i.log.append(i.stage)
return i
def add_to_observation(self, fits_file, name, position_pix=None, position_world=None, zero_edges=None):
'''
Blends the modeled realistic synthetic observation to a real observation in a fits file.
Parameters
----------
fits_file : str
fits_file of the observation.
name : str
Name of the output fits file.
position_pix : list, ``None``
Center position of the model in observation pixel coordinates.
Default is ``None``.
position_world : list, ``None``
Center position of the model in observation world coordinates.
Default is ``None``.
zero_edges : ``True``, ``None``
If ``True`` edges of model are normalized to zero.
Default is ``None``.
Returns
-------
image : SyntheticImage
'''
stage = 'SyntheticImage: add_to_observation'
# world coordinates from fits_file
w = WCS(fits_file)
if position_world is None and position_pix is None:
raise Exception('WARNING: Position of model center needs to be given either in world or pixel coordinates.')
if position_pix is not None:
pos = position_pix
p_x_pos, p_y_pos = pos[0], pos[1]
else:
pos = position_world
p_x_pos, p_y_pos = w.wcs_world2pix(pos[0], pos[1], 1)
# center position in pixel and adjust position in current grid
x_round = np.round(p_x_pos, 0)
x_int = int(p_x_pos)
y_round = np.round(p_y_pos, 0)
y_int = int(p_y_pos)
# even or odd
if len(self.val[0]) % 2 == 0 and len(self.val[1]) % 2 == 0:
pos = np.array([x_round, y_round])
else:
if x_int == int(x_round):
if y_int == int(y_round):
pos = np.array([x_round + 0.5, y_round + 0.5])
else:
pos = np.array([x_round + 0.5, y_round - 0.5])
else:
if y_int == int(y_round):
pos = np.array([x_round - 0.5, y_round + 0.5])
else:
pos = np.array([x_round - 0.5, y_round - 0.5])
# limits of model in observation
start_x = pos[0] - len(self.val[0]) / 2.
stop_x = pos[0] + len(self.val[0]) / 2.
start_y = pos[1] - len(self.val[1]) / 2.
stop_y = pos[1] + len(self.val[1]) / 2.
# normalized that edges are zero
if zero_edges is True:
model = self.val.copy() - np.min(self.val)
else:
model = self.val.copy()
# open fits_file
hdulist = fits.open(fits_file)
hdu = hdulist[0]
header = hdu.header
if np.allclose(np.abs(header['CDELT1'] * 3600), self.resolution['arcsec']) is not True:
raise Exception('WARNING: make sure that resolution of observation and model are the same! E. g. change resolution of FC_object first.')
image = hdu.data
# add model to observation
image[start_y:stop_y, start_x:stop_x] = image[start_y:stop_y, start_x:stop_x] + model
# store to name.fits file
fits.writeto(name + '.fits', image, clobber=True)
# return SyntheticImage
i = SyntheticImage(self)
i.stage = stage
i.log.append(i.stage)
return i
def add_field_stars(self, extinction_map, database=None, star_file=None, seed=None, ISMextinction=None):
'''
Adds field stars to synthetic image.
Parameters
----------
extinction_map : object
Created with ``fluxcompensator.utils.fieldstars.extract_extinction_map``.
database : dict, ``None``
Dictionary sets the parameters for field stars loaded for the respective
band from the built-in database.
dict = {'number':200, 'distance_range':[3*kpc, 50*kpc], 'ground': 0.02}
The dictionary is structured as follows:
* ``'number'`` : int in [0,288]
* ``'distance_range'`` : list
Distance lower and upper limit in units of cm
* ``'ground'`` : str, float
Distribution of stars before (``'foreground'``) or behind (``'background'``) the synthetic object.
When ``'ground'`` is a ``float`` in the limits of [0,1] then this is the fraction of foreground stars.
Default is ``None``.
star_file : str, ``None``
To load individual file with field stars in the format of (distance[pc], mag[band]).
Default is ``None``.
seed : int, ``None``
To create reproducible results for the positions of field stars.
Default is ``None``.
ISMextinction : float, ``None``
Optical extinction A_V along the line of sight in units mag/kpc.
Default is ``None``.
Returns
-------
image : SyntheticImage
'''
stage = 'SyntheticImage: add_field_stars'
# make sure resolution and PSF was not applied before
if 'SyntheticImage: convolve_PSF' in self.log or 'SyntheticCube: | |
# -*- coding: utf-8 -*-
"""
Basic Arithmetic
The functions here are the basic arithmetic operations that you might find on a calculator.
"""
from mathics.version import __version__ # noqa used in loading to check consistency.
import sympy
import mpmath
from mathics.builtin.arithmetic import _MPMathFunction, create_infix
from mathics.builtin.base import (
Builtin,
BinaryOperator,
PrefixOperator,
SympyFunction,
)
from mathics.core.expression import (
Complex,
Expression,
Integer,
Integer0,
Integer1,
Number,
Rational,
Real,
String,
Symbol,
SymbolComplexInfinity,
SymbolDirectedInfinity,
SymbolInfinity,
SymbolN,
SymbolNull,
SymbolSequence,
from_mpmath,
)
from mathics.core.numbers import min_prec, dps
from mathics.core.convert import from_sympy
class CubeRoot(Builtin):
    """
    <dl>
    <dt>'CubeRoot[$n$]'
        <dd>finds the real-valued cube root of the given $n$.
    </dl>

    >> CubeRoot[16]
     = 2 2 ^ (1 / 3)

    #> CubeRoot[-5]
     = -5 ^ (1 / 3)

    #> CubeRoot[-510000]
     = -10 510 ^ (1 / 3)

    #> CubeRoot[-5.1]
     = -1.7213

    #> CubeRoot[b]
     = b ^ (1 / 3)

    #> CubeRoot[-0.5]
     = -0.793701

    #> CubeRoot[3 + 4 I]
     : The parameter 3 + 4 I should be real valued.
     = (3 + 4 I) ^ (1 / 3)
    """

    # NOTE(review): attributes is a set here but a tuple in the sibling
    # builtins (Divide, Minus) -- confirm which form the framework expects.
    attributes = {"Listable", "NumericFunction", "ReadProtected"}

    messages = {
        "preal": "The parameter `1` should be real valued.",
    }

    # Rewrite rules: negative reals take the real branch -(-n)^(1/3);
    # everything else falls through to Power[n, 1/3].
    rules = {
        "CubeRoot[n_?NumberQ]": "If[n > 0, Power[n, Divide[1, 3]], Times[-1, Power[Times[-1, n], Divide[1, 3]]]]",
        "CubeRoot[n_]": "Power[n, Divide[1, 3]]",
        "MakeBoxes[CubeRoot[x_], f:StandardForm|TraditionalForm]": (
            "RadicalBox[MakeBoxes[x, f], 3]"
        ),
    }

    summary_text = "cubed root"

    # The docstring below is the match pattern for this downvalue (mathics
    # convention): complex arguments get a warning, then fall back to Power.
    def apply(self, n, evaluation):
        "CubeRoot[n_Complex]"

        evaluation.message("CubeRoot", "preal", n)
        return Expression("Power", n, Expression("Divide", 1, 3))
class Divide(BinaryOperator):
    """
    <dl>
    <dt>'Divide[$a$, $b$]'</dt>
    <dt>'$a$ / $b$'</dt>
    <dd>represents the division of $a$ by $b$.
    </dl>
    >> 30 / 5
    = 6
    >> 1 / 8
    = 1 / 8
    >> Pi / 4
    = Pi / 4
    Use 'N' or a decimal point to force numeric evaluation:
    >> Pi / 4.0
    = 0.785398
    >> 1 / 8
    = 1 / 8
    >> N[%]
    = 0.125
    Nested divisions:
    >> a / b / c
    = a / (b c)
    >> a / (b / c)
    = a c / b
    >> a / b / (c / (d / e))
    = a d / (b c e)
    >> a / (b ^ 2 * c ^ 3 / e)
    = a e / (b ^ 2 c ^ 3)
    #> 1 / 4.0
    = 0.25
    #> 10 / 3 // FullForm
    = Rational[10, 3]
    #> a / b // FullForm
    = Times[a, Power[b, -1]]
    """

    operator = "/"
    precedence = 470
    attributes = ("Listable", "NumericFunction")
    # "/" associates to the left: a / b / c parses as (a / b) / c.
    grouping = "Left"
    # Formatting is fully specified by the rules below, so suppress the
    # automatically generated operator formats.
    default_formats = False

    # Division has no evaluation logic of its own: it is rewritten into
    # multiplication by the reciprocal (see the FullForm doctest above).
    rules = {
        "Divide[x_, y_]": "Times[x, Power[y, -1]]",
        "MakeBoxes[Divide[x_, y_], f:StandardForm|TraditionalForm]": (
            "FractionBox[MakeBoxes[x, f], MakeBoxes[y, f]]"
        ),
    }

    # One-dimensional forms fall back to infix "/" notation.
    formats = {
        (("InputForm", "OutputForm"), "Divide[x_, y_]"): (
            'Infix[{HoldForm[x], HoldForm[y]}, "/", 400, Left]'
        ),
    }

    summary_text = r"division"
class Minus(PrefixOperator):
    """
    <dl>
    <dt>'Minus[$expr$]'
    <dd> is the negation of $expr$.
    </dl>
    >> -a //FullForm
    = Times[-1, a]
    'Minus' automatically distributes:
    >> -(x - 2/3)
    = 2 / 3 - x
    'Minus' threads over lists:
    >> -Range[10]
    = {-1, -2, -3, -4, -5, -6, -7, -8, -9, -10}
    """

    operator = "-"
    precedence = 480
    attributes = ("Listable", "NumericFunction")

    # Negation is rewritten as multiplication by -1; distribution over sums
    # then happens through the ordinary evaluation of Times/Plus.
    rules = {
        "Minus[x_]": "Times[-1, x]",
    }

    formats = {
        "Minus[x_]": 'Prefix[{HoldForm[x]}, "-", 480]',
        # don't put e.g. -2/3 in parentheses
        "Minus[expr_Divide]": 'Prefix[{HoldForm[expr]}, "-", 399]',
        "Minus[Infix[expr_, op_, 400, grouping_]]": (
            'Prefix[{Infix[expr, op, 400, grouping]}, "-", 399]'
        ),
    }

    summary_text = "arithmetic negation"

    def apply_int(self, x, evaluation):
        "Minus[x_Integer]"
        # Fast path: negate an explicit integer directly rather than going
        # through the Times[-1, x] rewrite rule.
        return Integer(-x.to_sympy())
class Plus(BinaryOperator, SympyFunction):
    """
    <dl>
    <dt>'Plus[$a$, $b$, ...]'</dt>
    <dt>$a$ + $b$ + ...</dt>
    <dd>represents the sum of the terms $a$, $b$, ...
    </dl>
    >> 1 + 2
    = 3
    'Plus' performs basic simplification of terms:
    >> a + b + a
    = 2 a + b
    >> a + a + 3 * a
    = 5 a
    >> a + b + 4.5 + a + b + a + 2 + 1.5 b
    = 6.5 + 3 a + 3.5 b
    Apply 'Plus' on a list to sum up its elements:
    >> Plus @@ {2, 4, 6}
    = 12
    The sum of the first 1000 integers:
    >> Plus @@ Range[1000]
    = 500500
    'Plus' has default value 0:
    >> DefaultValues[Plus]
    = {HoldPattern[Default[Plus]] :> 0}
    >> a /. n_. + x_ :> {n, x}
    = {0, a}
    The sum of 2 red circles and 3 red circles is...
    >> 2 Graphics[{Red,Disk[]}] + 3 Graphics[{Red,Disk[]}]
    = 5 -Graphics-
    #> -2a - 2b
    = -2 a - 2 b
    #> -4+2x+2*Sqrt[3]
    = -4 + 2 Sqrt[3] + 2 x
    #> 2a-3b-c
    = 2 a - 3 b - c
    #> 2a+5d-3b-2c-e
    = 2 a - 3 b - 2 c + 5 d - e
    #> 1 - I * Sqrt[3]
    = 1 - I Sqrt[3]
    #> Head[3 + 2 I]
    = Complex
    #> N[Pi, 30] + N[E, 30]
    = 5.85987448204883847382293085463
    #> % // Precision
    = 30.
    """

    operator = "+"
    precedence = 310
    attributes = (
        "Flat",
        "Listable",
        "NumericFunction",
        "OneIdentity",
        "Orderless",
        "Protected",
    )
    # Formatting is produced by format_plus below, not auto-generated.
    default_formats = False

    # Default value 0, used when matching optional patterns like n_. + x_.
    defaults = {
        None: "0",
    }

    summary_text = "addition of numbers, lists, arrays, or symbolic expressions"
    sympy_name = "Add"

    def format_plus(self, items, evaluation):
        "Plus[items__]"

        # Render a sum as an Infix row, turning "+ (-k x)" into "- k x" so
        # that e.g. 2a - 3b displays with a subtraction sign.

        def negate(item):
            # Return item with its sign flipped, simplifying away a leading
            # -1 coefficient in a Times expression.
            if item.has_form("Times", 1, None):
                if isinstance(item.leaves[0], Number):
                    neg = -item.leaves[0]
                    if neg.sameQ(Integer1):
                        if len(item.leaves) == 1:
                            return neg
                        else:
                            return Expression("Times", *item.leaves[1:])
                    else:
                        return Expression("Times", neg, *item.leaves[1:])
                else:
                    return Expression("Times", -1, *item.leaves)
            elif isinstance(item, Number):
                # NOTE(review): this returns a sympy value rather than a
                # Mathics Number (other branches return Expression/Number);
                # presumably converted downstream — verify.
                return -item.to_sympy()
            else:
                return Expression("Times", -1, item)

        def is_negative(value):
            # NOTE(review): a Complex counts as "negative" whenever both
            # real and imaginary parts are <= 0 (including 0 + 0 I).
            if isinstance(value, Complex):
                real, imag = value.to_sympy().as_real_imag()
                if real <= 0 and imag <= 0:
                    return True
            elif isinstance(value, Number) and value.to_sympy() < 0:
                return True
            return False

        items = items.get_sequence()
        # First term is emitted as-is; subsequent terms contribute an
        # explicit "+" or "-" operator string.
        values = [Expression("HoldForm", item) for item in items[:1]]
        ops = []
        for item in items[1:]:
            if (
                item.has_form("Times", 1, None) and is_negative(item.leaves[0])
            ) or is_negative(item):
                item = negate(item)
                op = "-"
            else:
                op = "+"
            values.append(Expression("HoldForm", item))
            ops.append(String(op))
        return Expression(
            "Infix",
            Expression("List", *values),
            Expression("List", *ops),
            310,
            Symbol("Left"),
        )

    def apply(self, items, evaluation):
        "Plus[items___]"

        # Canonicalize the sum: group symbolic terms by their non-numeric
        # part while accumulating their numeric coefficients, and sum the
        # plain numbers separately at the appropriate precision.
        items = items.numerify(evaluation).get_sequence()
        leaves = []
        # (last_item, last_count) is the term currently being collected.
        last_item = last_count = None
        prec = min_prec(*items)
        is_machine_precision = any(item.is_machine_precision() for item in items)
        numbers = []

        def append_last():
            # Flush the collected term into `leaves`, multiplying in its
            # accumulated coefficient unless that coefficient is exactly 1.
            if last_item is not None:
                if last_count == 1:
                    leaves.append(last_item)
                else:
                    if last_item.has_form("Times", None):
                        leaves.append(
                            Expression(
                                "Times", from_sympy(last_count), *last_item.leaves
                            )
                        )
                    else:
                        leaves.append(
                            Expression("Times", from_sympy(last_count), last_item)
                        )

        for item in items:
            if isinstance(item, Number):
                numbers.append(item)
            else:
                # Split a Times term into (numeric coefficient, rest) so
                # that e.g. a + 3 a collects to 4 a.
                count = rest = None
                if item.has_form("Times", None):
                    for leaf in item.leaves:
                        if isinstance(leaf, Number):
                            count = leaf.to_sympy()
                            rest = item.get_mutable_leaves()
                            rest.remove(leaf)
                            if len(rest) == 1:
                                rest = rest[0]
                            else:
                                rest.sort()
                                rest = Expression("Times", *rest)
                            break
                if count is None:
                    count = sympy.Integer(1)
                    rest = item
                if last_item is not None and last_item == rest:
                    # Same symbolic part as the previous term (terms arrive
                    # in canonical Orderless order): just add coefficients.
                    last_count = last_count + count
                else:
                    append_last()
                    last_item = rest
                    last_count = count
        append_last()

        if numbers:
            if prec is not None:
                if is_machine_precision:
                    # Any machine-precision term makes the whole numeric sum
                    # machine precision; mpmath.fsum gives an accurate sum.
                    numbers = [item.to_mpmath() for item in numbers]
                    number = mpmath.fsum(numbers)
                    number = from_mpmath(number)
                else:
                    # Arbitrary precision: work at the minimum precision of
                    # the operands (see the N[Pi, 30] + N[E, 30] doctest).
                    with mpmath.workprec(prec):
                        numbers = [item.to_mpmath() for item in numbers]
                        number = mpmath.fsum(numbers)
                        number = from_mpmath(number, dps(prec))
            else:
                # All terms exact: sum symbolically via sympy.
                number = from_sympy(sum(item.to_sympy() for item in numbers))
        else:
            number = Integer0

        # A zero numeric part is dropped rather than displayed as "0 + ...".
        if not number.sameQ(Integer0):
            leaves.insert(0, number)
        if not leaves:
            return Integer0
        elif len(leaves) == 1:
            return leaves[0]
        else:
            # Canonical (Orderless) argument order.
            leaves.sort()
            return Expression("Plus", *leaves)
class Power(BinaryOperator, _MPMathFunction):
"""
<dl>
<dt>'Power[$a$, $b$]'</dt>
<dt>'$a$ ^ $b$'</dt>
<dd>represents $a$ raised to the power of $b$.
</dl>
>> 4 ^ (1/2)
= 2
>> 4 ^ (1/3)
= 2 ^ (2 / 3)
>> 3^123
= 48519278097689642681155855396759336072749841943521979872827
>> (y ^ 2) ^ (1/2)
= Sqrt[y ^ 2]
>> (y ^ 2) ^ 3
= y ^ 6
>> Plot[Evaluate[Table[x^y, {y, 1, 5}]], {x, -1.5, 1.5}, AspectRatio -> 1]
= -Graphics-
Use a decimal point to force numeric evaluation:
>> 4.0 ^ (1/3)
= 1.5874
'Power' has default value 1 for its second argument:
>> DefaultValues[Power]
= {HoldPattern[Default[Power, 2]] :> 1}
>> a /. x_ ^ n_. :> {x, n}
= {a, 1}
'Power' can be used with complex numbers:
>> (1.5 + 1.0 I) ^ 3.5
= -3.68294 + 6.95139 I
>> (1.5 + 1.0 I) ^ (3.5 + 1.5 | |
import torch
import logging
from transformers import BertModel, BertTokenizer
from transformers import *
from typing import List
from itertools import chain
import argparse
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import json
from tqdm import tqdm
import sklearn
torch.manual_seed(1)  # fixed seed so parameter initialization is reproducible
START_TAG = "<START>"  # synthetic tag marking sequence start in the CRF transition matrix
STOP_TAG = "<STOP>"  # synthetic tag marking sequence end
logger = logging.getLogger(__name__)
def argmax(vec):
    """Return the column index of the largest entry of the 1 x N tensor *vec* as an int."""
    _, best = torch.max(vec, 1)
    return best.item()
def prepare_sequence(seq, to_ix):
    """Map each token of *seq* through the *to_ix* vocabulary and return a long tensor."""
    return torch.tensor([to_ix[token] for token in seq], dtype=torch.long)
# Compute log sum exp in a numerically stable way for the forward algorithm
def log_sum_exp(vec):
    """Numerically stable log(sum(exp(vec))) over a 1 x N score row (CRF forward pass).

    The maximum score is subtracted before exponentiating and added back
    afterwards, so large scores cannot overflow.
    """
    _, best = torch.max(vec, 1)
    max_score = vec[0, best.item()]
    broadcast = max_score.view(1, -1).expand(1, vec.size()[1])
    return max_score + torch.log(torch.sum(torch.exp(vec - broadcast)))
class InputExample(object):
    """One raw (untokenized) example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Store the raw fields of a single example.

        Args:
            guid: unique identifier for the example.
            text_a: untokenized text of the first sequence; for
                single-sequence tasks this is the only text required.
            text_b: untokenized text of the second sequence, needed only
                for sequence-pair tasks.
            label: gold label, present for train/dev examples but not for
                test examples.
        """
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label
class InputFeatures(object):
    """Model-ready features of one example: token ids, attention mask, segment ids."""

    def __init__(self, input_ids, input_mask, segment_ids):
        # All three sequences are padded to the same fixed length upstream.
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
def convert_examples_to_features(examples, tokenizer, max_seq_length,
                                 cls_token_at_end=False, pad_on_left=False,
                                 cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
                                 sequence_a_segment_id=0, sequence_b_segment_id=1,
                                 cls_token_segment_id=1, pad_token_segment_id=0,
                                 mask_padding_with_zero=True):
    """ Loads a data file into a list of `InputBatch`s
        `cls_token_at_end` define the location of the CLS token:
            - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
            - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
        `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)

        NOTE(review): the docstring above says 0 for BERT / 2 for XLNet, yet
        the parameter defaults to 1 — confirm which is intended for this model.
    """
    features = []
    for (ex_index, example) in enumerate(examples):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d of %d" % (ex_index, len(examples)))
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]
        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids:   0   0   0   0  0     0   0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens = tokens_a + [sep_token]
        segment_ids = [sequence_a_segment_id] * len(tokens)
        if tokens_b:
            tokens += tokens_b + [sep_token]
            segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
        # The CLS token goes at the front (BERT/XLM) or the back (XLNet/GPT).
        if cls_token_at_end:
            tokens = tokens + [cls_token]
            segment_ids = segment_ids + [cls_token_segment_id]
        else:
            tokens = [cls_token] + tokens
            segment_ids = [cls_token_segment_id] + segment_ids
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
            segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
        else:
            input_ids = input_ids + ([pad_token] * padding_length)
            input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
            segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
        # All three feature sequences must come out at exactly max_seq_length.
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        # Log the first few examples verbatim for eyeball verification.
        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("tokens: %s" % " ".join(
                [str(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids))
    return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def select_field(features, field):
    """Pull *field* out of every choice dict of every feature.

    Returns one inner list per feature, each holding that feature's
    per-choice values for the requested field.
    """
    collected = []
    for feature in features:
        collected.append([choice[field] for choice in feature.choices_features])
    return collected
def create_examples(_list, set_type="train"):
    """Wrap each raw text line in an InputExample.

    The guid is "<set_type>-<index>" so examples stay traceable back to
    their position in the input; only text_a is populated.
    """
    return [
        InputExample(guid="%s-%s" % (set_type, index), text_a=text)
        for index, text in enumerate(_list)
    ]
class BertBiLSTM_CRF(nn.Module):
def __init__(self,
tag_to_idx,
hidden_dim,
device,
pretrained_weights='bert-base-uncased',
tokenizer_class=BertTokenizer,
model_class=BertModel,
max_seq_len=128,
init_checkpoint="model/ckpt_794.pt"):
super().__init__()
self.tag_to_ix = tag_to_idx
self.pretrained_weights = pretrained_weights
self.tokenizer_class = tokenizer_class
self.model_class = model_class
self.tokenizer = self.tokenizer_class.from_pretrained(pretrained_weights)
self.model = self.model_class.from_pretrained(pretrained_weights)
self.model.load_state_dict(torch.load(init_checkpoint, map_location='cpu')["model"], strict=False)
self.max_seq_len = max_seq_len
self.hidden_dim = hidden_dim
self.tagset_size = len(tag_to_idx)
self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)
self.lstm = nn.LSTM(768, hidden_dim // 2,
num_layers=1, bidirectional=True)
self.dropout = nn.Dropout(0.1)
self.device = device
# Matrix of transition parameters. Entry i,j is the score of
# transitioning *to* i *from* j.
self.transitions = nn.Parameter(
torch.randn(self.tagset_size, self.tagset_size, device=self.device))
# These two statements enforce the constraint that we never transfer
# to the start tag and we never transfer from the stop tag
self.transitions.data[tag_to_idx[START_TAG], :] = -10000
self.transitions.data[:, tag_to_idx[STOP_TAG]] = -10000
self.hidden = self.init_hidden()
# for param in self.model.parameters():
# param.requires_grad = False
# tokenizer = BertTokenizer.from_pretrained(pretrained_weights)
# model = BertModel.from_pretrained(pretrained_weights)
# Clause by clause when getting BERT embeddings
# Performance as training data varies (10%$25% of the paragraphs)
#
def bert_embeddings(self, all_input_ids, all_input_mask, all_segment_ids) -> torch.tensor:
##### Check if this is actually doing what you think it's doing
# examples = create_examples(raw_text)
# features = convert_examples_to_features(
# examples, self.tokenizer, self.max_seq_len, True)
# all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long, device=self.device)
# all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long, device=self.device)
# all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long, device=self.device)
# cls_embeddings_ = []
# i = 0
##### Check if this
# while all_input_ids.size()[0] > 100:
# i+=1
# all_input_ids_ = all_input_ids[:100]
# all_input_mask_ = all_input_mask[:100]
# all_segment_ids_ = all_segment_ids[:100]
# print(i)
# cls_embeddings_.append(self.model(all_input_ids_,all_input_mask_,all_segment_ids_)[1])
# all_input_ids = all_input_ids[100:]
# all_input_mask = all_input_mask[100:]
# all_segment_ids = all_segment_ids[100:]
# cls_embeddings_.append(self.model(all_input_ids,all_input_mask,all_segment_ids)[1])
# cls_embeddings = torch.cat(cls_embeddings_)
cls_embeddings = self.model(all_input_ids, all_input_mask, all_segment_ids)[1]
return cls_embeddings
# def bert_embeddings(self, all_input_ids, all_input_mask, all_segment_ids):
# cls_embeddings = self.model(all_input_ids,all_input_mask,all_segment_ids)[1]
# return cls_embeddings
def init_hidden(self):
return (torch.randn(2, 1, self.hidden_dim // 2, device=self.device),
torch.randn(2, 1, self.hidden_dim // 2, device=self.device))
def _get_lstm_features(self, all_input_ids, all_input_mask, all_segment_ids):
self.hidden = self.init_hidden()
embeds = self.bert_embeddings(all_input_ids, all_input_mask, all_segment_ids)
steps = embeds.size()[0]
embeds = embeds.view(steps, 1, -1)
embeds = self.dropout(embeds)
lstm_out, self.hidden = self.lstm(embeds, self.hidden)
lstm_out = lstm_out.view(steps, self.hidden_dim)
lstm_feats = self.hidden2tag(lstm_out)
return lstm_feats
def _forward_alg(self, feats):
# Do the forward algorithm to compute the partition function
init_alphas = torch.full((1, self.tagset_size), -10000., device=self.device)
# START_TAG has all of the score.
init_alphas[0][self.tag_to_ix[START_TAG]] = 0.
# Wrap in a variable so that | |
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.vyos.vyos.tests.unit.compat.mock import patch
from ansible_collections.vyos.vyos.plugins.modules import (
vyos_bgp_address_family,
)
from ansible_collections.vyos.vyos.tests.unit.modules.utils import (
set_module_args,
)
from .vyos_module import TestVyosModule, load_fixture
class TestVyosBgpafModule(TestVyosModule):
    # Unit tests for the vyos_bgp_address_family resource module.
    module = vyos_bgp_address_family
    def setUp(self):
        """Patch the resource connection and device-data lookup before each test."""
        super(TestVyosBgpafModule, self).setUp()
        # No real persistent connection to a device is opened.
        self.mock_get_resource_connection_config = patch(
            "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.resource_module_base.get_resource_connection"
        )
        self.get_resource_connection_config = (
            self.mock_get_resource_connection_config.start()
        )
        # Facts gathering reads a fixture (see load_fixtures) instead of
        # running `show` on a device.
        self.mock_execute_show_command = patch(
            "ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.facts."
            + "bgp_address_family.bgp_address_family.Bgp_address_familyFacts.get_device_data"
        )
        self.execute_show_command = self.mock_execute_show_command.start()
    def tearDown(self):
        """Stop the patches installed by setUp."""
        super(TestVyosBgpafModule, self).tearDown()
        self.mock_get_resource_connection_config.stop()
        self.mock_execute_show_command.stop()
    def load_fixtures(self, commands=None, transport="cli", filename=None):
        """Feed the mocked get_device_data from a config fixture file."""
        if filename is None:
            filename = "vyos_bgp_address_family_config.cfg"

        def load_from_file(*args, **kwargs):
            # Stand-in for the real device query.
            output = load_fixture(filename)
            return output

        self.execute_show_command.side_effect = load_from_file
    def test_vyos_bgp_address_family_merged_idempotent(self):
        """Merging config identical to the device fixture must emit no commands."""
        set_module_args(
            dict(
                config=dict(
                    as_number=65536,
                    address_family=[
                        dict(
                            afi="ipv4",
                            aggregate_address=[
                                dict(prefix="192.0.2.0/24", as_set=True)
                            ],
                            networks=[
                                dict(
                                    prefix="172.16.17.32/24", route_map="map01"
                                ),
                                dict(prefix="172.16.17.32/24", backdoor=True),
                            ],
                        ),
                        dict(
                            afi="ipv6",
                            redistribute=[dict(protocol="ripng", metric=20)],
                        ),
                    ],
                    neighbors=[
                        dict(
                            neighbor_address="192.0.2.25",
                            address_family=[
                                dict(
                                    afi="ipv4",
                                    route_map=[
                                        dict(
                                            action="export", route_map="map01"
                                        )
                                    ],
                                    soft_reconfiguration=True,
                                ),
                            ],
                        ),
                        dict(
                            neighbor_address="203.0.113.5",
                            address_family=[
                                dict(
                                    afi="ipv6",
                                    attribute_unchanged=dict(next_hop=True),
                                )
                            ],
                        ),
                    ],
                )
            )
        )
        # Idempotence: nothing to change, nothing reported changed.
        self.execute_module(changed=False, commands=[])
    def test_vyos_bgp_address_family_merged(self):
        """Merging new attributes on top of the fixture emits only additive set commands."""
        set_module_args(
            dict(
                config=dict(
                    as_number=65536,
                    address_family=[
                        dict(
                            afi="ipv4",
                            aggregate_address=[
                                dict(prefix="192.0.2.0/24", summary_only=True)
                            ],
                            networks=[
                                dict(
                                    prefix="172.16.17.32/24", route_map="map01"
                                ),
                            ],
                        ),
                        dict(
                            afi="ipv6",
                            redistribute=[dict(protocol="ospfv3", metric=20)],
                        ),
                    ],
                    neighbors=[
                        dict(
                            neighbor_address="192.168.3.11",
                            address_family=[
                                dict(
                                    afi="ipv6",
                                    distribute_list=[
                                        dict(action="export", acl=10)
                                    ],
                                    route_server_client=True,
                                ),
                            ],
                        ),
                        dict(
                            neighbor_address="203.0.113.5",
                            address_family=[
                                dict(
                                    afi="ipv4",
                                    filter_list=[
                                        dict(
                                            action="export", path_list="list01"
                                        ),
                                    ],
                                    capability=dict(orf="send"),
                                )
                            ],
                        ),
                    ],
                )
            )
        )
        commands = [
            # NOTE(review): this expected command looks garbled — two commands
            # appear concatenated at "as-setipv4-unicast"; confirm against the
            # module's actual output.
            "set protocols bgp 65536 address-family ipv4-unicast aggregate-address 192.0.2.0/24 as-setipv4-unicast aggregate-address 192.0.2.0/24 summary-only",
            "set protocols bgp 65536 address-family ipv6-unicast redistribute ospfv3 metric 20",
            "set protocols bgp 65536 neighbor 203.0.113.5 address-family ipv4-unicast filter-list export list01",
            "set protocols bgp 65536 neighbor 203.0.113.5 address-family ipv4-unicast capability prefix-list send",
            "set protocols bgp 65536 neighbor 192.168.3.11 address-family ipv6-unicast distribute-list export 10",
            "set protocols bgp 65536 neighbor 192.168.3.11 address-family ipv6-unicast route-server-client",
        ]
        self.execute_module(changed=True, commands=commands)
    def test_vyos_bgp_address_family_replaced_idempotent(self):
        """Replacing with config identical to the device fixture must emit no commands."""
        set_module_args(
            dict(
                state="replaced",
                config=dict(
                    as_number=65536,
                    address_family=[
                        dict(
                            afi="ipv4",
                            aggregate_address=[
                                dict(prefix="192.0.2.0/24", as_set=True)
                            ],
                            networks=[
                                dict(
                                    prefix="172.16.17.32/24", route_map="map01"
                                ),
                                dict(prefix="172.16.17.32/24", backdoor=True),
                            ],
                        ),
                        dict(
                            afi="ipv6",
                            redistribute=[dict(protocol="ripng", metric=20)],
                        ),
                    ],
                    neighbors=[
                        dict(
                            neighbor_address="192.0.2.25",
                            address_family=[
                                dict(
                                    afi="ipv4",
                                    route_map=[
                                        dict(
                                            action="export", route_map="map01"
                                        )
                                    ],
                                    soft_reconfiguration=True,
                                ),
                            ],
                        ),
                        dict(
                            neighbor_address="203.0.113.5",
                            address_family=[
                                dict(
                                    afi="ipv6",
                                    attribute_unchanged=dict(next_hop=True),
                                )
                            ],
                        ),
                    ],
                ),
            )
        )
        self.execute_module(changed=False, commands=[])
    def test_vyos_bgp_address_family_replaced(self):
        """Replacing must delete fixture-only attributes and set the new ones."""
        set_module_args(
            dict(
                state="replaced",
                config=dict(
                    as_number=65536,
                    address_family=[
                        dict(
                            afi="ipv4",
                            aggregate_address=[
                                dict(prefix="192.0.2.0/24", summary_only=True)
                            ],
                            networks=[
                                dict(
                                    prefix="172.16.17.32/24", route_map="map01"
                                ),
                            ],
                        ),
                        dict(
                            afi="ipv6",
                            redistribute=[dict(protocol="ospfv3", metric=20)],
                        ),
                    ],
                    neighbors=[
                        dict(
                            neighbor_address="192.168.3.11",
                            address_family=[
                                dict(
                                    afi="ipv4",
                                    route_map=[
                                        dict(
                                            action="import", route_map="map01"
                                        )
                                    ],
                                ),
                                dict(
                                    afi="ipv6",
                                    distribute_list=[
                                        dict(action="export", acl=10)
                                    ],
                                    route_server_client=True,
                                ),
                            ],
                        ),
                        dict(
                            neighbor_address="192.0.2.25",
                            address_family=[
                                dict(
                                    afi="ipv4",
                                    route_map=[
                                        dict(
                                            action="export", route_map="map01"
                                        )
                                    ],
                                ),
                            ],
                        ),
                        dict(
                            neighbor_address="203.0.113.5",
                            address_family=[
                                dict(
                                    afi="ipv4",
                                    filter_list=[
                                        dict(
                                            action="export", path_list="list01"
                                        ),
                                    ],
                                    capability=dict(orf="send"),
                                )
                            ],
                        ),
                    ],
                ),
            )
        )
        # Deletions remove what the fixture has but the task omits; sets add
        # the task-only attributes.
        commands = [
            "delete protocols bgp 65536 neighbor 203.0.113.5 address-family ipv6-unicast attribute-unchanged",
            "delete protocols bgp 65536 neighbor 192.0.2.25 address-family ipv4-unicast soft-reconfiguration",
            "delete protocols bgp 65536 address-family ipv6-unicast redistribute ripng",
            "delete protocols bgp 65536 address-family ipv4-unicast network 172.16.17.32/24",
            "set protocols bgp 65536 address-family ipv4-unicast aggregate-address 192.0.2.0/24 summary-only",
            "set protocols bgp 65536 address-family ipv6-unicast redistribute ospfv3 metric 20",
            "set protocols bgp 65536 neighbor 192.168.3.11 address-family ipv4-unicast route-map import map01",
            "set protocols bgp 65536 neighbor 192.168.3.11 address-family ipv6-unicast distribute-list export 10",
            "set protocols bgp 65536 neighbor 192.168.3.11 address-family ipv6-unicast route-server-client",
            "set protocols bgp 65536 neighbor 203.0.113.5 address-family ipv4-unicast filter-list export list01",
            "set protocols bgp 65536 neighbor 203.0.113.5 address-family ipv4-unicast capability prefix-list send",
        ]
        self.execute_module(changed=True, commands=commands)
    def test_vyos_bgp_address_family_overridden_idempotent(self):
        """Overriding with config identical to the device fixture must emit no commands."""
        set_module_args(
            dict(
                state="overridden",
                config=dict(
                    as_number=65536,
                    address_family=[
                        dict(
                            afi="ipv4",
                            aggregate_address=[
                                dict(prefix="192.0.2.0/24", as_set=True)
                            ],
                            networks=[
                                dict(
                                    prefix="172.16.17.32/24", route_map="map01"
                                ),
                                dict(prefix="172.16.17.32/24", backdoor=True),
                            ],
                        ),
                        dict(
                            afi="ipv6",
                            redistribute=[dict(protocol="ripng", metric=20)],
                        ),
                    ],
                    neighbors=[
                        dict(
                            neighbor_address="192.0.2.25",
                            address_family=[
                                dict(
                                    afi="ipv4",
                                    route_map=[
                                        dict(
                                            action="export", route_map="map01"
                                        )
                                    ],
                                    soft_reconfiguration=True,
                                ),
                            ],
                        ),
                        dict(
                            neighbor_address="203.0.113.5",
                            address_family=[
                                dict(
                                    afi="ipv6",
                                    attribute_unchanged=dict(next_hop=True),
                                )
                            ],
                        ),
                    ],
                ),
            )
        )
        self.execute_module(changed=False, commands=[])
    def test_vyos_bgp_address_family_overridden(self):
        """Overriding must wipe everything not in the task and set the new config."""
        set_module_args(
            dict(
                state="overridden",
                config=dict(
                    as_number=65536,
                    address_family=[
                        dict(
                            afi="ipv4",
                            networks=[
                                dict(
                                    prefix="172.16.17.32/24", route_map="map01"
                                ),
                            ],
                        ),
                        dict(
                            afi="ipv6",
                            redistribute=[dict(protocol="ospfv3", metric=20)],
                        ),
                    ],
                    neighbors=[
                        dict(
                            neighbor_address="192.168.3.11",
                            address_family=[
                                dict(
                                    afi="ipv4",
                                    route_map=[
                                        dict(
                                            action="import", route_map="map01"
                                        )
                                    ],
                                ),
                                dict(
                                    afi="ipv6",
                                    distribute_list=[
                                        dict(action="export", acl=10)
                                    ],
                                    route_server_client=True,
                                ),
                            ],
                        ),
                    ],
                ),
            )
        )
        commands = [
            "delete protocols bgp 65536 neighbor 203.0.113.5 address-family",
            "delete protocols bgp 65536 neighbor 192.0.2.25 address-family",
            "delete protocols bgp 65536 address-family ipv6-unicast redistribute ripng",
            # NOTE(review): "ipv4" here vs "ipv4-unicast" everywhere else —
            # confirm this matches the module's actual delete command.
            "delete protocols bgp 65536 address-family ipv4 aggregate-address",
            "delete protocols bgp 65536 address-family ipv4-unicast network 172.16.17.32/24",
            "set protocols bgp 65536 address-family ipv6-unicast redistribute ospfv3 metric 20",
            "set protocols bgp 65536 neighbor 192.168.3.11 address-family ipv4-unicast route-map import map01",
            "set protocols bgp 65536 neighbor 192.168.3.11 address-family ipv6-unicast distribute-list export 10",
            "set protocols bgp 65536 neighbor 192.168.3.11 address-family ipv6-unicast route-server-client",
        ]
        self.execute_module(changed=True, commands=commands)
    def test_vyos_bgp_address_family_deleted(self):
        """Deleting listed AFs/neighbors must emit the matching delete commands."""
        set_module_args(
            dict(
                state="deleted",
                config=dict(
                    as_number=65536,
                    address_family=[
                        dict(
                            afi="ipv4",
                        ),
                    ],
                    neighbors=[
                        dict(
                            neighbor_address="192.0.2.25",
                            address_family=[
                                dict(
                                    afi="ipv4",
                                ),
                            ],
                        ),
                        dict(
                            neighbor_address="203.0.113.5",
                        ),
                    ],
                ),
            )
        )
        # A neighbor with no address_family listed drops all of its AFs.
        commands = [
            "delete protocols bgp 65536 address-family ipv4-unicast",
            "delete protocols bgp 65536 neighbor 192.0.2.25 address-family ipv4-unicast",
            "delete protocols bgp 65536 neighbor 203.0.113.5 address-family",
        ]
        self.execute_module(changed=True, commands=commands)
    def test_vyos_bgp_address_family_incorrect_instance(self):
        """An as_number different from the device's (65536) must fail the module."""
        set_module_args(
            dict(
                state="overridden",
                config=dict(
                    as_number=100,
                    address_family=[
                        dict(
                            afi="ipv4",
                            networks=[
                                dict(
                                    prefix="172.16.17.32/24", route_map="map01"
                                ),
                            ],
                        ),
                        dict(
                            afi="ipv6",
                            redistribute=[dict(protocol="ospfv3", metric=20)],
                        ),
                    ],
                    neighbors=[
                        dict(
                            neighbor_address="192.168.3.11",
                            address_family=[
                                dict(
                                    afi="ipv4",
                                    route_map=[
                                        dict(
                                            action="import", route_map="map01"
                                        )
                                    ],
                                ),
                                dict(
                                    afi="ipv6",
                                    distribute_list=[
                                        dict(action="export", acl=10)
                                    ],
                                    route_server_client=True,
                                ),
                            ],
                        ),
                    ],
                ),
            )
        )
        result = self.execute_module(failed=True)
        self.assertIn(
            "Only one bgp instance is allowed per device", result["msg"]
        )
    def test_vyos_bgp_address_family_rendered(self):
        """state=rendered must return the set commands without touching the device."""
        set_module_args(
            dict(
                state="rendered",
                config=dict(
                    as_number=65536,
                    address_family=[
                        dict(
                            afi="ipv4",
                            aggregate_address=[
                                dict(prefix="192.0.2.0/24", as_set=True)
                            ],
                            networks=[
                                dict(
                                    prefix="172.16.17.32/24", route_map="map01"
                                ),
                                dict(prefix="172.16.17.32/24", backdoor=True),
                            ],
                        ),
                        dict(
                            afi="ipv6",
                            redistribute=[dict(protocol="ripng", metric=20)],
                        ),
                    ],
                    neighbors=[
                        dict(
                            neighbor_address="192.0.2.25",
                            address_family=[
                                dict(
                                    afi="ipv4",
                                    route_map=[
                                        dict(
                                            action="export", route_map="map01"
                                        )
                                    ],
                                    soft_reconfiguration=True,
                                ),
                            ],
                        ),
                        dict(
                            neighbor_address="203.0.113.5",
                            address_family=[
                                dict(
                                    afi="ipv6",
                                    attribute_unchanged=dict(next_hop=True),
                                )
                            ],
                        ),
                    ],
                ),
            )
        )
        rendered_cmds = [
            "set protocols bgp 65536 address-family ipv4-unicast network 172.16.17.32/24 route-map map01",
            "set protocols bgp 65536 address-family ipv4-unicast network 172.16.17.32/24 backdoor",
            "set protocols bgp 65536 address-family ipv4-unicast aggregate-address 192.0.2.0/24 as-set",
            "set protocols bgp 65536 address-family ipv6-unicast redistribute ripng metric 20",
            "set protocols bgp 65536 neighbor 192.0.2.25 address-family ipv4-unicast route-map export map01",
            "set protocols bgp 65536 neighbor 192.0.2.25 address-family ipv4-unicast soft-reconfiguration inbound",
            "set protocols bgp 65536 neighbor 203.0.113.5 address-family ipv6-unicast attribute-unchanged next-hop",
        ]
        result = self.execute_module(changed=False)
        # Order-insensitive comparison; the third argument is only the
        # diagnostic message shown on failure.
        self.assertEqual(
            sorted(result["rendered"]),
            sorted(rendered_cmds),
            result["rendered"],
        )
    def test_vyos_bgp_address_family_parsed(self):
        """state=parsed must turn a running-config string into structured facts."""
        commands = [
            "set protocols bgp 65536 address-family ipv4-unicast network 172.16.17.32/24 route-map map01",
            "set protocols bgp 65536 address-family ipv4-unicast network 172.16.17.32/24 backdoor",
            "set protocols bgp 65536 address-family ipv4-unicast aggregate-address 192.0.2.0/24 as-set",
            "set protocols bgp 65536 address-family ipv6-unicast redistribute ripng metric 20",
            "set protocols bgp 65536 neighbor 192.0.2.25 address-family ipv4-unicast route-map export map01",
            "set protocols bgp 65536 neighbor 192.0.2.25 address-family ipv4-unicast soft-reconfiguration inbound",
            "set protocols bgp 65536 neighbor 203.0.113.5 address-family ipv6-unicast attribute-unchanged next-hop",
        ]
        parsed_str = "\n".join(commands)
        set_module_args(dict(running_config=parsed_str, state="parsed"))
        result = self.execute_module(changed=False)
        parsed_list = {
            "as_number": 65536,
            "address_family": [
                {
                    "afi": "ipv4",
                    "networks": [
                        {"prefix": "172.16.17.32/24", "route_map": "map01"},
                        {"prefix": "172.16.17.32/24", "backdoor": True},
                    ],
                    "aggregate_address": [
                        {"prefix": "192.0.2.0/24", "as_set": True}
                    ],
                },
                {
                    "afi": "ipv6",
                    "redistribute": [{"protocol": "ripng", "metric": 20}],
                },
            ],
            "neighbors": [
                {
                    "neighbor_address": "192.0.2.25",
                    "address_family": [
                        {"afi": "ipv4", "soft_reconfiguration": True},
                    ],
                },
                {
                    "neighbor_address": "203.0.113.5",
                    "address_family": [
                        {
                            "afi": "ipv6",
                            "attribute_unchanged": {"next_hop": True},
                        }
                    ],
                },
            ],
        }
        # NOTE(review): sorted() on a dict yields only its sorted keys, so
        # this assertion compares top-level key names, not the nested values.
        self.assertEqual(sorted(parsed_list), sorted(result["parsed"]))
def test_vyos_bgp_address_family_gathered(self):
set_module_args(dict(state="gathered"))
result = self.execute_module(changed=False)
gather_list = {
"as_number": 65536,
"address_family": [
{
"afi": "ipv4",
"networks": [
{"prefix": "172.16.17.32/24", "route_map": "map01"},
{"prefix": "172.16.17.32/24", "backdoor": True},
],
"aggregate_address": [
{"prefix": "192.0.2.0/24", "as_set": True}
],
},
{
"afi": "ipv6",
"redistribute": [{"protocol": "ripng", | |
if inputClass in ("File", "Directory"): # input files
inputDestDir = workflowInputs_destdir
globExplode = None
path_tokens = linearKey.split('.')
# Filling in the defaults
assert len(path_tokens) >= 1
if len(path_tokens) >= 1:
pretty_relname = path_tokens[-1]
if len(path_tokens) > 1:
relative_dir = os.path.join(*path_tokens[0:-1])
else:
relative_dir = None
else:
pretty_relname = None
relative_dir = None
if inputClass == 'Directory':
# We have to autofill this with the outputs directory,
# so results are properly stored (without escaping the jail)
if inputs.get('autoFill', False):
if inputs.get('autoPrefix', True):
autoFilledDir = os.path.join(self.outputsDir, *path_tokens)
else:
autoFilledDir = self.outputsDir
theInputs.append(MaterializedInput(linearKey, [autoFilledDir]))
continue
globExplode = inputs.get('globExplode')
elif inputClass == 'File' and inputs.get('autoFill', False):
# We have to autofill this with the outputs directory,
# so results are properly stored (without escaping the jail)
autoFilledFile = os.path.join(self.outputsDir, path_tokens)
autoFilledDir = os.path.dirname(autoFilledFile)
# This is needed to assure the path exists
if autoFilledDir != self.outputsDir:
os.makedirs(autoFilledDir, exist_ok=True)
theInputs.append(MaterializedInput(linearKey, [autoFilledFile]))
continue
remote_files = inputs.get('url')
# It has to exist
if remote_files is not None:
# We are sending the context name thinking in the future,
# as it could contain potential hints for authenticated access
contextName = inputs.get('security-context')
secondary_remote_files = inputs.get('secondary-urls')
preferred_name_conf = inputs.get('preferred-name')
if isinstance(preferred_name_conf, str):
pretty_relname = preferred_name_conf
elif not preferred_name_conf:
# Remove the pre-computed relative dir
pretty_relname = None
cacheable = not self.paranoidMode if inputs.get('cache', True) else False
# Setting up the relative dir preference
reldir_conf = inputs.get('relative-dir')
if isinstance(reldir_conf, str):
relative_dir = reldir_conf
elif not reldir_conf:
# Remove the pre-computed relative dir
relative_dir = None
if relative_dir is not None:
newInputDestDir = os.path.realpath(os.path.join(inputDestDir, relative_dir))
if newInputDestDir.startswith(os.path.realpath(inputDestDir)):
inputDestDir = cast(AbsPath, newInputDestDir)
# The storage dir depends on whether it can be cached or not
storeDir : Union[CacheType, AbsPath] = CacheType.Input if cacheable else workflowInputs_destdir
if not isinstance(remote_files, list): # more than one input file
remote_files = [ remote_files ]
remote_pairs : MutableSequence[MaterializedContent] = []
for remote_file in remote_files:
lastInput += 1
t_remote_pairs = self._fetchRemoteFile(
remote_file,
contextName,
offline,
storeDir,
cacheable,
inputDestDir,
globExplode,
prefix=str(lastInput) + '_',
prettyRelname=pretty_relname
)
remote_pairs.extend(t_remote_pairs)
secondary_remote_pairs : Optional[MutableSequence[MaterializedContent]]
if secondary_remote_files is not None:
if not isinstance(secondary_remote_files, list): # more than one secondary input file
secondary_remote_files = [ secondary_remote_files ]
secondary_remote_pairs = []
for secondary_remote_file in secondary_remote_files:
# The last fetched content prefix is the one used
# for all the secondaries
t_secondary_remote_pairs = self._fetchRemoteFile(
secondary_remote_file,
contextName,
offline,
storeDir,
cacheable,
inputDestDir,
globExplode,
prefix=str(lastInput) + '_'
)
secondary_remote_pairs.extend(t_secondary_remote_pairs)
else:
secondary_remote_pairs = None
theInputs.append(
MaterializedInput(
name=linearKey,
values=remote_pairs,
secondaryInputs=secondary_remote_pairs
)
)
else:
if inputClass == 'File':
# Empty input, i.e. empty file
inputDestPath = cast(AbsPath, os.path.join(inputDestDir, *linearKey.split('.')))
os.makedirs(os.path.dirname(inputDestPath), exist_ok=True)
# Creating the empty file
with open(inputDestPath, mode="wb") as idH:
pass
contentKind = ContentKind.File
else:
inputDestPath = inputDestDir
contentKind = ContentKind.Directory
theInputs.append(
MaterializedInput(
name=linearKey,
values=[
MaterializedContent(
local=cast(AbsPath, inputDestPath),
licensed_uri=LicensedURI(uri=cast(URIType, "data:,")),
prettyFilename=cast(RelPath, os.path.basename(inputDestPath)),
kind=contentKind
)
],
)
)
else:
raise WFException(
'Unrecognized input class "{}", attached to "{}"'.format(inputClass, linearKey))
else:
# possible nested files
newInputsAndParams, lastInput = self.fetchInputs(inputs,
workflowInputs_destdir=workflowInputs_destdir,
prefix=linearKey + '.', lastInput=lastInput,
offline=offline)
theInputs.extend(newInputsAndParams)
else:
if not isinstance(inputs, list):
inputs = [inputs]
theInputs.append(MaterializedInput(linearKey, inputs))
return theInputs, lastInput
def stageWorkDir(self) -> StagedSetup:
"""
This method is here to simplify the understanding of the needed steps
"""
self.fetchWorkflow()
self.setupEngine()
self.materializeWorkflow()
self.materializeInputs()
self.marshallStage()
return self.getStagedSetup()
def workdirToBagit(self):
"""
BEWARE: This is a destructive step! So, once run, there is no back!
"""
return bagit.make_bag(self.workDir)
DefaultCardinality = '1'
CardinalityMapping = {
'1': (1, 1),
'?': (0, 1),
'*': (0, sys.maxsize),
'+': (1, sys.maxsize),
}
OutputClassMapping = {
ContentKind.File.name: ContentKind.File,
ContentKind.Directory.name: ContentKind.Directory,
ContentKind.Value.name: ContentKind.Value,
}
def parseExpectedOutputs(self, outputs: Union[List[Any], Mapping[str, Any]]) -> List[ExpectedOutput]:
expectedOutputs = []
# TODO: implement parsing of outputs
outputsIter = outputs.items() if isinstance(outputs, dict) else enumerate(outputs)
for outputKey, outputDesc in outputsIter:
# The glob pattern
patS = outputDesc.get('glob')
if patS is not None:
if len(patS) == 0:
patS = None
# Fill from this input
fillFrom = outputDesc.get('fillFrom')
# Parsing the cardinality
cardS = outputDesc.get('cardinality')
cardinality = None
if cardS is not None:
if isinstance(cardS, int):
if cardS < 1:
cardinality = (0, 1)
else:
cardinality = (cardS, cardS)
elif isinstance(cardS, list):
cardinality = (int(cardS[0]), int(cardS[1]))
else:
cardinality = self.CardinalityMapping.get(cardS)
if cardinality is None:
cardinality = self.CardinalityMapping[self.DefaultCardinality]
eOutput = ExpectedOutput(
name=outputKey,
kind=self.OutputClassMapping.get(outputDesc.get('c-l-a-s-s'), ContentKind.File),
preferredFilename=outputDesc.get('preferredName'),
cardinality=cardinality,
fillFrom=fillFrom,
glob=patS,
)
expectedOutputs.append(eOutput)
return expectedOutputs
def parseExportActions(self, raw_actions: Sequence[Mapping[str, Any]]) -> Sequence[ExportAction]:
assert self.outputs is not None
o_raw_actions = {
"exports": raw_actions
}
valErrors = config_validate(o_raw_actions, self.EXPORT_ACTIONS_SCHEMA)
if len(valErrors) > 0:
errstr = f'ERROR in export actions definition block: {valErrors}'
self.logger.error(errstr)
raise WFException(errstr)
actions : MutableSequence[ExportAction] = []
for actionDesc in raw_actions:
actionId = cast(SymbolicName, actionDesc['id'])
pluginId = cast(SymbolicName, actionDesc['plugin'])
whatToExport = []
for encoded_name in actionDesc['what']:
colPos = encoded_name.find(':')
assert colPos >= 0
if colPos == 0:
if encoded_name[-1] != ':':
raise WFException(f'Unexpected element to export {encoded_name}')
rawItemType = encoded_name[1:-1]
whatName = None
else:
rawItemType = encoded_name[0:colPos]
whatName = encoded_name[colPos+1:]
assert len(whatName) > 0
whatToExport.append(
ExportItem(
type=ExportItemType(rawItemType),
name=whatName
)
)
action = ExportAction(
action_id=actionId,
plugin_id=pluginId,
what=whatToExport,
context_name=actionDesc.get('security-context'),
setup=actionDesc.get('setup'),
preferred_scheme=actionDesc.get('preferred-scheme'),
preferred_id=actionDesc.get('preferred-pid'),
)
actions.append(action)
return actions
def executeWorkflow(self, offline: bool = False):
self.unmarshallStage(offline=offline)
assert self.materializedEngine is not None
assert self.materializedParams is not None
assert self.outputs is not None
exitVal, augmentedInputs, matCheckOutputs = WorkflowEngine.ExecuteWorkflow(self.materializedEngine,
self.materializedParams,
self.outputs)
self.exitVal = exitVal
self.augmentedInputs = augmentedInputs
self.matCheckOutputs = matCheckOutputs
self.logger.debug(exitVal)
self.logger.debug(augmentedInputs)
self.logger.debug(matCheckOutputs)
# Store serialized version of exitVal, augmentedInputs and matCheckOutputs
self.marshallExecute()
def listMaterializedExportActions(self) -> Sequence[MaterializedExportAction]:
"""
This method should return the pids generated from the contents
"""
self.unmarshallExport(offline=True)
assert self.runExportActions is not None
return self.runExportActions
def exportResultsFromFiles(
self,
exportActionsFile: Optional[AnyPath] = None,
securityContextFile: Optional[AnyPath] = None,
action_ids: Sequence[SymbolicName] = [],
fail_ok: bool = False
) -> Tuple[Sequence[MaterializedExportAction], Sequence[Tuple[ExportAction, Exception]]]:
if exportActionsFile is not None:
with open(exportActionsFile, mode="r", encoding="utf-8") as eaf:
raw_actions = unmarshall_namedtuple(yaml.load(eaf, Loader=YAMLLoader))
actions = self.parseExportActions(raw_actions['exports'])
else:
actions = None
if securityContextFile is not None:
creds_config = self.ReadSecurityContextFile(securityContextFile)
valErrors = config_validate(creds_config, self.SECURITY_CONTEXT_SCHEMA)
if len(valErrors) > 0:
errstr = f'ERROR in security context block: {valErrors}'
self.logger.error(errstr)
raise WFException(errstr)
else:
creds_config = None
return self.exportResults(actions, creds_config, action_ids, fail_ok=fail_ok)
def exportResults(
self,
actions: Optional[Sequence[ExportAction]] = None,
creds_config: Optional[SecurityContextConfigBlock] = None,
action_ids: Sequence[SymbolicName] = [],
fail_ok: bool = False
) -> Tuple[Sequence[MaterializedExportAction], Sequence[Tuple[ExportAction, Exception]]]:
# The precondition
if self.unmarshallExport(offline=True, fail_ok=True) is None:
# TODO
raise WFException("FIXME")
# If actions is None, then try using default ones
matActions : MutableSequence[MaterializedExportAction] = []
actionErrors : MutableSequence[Tuple[ExportAction, Exception]] = []
if actions is None:
actions = self.default_actions
# Corner case
if actions is None:
return matActions, actionErrors
filtered_actions : Sequence[ExportAction]
if len(action_ids) > 0:
action_ids_set = set(action_ids)
filtered_actions = list(filter(lambda action: action.action_id in action_ids_set, actions))
else:
filtered_actions = actions
# First, let's check all the requested actions are viable
for action in filtered_actions:
try:
# check the export items are available
elems = self.locateExportItems(action.what)
# check the security context is available
a_setup_block : Optional[SecurityContextConfig] = action.setup
if a_setup_block is not None:
# Clone it
a_setup_block = a_setup_block.copy()
if action.context_name is None:
pass
elif creds_config is not None:
setup_block = creds_config.get(action.context_name)
if setup_block is None:
raise ExportActionException(f"No configuration found for context {action.context_name} (action {action.action_id})")
# Merging both setup blocks
if a_setup_block is None:
a_setup_block = setup_block
else:
a_setup_block.update(setup_block)
else:
raise ExportActionException(f"Missing security context block with requested context {action.context_name} (action {action.action_id})")
# check whether plugin is available
export_p = self.wfexs.instantiateExportPlugin(self, action.plugin_id, a_setup_block)
# Export the contents and obtain a PID
new_pids = export_p.push(
elems,
preferred_scheme=action.preferred_scheme,
preferred_id=action.preferred_id
)
# Last, register the PID
matAction = MaterializedExportAction(
action=action,
elems=elems,
pids=new_pids
)
matActions.append(matAction)
except Exception as e:
self.logger.exception(f"Export action {action.action_id} (plugin {action.plugin_id}) failed")
actionErrors.append((action, e))
if len(actionErrors) > 0:
errmsg = "There were errors in actions {0}, skipping:\n{1}".format(
",".join(map(lambda err: err[0].action_id, actionErrors)),
"\n".join(map(lambda err: str(err[1]), actionErrors))
)
self.logger.error(errmsg)
if not fail_ok:
| |
``Seek`` can be done. Defaults to 7 days. Cannot be more than 7
days or less than 10 minutes. ALPHA: This feature is part of an alpha
release. This API might be changed in backward-incompatible ways and is
not recommended for production use. It is not subject to any SLA or
deprecation policy.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.pubsub_v1.types.Duration`
labels (dict[str -> str]): See <a href="/pubsub/docs/labels"> Creating and managing labels</a>.
expiration_policy (Union[dict, ~google.cloud.pubsub_v1.types.ExpirationPolicy]): A policy that specifies the conditions for this subscription's
expiration. A subscription is considered active as long as any connected
subscriber is successfully consuming messages from the subscription or
is issuing operations on the subscription. If ``expiration_policy`` is
not set, a *default policy* with ``ttl`` of 31 days will be used. The
minimum allowed value for ``expiration_policy.ttl`` is 1 day. BETA: This
feature is part of a beta release. This API might be changed in
backward-incompatible ways and is not recommended for production use. It
is not subject to any SLA or deprecation policy.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.pubsub_v1.types.ExpirationPolicy`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.pubsub_v1.types.Subscription` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'create_subscription' not in self._inner_api_calls:
self._inner_api_calls[
'create_subscription'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_subscription,
default_retry=self._method_configs['CreateSubscription'].
retry,
default_timeout=self._method_configs['CreateSubscription'].
timeout,
client_info=self._client_info,
)
request = pubsub_pb2.Subscription(
name=name,
topic=topic,
push_config=push_config,
ack_deadline_seconds=ack_deadline_seconds,
retain_acked_messages=retain_acked_messages,
message_retention_duration=message_retention_duration,
labels=labels,
expiration_policy=expiration_policy,
)
return self._inner_api_calls['create_subscription'](
request, retry=retry, timeout=timeout, metadata=metadata)
def get_subscription(self,
subscription,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Gets the configuration details of a subscription.
Example:
>>> from google.cloud import pubsub_v1
>>>
>>> client = pubsub_v1.SubscriberClient()
>>>
>>> subscription = client.subscription_path('[PROJECT]', '[SUBSCRIPTION]')
>>>
>>> response = client.get_subscription(subscription)
Args:
subscription (str): The name of the subscription to get. Format is
``projects/{project}/subscriptions/{sub}``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.pubsub_v1.types.Subscription` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_subscription' not in self._inner_api_calls:
self._inner_api_calls[
'get_subscription'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_subscription,
default_retry=self._method_configs['GetSubscription'].
retry,
default_timeout=self._method_configs['GetSubscription'].
timeout,
client_info=self._client_info,
)
request = pubsub_pb2.GetSubscriptionRequest(
subscription=subscription, )
return self._inner_api_calls['get_subscription'](
request, retry=retry, timeout=timeout, metadata=metadata)
def update_subscription(self,
subscription,
update_mask,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Updates an existing subscription. Note that certain properties of a
subscription, such as its topic, are not modifiable.
Example:
>>> from google.cloud import pubsub_v1
>>>
>>> client = pubsub_v1.SubscriberClient()
>>>
>>> ack_deadline_seconds = 42
>>> subscription = {'ack_deadline_seconds': ack_deadline_seconds}
>>> paths_element = 'ack_deadline_seconds'
>>> paths = [paths_element]
>>> update_mask = {'paths': paths}
>>>
>>> response = client.update_subscription(subscription, update_mask)
Args:
subscription (Union[dict, ~google.cloud.pubsub_v1.types.Subscription]): The updated subscription object.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.pubsub_v1.types.Subscription`
update_mask (Union[dict, ~google.cloud.pubsub_v1.types.FieldMask]): Indicates which fields in the provided subscription to update.
Must be specified and non-empty.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.pubsub_v1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.pubsub_v1.types.Subscription` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'update_subscription' not in self._inner_api_calls:
self._inner_api_calls[
'update_subscription'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_subscription,
default_retry=self._method_configs['UpdateSubscription'].
retry,
default_timeout=self._method_configs['UpdateSubscription'].
timeout,
client_info=self._client_info,
)
request = pubsub_pb2.UpdateSubscriptionRequest(
subscription=subscription,
update_mask=update_mask,
)
return self._inner_api_calls['update_subscription'](
request, retry=retry, timeout=timeout, metadata=metadata)
def list_subscriptions(self,
project,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Lists matching subscriptions.
Example:
>>> from google.cloud import pubsub_v1
>>>
>>> client = pubsub_v1.SubscriberClient()
>>>
>>> project = client.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in client.list_subscriptions(project):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_subscriptions(project).pages:
... for element in page:
... # process element
... pass
Args:
project (str): The name of the project in which to list subscriptions. Format is
``projects/{project-id}``.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.pubsub_v1.types.Subscription` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_subscriptions' not in self._inner_api_calls:
self._inner_api_calls[
'list_subscriptions'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_subscriptions,
default_retry=self._method_configs['ListSubscriptions'].
retry,
default_timeout=self._method_configs['ListSubscriptions'].
timeout,
client_info=self._client_info,
)
request = pubsub_pb2.ListSubscriptionsRequest(
project=project,
page_size=page_size,
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls['list_subscriptions'],
retry=retry,
timeout=timeout,
metadata=metadata),
request=request,
items_field='subscriptions',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def delete_subscription(self,
subscription,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Deletes an existing subscription. All messages retained in the
subscription are immediately dropped. Calls to ``Pull`` after deletion
will return ``NOT_FOUND``. After a subscription is deleted, a new one
may be created with the same name, but the new one has no association
with the old subscription or its topic unless the same topic is
specified.
Example:
>>> from google.cloud import pubsub_v1
>>>
>>> client = pubsub_v1.SubscriberClient()
>>>
>>> subscription = client.subscription_path('[PROJECT]', '[SUBSCRIPTION]')
>>>
>>> client.delete_subscription(subscription)
Args:
subscription (str): The subscription to delete. Format is
``projects/{project}/subscriptions/{sub}``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'delete_subscription' not in self._inner_api_calls:
self._inner_api_calls[
'delete_subscription'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_subscription,
default_retry=self._method_configs['DeleteSubscription'].
retry,
default_timeout=self._method_configs['DeleteSubscription'].
timeout,
client_info=self._client_info,
)
request = pubsub_pb2.DeleteSubscriptionRequest(
subscription=subscription, )
| |
0.9714
Epoch 457/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0868 - accuracy: 0.9626
Epoch 458/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0851 - accuracy: 0.9692
Epoch 459/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0797 - accuracy: 0.9626
Epoch 460/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0825 - accuracy: 0.9670
Epoch 461/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1011 - accuracy: 0.9560
Epoch 462/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1056 - accuracy: 0.9516
Epoch 463/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0784 - accuracy: 0.9692
Epoch 464/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0852 - accuracy: 0.9692
Epoch 465/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0849 - accuracy: 0.9670
Epoch 466/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1224 - accuracy: 0.9495
Epoch 467/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.3110 - accuracy: 0.9143
Epoch 468/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1101 - accuracy: 0.9648
Epoch 469/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0911 - accuracy: 0.9648
Epoch 470/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1135 - accuracy: 0.9582
Epoch 471/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1340 - accuracy: 0.9451
Epoch 472/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0882 - accuracy: 0.9648
Epoch 473/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1200 - accuracy: 0.9604
Epoch 474/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1078 - accuracy: 0.9626
Epoch 475/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0862 - accuracy: 0.9648
Epoch 476/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0972 - accuracy: 0.9582
Epoch 477/1000
15/15 [==============================] - 0s 5ms/step - loss: 0.0840 - accuracy: 0.9670
Epoch 478/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0867 - accuracy: 0.9626
Epoch 479/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1438 - accuracy: 0.9473
Epoch 480/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1281 - accuracy: 0.9495
Epoch 481/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0824 - accuracy: 0.9692
Epoch 482/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0924 - accuracy: 0.9714
Epoch 483/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1093 - accuracy: 0.9538
Epoch 484/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0865 - accuracy: 0.9626
Epoch 485/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0781 - accuracy: 0.9670
Epoch 486/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0869 - accuracy: 0.9692
Epoch 487/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0737 - accuracy: 0.9692
Epoch 488/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1080 - accuracy: 0.9560
Epoch 489/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1003 - accuracy: 0.9582
Epoch 490/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0767 - accuracy: 0.9626
Epoch 491/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0932 - accuracy: 0.9670
Epoch 492/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0767 - accuracy: 0.9714
Epoch 493/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0862 - accuracy: 0.9604
Epoch 494/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0899 - accuracy: 0.9648
Epoch 495/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0811 - accuracy: 0.9692
Epoch 496/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0797 - accuracy: 0.9648
Epoch 497/1000
15/15 [==============================] - 0s 4ms/step - loss: 0.0903 - accuracy: 0.9714
Epoch 498/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0980 - accuracy: 0.9560
Epoch 499/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1279 - accuracy: 0.9495
Epoch 500/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1095 - accuracy: 0.9582
Epoch 501/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1061 - accuracy: 0.9582
Epoch 502/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0949 - accuracy: 0.9670
Epoch 503/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1119 - accuracy: 0.9538
Epoch 504/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1386 - accuracy: 0.9451
Epoch 505/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1100 - accuracy: 0.9560
Epoch 506/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1081 - accuracy: 0.9582
Epoch 507/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0812 - accuracy: 0.9670
Epoch 508/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0825 - accuracy: 0.9582
Epoch 509/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0853 - accuracy: 0.9670
Epoch 510/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0810 - accuracy: 0.9670
Epoch 511/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1796 - accuracy: 0.9319
Epoch 512/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1032 - accuracy: 0.9604
Epoch 513/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0915 - accuracy: 0.9670
Epoch 514/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0776 - accuracy: 0.9692
Epoch 515/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0847 - accuracy: 0.9670
Epoch 516/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0776 - accuracy: 0.9692
Epoch 517/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0975 - accuracy: 0.9626
Epoch 518/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1042 - accuracy: 0.9538
Epoch 519/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0846 - accuracy: 0.9648
Epoch 520/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0988 - accuracy: 0.9582
Epoch 521/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0775 - accuracy: 0.9714
Epoch 522/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0737 - accuracy: 0.9714
Epoch 523/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0781 - accuracy: 0.9692
Epoch 524/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1317 - accuracy: 0.9407
Epoch 525/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0839 - accuracy: 0.9692
Epoch 526/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0723 - accuracy: 0.9714
Epoch 527/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0838 - accuracy: 0.9736
Epoch 528/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0851 - accuracy: 0.9560
Epoch 529/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1125 - accuracy: 0.9495
Epoch 530/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0813 - accuracy: 0.9736
Epoch 531/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0968 - accuracy: 0.9648
Epoch 532/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1006 - accuracy: 0.9626
Epoch 533/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1178 - accuracy: 0.9604
Epoch 534/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1304 - accuracy: 0.9495
Epoch 535/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1524 - accuracy: 0.9451
Epoch 536/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1067 - accuracy: 0.9516
Epoch 537/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0787 - accuracy: 0.9670
Epoch 538/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0834 - accuracy: 0.9670
Epoch 539/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1178 - accuracy: 0.9582
Epoch 540/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1089 - accuracy: 0.9538
Epoch 541/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.2039 - accuracy: 0.9231
Epoch 542/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1428 - accuracy: 0.9407
Epoch 543/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1275 - accuracy: 0.9429
Epoch 544/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1022 - accuracy: 0.9604
Epoch 545/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0855 - accuracy: 0.9736
Epoch 546/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0960 - accuracy: 0.9604
Epoch 547/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0857 - accuracy: 0.9670
Epoch 548/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0935 - accuracy: 0.9604
Epoch 549/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0911 - accuracy: 0.9670
Epoch 550/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0982 - accuracy: 0.9670
Epoch 551/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1270 - accuracy: 0.9582
Epoch 552/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0767 - accuracy: 0.9780
Epoch 553/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0834 - accuracy: 0.9670
Epoch 554/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0798 - accuracy: 0.9648
Epoch 555/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0986 - accuracy: 0.9560
Epoch 556/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0688 - accuracy: 0.9758
Epoch 557/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0897 - accuracy: 0.9648
Epoch 558/1000
15/15 [==============================] - 0s 4ms/step - loss: 0.0879 - accuracy: 0.9692
Epoch 559/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0909 - accuracy: 0.9626
Epoch 560/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0803 - accuracy: 0.9736
Epoch 561/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0814 - accuracy: 0.9670
Epoch 562/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1041 - accuracy: 0.9560
Epoch 563/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0787 - accuracy: 0.9692
Epoch 564/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0891 - accuracy: 0.9560
Epoch 565/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0758 - accuracy: 0.9692
Epoch 566/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0765 - accuracy: 0.9736
Epoch 567/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0878 - accuracy: 0.9670
Epoch 568/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0741 - accuracy: 0.9692
Epoch 569/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0889 - accuracy: 0.9736
Epoch 570/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.1051 - accuracy: 0.9604
Epoch 571/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0956 - accuracy: 0.9604
Epoch 572/1000
15/15 [==============================] - 0s 3ms/step - loss: 0.0833 - accuracy: 0.9626
Epoch 573/1000
15/15 [==============================] - | |
<reponame>Climate-Crisis-AI-Team-Vel-Ice/sea-ice-dataviz<filename>ml_pipeline/refine.py<gh_stars>0
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
from sklearn import metrics
from sklearn import linear_model
import sklearn
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn import linear_model
import xgboost as xgb
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import accuracy_score
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.ensemble import ExtraTreesClassifier
import seaborn as sns
from sklearn.decomposition import PCA, IncrementalPCA, NMF
import multilabelClassification as mc
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
import numpy as np
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectKBest, f_classif, f_regression, mutual_info_regression
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from multilabelClassificationMetrics import metrics_precision_recall
import NN as nn_nn
import NN_regression as nr
import matplotlib as mpl
from sklearn.isotonic import IsotonicRegression
import statistics
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.svm import SVR
from sklearn.utils.validation import check_random_state
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RidgeCV
import NN_regression_pytorch as nrp
import multioutput_nn_regressor as mnr
from sklearn.multioutput import RegressorChain
from sklearn.multioutput import MultiOutputRegressor
import uuid
import matplotlib as mpl
from imputer import impute
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
import csv
from functools import reduce
# Global plotting configuration: white seaborn theme with the "Set2" palette.
sns.set_style("white")
sns.set_palette("Set2")
plt.style.use('seaborn-white')  # applies the seaborn-white matplotlib style sheet
#style.use('ggplot')
def adjusted_rsquare(r2, n, p):
    """Return the adjusted R-squared.

    Args:
        r2: plain R-squared score.
        n: number of observations.
        p: number of predictors (model degrees of freedom).

    Raises ZeroDivisionError when n - p - 1 == 0, like the raw formula.
    """
    penalty = (1 - r2) * (n - 1) / (n - p - 1)
    return 1 - penalty
def plot_features(df, columns, layer):
    """Plot per-feature distributions and a missing-data summary.

    For each column in *columns*: prints the missing count, min, max and mean,
    and saves a figure ('0_<col> <layer>.png') with a histogram/KDE plus a
    boxplot overlaid on a twin axis.  Finally saves a bar chart of missing
    counts per feature to 'missing'.
    """
    missing = []  # number of missing values per column, in *columns* order
    for i in columns:
        print('number of missing data ' + i + ' :', (len(df[i]) - df[i].count()))
        print(df[i].min())
        print(df[i].max())
        print(df[i].mean())
        missing.append(len(df[i]) - df[i].count())
        fig = plt.figure(i)
        # Non-null values, sorted, reshaped to a column vector for seaborn.
        X = (df[df[i].notna()][i].sort_values().to_numpy()).reshape(-1,1)
        # scaler = MinMaxScaler()
        # scaler.fit((df[df[i].notna()][i].sort_values().to_numpy()).reshape(-1,1))
        # X = scaler.transform((df[df[i].notna()][i].sort_values().to_numpy()).reshape(-1,1))
        #plt.plot(X.reshape(-1,1))
        #ax = df[df[i].notna()][i].sort_values().plot.kde(bw_method=0.3)
        #plt.show()
        #print(len(df[df[i].notna()][i].sort_values().tolist())
        ax = sns.distplot(X, hist = True, kde = True,
                          kde_kws = {'linewidth': 3},
                          label = i)
        ax2 = ax.twinx()  # second y-axis so the boxplot shares the same x-axis
        sns.boxplot(x=X, ax=ax2)
        ax2.set(ylim=(-.5, 10))
        plt.ylabel('Density')
        plt.title(i)
        plt.savefig('0_' + i +' '+ layer +'.png' )
        plt.close()
    # Summary bar chart: one bar per feature with its missing-value count.
    dff = pd.DataFrame(dict(features=columns, number_of_missing=missing))
    sns.barplot(x='features', y = 'number_of_missing', data=dff)
    plt.xticks(
        rotation=45,
        horizontalalignment='right',
        fontweight='light')
    plt.title('#missing data ' + layer)
    plt.savefig('missing')
    plt.close()
def reform_targets(response_columns, df):
    """Collapse each TEXTURE label to its first space-separated token, in place.

    E.g. 'ARGILE LOURDE' becomes 'ARGILE'.  Prints the resulting unique labels
    and their counts.

    Args:
        response_columns: unused; kept for backward compatibility with callers.
        df: DataFrame with a string 'TEXTURE' column; mutated in place.
    """
    # Vectorized replacement of the original per-row Series.update() loop,
    # which built a one-element Series for every row.
    df['TEXTURE'] = df['TEXTURE'].map(lambda label: label.split(' ')[0])
    print(df['TEXTURE'].unique())
    print(df['TEXTURE'].value_counts())
def plot_y(y, df):
    """Save a bar chart of TEXTURE class counts to 'plot_y'.

    *y* is accepted for interface compatibility but is not used here.
    """
    plt.close()  # start from a clean figure
    class_counts = df['TEXTURE'].value_counts()
    class_counts.plot.bar()
    plt.tight_layout()
    plt.savefig('plot_y')
    plt.close()
def feature_in_onefigure(data, target, df):
    """Overlay per-class histograms for every feature in a single 9x3 grid.

    Classes are ARGILE (red), SABLE (green) and LOAM (blue); the figure is
    saved as 'feature'.  *target* is unused.
    NOTE(review): assumes len(data) <= 27 so every feature fits the grid — confirm.
    """
    fig,axes =plt.subplots(9,3, figsize=(12, 9)) # 3 columns each containing 9 figures, total 27 features
    ARGILE=df.loc[df['TEXTURE']=='ARGILE', data].to_numpy()
    SABLE=df[df.columns.intersection(data)].loc[df['TEXTURE']=='SABLE', data].to_numpy()
    LOAM=df[df.columns.intersection(data)].loc[df['TEXTURE']=='LOAM', data].to_numpy()
    ax=axes.ravel() # flat axes with numpy ravel
    for i in range(len(data)):
        # Shared bin edges (computed over all rows) so histograms are comparable.
        _,bins=np.histogram(df[df.columns.intersection(data)].to_numpy()[:,i],bins=40)
        ax[i].hist(ARGILE[:,i],bins=bins,color='r',alpha=.5)
        ax[i].hist(SABLE[:,i],bins=bins,color='g',alpha=0.3)# alpha is for transparency in the overlapped region
        ax[i].hist(LOAM[:,i],bins=bins,color='b',alpha=0.1)
        ax[i].set_title(df.columns.intersection(data)[i],fontsize=9)
        ax[i].axes.get_xaxis().set_visible(False) # x ticks hidden; we only compare histogram separation
        ax[i].set_yticks(())
    ax[0].legend(['ARGILE','SABLE', 'LOAM'],loc='best',fontsize=8)
    plt.tight_layout()
    plt.savefig('feature')
    plt.close()
def feature(data, target, df):
    """Save one figure per feature overlaying class histograms.

    Same histograms as feature_in_onefigure, but each feature gets its own
    large figure saved as 'feature<i><column name>'.  *target* is unused.
    """
    #fig,axes =plt.subplots(9,3, figsize=(12, 9)) # 3 columns each containing 9 figures, total 27 features
    ARGILE=df.loc[df['TEXTURE']=='ARGILE', data].to_numpy()
    SABLE=df[df.columns.intersection(data)].loc[df['TEXTURE']=='SABLE', data].to_numpy()
    LOAM=df[df.columns.intersection(data)].loc[df['TEXTURE']=='LOAM', data].to_numpy()
    #ax=axes.ravel() # flat axes with numpy ravel
    for i in range(len(data)):
        fig,ax =plt.subplots(1,1, figsize=(24, 14))
        # Shared bin edges (computed over all rows) so histograms are comparable.
        _,bins=np.histogram(df[df.columns.intersection(data)].to_numpy()[:,i],bins=40)
        ax.hist(ARGILE[:,i],bins=bins,color='r',alpha=.5)
        ax.hist(SABLE[:,i],bins=bins,color='g',alpha=0.3)# alpha is for transparency in the overlapped region
        ax.hist(LOAM[:,i],bins=bins,color='b',alpha=0.1)
        ax.set_title(df.columns.intersection(data)[i],fontsize=25)
        #ax.axes.get_xaxis().set_visible(False)
        ax.set_yticks(())
        ax.legend(['ARGILE','SABLE', 'LOAM'],loc='best',fontsize=22)
        plt.tight_layout()
        plt.savefig('feature' + str(i) + df.columns.intersection(data)[i] )
        plt.close()
def visualization2d(X, y):
    """Project X to 2-D with t-SNE and save a labelled scatter plot ('Tsne')."""
    X = preprocessing.scale(X)  # zero-mean / unit-variance before t-SNE
    data_subset = X
    tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300)
    tsne_results = tsne.fit_transform(data_subset)
    sns.set_palette("Set2")
    plt.figure(figsize=(16,10))
    sns.scatterplot(x=tsne_results[:, 0], y=tsne_results[:, 1], hue=y
                    ,cmap='Set2', legend='brief') # hue = y
    # NOTE(review): legend labels assume y encodes exactly three ordered
    # classes Low/Medium/High — confirm against callers.
    plt.legend(title='Tsne', loc='upper left', labels=['Low', 'Medium', 'High'])
    plt.title('Tsne Visualization in 2D')
    plt.tight_layout()
    plt.savefig('Tsne')
    plt.close()
def visualization3d(X, y):
    """Project X to 3-D with t-SNE and save a labelled 3-D scatter ('3d_tsne')."""
    X = preprocessing.scale(X)  # zero-mean / unit-variance before t-SNE
    data_subset = X
    tsne = TSNE(n_components=3, verbose=1, perplexity=40, n_iter=300)
    tsne_results = tsne.fit_transform(data_subset)
    ax = plt.axes(projection='3d')
    # NOTE(review): assumes y holds integer classes 1/2/3 meaning
    # Low/Medium/High — confirm against callers.
    for color, i, target_name in zip(mpl.cm.Set2.colors[:4], [1, 2, 3], ['Low', 'Medium', 'High']):
        ax.scatter(tsne_results[np.where(y.to_numpy() == i), 0], tsne_results[np.where(y.to_numpy() == i), 1], tsne_results[np.where(y.to_numpy() == i), 2],
                   label=target_name, color=color)
    plt.title('tsne visualization' + " of chemical dataset")
    plt.legend(loc="best", shadow=False, scatterpoints=1)
    plt.tight_layout()
    plt.savefig('3d_tsne')
    plt.close()
#visualization(df[columns].astype('float'), df['TEXTURE'])
def pca(X, dim):
    """Fit a PCA with *dim* components on X and return the transformed data.

    Prints the number of fitted components and the explained variance.

    Args:
        X: array-like of shape (n_samples, n_features).
        dim: number of principal components to keep.

    Returns:
        ndarray of shape (n_samples, dim): X projected onto the components.
    """
    # Renamed local to avoid shadowing this function's own name, and fit once
    # instead of the original fit() followed by a second fit inside
    # fit_transform().
    model = PCA(dim)
    transformed = model.fit_transform(X)
    print('number of the components', model.n_components_)
    print('explained variance', model.explained_variance_)
    return transformed
def clustering_dbscan(X, labels_true):
    """Run DBSCAN on standardized X, print clustering metrics against
    *labels_true*, and save a scatter plot to 'clusteringdbscan'.
    """
    X = StandardScaler().fit_transform(X)
    # Compute DBSCAN
    db = DBSCAN(eps=0.3, min_samples=10).fit(X)
    core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
    core_samples_mask[db.core_sample_indices_] = True
    labels = db.labels_
    # Number of clusters in labels, ignoring noise if present.
    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
    n_noise_ = list(labels).count(-1)  # DBSCAN marks noise points with label -1
    print('Estimated number of clusters: %d' % n_clusters_)
    print('Estimated number of noise points: %d' % n_noise_)
    print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
    print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
    print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
    print("Adjusted Rand Index: %0.3f"
          % metrics.adjusted_rand_score(labels_true, labels))
    print("Adjusted Mutual Information: %0.3f"
          % metrics.adjusted_mutual_info_score(labels_true, labels))
    print("Silhouette Coefficient: %0.3f"
          % metrics.silhouette_score(X, labels))
    # Plot result: core samples drawn large, non-core samples small.
    unique_labels = set(labels)
    colors = [plt.cm.Spectral(each)
              for each in np.linspace(0, 1, len(unique_labels))]
    for k, col in zip(unique_labels, colors):
        if k == -1:
            # Black used for noise.
            col = [0, 0, 0, 1]
        class_member_mask = (labels == k)
        xy = X[class_member_mask & core_samples_mask]
        plt.plot(xy[:, 0], xy[:, 1], 'o', #, markerfacecolor=tuple(col)
                 markeredgecolor='k' ,markersize=8. ) #markeredgecolor='k',
        xy = X[class_member_mask & ~core_samples_mask]
        plt.plot(xy[:, 0], xy[:, 1], 'o', #, markerfacecolor=tuple(col),
                 markeredgecolor='k', markersize=2)
    plt.title('Estimated number of clusters: %d' % n_clusters_)
    plt.xlabel('First Component')
    plt.ylabel('Second Component')
    plt.legend()
    plt.savefig('clusteringdbscan')
    plt.close()
def clustering_kmeans(X, labels_true):
    """Run KMeans (k=4) on standardized X, print clustering metrics against
    *labels_true*, and save a scatter plot to 'clustering_kmeans'.
    """
    #X = df[columns].astype('float')
    #print((X.values.shape))
    #print(score)
    X = StandardScaler().fit_transform(X)
    # #############################################################################
    # Compute KMeans clustering (comment previously said DBSCAN).
    kmeans = KMeans(n_clusters=4, random_state=0).fit(X)
    #core_samples_mask = np.zeros_like(kmeans.labels_, dtype=bool)
    #core_samples_mask[db.core_sample_indices_] = True
    labels = kmeans.labels_
    # Number of clusters in labels, ignoring the -1 noise label if present.
    # (KMeans never emits -1, so this equals n_clusters and n_noise_ is 0.)
    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
    n_noise_ = list(labels).count(-1)
    print('Estimated number of clusters: %d' % n_clusters_)
    print('Estimated number of noise points: %d' % n_noise_)
    print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
    print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
    print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
    print("Adjusted Rand Index: %0.3f"
          % metrics.adjusted_rand_score(labels_true, labels))
    print("Adjusted Mutual Information: %0.3f"
          % metrics.adjusted_mutual_info_score(labels_true, labels))
    print("Silhouette Coefficient: %0.3f"
          % metrics.silhouette_score(X, labels))
    # Plot result
    #X = pca(X)
    #unique_labels = set(labels)
    sns.set_palette('Set2')
    # Colour encodes the true labels; marker style encodes the predicted cluster.
    sns.scatterplot(x=X[:, 0], y=X[:, 1],
                    hue=labels_true, style=labels, legend='brief')
    plt.savefig('clustering_kmeans')
    plt.close()
def feature_selection(X, y, data, number_features):
    """Score features three ways and save plots.

    1. chi2 SelectKBest: prints the top *number_features* scores.
    2. ExtraTreesClassifier importances: prints them and saves a horizontal
       bar chart to 'feature_selection'.
    3. Correlation heatmap of *data*: saved to 'heatmap'.
    """
    bestfeatures = SelectKBest(score_func=chi2, k=number_features)
    fit = bestfeatures.fit(X,y)
    dfscores = pd.DataFrame(fit.scores_)
    dfcolumns = pd.DataFrame(X.columns)
    # Concat the two frames so each row pairs a column name with its score.
    featureScores = pd.concat([dfcolumns,dfscores],axis=1)
    featureScores.columns = ['X','Score'] #naming the dataframe columns
    print(featureScores.nlargest(number_features,'Score')) # print the best features
    model = ExtraTreesClassifier()
    model.fit(X,y)
    print(model.feature_importances_) # built-in importances of tree-based classifiers
    # Plot graph of feature importances for better visualization.
    feat_importances = pd.Series(model.feature_importances_, index=X.columns)
    feat_importances.nlargest(number_features).plot(kind='barh')
    plt.rcParams.update({'font.size': 8})
    plt.title('Feature selection ExtraTreesClassifier')
    plt.savefig('feature_selection')
    plt.close()
    # Correlations of each feature pair in the dataset.
    corrmat = data.corr()
    top_corr_features = corrmat.index
    plt.figure(figsize=(25,25))
    # Plot heat map.
    plt.rcParams.update({'font.size': 20})
    g=sns.heatmap(data[top_corr_features].corr(), annot=True, cmap="RdYlGn", )
    plt.savefig('heatmap')
    plt.close()
def incremental_pca(X, y, data):
    """Compare IncrementalPCA against full PCA in 2-D and 3-D projections.

    Saves four figures ('2d_pca<title>' / '3d_pca<title>' for "Incremental PCA"
    and "PCA"), colouring points by the class labels in *y* (values 1/2/3 shown
    as Low/Medium/High).  *data* is unused.  Adapted from the scikit-learn
    IncrementalPCA example.
    """
    # Authors: <NAME>
    # License: BSD 3 clause
    n_components = 2
    ipca = IncrementalPCA(n_components=n_components, batch_size=10)
    X_ipca = ipca.fit_transform(X)
    pca = PCA(n_components=n_components)
    X_pca = pca.fit_transform(X)
    for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
        #plt.figure()
        for color, i, target_name in zip(mpl.cm.Set2.colors[:4] , [1, 2, 3], ['Low', 'Medium', 'High']):
            plt.scatter(X_transformed[np.where(y.to_numpy() == i), 0], X_transformed[np.where(y.to_numpy() == i), 1],
                        color=color, label=target_name)
        if "Incremental" in title:
            # Sign-insensitive difference between the two projections (principal
            # components are only defined up to sign).
            err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
            plt.title(title + " of chemical dataset\nMean absolute unsigned error "
                      "%.6f" % err)
        else:
            plt.title(title + " of chemical dataset")
        plt.legend(loc="best", shadow=False, scatterpoints=1)
        plt.savefig('2d_pca' + title)
        plt.close()
    n_components = 3
    ipca = IncrementalPCA(n_components=n_components, batch_size=10)
    X_ipca = ipca.fit_transform(X)
    pca = PCA(n_components=n_components)
    X_pca = pca.fit_transform(X)
    ax = plt.axes(projection='3d')
    # NOTE(review): *ax* is created once and reused across both iterations, so
    # the second 3-D figure may also carry the first projection's points —
    # confirm this is intended.
    for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
        #plt.figure()
        for color, i, target_name in zip(mpl.cm.Set2.colors[:4], [1, 2, 3], ['Low', 'Medium', 'High']):
            ax.scatter(X_transformed[np.where(y.to_numpy() == i), 0], X_transformed[np.where(y.to_numpy() == i), 1], X_transformed[np.where(y.to_numpy() == i), 2],
                       color=color, label=target_name)
        if "Incremental" in title:
            err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
            plt.title(title + " of chemical dataset\nMean absolute unsigned error "
                      "%.6f" % err)
        else:
            plt.title(title + " of chemical dataset")
        plt.legend(loc="best", shadow=False, scatterpoints=1)
        plt.savefig('3d_pca' + title)
        plt.close()
def logistic_regression(X, y):
    """Print the average precision reported by multilabelClassification.averageprec."""
    score = mc.averageprec(X, y)
    print(score)
def xgboost(X, y):
    """Train and evaluate XGBoost classifiers on (X, y).

    Builds two models:
    1. OneVsRestClassifier wrapping XGBClassifier, evaluated on a held-out 20%
       split (accuracy plus a precision/recall report via
       metrics_precision_recall).
    2. A low-level xgb.train booster on the full data, used only to save a
       tree rendering ('tree.png') and a feature-importance plot ('xgboost').

    NOTE(review): assumes y is a 2-D indicator matrix (uses y.shape[1]) — confirm.
    """
    #X = preprocessing.scale(X)
    datamatrix = xgb.DMatrix(data=X, label=y)
    n_classes = y.shape[1]
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 123)
    xg_reg = OneVsRestClassifier(xgb.XGBClassifier())
    params = {"objective": 'multi:softprob', "eval_metric": 'merror','colsample_bytree' : 0.3, "learning_rate" : 0.1 , "max_depth" : 5, "alpha" : 10, "n_estimators" :10, "num_class" :y.shape[1]}
    #xg_reg = xgb.train(dtrain = Xtrain, dtest = X_test, ytrain= y_train, ytest = y_test, params=params)
    #res = xgb.cv(params, X, num_boost_round=1000, nfold=10, seed=seed, stratified=False, early_stopping_rounds=25, verbose_eval=10, show_stdv=True)
    xg_reg.fit(X_train, y_train)
    preds = xg_reg.predict(X_test)
    y_score = xg_reg.predict_proba(X_test)
    #rmse = np.sqrt(mean_squared_error(y_test, preds))
    accuracy = accuracy_score(y_test, preds)
    # NOTE(review): "%f" is not interpolated here — this prints the tuple
    # ('accuracy: %f', value) rather than a formatted string.
    print("accuracy: %f", (accuracy))
    metrics_precision_recall(y_test, preds, X_test, n_classes, 'XGboost')
    #print("RMSE: %f" % (rmse))
    #cv_results = xgb.cv(dtrain=datamatrix, params=params, nfold=10,
    #                num_boost_round=50,early_stopping_rounds=10, metrics="rmse", as_pandas=True, seed=123)
    #print(cv_results)
    #print((cv_results["test-rmse-mean"]).tail(1))
    xg_reg = xgb.train(params=params, dtrain=datamatrix, num_boost_round=10)
    xgb.plot_tree(xg_reg,num_trees=0)
    plt.rcParams['figure.figsize'] = [70, 70]
    plt.rcParams.update({'font.size': 6})
    plt.savefig('tree.png')
    plt.close()
    #plt.show()
    plt.rcParams.update({'font.size': 65})
    xgb.plot_importance(xg_reg, max_num_features=10)
    plt.rcParams['figure.figsize'] = [5, 5]
    plt.savefig('xgboost')
    plt.close()
def onehotencoder(X):
    """One-hot encode X and return the result as a dense numpy array."""
    encoded = OneHotEncoder().fit_transform(X)  # sparse matrix
    return encoded.toarray()
def svmclassifier(X, Y):
    """Evaluate a LinearSVC on (X, Y).

    First reports mean/std accuracy over 10-fold cross-validation, then refits
    on a fixed 80/20 split and produces a precision/recall report via
    metrics_precision_recall.
    NOTE(review): n_classes is hard-coded to 4 — confirm it matches Y.
    """
    # Standardize features once, up front (both evaluations use the scaled X).
    scaler = StandardScaler().fit(X)
    X = scaler.transform(X)
    classifier = LinearSVC(random_state=0, tol=1e-3)
    # K-fold cross-validation.
    n_classes = 4
    kf = KFold(n_splits=10, shuffle=True)
    kf_split = kf.get_n_splits(X) # number of splitting iterations in the cross-validator
    print(kf)
    accuracy = []  # per-fold test accuracy
    for train_index, test_index in kf.split(X):
        X_train, X_test = X[train_index], X[test_index]
        Y_train, Y_test = Y[train_index], Y[test_index]
        classifier.fit(X_train, Y_train)
        y_score = classifier.decision_function(X_test)
        preds = classifier.predict(X_test)
        accuracy.append(accuracy_score(Y_test, preds))
    accuracy_mean = statistics.mean(accuracy)
    accuracy_std = statistics.stdev(accuracy)
    print('svm:', accuracy_mean, accuracy_std)
    #import pdb
    #pdb.set_trace()
    # Single held-out split for the precision/recall report.
    classifier = LinearSVC(random_state=0, tol=1e-5)
    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 123)
    classifier.fit(X_train, y_train)
    y_score = classifier.decision_function(X_test)
    metrics_precision_recall(onehotencoder(y_test.reshape(-1,1)), y_score, X_test, n_classes, 'SVM')
def replaceZeroes(data):
    """Replace every zero entry of *data* with its smallest nonzero value.

    Mutates the input array in place and also returns it (useful e.g. before
    taking logarithms).
    """
    smallest_nonzero = np.min(data[np.nonzero(data)])
    zero_mask = data == 0
    data[zero_mask] = smallest_nonzero
    return data
def dimensionality_reduction_f_classify(X, y):
# Split dataset to select feature and evaluate the classifier
X_train, X_test, y_train, y_test = train_test_split(
X, y, stratify=y, random_state=0
)
plt.figure(1)
plt.rcParams['figure.figsize'] = [50, 50]
plt.rcParams.update({'font.size': 6})
plt.clf()
X_indices = np.arange(X.shape[-1])
# #############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function to select the four
# most significant features
selector = SelectKBest(f_classif, k=5)
selector.fit(X_train, y_train)
scores = -np.log10(replaceZeroes(selector.pvalues_))
scores /= scores.max()
#print(scores)
plt.rcParams['figure.figsize'] = [70, 70]
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)',
edgecolor='black')
# #############################################################################
# Compare to the weights of an SVM
clf = make_pipeline(MinMaxScaler(), LinearSVC())
clf.fit(X_train, y_train)
print('Classification | |
: The association or disassociation succeeded.
* ``FAILED`` : The association or disassociation failed.
* ``IN_PROGRESS`` : The association or disassociation is still in progress.
- **EngineAttributes** *(list) --*
Attributes specific to the node association. In Puppet, the attibute PUPPET_NODE_CERT contains the signed certificate (the result of the CSR).
- *(dict) --*
A name and value pair that is specific to the engine of the server.
- **Name** *(string) --*
The name of the engine attribute.
- **Value** *(string) --*
The value of the engine attribute.
:type NodeAssociationStatusToken: string
:param NodeAssociationStatusToken: **[REQUIRED]**
The token returned in either the AssociateNodeResponse or the DisassociateNodeResponse.
:type ServerName: string
:param ServerName: **[REQUIRED]**
The name of the server from which to disassociate the node.
:rtype: dict
:returns:
"""
pass
def describe_servers(self, ServerName: str = None, NextToken: str = None, MaxResults: int = None) -> Dict:
    """
    Lists all configuration management servers that are identified with your account. Only the stored results from Amazon DynamoDB are returned. AWS OpsWorks CM does not query other services.
    This operation is synchronous.
    A ``ResourceNotFoundException`` is thrown when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DescribeServers>`_
    **Request Syntax**
    ::
    response = client.describe_servers(
    ServerName='string',
    NextToken='string',
    MaxResults=123
    )
    **Response Syntax**
    ::
    {
    'Servers': [
    {
    'AssociatePublicIpAddress': True|False,
    'BackupRetentionCount': 123,
    'ServerName': 'string',
    'CreatedAt': datetime(2015, 1, 1),
    'CloudFormationStackArn': 'string',
    'DisableAutomatedBackup': True|False,
    'Endpoint': 'string',
    'Engine': 'string',
    'EngineModel': 'string',
    'EngineAttributes': [
    {
    'Name': 'string',
    'Value': 'string'
    },
    ],
    'EngineVersion': 'string',
    'InstanceProfileArn': 'string',
    'InstanceType': 'string',
    'KeyPair': 'string',
    'MaintenanceStatus': 'SUCCESS'|'FAILED',
    'PreferredMaintenanceWindow': 'string',
    'PreferredBackupWindow': 'string',
    'SecurityGroupIds': [
    'string',
    ],
    'ServiceRoleArn': 'string',
    'Status': 'BACKING_UP'|'CONNECTION_LOST'|'CREATING'|'DELETING'|'MODIFYING'|'FAILED'|'HEALTHY'|'RUNNING'|'RESTORING'|'SETUP'|'UNDER_MAINTENANCE'|'UNHEALTHY'|'TERMINATED',
    'StatusReason': 'string',
    'SubnetIds': [
    'string',
    ],
    'ServerArn': 'string'
    },
    ],
    'NextToken': 'string'
    }
    **Response Structure**
    - *(dict) --*
    - **Servers** *(list) --*
    Contains the response to a ``DescribeServers`` request.
    *For Puppet Server:* ``DescribeServersResponse$Servers$EngineAttributes`` contains PUPPET_API_CA_CERT. This is the PEM-encoded CA certificate that is used by the Puppet API over TCP port number 8140. The CA certificate is also used to sign node certificates.
    - *(dict) --*
    Describes a configuration management server.
    - **AssociatePublicIpAddress** *(boolean) --*
    Associate a public IP address with a server that you are launching.
    - **BackupRetentionCount** *(integer) --*
    The number of automated backups to keep.
    - **ServerName** *(string) --*
    The name of the server.
    - **CreatedAt** *(datetime) --*
    Time stamp of server creation. Example ``2016-07-29T13:38:47.520Z``
    - **CloudFormationStackArn** *(string) --*
    The ARN of the CloudFormation stack that was used to create the server.
    - **DisableAutomatedBackup** *(boolean) --*
    Disables automated backups. The number of stored backups is dependent on the value of PreferredBackupCount.
    - **Endpoint** *(string) --*
    A DNS name that can be used to access the engine. Example: ``myserver-asdfghjkl.us-east-1.opsworks.io``
    - **Engine** *(string) --*
    The engine type of the server. Valid values in this release include ``Chef`` and ``Puppet`` .
    - **EngineModel** *(string) --*
    The engine model of the server. Valid values in this release include ``Monolithic`` for Puppet and ``Single`` for Chef.
    - **EngineAttributes** *(list) --*
    The response of a createServer() request returns the master credential to access the server in EngineAttributes. These credentials are not stored by AWS OpsWorks CM; they are returned only as part of the result of createServer().
    **Attributes returned in a createServer response for Chef**
    * ``CHEF_PIVOTAL_KEY`` : A base64-encoded RSA private key that is generated by AWS OpsWorks for Chef Automate. This private key is required to access the Chef API.
    * ``CHEF_STARTER_KIT`` : A base64-encoded ZIP file. The ZIP file contains a Chef starter kit, which includes a README, a configuration file, and the required RSA private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents. From this directory, you can run Knife commands.
    **Attributes returned in a createServer response for Puppet**
    * ``PUPPET_STARTER_KIT`` : A base64-encoded ZIP file. The ZIP file contains a Puppet starter kit, including a README and a required private key. Save this file, unzip it, and then change to the directory where you've unzipped the file contents.
    * ``PUPPET_ADMIN_PASSWORD`` : An administrator password that you can use to sign in to the Puppet Enterprise console after the server is online.
    - *(dict) --*
    A name and value pair that is specific to the engine of the server.
    - **Name** *(string) --*
    The name of the engine attribute.
    - **Value** *(string) --*
    The value of the engine attribute.
    - **EngineVersion** *(string) --*
    The engine version of the server. For a Chef server, the valid value for EngineVersion is currently ``12`` . For a Puppet server, the valid value is ``2017`` .
    - **InstanceProfileArn** *(string) --*
    The instance profile ARN of the server.
    - **InstanceType** *(string) --*
    The instance type for the server, as specified in the CloudFormation stack. This might not be the same instance type that is shown in the EC2 console.
    - **KeyPair** *(string) --*
    The key pair associated with the server.
    - **MaintenanceStatus** *(string) --*
    The status of the most recent server maintenance run. Shows ``SUCCESS`` or ``FAILED`` .
    - **PreferredMaintenanceWindow** *(string) --*
    The preferred maintenance period specified for the server.
    - **PreferredBackupWindow** *(string) --*
    The preferred backup period specified for the server.
    - **SecurityGroupIds** *(list) --*
    The security group IDs for the server, as specified in the CloudFormation stack. These might not be the same security groups that are shown in the EC2 console.
    - *(string) --*
    - **ServiceRoleArn** *(string) --*
    The service role ARN used to create the server.
    - **Status** *(string) --*
    The server's status. This field displays the states of actions in progress, such as creating, running, or backing up the server, as well as the server's health state.
    - **StatusReason** *(string) --*
    Depending on the server status, this field has either a human-readable message (such as a create or backup error), or an escaped block of JSON (used for health check results).
    - **SubnetIds** *(list) --*
    The subnet IDs specified in a CreateServer request.
    - *(string) --*
    - **ServerArn** *(string) --*
    The ARN of the server.
    - **NextToken** *(string) --*
    This is not currently implemented for ``DescribeServers`` requests.
    :type ServerName: string
    :param ServerName:
    Describes the server with the specified ServerName.
    :type NextToken: string
    :param NextToken:
    This is not currently implemented for ``DescribeServers`` requests.
    :type MaxResults: integer
    :param MaxResults:
    This is not currently implemented for ``DescribeServers`` requests.
    :rtype: dict
    :returns:
    """
    # NOTE(review): appears to be an auto-generated client stub (docstring +
    # pass); the real request is presumably dispatched dynamically by the AWS
    # SDK at runtime — confirm before adding logic here.
    pass
def disassociate_node(self, ServerName: str, NodeName: str, EngineAttributes: List = None) -> Dict:
"""
Disassociates a node from an AWS OpsWorks CM server, and removes the node from the server's managed nodes. After a node is disassociated, the node key pair is no longer valid for accessing the configuration manager's API. For more information about how to associate a node, see AssociateNode .
A node can can only be disassociated from a server that is in a ``HEALTHY`` state. Otherwise, an ``InvalidStateException`` is thrown. A ``ResourceNotFoundException`` is thrown when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DisassociateNode>`_
**Request Syntax**
::
response = client.disassociate_node(
ServerName='string',
NodeName='string',
EngineAttributes=[
{
'Name': 'string',
'Value': 'string'
},
]
)
**Response Syntax**
::
{
'NodeAssociationStatusToken': 'string'
}
**Response Structure**
- *(dict) --*
- **NodeAssociationStatusToken** *(string) --*
Contains a token which can be | |
+= '\t' + head
toPrint += '\t'+'taxonomy'+'\r'
for taxa in taxaTableFilt:
toPrint += taxa
for val in taxaTableFilt[taxa][0]:
toPrint += '\t' + str(val)
toPrint += '\t' + taxaIDs[taxa]
toPrint += '\r'
OTUtabletoPrint.write(toPrint)
OTUtabletoPrint.close()
os.system('biom convert -i OTUTableText.txt --to-hdf5 --table-type="OTU table" --process-obs-metadata taxonomy -o OTUTable_filtered.biom')
print("Printed OTU table")
# def printOTUTable(OTUTable, taxaIDs, output): # OLD VERSION, BEFORE I MADE ADJUSTMENTS
# # Print OTU table
# first = True
# toWrite = "#OTU ID"
# for row in OTUTable:
# if first:
# for sample in OTUTable[row].keys():
# toWrite += "\t" + sample
# toWrite += "\t"+ "taxonomy" + "\n"
# first = False
# toWrite += row
# for abund in OTUTable[row]:
# toWrite += "\t" + str(OTUTable[row][abund])
# toWrite += "\t" + taxaIDs[row] + "\n"
# open(output+".txt", 'w').write(toWrite)
# print("DONE PRINTING OTU TABLE")
#==========================================
# MAKING BINS FOR EACH SET OF BOUNDARIES-- USED BELOW
def sortBins(X, Y, listAbund):
    """Split abundance observations into three salinity bins.

    listAbund is a pair of parallel lists: listAbund[0] holds abundance
    values and listAbund[1] the salinity at which each was observed.
    Observations with salinity below X go to 'lista', those between X and
    Y inclusive go to 'listb', and those above Y go to 'listc' (so when
    X == Y, a value exactly at the boundary lands in 'listb').

    Returns a dict {'lista': [...], 'listb': [...], 'listc': [...]} of
    abundance values grouped by salinity range.
    """
    below, between, above = [], [], []
    for idx, salinity in enumerate(listAbund[1]):
        abundance = listAbund[0][idx]
        if salinity < X:
            below.append(abundance)
        if X <= salinity <= Y:
            between.append(abundance)
        if salinity > Y:
            above.append(abundance)
    return {'lista': below, 'listb': between, 'listc': above}
#==========================================
# MAKE AND TEST A MODEL USING ERROR SQUARED
def makeAModel(X, Y, binValues):
    """Build a piecewise-constant model from the binned abundances.

    The "model" is simply the mean abundance of each salinity bin.
    X and Y (the bin boundaries) are accepted for interface symmetry with
    the other model functions but are not used here.

    Returns a dict with two parallel lists: 'abundance' is every
    observation (lista, then listb, then listc) and 'means' holds, for
    each observation, the mean of the bin it belongs to.
    """
    abundance = []
    means = []
    for key in ('lista', 'listb', 'listc'):
        observations = binValues[key]
        binMean = average(observations)
        abundance.extend(observations)
        means.extend([binMean] * len(observations))
    return {'abundance': abundance, 'means': means}
def meanErrorSquared(combined):
    """Return the mean squared error of the data against the model.

    combined is the dict produced by makeAModel: parallel lists
    'abundance' (observations) and 'means' (the model value for each
    observation). The result is the average of the squared residuals.
    """
    squaredResiduals = [
        (combined['means'][i] - obs) ** 2
        for i, obs in enumerate(combined['abundance'])
    ]
    return float(sum(squaredResiduals)) / len(combined['abundance'])
def scaleByDiff(X, Y, meanA, meanB, meanC):
    """Collapse the two candidate boundaries X and Y into one boundary.

    The final boundary is placed between X and Y in proportion to the
    mean differences on either side of the middle bin: the larger the
    jump between adjacent bin means, the closer the boundary sits to
    that jump.

    Parameters:
        X, Y   -- lower and upper candidate boundaries (X <= Y)
        meanA  -- mean abundance below X
        meanB  -- mean abundance between X and Y
        meanC  -- mean abundance above Y

    Returns the interpolated boundary. When meanA == meanC the caller
    should never have reached this function (the taxon would not have
    been classified as Hi or Lo); a warning is printed and the midpoint
    of X and Y is returned as a safe fallback.
    """
    diffXY = Y - X
    if meanA > meanC:  # abundance falls with salinity (Lo-type taxon)
        diffX = meanA - meanB
        diffY = meanB - meanC
        if diffX <= 0:
            # no drop across X: the real transition must be at Y
            finalBoundary = Y
        elif diffY <= 0:
            # no drop across Y: the real transition must be at X
            finalBoundary = X
        else:
            # float() guards against integer truncation if ints are passed
            scaleFactor = float(diffX) / (diffX + diffY)
            finalBoundary = Y - diffXY * scaleFactor
    elif meanC > meanA:  # abundance rises with salinity (Hi-type taxon)
        diffX = meanB - meanA
        diffY = meanC - meanB
        if diffY <= 0:
            finalBoundary = X
        elif diffX <= 0:
            finalBoundary = Y
        else:
            scaleFactor = float(diffY) / (diffX + diffY)
            finalBoundary = X + diffXY * scaleFactor
    else:  # meanA == meanC: should be unreachable for Hi/Lo taxa
        print("WARNING:ERROR IN CALCULATING SCALEBYDIFF. Two means are exactly the same. Should not occur because it is classified as Hi or Lo")
        # Original code left finalBoundary unassigned here, raising
        # UnboundLocalError on return; fall back to the midpoint instead.
        finalBoundary = (X + Y) / 2.0
    return finalBoundary
def typeTaxa(X, Y, listAbund): # Uses Welch's t-test and bins above to classify taxa as Hi, Lo, or Inter (or other)
# This FIRST determines whether anything is intermediate:
# compares groups from best-fit model
# There are five intermediate sub-types: InterRestricted, InterPeak, InterPeakHiToler, InterPeakLoToler, Interbloom
# Then, it tests whether it is Hi or Lo:
# There are three intermediate sub-types each: Hi/LoRestricted, Hi/LoPeak, Hi/LoBloom
# Finally, it tests whether the remaining groups are 'ubiquitous':
# Everything else is kept as 'noclass'
global threshold
global Low,Inter,High
global ubiqOverlapThresh
global propUbiqNonzero
global critp
global critpVar
typeOutput = {'boundaries':[], 'type': '', 'typeSimple':'', 'meanA': [], 'meanB': [], 'meanC': [], 'X': [], 'Y': [], 'sigAB': [], 'sigBC': [], 'sigAC': [], 'bloom': 'No'} # Empty dictionary
binValues = sortBins(X,Y,listAbund) # Use function above to create dictionary with abundance and salinity information
# Find out whether something is Hi-, Lo-, or Inter-specific
groupA = binValues['lista']
groupB = binValues['listb'] # should be at least 3 values in each list
groupC = binValues['listc']
meanA = average(groupA)
meanB = average(groupB)
meanC = average(groupC)
# Find threshold by using proportion of max, if necessary
if threshold[0] == True:
maxAbund = max(groupA + groupB + groupC)
thresh = maxAbund*threshold[1]
else:
thresh = threshold[1]
# Calculate variance, but first test if each combination of groups has 0 variance.
# If variance is 0, set p** to 1, which is maximum
if average(groupA) == average(groupC) and numpy.var(groupA) == 0 and numpy.var(groupC) == 0:
pAC = 1
else:
# pAC = stats.ttest_ind(groupA,groupC, equal_var = False)[1] # p-value of A vs C
pAC = stats.mannwhitneyu(groupA,groupC)[1] # p-value of A vs C
if average(groupA) == average(groupB) and numpy.var(groupA) == 0 and numpy.var(groupB) == 0:
pAB = 1
else:
# pAB = stats.ttest_ind(groupA,groupB, equal_var = False)[1] # p-value of A vs B
pAB = stats.mannwhitneyu(groupA,groupB)[1] # p-value of A vs B
if average(groupB) == average(groupC) and numpy.var(groupB) == 0 and numpy.var(groupC) == 0:
pBC = 1
else:
# pBC = stats.ttest_ind(groupB,groupC, equal_var = False)[1] # p-value of B vs C
pBC = stats.mannwhitneyu(groupB,groupC)[1] # p-value of B vs C
# print pAC,pAB,pBC
sigAB = pAB < critp # True if significant
sigBC = pBC < critp # True if significant
sigAC = pAC < critp # True if significant
Stda = numpy.std(groupA) # For 'bloom' test
Stdb = numpy.std(groupB)
Stdc = numpy.std(groupC)
typeOutput['sigAB'] = pAB
typeOutput['sigBC'] = pBC
typeOutput['sigAC'] = pAC
typeOutput['meanA'] = meanA
typeOutput['meanB'] = meanB
typeOutput['meanC'] = meanC
typeOutput['X'] = X
typeOutput['Y'] = Y
typeOutput['bloom'] = 'No'
isInter = False # See if there is an intermediate community or not; if there isn't, then I compare just X and C.
isLow = False
isHigh = False
if sigAB and sigBC: # When the middle group is REAL (and not just 1 number), and it is significantly different than both flanking groups
if meanB > meanA and meanB > meanC and meanB > thresh: # intermediate species
isInter = True
typeOutput['boundaries'] = [X,Y]
typeOutput['typeSimple'] = Inter + 'Restricted'
if meanA <= thresh and meanC <= thresh: # "very" intermediate; that is, the abundances are basically 0 on either side
typeOutput['type'] = Inter+'Restricted'
elif meanA <= thresh and meanC > thresh: # leaning towards being hi
typeOutput['type'] = Inter+'PeakHiToler'
elif meanA > thresh and meanC <= thresh: # leaning towards being lo
typeOutput['type'] = Inter+'PeakLoToler'
else: # both meanA and meanC are larger than 0
typeOutput['type'] = Inter+'Peak'
elif meanB < meanA and meanB < meanC and meanA > thresh and meanC > thresh: # inv-inter water-- shouldn't exist according to hypothesis, but I put it in as a fail-safe
isInter = True
typeOutput['type'] = 'inv'+Inter
typeOutput['typeSimple'] = 'noclass'
typeOutput['boundaries'] = [X,Y]
else:
pass # All other combos mean the intermediates do NOT exist, so we can just compare X and C
if isInter: # If the group is already classified as either Inter or inv-Inter, then we skip the next loop. If it is not classified, we continue.
pass
elif (meanA > meanC and sigAC and meanA > thresh): #or (meanB > meanC and sigBC and (meanB-meanC) > (meanB-meanA)): # More in fresh water and it's significant
# Above, you can have EITHER X>C or Y>C but they must be significant, and if it's Y, the distance between Y and C must be greater than the distance between X and Y (to prevent Intermediate-looking ones)
# Note that meanB-meanC should ALWAYS be greater than meanB-meanA because if meanB-meanA is negative, it means it's truly fresh!
isLow = True
typeOutput['boundaries'] = [scaleByDiff(X,Y,meanA,meanB,meanC)] # We scale by how significant each difference is. (eg. If a-b is very significant but b-c is not very significant, then the 'true' boundary is approximated to be closer to a-b than to b-c.
# See function above for details
if meanC <= thresh:
typeOutput['type'] = Low + 'Restricted'
typeOutput['typeSimple'] = Low + 'Restricted'
else:
typeOutput['type'] = Low + 'Peak'
typeOutput['typeSimple'] = Low + 'Restricted'
elif (meanC > meanA and sigAC and meanC > thresh): #or (meanB > meanA and sigAB and (meanB-meanA) > (meanB-meanC)): # This is same process as above, except for marine samples
isHigh = True
typeOutput['boundaries'] = [scaleByDiff(X,Y,meanA,meanB,meanC)] # See above
if meanA <= thresh:
typeOutput['type'] = High + 'Restricted'
typeOutput['typeSimple'] = High + 'Restricted'
else:
typeOutput['type'] = High + 'Peak'
typeOutput['typeSimple'] = High + 'Restricted'
if not any([isInter, isLow, isHigh]) : # if it has not been sorted yet:
# Calculate whether the ranges of equal bins to overlap over the threshold set
binThirds = sortBins(max(listAbund[1])*0.33,max(listAbund[1])*0.66,listAbund)
groupRanges = [[min(binThirds['lista']), max(binThirds['lista'])],[min(binThirds['listb']), max(binThirds['listb'])],[min(binThirds['listc']), max(binThirds['listc'])]]
allOverlaps = []
for i in range(0,len(groupRanges)):
for j in range(0,len(groupRanges)):
difflwr = max(0,groupRanges[j][0] - groupRanges[i][0])
diffuppr = max(0,groupRanges[i][1] - groupRanges[j][1])
totalDist = groupRanges[i][1]-groupRanges[i][0]
if totalDist == 0:
totalDist = 1
finalOverlap = 1-(difflwr + diffuppr)/float(totalDist)
allOverlaps.append(finalOverlap)
# Also, calculate the number of zero and non-zero observations in ALL groups
zerosprop = sum([1 for i in listAbund[0] if i == 0])/float(len(listAbund[0]))
if zerosprop < propUbiqNonzero:
ubiqZeroTestPass = True
else:
ubiqZeroTestPass = False
if all([i >= ubiqOverlapThresh for i in allOverlaps]) and ubiqZeroTestPass: # passes both the overlap test and zero proportions test
typeOutput['type'] = 'ubiquitous' # basically everywhere at similar levels
typeOutput['typeSimple'] = 'noclass' # 'catch-all' for things | |
<gh_stars>10-100
del_items(0x8012BFF0)
SetType(0x8012BFF0, "int NumOfMonsterListLevels")
del_items(0x800A9014)
SetType(0x800A9014, "struct MonstLevel AllLevels[16]")
del_items(0x8012BCD4)
SetType(0x8012BCD4, "unsigned char NumsLEV1M1A[4]")
del_items(0x8012BCD8)
SetType(0x8012BCD8, "unsigned char NumsLEV1M1B[4]")
del_items(0x8012BCDC)
SetType(0x8012BCDC, "unsigned char NumsLEV1M1C[5]")
del_items(0x8012BCE4)
SetType(0x8012BCE4, "unsigned char NumsLEV2M2A[4]")
del_items(0x8012BCE8)
SetType(0x8012BCE8, "unsigned char NumsLEV2M2B[4]")
del_items(0x8012BCEC)
SetType(0x8012BCEC, "unsigned char NumsLEV2M2C[3]")
del_items(0x8012BCF0)
SetType(0x8012BCF0, "unsigned char NumsLEV2M2D[4]")
del_items(0x8012BCF4)
SetType(0x8012BCF4, "unsigned char NumsLEV2M2QA[4]")
del_items(0x8012BCF8)
SetType(0x8012BCF8, "unsigned char NumsLEV2M2QB[4]")
del_items(0x8012BCFC)
SetType(0x8012BCFC, "unsigned char NumsLEV3M3A[4]")
del_items(0x8012BD00)
SetType(0x8012BD00, "unsigned char NumsLEV3M3QA[3]")
del_items(0x8012BD04)
SetType(0x8012BD04, "unsigned char NumsLEV3M3B[4]")
del_items(0x8012BD08)
SetType(0x8012BD08, "unsigned char NumsLEV3M3C[4]")
del_items(0x8012BD0C)
SetType(0x8012BD0C, "unsigned char NumsLEV4M4A[4]")
del_items(0x8012BD10)
SetType(0x8012BD10, "unsigned char NumsLEV4M4QA[4]")
del_items(0x8012BD14)
SetType(0x8012BD14, "unsigned char NumsLEV4M4B[4]")
del_items(0x8012BD18)
SetType(0x8012BD18, "unsigned char NumsLEV4M4QB[5]")
del_items(0x8012BD20)
SetType(0x8012BD20, "unsigned char NumsLEV4M4C[4]")
del_items(0x8012BD24)
SetType(0x8012BD24, "unsigned char NumsLEV4M4QC[5]")
del_items(0x8012BD2C)
SetType(0x8012BD2C, "unsigned char NumsLEV4M4D[4]")
del_items(0x8012BD30)
SetType(0x8012BD30, "unsigned char NumsLEV5M5A[4]")
del_items(0x8012BD34)
SetType(0x8012BD34, "unsigned char NumsLEV5M5B[4]")
del_items(0x8012BD38)
SetType(0x8012BD38, "unsigned char NumsLEV5M5C[4]")
del_items(0x8012BD3C)
SetType(0x8012BD3C, "unsigned char NumsLEV5M5D[4]")
del_items(0x8012BD40)
SetType(0x8012BD40, "unsigned char NumsLEV5M5E[4]")
del_items(0x8012BD44)
SetType(0x8012BD44, "unsigned char NumsLEV5M5F[3]")
del_items(0x8012BD48)
SetType(0x8012BD48, "unsigned char NumsLEV5M5QA[4]")
del_items(0x8012BD4C)
SetType(0x8012BD4C, "unsigned char NumsLEV6M6A[5]")
del_items(0x8012BD54)
SetType(0x8012BD54, "unsigned char NumsLEV6M6B[3]")
del_items(0x8012BD58)
SetType(0x8012BD58, "unsigned char NumsLEV6M6C[4]")
del_items(0x8012BD5C)
SetType(0x8012BD5C, "unsigned char NumsLEV6M6D[3]")
del_items(0x8012BD60)
SetType(0x8012BD60, "unsigned char NumsLEV6M6E[3]")
del_items(0x8012BD64)
SetType(0x8012BD64, "unsigned char NumsLEV6M6QA[3]")
del_items(0x8012BD68)
SetType(0x8012BD68, "unsigned char NumsLEV7M7A[4]")
del_items(0x8012BD6C)
SetType(0x8012BD6C, "unsigned char NumsLEV7M7B[4]")
del_items(0x8012BD70)
SetType(0x8012BD70, "unsigned char NumsLEV7M7C[4]")
del_items(0x8012BD74)
SetType(0x8012BD74, "unsigned char NumsLEV7M7D[3]")
del_items(0x8012BD78)
SetType(0x8012BD78, "unsigned char NumsLEV7M7E[3]")
del_items(0x8012BD7C)
SetType(0x8012BD7C, "unsigned char NumsLEV8M8QA[2]")
del_items(0x8012BD80)
SetType(0x8012BD80, "unsigned char NumsLEV8M8A[2]")
del_items(0x8012BD84)
SetType(0x8012BD84, "unsigned char NumsLEV8M8B[4]")
del_items(0x8012BD88)
SetType(0x8012BD88, "unsigned char NumsLEV8M8C[3]")
del_items(0x8012BD8C)
SetType(0x8012BD8C, "unsigned char NumsLEV8M8D[2]")
del_items(0x8012BD90)
SetType(0x8012BD90, "unsigned char NumsLEV8M8E[2]")
del_items(0x8012BD94)
SetType(0x8012BD94, "unsigned char NumsLEV9M9A[4]")
del_items(0x8012BD98)
SetType(0x8012BD98, "unsigned char NumsLEV9M9B[3]")
del_items(0x8012BD9C)
SetType(0x8012BD9C, "unsigned char NumsLEV9M9C[2]")
del_items(0x8012BDA0)
SetType(0x8012BDA0, "unsigned char NumsLEV9M9D[2]")
del_items(0x8012BDA4)
SetType(0x8012BDA4, "unsigned char NumsLEV10M10A[3]")
del_items(0x8012BDA8)
SetType(0x8012BDA8, "unsigned char NumsLEV10M10B[2]")
del_items(0x8012BDAC)
SetType(0x8012BDAC, "unsigned char NumsLEV10M10C[2]")
del_items(0x8012BDB0)
SetType(0x8012BDB0, "unsigned char NumsLEV10M10D[2]")
del_items(0x8012BDB4)
SetType(0x8012BDB4, "unsigned char NumsLEV10M10QA[3]")
del_items(0x8012BDB8)
SetType(0x8012BDB8, "unsigned char NumsLEV11M11A[3]")
del_items(0x8012BDBC)
SetType(0x8012BDBC, "unsigned char NumsLEV11M11B[3]")
del_items(0x8012BDC0)
SetType(0x8012BDC0, "unsigned char NumsLEV11M11C[3]")
del_items(0x8012BDC4)
SetType(0x8012BDC4, "unsigned char NumsLEV11M11D[3]")
del_items(0x8012BDC8)
SetType(0x8012BDC8, "unsigned char NumsLEV11M11E[2]")
del_items(0x8012BDCC)
SetType(0x8012BDCC, "unsigned char NumsLEV12M12A[3]")
del_items(0x8012BDD0)
SetType(0x8012BDD0, "unsigned char NumsLEV12M12B[3]")
del_items(0x8012BDD4)
SetType(0x8012BDD4, "unsigned char NumsLEV12M12C[3]")
del_items(0x8012BDD8)
SetType(0x8012BDD8, "unsigned char NumsLEV12M12D[3]")
del_items(0x8012BDDC)
SetType(0x8012BDDC, "unsigned char NumsLEV13M13A[3]")
del_items(0x8012BDE0)
SetType(0x8012BDE0, "unsigned char NumsLEV13M13B[2]")
del_items(0x8012BDE4)
SetType(0x8012BDE4, "unsigned char NumsLEV13M13QB[3]")
del_items(0x8012BDE8)
SetType(0x8012BDE8, "unsigned char NumsLEV13M13C[3]")
del_items(0x8012BDEC)
SetType(0x8012BDEC, "unsigned char NumsLEV13M13D[2]")
del_items(0x8012BDF0)
SetType(0x8012BDF0, "unsigned char NumsLEV14M14A[3]")
del_items(0x8012BDF4)
SetType(0x8012BDF4, "unsigned char NumsLEV14M14B[3]")
del_items(0x8012BDF8)
SetType(0x8012BDF8, "unsigned char NumsLEV14M14QB[3]")
del_items(0x8012BDFC)
SetType(0x8012BDFC, "unsigned char NumsLEV14M14C[3]")
del_items(0x8012BE00)
SetType(0x8012BE00, "unsigned char NumsLEV14M14D[3]")
del_items(0x8012BE04)
SetType(0x8012BE04, "unsigned char NumsLEV14M14E[2]")
del_items(0x8012BE08)
SetType(0x8012BE08, "unsigned char NumsLEV15M15A[3]")
del_items(0x8012BE0C)
SetType(0x8012BE0C, "unsigned char NumsLEV15M15B[3]")
del_items(0x8012BE10)
SetType(0x8012BE10, "unsigned char NumsLEV15M15C[2]")
del_items(0x8012BE14)
SetType(0x8012BE14, "unsigned char NumsLEV15M15QA[2]")
del_items(0x8012BE18)
SetType(0x8012BE18, "unsigned char NumsLEV16M16D[3]")
del_items(0x800A8B34)
SetType(0x800A8B34, "struct MonstList ChoiceListLEV1[3]")
del_items(0x800A8B64)
SetType(0x800A8B64, "struct MonstList ChoiceListLEV2[6]")
del_items(0x800A8BC4)
SetType(0x800A8BC4, "struct MonstList ChoiceListLEV3[4]")
del_items(0x800A8C04)
SetType(0x800A8C04, "struct MonstList ChoiceListLEV4[7]")
del_items(0x800A8C74)
SetType(0x800A8C74, "struct MonstList ChoiceListLEV5[7]")
del_items(0x800A8CE4)
SetType(0x800A8CE4, "struct MonstList ChoiceListLEV6[6]")
del_items(0x800A8D44)
SetType(0x800A8D44, "struct MonstList ChoiceListLEV7[5]")
del_items(0x800A8D94)
SetType(0x800A8D94, "struct MonstList ChoiceListLEV8[6]")
del_items(0x800A8DF4)
SetType(0x800A8DF4, "struct MonstList ChoiceListLEV9[4]")
del_items(0x800A8E34)
SetType(0x800A8E34, "struct MonstList ChoiceListLEV10[5]")
del_items(0x800A8E84)
SetType(0x800A8E84, "struct MonstList ChoiceListLEV11[5]")
del_items(0x800A8ED4)
SetType(0x800A8ED4, "struct MonstList ChoiceListLEV12[4]")
del_items(0x800A8F14)
SetType(0x800A8F14, "struct MonstList ChoiceListLEV13[5]")
del_items(0x800A8F64)
SetType(0x800A8F64, "struct MonstList ChoiceListLEV14[6]")
del_items(0x800A8FC4)
SetType(0x800A8FC4, "struct MonstList ChoiceListLEV15[4]")
del_items(0x800A9004)
SetType(0x800A9004, "struct MonstList ChoiceListLEV16[1]")
del_items(0x8012D8AC)
SetType(0x8012D8AC, "struct TASK *GameTaskPtr")
del_items(0x800A9094)
SetType(0x800A9094, "struct LOAD_IMAGE_ARGS AllArgs[30]")
del_items(0x8012C000)
SetType(0x8012C000, "int ArgsSoFar")
del_items(0x8012C010)
SetType(0x8012C010, "unsigned long *ThisOt")
del_items(0x8012C014)
SetType(0x8012C014, "struct POLY_FT4 *ThisPrimAddr")
del_items(0x8012D8B0)
SetType(0x8012D8B0, "long hndPrimBuffers")
del_items(0x8012D8B4)
SetType(0x8012D8B4, "struct PRIM_BUFFER *PrimBuffers")
del_items(0x8012D8B8)
SetType(0x8012D8B8, "unsigned char BufferDepth")
del_items(0x8012D8B9)
SetType(0x8012D8B9, "unsigned char WorkRamId")
del_items(0x8012D8BA)
SetType(0x8012D8BA, "unsigned char ScrNum")
del_items(0x8012D8BC)
SetType(0x8012D8BC, "struct SCREEN_ENV *Screens")
del_items(0x8012D8C0)
SetType(0x8012D8C0, "struct PRIM_BUFFER *PbToClear")
del_items(0x8012D8C4)
SetType(0x8012D8C4, "unsigned char BufferNum")
del_items(0x8012C018)
SetType(0x8012C018, "struct POLY_FT4 *AddrToAvoid")
del_items(0x8012D8C5)
SetType(0x8012D8C5, "unsigned char LastBuffer")
del_items(0x8012D8C8)
SetType(0x8012D8C8, "struct DISPENV *DispEnvToPut")
del_items(0x8012D8CC)
SetType(0x8012D8CC, "int ThisOtSize")
del_items(0x8012C01C)
SetType(0x8012C01C, "struct RECT ScrRect")
del_items(0x8012D8D0)
SetType(0x8012D8D0, "int VidWait")
del_items(0x8012DD38)
SetType(0x8012DD38, "struct SCREEN_ENV screen[2]")
del_items(0x8012D8D4)
SetType(0x8012D8D4, "void (*VbFunc)()")
del_items(0x8012D8D8)
SetType(0x8012D8D8, "unsigned long VidTick")
del_items(0x8012D8DC)
SetType(0x8012D8DC, "int VXOff")
del_items(0x8012D8E0)
SetType(0x8012D8E0, "int VYOff")
del_items(0x8012C030)
SetType(0x8012C030, "struct LNK_OPTS *Gaz")
del_items(0x8012C034)
SetType(0x8012C034, "int LastFmem")
del_items(0x8012C024)
SetType(0x8012C024, "unsigned int GSYS_MemStart")
del_items(0x8012C028)
SetType(0x8012C028, "unsigned int GSYS_MemEnd")
del_items(0x800A93DC)
SetType(0x800A93DC, "struct MEM_INIT_INFO PsxMem")
del_items(0x800A9404)
SetType(0x800A9404, "struct MEM_INIT_INFO PsxFastMem")
del_items(0x8012C02C)
SetType(0x8012C02C, "int LowestFmem")
del_items(0x8012C044)
SetType(0x8012C044, "int FileSYS")
del_items(0x8012D8E4)
SetType(0x8012D8E4, "struct FileIO *FileSystem")
del_items(0x8012D8E8)
SetType(0x8012D8E8, "struct FileIO *OverlayFileSystem")
del_items(0x8012C05E)
SetType(0x8012C05E, "short DavesPad")
del_items(0x8012C060)
SetType(0x8012C060, "short DavesPadDeb")
del_items(0x800A942C)
SetType(0x800A942C, "char _6FileIO_FileToLoad[50]")
del_items(0x8012DE18)
SetType(0x8012DE18, "struct POLY_FT4 MyFT4")
del_items(0x800A9CD0)
SetType(0x800A9CD0, "struct TextDat *AllDats[368]")
del_items(0x8012C0B0)
SetType(0x8012C0B0, "int TpW")
del_items(0x8012C0B4)
SetType(0x8012C0B4, "int TpH")
del_items(0x8012C0B8)
SetType(0x8012C0B8, "int TpXDest")
del_items(0x8012C0BC)
SetType(0x8012C0BC, "int TpYDest")
del_items(0x8012C0C0)
SetType(0x8012C0C0, "struct RECT R")
del_items(0x800AA290)
SetType(0x800AA290, "struct POLY_GT4 MyGT4")
del_items(0x800AA2C4)
SetType(0x800AA2C4, "struct POLY_GT3 MyGT3")
del_items(0x800A9460)
SetType(0x800A9460, "struct TextDat DatPool[20]")
del_items(0x8012C0D4)
SetType(0x8012C0D4, "bool ChunkGot")
del_items(0x800AA2EC)
SetType(0x800AA2EC, "char STREAM_DIR[16]")
del_items(0x800AA2FC)
SetType(0x800AA2FC, "char STREAM_BIN[16]")
del_items(0x800AA30C)
SetType(0x800AA30C, "unsigned char EAC_DirectoryCache[300]")
del_items(0x8012C0E8)
SetType(0x8012C0E8, "unsigned long BL_NoLumpFiles")
del_items(0x8012C0EC)
SetType(0x8012C0EC, "unsigned long BL_NoStreamFiles")
del_items(0x8012C0F0)
SetType(0x8012C0F0, "struct STRHDR *LFileTab")
del_items(0x8012C0F4)
SetType(0x8012C0F4, "struct STRHDR *SFileTab")
del_items(0x8012C0F8)
SetType(0x8012C0F8, "unsigned char FileLoaded")
del_items(0x8012C11C)
SetType(0x8012C11C, "int NoTAllocs")
del_items(0x800AA438)
SetType(0x800AA438, "struct MEMSTRUCT MemBlock[50]")
del_items(0x8012D8F4)
SetType(0x8012D8F4, "bool CanPause")
del_items(0x8012D8F8)
SetType(0x8012D8F8, "bool Paused")
del_items(0x8012DE40)
SetType(0x8012DE40, "struct Dialog PBack")
del_items(0x800AA6A0)
SetType(0x800AA6A0, "unsigned char RawPadData0[34]")
del_items(0x800AA6C4)
SetType(0x800AA6C4, "unsigned char RawPadData1[34]")
del_items(0x800AA6E8)
SetType(0x800AA6E8, "unsigned char demo_buffer[900]")
del_items(0x8012C138)
SetType(0x8012C138, "int demo_pad_time")
del_items(0x8012C13C)
SetType(0x8012C13C, "int demo_pad_count")
del_items(0x800AA5C8)
SetType(0x800AA5C8, "struct CPad Pad0")
del_items(0x800AA634)
SetType(0x800AA634, "struct CPad Pad1")
del_items(0x8012C140)
SetType(0x8012C140, "unsigned long demo_finish")
del_items(0x8012C144)
SetType(0x8012C144, "int cac_pad")
del_items(0x8012C164)
SetType(0x8012C164, "struct POLY_FT4 *CharFt4")
del_items(0x8012C168)
SetType(0x8012C168, "int CharFrm")
del_items(0x8012C151)
SetType(0x8012C151, "unsigned char WHITER")
del_items(0x8012C152)
SetType(0x8012C152, "unsigned char WHITEG")
del_items(0x8012C153)
SetType(0x8012C153, "unsigned char WHITEB")
del_items(0x8012C154)
SetType(0x8012C154, "unsigned char BLUER")
del_items(0x8012C155)
SetType(0x8012C155, "unsigned char BLUEG")
del_items(0x8012C156)
SetType(0x8012C156, "unsigned char BLUEB")
del_items(0x8012C157)
SetType(0x8012C157, "unsigned char REDR")
del_items(0x8012C158)
SetType(0x8012C158, "unsigned char REDG")
del_items(0x8012C159)
SetType(0x8012C159, "unsigned char REDB")
del_items(0x8012C15A)
SetType(0x8012C15A, "unsigned char GOLDR")
del_items(0x8012C15B)
SetType(0x8012C15B, "unsigned char GOLDG")
del_items(0x8012C15C)
SetType(0x8012C15C, "unsigned char GOLDB")
del_items(0x800AAA6C)
SetType(0x800AAA6C, "struct CFont MediumFont")
del_items(0x800AAC88)
SetType(0x800AAC88, "struct CFont LargeFont")
del_items(0x8012C160)
SetType(0x8012C160, "bool buttoncol")
del_items(0x800AAEA4)
SetType(0x800AAEA4, "struct FontItem LFontTab[90]")
del_items(0x800AAF58)
SetType(0x800AAF58, "struct FontTab LFont")
del_items(0x800AAF68)
SetType(0x800AAF68, "struct FontItem MFontTab[155]")
del_items(0x800AB0A0)
SetType(0x800AB0A0, "struct FontTab MFont")
del_items(0x8012C17D)
SetType(0x8012C17D, "unsigned char DialogRed")
del_items(0x8012C17E)
SetType(0x8012C17E, "unsigned char DialogGreen")
del_items(0x8012C17F)
SetType(0x8012C17F, "unsigned char DialogBlue")
del_items(0x8012C180)
SetType(0x8012C180, "unsigned char DialogTRed")
del_items(0x8012C181)
SetType(0x8012C181, "unsigned char DialogTGreen")
del_items(0x8012C182)
SetType(0x8012C182, "unsigned char DialogTBlue")
del_items(0x8012C184)
SetType(0x8012C184, "struct TextDat *DialogTData")
del_items(0x8012C188)
SetType(0x8012C188, "int DialogBackGfx")
del_items(0x8012C18C)
SetType(0x8012C18C, "int DialogBackW")
del_items(0x8012C190)
SetType(0x8012C190, "int DialogBackH")
del_items(0x8012C194)
SetType(0x8012C194, "int DialogBorderGfx")
del_items(0x8012C198)
SetType(0x8012C198, "int DialogBorderTLW")
del_items(0x8012C19C)
SetType(0x8012C19C, "int DialogBorderTLH")
del_items(0x8012C1A0)
SetType(0x8012C1A0, "int DialogBorderTRW")
del_items(0x8012C1A4)
SetType(0x8012C1A4, "int DialogBorderTRH")
del_items(0x8012C1A8)
SetType(0x8012C1A8, "int DialogBorderBLW")
del_items(0x8012C1AC)
SetType(0x8012C1AC, "int DialogBorderBLH")
del_items(0x8012C1B0)
SetType(0x8012C1B0, "int DialogBorderBRW")
del_items(0x8012C1B4)
SetType(0x8012C1B4, "int DialogBorderBRH")
del_items(0x8012C1B8)
SetType(0x8012C1B8, "int DialogBorderTW")
del_items(0x8012C1BC)
SetType(0x8012C1BC, "int DialogBorderTH")
del_items(0x8012C1C0)
SetType(0x8012C1C0, "int DialogBorderBW")
del_items(0x8012C1C4)
SetType(0x8012C1C4, "int DialogBorderBH")
del_items(0x8012C1C8)
SetType(0x8012C1C8, "int DialogBorderLW")
del_items(0x8012C1CC)
SetType(0x8012C1CC, "int DialogBorderLH")
del_items(0x8012C1D0)
SetType(0x8012C1D0, "int DialogBorderRW")
del_items(0x8012C1D4)
SetType(0x8012C1D4, "int DialogBorderRH")
del_items(0x8012C1D8)
SetType(0x8012C1D8, "int DialogBevelGfx")
del_items(0x8012C1DC)
SetType(0x8012C1DC, "int DialogBevelCW")
del_items(0x8012C1E0)
SetType(0x8012C1E0, "int DialogBevelCH")
del_items(0x8012C1E4)
SetType(0x8012C1E4, "int DialogBevelLRW")
del_items(0x8012C1E8)
SetType(0x8012C1E8, "int DialogBevelLRH")
del_items(0x8012C1EC)
SetType(0x8012C1EC, "int DialogBevelUDW")
del_items(0x8012C1F0)
SetType(0x8012C1F0, "int DialogBevelUDH")
del_items(0x8012C1F4)
SetType(0x8012C1F4, "int MY_DialogOTpos")
del_items(0x8012D8FC)
SetType(0x8012D8FC, "unsigned char DialogGBack")
del_items(0x8012D8FD)
SetType(0x8012D8FD, "char GShadeX")
del_items(0x8012D8FE)
SetType(0x8012D8FE, "char GShadeY")
del_items(0x8012D904)
SetType(0x8012D904, "unsigned char RandBTab[8]")
del_items(0x800AB0F0)
SetType(0x800AB0F0, "int Cxy[28]")
del_items(0x8012C177)
SetType(0x8012C177, "unsigned char BORDERR")
del_items(0x8012C178)
SetType(0x8012C178, "unsigned char BORDERG")
del_items(0x8012C179)
SetType(0x8012C179, "unsigned char BORDERB")
del_items(0x8012C17A)
SetType(0x8012C17A, "unsigned char BACKR")
del_items(0x8012C17B)
SetType(0x8012C17B, "unsigned char BACKG")
del_items(0x8012C17C)
SetType(0x8012C17C, "unsigned char BACKB")
del_items(0x800AB0B0)
SetType(0x800AB0B0, "char GShadeTab[64]")
del_items(0x8012C175)
SetType(0x8012C175, "char GShadePX")
del_items(0x8012C176)
SetType(0x8012C176, "char GShadePY")
del_items(0x8012C201)
SetType(0x8012C201, "unsigned char PlayDemoFlag")
del_items(0x8012DE50)
SetType(0x8012DE50, "struct RGBPOLY rgbb")
del_items(0x8012DE80)
SetType(0x8012DE80, "struct RGBPOLY rgbt")
del_items(0x8012D90C)
SetType(0x8012D90C, "int blockr")
del_items(0x8012D910)
SetType(0x8012D910, "int blockg")
del_items(0x8012D914)
SetType(0x8012D914, "int blockb")
del_items(0x8012D918)
SetType(0x8012D918, "int InfraFlag")
del_items(0x8012D91C)
SetType(0x8012D91C, "unsigned char blank_bit")
del_items(0x8012C215)
SetType(0x8012C215, "unsigned char P1ObjSelCount")
del_items(0x8012C216)
SetType(0x8012C216, "unsigned char P2ObjSelCount")
del_items(0x8012C217)
SetType(0x8012C217, "unsigned char P12ObjSelCount")
del_items(0x8012C218)
SetType(0x8012C218, "unsigned char P1ItemSelCount")
del_items(0x8012C219)
SetType(0x8012C219, "unsigned char P2ItemSelCount")
del_items(0x8012C21A)
SetType(0x8012C21A, "unsigned char P12ItemSelCount")
del_items(0x8012C21B)
SetType(0x8012C21B, "unsigned char P1MonstSelCount")
del_items(0x8012C21C)
SetType(0x8012C21C, "unsigned char P2MonstSelCount")
del_items(0x8012C21D)
SetType(0x8012C21D, "unsigned char P12MonstSelCount")
del_items(0x8012C21E)
SetType(0x8012C21E, "unsigned short P1ObjSelCol")
del_items(0x8012C220)
SetType(0x8012C220, "unsigned short P2ObjSelCol")
del_items(0x8012C222)
SetType(0x8012C222, "unsigned short P12ObjSelCol")
del_items(0x8012C224)
SetType(0x8012C224, "unsigned short P1ItemSelCol")
del_items(0x8012C226)
SetType(0x8012C226, "unsigned short P2ItemSelCol")
del_items(0x8012C228)
SetType(0x8012C228, "unsigned short P12ItemSelCol")
del_items(0x8012C22A)
SetType(0x8012C22A, "unsigned short P1MonstSelCol")
del_items(0x8012C22C)
SetType(0x8012C22C, "unsigned short P2MonstSelCol")
del_items(0x8012C22E)
SetType(0x8012C22E, "unsigned short P12MonstSelCol")
del_items(0x8012C230)
SetType(0x8012C230, "struct CBlocks *CurrentBlocks")
del_items(0x800AB160)
SetType(0x800AB160, "struct TownToCreature TownConv[10]")
del_items(0x8012C24C)
SetType(0x8012C24C, "enum OVER_TYPE CurrentOverlay")
del_items(0x801219A8)
SetType(0x801219A8, "unsigned long HaltTab[3]")
del_items(0x8012DEB0)
SetType(0x8012DEB0, "struct Overlay FrontEndOver")
del_items(0x8012DEC0)
SetType(0x8012DEC0, "struct Overlay PregameOver")
del_items(0x8012DED0)
SetType(0x8012DED0, "struct Overlay GameOver")
del_items(0x8012DEE0)
SetType(0x8012DEE0, "struct Overlay FmvOver")
del_items(0x8012D920)
SetType(0x8012D920, "int OWorldX")
del_items(0x8012D924)
SetType(0x8012D924, "int OWorldY")
del_items(0x8012D928)
SetType(0x8012D928, "int WWorldX")
del_items(0x8012D92C)
SetType(0x8012D92C, "int WWorldY")
del_items(0x80121A24)
SetType(0x80121A24, "short TxyAdd[16]")
del_items(0x8012C270)
SetType(0x8012C270, "int GXAdj2")
del_items(0x8012D930)
SetType(0x8012D930, "int TimePerFrame")
del_items(0x8012D934)
SetType(0x8012D934, "int CpuStart")
del_items(0x8012D938)
SetType(0x8012D938, "int CpuTime")
del_items(0x8012D93C)
SetType(0x8012D93C, "int DrawTime")
del_items(0x8012D940)
SetType(0x8012D940, "int DrawStart")
del_items(0x8012D944)
SetType(0x8012D944, "int LastCpuTime")
del_items(0x8012D948)
SetType(0x8012D948, "int LastDrawTime")
del_items(0x8012D94C)
SetType(0x8012D94C, "int DrawArea")
del_items(0x8012C278)
SetType(0x8012C278, "bool ProfOn")
del_items(0x800AB174)
SetType(0x800AB174, "unsigned char LevPals[17]")
del_items(0x80121B80)
SetType(0x80121B80, "unsigned short Level2Bgdata[25]")
del_items(0x800AB188)
SetType(0x800AB188, "struct PanelXY DefP1PanelXY")
del_items(0x800AB1DC)
SetType(0x800AB1DC, "struct PanelXY DefP1PanelXY2")
del_items(0x800AB230)
SetType(0x800AB230, "struct PanelXY DefP2PanelXY")
del_items(0x800AB284)
SetType(0x800AB284, "struct PanelXY DefP2PanelXY2")
del_items(0x800AB2D8)
SetType(0x800AB2D8, "unsigned int SpeedBarGfxTable[50]")
del_items(0x8012C2A0)
SetType(0x8012C2A0, "int hof")
del_items(0x8012C2A4)
SetType(0x8012C2A4, "int mof")
del_items(0x800AB3A0)
SetType(0x800AB3A0, "struct SFXHDR SFXTab[2]")
del_items(0x800AB4A0)
SetType(0x800AB4A0, "unsigned long STR_Buffer[18432]")
del_items(0x8012C2D8)
SetType(0x8012C2D8, "unsigned long Time")
del_items(0x8012C2DC)
SetType(0x8012C2DC, "bool CDWAIT")
del_items(0x800BD4A0)
SetType(0x800BD4A0, "struct SpuVoiceAttr voice_attr")
del_items(0x800BD4E0)
SetType(0x800BD4E0, "struct SFXHDR STRSave")
del_items(0x8012D950)
SetType(0x8012D950, "bool SavePause")
del_items(0x8012C2B1)
SetType(0x8012C2B1, "char NoActiveStreams")
del_items(0x8012C2B4)
SetType(0x8012C2B4, "bool STRInit")
del_items(0x8012C2B8)
SetType(0x8012C2B8, "int frame_rate")
del_items(0x8012C2BC)
SetType(0x8012C2BC, "unsigned char CDAngle")
del_items(0x8012C300)
SetType(0x8012C300, "char SFXNotPlayed")
del_items(0x8012C301)
SetType(0x8012C301, "char SFXNotInBank")
del_items(0x8012DEF0)
SetType(0x8012DEF0, "char spu_management[264]")
del_items(0x8012E000)
SetType(0x8012E000, "struct SpuReverbAttr rev_attr")
del_items(0x8012D958)
SetType(0x8012D958, "unsigned short NoSfx")
del_items(0x8012E020)
SetType(0x8012E020, "unsigned short CHStatus[24]")
del_items(0x8012C2EC)
SetType(0x8012C2EC, "struct bank_entry *BankOffsets")
del_items(0x8012C2F0)
SetType(0x8012C2F0, "long OffsetHandle")
del_items(0x8012C2F4)
SetType(0x8012C2F4, "int BankBase")
del_items(0x8012C2F8)
SetType(0x8012C2F8, "unsigned char SPU_Done")
del_items(0x80121F28)
SetType(0x80121F28, "unsigned short SFXRemapTab[58]")
del_items(0x8012C2FC)
SetType(0x8012C2FC, "int NoSNDRemaps")
del_items(0x800BD560)
SetType(0x800BD560, "struct PalCollection ThePals")
del_items(0x80121FD0)
SetType(0x80121FD0, "struct InitPos InitialPositions[20]")
del_items(0x8012C344)
SetType(0x8012C344, "int demo_level")
del_items(0x8012E050)
SetType(0x8012E050, "int buff[8]")
del_items(0x8012C348)
SetType(0x8012C348, "int old_val")
del_items(0x8012C34C)
SetType(0x8012C34C, "struct TASK *DemoTask")
del_items(0x8012C350)
SetType(0x8012C350, "struct TASK *DemoGameTask")
del_items(0x8012C354)
SetType(0x8012C354, "struct TASK *tonys")
del_items(0x8012C32C)
SetType(0x8012C32C, "int demo_load")
del_items(0x8012C330)
SetType(0x8012C330, "int demo_record_load")
del_items(0x8012C334)
SetType(0x8012C334, "int level_record")
del_items(0x8012C338)
SetType(0x8012C338, "char demo_fade_finished")
del_items(0x8012C33B)
SetType(0x8012C33B, "unsigned char demo_which")
del_items(0x800BD78C)
SetType(0x800BD78C, "unsigned long demolevel[5]")
del_items(0x8012C339)
SetType(0x8012C339, "unsigned char quest_cheat_num")
del_items(0x8012C33A)
SetType(0x8012C33A, "unsigned char cheat_quest_flag")
del_items(0x8012C328)
SetType(0x8012C328, "int moo_moo")
del_items(0x800BD74C)
SetType(0x800BD74C, "unsigned long quest_seed[16]")
del_items(0x8012C33C)
SetType(0x8012C33C, "unsigned char demo_flash")
del_items(0x8012C340)
SetType(0x8012C340, "int tonys_Task")
del_items(0x8012C4B0)
SetType(0x8012C4B0, "bool DoShowPanel")
del_items(0x8012C4B4)
SetType(0x8012C4B4, "bool DoDrawBg")
del_items(0x8012D95C)
SetType(0x8012D95C, "bool GlueFinished")
del_items(0x8012D960)
SetType(0x8012D960, "bool DoHomingScroll")
del_items(0x8012D964)
SetType(0x8012D964, "struct TextDat *TownerGfx")
del_items(0x8012D968)
SetType(0x8012D968, "int CurrentMonsterList")
del_items(0x8012C361)
SetType(0x8012C361, "char started_grtask")
del_items(0x800BD7A0)
SetType(0x800BD7A0, "struct PInf PlayerInfo[81]")
del_items(0x8012C4B8)
SetType(0x8012C4B8, "char ArmourChar[4]")
del_items(0x801220C4)
SetType(0x801220C4, "char WepChar[10]")
del_items(0x8012C4BC)
SetType(0x8012C4BC, "char CharChar[4]")
del_items(0x8012D96C)
SetType(0x8012D96C, "char ctrl_select_line")
del_items(0x8012D96D)
SetType(0x8012D96D, "char ctrl_select_side")
del_items(0x8012D96E)
SetType(0x8012D96E, "char ckeyheld")
del_items(0x8012D974)
SetType(0x8012D974, "struct RECT CtrlRect")
del_items(0x8012C4D0)
SetType(0x8012C4D0, "unsigned char ctrlflag")
del_items(0x800BDC14)
SetType(0x800BDC14, "struct KEY_ASSIGNS txt_actions[19]")
del_items(0x800BDB6C)
SetType(0x800BDB6C, "struct pad_assigns pad_txt[14]")
del_items(0x8012C4CC)
SetType(0x8012C4CC, "int toppos")
del_items(0x8012E070)
SetType(0x8012E070, "struct Dialog CtrlBack")
del_items(0x800BDD44)
SetType(0x800BDD44, "int controller_defaults[2][19]")
del_items(0x8012C53C)
SetType(0x8012C53C, "int gr_scrxoff")
del_items(0x8012C540)
SetType(0x8012C540, "int gr_scryoff")
del_items(0x8012C548)
SetType(0x8012C548, "unsigned short water_clut")
del_items(0x8012C54B)
SetType(0x8012C54B, "char visible_level")
del_items(0x8012C539)
SetType(0x8012C539, "char last_type")
del_items(0x8012C54D)
SetType(0x8012C54D, "char daylight")
del_items(0x8012C54A)
SetType(0x8012C54A, "char cow_in_sight")
del_items(0x8012C544)
SetType(0x8012C544, "unsigned int water_count")
del_items(0x8012C54C)
SetType(0x8012C54C, "unsigned char lastrnd")
del_items(0x8012C550)
SetType(0x8012C550, "int call_clock")
del_items(0x8012C560)
SetType(0x8012C560, "int TitleAnimCount")
del_items(0x8012C564)
SetType(0x8012C564, "int flametick")
del_items(0x800BDDDC)
SetType(0x800BDDDC, "int ypos[6]")
del_items(0x800BDDF4)
SetType(0x800BDDF4, "int frmlist[6]")
del_items(0x800BDE0C)
SetType(0x800BDE0C, "int xoff[6]")
del_items(0x8012C568)
SetType(0x8012C568, "int startx")
del_items(0x8012C56C)
SetType(0x8012C56C, "bool hellomumflag")
del_items(0x800BDE44)
SetType(0x800BDE44, "struct SPELLFX_DAT SpellFXDat[2]")
del_items(0x8012E080)
SetType(0x8012E080, "struct Particle PartArray[16]")
del_items(0x8012D97C)
SetType(0x8012D97C, "int partOtPos")
del_items(0x8012C58C)
SetType(0x8012C58C, "int SetParticle")
del_items(0x8012C590)
SetType(0x8012C590, "int p1partexecnum")
del_items(0x8012C594)
SetType(0x8012C594, "int p2partexecnum")
del_items(0x800BDE24)
SetType(0x800BDE24, "int JumpArray[8]")
del_items(0x8012C598)
SetType(0x8012C598, "int partjumpflag")
del_items(0x8012C59C)
SetType(0x8012C59C, "int partglowflag")
del_items(0x8012C5A0)
SetType(0x8012C5A0, "int partcolour")
del_items(0x8012C5A4)
SetType(0x8012C5A4, "bool anyfuckingmenus")
del_items(0x800BDED4)
SetType(0x800BDED4, "struct Spell_Target SplTarget[2]")
del_items(0x8012C5C5)
SetType(0x8012C5C5, "unsigned char select_flag")
del_items(0x8012D980)
SetType(0x8012D980, "struct RECT SelectRect")
del_items(0x8012D988)
SetType(0x8012D988, "char item_select")
del_items(0x8012C5C8)
SetType(0x8012C5C8, "char QSpell[2]")
del_items(0x8012C5CC)
SetType(0x8012C5CC, "char _spltotype[2]")
del_items(0x8012C5D0)
SetType(0x8012C5D0, "bool force_attack[2]")
del_items(0x8012C5B8)
SetType(0x8012C5B8, "struct CPlayer *gplayer")
del_items(0x8012E2C0)
SetType(0x8012E2C0, "struct Dialog SelectBack")
del_items(0x8012C5BC)
SetType(0x8012C5BC, "char mana_order[4]")
del_items(0x8012C5C0)
SetType(0x8012C5C0, "char health_order[4]")
del_items(0x8012C5C4)
SetType(0x8012C5C4, "unsigned char birdcheck")
del_items(0x8012E2D0)
SetType(0x8012E2D0, "struct TextDat *DecRequestors[10]")
del_items(0x8012D98C)
SetType(0x8012D98C, "unsigned short progress")
del_items(0x80122250)
SetType(0x80122250, "unsigned short Level2CutScreen[21]")
del_items(0x8012E2F8)
SetType(0x8012E2F8, "struct CScreen Scr")
del_items(0x8012C5F0)
SetType(0x8012C5F0, "struct TASK *CutScreenTSK")
del_items(0x8012C5F4)
SetType(0x8012C5F4, "bool GameLoading")
del_items(0x8012E378)
SetType(0x8012E378, "struct Dialog LBack")
del_items(0x8012C610)
SetType(0x8012C610, "unsigned int card_ev0")
del_items(0x8012C614)
SetType(0x8012C614, "unsigned int card_ev1")
del_items(0x8012C618)
SetType(0x8012C618, "unsigned int card_ev2")
del_items(0x8012C61C)
SetType(0x8012C61C, "unsigned int card_ev3")
del_items(0x8012C620)
SetType(0x8012C620, "unsigned int card_ev10")
del_items(0x8012C624)
SetType(0x8012C624, "unsigned int card_ev11")
del_items(0x8012C628)
SetType(0x8012C628, "unsigned int card_ev12")
del_items(0x8012C62C)
SetType(0x8012C62C, "unsigned int card_ev13")
del_items(0x8012C630)
SetType(0x8012C630, "int card_dirty[2]")
del_items(0x8012C638)
SetType(0x8012C638, "struct TASK *MemcardTask")
del_items(0x8012D990)
SetType(0x8012D990, "int card_event")
del_items(0x8012C60C)
SetType(0x8012C60C, "void (*mem_card_event_handler)()")
del_items(0x8012C604)
SetType(0x8012C604, "bool MemCardActive")
del_items(0x8012C608)
SetType(0x8012C608, "int never_hooked_events")
del_items(0x8012C690)
SetType(0x8012C690, "unsigned long MasterVol")
del_items(0x8012C694)
SetType(0x8012C694, "unsigned long MusicVol")
del_items(0x8012C698)
SetType(0x8012C698, "unsigned long SoundVol")
del_items(0x8012C69C)
SetType(0x8012C69C, "unsigned long VideoVol")
del_items(0x8012C6A0)
SetType(0x8012C6A0, "unsigned long SpeechVol")
del_items(0x8012D994)
SetType(0x8012D994, "struct TextDat *Slider")
del_items(0x8012D998)
SetType(0x8012D998, "int sw")
del_items(0x8012D99C)
SetType(0x8012D99C, "int sx")
del_items(0x8012D9A0)
SetType(0x8012D9A0, "int sy")
del_items(0x8012D9A4)
SetType(0x8012D9A4, "unsigned char Adjust")
del_items(0x8012D9A5)
SetType(0x8012D9A5, "unsigned char qspin")
del_items(0x8012D9A6)
SetType(0x8012D9A6, "unsigned char lqspin")
del_items(0x8012D9A8)
SetType(0x8012D9A8, "enum LANG_TYPE OrigLang")
del_items(0x8012D9AC)
SetType(0x8012D9AC, "enum LANG_TYPE OldLang")
del_items(0x8012D9B0)
SetType(0x8012D9B0, "enum LANG_TYPE NewLang")
del_items(0x8012C6A4)
SetType(0x8012C6A4, "int save_blocks")
del_items(0x8012C6A8)
SetType(0x8012C6A8, "char *Savefilename")
del_items(0x8012C6AC)
SetType(0x8012C6AC, "int ReturnMenu")
del_items(0x8012D9B4)
SetType(0x8012D9B4, "struct RECT ORect")
del_items(0x8012D9BC)
SetType(0x8012D9BC, "char *McState[2]")
del_items(0x8012C6B0)
SetType(0x8012C6B0, "int they_pressed")
del_items(0x8012D9C4)
SetType(0x8012D9C4, "unsigned char Seed[8]")
del_items(0x8012C668)
SetType(0x8012C668, "bool optionsflag")
del_items(0x8012C65C)
SetType(0x8012C65C, "int cmenu")
del_items(0x8012C674)
SetType(0x8012C674, "int options_pad")
del_items(0x8012C664)
SetType(0x8012C664, "bool allspellsflag")
del_items(0x800BE974)
SetType(0x800BE974, "short Circle[64]")
del_items(0x8012C648)
SetType(0x8012C648, "bool goldcheat")
del_items(0x8012C678)
SetType(0x8012C678, "int OptionsSeed")
del_items(0x8012C67C)
SetType(0x8012C67C, "bool OptionsSetSeed")
del_items(0x8012C64C)
SetType(0x8012C64C, "unsigned char Qfromoptions")
del_items(0x8012C650)
SetType(0x8012C650, "int Spacing")
del_items(0x8012C654)
SetType(0x8012C654, "int cs")
del_items(0x8012C658)
SetType(0x8012C658, "int lastcs")
del_items(0x8012C660)
SetType(0x8012C660, "bool MemcardOverlay")
del_items(0x8012C66C)
SetType(0x8012C66C, "int saveflag")
del_items(0x8012C670)
SetType(0x8012C670, "int loadflag")
del_items(0x800BDF04)
SetType(0x800BDF04, "struct OMENUITEM MainMenu[8]")
del_items(0x800BDFC4)
SetType(0x800BDFC4, "struct OMENUITEM GameMenu[10]")
del_items(0x800BE0B4)
SetType(0x800BE0B4, "struct OMENUITEM SoundMenu[6]")
del_items(0x800BE144)
SetType(0x800BE144, "struct OMENUITEM CentreMenu[7]")
del_items(0x800BE1EC)
SetType(0x800BE1EC, "struct OMENUITEM LangMenu[7]")
del_items(0x800BE294)
SetType(0x800BE294, "struct OMENUITEM QuitMenu[4]")
del_items(0x800BE2F4)
SetType(0x800BE2F4, "struct OMENUITEM MemcardMenu[7]")
del_items(0x800BE39C)
SetType(0x800BE39C, "struct OMENUITEM MemcardLoadGameMenu[4]")
del_items(0x800BE3FC)
SetType(0x800BE3FC, "struct OMENUITEM MemcardSaveGameMenu[4]")
del_items(0x800BE45C)
SetType(0x800BE45C, "struct OMENUITEM MemcardSaveOptionsMenu[4]")
del_items(0x800BE4BC)
SetType(0x800BE4BC, "struct OMENUITEM MemcardLoadOptionsMenu[4]")
del_items(0x800BE51C)
SetType(0x800BE51C, "struct OMENUITEM MemcardCharacterMenu[4]")
del_items(0x800BE57C)
SetType(0x800BE57C, "struct OMENUITEM MemcardSelectCard1[7]")
del_items(0x800BE624)
SetType(0x800BE624, "struct OMENUITEM MemcardSelectCard2[7]")
del_items(0x800BE6CC)
SetType(0x800BE6CC, "struct OMENUITEM MemcardFormatMenu[4]")
del_items(0x800BE72C)
SetType(0x800BE72C, "struct OMENUITEM CheatMenu[10]")
del_items(0x800BE81C)
SetType(0x800BE81C, "struct OMENUITEM InfoMenu[2]")
del_items(0x800BE84C)
SetType(0x800BE84C, "struct OMENUITEM MonstViewMenu[3]")
del_items(0x800BE894)
SetType(0x800BE894, "struct OMENUITEM SeedMenu[3]")
del_items(0x800BE8DC)
SetType(0x800BE8DC, "struct OMENULIST MenuList[19]")
del_items(0x8012C680)
SetType(0x8012C680, "bool debounce")
del_items(0x8012C684)
SetType(0x8012C684, "unsigned char KeyPos")
del_items(0x800BE9F4)
SetType(0x800BE9F4, "unsigned short KeyTab[10]")
del_items(0x8012C688)
SetType(0x8012C688, "int SeedPos")
del_items(0x800BEA08)
SetType(0x800BEA08, "struct BIRDSTRUCT BirdList[16]")
del_items(0x8012D9CC)
SetType(0x8012D9CC, "int last_seenx[2]")
del_items(0x8012D9D4)
SetType(0x8012D9D4, "int last_seeny[2]")
del_items(0x8012C6BD)
SetType(0x8012C6BD, "char hop_height")
del_items(0x8012C6C0)
SetType(0x8012C6C0, "struct Perch perches[4]")
del_items(0x800BEB88)
SetType(0x800BEB88, "struct FMVDAT FmvTab[8]")
del_items(0x8012C6D4)
SetType(0x8012C6D4, "int | |
right):
from discopy.quantum.gates import CX, H, sqrt, Bra, Match
def cup_factory(left, right):
if left == right == qubit:
return CX >> H @ sqrt(2) @ Id(1) >> Bra(0, 0)
if left == right == bit:
return Match() >> Discard(bit)
raise ValueError
return rigid.cups(
left, right, ar_factory=Circuit, cup_factory=cup_factory)
@staticmethod
def caps(left, right):
    """Caps are obtained by taking the dagger of the corresponding cups."""
    cups = Circuit.cups(left, right)
    return cups.dagger()
@staticmethod
def spiders(n_legs_in, n_legs_out, dim):
    """
    Build a spider circuit with ``n_legs_in`` inputs and ``n_legs_out``
    outputs on ``len(dim)`` parallel qubit wires.

    The spider is first built as an abstract diagram, decomposed into
    binary/unary pieces, then mapped to a circuit of CX / H / Bra gates.
    """
    from discopy.quantum.gates import CX, H, Bra, sqrt
    t = rigid.Ty('PRO')
    if len(dim) == 0:
        # No wires at all: the empty circuit.
        return Id()

    def decomp_ar(spider):
        # Rewrite an arbitrary spider into its binary/unary decomposition.
        return spider.decompose()

    def spider_ar(spider):
        dom, cod = len(spider.dom), len(spider.cod)
        if dom < cod:
            # More outputs than inputs: build the flipped spider and dagger it.
            return spider_ar(spider.dagger()).dagger()
        circ = Id(qubit)
        if dom == 2:
            # Binary merge: CX then postselect the second qubit on |0>.
            circ = CX >> Id(qubit) @ Bra(0)
        if cod == 0:
            # Deletion: H then postselect on |0>, with sqrt(2) normalisation.
            circ >>= H >> Bra(0) @ sqrt(2)
        return circ

    # Abstract spider diagram on a formal type, one factor per wire.
    diag = Diagram.spiders(n_legs_in, n_legs_out, t ** len(dim))
    # First decompose, then translate each elementary spider to a circuit.
    decomp = monoidal.Functor(ob={t: t}, ar=decomp_ar)
    to_circ = monoidal.Functor(ob={t: qubit}, ar=spider_ar,
                               ar_factory=Circuit, ob_factory=Ty)
    circ = to_circ(decomp(diag))
    return circ
def _apply_gate(self, gate, position):
    """Compose ``gate`` onto wire ``position``, padding both sides with identities."""
    n_wires = len(self.cod)
    if not 0 <= position < n_wires:
        raise ValueError(f'Index {position} out of range.')
    pad_left = Id(position)
    pad_right = Id(n_wires - position - len(gate.cod))
    return self >> pad_left @ gate @ pad_right
def _apply_controlled(self, base_gate, *xs):
    """
    Apply ``base_gate`` at wire ``xs[-1]``, controlled on wires ``xs[:-1]``.

    Controls are wrapped innermost-first on each side of the target; the
    ``distance`` sign encodes whether a control sits above or below it.
    """
    from discopy.quantum import Controlled
    if len(set(xs)) != len(xs):
        raise ValueError(f'Indices {xs} not unique.')
    if min(xs) < 0 or max(xs) >= len(self.cod):
        raise ValueError(f'Indices {xs} out of range.')
    target = xs[-1]
    controls_below = sorted(x for x in xs[:-1] if x < target)
    controls_above = sorted(x for x in xs[:-1] if x > target)
    gate = base_gate
    anchor = target
    for ctrl in reversed(controls_below):
        gate = Controlled(gate, distance=anchor - ctrl)
        anchor = ctrl
    anchor = target
    for ctrl in reversed(controls_above):
        gate = Controlled(gate, distance=anchor - ctrl)
        anchor = ctrl
    return self._apply_gate(gate, min(xs))
def H(self, x):
    """Apply a Hadamard gate on wire ``x``."""
    from discopy.quantum import H as gate
    return self._apply_gate(gate, x)
def S(self, x):
    """Apply an S (phase) gate on wire ``x``."""
    from discopy.quantum import S as gate
    return self._apply_gate(gate, x)
def X(self, x):
    """Apply a Pauli-X gate on wire ``x``."""
    from discopy.quantum import X as gate
    return self._apply_gate(gate, x)
def Y(self, x):
    """Apply a Pauli-Y gate on wire ``x``."""
    from discopy.quantum import Y as gate
    return self._apply_gate(gate, x)
def Z(self, x):
    """Apply a Pauli-Z gate on wire ``x``."""
    from discopy.quantum import Z as gate
    return self._apply_gate(gate, x)
def Rx(self, phase, x):
    """Apply an X-rotation by ``phase`` on wire ``x``."""
    from discopy.quantum import Rx as rotation
    return self._apply_gate(rotation(phase), x)
def Ry(self, phase, x):
    """Apply Ry gate to circuit.

    The docstring previously said "Rx" — this method applies a Y-rotation
    by ``phase`` on wire ``x``.
    """
    from discopy.quantum import Ry
    return self._apply_gate(Ry(phase), x)
def Rz(self, phase, x):
    """Apply a Z-rotation by ``phase`` on wire ``x``."""
    from discopy.quantum import Rz as rotation
    return self._apply_gate(rotation(phase), x)
def CX(self, x, y):
    """Apply a controlled-X (CNOT) with control ``x`` and target ``y``."""
    from discopy.quantum import X as gate
    return self._apply_controlled(gate, x, y)
def CY(self, x, y):
    """Apply a controlled-Y with control ``x`` and target ``y``."""
    from discopy.quantum import Y as gate
    return self._apply_controlled(gate, x, y)
def CZ(self, x, y):
    """Apply a controlled-Z with control ``x`` and target ``y``."""
    from discopy.quantum import Z as gate
    return self._apply_controlled(gate, x, y)
def CCX(self, x, y, z):
    """Apply a doubly-controlled X (Toffoli) with controls ``x``, ``y`` and target ``z``."""
    from discopy.quantum import X as gate
    return self._apply_controlled(gate, x, y, z)
def CCZ(self, x, y, z):
    """Apply a doubly-controlled Z with controls ``x``, ``y`` and target ``z``."""
    from discopy.quantum import Z as gate
    return self._apply_controlled(gate, x, y, z)
def CRx(self, phase, x, y):
    """Apply a controlled X-rotation by ``phase`` with control ``x`` and target ``y``."""
    from discopy.quantum import Rx as rotation
    return self._apply_controlled(rotation(phase), x, y)
def CRz(self, phase, x, y):
    """Apply a controlled Z-rotation by ``phase`` with control ``x`` and target ``y``."""
    from discopy.quantum import Rz as rotation
    return self._apply_controlled(rotation(phase), x, y)
class Id(rigid.Id, Circuit):
    """The identity circuit on a given domain; an int means that many qubits."""

    def __init__(self, dom=0):
        if isinstance(dom, int):
            dom = qubit ** dom
        # Remember whether the domain is purely quantum, for a compact repr.
        self._qubit_only = all(ob.name == "qubit" for ob in dom)
        rigid.Id.__init__(self, dom)
        Circuit.__init__(self, dom, dom, [], [])

    def __repr__(self):
        shown = len(self.dom) if self._qubit_only else self.dom
        return "Id({})".format(shown)

    def __str__(self):
        return self.__repr__()
Circuit.id = Id
class Box(rigid.Box, Circuit):
    """
    Boxes in a circuit diagram.

    Parameters
    ----------
    name : any
    dom : discopy.quantum.circuit.Ty
    cod : discopy.quantum.circuit.Ty
    is_mixed : bool, optional
        Whether the box is mixed, default is :code:`True`.
    _dagger : bool, optional
        If set to :code:`None` then the box is self-adjoint.
    """
    def __init__(self, name, dom, cod,
                 is_mixed=True, data=None, _dagger=False, _conjugate=False):
        if dom and not isinstance(dom, Ty):
            raise TypeError(messages.type_err(Ty, dom))
        if cod and not isinstance(cod, Ty):
            raise TypeError(messages.type_err(Ty, cod))
        # Conjugated boxes carry winding number 1, plain boxes 0.
        z = 1 if _conjugate else 0
        self._conjugate = _conjugate
        rigid.Box.__init__(
            self, name, dom, cod, data=data, _dagger=_dagger, _z=z)
        # A box is itself a one-box circuit.
        Circuit.__init__(self, dom, cod, [self], [0])
        if not is_mixed:
            # Pure boxes must be all-classical (Digits) or all-quantum (Qudits).
            if all(isinstance(x, Digit) for x in dom @ cod):
                self.classical = True
            elif all(isinstance(x, Qudit) for x in dom @ cod):
                self.classical = False
            else:
                raise ValueError(
                    "dom and cod should be Digits only or Qudits only.")
        self._mixed = is_mixed

    def grad(self, var, **params):
        # Gradient w.r.t. a free symbol: the zero (empty) sum when the
        # symbol does not occur; subclasses implement the non-trivial case.
        if var not in self.free_symbols:
            return Sum([], self.dom, self.cod)
        raise NotImplementedError

    @property
    def is_mixed(self):
        # Whether the box acts on mixed classical-quantum states.
        return self._mixed

    def __repr__(self):
        return self.name
class Sum(tensor.Sum, Box):
    """Formal sum of circuits, evaluated and measured term by term."""

    @staticmethod
    def upgrade(old):
        """Promote a plain tensor sum into a circuit Sum."""
        return Sum(old.terms, old.dom, old.cod)

    @property
    def is_mixed(self):
        """A sum is mixed as soon as one of its terms is."""
        return any(term.is_mixed for term in self.terms)

    def get_counts(self, backend=None, **params):
        """Merge the counts of every term into a single histogram."""
        if not self.terms:
            return {}
        if len(self.terms) == 1:
            return self.terms[0].get_counts(backend=backend, **params)
        per_term = Circuit.get_counts(*self.terms, backend=backend, **params)
        merged = {}
        for histogram in per_term:
            for key, value in histogram.items():
                merged[key] = merged.get(key, 0) + value
        return merged

    def eval(self, backend=None, mixed=False, **params):
        """Evaluate each term and add the results."""
        mixed = mixed or any(term.is_mixed for term in self.terms)
        if not self.terms:
            return 0
        if len(self.terms) == 1:
            return self.terms[0].eval(backend=backend, mixed=mixed, **params)
        evaluations = Circuit.eval(
            *self.terms, backend=backend, mixed=mixed, **params)
        return sum(evaluations)

    def grad(self, var, **params):
        """Termwise gradient, summed."""
        return sum(term.grad(var, **params) for term in self.terms)

    def to_tk(self):
        """Export each term to tket separately."""
        return [term.to_tk() for term in self.terms]
Circuit.sum = Sum
class Swap(rigid.Swap, Box):
    """Swap of two circuit wires; mixed exactly when the wire types differ."""

    def __init__(self, left, right):
        rigid.Swap.__init__(self, left, right)
        Box.__init__(
            self, self.name, self.dom, self.cod, is_mixed=left != right)

    def dagger(self):
        return Swap(self.right, self.left)

    def conjugate(self):
        return Swap(self.right, self.left)

    # Left and right adjoints coincide with the conjugate.
    l = r = property(conjugate)

    def __repr__(self):
        if self.left == self.right == qubit:
            return "SWAP"
        return super().__repr__()

    def __str__(self):
        return self.__repr__()
class Discard(RealConjugate, Box):
    """Trace out ``n`` qubits; on ``bit`` wires this takes the marginal."""

    def __init__(self, dom=1):
        if isinstance(dom, int):
            dom = qubit ** dom
        super().__init__(
            "Discard({})".format(dom), dom, qubit ** 0, is_mixed=True)
        self.draw_as_discards = True
        self.n_qubits = len(dom)

    def dagger(self):
        # The adjoint of discarding is preparing the maximally-mixed state.
        return MixedState(self.dom)

    def _decompose(self):
        # One single-wire discard per qubit, tensored together.
        return Id().tensor(*[Discard()] * self.n_qubits)
class MixedState(RealConjugate, Box):
    """Prepare ``n`` maximally-mixed qubits; on ``bit`` this is the uniform distribution."""

    def __init__(self, cod=1):
        if isinstance(cod, int):
            cod = qubit ** cod
        super().__init__(
            "MixedState({})".format(cod), qubit ** 0, cod, is_mixed=True)
        self.drawing_name = "MixedState"
        if cod == bit:
            # Uniform bit distributions are drawn as unlabelled black spiders.
            self.drawing_name = ""
            self.draw_as_spider, self.color = True, "black"

    def dagger(self):
        return Discard(self.cod)

    def _decompose(self):
        return Id().tensor(*[MixedState()] * len(self.cod))
class Measure(RealConjugate, Box):
    """
    Measure n qubits into n bits.

    Parameters
    ----------
    n_qubits : int
        Number of qubits to measure.
    destructive : bool, optional
        Whether to do a non-destructive measurement instead.
    override_bits : bool, optional
        Whether to override input bits, this is the standard behaviour of tket.
    """
    def __init__(self, n_qubits=1, destructive=True, override_bits=False):
        dom, cod = qubit ** n_qubits, bit ** n_qubits
        name = "Measure({})".format("" if n_qubits == 1 else n_qubits)
        if not destructive:
            # Non-destructive: the measured qubits survive alongside the bits.
            cod = qubit ** n_qubits @ cod
            name = name\
                .replace("()", "(1)").replace(')', ", destructive=False)")
        if override_bits:
            # tket-style: the box also consumes the bits it will overwrite.
            dom = dom @ bit ** n_qubits
            name = name\
                .replace("()", "(1)").replace(')', ", override_bits=True)")
        super().__init__(name, dom, cod, is_mixed=True)
        self.destructive, self.override_bits = destructive, override_bits
        self.n_qubits = n_qubits
        self.draw_as_measures = True

    def dagger(self):
        # The adjoint of measurement is controlled preparation (Encode).
        return Encode(self.n_qubits,
                      constructive=self.destructive,
                      reset_bits=self.override_bits)

    def _decompose(self):
        # One single-qubit measurement per wire, tensored together.
        return Id().tensor(*[
            Measure(destructive=self.destructive,
                    override_bits=self.override_bits)] * self.n_qubits)
class Encode(RealConjugate, Box):
"""
Controlled preparation, i.e. encode n bits into n qubits.
Parameters
----------
n_bits : int
Number of bits to encode.
constructive : bool, optional
Whether to do a classically-controlled correction instead.
reset_bits : bool, optional
Whether to reset the bits to the uniform distribution.
"""
def __init__(self, n_bits=1, constructive=True, reset_bits=False):
dom, cod = bit ** n_bits, qubit ** n_bits
name = Measure(n_bits, constructive, reset_bits).name\
.replace("Measure", "Encode")\
.replace("destructive", "constructive")\
.replace("override_bits", "reset_bits")
super().__init__(name, dom, cod, is_mixed=True)
self.constructive, self.reset_bits = constructive, reset_bits
self.n_bits = n_bits
def dagger(self):
return Measure(self.n_bits,
destructive=self.constructive,
override_bits=self.reset_bits)
def _decompose(self):
return Id().tensor(*[
Encode(constructive=self.constructive,
reset_bits=self.reset_bits)] * | |
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model architecture for predictive model, including CDNA, DNA, and STP."""
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.platform import flags
from tensorflow.contrib.layers.python import layers as tf_layers
from lstm_ops import basic_conv_lstm_cell
FLAGS = flags.FLAGS
# Amount to use when lower bounding tensors
RELU_SHIFT = 1e-12
# kernel size for DNA and CDNA.
DNA_KERN_SIZE = 5
def kl_divergence(mu, log_sigma):
  """KL divergence of diagonal gaussian N(mu,exp(log_sigma)) and N(0,1).

  Args:
    mu: mu parameter of the distribution.
    log_sigma: log(sigma) parameter of the distribution.
  Returns:
    the KL loss.
  """
  # Per-dimension KL term, summed over the latent dimensions (axis 1).
  per_dim = 1. + log_sigma - tf.square(mu) - tf.exp(log_sigma)
  return -.5 * tf.reduce_sum(per_dim, axis=1)
def construct_latent_tower(images):
  """Builds convolutional latent tower for stochastic model.

  At training time this tower generates a latent distribution (mean and std)
  conditioned on the entire video. This latent variable will be fed to the
  main tower as an extra variable to be used for future frames prediction.
  At inference time, the tower is disabled and only returns latents sampled
  from N(0,1).
  If the multi_latent flag is on, a different latent for every timestep would
  be generated.

  Args:
    images: tensor of ground truth image sequences
  Returns:
    latent_mean: predicted latent mean
    latent_std: predicted latent standard deviation
    latent_loss: loss of the latent tower
    samples: random samples sampled from standard gaussian
  """
  with slim.arg_scope([slim.conv2d], reuse=False):
    # Stack all frames along channels so the tower conditions on the video.
    stacked_images = tf.concat(images, 3)
    latent_enc1 = slim.conv2d(
        stacked_images,
        32, [3, 3],
        stride=2,
        scope='latent_conv1',
        normalizer_fn=tf_layers.layer_norm,
        normalizer_params={'scope': 'latent_norm1'})
    latent_enc2 = slim.conv2d(
        latent_enc1,
        64, [3, 3],
        stride=2,
        scope='latent_conv2',
        normalizer_fn=tf_layers.layer_norm,
        normalizer_params={'scope': 'latent_norm2'})
    latent_enc3 = slim.conv2d(
        latent_enc2,
        64, [3, 3],
        stride=1,
        scope='latent_conv3',
        normalizer_fn=tf_layers.layer_norm,
        normalizer_params={'scope': 'latent_norm3'})
    latent_mean = slim.conv2d(
        latent_enc3,
        FLAGS.latent_channels, [3, 3],
        stride=2,
        activation_fn=None,
        scope='latent_mean',
        normalizer_fn=tf_layers.layer_norm,
        normalizer_params={'scope': 'latent_norm_mean'})
    # NOTE(review): unlike latent_mean, latent_std keeps slim's default relu
    # activation before the shift below — confirm this asymmetry is intended.
    latent_std = slim.conv2d(
        latent_enc3,
        FLAGS.latent_channels, [3, 3],
        stride=2,
        scope='latent_std',
        normalizer_fn=tf_layers.layer_norm,
        normalizer_params={'scope': 'latent_std_norm'})
    # Lower-bound the predicted (log) std to keep the KL term finite.
    latent_std += FLAGS.latent_std_min
    divergence = kl_divergence(latent_mean, latent_std)
    latent_loss = tf.reduce_mean(divergence)
  if FLAGS.multi_latent:
    # timestep x batch_size x latent_size
    # NOTE(review): `list + latent_mean.shape` relies on TensorShape
    # supporting `+` as concatenation — verify against the targeted TF
    # version (shape.as_list() would be the defensive spelling).
    samples = tf.random_normal(
        [FLAGS.sequence_length-1] + latent_mean.shape, 0, 1,
        dtype=tf.float32)
  else:
    # batch_size x latent_size
    samples = tf.random_normal(latent_mean.shape, 0, 1, dtype=tf.float32)
  if FLAGS.inference_time:
    # No latent tower at inference time, just standard gaussian.
    return None, None, None, samples
  else:
    return latent_mean, latent_std, latent_loss, samples
def construct_model(images,
                    actions=None,
                    states=None,
                    iter_num=-1.0,
                    k=-1,
                    use_state=True,
                    num_masks=10,
                    stp=False,
                    cdna=True,
                    dna=False,
                    context_frames=2):
  """Build convolutional lstm video predictor using STP, CDNA, or DNA.

  Args:
    images: tensor of ground truth image sequences
    actions: tensor of action sequences
    states: tensor of ground truth state sequences
    iter_num: tensor of the current training iteration (for sched. sampling)
    k: constant used for scheduled sampling. -1 to feed in own prediction.
    use_state: True to include state and action in prediction
    num_masks: the number of different pixel motion predictions (and
      the number of masks for each of those predictions)
    stp: True to use Spatial Transformer Predictor (STP)
    cdna: True to use Convoluational Dynamic Neural Advection (CDNA)
    dna: True to use Dynamic Neural Advection (DNA)
    context_frames: number of ground truth frames to pass in before
      feeding in own predictions
  Returns:
    gen_images: predicted future image frames
    gen_states: predicted future states
    latent_loss: KL loss of the latent tower (0.0 when the stochastic
      model is disabled)
  Raises:
    ValueError: if more than one network option specified or more than 1 mask
      specified for DNA model.
  """
  # Each image is being used twice, in latent tower and main tower.
  # This is to make sure we are using the *same* image for both, ...
  # ... given how TF queues work.
  images = [tf.identity(image) for image in images]
  if stp + cdna + dna != 1:
    raise ValueError('More than one, or no network option specified.')
  batch_size, img_height, img_width, color_channels = images[0].get_shape()[0:4]
  lstm_func = basic_conv_lstm_cell
  # Generated robot states and images.
  gen_states, gen_images = [], []
  current_state = states[0]
  if k == -1:
    feedself = True
  else:
    # Scheduled sampling:
    # Calculate number of ground-truth frames to pass in.
    num_ground_truth = tf.to_int32(
        tf.round(tf.to_float(batch_size) * (k / (k + tf.exp(iter_num / k)))))
    feedself = False
  # LSTM state sizes and states.
  lstm_size = np.int32(np.array([32, 32, 64, 64, 128, 64, 32]))
  lstm_state1, lstm_state2, lstm_state3, lstm_state4 = None, None, None, None
  lstm_state5, lstm_state6, lstm_state7 = None, None, None
  # Latent tower
  latent_loss = 0.0
  if FLAGS.stochastic_model:
    latent_tower_outputs = construct_latent_tower(images)
    latent_mean, latent_std, latent_loss, samples = latent_tower_outputs
  # Main tower
  for image, action in zip(images[:-1], actions[:-1]):
    # Reuse variables after the first timestep.
    reuse = bool(gen_images)
    done_warm_start = len(gen_images) > context_frames - 1
    with slim.arg_scope(
        [lstm_func, slim.layers.conv2d, slim.layers.fully_connected,
         tf_layers.layer_norm, slim.layers.conv2d_transpose],
        reuse=reuse):
      if feedself and done_warm_start:
        # Feed in generated image.
        prev_image = gen_images[-1]
      elif done_warm_start:
        # Scheduled sampling
        prev_image = scheduled_sample(image, gen_images[-1], batch_size,
                                      num_ground_truth)
      else:
        # Always feed in ground_truth
        prev_image = image
      # Predicted state is always fed back in
      state_action = tf.concat(axis=1, values=[action, current_state])
      enc0 = slim.layers.conv2d(
          prev_image,
          32, [5, 5],
          stride=2,
          scope='scale1_conv1',
          normalizer_fn=tf_layers.layer_norm,
          normalizer_params={'scope': 'layer_norm1'})
      hidden1, lstm_state1 = lstm_func(
          enc0, lstm_state1, lstm_size[0], scope='state1')
      hidden1 = tf_layers.layer_norm(hidden1, scope='layer_norm2')
      hidden2, lstm_state2 = lstm_func(
          hidden1, lstm_state2, lstm_size[1], scope='state2')
      hidden2 = tf_layers.layer_norm(hidden2, scope='layer_norm3')
      enc1 = slim.layers.conv2d(
          hidden2, hidden2.get_shape()[3], [3, 3], stride=2, scope='conv2')
      hidden3, lstm_state3 = lstm_func(
          enc1, lstm_state3, lstm_size[2], scope='state3')
      hidden3 = tf_layers.layer_norm(hidden3, scope='layer_norm4')
      hidden4, lstm_state4 = lstm_func(
          hidden3, lstm_state4, lstm_size[3], scope='state4')
      hidden4 = tf_layers.layer_norm(hidden4, scope='layer_norm5')
      enc2 = slim.layers.conv2d(
          hidden4, hidden4.get_shape()[3], [3, 3], stride=2, scope='conv3')
      # Pass in state and action.
      smear = tf.reshape(
          state_action,
          [int(batch_size), 1, 1, int(state_action.get_shape()[1])])
      smear = tf.tile(
          smear, [1, int(enc2.get_shape()[1]), int(enc2.get_shape()[2]), 1])
      if use_state:
        enc2 = tf.concat(axis=3, values=[enc2, smear])
      # Setup latent
      if FLAGS.stochastic_model:
        latent = samples
        if FLAGS.multi_latent:
          # BUG FIX: the original indexed with an undefined name `timestep`
          # (NameError whenever multi_latent is set). The current timestep
          # is the number of frames generated so far.
          latent = samples[len(gen_images)]
        if not FLAGS.inference_time:
          # First training stage: feed the N(0, 1) sample unchanged;
          # afterwards reparameterize with the predicted mean and std.
          latent = tf.cond(iter_num < FLAGS.num_iterations_1st_stage,
                           lambda: tf.identity(latent),
                           lambda: latent_mean + tf.exp(latent_std / 2.0) * latent)
        with tf.control_dependencies([latent]):
          enc2 = tf.concat([enc2, latent], 3)
      enc3 = slim.layers.conv2d(
          enc2, hidden4.get_shape()[3], [1, 1], stride=1, scope='conv4')
      hidden5, lstm_state5 = lstm_func(
          enc3, lstm_state5, lstm_size[4], scope='state5')  # last 8x8
      hidden5 = tf_layers.layer_norm(hidden5, scope='layer_norm6')
      enc4 = slim.layers.conv2d_transpose(
          hidden5, hidden5.get_shape()[3], 3, stride=2, scope='convt1')
      hidden6, lstm_state6 = lstm_func(
          enc4, lstm_state6, lstm_size[5], scope='state6')  # 16x16
      hidden6 = tf_layers.layer_norm(hidden6, scope='layer_norm7')
      # Skip connection.
      hidden6 = tf.concat(axis=3, values=[hidden6, enc1])  # both 16x16
      enc5 = slim.layers.conv2d_transpose(
          hidden6, hidden6.get_shape()[3], 3, stride=2, scope='convt2')
      hidden7, lstm_state7 = lstm_func(
          enc5, lstm_state7, lstm_size[6], scope='state7')  # 32x32
      hidden7 = tf_layers.layer_norm(hidden7, scope='layer_norm8')
      # Skip connection.
      hidden7 = tf.concat(axis=3, values=[hidden7, enc0])  # both 32x32
      enc6 = slim.layers.conv2d_transpose(
          hidden7,
          hidden7.get_shape()[3], 3, stride=2, scope='convt3', activation_fn=None,
          normalizer_fn=tf_layers.layer_norm,
          normalizer_params={'scope': 'layer_norm9'})
      if dna:
        # Using largest hidden state for predicting untied conv kernels.
        enc7 = slim.layers.conv2d_transpose(
            enc6, DNA_KERN_SIZE**2, 1, stride=1, scope='convt4', activation_fn=None)
      else:
        # Using largest hidden state for predicting a new image layer.
        enc7 = slim.layers.conv2d_transpose(
            enc6, color_channels, 1, stride=1, scope='convt4', activation_fn=None)
        # This allows the network to also generate one image from scratch,
        # which is useful when regions of the image become unoccluded.
        transformed = [tf.nn.sigmoid(enc7)]
      if stp:
        stp_input0 = tf.reshape(hidden5, [int(batch_size), -1])
        stp_input1 = slim.layers.fully_connected(
            stp_input0, 100, scope='fc_stp')
        transformed += stp_transformation(prev_image, stp_input1, num_masks)
      elif cdna:
        cdna_input = tf.reshape(hidden5, [int(batch_size), -1])
        transformed += cdna_transformation(prev_image, cdna_input, num_masks,
                                           int(color_channels))
      elif dna:
        # Only one mask is supported (more should be unnecessary).
        if num_masks != 1:
          raise ValueError('Only one mask is supported for DNA model.')
        transformed = [dna_transformation(prev_image, enc7)]
      masks = slim.layers.conv2d_transpose(
          enc6, num_masks + 1, 1, stride=1, scope='convt7', activation_fn=None)
      masks = tf.reshape(
          tf.nn.softmax(tf.reshape(masks, [-1, num_masks + 1])),
          [int(batch_size), int(img_height), int(img_width), num_masks + 1])
      mask_list = tf.split(axis=3, num_or_size_splits=num_masks + 1, value=masks)
      # Composite the predicted frame: background copy of prev_image plus
      # one mask-weighted transformed layer per motion prediction.
      output = mask_list[0] * prev_image
      for layer, mask in zip(transformed, mask_list[1:]):
        output += layer * mask
      gen_images.append(output)
      current_state = slim.layers.fully_connected(
          state_action,
          int(current_state.get_shape()[1]),
          scope='state_pred',
          activation_fn=None)
      gen_states.append(current_state)
  return gen_images, gen_states, latent_loss
## Utility functions
def stp_transformation(prev_image, stp_input, num_masks):
"""Apply spatial transformer predictor (STP) to previous image.
Args:
prev_image: previous image to be transformed.
stp_input: hidden layer to be used for | |
t in relation:
basic_tag.append(relation[t][0])
ratio *= relation[t][1]
else:
basic_tag.append(t)
basic_tag = tuple(basic_tag)
# compute identical factor ratio compare to a fully diffent decay
#that we have assume for the basic tag
if len(set(tag)) != len(tag):
for t in set(tag):
ratio /= math.factorial(tag.count(t))
# Now build the output
if basic_tag not in tag2real:
tag2real[basic_tag] = (tag, ratio)
decay_mapping[tag] = set([(tag, 1)])
ratio2=1
else:
real_tag, ratio2 = tag2real[basic_tag]
if real_tag != tag:
decay_mapping[real_tag].add((tag, ratio/ratio2))
return decay_mapping
@misc.mute_logger()
@misc.set_global()
def generate_all_matrix_element(self):
    """Generate the full series of matrix elements needed by MadSpin,
    i.e. the undecayed (production) and the decayed ones, and associate
    those to the madspin production_topo object.

    Steps performed:
      1. compute the partial widths (branching ratios),
      2. compute the production matrix elements,
      3. create the all-topology object (and prune unused decay branches),
      4. compute the full (production x decay) matrix elements, reusing the
         partial widths to throw away pointless decays,
      5. attach the decay information (and copy production files) into each
         full-matrix-element directory,
      6. generate the standalone 1 -> N decay-only matrix elements.
    """
    # 0. clean previous run ------------------------------------------------
    path_me = self.path_me
    # Best-effort cleanup: the directories may not exist on a first run.
    try:
        shutil.rmtree(pjoin(path_me, 'full_me'))
    except Exception:
        pass
    try:
        shutil.rmtree(pjoin(path_me, 'production_me'))
    except Exception as error:
        pass
    path_me = self.path_me

    # 1. compute the partial width------------------------------------------
    # Skipped in pure helicity (pass-through) mode.
    if not self.options["onlyhelicity"]:
        self.get_branching_ratio()

    # 2. compute the production matrix element -----------------------------
    # Collect the process definitions from the banner's process card:
    # 'generate ...' lines and 'add process ...' lines.
    processes = [line[9:].strip() for line in self.banner.proc_card
                 if line.startswith('generate')]
    processes += [' '.join(line.split()[2:]) for line in self.banner.proc_card
                  if re.search('^\s*add\s+process', line)]

    mgcmd = self.mgcmd
    modelpath = self.model.get('modelpath+restriction')
    commandline = "import model %s" % modelpath
    if not self.model.mg5_name:
        # keep the model's original particle names
        commandline += ' --modelname'
    mgcmd.exec_cmd(commandline)

    # Handle the multiparticle definitions of the banner.
    #for name, definition in self.mscmd.multiparticles:
    if hasattr(self.mscmd, 'multiparticles_ms'):
        for name, pdgs in self.mscmd.multiparticles_ms.items():
            if name == 'all':
                continue
            #self.banner.get('proc_card').get('multiparticles'):
            mgcmd.do_define("%s = %s" % (name, ' '.join(repr(i) for i in pdgs)))

    # Subprocess grouping must be off for the standalone MadSpin output.
    mgcmd.exec_cmd("set group_subprocesses False")

    logger.info('generating the production square matrix element')
    start = time.time()
    commandline = ''
    for proc in processes:
        if '[' in proc:
            # NLO-style syntax: expand to the underlying LO definitions.
            commandline += reweight_interface.ReweightInterface.get_LO_definition_from_NLO(proc, mgcmd._curr_model)
        else:
            commandline += 'add process %s; ' % proc
    # The first 'add process' must be a 'generate'.
    commandline = commandline.replace('add process', 'generate', 1)
    logger.info(commandline)
    mgcmd.exec_cmd(commandline, precmd=True)
    commandline = 'output standalone_msP %s %s' % \
        (pjoin(path_me, 'production_me'), ' '.join(list(self.list_branches.keys())))
    mgcmd.exec_cmd(commandline, precmd=True)
    logger.info('Done %.4g' % (time.time() - start))

    # 3. Create all_ME + topology objects ----------------------------------
    matrix_elements = mgcmd._curr_matrix_elements.get_matrix_elements()
    self.all_ME.adding_me(matrix_elements, pjoin(path_me, 'production_me'))

    # 3b. simplify list_branches -------------------------------------------
    # remove decay which are not present in any production ME.
    final_states = set()
    for me in matrix_elements:
        for leg in me.get('base_amplitude').get('process').get('legs'):
            if not leg.get('state'):
                # initial-state leg: not a decay candidate
                continue
            label = self.model.get_particle(leg.get('id')).get_name()
            if self.all_ME.has_particles_ambiguity:
                # also register the charge conjugate when the same final
                # state can appear with either particle/antiparticle
                final_states.add(self.pid2label[-1 * self.pid2label[label]])
            final_states.add(label)
    for key in list(self.list_branches.keys()):
        if key not in final_states and key not in self.mgcmd._multiparticles:
            if (len(self.list_branches) > 1):
                del self.list_branches[key]
            elif not self.options["onlyhelicity"]:
                raise Exception(" No decay define for process.")
            # NOTE(review): in onlyhelicity mode the last (dummy) branch is
            # deliberately kept so the standalone output below has content.
            logger.info('keeping dummy decay for passthrough mode')

    # 4. compute the full matrix element -----------------------------------
    if not self.options["onlyhelicity"]:
        logger.info('generating the full matrix element squared (with decay)')
        start = time.time()
        # NOTE(review): 'to_decay' appears unused below -- kept as-is.
        to_decay = list(self.mscmd.list_branches.keys())
        decay_text = []
        for decays in self.mscmd.list_branches.values():
            for decay in decays:
                if '=' not in decay:
                    # no explicit order restriction: allow any QCD order
                    decay += ' QCD=99'
                if ',' in decay:
                    # nested decay chain: needs extra parenthesis
                    decay_text.append('(%s)' % decay)
                else:
                    decay_text.append(decay)
        decay_text = ', '.join(decay_text)
        commandline = ''
        for proc in processes:
            if not proc.strip().startswith(('add', 'generate')):
                proc = 'add process %s' % proc
            commandline += self.get_proc_with_decay(proc, decay_text, mgcmd._curr_model, self.options)
        commandline = commandline.replace('add process', 'generate', 1)
        logger.info(commandline)
        mgcmd.exec_cmd(commandline, precmd=True)
        # remove decay with 0 branching ratio.
        mgcmd.remove_pointless_decay(self.banner.param_card)
        commandline = 'output standalone_msF %s %s' % (pjoin(path_me, 'full_me'),
                                                       ' '.join(list(self.list_branches.keys())))
        mgcmd.exec_cmd(commandline, precmd=True)
        logger.info('Done %.4g' % (time.time() - start))
    elif self.options["onlyhelicity"]:
        logger.info("Helicity Matrix-Element")
        commandline = 'output standalone_msF %s %s' % \
            (pjoin(path_me, 'full_me'), ' '.join(list(self.list_branches.keys())))
        mgcmd.exec_cmd(commandline, precmd=True)
        logger.info('Done %.4g' % (time.time() - start))

    # 5. add the decay information to the all_topology object --------------
    for matrix_element in mgcmd._curr_matrix_elements.get_matrix_elements():
        me_path = pjoin(path_me, 'full_me', 'SubProcesses',
                        "P%s" % matrix_element.get('processes')[0].shell_string())
        self.all_ME.add_decay(matrix_element, me_path)

    # 5.b import production matrix elements (+ related info) in the full
    #     process directory
    list_prodfiles = ['matrix_prod.f', 'configs_production.inc', 'props_production.inc', 'nexternal_prod.inc']
    for tag in self.all_ME:
        prod_path = self.all_ME[tag]['path']
        nfinal = len(self.all_ME[tag]['base_order'][1])
        for dico in self.all_ME[tag]['decays']:
            full_path = dico['path']
            for item in list_prodfiles:
                prodfile = pjoin(prod_path, item)
                destination = pjoin(full_path, item)
                shutil.copyfile(prodfile, destination)
            # we need to write the file config_decays.inc
            self.generate_configs_file(nfinal, dico, full_path)

    if self.options["onlyhelicity"]:
        # no decay-only matrix elements needed in pass-through mode
        return

    # 6. generate decay only part ------------------------------------------
    logger.info('generate matrix element for decay only (1 - > N).')
    start = time.time()
    commandline = ''
    i = 0
    for processes in self.list_branches.values():
        for proc in processes:
            commandline += "add process %s @%i --no_warning=duplicate;" % (proc, i)
            i += 1
    commandline = commandline.replace('add process', 'generate', 1)
    mgcmd.exec_cmd(commandline, precmd=True)
    # remove decay with 0 branching ratio.
    mgcmd.remove_pointless_decay(self.banner.param_card)
    #
    commandline = 'output standalone_msF %s' % pjoin(path_me, 'decay_me')
    logger.info(commandline)
    mgcmd.exec_cmd(commandline, precmd=True)
    logger.info('Done %.4g' % (time.time() - start))
    #
    # Index every decay-only matrix element by its shell string.
    self.all_decay = {}
    for matrix_element in mgcmd._curr_matrix_elements.get_matrix_elements():
        me = matrix_element.get('processes')[0]
        me_string = me.shell_string()
        dirpath = pjoin(path_me, 'decay_me', 'SubProcesses', "P%s" % me_string)
        #
        self.all_decay[me_string] = {'path': dirpath,
                                     'dc_branch': dc_branch_from_me(me),
                                     'nbody': len(me.get_final_ids_after_decay()),
                                     'processes': matrix_element.get('processes'),
                                     'tag': me.shell_string(pdg_order=True)}
    #
    # if __debug__:
    #     #check that all decay matrix element correspond to a decay only
    #     for prod in self.all_ME.values():
    #         for decay in prod['matrix_element']['base_amplitude']['process']['decay_chains']:
    #             assert decay.shell_string() in self.all_decay
@staticmethod
def get_proc_with_decay(proc, decay_text, model, msoptions=None):
commands = []
if '[' in proc:
new_command = reweight_interface.ReweightInterface.get_LO_definition_from_NLO(proc, model)
new_procs = new_command.split(';')
else:
new_procs = [proc]
for new_proc in new_procs:
new_proc= new_proc.strip()
if new_proc.endswith(';'):
new_proc = new_proc[:-1]
#catch line like "define" where no decay need to be added
if not new_proc.strip():
continue
if new_proc.startswith('p '):
new_proc = 'add process %s' % new_proc
logger.critical("wrongly formatted input for MadSpin. Please report this!")
elif not new_proc.startswith(('add', 'generate')):
commands.append(new_proc)
continue
# check options
tmp, options = [], set(["--no_warning=duplicate"])
for arg in new_proc.split():
if arg.startswith('--'):
options.add(arg)
else:
tmp.append(arg)
new_proc = ' '.join(tmp)
options = list(options)
options.sort()
options = ' '.join(options)
# deal with @ syntax need to move it after the decay specification
if '@' in new_proc:
baseproc, proc_nb = new_proc.split('@')
try:
proc_nb = int(proc_nb)
except ValueError:
raise MadSpinError('MadSpin didn\'t allow order restriction after the @ comment: \"%s\" not valid' % proc_nb)
proc_nb = '@%i' % proc_nb
else:
baseproc = new_proc
proc_nb = ''
if msoptions and msoptions['global_order_coupling']:
if '@' in proc_nb:
proc_nb += " %s" % msoptions['global_order_coupling']
else:
proc_nb += " @0 %s" % msoptions['global_order_coupling']
nb_comma = baseproc.count(',')
if nb_comma == 0:
commands.append("%s, %s %s %s" % (baseproc, decay_text, proc_nb, options))
elif nb_comma == 1:
before, after = baseproc.split(',')
commands.append("%s, %s, (%s, %s) %s %s" % (before, decay_text, after, decay_text, proc_nb, options))
else:
part = baseproc.split(',')
if any('(' in p for p in part):
raise Exception('too much decay at MG level. this can not be done for the moment)')
else:
decay_part = []
for p in part[1:]:
decay_part.append("(%s, %s)" % (p, decay_text))
commands.append("%s, %s, %s %s %s" % (part[0], decay_text, ', '.join(decay_part), proc_nb, options))
commands.append('') #to have a ; at the end of the command
return ';'.join(commands)
def get_branching_ratio(self):
    """Compute the branching ratios of all decaying particles.

    Doing this before diagram generation allows pointless decays (zero
    branching ratio) to be dropped early.  Also updates the banner's
    param_card with the estimated widths and stores the estimator on self.
    """
    branches = list(self.mscmd.list_branches.keys())
    resonances = decay_misc.get_all_resonances(self.banner, self.mgcmd, branches)
    logger.debug('List of resonances:%s' % resonances)

    me_dir = os.path.realpath(self.path_me)
    estimator = width_estimate(resonances, me_dir, self.banner, self.model,
                               self.pid2label)
    estimator.extract_br(self.list_branches, self.mgcmd)
    estimator.print_branching_fractions()

    self.width_estimator = estimator
    self.banner.param_card = estimator.banner.param_card
    return estimator
def compile(self):
    """Compile every standalone fortran directory required by the run.

    Only ``full_me`` is needed in pure helicity (pass-through) mode.
    """
    logger.info('Compiling code')
    modes = ["full_me"]
    if not self.options["onlyhelicity"]:
        modes += ["production_me", "decay_me"]
    for mode in modes:
        self.compile_fortran(self.path_me, mode=mode)
def compile_fortran(self, path_me, mode='production_me'):
""" Compile the fortran executables associated with the evalutation of the
matrix elements (production process)
Returns the path to the fortran executable
"""
base_dir = pjoin(path_me, mode,"SubProcesses")
list_prod=os.listdir(base_dir)
logger.debug("""Finalizing %s's """% mode)
# COMPILATION OF LIBRARY
misc.compile( cwd=pjoin(path_me, mode,"Source","DHELAS"), mode='fortran')
file_madspin=pjoin(MG5DIR, 'MadSpin', 'src', 'lha_read_ms.f')
shutil.copyfile(file_madspin, pjoin(path_me, mode,"Source","MODEL","lha_read.f" ))
if not self.options["use_old_dir"]:
misc.compile(arg=['clean'], cwd=pjoin(path_me, mode,"Source","MODEL"), mode='fortran')
misc.compile( cwd=pjoin(path_me, mode,"Source","MODEL"), mode='fortran')
file=pjoin(path_me, 'param_card.dat')
shutil.copyfile(file,pjoin(path_me,mode,"Cards","param_card.dat"))
# get all paths to matix elements
list_prod=[]
if mode == 'full_me':
for tag in self.all_ME:
for dico in self.all_ME[tag]['decays']:
full_path=dico['path']
if full_path not in list_prod: list_prod.append(full_path)
elif mode == 'production_me':
for tag in self.all_ME:
| |
#!/usr/bin/env python
"""
Electronic structure solver.
Type:
$ ./schroedinger.py
for usage and help.
"""
import os
import os.path as op
from optparse import OptionParser
from math import pi
from scipy.optimize import broyden3
try:
from scipy.optimize import bisect
except ImportError:
from scipy.optimize import bisection as bisect
from scipy.optimize.nonlin import excitingmixing
import sfepy
from sfepy.base.base import *
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.linalg import norm_l2_along_axis
from sfepy.base.log import Log
from sfepy.applications import SimpleApp
from sfepy.fem import MeshIO, ProblemDefinition, Materials
from sfepy.fem.evaluate import eval_equations
import sfepy.base.ioutils as io
from sfepy.solvers import Solver, eig
def guess_n_eigs(n_electron, n_eigs=None):
    """
    Guess how many eigenvalues (energies) to compute so that the smearing
    iteration converges.  An explicit ``n_eigs`` overrides the guess.
    """
    if n_eigs is not None:
        return n_eigs
    if n_electron <= 2:
        # tiny systems: one state per electron is enough
        return n_electron
    # 20% headroom above half-filling plus a fixed margin of 5 states
    return int(1.2 * (0.5 * n_electron + 5))
def smear(energies, e_f, width, exponent):
    """Occupation weights for states around the Fermi level ``e_f``.

    States below ``e_f - width`` get weight 2, states above ``e_f + width``
    get 0, and the transition follows a power law of order ``exponent``.
    """
    energies = nm.atleast_1d(energies)
    lo = e_f - width
    hi = e_f + width
    # zeros_like keeps the dtype of `energies` (matches original behaviour)
    val = nm.zeros_like(energies)

    full = energies <= lo
    val[full] = 2.0

    left = (energies > lo) & (energies <= e_f)
    val[left] = 2.0 - nm.power((energies[left] - lo) / width, exponent)

    right = (energies > e_f) & (energies < hi)
    val[right] = nm.power((hi - energies[right]) / width, exponent)

    return val
def setup_smearing( eigs, n_electron, width = 0.1, exponent = 2.0 ):
    """Find the Fermi level for `eigs` and return a tuned smearing function.

    Parameters:
        eigs: sorted eigenvalues (energies).
        n_electron: required total occupation.
        width, exponent: smearing parameters passed through to `smear()`.

    Returns (e_f, smear_tuned); `e_f` is None when no Fermi level could be
    bracketed within [eigs[0], eigs[-1]].
    """
    def objective( e_f ):
        # total occupation minus the required electron count; its root is
        # the Fermi energy
        return nm.sum( smear( eigs, e_f, width, exponent ) ) - n_electron

    try:
        e_f = bisect(objective, eigs[0], eigs[-1], xtol=1e-12)
    except (AssertionError, ValueError):
        # Old scipy asserted when f(a) and f(b) had the same sign; modern
        # scipy raises ValueError instead.  Either way no Fermi level could
        # be bracketed, which callers detect via e_f is None.
        e_f = None

    def smear_tuned( energies ):
        # closure over the Fermi level found above
        return smear( energies, e_f, width, exponent )

    return e_f, smear_tuned
def update_state_to_output( out, pb, vec, name, fill_value = None ):
    """Convert 'vec' to output for saving and insert it into 'out'.

    `pb.state_to_output()` returns a single-entry dict; the (sole) value is
    stored under `name` in `out`.
    """
    aux = pb.state_to_output( vec, fill_value )
    # `aux.keys()[0]` was Python-2 only (dict views are not indexable);
    # take the first key in an iteration-order-equivalent, portable way.
    key = next(iter(aux))
    out[name] = aux[key]
def wrap_function( function, args ):
    """Wrap a DFT iteration `function` for use in a fixed-point solver.

    `function(x, *args)` must return the 7-tuple
    (eigs, mtx_s_phi, vec_n, vec_v_h, v_ion_qp, v_xc_qp, v_hxc_qp).

    Returns (ncalls, times, function_wrapper, results):
        ncalls  - one-element list holding the call counter,
        times   - per-call wall-clock durations,
        function_wrapper - callable returning the fixed-point residual
                  v_hxc_qp.ravel() - x,
        results - list aliasing the latest 7-tuple returned by `function`.
    """
    ncalls = [0]
    times = []
    results = []
    def function_wrapper( x ):
        ncalls[0] += 1
        tt = time.time()
        # slice-assign so the caller's `results` reference stays valid
        results[:] = function( x, *args )
        eigs, mtx_s_phi, vec_n, vec_v_h, v_ion_qp, v_xc_qp, v_hxc_qp = results
        tt2 = time.time()
        if tt2 < tt:
            # sanity check on the clock; Python-3 compatible raise
            # (was the Python-2-only `raise RuntimeError, msg` form)
            raise RuntimeError('%f >= %f' % (tt, tt2))
        times.append( tt2 - tt )
        # residual of the fixed point V_hxc = F(V_hxc)
        return v_hxc_qp.ravel() - x
    return ncalls, times, function_wrapper, results
class SchroedingerApp( SimpleApp ):
def process_options( options ):
    """Application options setup. Sets default values for missing
    non-compulsory options."""
    get = options.get_default_attr

    # `eigen_solver` is compulsory -- the message is raised when missing.
    eigen_solver = get( 'eigen_solver', None,
                        'missing "eigensolver" in options!' )

    n_electron = get( 'n_electron', 5 )
    # Number of eigenvalues to compute; guessed from n_electron unless
    # given explicitly.
    n_eigs = guess_n_eigs( n_electron, n_eigs = get( 'n_eigs', None ) )
    # None -> save all.
    save_eig_vectors = get( 'save_eig_vectors', None )

    log_filename = get( 'log_filename', 'log.txt' )
    iter_fig_name = get( 'iter_fig_name', 'iterations.pdf' )

    # Called after DFT iteration, can do anything, no return value.
    iter_hook = get( 'iter_hook', None )

    # NOTE: packs *all* locals (including `get` and `options`) into the
    # returned Struct -- callers use the named option attributes only.
    return Struct( **locals() )
process_options = staticmethod( process_options )
def process_dft_options( options ):
    """Application DFT options setup. Sets default values for missing
    non-compulsory options."""
    get = options.get_default_attr

    # `dft_solver` is compulsory in DFT mode -- message raised when missing.
    dft_solver = get( 'dft_solver', None,
                      'missing "dft" in options!' )

    # NOTE: packs all locals (including `get` and `options`) into the Struct.
    return Struct( **locals() )
process_dft_options = staticmethod( process_dft_options )
def __init__( self, conf, options, output_prefix, **kwargs ):
    """Initialize the application; equations are set up later, per solve."""
    SimpleApp.__init__( self, conf, options, output_prefix,
                        init_equations = False )

    output_dir = self.problem.output_dir

    opts = self.app_options
    # Make the log/figure file names absolute within the output directory.
    opts.log_filename = op.join( output_dir, opts.log_filename )
    opts.iter_fig_name = op.join( output_dir, opts.iter_fig_name )
    # Result file for the mesh-based solution and the eigenvalue text file.
    self.mesh_results_name = op.join( opts.output_dir,
                                      self.problem.get_output_name() )
    self.eig_results_name = op.join( opts.output_dir,
                                     self.problem.ofn_trunk + '_eigs.txt' )
def setup_options( self ):
    """Parse application options; add DFT options only when requested."""
    SimpleApp.setup_options( self )
    opts = SchroedingerApp.process_options( self.conf.options )
    if self.options.dft:
        # DFT-specific options are only needed for the self-consistent run.
        opts += SchroedingerApp.process_dft_options( self.conf.options )
    self.app_options += opts

    funmod = self.conf.funmod

    # Resolve the optional per-iteration user hook by name in `funmod`.
    hook = self.app_options.iter_hook
    if hook is not None:
        hook = getattr( funmod, hook )
    self.iter_hook = hook
def call( self ):
    """Run the selected eigenvalue solver and save results.

    Returns the eigenvalue problem results (`evp`).
    """
    options = self.options

    if options.dft:
        if options.plot:
            from sfepy.base.plotutils import plt
            # disable plotting when matplotlib is not available
            options.plot = plt is not None

        # self-consistent (DFT) iteration
        evp = self.solve_eigen_problem_n()
    else:
        # single linear eigenvalue problem
        evp = self.solve_eigen_problem_1()

    output( "solution saved to %s" % self.problem.get_output_name() )
    output( "in %s" % self.app_options.output_dir )

    if self.post_process_hook_final is not None: # User postprocessing.
        self.post_process_hook_final( self.problem, evp = evp )

    return evp
def _interp_to_nodes(self, v_qp):
    """Interpolate a quadrature-point field `v_qp` to mesh nodes and
    return the resulting nodal vector."""
    scalar = self.problem.create_variables(['scalar'])['scalar']
    scalar.data_from_qp(v_qp, 'i1')
    return scalar()
def iterate(self, v_hxc_qp, eig_solver,
            mtx_a_equations, mtx_a_variables, mtx_b, log, file_output,
            n_electron=None):
    """One self-consistent DFT iteration.

    Given the current Hartree + exchange-correlation potential `v_hxc_qp`
    (in quadrature points), assemble and solve the Kohn-Sham eigenvalue
    problem, compute the new electron density, solve the Poisson equation
    for V_H, evaluate V_XC, and return the updated quantities.

    Returns (eigs, mtx_s_phi, vec_n, vec_v_h, v_ion_qp, v_xc_qp, v_hxc_qp).
    """
    from sfepy.physics import dft

    self.itercount += 1

    pb = self.problem
    opts = self.app_options

    n_electron = get_default( n_electron, opts.n_electron )

    sh = self.qp_shape

    # Reshape the flat solver vector into (n_cell * n_qp,) + value shape
    # so the material can consume it, then restore the original shape.
    v_hxc_qp = nm.array(v_hxc_qp, dtype=nm.float64)
    v_hxc_qp.shape = (sh[0] * sh[1],) + sh[2:]
    mat_v = Materials(mtx_a_equations.collect_materials())['mat_v']
    mat_v.set_extra_args(vhxc=v_hxc_qp)
    mat_v.time_update(None, pb.domain, mtx_a_equations)

    v_hxc_qp.shape = sh
    v_ion_qp = mat_v.get_data(('Omega', 'i1'), 0, 'V_ion')

    output( 'assembling lhs...' )
    tt = time.clock()
    mtx_a = eval_equations(mtx_a_equations, mtx_a_variables,
                           mode='weak', dw_mode='matrix')
    output( '...done in %.2f s' % (time.clock() - tt) )

    assert_( nm.alltrue( nm.isfinite( mtx_a.data ) ) )

    output( 'computing the Ax=Blx Kohn-Sham problem...' )
    tt = time.clock()
    eigs, mtx_s_phi = eig_solver( mtx_a, mtx_b,
                                  opts.n_eigs, eigenvectors = True )
    output( '...done in %.2f s' % (time.clock() - tt) )
    n_eigs_ok = len(eigs)

    # Occupation weights via smearing around the Fermi level.
    output( 'setting-up smearing...' )
    e_f, smear_tuned = setup_smearing( eigs, n_electron )
    output( 'Fermi energy:', e_f )
    if e_f is None:
        raise Exception("cannot find Fermi energy - exiting.")
    weights = smear_tuned(eigs)
    output( '...done' )

    if (weights[-1] > 1e-12):
        # the highest computed state is still occupied -> n_eigs too small
        output("last smearing weight is nonzero (%s eigs ok)!" % n_eigs_ok)

    output( "saving solutions, iter=%d..." % self.itercount )
    out = {}
    var_name = mtx_a_variables.get_names(kind='state')[0]
    for ii in xrange( n_eigs_ok ):
        vec_phi = mtx_a_variables.make_full_vec(mtx_s_phi[:,ii])
        update_state_to_output( out, pb, vec_phi, var_name+'%03d' % ii )
    name = op.join( opts.output_dir, "iter%d" % self.itercount )
    pb.save_state('.'.join((name, opts.output_format)), out=out)
    output( "...solutions saved" )

    # Electron density n(x) = sum_i w_i |phi_i(x)|^2, in quadrature points.
    output('computing total charge...')
    tt = time.clock()
    aux = pb.create_evaluable('dq_state_in_volume_qp.i1.Omega(Psi)')
    psi_equations, psi_variables = aux
    var = psi_variables['Psi']

    n_qp = nm.zeros_like(v_hxc_qp)
    for ii in xrange( n_eigs_ok ):
        vec_phi = mtx_a_variables.make_full_vec(mtx_s_phi[:,ii])
        var.data_from_any(vec_phi)

        phi_qp = eval_equations(psi_equations, psi_variables)
        n_qp += weights[ii] * (phi_qp ** 2)
    output('...done in %.2f s' % (time.clock() - tt))

    ap, vg = var.get_approximation(('i1', 'Omega', 0), 'Volume')

    # Total charge: integrate the density with the volume elements.
    det = vg.variable(1)
    charge = (det * n_qp).sum()
    ## Same as above.
    ## out = nm.zeros((n_qp.shape[0], 1, 1, 1), dtype=nm.float64)
    ## vg.integrate(out, n_qp)
    ## charge = out.sum()

    vec_n = self._interp_to_nodes(n_qp)

    # Cross-check: the same charge from the nodal density.
    var.data_from_any(vec_n)
    charge_n = pb.evaluate('di_volume_integrate.i1.Omega(Psi)', Psi=var)

    ##
    # V_xc in quadrature points.
    v_xc_qp = nm.zeros((nm.prod(self.qp_shape),), dtype=nm.float64)
    for ii, val in enumerate(n_qp.flat):
        # pointwise LDA-style exchange-correlation potential from the density
        v_xc_qp[ii] = dft.getvxc(val, 0)
    assert_(nm.isfinite(v_xc_qp).all())
    v_xc_qp.shape = self.qp_shape

    # Switch the problem to the Poisson (Hartree) equation.
    mat_key = mat_v.datas.keys()[0]
    pb.set_equations( pb.conf.equations_vh )
    pb.select_bcs( ebc_names = ['VHSurface'] )
    pb.update_materials()

    output( "solving Ax=b Poisson equation" )
    pb.materials['mat_n'].reset()
    pb.materials['mat_n'].set_all_data({mat_key : {0: {'N' : n_qp}}})
    vec_v_h = pb.solve()

    var.data_from_any(vec_v_h)
    v_h_qp = pb.evaluate('dq_state_in_volume_qp.i1.Omega(Psi)', Psi=var)

    # New total potential and the convergence measure for the outer solver.
    v_hxc_qp = v_h_qp + v_xc_qp
    norm = nla.norm(v_hxc_qp.ravel())
    dnorm = abs(norm - self.norm_v_hxc0)
    log(norm, max(dnorm,1e-20)) # logplot of pure 0 fails.
    file_output( '%d: F(x) = |VH + VXC|: %f, abs(F(x) - F(x_prev)): %e'\
                 % (self.itercount, norm, dnorm) )

    file_output("-"*70)
    file_output('Fermi energy:', e_f)
    file_output("----------------------------------------")
    file_output(" # | eigs | smearing")
    file_output("----|-----------------|-----------------")
    for ii in xrange( n_eigs_ok ):
        file_output("% 3d | %-15s | %-15s" % (ii+1, eigs[ii], weights[ii]))
    file_output("----------------------------------------")
    file_output("charge_qp: ", charge)
    file_output("charge_n: ", charge_n)
    file_output("----------------------------------------")
    file_output("|N|: ", nla.norm(n_qp.ravel()))
    file_output("|V_H|: ", nla.norm(v_h_qp.ravel()))
    file_output("|V_XC|: ", nla.norm(v_xc_qp.ravel()))
    file_output("|V_HXC|: ", norm)

    if self.iter_hook is not None: # User postprocessing.
        pb.select_bcs(ebc_names=['ZeroSurface'])
        mtx_phi = self.make_full(mtx_s_phi)

        data = Struct(iteration = self.itercount,
                      eigs = eigs, weights = weights,
                      mtx_s_phi = mtx_s_phi, mtx_phi = mtx_phi,
                      vec_n = vec_n, vec_v_h = vec_v_h,
                      n_qp = n_qp, v_ion_qp = v_ion_qp, v_h_qp = v_h_qp,
                      v_xc_qp = v_xc_qp, file_output = file_output)
        self.iter_hook(self.problem, data = data)

    file_output("-"*70)

    # remember the norm for the next iteration's dnorm
    self.norm_v_hxc0 = norm

    return eigs, mtx_s_phi, vec_n, vec_v_h, v_ion_qp, v_xc_qp, v_hxc_qp
def solve_eigen_problem_n( self ):
opts = self.app_options
pb = self.problem
dim = pb.domain.mesh.dim
pb.set_equations( pb.conf.equations )
pb.select_bcs( ebc_names = ['ZeroSurface'] )
output( 'assembling rhs...' )
tt = time.clock()
mtx_b = pb.evaluate(pb.conf.equations['rhs'], mode='weak',
auto_init=True, dw_mode='matrix')
output( '...done in %.2f s' % (time.clock() - tt) )
assert_( nm.alltrue( nm.isfinite( mtx_b.data ) ) | |
<gh_stars>0
# -*- coding: utf-8 -*-
"""Creates oemof energy system components.
Functions for the creation of oemof energy system objects from a
given set of object parameters.
---
Contributors:
- <NAME> - <EMAIL>
- <NAME> - <EMAIL>
"""
from oemof import solph
import logging
import os
import pandas as pd
from feedinlib import *
import demandlib.bdew as bdew
import datetime
import numpy
def buses(nodes_data, nodes):
    """Create bus objects from the scenario data.

    For every active row of the 'buses' sheet an oemof bus is created and
    appended to `nodes`; optional excess sinks and shortage sources are
    attached to it when the corresponding flags are set.

    Keyword arguments:
        nodes_data : dict
            -- parameters of the buses to be created; required columns:
               label, active, excess, shortage,
               shortage costs /(CU/kWh), excess costs /(CU/kWh)
        nodes : list
            -- list of components created before (can be empty)

    Returns:
        busd : dict
            -- all created buses, keyed by label
    """
    busd = {}

    for i, b in nodes_data['buses'].iterrows():
        # only rows marked "active" become components
        if not b['active']:
            continue

        bus = solph.Bus(label=b['label'])
        nodes.append(bus)
        busd[b['label']] = bus
        logging.info(' ' + 'Bus created: ' + b['label'])

        # optional sink absorbing surplus energy on this bus
        if b['excess']:
            excess_flow = solph.Flow(
                variable_costs=b['excess costs /(CU/kWh)'],
                emission_factor=b[
                    'variable excess constraint costs /(CU/kWh)'])
            nodes.append(
                solph.Sink(
                    label=b['label'] + '_excess',
                    inputs={busd[b['label']]: excess_flow}))

        # optional source covering unmet demand on this bus
        if b['shortage']:
            shortage_flow = solph.Flow(
                variable_costs=b['shortage costs /(CU/kWh)'],
                emission_factor=b[
                    'variable shortage constraint costs /(CU/kWh)'])
            nodes.append(
                solph.Source(
                    label=b['label'] + '_shortage',
                    outputs={busd[b['label']]: shortage_flow}))

    return busd
class Sources:
"""Creates source objects.
There are four options for labeling source objects to be created:
- 'commodity' : a source with flexible time series
- 'timeseries' : a source with predefined time series
- 'photovoltaic' : a photovoltaic component
- 'wind power' : a wind power component
"""
def create_source(self, so, timeseries_args):
    """Create an oemof source with a fixed or unfixed time series.

    Keyword arguments:
        so : dict
            -- parameters of the source; required keys:
               'label', 'output',
               'periodical costs /(CU/(kW a))',
               'periodical constraint costs /(CU/(kW a))',
               'min. investment capacity /(kW)',
               'max. investment capacity /(kW)',
               'existing capacity /(kW)',
               'Non-Convex Investment',
               'Fix Investment Costs /(CU/a)',
               'variable costs /(CU/kWh)',
               'variable constraint costs /(CU/kWh)'
        timeseries_args : dict
            -- either the 'fix' attribute or the 'min'/'max'
               attributes of the source flow
    """
    # investment settings shared by all source types
    investment = solph.Investment(
        ep_costs=so['periodical costs /(CU/(kW a))'],
        periodical_constraint_costs=so[
            'periodical constraint costs /(CU/(kW a))'],
        minimum=so['min. investment capacity /(kW)'],
        maximum=so['max. investment capacity /(kW)'],
        existing=so['existing capacity /(kW)'],
        nonconvex=so['Non-Convex Investment'] == 1,
        offset=so['Fix Investment Costs /(CU/a)'])

    # flow towards the output bus, carrying the time-series bounds
    flow = solph.Flow(
        investment=investment,
        variable_costs=so['variable costs /(CU/kWh)'],
        emission_factor=so['variable constraint costs /(CU/kWh)'],
        **timeseries_args)

    self.nodes_sources.append(
        solph.Source(
            label=so['label'],
            outputs={self.busd[so['output']]: flow}))
def commodity_source(self, so):
    """Create an oemof source with a fully flexible time series.

    The dispatch is only bounded by the installed capacity (no profile),
    which is expressed as min = 0, max = 1 of the nominal value.

    Keyword arguments:
        so : dict
            -- source parameters; at least 'label' must be present
               (plus everything `create_source` requires)
    """
    self.create_source(so, {'min': 0, 'max': 1})
    logging.info(' ' + 'Commodity Source created: ' + so['label'])
def timeseries_source(self, so, filepath):
    """Create an oemof source from a pre-defined time series.

    The scenario workbook's 'time_series' sheet provides one column per
    (label, attribute) pair: '<label>.fix' for fixed sources, or
    '<label>.min' / '<label>.max' for dispatchable ones.

    Keyword arguments:
        so : dict
            -- source parameters; at least 'label' and 'fixed' must be
               present (plus everything `create_source` requires)
        filepath : str
            -- path to the .xlsx scenario file containing a
               'time_series' sheet
    """
    time_series = pd.read_excel(filepath, sheet_name='time_series')

    fixed = so['fixed']
    if fixed == 1:
        # fixed source: output forced to the given profile
        args = {'fix': time_series[so['label'] + '.fix'].tolist()}
    elif fixed == 0:
        # unfixed source: profile only bounds the dispatch window
        args = {'min': time_series[so['label'] + '.min'].tolist(),
                'max': time_series[so['label'] + '.max'].tolist()}
    else:
        raise SystemError(so['label'] + " Error in fixed attribute")

    self.create_source(so, args)
    logging.info(' ' + 'Timeseries Source created: ' + so['label'])
def pv_source(self, so, my_weather_pandas_dataframe):
    """Create an oemof photovoltaic source object.

    Simulates the yield of a photovoltaic system using feedinlib and
    creates a source object with the yield as time series via the
    create_source method.

    Keyword arguments:
        so : dict
            -- source parameters; required keys:
               'label', 'fixed',
               'Azimuth (PV ONLY)', 'Surface Tilt (PV ONLY)',
               'Modul Model (PV ONLY)', 'Inverter Model (PV ONLY)',
               'Albedo (PV ONLY)', 'Latitude (PV ONLY)',
               'Longitude (PV ONLY)'
        my_weather_pandas_dataframe : pandas.DataFrame
            -- weather data with 'dirhi', 'dhi', 'temperature' and
               'windspeed' columns
    """
    # pv system parameters as required by feedinlib
    parameter_set = {
        'azimuth': so['Azimuth (PV ONLY)'],
        'tilt': so['Surface Tilt (PV ONLY)'],
        'module_name': so['Modul Model (PV ONLY)'],
        'inverter_name': so['Inverter Model (PV ONLY)'],
        'albedo': so['Albedo (PV ONLY)']}
    pv_module = powerplants.Photovoltaic(**parameter_set)

    # global horizontal irradiance = direct (dirhi) + diffuse (dhi)
    my_weather_pandas_dataframe['ghi'] = \
        (my_weather_pandas_dataframe.dirhi
         + my_weather_pandas_dataframe.dhi)

    # Rename columns to the feedinlib naming scheme.
    # BUG FIX: DataFrame.rename is NOT in-place; the original call
    # discarded the renamed frame, so feedinlib never saw
    # 'temp_air'/'v_wind'.  Assign the result instead.
    name_dc = {'temperature': 'temp_air', 'windspeed': 'v_wind'}
    my_weather_pandas_dataframe = \
        my_weather_pandas_dataframe.rename(columns=name_dc)

    # time series normed on 1 kW peak performance
    feedin = pv_module.feedin(
        weather=my_weather_pandas_dataframe,
        location=(so['Latitude (PV ONLY)'], so['Longitude (PV ONLY)']),
        scaling='peak_power')

    # Clamp to [0, 1] and replace NaN with 0 (solver requirements).
    # clip leaves NaN untouched, exactly like the former elementwise loop,
    # so fillna afterwards reproduces the original behaviour.
    feedin = feedin.clip(lower=0, upper=1).fillna(0)

    if so['fixed'] == 1:
        # fixed pv source: output forced to the simulated profile
        args = {'fix': feedin}
    elif so['fixed'] == 0:
        # unfixed pv source: profile is only an upper bound
        args = {'min': 0, 'max': feedin}
    else:
        raise SystemError(so['label'] + " Error in fixed attribute")

    self.create_source(so, args)
    logging.info(' ' + 'Source created: ' + so['label'])
def windpower_source(self, so, weather_df_wind):
"""Creates an oemof windpower source object.
Simulates the yield of a windturbine using feedinlib and
creates a source object with the yield as time series and the
use of the create_source method.
---
Keyword arguments:
so : obj:'dict'
-- dictionary containing all information for the
creation of an oemof source. At least the following
key-value-pairs have to be included:
- 'label'
- 'fixed'
- 'Turbine Model (Windpower ONLY)'
- 'Hub Height (Windpower ONLY)'
"""
# set up wind turbine using the wind turbine library.
# The turbine name must | |
"""
Get the is_deleted status and info for the container.
:returns: a tuple, in the form (info, is_deleted) info is a dict as
returned by get_info and is_deleted is a boolean.
"""
if self.db_file != ':memory:' and not os.path.exists(self.db_file):
return {}, True
info = self.get_info()
return info, self._is_deleted_info(**info)
def get_info(self):
    """
    Get global data for the container.

    Works against both current and pre-migration schemas: if the
    ``storage_policy_index`` column or the ``x_container_sync_point``
    columns are missing, the SELECT is retried once with literal
    defaults (0 and -1 respectively) substituted for them.

    :returns: dict with keys: account, container, created_at,
              put_timestamp, delete_timestamp, status_changed_at,
              object_count, bytes_used, reported_put_timestamp,
              reported_delete_timestamp, reported_object_count,
              reported_bytes_used, hash, id, x_container_sync_point1,
              x_container_sync_point2, and storage_policy_index.
    """
    self._commit_puts_stale_ok()
    with self.get() as conn:
        data = None
        # Column expressions that get swapped for defaults when the
        # schema predates the corresponding migration.
        trailing_sync = 'x_container_sync_point1, x_container_sync_point2'
        trailing_pol = 'storage_policy_index'
        errors = set()
        while not data:
            try:
                data = conn.execute(('''
                    SELECT account, container, created_at, put_timestamp,
                        delete_timestamp, status_changed_at,
                        object_count, bytes_used,
                        reported_put_timestamp, reported_delete_timestamp,
                        reported_object_count, reported_bytes_used, hash,
                        id, %s, %s
                        FROM container_stat
                ''') % (trailing_sync, trailing_pol)).fetchone()
            except sqlite3.OperationalError as err:
                err_msg = str(err)
                if err_msg in errors:
                    # only attempt migration once
                    raise
                errors.add(err_msg)
                if 'no such column: storage_policy_index' in err_msg:
                    trailing_pol = '0 AS storage_policy_index'
                elif 'no such column: x_container_sync_point' in err_msg:
                    trailing_sync = '-1 AS x_container_sync_point1, ' \
                        '-1 AS x_container_sync_point2'
                else:
                    raise
        data = dict(data)
        # populate instance cache
        self._storage_policy_index = data['storage_policy_index']
        self.account = data['account']
        self.container = data['container']
        return data
def set_x_container_sync_points(self, sync_point1, sync_point2):
    """Persist both container sync points, migrating the schema on demand."""
    with self.get() as conn:
        try:
            self._set_x_container_sync_points(conn, sync_point1,
                                              sync_point2)
        except sqlite3.OperationalError as op_err:
            # Pre-sync schemas lack the sync-point columns; anything
            # else is a genuine error and must propagate.
            if 'no such column: x_container_sync_point' not in str(op_err):
                raise
            self._migrate_add_container_sync_points(conn)
            # Retry exactly once against the migrated schema.
            self._set_x_container_sync_points(conn, sync_point1,
                                              sync_point2)
        conn.commit()
def _set_x_container_sync_points(self, conn, sync_point1, sync_point2):
if sync_point1 is not None and sync_point2 is not None:
conn.execute('''
UPDATE container_stat
SET x_container_sync_point1 = ?,
x_container_sync_point2 = ?
''', (sync_point1, sync_point2))
elif sync_point1 is not None:
conn.execute('''
UPDATE container_stat
SET x_container_sync_point1 = ?
''', (sync_point1,))
elif sync_point2 is not None:
conn.execute('''
UPDATE container_stat
SET x_container_sync_point2 = ?
''', (sync_point2,))
def get_policy_stats(self):
    """Return {storage_policy_index: {'object_count': ..., 'bytes_used': ...}}."""
    with self.get() as conn:
        try:
            rows = conn.execute('''
                SELECT storage_policy_index, object_count, bytes_used
                FROM policy_stat
            ''').fetchall()
        except sqlite3.OperationalError as err:
            unmigrated = ("no such column: storage_policy_index",
                          "no such table: policy_stat")
            if not any(msg in str(err) for msg in unmigrated):
                raise
            # Pre-policy schema: report the container totals as policy 0.
            rows = conn.execute('''
                SELECT 0 as storage_policy_index, object_count, bytes_used
                FROM container_stat
            ''').fetchall()
        stats_by_policy = {}
        for row in rows:
            entry = dict(row)
            stats_by_policy[entry.pop('storage_policy_index')] = entry
        return stats_by_policy
def has_multiple_policies(self):
    """Return True when policy_stat tracks more than one storage policy."""
    with self.get() as conn:
        try:
            count_row = conn.execute('''
                SELECT count(storage_policy_index)
                FROM policy_stat
            ''').fetchone()
        except sqlite3.OperationalError as err:
            if 'no such table: policy_stat' not in str(err):
                raise
            # Legacy schema: no policy_stat table means at most one policy.
            return False
        # A single row (or none) means zero or one policy.
        return bool(count_row and count_row[0] > 1)
def set_storage_policy_index(self, policy_index, timestamp=None):
    """
    Update the container_stat policy_index and status_changed_at.

    :param policy_index: storage policy index to record
    :param timestamp: internal-format timestamp to use; defaults to the
                      current time
    """
    if timestamp is None:
        timestamp = Timestamp(time.time()).internal

    def _setit(conn):
        # Ensure a policy_stat row exists for this policy, then switch
        # container_stat over to it; the WHERE clause makes this a
        # no-op (and leaves status_changed_at alone) if unchanged.
        conn.execute('''
            INSERT OR IGNORE INTO policy_stat (storage_policy_index)
            VALUES (?)
        ''', (policy_index,))
        conn.execute('''
            UPDATE container_stat
            SET storage_policy_index = ?,
                status_changed_at = MAX(?, status_changed_at)
            WHERE storage_policy_index <> ?
        ''', (policy_index, timestamp, policy_index))
        conn.commit()

    with self.get() as conn:
        try:
            _setit(conn)
        except sqlite3.OperationalError as err:
            if not any(msg in str(err) for msg in (
                    "no such column: storage_policy_index",
                    "no such table: policy_stat")):
                raise
            # Legacy schema: add storage-policy support, then retry once.
            self._migrate_add_storage_policy(conn)
            _setit(conn)
    # keep the instance cache in sync with the database
    self._storage_policy_index = policy_index
def reported(self, put_timestamp, delete_timestamp, object_count,
             bytes_used):
    """
    Update reported stats, available with container's `get_info`.

    :param put_timestamp: put_timestamp to update
    :param delete_timestamp: delete_timestamp to update
    :param object_count: object_count to update
    :param bytes_used: bytes_used to update
    """
    reported_values = (put_timestamp, delete_timestamp,
                       object_count, bytes_used)
    with self.get() as conn:
        conn.execute('''
            UPDATE container_stat
            SET reported_put_timestamp = ?, reported_delete_timestamp = ?,
                reported_object_count = ?, reported_bytes_used = ?
        ''', reported_values)
        conn.commit()
def list_objects_iter(self, limit, marker, end_marker, prefix, delimiter,
                      path=None, storage_policy_index=0, reverse=False):
    """
    Get a list of objects sorted by name starting at marker onward, up
    to limit entries.  Entries will begin with the prefix and will not
    have the delimiter after the prefix.

    :param limit: maximum number of entries to get
    :param marker: marker query
    :param end_marker: end marker query
    :param prefix: prefix query
    :param delimiter: delimiter for query
    :param path: if defined, will set the prefix and delimiter based on
                 the path
    :param storage_policy_index: storage policy index to filter on
                                 (falls back to no filter on legacy schemas)
    :param reverse: reverse the result order.

    :returns: list of tuples of (name, created_at, size, content_type,
              etag)
    """
    delim_force_gte = False
    (marker, end_marker, prefix, delimiter, path) = utf8encode(
        marker, end_marker, prefix, delimiter, path)
    self._commit_puts_stale_ok()
    if reverse:
        # Reverse the markers if we are reversing the listing.
        marker, end_marker = end_marker, marker
    if path is not None:
        prefix = path
        if path:
            prefix = path = path.rstrip('/') + '/'
        delimiter = '/'
    elif delimiter and not prefix:
        prefix = ''
    if prefix:
        # smallest name strictly greater than every name with this prefix
        end_prefix = prefix[:-1] + chr(ord(prefix[-1]) + 1)
    orig_marker = marker
    with self.get() as conn:
        results = []
        # Each pass issues one SELECT; delimiter handling may advance
        # the marker past a whole "subdirectory" and loop again.
        while len(results) < limit:
            query = '''SELECT name, created_at, size, content_type, etag
                       FROM object WHERE'''
            query_args = []
            if end_marker and (not prefix or end_marker < end_prefix):
                query += ' name < ? AND'
                query_args.append(end_marker)
            elif prefix:
                query += ' name < ? AND'
                query_args.append(end_prefix)

            if delim_force_gte:
                query += ' name >= ? AND'
                query_args.append(marker)
                # Always set back to False
                delim_force_gte = False
            elif marker and marker >= prefix:
                query += ' name > ? AND'
                query_args.append(marker)
            elif prefix:
                query += ' name >= ? AND'
                query_args.append(prefix)
            if self.get_db_version(conn) < 1:
                # the '+' forces SQLite not to use the (name, deleted) index
                query += ' +deleted = 0'
            else:
                query += ' deleted = 0'
            orig_tail_query = '''
                ORDER BY name %s LIMIT ?
            ''' % ('DESC' if reverse else '')
            orig_tail_args = [limit - len(results)]
            # storage policy filter
            policy_tail_query = '''
                AND storage_policy_index = ?
            ''' + orig_tail_query
            policy_tail_args = [storage_policy_index] + orig_tail_args
            tail_query, tail_args = \
                policy_tail_query, policy_tail_args
            try:
                curs = conn.execute(query + tail_query,
                                    tuple(query_args + tail_args))
            except sqlite3.OperationalError as err:
                if 'no such column: storage_policy_index' not in str(err):
                    raise
                # legacy schema: retry without the policy filter
                tail_query, tail_args = \
                    orig_tail_query, orig_tail_args
                curs = conn.execute(query + tail_query,
                                    tuple(query_args + tail_args))
            curs.row_factory = None

            # Delimiters without a prefix is ignored, further if there
            # is no delimiter then we can simply return the result as
            # prefixes are now handled in the SQL statement.
            if prefix is None or not delimiter:
                return [r for r in curs]

            # We have a delimiter and a prefix (possibly empty string) to
            # handle
            rowcount = 0
            for row in curs:
                rowcount += 1
                name = row[0]
                if reverse:
                    end_marker = name
                else:
                    marker = name

                if len(results) >= limit:
                    curs.close()
                    return results
                end = name.find(delimiter, len(prefix))
                if path is not None:
                    # in path mode, skip the placeholder for the path itself
                    # and stop at the first name nested deeper than one level
                    if name == path:
                        continue
                    if end >= 0 and len(name) > end + len(delimiter):
                        if reverse:
                            end_marker = name[:end + 1]
                        else:
                            marker = name[:end] + chr(ord(delimiter) + 1)
                        curs.close()
                        break
                elif end > 0:
                    # delimiter found: emit a synthetic "directory" row and
                    # restart the query past the whole delimited group
                    if reverse:
                        end_marker = name[:end + 1]
                    else:
                        marker = name[:end] + chr(ord(delimiter) + 1)
                        # we want result to be inclusive of delim+1
                        delim_force_gte = True
                    dir_name = name[:end + 1]
                    if dir_name != orig_marker:
                        results.append([dir_name, '0', 0, None, ''])
                    curs.close()
                    break
                results.append(row)
            if not rowcount:
                # the SELECT returned nothing; listing is exhausted
                break
        return results
def merge_items(self, item_list, source=None):
"""
Merge items into the object table.
:param item_list: list of dictionaries of {'name', 'created_at',
'size', 'content_type', 'etag', 'deleted',
'storage_policy_index'}
:param source: if defined, update incoming_sync with the source
"""
for item in item_list:
if isinstance(item['name'], six.text_type):
item['name'] = item['name'].encode('utf-8')
def _really_merge_items(conn):
curs = conn.cursor()
if self.get_db_version(conn) >= 1:
query_mod = ' deleted IN (0, 1) AND '
else:
query_mod = ''
curs.execute('BEGIN IMMEDIATE')
# Get created_at times for objects in item_list that already exist.
# We must chunk it up to avoid sqlite's limit of 999 args.
created_at = {}
for offset in range(0, len(item_list), SQLITE_ARG_LIMIT):
chunk = [rec['name'] for rec in
item_list[offset:offset + SQLITE_ARG_LIMIT]]
created_at.update(
((rec[0], rec[1]), rec[2]) for rec in curs.execute(
'SELECT name, storage_policy_index, created_at '
'FROM object WHERE ' + query_mod + ' name IN (%s)' %
','.join('?' * len(chunk)), chunk))
# Sort item_list into things that need adding and deleting, based
# on results of created_at query.
to_delete = {}
to_add = {}
for item in item_list:
item.setdefault('storage_policy_index', 0) # legacy
item_ident = (item['name'], item['storage_policy_index'])
if created_at.get(item_ident) < item['created_at']:
if item_ident in created_at: # | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest2 as unittest
import json
import datetime as dt
import uuid
import warnings
from nose.tools import * # PEP8 asserts
import pytz
from marshmallow import Serializer, fields, validate, pprint, utils
from marshmallow.exceptions import MarshallingError
from marshmallow.compat import LINUX, unicode, PY26, binary_type, total_seconds
if PY26:
    # Python 2.6 backports: nose does not provide assert_in/assert_not_in
    # there, so define equivalent module-level helpers with matching names.
    def assert_in(obj, cont):
        assert obj in cont, "{0} not in {1}".format(obj, cont)

    def assert_not_in(obj, cont):
        assert obj not in cont, "{0} found in {1}".format(obj, cont)
# Timezone used to build the TZ-aware fixture datetimes below.
central = pytz.timezone("US/Central")
##### Models #####
class User(object):
    """Plain model object serving as the serialization target in these tests.

    Most attributes are fixed fixtures (datetimes, uuid, collections); the
    constructor arguments override the rest.
    """

    SPECIES = "Homo sapiens"

    def __init__(self, name, age=0, id_=None, homepage=None,
                 email=None, registered=True, time_registered=None,
                 birthdate=None, balance=100, sex='male', employer=None):
        self.name = name
        self.age = age
        # A naive datetime
        self.created = dt.datetime(2013, 11, 10, 14, 20, 58)
        # A TZ-aware datetime
        self.updated = central.localize(dt.datetime(2013, 11, 10, 14, 20, 58), is_dst=False)
        self.id = id_
        self.homepage = homepage
        self.email = email
        self.balance = balance
        # BUG FIX: honor the ``registered`` argument instead of hardcoding
        # True (the parameter was previously accepted but silently ignored).
        self.registered = registered
        self.hair_colors = ['black', 'brown', 'blond', 'redhead']
        self.sex_choices = ('male', 'female')
        self.finger_count = 10
        self.uid = uuid.uuid1()
        self.time_registered = time_registered or dt.time(1, 23, 45, 6789)
        self.birthdate = birthdate or dt.date(2013, 1, 23)
        self.sex = sex
        self.employer = employer
        self.relatives = []

    @property
    def since_created(self):
        """Timedelta between a fixed reference date and ``created``."""
        return dt.datetime(2013, 11, 24) - self.created

    def __repr__(self):
        return "<User {0}>".format(self.name)
class Blog(object):
    """Simple blog model: a title, an author, and optional metadata."""

    def __init__(self, title, user, collaborators=None, categories=None, id_=None):
        self.id = id_
        self.title = title
        self.user = user
        # List/tuple of users
        self.collaborators = collaborators
        self.categories = categories
###### Serializers #####
class Uppercased(fields.Raw):
    '''Custom field formatting example: serializes the value upper-cased.'''

    def format(self, value):
        shouted = value.upper()
        return shouted
class UserSerializer(Serializer):
    """Serializer that explicitly declares a field for every User attribute,
    covering each field type exercised by the tests below."""
    name = fields.String()
    age = fields.Float()
    created = fields.DateTime()
    created_formatted = fields.DateTime(format="%Y-%m-%d", attribute="created")
    created_iso = fields.DateTime(format="iso", attribute="created")
    updated = fields.DateTime()
    updated_local = fields.LocalDateTime(attribute="updated")
    species = fields.String(attribute="SPECIES")
    id = fields.String(default="no-id")
    uppername = Uppercased(attribute='name')
    homepage = fields.Url()
    email = fields.Email()
    balance = fields.Price()
    is_old = fields.Method("get_is_old")
    lowername = fields.Function(lambda obj: obj.name.lower())
    registered = fields.Boolean()
    hair_colors = fields.List(fields.Raw)
    sex_choices = fields.List(fields.Raw)
    finger_count = fields.Integer()
    uid = fields.UUID()
    time_registered = fields.Time()
    birthdate = fields.Date()
    since_created = fields.TimeDelta()
    sex = fields.Select(['male', 'female'])

    def get_is_old(self, obj):
        """Backer for the ``is_old`` Method field: True once age exceeds 80."""
        try:
            return obj.age > 80
        except TypeError as te:
            # non-numeric age -> surface as a marshalling error
            raise MarshallingError(te)
class UserMetaSerializer(Serializer):
    '''The equivalent of the UserSerializer, using the ``fields`` option.'''
    uppername = Uppercased(attribute='name')
    balance = fields.Price()
    is_old = fields.Method("get_is_old")
    lowername = fields.Function(lambda obj: obj.name.lower())
    updated_local = fields.LocalDateTime(attribute="updated")
    species = fields.String(attribute="SPECIES")
    homepage = fields.Url()
    email = fields.Email()

    def get_is_old(self, obj):
        """Backer for the ``is_old`` Method field: True once age exceeds 80."""
        try:
            return obj.age > 80
        except TypeError as te:
            raise MarshallingError(te)

    class Meta:
        # Names not declared above are picked up from the object by name.
        fields = ('name', 'age', 'created', 'updated', 'id', 'homepage',
                  'uppername', 'email', 'balance', 'is_old', 'lowername',
                  "updated_local", "species", 'registered', 'hair_colors',
                  'sex_choices', "finger_count", 'uid', 'time_registered',
                  'birthdate', 'since_created')
class UserExcludeSerializer(UserSerializer):
    """UserSerializer with fields dropped via ``Meta.exclude`` (which, per
    the last entry's name, is expected to tolerate unknown field names)."""
    class Meta:
        exclude = ("created", "updated", "field_not_found_but_thats_ok")
class UserAdditionalSerializer(Serializer):
    """Serializer whose extra fields are pulled in via ``Meta.additional``."""
    lowername = fields.Function(lambda obj: obj.name.lower())

    class Meta:
        additional = ("name", "age", "created", "email")
class UserIntSerializer(UserSerializer):
    """UserSerializer variant that serializes ``age`` as an integer."""
    age = fields.Integer()
class UserFixedSerializer(UserSerializer):
    """UserSerializer variant formatting ``age`` with two fixed decimals."""
    age = fields.Fixed(decimals=2)
class UserFloatStringSerializer(UserSerializer):
    """UserSerializer variant emitting ``age`` as a float-formatted string."""
    age = fields.Float(as_string=True)
class UserDecimalSerializer(UserSerializer):
    """UserSerializer variant serializing ``age`` as an Arbitrary field."""
    age = fields.Arbitrary()
class ExtendedUserSerializer(UserSerializer):
    """UserSerializer with ``is_old`` overridden by a plain Boolean field."""
    is_old = fields.Boolean()
class UserRelativeUrlSerializer(UserSerializer):
    """UserSerializer whose ``homepage`` URL field accepts relative URLs."""
    homepage = fields.Url(relative=True)
class BlogSerializer(Serializer):
    """Serializer for Blog; nests UserSerializer for author/collaborators."""
    title = fields.String()
    user = fields.Nested(UserSerializer)
    collaborators = fields.Nested(UserSerializer, many=True)
    categories = fields.List(fields.String)
    id = fields.String()
class BlogUserMetaSerializer(Serializer):
    """Blog serializer nesting the Meta-driven user serializer (note the
    nested serializer is passed as an instance for ``user``)."""
    user = fields.Nested(UserMetaSerializer())
    collaborators = fields.Nested(UserMetaSerializer, many=True)
class BlogSerializerMeta(Serializer):
    '''Same as BlogSerializer but using ``fields`` options.'''
    user = fields.Nested(UserSerializer)
    collaborators = fields.Nested(UserSerializer, many=True)

    class Meta:
        # title, categories and id are introspected from the object
        fields = ('title', 'user', 'collaborators', 'categories', "id")
class BlogSerializerOnly(Serializer):
    """Blog serializer restricting nested collaborators to the ``id`` field."""
    title = fields.String()
    user = fields.Nested(UserSerializer)
    collaborators = fields.Nested(UserSerializer, only=("id", ), many=True)
class BlogSerializerExclude(BlogSerializer):
    """BlogSerializer excluding two fields from the nested user."""
    user = fields.Nested(UserSerializer, exclude=("uppername", "species"))
class BlogSerializerOnlyExclude(BlogSerializer):
    """BlogSerializer passing both ``only`` and ``exclude`` for the same
    nested field name (exercises their interaction)."""
    user = fields.Nested(UserSerializer, only=("name", ), exclude=("name", "species"))
class BlogSerializerPrefixedUser(BlogSerializer):
    """BlogSerializer whose nested user keys are prefixed with ``usr_``."""
    user = fields.Nested(UserSerializer(prefix="usr_"))
    collaborators = fields.Nested(UserSerializer(prefix="usr_"), many=True)
##### The Tests #####
class TestSerializer(unittest.TestCase):
    """End-to-end behavior tests for the serializers declared above:
    field formatting, validation/errors, many=True handling, and the
    only/exclude/prefix/strict options."""

    def setUp(self):
        # shared fixture: one user and its explicit serializer
        self.obj = User(name="Monty", age=42.3, homepage="http://monty.python.org/")
        self.serialized = UserSerializer(self.obj)

    def test_serializing_basic_object(self):
        assert_equal(self.serialized.data['name'], "Monty")
        assert_almost_equal(self.serialized.data['age'], 42.3)
        assert_true(self.serialized.data['registered'])

    def test_serializing_none(self):
        s = UserSerializer(None)
        assert_equal(s.data['name'], '')
        assert_equal(s.data['age'], 0)

    def test_fields_are_copies(self):
        s = UserSerializer(User("Monty", age=42))
        s2 = UserSerializer(User("Monty", age=43))
        assert_true(s.fields is not s2.fields)

    def test_json(self):
        json_data = self.serialized.json
        expected = binary_type(json.dumps(self.serialized.data).encode("utf-8"))
        assert_equal(json_data, expected)

    def test_to_json_returns_bytestring(self):
        assert_true(isinstance(self.serialized.to_json(), binary_type))

    def test_naive_datetime_field(self):
        assert_equal(self.serialized.data['created'],
                     'Sun, 10 Nov 2013 14:20:58 -0000')

    def test_datetime_formatted_field(self):
        assert_equal(self.serialized.data['created_formatted'],
                     self.obj.created.strftime("%Y-%m-%d"))

    def test_datetime_iso_field(self):
        assert_equal(self.serialized.data['created_iso'],
                     utils.isoformat(self.obj.created))

    def test_tz_datetime_field(self):
        # Datetime is corrected back to GMT
        assert_equal(self.serialized.data['updated'],
                     'Sun, 10 Nov 2013 20:20:58 -0000')

    def test_local_datetime_field(self):
        assert_equal(self.serialized.data['updated_local'],
                     'Sun, 10 Nov 2013 14:20:58 -0600')

    def test_class_variable(self):
        assert_equal(self.serialized.data['species'], 'Homo sapiens')

    def test_serialize_many(self):
        user1 = User(name="Mick", age=123)
        user2 = User(name="Keith", age=456)
        users = [user1, user2]
        serialized = UserSerializer(users, many=True)
        assert_equal(len(serialized.data), 2)
        assert_equal(serialized.data[0]['name'], "Mick")
        assert_equal(serialized.data[1]['name'], "Keith")

    def test_no_implicit_list_handling(self):
        users = [User(name='Mick'), User(name='Keith')]
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            assert_raises(TypeError, lambda: UserSerializer(users))
        assert_true(issubclass(w[-1].category, DeprecationWarning))

    def test_serialize_many_with_meta(self):
        user1 = User(name="Mick", age=123)
        user2 = User(name="Keith", age=456)
        users = [user1, user2]
        s = UserMetaSerializer(users, many=True)
        assert_equal(len(s.data), 2)
        assert_equal(s.data[0]['name'], "Mick")
        assert_equal(s.data[0]['created'], utils.rfcformat(user1.created))
        assert_equal(s.data[1]['name'], "Keith")

    def test_inheriting_serializer(self):
        serialized = ExtendedUserSerializer(self.obj)
        assert_equal(serialized.data['name'], self.obj.name)
        assert_false(serialized.data['is_old'])

    def test_custom_field(self):
        assert_equal(self.serialized.data['uppername'], "MONTY")

    def test_url_field(self):
        assert_equal(self.serialized.data['homepage'], "http://monty.python.org/")

    def test_url_field_validation(self):
        invalid = User("John", age=42, homepage="/john")
        s = UserSerializer(invalid)
        assert_false(s.is_valid(["homepage"]))

    def test_relative_url_field(self):
        u = User("John", age=42, homepage="/john")
        serialized = UserRelativeUrlSerializer(u)
        assert_true(serialized.is_valid())

    def test_stores_invalid_url_error(self):
        user = User(name="<NAME>", homepage="www.foo.com")
        serialized = UserSerializer(user)
        assert_in("homepage", serialized.errors)
        assert_equal(serialized.errors['homepage'],
                     '"www.foo.com" is not a valid URL. Did you mean: "http://www.foo.com"?')

    def test_default(self):
        user = User("John")  # No ID set
        serialized = UserSerializer(user)
        assert_equal(serialized.data['id'], "no-id")

    def test_email_field(self):
        u = User("John", email="<EMAIL>")
        s = UserSerializer(u)
        assert_equal(s.data['email'], "<EMAIL>")

    def test_stored_invalid_email(self):
        u = User("John", email="<EMAIL>")
        s = UserSerializer(u)
        assert_in("email", s.errors)
        assert_equal(s.errors['email'], '"johnexample.com" is not a valid email address.')

    def test_integer_field(self):
        u = User("John", age=42.3)
        serialized = UserIntSerializer(u)
        assert_equal(type(serialized.data['age']), int)
        assert_equal(serialized.data['age'], 42)

    def test_fixed_field(self):
        u = User("John", age=42.3)
        serialized = UserFixedSerializer(u)
        assert_equal(serialized.data['age'], "42.30")

    def test_as_string(self):
        u = User("John", age=42.3)
        serialized = UserFloatStringSerializer(u)
        assert_equal(type(serialized.data['age']), str)
        assert_almost_equal(float(serialized.data['age']), 42.3)

    def test_decimal_field(self):
        u = User("John", age=42.3)
        s = UserDecimalSerializer(u)
        assert_equal(type(s.data['age']), unicode)
        assert_almost_equal(float(s.data['age']), 42.3)

    def test_price_field(self):
        assert_equal(self.serialized.data['balance'], "100.00")

    def test_validate(self):
        valid = User("Joe", email="<EMAIL>")
        invalid = User("John", email="<EMAIL>")
        assert_true(UserSerializer(valid).is_valid())
        assert_false(UserSerializer(invalid).is_valid())

    def test_validate_field(self):
        invalid = User("John", email="<EMAIL>")
        assert_true(UserSerializer(invalid).is_valid(["name"]))
        assert_false(UserSerializer(invalid).is_valid(["email"]))

    def test_validating_nonexistent_field_raises_error(self):
        assert_raises(KeyError, lambda: self.serialized.is_valid(["foobar"]))

    def test_fields_param_must_be_list_or_tuple(self):
        invalid = User("John", email="<EMAIL>")
        assert_raises(ValueError, lambda: UserSerializer(invalid).is_valid("name"))

    def test_extra(self):
        user = User("Joe", email="<EMAIL>")
        s = UserSerializer(user, extra={"fav_color": "blue"})
        assert_equal(s.data['fav_color'], "blue")

    def test_method_field(self):
        assert_false(self.serialized.data['is_old'])
        u = User("Joe", age=81)
        assert_true(UserSerializer(u).data['is_old'])

    def test_function_field(self):
        assert_equal(self.serialized.data['lowername'], self.obj.name.lower())

    def test_prefix(self):
        s = UserSerializer(self.obj, prefix="usr_")
        assert_equal(s.data['usr_name'], self.obj.name)

    def test_fields_must_be_declared_as_instances(self):
        class BadUserSerializer(Serializer):
            name = fields.String
        assert_raises(TypeError, lambda: BadUserSerializer(self.obj))

    def test_serializing_generator(self):
        users = [User("Foo"), User("Bar")]
        user_gen = (u for u in users)
        s = UserSerializer(user_gen, many=True)
        assert_equal(len(s.data), 2)
        assert_equal(s.data[0], UserSerializer(users[0]).data)

    def test_serializing_generator_with_meta_fields(self):
        users = [User("Foo"), User("Bar")]
        user_gen = (u for u in users)
        s = UserMetaSerializer(user_gen, many=True)
        assert_equal(len(s.data), 2)
        assert_equal(s.data[0], UserMetaSerializer(users[0]).data)

    def test_serializing_empty_list_returns_empty_list(self):
        assert_equal(UserSerializer([], many=True).data, [])
        assert_equal(UserMetaSerializer([], many=True).data, [])

    def test_serializing_dict(self):
        user = {"name": "foo", "email": "foo", "age": 42.3}
        s = UserSerializer(user)
        assert_equal(s.data['name'], "foo")
        assert_equal(s.data['age'], 42.3)
        assert_false(s.is_valid(['email']))

    def test_exclude_in_init(self):
        s = UserSerializer(self.obj, exclude=('age', 'homepage'))
        assert_not_in('homepage', s.data)
        assert_not_in('age', s.data)
        assert_in('name', s.data)

    def test_only_in_init(self):
        s = UserSerializer(self.obj, only=('name', 'age'))
        assert_not_in('homepage', s.data)
        assert_in('name', s.data)
        assert_in('age', s.data)

    def test_invalid_only_param(self):
        assert_raises(AttributeError,
                      lambda: UserSerializer(self.obj, only=("_invalid", "name")))

    def test_strict_init(self):
        invalid = User("Foo", email="foo.com")
        assert_raises(MarshallingError, lambda: UserSerializer(invalid, strict=True))

    def test_strict_meta_option(self):
        class StrictUserSerializer(UserSerializer):
            class Meta:
                strict = True
        invalid = User("Foo", email="foo.com")
        assert_raises(MarshallingError, lambda: StrictUserSerializer(invalid))

    def test_can_serialize_uuid(self):
        assert_equal(self.serialized.data['uid'], str(self.obj.uid))

    def test_can_serialize_time(self):
        assert_equal(self.serialized.data['time_registered'],
                     self.obj.time_registered.isoformat()[:12])

    def test_invalid_time(self):
        u = User('Joe', time_registered='foo')
        s = UserSerializer(u)
        assert_false(s.is_valid(['time_registered']))
        assert_equal(s.errors['time_registered'],
                     "'foo' cannot be formatted as a time.")

    def test_invalid_date(self):
        u = User("Joe", birthdate='foo')
        s = UserSerializer(u)
        assert_false(s.is_valid(['birthdate']))
        assert_equal(s.errors['birthdate'],
                     "'foo' cannot be formatted as a date.")

    def test_invalid_selection(self):
        u = User('Jonhy')
        u.sex = 'hybrid'
        s = UserSerializer(u)
        assert_false(s.is_valid(['sex']))
        assert_equal(s.errors['sex'],
                     "'hybrid' is not a valid choice for this field.")
def test_custom_error_message():
    """Custom ``error`` strings replace the default validation messages."""
    class ErrorSerializer(Serializer):
        email = fields.Email(error="Invalid email")
        homepage = fields.Url(error="Bad homepage.")
        balance = fields.Fixed(error="Bad balance.")

    bad_user = User("Joe", email="joe.net", homepage="<EMAIL>", balance="blah")
    serialized = ErrorSerializer(bad_user)
    assert_false(serialized.is_valid())
    assert_equal(serialized.errors['email'], "Invalid email")
    assert_equal(serialized.errors['homepage'], "Bad homepage.")
    assert_equal(serialized.errors['balance'], "Bad balance.")
def test_error_raised_if_fields_option_is_not_list():
    """A non-iterable ``Meta.fields`` option must raise ValueError."""
    class BadSerializer(Serializer):
        name = fields.String()

        class Meta:
            fields = 'name'

    joe = User('Joe')
    assert_raises(ValueError, lambda: BadSerializer(joe))
def test_error_raised_if_additional_option_is_not_list():
    """A non-iterable ``Meta.additional`` option must raise ValueError."""
    class BadSerializer(Serializer):
        name = fields.String()

        class Meta:
            additional = 'email'

    joe = User('Joe')
    assert_raises(ValueError, lambda: BadSerializer(joe))
class TestMetaOptions(unittest.TestCase):
def setUp(self):
self.obj = User(name="Monty", age=42.3, homepage="http://monty.python.org/")
self.serialized = UserSerializer(self.obj)
def test_meta_serializer_fields(self):
u = User("John", age=42.3, email="<EMAIL>",
homepage="http://john.com")
s = UserMetaSerializer(u)
assert_equal(s.data['name'], u.name)
assert_equal(s.data['balance'], "100.00")
assert_equal(s.data['uppername'], "JOHN")
assert_false(s.data['is_old'])
assert_equal(s.data['created'], utils.rfcformat(u.created))
assert_equal(s.data['updated_local'], utils.rfcformat(u.updated, localtime=True))
assert_equal(s.data['finger_count'], 10)
def test_meta_fields_mapping(self):
s = UserMetaSerializer(self.obj)
assert_equal(type(s.fields['name']), fields.String)
assert_equal(type(s.fields['created']), fields.DateTime)
assert_equal(type(s.fields['updated']), fields.DateTime)
assert_equal(type(s.fields['updated_local']), fields.LocalDateTime)
assert_equal(type(s.fields['age']), fields.Float)
assert_equal(type(s.fields['balance']), fields.Price)
assert_equal(type(s.fields['registered']), fields.Boolean)
assert_equal(type(s.fields['sex_choices']), fields.Raw)
assert_equal(type(s.fields['hair_colors']), fields.Raw)
assert_equal(type(s.fields['finger_count']), fields.Integer)
assert_equal(type(s.fields['uid']), fields.UUID)
assert_equal(type(s.fields['time_registered']), fields.Time)
assert_equal(type(s.fields['birthdate']), fields.Date)
assert_equal(type(s.fields['since_created']), fields.TimeDelta)
def test_meta_field_not_on_obj_raises_attribute_error(self):
class BadUserSerializer(Serializer):
class Meta:
fields = ('name', 'notfound')
assert_raises(AttributeError, lambda: BadUserSerializer(self.obj))
def test_exclude_fields(self):
s = UserExcludeSerializer(self.obj)
assert_not_in("created", s.data)
assert_not_in("updated", | |
# Repository: YeLyuUT/FastVOD
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
import numpy as np
from model.faster_rcnn.resnet import resnet
from model.utils.config import cfg
from model.roi_pooling.modules.roi_pool import _RoIPooling
from model.roi_crop.modules.roi_crop import _RoICrop
from model.roi_align.modules.roi_align import RoIAlignAvg
from model.psroi_pooling.modules.psroi_pool import PSRoIPool
from model.rpn.proposal_target_layer_cascade import _ProposalTargetLayer
from model.rpn.rpn import _RPN
from model.nms.nms_wrapper import nms
import math
import time
import pdb
from model.utils.net_utils import _smooth_l1_loss, _crop_pool_layer, _affine_grid_gen, _affine_theta
class _global_context_layer(nn.Module):
    """Large-kernel 'global context' block built from two separable conv paths.

    A (ks x 1 -> 1 x ks) path and a (1 x ks -> ks x 1) path are summed; this
    approximates a large ks x ks convolution at lower cost.  ``c_out`` should
    be a multiple of pooled_size * pooled_size (the output feeds PSRoI
    pooling in the R-FCN Light-Head core).
    """

    def __init__(self, c_in, c_out, c_mid=256, ks=15):
        # the c_out should be set as multiple of pooled_size*pooled_size.
        super(_global_context_layer, self).__init__()
        self.c_out = c_out
        self.c_mid = c_mid
        self.ks = ks
        # define convolution ops; padding keeps the spatial size unchanged.
        self.row_prev = nn.Conv2d(c_in, c_mid, kernel_size=(ks, 1), padding=((ks - 1) // 2, 0))
        self.row_post = nn.Conv2d(c_mid, c_out, kernel_size=(1, ks), padding=(0, (ks - 1) // 2))
        self.col_prev = nn.Conv2d(c_in, c_mid, kernel_size=(1, ks), padding=(0, (ks - 1) // 2))
        self.col_post = nn.Conv2d(c_mid, c_out, kernel_size=(ks, 1), padding=((ks - 1) // 2, 0))
        # two post conv-layers are specialized as the addition is applied
        # after the layer: halving both weights keeps the summed output on
        # the same scale as a single conv path.
        self._init_weights()
        self.row_post.weight.data = self.row_post.weight.data / 2.0
        self.col_post.weight.data = self.col_post.weight.data / 2.0

    def forward(self, feature):
        """Return the element-wise sum of the row-path and column-path responses."""
        f_row = self.row_prev(feature)
        f_col = self.col_prev(feature)
        f_row = self.row_post(f_row)
        f_col = self.col_post(f_col)
        out = f_row + f_col
        #assert feature.size()[2:]==out.size()[2:], 'Check your global context layer.{}!={}'.format(feature.size()[2:],out.size()[2:])
        return out

    def _init_weights(self):
        """Initialize all conv weights from Normal(0, 0.01) and zero the biases."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
                #n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                #m.weight.data.normal_(0, math.sqrt(2. / n))
class _fasterRCNN(resnet):
""" faster RCNN """
def __init__(self, classes, num_layers=101, pretrained=False, class_agnostic=False, b_save_mid_convs=False):
    """Faster R-CNN detection head on a ResNet backbone.

    The detection core is selected by ``cfg.RESNET.CORE_CHOICE.USE``:
    plain Faster R-CNN, R-FCN, or R-FCN Light-Head (each branch below
    builds only the layers that core needs).

    :param classes: sequence of class names (length defines n_classes)
    :param num_layers: ResNet depth forwarded to the backbone
    :param pretrained: whether the backbone loads pretrained weights
    :param class_agnostic: single bbox regressor shared across classes
    :param b_save_mid_convs: flag to keep intermediate conv features
    """
    super(_fasterRCNN, self).__init__(classes, num_layers, pretrained, class_agnostic)
    self.classes = classes
    self.n_classes = len(classes)
    self.class_agnostic = class_agnostic
    self.b_save_mid_convs = b_save_mid_convs
    # loss accumulators
    self.RCNN_loss_cls = 0
    self.RCNN_loss_bbox = 0
    # define rpn and the RoI feature extractors (1/16 spatial scale)
    self.RCNN_rpn = _RPN(self.dout_base_model)
    self.RCNN_proposal_target = _ProposalTargetLayer(self.n_classes)
    self.RCNN_roi_pool = _RoIPooling(cfg.POOLING_SIZE, cfg.POOLING_SIZE, 1.0 / 16.0)
    self.RCNN_roi_align = RoIAlignAvg(cfg.POOLING_SIZE, cfg.POOLING_SIZE, 1.0 / 16.0)
    self.grid_size = cfg.POOLING_SIZE * 2 if cfg.CROP_RESIZE_WITH_MAX_POOL else cfg.POOLING_SIZE
    self.RCNN_roi_crop = _RoICrop()
    # per-forward caches (populated in forward())
    self.Conv_feat_track = None
    self.rpn_rois = None
    if cfg.RESNET.CORE_CHOICE.USE == cfg.RESNET.CORE_CHOICE.FASTER_RCNN:
        print('RCNN uses Faster RCNN core.')
    elif cfg.RESNET.CORE_CHOICE.USE == cfg.RESNET.CORE_CHOICE.RFCN_LIGHTHEAD:
        print('RCNN uses RFCN Light Head core.')
        # The input channel is set manually since we use resnet101 only.
        # c_out is set to 10*ps*ps. c_mid is set to 256.
        self.relu = nn.ReLU()
        core_depth = cfg.RESNET.GLOBAL_CONTEXT_OUT_DEPTH
        ctx_size = cfg.RESNET.GLOBAL_CONTEXT_RANGE
        self.g_ctx = _global_context_layer(2048, core_depth * cfg.POOLING_SIZE * cfg.POOLING_SIZE, 256, ctx_size)
        self.RCNN_psroi_pool = PSRoIPool(cfg.POOLING_SIZE, cfg.POOLING_SIZE, 1.0 / 16.0, cfg.POOLING_SIZE, core_depth)
        # fc layer for roi-wise prediction.
        # roi_mid_c in the original paper is 2048.
        roi_mid_c = 2048
        self.fc_roi = nn.Linear(core_depth * cfg.POOLING_SIZE * cfg.POOLING_SIZE, roi_mid_c)
    elif cfg.RESNET.CORE_CHOICE.USE == cfg.RESNET.CORE_CHOICE.RFCN:
        print('RCNN uses R-FCN core.')
        # define extra convolution layers for psroi input.
        tmp_c_in = 2048
        self.rfcn_cls = nn.Conv2d(tmp_c_in, self.n_classes * cfg.POOLING_SIZE * cfg.POOLING_SIZE, kernel_size=1)
        if self.class_agnostic:
            self.rfcn_bbox = nn.Conv2d(tmp_c_in, 4 * cfg.POOLING_SIZE * cfg.POOLING_SIZE, kernel_size=1)
        else:
            # Need to remove the background class for bbox regression.
            # Other circumstances are handled by torch.gather op later.
            self.rfcn_bbox = nn.Conv2d(tmp_c_in, 4 * (self.n_classes) * cfg.POOLING_SIZE * cfg.POOLING_SIZE, kernel_size=1)
        # define psroi layers
        self.RCNN_psroi_score = PSRoIPool(cfg.POOLING_SIZE, cfg.POOLING_SIZE, 1.0 / 16.0, cfg.POOLING_SIZE, self.n_classes)
        if self.class_agnostic:
            self.RCNN_psroi_bbox = PSRoIPool(cfg.POOLING_SIZE, cfg.POOLING_SIZE, 1.0 / 16.0, cfg.POOLING_SIZE, 4)
        else:
            self.RCNN_psroi_bbox = PSRoIPool(cfg.POOLING_SIZE, cfg.POOLING_SIZE, 1.0 / 16.0, cfg.POOLING_SIZE, 4 * (self.n_classes))
        # define ave_roi_pooling layers.
        self.ave_pooling_bbox = nn.AvgPool2d(cfg.POOLING_SIZE, stride=cfg.POOLING_SIZE)
        self.ave_pooling_cls = nn.AvgPool2d(cfg.POOLING_SIZE, stride=cfg.POOLING_SIZE)
def _init_weights(self):
def normal_init(m, mean, stddev, truncated=False):
"""
weight initalizer: truncated normal and random normal.
"""
# x is a parameter
if truncated:
m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation
else:
m.weight.data.normal_(mean, stddev)
m.bias.data.zero_()
normal_init(self.RCNN_rpn.RPN_Conv, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.RCNN_rpn.RPN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.RCNN_rpn.RPN_bbox_pred, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.RCNN_cls_score, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.RCNN_bbox_pred, 0, 0.001, cfg.TRAIN.TRUNCATED)
if cfg.RESNET.CORE_CHOICE.USE == cfg.RESNET.CORE_CHOICE.RFCN_LIGHTHEAD:
normal_init(self.fc_roi, 0, 0.01, cfg.TRAIN.TRUNCATED)
elif cfg.RESNET.CORE_CHOICE.USE == cfg.RESNET.CORE_CHOICE.RFCN:
normal_init(self.rfcn_cls, 0, 0.01, cfg.TRAIN.TRUNCATED)
normal_init(self.rfcn_bbox, 0, 0.001, cfg.TRAIN.TRUNCATED)
def forward(self, im_data, im_info, gt_boxes, num_boxes):
self.Conv_feat_track = None
self.rpn_rois = None
batch_size = im_data.size(0)
# reduce gt_boxe from length 6 to 5 if necessary.
if gt_boxes is not None:
if gt_boxes.dim==3 and gt_boxes.size(2)==6:
gt_boxes = gt_boxes[:,:,:5]
if gt_boxes.dim==2 and gt_boxes.size(1)==6:
gt_boxes = gt_boxes[:,:5]
gt_boxes = gt_boxes.data
num_boxes = num_boxes.data
im_info = im_info.data
# feed image data to base model to obtain base feature map
base_feat = self.RCNN_base(im_data)
if cfg.SIAMESE.DETACH_CONV1234 is True:
base_feat.detach_()
# feed base feature map tp RPN to obtain rois
if cfg.SIAMESE.DETACH_FEAT_FOR_TRACK is True:
self.Conv_feat_track = base_feat.detach()
else:
self.Conv_feat_track = base_feat
rois_rpn, rpn_loss_cls, rpn_loss_bbox = self.RCNN_rpn(base_feat, im_info, gt_boxes, num_boxes)
self.rpn_rois = rois_rpn
# if it is training phrase, then use ground truth bboxes for refinement.
if self.training:
rois, rois_label, rois_target, rois_inside_ws, rois_outside_ws = self.prepare_rois_for_training(rois_rpn, gt_boxes, num_boxes)
else:
rois_label = None
rois_target = None
rois_inside_ws = None
rois_outside_ws = None
rpn_loss_cls = 0
rpn_loss_bbox = 0
rois = rois_rpn
if rois is None:
return None, None, None, None, None, None, None, None
# The original implementation puts c5 to R-CNN.
if not cfg.RESNET.CORE_CHOICE.USE == cfg.RESNET.CORE_CHOICE.FASTER_RCNN:
base_feat = self.RCNN_top(base_feat)
# convert base feat to roi predictions.
self.base_feat_for_roi = base_feat
bbox_pred, cls_prob, cls_score = self.base_feat_to_roi_pred(base_feat, rois, rois_label)
RCNN_loss_cls = 0
RCNN_loss_bbox = 0
if self.training:
# classification loss
# handle online hard example mining (OHEM) here.
if cfg.TRAIN.OHEM is True:
RCNN_loss_cls_tmp = F.cross_entropy(cls_score, rois_label, reduce=False)
RCNN_loss_bbox_tmp = _smooth_l1_loss(bbox_pred, rois_target, rois_inside_ws, rois_outside_ws, reduce=False)
#assert RCNN_loss_cls_tmp.size()==RCNN_loss_bbox_tmp.size(), 'size not equal.{}!={}'.format(RCNN_loss_cls_tmp.size(),RCNN_loss_bbox_tmp.size())
RCNN_loss_tmp = RCNN_loss_cls_tmp# + RCNN_loss_bbox_tmp
sorted_RCNN_loss_tmp, index = torch.sort(RCNN_loss_tmp, descending=True)
# TODO add nms here.
ordered_boxes = rois.view(-1, 5)[index,1:5]
loss_boxes = torch.cat((ordered_boxes, sorted_RCNN_loss_tmp.view(-1,1)), 1)
keep = nms(loss_boxes, cfg.TRAIN.OHEM_NMS).long().view(-1)
keep = keep[:cfg.TRAIN.OHEM_BATCH_SIZE*batch_size]
# we only keep the first <cfg.TRAIN.OHEM_BATCH_SIZE*batch_size> indexes.
index = index[keep]
index.detach_()
# redo forward to train hard examples only.
# select first cfg.TRAIN.OHEM_BATCH_SIZE rois for training.
index = index[:cfg.TRAIN.OHEM_BATCH_SIZE*batch_size]
rois_view = rois.view(-1,5).index_select(0, index)
rois_label = rois_label.index_select(0, index)
rois_target = rois_target.index_select(0, index)
rois_inside_ws = rois_inside_ws.index_select(0, index)
rois_outside_ws = rois_outside_ws.index_select(0, index)
bbox_pred, cls_prob, cls_score = self.base_feat_to_roi_pred(base_feat, rois_view, rois_label)
RCNN_loss_cls = F.cross_entropy(cls_score, rois_label, reduce = False)
# bounding box regression L1 loss
RCNN_loss_bbox = _smooth_l1_loss(bbox_pred, rois_target, rois_inside_ws, rois_outside_ws, reduce = False)
#print('1:', RCNN_loss_cls[:5])
#print('2:', RCNN_loss_bbox[:5])
#print('3:', sorted_RCNN_loss_tmp[:5])
RCNN_loss_cls = RCNN_loss_cls.mean()
RCNN_loss_bbox = RCNN_loss_bbox.mean()
else:
RCNN_loss_cls = F.cross_entropy(cls_score, rois_label)
# bounding box regression L1 loss
RCNN_loss_bbox = _smooth_l1_loss(bbox_pred, rois_target, rois_inside_ws, rois_outside_ws)
rpn_loss_cls = torch.unsqueeze(rpn_loss_cls, 0)
rpn_loss_bbox = torch.unsqueeze(rpn_loss_bbox, 0)
RCNN_loss_cls = torch.unsqueeze(RCNN_loss_cls, 0)
RCNN_loss_bbox = torch.unsqueeze(RCNN_loss_bbox, 0)
if self.training:
cls_prob = None
bbox_pred = None
else:
cls_prob = cls_prob.view(batch_size, rois.size(1), -1)
bbox_pred = bbox_pred.view(batch_size, rois.size(1), -1)
return rois, cls_prob, bbox_pred, rpn_loss_cls, rpn_loss_bbox, RCNN_loss_cls, RCNN_loss_bbox, rois_label
def create_architecture(self):
# _init_modules should go before _init_weights as some layers are newly initialized.
self._init_modules()
self._init_weights()
    def base_feat_to_roi_pred(self, base_feat, rois, rois_label):
        """Turn a base feature map plus RoIs into per-RoI predictions.

        Args:
            base_feat: backbone feature map (already passed through RCNN_top
                unless the Faster R-CNN core is selected — see forward()).
            rois: RoI tensor; flattened to (N, 5) rows of
                (batch_idx, x1, y1, x2, y2) before pooling.
            rois_label: per-RoI class labels; used during training with
                class-specific regression to pick the bbox columns.

        Returns:
            tuple: (bbox_pred, cls_prob, cls_score) for the flattened RoIs.
        """
        # handle base_feat
        if cfg.RESNET.CORE_CHOICE.USE == cfg.RESNET.CORE_CHOICE.RFCN_LIGHTHEAD:
            # Light-head: global context conv + ReLU before pooling.
            base_feat = self.g_ctx(base_feat)
            base_feat = self.relu(base_feat)
        elif cfg.RESNET.CORE_CHOICE.USE == cfg.RESNET.CORE_CHOICE.RFCN:
            # R-FCN: separate position-sensitive score maps for cls and bbox.
            base_feat_score = self.rfcn_cls(base_feat)
            base_feat_bbox = self.rfcn_bbox(base_feat)
        else:
            pass
        # handle pooling.
        if cfg.RESNET.CORE_CHOICE.USE == cfg.RESNET.CORE_CHOICE.RFCN:
            # For RFCN, two pooled_feat blobs are needed. One for class score, one for bbox.
            assert cfg.POOLING_MODE =='pspool', 'R-FCN has to use ps-pooling. Please check your config file for correct input.'
            pooled_feat_score = self.RCNN_psroi_score(base_feat_score, rois.view(-1, 5))
            pooled_feat_bbox = self.RCNN_psroi_bbox(base_feat_bbox, rois.view(-1, 5))
            # Vote by averaging over the pooled grid, then drop the 1x1
            # spatial dims so the tensors are (N, C).
            cls_score = self.ave_pooling_cls(pooled_feat_score)
            bbox_pred = self.ave_pooling_bbox(pooled_feat_bbox)
            cls_score = cls_score.view((cls_score.shape[0], cls_score.shape[1]))
            bbox_pred = bbox_pred.view((bbox_pred.shape[0], bbox_pred.shape[1]))
            if self.training and not self.class_agnostic:
                # select the corresponding columns according to roi labels
                bbox_pred_view = bbox_pred.view(bbox_pred.size(0), int(bbox_pred.size(1) / 4), 4)
                bbox_pred_select = torch.gather(bbox_pred_view, 1, rois_label.view(rois_label.size(0), 1, 1).expand(rois_label.size(0), 1,4))
                bbox_pred = bbox_pred_select.squeeze(1)
            cls_prob = F.softmax(cls_score, 1)
            return bbox_pred, cls_prob, cls_score
        else:
            # Non-R-FCN cores pool once and delegate to the RoI-wise head.
            if cfg.POOLING_MODE == 'crop':
                # pdb.set_trace()
                # pooled_feat_anchor = _crop_pool_layer(base_feat, rois.view(-1, 5))
                grid_xy = _affine_grid_gen(rois.view(-1, 5), base_feat.size()[2:], self.grid_size)
                grid_yx = torch.stack([grid_xy.data[:,:,:,1], grid_xy.data[:,:,:,0]], 3).contiguous()
                pooled_feat = self.RCNN_roi_crop(base_feat, grid_yx.detach())
                if cfg.CROP_RESIZE_WITH_MAX_POOL:
                    pooled_feat = F.max_pool2d(pooled_feat, 2, 2)
            elif cfg.POOLING_MODE == 'align':
                pooled_feat = self.RCNN_roi_align(base_feat, rois.view(-1, 5))
            elif cfg.POOLING_MODE == 'pool':
                pooled_feat = self.RCNN_roi_pool(base_feat, rois.view(-1, 5))
            elif cfg.POOLING_MODE =='pspool':
                pooled_feat = self.RCNN_psroi_pool(base_feat, rois.view(-1, 5))
            bbox_pred, cls_prob, cls_score = self.roi_wise_pred(pooled_feat, rois_label)
            return bbox_pred, cls_prob, cls_score
    def roi_wise_pred(self,pooled_feat,rois_label=None):
        """Compute per-RoI class scores and bbox offsets from pooled features.

        Args:
            pooled_feat: pooled RoI features from base_feat_to_roi_pred().
            rois_label: per-RoI class labels; only needed during training with
                class-specific bbox regression.

        Returns:
            tuple: (bbox_pred, cls_prob, cls_score).

        Raises:
            ValueError: If the configured core choice is not supported here
                (R-FCN is handled in base_feat_to_roi_pred instead).
        """
        # feed pooled features to top model
        if cfg.RESNET.CORE_CHOICE.USE == cfg.RESNET.CORE_CHOICE.FASTER_RCNN:
            roi_feat = self._head_to_tail(pooled_feat)
        elif cfg.RESNET.CORE_CHOICE.USE == cfg.RESNET.CORE_CHOICE.RFCN_LIGHTHEAD:
            # Light-head flattens the pooled grid and applies a single fc.
            roi_feat = pooled_feat.view(pooled_feat.size(0), -1)
            roi_feat = self.fc_roi(roi_feat)
        else:
            raise ValueError('Unknown RCNN type.')
        # compute bbox offset
        bbox_pred = self.RCNN_bbox_pred(roi_feat)
        if self.training and not self.class_agnostic:
            # select the corresponding columns according to roi labels
            bbox_pred_view = bbox_pred.view(bbox_pred.size(0), int(bbox_pred.size(1) / 4), 4)
            bbox_pred_select = torch.gather(bbox_pred_view, 1,
                                            rois_label.view(rois_label.size(0), 1, 1).expand(rois_label.size(0), 1, 4))
            bbox_pred = bbox_pred_select.squeeze(1)
        # compute object classification probability
        cls_score = self.RCNN_cls_score(roi_feat)
        cls_prob = F.softmax(cls_score, 1)
        return bbox_pred, cls_prob, cls_score
def prepare_rois_for_training(self, | |
to False.
Returns:
list: The lines declaring the variable.
"""
if isinstance(var, str): # pragma: no cover
var = {'name': var}
type_name = cls.get_native_type(**var)
out = [cls.format_function_param('declare',
type_name=type_name,
variable=cls.get_name_declare(var))]
if is_argument:
return out
if definitions is None:
definitions = out
definitions += cls.write_initialize(var, value=value,
requires_freeing=requires_freeing)
return out
@classmethod
def get_name_declare(cls, var):
r"""Determine the name that should be used for declaration.
Args:
var (str, dict): Name of variable or dictionary of information.
Returns:
str: Modified name for declaration.
"""
if isinstance(var, str): # pragma: no cover
return var
assert(isinstance(var, dict))
out = var['name']
return out
    @classmethod
    def write_free(cls, var, **kwargs):
        r"""Return the lines required to free a variable with a certain type.
        Args:
            var (dict, str): Name or information dictionary for the variable
                being declared.
            **kwargs: Additional keyword arguments are passed to format_function_param.
        Returns:
            list: The lines freeing the variable (empty if the variable is
                marked 'dont_free').
        """
        if isinstance(var, str):  # pragma: no cover
            var = {'name': var}
        out = []
        if not var.get('dont_free', False):
            # Prefer a type-specific free template (e.g. 'free_array') when
            # the language defines one; otherwise use the generic 'free'.
            if ((isinstance(var.get('datatype', False), dict)
                 and (('free_%s' % var['datatype']['type'])
                      in cls.function_param))):
                out = [cls.format_function_param(
                    'free_%s' % var['datatype']['type'],
                    variable=var['name'], **kwargs)]
            else:
                out = [cls.format_function_param(
                    'free', variable=var['name'], **kwargs)]
        return out
    @classmethod
    def write_assign_to_output(cls, dst_var, src_var, copy=False,
                               outputs_in_inputs=False, **kwargs):
        r"""Write lines assigning a value to an output variable.
        Args:
            dst_var (str, dict): Name or information dictionary for
                variable being assigned to.
            src_var (str, dict): Name or information dictionary for
                value being assigned to dst_var.
            copy (bool, optional): If True, the assigned value is copied
                during assignment. Defaults to False.
            outputs_in_inputs (bool, optional): If True, outputs are passed
                as input parameters. In some languages, this means that a
                pointer or reference is passed (e.g. C) and so the assignment
                should be to the memory indicated rather than the variable.
                Defaults to False.
        Returns:
            list: Lines achieving assignment.
        """
        datatype = None
        if isinstance(dst_var, dict):
            kwargs['name'] = dst_var['name']
            datatype = dst_var['datatype']
        else:
            kwargs['name'] = dst_var
        if isinstance(src_var, dict):
            kwargs['value'] = src_var['name']
            # NOTE(review): when both dst_var and src_var are dicts,
            # src_var's datatype overwrites dst_var's here, so the template
            # chosen below keys off the source type — confirm intended.
            datatype = src_var['datatype']
        else:
            kwargs['value'] = src_var
        # Force a copy when the destination type has a dedicated copy
        # template and outputs are passed through the input parameters.
        if ((outputs_in_inputs and isinstance(dst_var, dict)
             and isinstance(dst_var['datatype'], dict)
             and ('copy_' + dst_var['datatype']['type']
                  in cls.function_param))):
            copy = True
        if copy:
            if ((isinstance(datatype, dict)
                 and ('copy_' + datatype['type'] in cls.function_param))):
                return [cls.format_function_param(
                    'copy_' + datatype['type'], **kwargs)]
            else:
                return [cls.format_function_param('assign_copy', **kwargs)]
        else:
            return [cls.format_function_param('assign', **kwargs)]
    @classmethod
    def write_expand_single_element(cls, output_var, add_cond=False):
        r"""Write lines allowing extraction of the only element from a single
        element array as a stand-alone variable if the variable is an array
        and only has one element.
        Args:
            output_var (str): Name of the variable that should be conditionally
                expanded.
            add_cond (list, optional): Additional conditions that must be
                satisfied for the array element to be extracted. Defaults to
                False and is ignored.
        Returns:
            list: Lines added the conditional expansion of single element
                arrays (empty if the language has no runtime type check).
        """
        # Languages without an 'istype' template cannot express this check.
        if 'istype' not in cls.function_param:
            return []
        # Build: (output_var is an array) AND (len(output_var) == 1),
        # using the language's own and/equality operators when defined.
        cond = ('(%s) %s (%s %s 1)' % (
            cls.format_function_param('istype',
                                      variable=output_var,
                                      type=cls.type_map['array']),
            cls.function_param.get('and', '&&'),
            cls.format_function_param('len',
                                      variable=output_var),
            cls.function_param.get('equ', '==')))
        if add_cond:
            for x in add_cond:
                cond += f" {cls.function_param.get('and', '&&')} {x}"
        # Inside the conditional, assign element[first_index] to the variable.
        out = cls.write_if_block(
            cond,
            cls.format_function_param(
                'assign', name=output_var,
                value=cls.format_function_param(
                    'index', variable=output_var,
                    index=int(cls.function_param.get('first_index', 0)))))
        return out
@classmethod
def split_variables(cls, var_str):
r"""Split variable string include individual variables.
Args:
var_str (str): String containing multiple variables.
Returns:
list: Split variables.
"""
out = []
if var_str:
pairs = [(r'\[', r'\]'),
(r'\(', r'\)'),
(r'\{', r'\}'),
(r"'", r"'"),
(r'"', r'"')]
regex_ele = r''
present = False
for p in pairs:
if not any([(str(ip)[-1] in var_str) for ip in p]):
continue
present = True
regex_ele += (r'(?:%s[.\n]*?%s)|' % p)
if present:
regex_ele += '(?:.+?)'
regex_ele = r'\s*(%s)\s*(?:,|$)' % regex_ele
out = [x.group(1) for x in re.finditer(regex_ele, var_str)]
else:
out = [x.strip() for x in var_str.split(',')]
return out
@classmethod
def prepare_variables(cls, vars_list, in_definition=False,
for_yggdrasil=False):
r"""Concatenate a set of input variables such that it can be passed as a
single string to the function_call parameter.
Args:
vars_list (list): List of variable dictionaries containing info
(e.g. names) that should be used to prepare a string representing
input/output to/from a function call.
in_definition (bool, optional): If True, the returned sequence
will be of the format required for specifying variables
in a function definition. Defaults to False.
for_yggdrasil (bool, optional): If True, the variables will be
prepared in the formated expected by calls to yggdarsil
send/recv methods. Defaults to False.
Returns:
str: Concatentated variables list.
"""
name_list = []
if not isinstance(vars_list, list):
vars_list = [vars_list]
for x in vars_list:
if isinstance(x, str):
name_list.append(x)
else:
assert(isinstance(x, dict))
name_list.append(x['name'])
return ', '.join(name_list)
@classmethod
def prepare_input_variables(cls, vars_list, in_definition=False,
for_yggdrasil=False):
r"""Concatenate a set of input variables such that it can be passed as a
single string to the function_call parameter.
Args:
vars_list (list): List of variable dictionaries containing info
(e.g. names) that should be used to prepare a string representing
input to a function call.
in_definition (bool, optional): If True, the returned sequence
will be of the format required for specifying input
variables in a function definition. Defaults to False.
for_yggdrasil (bool, optional): If True, the variables will be
prepared in the formated expected by calls to yggdarsil
send/recv methods. Defaults to False.
Returns:
str: Concatentated variables list.
"""
return cls.prepare_variables(vars_list, in_definition=in_definition,
for_yggdrasil=for_yggdrasil)
    @classmethod
    def prepare_output_variables(cls, vars_list, in_definition=False,
                                 in_inputs=False, for_yggdrasil=False):
        r"""Concatenate a set of output variables such that it can be passed as
        a single string to the function_call parameter.
        Args:
            vars_list (list): List of variable dictionaries containing info
                (e.g. names) that should be used to prepare a string representing
                output from a function call.
            in_definition (bool, optional): If True, the returned sequence
                will be of the format required for specifying output
                variables in a function definition. Defaults to False.
            in_inputs (bool, optional): If True, the output variables should
                be formated to be included as input variables. Defaults to
                False.
            for_yggdrasil (bool, optional): If True, the variables will be
                prepared in the formated expected by calls to yggdarsil
                send/recv methods. Defaults to False.
        Returns:
            str: Concatentated variables list.
        """
        if in_inputs:
            # Outputs passed through the inputs (e.g. by reference in C) are
            # first converted to their input-parameter form.
            vars_list = [cls.output2input(x, in_definition=in_definition)
                         for x in vars_list]
        out = cls.prepare_variables(vars_list, in_definition=in_definition,
                                    for_yggdrasil=for_yggdrasil)
        # Wrap multiple outputs (e.g. as '[a, b]') when the language defines
        # a grouping template for them.
        if isinstance(vars_list, list) and (len(vars_list) > 1):
            if in_definition and ('multiple_outputs_def' in cls.function_param):
                out = cls.format_function_param('multiple_outputs_def', outputs=out)
            elif 'multiple_outputs' in cls.function_param:
                out = cls.format_function_param('multiple_outputs', outputs=out)
        return out
@classmethod
def write_if_block(cls, cond, block_contents, else_block_contents=False):
r"""Return the lines required to complete a conditional block.
Args:
cond (str): Conditional that should determine block execution.
block_contents (list): Lines of code that should be executed inside
the block.
else_block_contents (list, optional): Lines of code that should be
executed inside the else clause of the block. Defaults to False
if not provided and an else clause is omitted.
Returns:
list: Lines of code performing conditional execution of a block.
"""
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
out = []
if not isinstance(cond, list):
cond = [cond]
block_contents = [block_contents]
assert(len(cond) == len(block_contents))
for i, (icond, iblock_contents) in enumerate(zip(cond, block_contents)):
if i == 0:
out.append(cls.format_function_param('if_begin', cond=icond))
else:
out.append(cls.format_function_param('if_elif', cond=icond))
if not isinstance(iblock_contents, (list, tuple)):
iblock_contents = [iblock_contents]
for x in iblock_contents:
out.append(cls.function_param['indent'] + x)
if else_block_contents:
out.append(cls.format_function_param('if_else'))
if not isinstance(else_block_contents, (list, tuple)):
else_block_contents = [else_block_contents]
for x in else_block_contents:
out.append(cls.function_param['indent'] + x)
# Close block
out.append(cls.function_param.get('if_end',
cls.function_param.get(
'block_end', '')))
return out
@classmethod
def write_for_loop(cls, iter_var, iter_begin, iter_end, loop_contents):
r"""Return the lines required to complete a for loop.
Args:
iter_var (str): Name of variable that iterator should use.
iter_begin (int): Beginning of iteration.
iter_end (int): End of iteration.
loop_contents (list): Lines of code that should be executed inside
the loop.
Returns:
list: Lines of code performing a loop.
"""
if cls.function_param is None:
raise NotImplementedError("function_param attribute not set for"
"language '%s'" % cls.language)
out = []
# Opening for statement line
out.append(cls.format_function_param('for_begin', iter_var=iter_var,
iter_begin=iter_begin,
iter_end=iter_end))
# Indent loop contents
if not isinstance(loop_contents, (list, tuple)):
loop_contents = [loop_contents]
for x in loop_contents:
out.append(cls.function_param['indent'] + x)
# Close block
out.append(cls.function_param.get('for_end',
cls.function_param.get(
'block_end', '')))
return out
@classmethod
def write_while_loop(cls, cond, loop_contents):
r"""Return the lines | |
col("R_endOffset")) &
(~((col("L.annotSet") == col("R_annotSet")) &
(col("L.annotType") == col("R_annotType")) &
(col("L.startOffset") == col("R_startOffset")) &
(col("L.endOffset") == col("R_endOffset"))))),"leftouter") \
.filter(col("R_docId").isNull()) \
.select("L.*")
else:
results = left.alias("L").join(tmpRight,
((col("L.docId") == col("R_docId")) &
(col("L.startOffset") >= col("R_startOffset")) &
(col("L.endOffset") <= col("R_endOffset")) &
(~((col("L.annotSet") == col("R_annotSet")) &
(col("L.annotType") == col("R_annotType")) &
(col("L.startOffset") == col("R_startOffset")) &
(col("L.endOffset") == col("R_endOffset"))))),"leftsemi")
if limit > 0:
results = results.limit(limit)
return results
def ContainedInList(left, right):
    """Provide the ability to find annotations that are contained by another annotation.
    The input is 2 Dataframes of AQAnnotations. We will call them A and B.
    The purpose is to find those annotations in A that are contained in B. What that means is the start/end offset for an annotation from A must be contained by the start/end offset from an annotation in B.
    We of course have to also match on the document id.
    We ultimately return a Dataframe with 2 fields where the first field is an annotation from B and the second field is an array of entries from A
    that are contained in the first entry.
    Args:
        left: Dataframe of AQAnnotations, the ones we will return (as a list) if they are contained in AQAnnotations from 'right'.
        right: Dataframe of AQAnnotations, the ones we are looking to see if they contain AQAnnotations from 'left'.
    Returns:
        Dataframe of (AQAnnotations,Array[AQAnnotations])
    """
    def containedAQ(rec):
        # rec is (groupBy key, iterable of joined L/R rows for one 'right'
        # annotation). Sort the contained annotations by start offset.
        #srecs = sorted(rec[1], key=lambda x: (-1 if x.LendOffset == None else x.LendOffset),reverse=True)
        srecs = sorted(rec[1], key=lambda x: (1 if x.LstartOffset == None else x.LstartOffset),reverse=False)
        # We can extract the key from the any 'right' entry in sorted recs (we will use the first one)
        key = Row(docId = srecs[0].RdocId,
                  annotSet = srecs[0].RannotSet,
                  annotType = srecs[0].RannotType,
                  startOffset = int(srecs[0].RstartOffset),
                  endOffset = int(srecs[0].RendOffset),
                  annotId = int(srecs[0].RannotId),
                  properties = srecs[0].Rproperties)
        # Construct the array
        values = []
        # NOTE(review): the loop variable below shadows the outer 'rec'.
        for rec in srecs:
            if rec.LdocId != None:
                values.append(Row(docId = rec.LdocId,
                                  annotSet = rec.LannotSet,
                                  annotType = rec.LannotType,
                                  startOffset = int(rec.LstartOffset),
                                  endOffset = int(rec.LendOffset),
                                  annotId = int(rec.LannotId),
                                  properties = rec.Lproperties))
        return(key,values)
    # Rename columns with L/R prefixes so both sides survive the join.
    l = left.select("annotId","annotSet","annotType","docId","endOffset","properties","startOffset").toDF("LannotId","LannotSet","LannotType","LdocId","LendOffset","Lproperties","LstartOffset")
    r = right.select("annotId","annotSet","annotType","docId","endOffset","properties","startOffset").toDF("RannotId","RannotSet","RannotType","RdocId","RendOffset","Rproperties","RstartOffset")
    # Containment join: same document, L inside R, excluding exact matches
    # (same set/type/offsets would pair an annotation with itself).
    results = l.join(r,
                     ((col("LdocId") == col("RdocId")) &
                      (col("LstartOffset") >= col("RstartOffset")) &
                      (col("LendOffset") <= col("RendOffset")) &
                      (~((col("LannotSet") == col("RannotSet")) &
                         (col("LannotType") == col("RannotType")) &
                         (col("LstartOffset") == col("RstartOffset")) &
                         (col("LendOffset") == col("RendOffset")))))) \
              .rdd \
              .groupBy(lambda x: (x["RdocId"],x["RstartOffset"],x["RendOffset"])) \
              .map(lambda rec: containedAQ(rec))
    # 'spark' and AQSchemaList() are module-level (session and result schema).
    return spark.createDataFrame(results.map(lambda x: x),AQSchemaList())
def Before(left, right, dist=sys.maxsize , limit=0, negate=False):
    """Provide the ability to find annotations that are before another annotation.
    The input is 2 Dataframes of AQAnnotations. We will call them A and B.
    The purpose is to find those annotations in A that are before B.
    What that means is the end offset for an annotation from A must be before (or equal to) the start offset from an annotation in B.
    We ultimately return the A annotations that meet this criteria.
    A distance operator can also be optionally specified.
    This would require an A annotation (endOffset) to occur n characters (or less) before the B annotation (startOffset).
    There is also the option of negating the query (think Not Before) so that we return only A where it is not before B.
    Args:
        left: Dataframe of AQAnnotations, the ones we will return if they are before AQAnnotations from 'right'.
        right: Dataframe of AQAnnotations, the ones we are looking to see if are after AQAnnotations from 'left'.
        dist: Number of characters where endOffset from 'left' must occur before startOffset from 'right'. Default is sys.maxsize.
        limit: Number of AQAnnotations to return.
        negate: Whether to negate the entire query (think NOT before). Default is false.
    Returns:
        Dataframe of AQAnnotations
    """
    results = None
    # Workaround for Catalyst optimization issues when working with two Dataframes derived from the same Dataframe - Catalyst gets confused
    tmpRight = right.select("annotId", "annotSet", "annotType", "docId", "endOffset", "startOffset", "properties") \
                    .toDF("R_annotId", "R_annotSet", "R_annotType", "R_docId", "R_endOffset", "R_startOffset", 'R_properties')
    if negate:
        # Anti-join emulation: left-outer join, then keep 'left' rows with no
        # qualifying match (R_docId null).
        results = left.alias("L").join(tmpRight,
                                       ((col("L.docId") == col("R_docId")) &
                                        (col("R_startOffset") >= col("L.endOffset")) &
                                        (col("R_startOffset") - col("L.endOffset") < dist)) &
                                       (~((col("L.annotSet") == col("R_annotSet")) &
                                          (col("L.annotType") == col("R_annotType")) &
                                          (col("L.startOffset") == col("R_startOffset")) &
                                          (col("L.endOffset") == col("R_endOffset")))),"leftouter") \
                      .filter(col("R_docId").isNull()) \
                      .select("L.*")
    else:
        # Left-semi join: keep 'left' rows with at least one match; note the
        # distance test is strict (< dist).
        results = left.alias("L").join(tmpRight,
                                       ((col("L.docId") == col("R_docId")) &
                                        (col("R_startOffset") >= col("L.endOffset")) &
                                        (col("R_startOffset") - col("L.endOffset") < dist) &
                                        (~((col("L.annotSet") == col("R_annotSet")) &
                                           (col("L.annotType") == col("R_annotType")) &
                                           (col("L.startOffset") == col("R_startOffset")) &
                                           (col("L.endOffset") == col("R_endOffset"))))),"leftsemi")
    if limit > 0:
        results = results.limit(limit)
    return results
def After(left, right, dist=sys.maxsize , limit=0, negate=False):
    """Provide the ability to find annotations that are after another annotation.
    The input is 2 Dataframes of AQAnnotations. We will call them A and B.
    The purpose is to find those annotations in A that are after B.
    What that means is the start offset for an annotation from A must be after (or equal to) the end offset from an annotation in B.
    We ultimately return the A annotations that meet this criteria.
    A distance operator can also be optionally specified.
    This would require an A annotation (startOffset) to occur n characters (or less) after the B annotation (endOffset).
    There is also the option of negating the query (think Not After) so that we return only A where it is not after B.
    Args:
        left: Dataframe of AQAnnotations, the ones we will return if they are after AQAnnotations from 'right'.
        right: Dataframe of AQAnnotations, the ones we are looking to see if are before AQAnnotations from 'left'.
        dist: Number of characters where startOffset from 'left' must occur after endOffset from 'right'. Default is sys.maxsize.
        limit: Number of AQAnnotations to return.
        negate: Whether to negate the entire query (think NOT after). Default is false.
    Returns:
        Dataframe of AQAnnotations
    """
    results = None
    # Workaround for Catalyst optimization issues when working with two Dataframes derived from the same Dataframe - Catalyst gets confused
    tmpRight = right.select("annotId", "annotSet", "annotType", "docId", "endOffset", "startOffset", "properties") \
                    .toDF("R_annotId", "R_annotSet", "R_annotType", "R_docId", "R_endOffset", "R_startOffset", 'R_properties')
    if negate:
        # Anti-join emulation: left-outer join, then keep 'left' rows with no
        # qualifying match (R_docId null).
        results = left.alias("L").join(tmpRight,
                                       ((col("L.docId") == col("R_docId")) &
                                        (col("L.startOffset") >= col("R_endOffset")) &
                                        (col("L.startOffset") - col("R_endOffset") < dist)) &
                                       (~((col("L.annotSet") == col("R_annotSet")) &
                                          (col("L.annotType") == col("R_annotType")) &
                                          (col("L.startOffset") == col("R_startOffset")) &
                                          (col("L.endOffset") == col("R_endOffset")))),"leftouter") \
                      .filter(col("R_docId").isNull()) \
                      .select("L.*")
    else:
        # Left-semi join: keep 'left' rows with at least one match; note the
        # distance test is strict (< dist).
        results = left.alias("L").join(tmpRight,
                                       ((col("L.docId") == col("R_docId")) &
                                        (col("L.startOffset") >= col("R_endOffset")) &
                                        (col("L.startOffset") - col("R_endOffset") < dist) &
                                        (~((col("L.annotSet") == col("R_annotSet")) &
                                           (col("L.annotType") == col("R_annotType")) &
                                           (col("L.startOffset") == col("R_startOffset")) &
                                           (col("L.endOffset") == col("R_endOffset"))))),"leftsemi")
    if limit > 0:
        results = results.limit(limit)
    return results
def Between(middle, left, right, dist=sys.maxsize , limit=0, negate=False):
"""Provide the ability to find annotations that are before one annotation and after another.
The input is 3 Dataframes of AQAnnotations. We will call them A, B and C.
The purpose is to find those annotations in A that are before B and after C.
What that means is the end offset for an annotation from A must be before (or equal to) the start offset from an annotation in B and the start offset for A be after (or equal to) the end offset from C.
We ultimately return the A annotations that meet this criteria.
A distance operator can also be optionally specified.
This would require an A annotation (endOffset) to occur n characters (or less) before the B annotation (startOffset) and would require the A annotation (startOffset) to occur n characters (or less) after the C annotation (endOffset) .
There is also the option of negating the query (think Not Between) so that we return only A where it is not before B nor after C.
Args:
middle: Dataframe of AQAnnotations, the ones we will return if they are between AQAnnotations from 'left' and AQAnnotations from 'right.
left: Dataframe of AQAnnotations, the ones we are looking to see if they are before AQAnnotations from 'middle'.
right: Dataframe of AQAnnotations, the ones we are looking to see if they are after AQAnnotations from 'middle'.
dist: Number of characters where startOffset from 'middle' must occur after | |
= "n"
global hold_blocker
hold_blocker = 0
score = 0
goal = 5
level = 1
scr.set(str(score))
gl.set(str(goal))
lvl.set(str(level))
delay = base_delay
spawn()
else:
cnt += 1
if nextup == 1:
nextup_t()
elif nextup == 2:
nextup_o()
elif nextup == 3:
nextup_i()
elif nextup == 4:
nextup_l()
elif nextup == 5:
nextup_j()
elif nextup == 6:
nextup_s()
elif nextup == 7:
nextup_z()
def writeScores():
    """Persist the high-score list to scores.dat, one "name, score" pair per line.

    Reads the module-level ``scores`` list of 2-item entries; overwrites any
    existing file.
    """
    # The with-statement closes the file on exit; the original also called
    # file.close() afterwards, which was redundant dead code.
    with open("scores.dat", "w") as file:
        for entry in scores:
            file.write("{}, {}\n".format(entry[0], entry[1]))
def spawn_t():
    """Spawn a new T piece on the playfield canvas and record the active type."""
    f.delete("block")
    global block_type
    block_type = "t"
    # One tile per tag; each is created at the spawn cell, then shifted.
    for tag, dx, dy in (("a", 0, 0), ("b", -te, 0), ("c", te, 0), ("d", 0, te)):
        tile = f.create_rectangle(spx, spy, spx + te, spy + te,
                                  fill="magenta1", tags=("block", tag))
        if dx or dy:
            f.move(tile, dx, dy)
def nextup_t():
    """Draw the T piece in the next-piece preview canvas."""
    pv.delete("block")
    for tag, dx, dy in (("a", 0, 0), ("b", -te, 0), ("c", te, 0), ("d", 0, te)):
        tile = pv.create_rectangle(2 * te, te, 3 * te, 2 * te,
                                   fill="magenta1", tags=("block", tag))
        if dx or dy:
            pv.move(tile, dx, dy)
def hold_t():
    """Draw the T piece in the hold preview canvas."""
    holdpv.delete("block")
    for tag, dx, dy in (("a", 0, 0), ("b", -te, 0), ("c", te, 0), ("d", 0, te)):
        tile = holdpv.create_rectangle(2 * te, te, 3 * te, 2 * te,
                                       fill="magenta1", tags=("block", tag))
        if dx or dy:
            holdpv.move(tile, dx, dy)
def spawn_l():
    """Spawn a new L piece on the playfield canvas and record the active type."""
    f.delete("block")
    global block_type
    block_type = "l"
    # One tile per tag; each is created at the spawn cell, then shifted.
    for tag, dx, dy in (("a", 0, 0), ("b", -te, 0), ("c", te, 0), ("d", -te, te)):
        tile = f.create_rectangle(spx, spy, spx + te, spy + te,
                                  fill="blue", tags=("block", tag))
        if dx or dy:
            f.move(tile, dx, dy)
def nextup_l():
    """Draw the L piece in the next-piece preview canvas."""
    pv.delete("block")
    for tag, dx, dy in (("a", 0, 0), ("b", -te, 0), ("c", te, 0), ("d", -te, te)):
        tile = pv.create_rectangle(2 * te, te, 3 * te, 2 * te,
                                   fill="blue", tags=("block", tag))
        if dx or dy:
            pv.move(tile, dx, dy)
def hold_l():
    """Draw the L piece (blue) on the hold widget `holdpv`."""
    holdpv.delete("block")
    for tag, dx, dy in (("a", 0, 0), ("b", -te, 0), ("c", te, 0), ("d", -te, te)):
        tile = holdpv.create_rectangle(2 * te, te, 3 * te, 2 * te, fill="blue", tags=("block", tag))
        if dx or dy:
            holdpv.move(tile, dx, dy)
def spawn_j():
    """Spawn a fresh J piece (orange) at the spawn cell on the playfield `f`."""
    f.delete("block")
    global block_type
    block_type = "j"
    for tag, dx, dy in (("a", 0, 0), ("b", -te, 0), ("c", te, 0), ("d", te, te)):
        tile = f.create_rectangle(spx, spy, spx + te, spy + te, fill="orange", tags=("block", tag))
        if dx or dy:
            f.move(tile, dx, dy)
def nextup_j():
    """Draw the J piece preview (orange) on the next-up widget `pv`."""
    pv.delete("block")
    for tag, dx, dy in (("a", 0, 0), ("b", -te, 0), ("c", te, 0), ("d", te, te)):
        tile = pv.create_rectangle(2 * te, te, 3 * te, 2 * te, fill="orange", tags=("block", tag))
        if dx or dy:
            pv.move(tile, dx, dy)
def hold_j():
    """Draw the J piece (orange) on the hold widget `holdpv`."""
    holdpv.delete("block")
    for tag, dx, dy in (("a", 0, 0), ("b", -te, 0), ("c", te, 0), ("d", te, te)):
        tile = holdpv.create_rectangle(2 * te, te, 3 * te, 2 * te, fill="orange", tags=("block", tag))
        if dx or dy:
            holdpv.move(tile, dx, dy)
def spawn_o():
    """Spawn a fresh O piece (yellow) at the spawn cell on the playfield `f`."""
    f.delete("block")
    global block_type
    block_type = "o"
    for tag, dx, dy in (("a", 0, 0), ("b", -te, 0), ("c", 0, te), ("d", -te, te)):
        tile = f.create_rectangle(spx, spy, spx + te, spy + te, fill="yellow", tags=("block", tag))
        if dx or dy:
            f.move(tile, dx, dy)
def nextup_o():
    """Draw the O piece preview (yellow) on the next-up widget `pv`."""
    pv.delete("block")
    for tag, dx, dy in (("a", 0, 0), ("b", -te, 0), ("c", 0, te), ("d", -te, te)):
        tile = pv.create_rectangle(2 * te, te, 3 * te, 2 * te, fill="yellow", tags=("block", tag))
        if dx or dy:
            pv.move(tile, dx, dy)
def hold_o():
    """Draw the O piece (yellow) on the hold widget `holdpv`."""
    holdpv.delete("block")
    for tag, dx, dy in (("a", 0, 0), ("b", -te, 0), ("c", 0, te), ("d", -te, te)):
        tile = holdpv.create_rectangle(2 * te, te, 3 * te, 2 * te, fill="yellow", tags=("block", tag))
        if dx or dy:
            holdpv.move(tile, dx, dy)
def spawn_i():
    """Spawn a fresh I piece (cyan) at the spawn cell on the playfield `f`."""
    f.delete("block")
    global block_type
    block_type = "i"
    # Horizontal bar: tiles at -1, 0, +1, +2 cells from the anchor.
    for tag, dx, dy in (("a", 0, 0), ("b", -te, 0), ("c", te, 0), ("d", 2 * te, 0)):
        tile = f.create_rectangle(spx, spy, spx + te, spy + te, fill="cyan", tags=("block", tag))
        if dx or dy:
            f.move(tile, dx, dy)
def nextup_i():
    """Draw the I piece preview (cyan) on the next-up widget `pv`."""
    pv.delete("block")
    # Note: the fourth tile sits at -2 cells here (unlike spawn_i's +2),
    # keeping the 4-wide bar inside the small preview widget.
    for tag, dx, dy in (("a", 0, 0), ("b", -te, 0), ("c", te, 0), ("d", -2 * te, 0)):
        tile = pv.create_rectangle(2 * te, te, 3 * te, 2 * te, fill="cyan", tags=("block", tag))
        if dx or dy:
            pv.move(tile, dx, dy)
def hold_i():
    """Draw the I piece (cyan) on the hold widget `holdpv`."""
    holdpv.delete("block")
    # Fourth tile at -2 cells, matching nextup_i's preview layout.
    for tag, dx, dy in (("a", 0, 0), ("b", -te, 0), ("c", te, 0), ("d", -2 * te, 0)):
        tile = holdpv.create_rectangle(2 * te, te, 3 * te, 2 * te, fill="cyan", tags=("block", tag))
        if dx or dy:
            holdpv.move(tile, dx, dy)
def spawn_s():
    """Spawn a fresh S piece (green2) at the spawn cell on the playfield `f`."""
    f.delete("block")
    global block_type
    block_type = "s"
    for tag, dx, dy in (("a", 0, 0), ("b", -te, 0), ("c", -2 * te, te), ("d", -te, te)):
        tile = f.create_rectangle(spx, spy, spx + te, spy + te, fill="green2", tags=("block", tag))
        if dx or dy:
            f.move(tile, dx, dy)
def nextup_s():
    """Draw the S piece preview (green2) on the next-up widget `pv`."""
    pv.delete("block")
    for tag, dx, dy in (("a", 0, 0), ("b", -te, 0), ("c", -2 * te, te), ("d", -te, te)):
        tile = pv.create_rectangle(2 * te, te, 3 * te, 2 * te, fill="green2", tags=("block", tag))
        if dx or dy:
            pv.move(tile, dx, dy)
def hold_s():
    """Draw the S piece (green2) on the hold widget `holdpv`."""
    holdpv.delete("block")
    for tag, dx, dy in (("a", 0, 0), ("b", -te, 0), ("c", -2 * te, te), ("d", -te, te)):
        tile = holdpv.create_rectangle(2 * te, te, 3 * te, 2 * te, fill="green2", tags=("block", tag))
        if dx or dy:
            holdpv.move(tile, dx, dy)
def spawn_z():
    """Spawn a fresh Z piece (red) at the spawn cell on the playfield `f`."""
    f.delete("block")
    global block_type
    block_type = "z"
    for tag, dx, dy in (("a", 0, 0), ("b", -te, 0), ("c", 0, te), ("d", te, te)):
        tile = f.create_rectangle(spx, spy, spx + te, spy + te, fill="red", tags=("block", tag))
        if dx or dy:
            f.move(tile, dx, dy)
def nextup_z():
    """Draw the Z piece preview (red) on the next-up widget `pv`."""
    pv.delete("block")
    for tag, dx, dy in (("a", 0, 0), ("b", -te, 0), ("c", 0, te), ("d", te, te)):
        tile = pv.create_rectangle(2 * te, te, 3 * te, 2 * te, fill="red", tags=("block", tag))
        if dx or dy:
            pv.move(tile, dx, dy)
def hold_z():
    """Draw the Z piece (red) on the hold widget `holdpv`."""
    holdpv.delete("block")
    for tag, dx, dy in (("a", 0, 0), ("b", -te, 0), ("c", 0, te), ("d", te, te)):
        tile = holdpv.create_rectangle(2 * te, te, 3 * te, 2 * te, fill="red", tags=("block", tag))
        if dx or dy:
            holdpv.move(tile, dx, dy)
def gspawn_t():
    """Draw the ghost T piece (darker magenta4) at the spawn cell on `f`."""
    f.delete("ghost")
    # Ghost tiles use g-prefixed tags so they can be moved separately.
    for tag, dx, dy in (("ga", 0, 0), ("gb", -te, 0), ("gc", te, 0), ("gd", 0, te)):
        tile = f.create_rectangle(spx, spy, spx + te, spy + te, fill="magenta4", tags=("ghost", tag))
        if dx or dy:
            f.move(tile, dx, dy)
def gspawn_l():
    """Draw the ghost L piece (darker blue4) at the spawn cell on `f`."""
    f.delete("ghost")
    for tag, dx, dy in (("ga", 0, 0), ("gb", -te, 0), ("gc", te, 0), ("gd", -te, te)):
        tile = f.create_rectangle(spx, spy, spx + te, spy + te, fill="blue4", tags=("ghost", tag))
        if dx or dy:
            f.move(tile, dx, dy)
def gspawn_j():
    """Draw the ghost J piece (darker orange4) at the spawn cell on `f`."""
    f.delete("ghost")
    for tag, dx, dy in (("ga", 0, 0), ("gb", -te, 0), ("gc", te, 0), ("gd", te, te)):
        tile = f.create_rectangle(spx, spy, spx + te, spy + te, fill="orange4", tags=("ghost", tag))
        if dx or dy:
            f.move(tile, dx, dy)
def gspawn_o():
    """Draw the ghost O piece (darker yellow4) at the spawn cell on `f`."""
    f.delete("ghost")
    for tag, dx, dy in (("ga", 0, 0), ("gb", -te, 0), ("gc", 0, te), ("gd", -te, te)):
        tile = f.create_rectangle(spx, spy, spx + te, spy + te, fill="yellow4", tags=("ghost", tag))
        if dx or dy:
            f.move(tile, dx, dy)
def gspawn_i():
    """Draw the ghost I piece (darker cyan4) at the spawn cell on `f`."""
    f.delete("ghost")
    # Matches spawn_i's layout: bar tiles at -1, 0, +1, +2 cells.
    for tag, dx, dy in (("ga", 0, 0), ("gb", -te, 0), ("gc", te, 0), ("gd", 2 * te, 0)):
        tile = f.create_rectangle(spx, spy, spx + te, spy + te, fill="cyan4", tags=("ghost", tag))
        if dx or dy:
            f.move(tile, dx, dy)
def gspawn_s():
f.delete("ghost")
gtile1 = f.create_rectangle(spx, spy, spx + te, spy+ te, fill="green4", tags=("ghost", "ga"))
gtile2 = f.create_rectangle(spx, spy, spx + te, spy+ te, fill="green4", tags=("ghost", "gb") )
f.move(gtile2, -te, 0)
gtile3 = f.create_rectangle(spx, spy, spx + te, spy+ te, fill="green4", | |
#!/usr/bin/env python
import os
import json
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from scipy.stats import norm
from itertools import product
import anndata
import numpy as np
import pandas as pd
import scanpy as sc
from scipy.sparse import issparse
from sklearn.metrics import calinski_harabasz_score
"""
HashSolo script provides a probabilistic cell hashing demultiplexing method
which generates a noise distribution and signal distribution for
each hashing barcode from empirically observed counts. These distributions
are updated from the global signal and noise barcode distributions, which
helps in the setting where not many cells are observed. Signal distributions
for a hashing barcode are estimated from samples where that hashing barcode
has the highest count. Noise distributions for a hashing barcode are estimated
from samples where that hashing barcode is one of the k-2 lowest barcodes, where
k is the number of barcodes. A doublet should then have its two highest
barcode counts most likely coming from a signal distribution for those barcodes.
A singlet should have its highest barcode from a signal distribution, and its
second highest barcode from a noise distribution. A negative two highest
barcodes should come from noise distributions. We test each of these
hypotheses in a bayesian fashion, and select the most probable hypothesis.
"""
def _calculate_log_likelihoods(data, number_of_noise_barcodes):
    """Calculate log likelihoods for each hypothesis, negative, singlet, doublet
    Parameters
    ----------
    data : np.ndarray
        cells by hashing counts matrix
    number_of_noise_barcodes : int,
        number of barcodes used to calculate the noise distribution;
        if None, all but the top two barcodes are treated as noise
    Returns
    -------
    log_likelihoods_for_each_hypothesis : np.ndarray
        a 2d (cells x 3) np.array log likelihood of each hypothesis,
        columns ordered negative, singlet, doublet
    all_indices : np.ndarray
        per-cell id of the (noise barcode, signal barcode) combination it fell into
    counter_to_barcode_combo : dict
        maps that combination id to a "noiseidx_signalidx" string
    """
    def gaussian_updates(data, mu_o, std_o):
        """Update parameters of your gaussian
        https://www.cs.ubc.ca/~murphyk/Papers/bayesGauss.pdf
        Parameters
        ----------
        data : np.array
            1-d array of counts
        mu_o : float,
            global mean for hashing count distribution
        std_o : float,
            global std for hashing count distribution
        Returns
        -------
        float
            mean of gaussian
        float
            std of gaussian
        """
        # Conjugate Normal-Normal update, written in precision form (lam = 1/var).
        lam_o = 1 / (std_o ** 2)
        n = len(data)
        # Empirical precision; fall back to the global (prior) precision when
        # fewer than two observations make the sample variance undefined/zero.
        lam = 1 / np.var(data) if len(data) > 1 else lam_o
        lam_n = lam_o + n * lam
        # Posterior mean: precision-weighted blend of sample mean and global
        # mean; with no data it stays at the global mean.
        mu_n = (
            (np.mean(data) * n * lam + mu_o * lam_o) / lam_n if len(data) > 0 else mu_o
        )
        # NOTE(review): std scales lam_n by (n + 1) — looks like a
        # posterior-predictive-style variance; confirm against the HashSolo paper.
        return mu_n, (1 / (lam_n / (n + 1))) ** (1 / 2)
    eps = 1e-15  # floor inside log() so a zero density never yields -inf
    # probabilites for negative, singlet, doublets
    log_likelihoods_for_each_hypothesis = np.zeros((data.shape[0], 3))
    # Per-cell combination id; every cell is assigned in the combo loop below,
    # since each cell has exactly one (top, second) barcode pair.
    all_indices = np.empty(data.shape[0])
    num_of_barcodes = data.shape[1]
    # Default: treat only the top two barcodes per cell as potential signal.
    number_of_non_noise_barcodes = (
        num_of_barcodes - number_of_noise_barcodes
        if number_of_noise_barcodes is not None
        else 2
    )
    num_of_noise_barcodes = num_of_barcodes - number_of_non_noise_barcodes
    # assume log normal: work in log1p space so gaussian models apply
    data = np.log(data + 1)
    data_arg = np.argsort(data, axis=1)  # barcode indices per cell, ascending by count
    data_sort = np.sort(data, axis=1)  # counts per cell, ascending
    # global signal and noise counts useful for when we have few cells
    # barcodes with the highest number of counts are assumed to be a true signal
    # barcodes with rank < k are considered to be noise
    global_signal_counts = np.ravel(data_sort[:, -1])
    global_noise_counts = np.ravel(data_sort[:, :-number_of_non_noise_barcodes])
    global_mu_signal_o, global_sigma_signal_o = np.mean(global_signal_counts), np.std(
        global_signal_counts
    )
    global_mu_noise_o, global_sigma_noise_o = np.mean(global_noise_counts), np.std(
        global_noise_counts
    )
    noise_params_dict = {}
    signal_params_dict = {}
    # for each barcode get empirical noise and signal distribution parameterization
    for x in np.arange(num_of_barcodes):
        sample_barcodes = data[:, x]
        # cells where barcode x ranks among the lowest (noise) barcodes
        sample_barcodes_noise_idx = np.where(data_arg[:, :num_of_noise_barcodes] == x)[
            0
        ]
        # cells where barcode x is the top-count (signal) barcode
        sample_barcodes_signal_idx = np.where(data_arg[:, -1] == x)
        # get noise and signal counts
        noise_counts = sample_barcodes[sample_barcodes_noise_idx]
        signal_counts = sample_barcodes[sample_barcodes_signal_idx]
        # get parameters of distribution, assuming lognormal do update from global values
        noise_param = gaussian_updates(
            noise_counts, global_mu_noise_o, global_sigma_noise_o
        )
        signal_param = gaussian_updates(
            signal_counts, global_mu_signal_o, global_sigma_signal_o
        )
        noise_params_dict[x] = noise_param
        signal_params_dict[x] = signal_param
    counter_to_barcode_combo = {}
    counter = 0
    # for each combination of noise and signal barcode calculate probability of in silico and real cell hypotheses
    for noise_sample_idx, signal_sample_idx in product(
        np.arange(num_of_barcodes), np.arange(num_of_barcodes)
    ):
        # cells whose top barcode is signal_sample_idx and second-highest is noise_sample_idx
        signal_subset = data_arg[:, -1] == signal_sample_idx
        noise_subset = data_arg[:, -2] == noise_sample_idx
        subset = signal_subset & noise_subset
        if sum(subset) == 0:
            continue
        indices = np.where(subset)[0]
        barcode_combo = "_".join([str(noise_sample_idx), str(signal_sample_idx)])
        all_indices[np.where(subset)[0]] = counter
        counter_to_barcode_combo[counter] = barcode_combo
        counter += 1
        noise_params = noise_params_dict[noise_sample_idx]
        signal_params = signal_params_dict[signal_sample_idx]
        # calculate probabilties for each hypothesis for each cell
        data_subset = data[subset]
        # NOTE: each *_params is a (mu, std) 2-tuple, so params[:-2] is always
        # empty; params[-2] is mu (loc) and params[-1] is std (scale).
        # top barcode under its signal distribution
        log_signal_signal_probs = np.log(
            norm.pdf(
                data_subset[:, signal_sample_idx],
                *signal_params[:-2],
                loc=signal_params[-2],
                scale=signal_params[-1]
            )
            + eps
        )
        signal_noise_params = signal_params_dict[noise_sample_idx]
        # second barcode under a *signal* distribution (doublet evidence)
        log_noise_signal_probs = np.log(
            norm.pdf(
                data_subset[:, noise_sample_idx],
                *signal_noise_params[:-2],
                loc=signal_noise_params[-2],
                scale=signal_noise_params[-1]
            )
            + eps
        )
        # second barcode under its noise distribution
        log_noise_noise_probs = np.log(
            norm.pdf(
                data_subset[:, noise_sample_idx],
                *noise_params[:-2],
                loc=noise_params[-2],
                scale=noise_params[-1]
            )
            + eps
        )
        # top barcode under a noise distribution (negative evidence)
        log_signal_noise_probs = np.log(
            norm.pdf(
                data_subset[:, signal_sample_idx],
                *noise_params[:-2],
                loc=noise_params[-2],
                scale=noise_params[-1]
            )
            + eps
        )
        # negative: both of the two highest barcodes look like noise
        probs_of_negative = np.sum(
            [log_noise_noise_probs, log_signal_noise_probs], axis=0
        )
        # singlet: top barcode looks like signal, second looks like noise
        probs_of_singlet = np.sum(
            [log_noise_noise_probs, log_signal_signal_probs], axis=0
        )
        # doublet: both of the two highest barcodes look like signal
        probs_of_doublet = np.sum(
            [log_noise_signal_probs, log_signal_signal_probs], axis=0
        )
        log_probs_list = [probs_of_negative, probs_of_singlet, probs_of_doublet]
        # each cell and each hypothesis probability
        for prob_idx, log_prob in enumerate(log_probs_list):
            log_likelihoods_for_each_hypothesis[indices, prob_idx] = log_prob
    return log_likelihoods_for_each_hypothesis, all_indices, counter_to_barcode_combo
def _calculate_bayes_rule(data, priors, number_of_noise_barcodes):
    """
    Calculate bayes rule from log likelihoods
    Parameters
    ----------
    data : np.array
        cells by hashing counts matrix
    priors : list,
        a list of your prior for each hypothesis
        first element is your prior for the negative hypothesis
        second element is your prior for the singlet hypothesis
        third element is your prior for the doublet hypothesis
        We use [0.01, 0.8, 0.19] by default because we assume the barcodes
        in your cell hashing matrix are those cells which have passed QC
        in the transcriptome space, e.g. UMI counts, pct mito reads, etc.
    number_of_noise_barcodes : int
        number of barcodes used to calculate the noise distribution
    Returns
    -------
    bayes_dict_results : dict
        'most_likely_hypothesis' key is a 1d np.array of the most likely hypothesis
        'probs_hypotheses' key is a 2d np.array probability of each hypothesis
        'log_likelihoods_for_each_hypothesis' key is a 2d np.array log likelihood of each hypothesis
    """
    priors = np.array(priors)
    log_likelihoods_for_each_hypothesis, _, _ = _calculate_log_likelihoods(
        data, number_of_noise_barcodes
    )
    # Numerical-stability fix: exponentiating strongly negative log likelihoods
    # directly underflows to 0, so the previous exp()/sum(exp()) ratio could
    # produce 0/0 = NaN posteriors. Shifting every row by its max log
    # likelihood before exp() cancels in the ratio (log-sum-exp trick) and
    # leaves the posterior probabilities mathematically unchanged.
    shifted_log_likelihoods = log_likelihoods_for_each_hypothesis - np.max(
        log_likelihoods_for_each_hypothesis, axis=1, keepdims=True
    )
    weighted_likelihoods = np.exp(shifted_log_likelihoods) * priors
    probs_hypotheses = weighted_likelihoods / np.sum(
        weighted_likelihoods, axis=1, keepdims=True
    )
    most_likely_hypothesis = np.argmax(probs_hypotheses, axis=1)
    return {
        "most_likely_hypothesis": most_likely_hypothesis,
        "probs_hypotheses": probs_hypotheses,
        "log_likelihoods_for_each_hypothesis": log_likelihoods_for_each_hypothesis,
    }
def _get_clusters(clustering_data: anndata.AnnData, resolutions: list):
    """
    Principled cell clustering
    Parameters
    ----------
    clustering_data : anndata.AnnData
        transcriptional (cells x genes) data used to cluster the cells
    resolutions : list
        clustering resolutions for leiden
    Returns
    -------
    np.ndarray
        leiden clustering results for each cell, taken from the resolution
        whose labelling scored best under the Calinski-Harabasz criterion
    """
    # Standard scanpy preprocessing: depth-normalize, log-transform, subset to
    # highly variable genes, scale, then build the PCA / neighbor graph.
    sc.pp.normalize_per_cell(clustering_data, counts_per_cell_after=1e4)
    sc.pp.log1p(clustering_data)
    sc.pp.highly_variable_genes(
        clustering_data, min_mean=0.0125, max_mean=3, min_disp=0.5
    )
    clustering_data = clustering_data[:, clustering_data.var["highly_variable"]]
    sc.pp.scale(clustering_data, max_value=10)
    sc.tl.pca(clustering_data, svd_solver="arpack")
    sc.pp.neighbors(clustering_data, n_neighbors=10, n_pcs=40)
    # NOTE(review): the UMAP embedding is computed but never referenced below.
    sc.tl.umap(clustering_data)
    best_ch_score = -np.inf
    # Run leiden at each resolution and keep the labelling with the highest
    # Calinski-Harabasz score in obs["best_leiden"].
    for resolution in resolutions:
        sc.tl.leiden(clustering_data, resolution=resolution)
        ch_score = calinski_harabasz_score(
            clustering_data.X, clustering_data.obs["leiden"]
        )
        if ch_score > best_ch_score:
            clustering_data.obs["best_leiden"] = clustering_data.obs["leiden"].values
            best_ch_score = ch_score
    return clustering_data.obs["best_leiden"].values
def hashsolo(
cell_hashing_adata: anndata.AnnData,
priors: list = [0.01, 0.8, 0.19],
pre_existing_clusters: str = None,
clustering_data: anndata.AnnData = None,
resolutions: list = [0.1, 0.25, 0.5, 0.75, 1],
number_of_noise_barcodes: int = None,
inplace: bool = True,
):
"""Demultiplex cell hashing dataset using HashSolo method
Parameters
----------
cell_hashing_adata : anndata.AnnData
Anndata object filled only with hashing counts
priors : list,
a list of your prior for each hypothesis
first element is your prior for the negative hypothesis
second element is your prior for the singlet hypothesis
third element is your prior for the doublet hypothesis
We use [0.01, 0.8, 0.19] by default because we assume the barcodes
in your cell hashing matrix are those cells which have passed QC
in the transcriptome space, e.g. UMI counts, pct mito reads, etc.
clustering_data : anndata.AnnData
transcriptional data for clustering
resolutions : list
clustering resolutions for leiden
pre_existing_clusters : str
column in cell_hashing_adata.obs for how to break up demultiplexing
inplace : bool
To do operation in place
Returns
-------
cell_hashing_adata : AnnData
if inplace is False returns AnnData with demultiplexing results
in .obs attribute otherwise does is in place
"""
if issparse(cell_hashing_adata.X):
cell_hashing_adata.X = np.array(cell_hashing_adata.X.todense())
if clustering_data is not None:
print(
"This may take awhile we are running clustering at {} different resolutions".format(
len(resolutions)
)
)
if not all(clustering_data.obs_names == cell_hashing_adata.obs_names):
raise ValueError(
"clustering_data and cell hashing cell_hashing_adata must have same index"
)
cell_hashing_adata.obs["best_leiden"] = _get_clusters(
clustering_data, resolutions
)
data = cell_hashing_adata.X
num_of_cells = cell_hashing_adata.shape[0]
results = pd.DataFrame(
np.zeros((num_of_cells, 6)),
columns=[
"most_likely_hypothesis",
"probs_hypotheses",
"cluster_feature",
"negative_hypothesis_probability",
"singlet_hypothesis_probability",
"doublet_hypothesis_probability",
],
index=cell_hashing_adata.obs_names,
)
if clustering_data is not None or pre_existing_clusters is not None:
cluster_features = (
"best_leiden" if pre_existing_clusters is None else pre_existing_clusters
)
unique_cluster_features = np.unique(cell_hashing_adata.obs[cluster_features])
for cluster_feature in unique_cluster_features:
cluster_feature_bool_vector = (
cell_hashing_adata.obs[cluster_features] == cluster_feature
)
posterior_dict = _calculate_bayes_rule(
| |
self._entity_data:
return self._entity_data.get('damage')
return "0"
@property
def LightningStart(self):
if "LightningStart" in self._entity_data:
return self._entity_data.get('LightningStart')
return ""
@property
def LightningEnd(self):
if "LightningEnd" in self._entity_data:
return self._entity_data.get('LightningEnd')
return ""
@property
def decalname(self):
if "decalname" in self._entity_data:
return self._entity_data.get('decalname')
return "Bigshot"
@property
def HDRColorScale(self):
if "HDRColorScale" in self._entity_data:
return float(self._entity_data.get('HDRColorScale'))
return float(1.0)
@property
def targetpoint(self):
if "targetpoint" in self._entity_data:
return parse_int_vector(self._entity_data.get('targetpoint'))
return parse_int_vector("0 0 0")
@property
def spawnflags(self):
flags = []
if "spawnflags" in self._entity_data:
value = self._entity_data.get("spawnflags", None)
for name, (key, _) in {'Start On': (1, 0), 'Toggle': (2, 0), 'Random Strike': (4, 0), 'Ring': (8, 0),
'StartSparks': (16, 0), 'EndSparks': (32, 0), 'Decal End': (64, 0),
'Shade Start': (128, 0), 'Shade End': (256, 0), 'Taper Out': (512, 0)}.items():
if value & key > 0:
flags.append(name)
return flags
@property
def TouchType(self):
if "TouchType" in self._entity_data:
return self._entity_data.get('TouchType')
return "0"
@property
def ClipStyle(self):
if "ClipStyle" in self._entity_data:
return self._entity_data.get('ClipStyle')
return "0"
@property
def filtername(self):
if "filtername" in self._entity_data:
return self._entity_data.get('filtername')
return None
class env_beverage(Targetname, Parentname):
    # Generated entity wrapper: each property reads a keyvalue from
    # self._entity_data and falls back to its FGD default when unset.

    @property
    def health(self):
        # 'health' keyvalue as int; defaults to 10.
        try:
            return int(self._entity_data['health'])
        except KeyError:
            return 10

    @property
    def beveragetype(self):
        # 'beveragetype' keyvalue; defaults to "0".
        try:
            return self._entity_data['beveragetype']
        except KeyError:
            return "0"
class env_funnel(Targetname, Parentname):
    @property
    def spawnflags(self):
        # Decode the 'spawnflags' bitmask into the list of set flag names.
        if "spawnflags" not in self._entity_data:
            return []
        value = self._entity_data["spawnflags"]
        flag_bits = {'Reverse': (1, 0)}
        return [name for name, (bit, _) in flag_bits.items() if value & bit > 0]
class env_blood(Targetname, Parentname):
    # Generated entity wrapper: keyvalue accessors with FGD defaults.

    @property
    def spraydir(self):
        # 'spraydir' keyvalue parsed as an int vector; defaults to the origin.
        try:
            return parse_int_vector(self._entity_data['spraydir'])
        except KeyError:
            return parse_int_vector("0 0 0")

    @property
    def color(self):
        # 'color' keyvalue; defaults to "0".
        try:
            return self._entity_data['color']
        except KeyError:
            return "0"

    @property
    def amount(self):
        # 'amount' keyvalue; defaults to "100".
        try:
            return self._entity_data['amount']
        except KeyError:
            return "100"

    @property
    def spawnflags(self):
        # Decode the 'spawnflags' bitmask into the list of set flag names.
        if "spawnflags" not in self._entity_data:
            return []
        value = self._entity_data["spawnflags"]
        flag_bits = {'Random Direction': (1, 0), 'Blood Stream': (2, 0), 'On Player': (4, 0),
                     'Spray decals': (8, 0)}
        return [name for name, (bit, _) in flag_bits.items() if value & bit > 0]
class env_bubbles(Targetname, Parentname):
    # Generated entity wrapper: keyvalue accessors with FGD defaults.

    @property
    def density(self):
        # 'density' keyvalue as int; defaults to 2.
        try:
            return int(self._entity_data['density'])
        except KeyError:
            return 2

    @property
    def frequency(self):
        # 'frequency' keyvalue as int; defaults to 2.
        try:
            return int(self._entity_data['frequency'])
        except KeyError:
            return 2

    @property
    def current(self):
        # 'current' keyvalue as int; defaults to 0.
        try:
            return int(self._entity_data['current'])
        except KeyError:
            return 0

    @property
    def spawnflags(self):
        # Decode the 'spawnflags' bitmask into the list of set flag names.
        if "spawnflags" not in self._entity_data:
            return []
        value = self._entity_data["spawnflags"]
        flag_bits = {'Start Off': (1, 0)}
        return [name for name, (bit, _) in flag_bits.items() if value & bit > 0]
class env_explosion(Targetname, Parentname, RenderModeChoices):
    # Generated entity wrapper: keyvalue accessors with FGD defaults.
    icon_sprite = "editor/env_explosion.vmat"

    @property
    def iMagnitude(self):
        # 'iMagnitude' keyvalue as int; defaults to 100.
        try:
            return int(self._entity_data['iMagnitude'])
        except KeyError:
            return 100

    @property
    def iRadiusOverride(self):
        # 'iRadiusOverride' keyvalue as int; defaults to 0.
        try:
            return int(self._entity_data['iRadiusOverride'])
        except KeyError:
            return 0

    @property
    def spawnflags(self):
        # Decode the 'spawnflags' bitmask into the list of set flag names.
        if "spawnflags" not in self._entity_data:
            return []
        value = self._entity_data["spawnflags"]
        flag_bits = {'No Damage': (1, 0), 'Repeatable': (2, 0), 'No Decal': (16, 0), 'No Sound': (64, 0),
                     'Damage above water surface only': (8192, 0), 'Generic damage': (16384, 0)}
        return [name for name, (bit, _) in flag_bits.items() if value & bit > 0]

    @property
    def explosion_type(self):
        # 'explosion_type' keyvalue; defaults to "".
        try:
            return self._entity_data['explosion_type']
        except KeyError:
            return ""

    @property
    def explosion_custom_effect(self):
        # 'explosion_custom_effect' keyvalue, or None when unset.
        return self._entity_data.get('explosion_custom_effect')

    @property
    def explosion_custom_sound(self):
        # 'explosion_custom_sound' keyvalue; defaults to "".
        try:
            return self._entity_data['explosion_custom_sound']
        except KeyError:
            return ""

    @property
    def ignoredEntity(self):
        # 'ignoredEntity' keyvalue, or None when unset.
        return self._entity_data.get('ignoredEntity')

    @property
    def ignoredClass(self):
        # 'ignoredClass' keyvalue as int; defaults to 0.
        try:
            return int(self._entity_data['ignoredClass'])
        except KeyError:
            return 0
class env_smoketrail(Targetname, Parentname):
    # Generated entity wrapper: keyvalue accessors with FGD defaults.

    def _keyvalue_float(self, key, default):
        # Shared lookup: keyvalue coerced to float, or `default` when unset.
        try:
            return float(self._entity_data[key])
        except KeyError:
            return default

    @property
    def opacity(self):
        return self._keyvalue_float('opacity', 0.75)

    @property
    def spawnrate(self):
        return self._keyvalue_float('spawnrate', 20.0)

    @property
    def lifetime(self):
        return self._keyvalue_float('lifetime', 5.0)

    @property
    def startcolor(self):
        # 'startcolor' keyvalue parsed as an int vector; defaults to light grey.
        try:
            return parse_int_vector(self._entity_data['startcolor'])
        except KeyError:
            return parse_int_vector("192 192 192")

    @property
    def endcolor(self):
        # 'endcolor' keyvalue parsed as an int vector; defaults to darker grey.
        try:
            return parse_int_vector(self._entity_data['endcolor'])
        except KeyError:
            return parse_int_vector("160 160 160")

    @property
    def emittime(self):
        return self._keyvalue_float('emittime', 0.0)

    @property
    def minspeed(self):
        return self._keyvalue_float('minspeed', 10.0)

    @property
    def maxspeed(self):
        return self._keyvalue_float('maxspeed', 20.0)

    @property
    def mindirectedspeed(self):
        return self._keyvalue_float('mindirectedspeed', 0.0)

    @property
    def maxdirectedspeed(self):
        return self._keyvalue_float('maxdirectedspeed', 0.0)

    @property
    def startsize(self):
        return self._keyvalue_float('startsize', 15.0)

    @property
    def endsize(self):
        return self._keyvalue_float('endsize', 50.0)

    @property
    def spawnradius(self):
        return self._keyvalue_float('spawnradius', 15.0)

    @property
    def firesprite(self):
        # 'firesprite' keyvalue; defaults to "sprites/firetrail.spr".
        try:
            return self._entity_data['firesprite']
        except KeyError:
            return "sprites/firetrail.spr"

    @property
    def smokesprite(self):
        # 'smokesprite' keyvalue; defaults to "sprites/whitepuff.spr".
        try:
            return self._entity_data['smokesprite']
        except KeyError:
            return "sprites/whitepuff.spr"
class env_physexplosion(Targetname, Parentname):
    # Generated entity wrapper: keyvalue accessors with FGD defaults.
    icon_sprite = "editor/env_physexplosion.vmat"

    @property
    def magnitude(self):
        # 'magnitude' keyvalue (kept as string); defaults to "100".
        try:
            return self._entity_data['magnitude']
        except KeyError:
            return "100"

    @property
    def radius(self):
        # 'radius' keyvalue (kept as string); defaults to "0".
        try:
            return self._entity_data['radius']
        except KeyError:
            return "0"

    @property
    def targetentityname(self):
        # 'targetentityname' keyvalue; defaults to "".
        try:
            return self._entity_data['targetentityname']
        except KeyError:
            return ""

    @property
    def explodeonspawn(self):
        # 'explodeonspawn' keyvalue coerced to bool; False when unset.
        try:
            return bool(self._entity_data['explodeonspawn'])
        except KeyError:
            return False

    @property
    def spawnflags(self):
        # Decode the 'spawnflags' bitmask into the list of set flag names.
        if "spawnflags" not in self._entity_data:
            return []
        value = self._entity_data["spawnflags"]
        flag_bits = {'No Damage - Only Force': (1, 1), 'Push players': (2, 0),
                     'Push radially - not as a sphere': (4, 0), 'Test LOS before pushing': (8, 0),
                     'Disorient player if pushed': (16, 0)}
        return [name for name, (bit, _) in flag_bits.items() if value & bit > 0]

    @property
    def inner_radius(self):
        # 'inner_radius' keyvalue as float; defaults to 0.0.
        try:
            return float(self._entity_data['inner_radius'])
        except KeyError:
            return 0.0

    @property
    def pushscale(self):
        # 'pushscale' keyvalue as float; defaults to 1.0.
        try:
            return float(self._entity_data['pushscale'])
        except KeyError:
            return 1.0

    @property
    def ConvertToDebrisWhenPossible(self):
        # 'ConvertToDebrisWhenPossible' keyvalue; defaults to "0".
        try:
            return self._entity_data['ConvertToDebrisWhenPossible']
        except KeyError:
            return "0"
class env_physimpact(Targetname, Parentname):
    # Generated entity wrapper: keyvalue accessors with FGD defaults.
    icon_sprite = "editor/env_physexplosion.vmat"

    @property
    def angles(self):
        # 'angles' keyvalue (kept as string); defaults to "0 0 0".
        try:
            return self._entity_data['angles']
        except KeyError:
            return "0 0 0"

    @property
    def magnitude(self):
        # 'magnitude' keyvalue as int; defaults to 100.
        try:
            return int(self._entity_data['magnitude'])
        except KeyError:
            return 100

    @property
    def distance(self):
        # 'distance' keyvalue as int; defaults to 0.
        try:
            return int(self._entity_data['distance'])
        except KeyError:
            return 0

    @property
    def directionentityname(self):
        # 'directionentityname' keyvalue; defaults to "".
        try:
            return self._entity_data['directionentityname']
        except KeyError:
            return ""

    @property
    def spawnflags(self):
        # Decode the 'spawnflags' bitmask into the list of set flag names.
        if "spawnflags" not in self._entity_data:
            return []
        value = self._entity_data["spawnflags"]
        flag_bits = {'No fall-off': (1, 0), 'Infinite Length': (2, 0), 'Ignore Mass': (4, 0),
                     'Ignore Surface Normal When Applying Force': (8, 1)}
        return [name for name, (bit, _) in flag_bits.items() if value & bit > 0]
class env_fire(Targetname, Parentname, EnableDisable):
    # Generated entity wrapper: keyvalue accessors with FGD defaults.
    icon_sprite = "editor/env_fire"

    @property
    def health(self):
        # 'health' keyvalue as int; defaults to 30.
        try:
            return int(self._entity_data['health'])
        except KeyError:
            return 30

    @property
    def firesize(self):
        # 'firesize' keyvalue as int; defaults to 64.
        try:
            return int(self._entity_data['firesize'])
        except KeyError:
            return 64

    @property
    def fireattack(self):
        # 'fireattack' keyvalue as int; defaults to 4.
        try:
            return int(self._entity_data['fireattack'])
        except KeyError:
            return 4

    @property
    def firetype(self):
        # 'firetype' keyvalue; defaults to "0".
        try:
            return self._entity_data['firetype']
        except KeyError:
            return "0"

    @property
    def spawnflags(self):
        # Decode the 'spawnflags' bitmask into the list of set flag names.
        if "spawnflags" not in self._entity_data:
            return []
        value = self._entity_data["spawnflags"]
        flag_bits = {'Infinite Duration': (1, 0), 'Smokeless': (2, 0), 'Start On': (4, 0),
                     'Start Full': (8, 0), "Don't drop": (16, 0), 'No glow': (32, 0),
                     'Delete when out': (128, 0), 'Visible from above': (256, 0)}
        return [name for name, (bit, _) in flag_bits.items() if value & bit > 0]

    @property
    def ignitionpoint(self):
        # 'ignitionpoint' keyvalue as float; defaults to 32.0.
        try:
            return float(self._entity_data['ignitionpoint'])
        except KeyError:
            return 32.0

    @property
    def damagescale(self):
        # 'damagescale' keyvalue as float; defaults to 1.0.
        try:
            return float(self._entity_data['damagescale'])
        except KeyError:
            return 1.0
class env_firesource(Targetname, Parentname):
    """Map-entity wrapper exposing env_firesource keyvalues with their defaults."""

    icon_sprite = "editor/env_firesource"

    @property
    def spawnflags(self):
        """Names of the spawnflag bits that are set on this entity."""
        if "spawnflags" not in self._entity_data:
            return []
        value = self._entity_data.get("spawnflags")
        return [name for name, bit in {'Start On': 1}.items() if value & bit]

    @property
    def fireradius(self):
        """Keyvalue 'fireradius' as a float; 128 when absent."""
        return float(self._entity_data.get('fireradius', 128))

    @property
    def firedamage(self):
        """Keyvalue 'firedamage' as a float; 10 when absent."""
        return float(self._entity_data.get('firedamage', 10))
class env_firesensor(Targetname, Parentname):
    """Map-entity wrapper exposing env_firesensor keyvalues with their defaults."""

    @property
    def spawnflags(self):
        """Names of the spawnflag bits that are set on this entity."""
        if "spawnflags" not in self._entity_data:
            return []
        value = self._entity_data.get("spawnflags")
        return [name for name, bit in {'Start On': 1}.items() if value & bit]

    @property
    def fireradius(self):
        """Keyvalue 'fireradius' as a float; 128 when absent."""
        return float(self._entity_data.get('fireradius', 128))

    @property
    def heatlevel(self):
        """Keyvalue 'heatlevel' as a float; 32 when absent."""
        return float(self._entity_data.get('heatlevel', 32))

    @property
    def heattime(self):
        """Keyvalue 'heattime' as a float; 0 when absent."""
        return float(self._entity_data.get('heattime', 0))
class env_entity_igniter(Targetname):
    """Map-entity wrapper exposing env_entity_igniter keyvalues with their defaults."""

    @property
    def target(self):
        """Keyvalue 'target'; None when absent."""
        return self._entity_data.get('target')

    @property
    def lifetime(self):
        """Keyvalue 'lifetime' as a float; 10 when absent."""
        return float(self._entity_data.get('lifetime', 10))
class env_fog_controller(Targetname, SystemLevelChoice):
pass
icon_sprite = "materials/editor/env_fog_controller.vmat"
@property
def fogenable(self):
    """Keyvalue 'fogenable' coerced with bool(); True when absent.

    NOTE(review): bool() on a raw keyvalue treats the string "0" as True;
    this assumes upstream parsing already produced an int/bool - confirm.
    """
    return bool(self._entity_data.get('fogenable', 1))
@property
def fogblend(self):
    """Keyvalue 'fogblend' coerced with bool(); False when absent.

    NOTE(review): bool() on a raw keyvalue treats the string "0" as True;
    this assumes upstream parsing already produced an int/bool - confirm.
    """
    return bool(self._entity_data.get('fogblend', 0))
@property
def use_angles(self):
    """Keyvalue 'use_angles' coerced with bool(); False when absent.

    NOTE(review): bool() on a raw keyvalue treats the string "0" as True;
    this assumes upstream parsing already produced an int/bool - confirm.
    """
    return bool(self._entity_data.get('use_angles', 0))
@property
def fogcolor(self):
    """Keyvalue 'fogcolor' parsed as an int vector; white ("255 255 255") when absent."""
    return parse_int_vector(self._entity_data.get('fogcolor', "255 255 255"))
@property
def fogcolor2(self):
    """Keyvalue 'fogcolor2' parsed as an int vector; white ("255 255 255") when absent."""
    return parse_int_vector(self._entity_data.get('fogcolor2', "255 255 255"))
@property
def fogdir(self):
if "fogdir" in self._entity_data:
return | |
# Repository: atruszkowska/NR-population-revac
# ------------------------------------------------------------------
#
# Module for generation of households in an ABM population
#
# ------------------------------------------------------------------
import math, copy
import random, warnings
# import abm_utils as aut
class Households(object):
''' Class for generation of households '''
def __init__(self, n_tot, fres, res_map=None, funit=None):
    ''' Generate individual households from input data

        n_tot - total number of households according to census
        fres - name of the file with residential data, first line assumed header
        res_map - name of the file with data for mapping residential types
        funit - multiunit stats file for creating multiunit buildings from ArcGIS

        A household is defined as a single living unit. fr_vacant will
        not be occupied but will be stored and reported.
    '''
    # Census target for total households
    self.ntot = n_tot
    # Units per floor - computed later for the legacy GIS workflow
    self.n_u_fl = 0
    # Loaded residential buildings and generated households
    self.res_buildings = []
    self.households = []
    # Households without retirement homes if merging
    self.houses_no_ret = []
    # Per-category counters used for validation/testing
    self.test_map = {}
    if funit is None:
        # Legacy GIS workflow: requires the residential-type map
        self.res_map = {}
        self.read_gis_data(fres)
        self.read_gis_types(res_map)
        self.create_households()
    else:
        # ArcGIS workflow: requires the housing-unit distribution map
        self.unit_map = {}
        self.read_gis_data_arcgis(fres)
        self.read_unit_stats(funit)
        self.create_households_arcgis()
def read_gis_data(self, fname):
''' Load GIS data on residential buidlings from a file '''
with open(fname, 'r') as fin:
# Skip the header
next(fin)
for line in fin:
temp = {}
line = line.strip().split()
# Common information
temp['type'] = line[0]
temp['lon'] = float(line[2])
temp['lat'] = float(line[1])
temp['is_business_multi'] = False
# Units/floors
if line[0] == 'B':
temp['floors'] = int(line[3])
temp['units'] = 0
elif line[0] == 'D':
temp['floors'] = 0
temp['units'] = int(line[3])
elif line[0] == 'A':
temp['floors'] = 0
temp['units'] = 0
elif line[0] == 'C':
# Check if and apartment building (ignoring townhouses)
if (len(line) > 3) and (line[3].isdigit()):
temp['floors'] = int(line[3])
temp['units'] = 0
temp['is_business_multi'] = True
else:
temp['floors'] = 0
temp['units'] = 0
else:
raise ValueError('Wrong type of residential building in the input')
self.res_buildings.append(temp)
def read_gis_data_arcgis(self, fname):
''' Load residential buildings collected with the ArcGIS approach '''
with open(fname, 'r') as fin:
# Skip header line
next(fin)
for line in fin:
temp = {}
line = line.strip().split()
# Add line data to temp
temp['type'] = line[0]
temp['lon'] = float(line[1])
temp['lat'] = float(line[2])
# Add data from temp to buildings list
self.res_buildings.append(temp)
def read_gis_types(self, fname):
''' Loads a map with GIS residential building types and descriptions '''
with open(fname, 'r') as fin:
for line in fin:
line = line.strip().split()
self.res_map[line[0]] = (' ').join(line[2:])
def read_unit_stats(self, fname):
''' loads a map with housing unit distribution'''
with open(fname, 'r') as fin:
for line in fin:
line = line.strip().split()
self.unit_map[line[0]] = line[1]
def create_households_arcgis(self):
    ''' Create households (including multi-unit buildings) using ArcGIS approach

        Consumes self.res_buildings (loaded by read_gis_data_arcgis) and
        self.unit_map (loaded by read_unit_stats). Populates self.households
        via self.add_household and records per-category counts in
        self.test_map for validation.
    '''
    # Test counters: test_count tracks units added per category;
    # excess accumulates households that could not be placed in their
    # exact category and are rolled into the 20+ buildings at the end
    test_count = 0
    excess = 0
    # Flag to switch to all town random choice
    ran_out = False
    ID = 1
    # --- Creating 1 unit households
    # Get number of 1 unit households from unit map - 'hh' for households
    hh = int(self.unit_map['1_detached']) + int(self.unit_map['1_attached']) + int(self.unit_map['mobile']) + int(
        self.unit_map['other'])
    # Check list length is greater than or equal to than sample size
    if hh <= len(self.res_buildings):
        res_sample = random.sample(range(0, len(self.res_buildings)), hh)
        # Add directly to database
        for bID in res_sample:
            self.add_household(ID, self.res_buildings[bID])
            ID += 1
            test_count += 1
        self.test_map['1_unit'] = test_count
        test_count = 0
        # Removing 1 unit buildings from res_buildings
        self.res_buildings = [x for ind, x in enumerate(self.res_buildings) if ind not in res_sample]
    else:
        raise Exception("Error! 1-unit sample size is greater than residential buildings list")
    # --- Creating 2 unit households and households with ranges
    excludeList = ['1_attached', '1_detached', '20+', 'mobile', 'other']
    # In case adding randomly: snapshot of the remaining buildings, used
    # as a fallback pool once res_buildings runs out
    multi_unit_buildings = copy.deepcopy(self.res_buildings)
    for key, value in self.unit_map.items():
        if key not in excludeList:
            # Get the number of units per building (keys here are numeric strings)
            units = int(key)
            # Get the number of households
            hh = int(value)
            # Calculate the number of buildings with this many households
            nbuild = math.floor(hh / units)
            # Randomly assign
            if 0 < nbuild <= len(self.res_buildings):
                res_sample = random.sample(range(0, len(self.res_buildings)), nbuild)
                for bID in res_sample:
                    # Duplicate entries for each building based on number of units in the building
                    for unit in range(units):
                        self.add_household(ID, self.res_buildings[bID])
                        ID += 1
                        # Increment debugging counter to get number of units added
                        test_count += 1
                self.test_map[units] = test_count
                # Keep track of remainder to add to 20+
                excess += hh - test_count
                test_count = 0
                # Delete multi-unit buildings from res_buildings
                self.res_buildings = [x for ind, x in enumerate(self.res_buildings) if ind not in res_sample]
            else:
                # NOTE(review): this branch also runs when nbuild == 0
                # (hh < units), issuing the warning and sampling zero
                # buildings - confirm that is intended
                warnings.warn("Number of multi-unit buildings less than requested, adding randomly")
                res_sample = random.sample(range(0, len(multi_unit_buildings)), nbuild)
                for bID in res_sample:
                    # Duplicate entries for each building based on number of units in the building
                    for unit in range(units):
                        self.add_household(ID, multi_unit_buildings[bID])
                        ID += 1
                        # Increment debugging counter to get number of units added
                        test_count += 1
                self.test_map[units] = test_count
                # Keep track of remainder to add to 20+
                ran_out = True
                excess += hh - test_count
                test_count = 0
    # --- Creating 20+ unit buildings
    units = 20
    # counter for 20+ unit households
    count_20 = 0
    # Get the target number of households in buildings with more than 20 units
    hh = int(self.unit_map['20+'])
    # Get the maximum number of buildings to be sampled
    nbuild = math.floor(hh/units)
    # Get a random sample of indices
    # NOTE(review): raises ValueError if fewer than nbuild buildings
    # remain in res_buildings - confirm inputs guarantee enough stock
    res_sample = random.sample(range(0, len(self.res_buildings)), nbuild)
    # Dump all buildings that are not included in the sample
    self.res_buildings = [x for ind, x in enumerate(self.res_buildings) if ind in res_sample]
    # Add units sequentially while the total households added is below the target
    while count_20 < (hh+excess):
        if ran_out == False:
            for building in self.res_buildings:
                self.add_household(ID, building)
                # Track number of units for each 20+ building
                if 'n_units' in building:
                    building['n_units'] += 1
                else:
                    building['n_units'] = 1
                # Increment ID and unit counter
                ID += 1
                count_20 += 1
                # Break if target reached
                # NOTE(review): this compares count_20+excess against hh
                # while the loop condition compares count_20 against
                # hh+excess; for excess > 0 they disagree and the fill
                # becomes uneven across buildings - confirm intent
                if (count_20+excess) >= hh:
                    break
        else:
            # No more buildings left, distribute randomly across all multiunit
            random.shuffle(multi_unit_buildings)
            for building in multi_unit_buildings:
                self.add_household(ID, building)
                # Track number of units for each 20+ building
                if 'n_units' in building:
                    building['n_units'] += 1
                else:
                    building['n_units'] = 1
                # Increment ID and unit counter
                ID += 1
                count_20 += 1
                # Break if target reached (same asymmetry as above)
                if (count_20+excess) >= hh:
                    break
    # Store number of units added for testing
    self.test_map['20+'] = count_20-excess
    self.test_map['remainder added to 20+ buildings'] = excess
def create_households(self):
    ''' Create and store all the households

        Legacy GIS workflow: walks self.res_buildings, expands each
        building into one household per living unit via self.add_household,
        then corrects rounding so the total matches self.ntot.
    '''
    # Count all known households and floors
    self.n_u_fl, n_acd = self.compute_units_per_floor()
    # Create individual households
    temp = {}  # NOTE(review): appears unused in this method
    ID = 1
    # Total number in type B (multiunit buildings)
    cur_B = 0
    for building in self.res_buildings:
        if (building['type'] == 'A'):
            # Single household residences
            self.add_household(ID, building)
            ID += 1
        elif building['type'] == 'D':
            # Townhouses: one household per unit
            for unit in range(building['units']):
                self.add_household(ID, building)
                ID += 1
        elif building['type'] == 'B':
            # Apartment buildings/multi unit buildings:
            # units = floors * units-per-floor
            n_units_bld = building['floors'] * self.n_u_fl
            cur_B += n_units_bld
            for unit in range(n_units_bld):
                self.add_household(ID, building)
                ID += 1
        elif building['type'] == 'C':
            if building['floors'] == 0:
                # Single household with a business
                self.add_household(ID, building)
                ID += 1
            else:
                # Apartment complex with a business
                n_units_bld = building['floors'] * self.n_u_fl
                cur_B += n_units_bld
                for unit in range(n_units_bld):
                    self.add_household(ID, building)
                    ID += 1
        else:
            raise ValueError('Wrong type of residential building in the input')
    # Correct for rounding - adds a unit per building until number of
    # households equals requested
    wanted_B = self.ntot - n_acd
    if not (cur_B == wanted_B):
        self.add_units(cur_B, wanted_B, ID)
def compute_units_per_floor(self):
''' Returns number of households per building floor
and total number of housholds except for multiunit
buildings '''
n_fl_tot | |
from functools import partial
import inspect
import math
from numpy.testing import assert_allclose
import onnx
import os
import pytest
import tempfile
import torch
import torch.nn.functional as F
from onnxruntime import set_seed
from onnxruntime.capi.ort_trainer import IODescription as Legacy_IODescription,\
ModelDescription as Legacy_ModelDescription,\
LossScaler as Legacy_LossScaler,\
ORTTrainer as Legacy_ORTTrainer
from onnxruntime.training import _utils, amp, checkpoint, optim, orttrainer, TrainStepInfo,\
model_desc_validation as md_val,\
orttrainer_options as orttrainer_options
import _test_commons,_test_helpers
###############################################################################
# Helper functions ############################################################
###############################################################################
def _load_pytorch_transformer_model(device, dynamic_axes=False, legacy_api=False):
    """Load the sample Pytorch TransformerModel plus its ORT descriptions and data.

    Returns (model, model_desc, loss_fn, get_batch, train_data, val_data, test_data).
    """
    # The sample lives in the repo's samples tree; import its modules by path
    sample_dir = os.path.join('samples', 'python', 'pytorch_transformer')
    pt_model = _utils.import_module_from_file(os.path.join(sample_dir, 'pt_model.py'))
    ort_utils = _utils.import_module_from_file(os.path.join(sample_dir, 'ort_utils.py'))
    utils = _utils.import_module_from_file(os.path.join(sample_dir, 'utils.py'))
    # Modeling
    model = pt_model.TransformerModel(28785, 200, 2, 200, 2, 0.2).to(device)
    my_loss = ort_utils.my_loss
    # Pick the model description matching the requested API flavor
    description_factories = {
        (True, True): ort_utils.legacy_transformer_model_description_dynamic_axes,
        (True, False): ort_utils.legacy_transformer_model_description,
        (False, True): ort_utils.transformer_model_description_dynamic_axes,
        (False, False): ort_utils.transformer_model_description,
    }
    model_desc = description_factories[(bool(legacy_api), bool(dynamic_axes))]()
    # Preparing data
    train_data, val_data, test_data = utils.prepare_data(device, 20, 20)
    return model, model_desc, my_loss, utils.get_batch, train_data, val_data, test_data
###############################################################################
# Testing starts here #########################################################
###############################################################################
@pytest.mark.parametrize("test_input", [
    ({}),
    ({'batch': {},
      'device': {},
      'distributed': {},
      'mixed_precision': {},
      'utils': {},
      '_internal_use': {}})
])
def testORTTrainerOptionsDefaultValues(test_input):
    ''' Test different ways of using default values for incomplete input'''
    # Both an empty options dict and a dict of empty sections must expand
    # to the full default tree below (must match the ORTTrainerOptions schema).
    expected_values = {
        'batch': {
            'gradient_accumulation_steps': 1
        },
        'device': {
            'id': 'cuda',
            'mem_limit': 0
        },
        'distributed': {
            'world_rank': 0,
            'world_size': 1,
            'local_rank': 0,
            'allreduce_post_accumulation': False,
            'deepspeed_zero_optimization': {
                'stage': 0,
            },
            'enable_adasum': False
        },
        'lr_scheduler': None,
        'mixed_precision': {
            'enabled': False,
            'loss_scaler': None
        },
        'graph_transformer': {
            'attn_dropout_recompute': False,
            'gelu_recompute': False,
            'transformer_layer_recompute': False,
            'number_recompute_layers': 0
        },
        'utils': {
            'frozen_weights': [],
            'grad_norm_clip': True,
            'invertible_layer_norm_gradient': False,
            'run_symbolic_shape_infer': False
        },
        'debug': {
            'deterministic_compute': False,
            'check_model_export': False,
            'model_with_training_graph_path': ''
        },
        '_internal_use': {
            'enable_internal_postprocess': True,
            'extra_postprocess': None,
            'onnx_opset_version': 12,
            'enable_onnx_contrib_ops': True,
        }
    }
    actual_values = orttrainer_options.ORTTrainerOptions(test_input)
    # _validated_opts holds the schema-normalized option tree
    assert actual_values._validated_opts == expected_values
@pytest.mark.parametrize("input,error_msg", [
    ({'mixed_precision': {'enabled': 1}},
     "Invalid options: {'mixed_precision': [{'enabled': ['must be of boolean type']}]}")
])
def testORTTrainerOptionsInvalidMixedPrecisionEnabledSchema(input, error_msg):
    '''Test an invalid input based on schema validation error message'''
    # Construction must raise and the message must match the schema error verbatim
    with pytest.raises(ValueError) as exc_info:
        orttrainer_options.ORTTrainerOptions(input)
    assert str(exc_info.value) == error_msg
@pytest.mark.parametrize("input_dict,input_dtype,output_dtype", [
    ({'inputs': [('in0', [])],
      'outputs': [('out0', []), ('out1', [])]}, (torch.int,), (torch.float, torch.int32,)),
    ({'inputs': [('in0', ['batch', 2, 3])],
      'outputs': [('out0', [], True)]}, (torch.int8,), (torch.int16,)),
    ({'inputs': [('in0', []), ('in1', [1]), ('in2', [1, 2]), ('in3', [1000, 'dyn_ax1']), ('in4', ['dyn_ax1', 'dyn_ax2', 'dyn_ax3'])],
      'outputs': [('out0', [], True), ('out1', [1], False), ('out2', [1, 'dyn_ax1', 3])]},
     (torch.float, torch.uint8, torch.bool, torch.double, torch.half,), (torch.float, torch.float, torch.int64))
])
def testORTTrainerModelDescValidSchemas(input_dict, input_dtype, output_dtype):
    r''' Test different ways of using default values for incomplete input'''
    model_description = md_val._ORTTrainerModelDesc(input_dict)
    # Validating hard-coded learning rate description
    assert model_description.learning_rate.name == md_val.LEARNING_RATE_IO_DESCRIPTION_NAME
    assert model_description.learning_rate.shape == [1]
    assert model_description.learning_rate.dtype == torch.float32
    # Validating model description from user: each entry becomes a
    # (name, shape) namedtuple for inputs, (name, shape, is_loss) for outputs
    for idx, i_desc in enumerate(model_description.inputs):
        assert isinstance(i_desc, model_description._InputDescription)
        assert len(i_desc) == 2
        assert input_dict['inputs'][idx][0] == i_desc.name
        assert input_dict['inputs'][idx][1] == i_desc.shape
    for idx, o_desc in enumerate(model_description.outputs):
        assert isinstance(o_desc, model_description._OutputDescription)
        assert len(o_desc) == 3
        assert input_dict['outputs'][idx][0] == o_desc.name
        assert input_dict['outputs'][idx][1] == o_desc.shape
        # is_loss defaults to False when the user tuple omits it
        is_loss = input_dict['outputs'][idx][2] if len(input_dict['outputs'][idx]) == 3 else False
        assert is_loss == o_desc.is_loss
    # Set all_finite name and check its description
    model_description.all_finite = md_val.ALL_FINITE_IO_DESCRIPTION_NAME
    assert model_description.all_finite.name == md_val.ALL_FINITE_IO_DESCRIPTION_NAME
    assert model_description.all_finite.shape == [1]
    assert model_description.all_finite.dtype == torch.bool
    # Set loss_scale_input and check its description
    model_description.loss_scale_input = md_val.LOSS_SCALE_INPUT_IO_DESCRIPTION_NAME
    assert model_description.loss_scale_input.name == md_val.LOSS_SCALE_INPUT_IO_DESCRIPTION_NAME
    assert model_description.loss_scale_input.shape == []
    assert model_description.loss_scale_input.dtype == torch.float32
    # Append type to inputs/outputs tuples
    for idx, i_desc in enumerate(model_description.inputs):
        model_description.add_type_to_input_description(idx, input_dtype[idx])
    for idx, o_desc in enumerate(model_description.outputs):
        model_description.add_type_to_output_description(idx, output_dtype[idx])
    # Verify inputs/outputs tuples are replaced by the typed counterparts
    for idx, i_desc in enumerate(model_description.inputs):
        assert isinstance(i_desc, model_description._InputDescriptionTyped)
        assert input_dtype[idx] == i_desc.dtype
    for idx, o_desc in enumerate(model_description.outputs):
        assert isinstance(o_desc, model_description._OutputDescriptionTyped)
        assert output_dtype[idx] == o_desc.dtype
# Each case pairs an invalid model description with the exact schema
# validation message _ORTTrainerModelDesc must raise.
@pytest.mark.parametrize("input_dict,error_msg", [
    ({'inputs': [(True, [])],
      'outputs': [(True, [])]},
     "Invalid model_desc: {'inputs': [{0: ['the first element of the tuple (aka name) must be a string']}], "
     "'outputs': [{0: ['the first element of the tuple (aka name) must be a string']}]}"),
    ({'inputs': [('in1', None)],
      'outputs': [('out1', None)]},
     "Invalid model_desc: {'inputs': [{0: ['the second element of the tuple (aka shape) must be a list']}], "
     "'outputs': [{0: ['the second element of the tuple (aka shape) must be a list']}]}"),
    ({'inputs': [('in1', [])],
      'outputs': [('out1', [], None)]},
     "Invalid model_desc: {'outputs': [{0: ['the third element of the tuple (aka is_loss) must be a boolean']}]}"),
    ({'inputs': [('in1', [True])],
      'outputs': [('out1', [True])]},
     "Invalid model_desc: {'inputs': [{0: ['each shape must be either a string or integer']}], "
     "'outputs': [{0: ['each shape must be either a string or integer']}]}"),
    ({'inputs': [('in1', [])],
      'outputs': [('out1', [], True), ('out2', [], True)]},
     "Invalid model_desc: {'outputs': [{1: ['only one is_loss can bet set to True']}]}"),
    ({'inputz': [('in1', [])],
      'outputs': [('out1', [], True)]},
     "Invalid model_desc: {'inputs': ['required field'], 'inputz': ['unknown field']}"),
    ({'inputs': [('in1', [])],
      'outputz': [('out1', [], True)]},
     "Invalid model_desc: {'outputs': ['required field'], 'outputz': ['unknown field']}"),
])
def testORTTrainerModelDescInvalidSchemas(input_dict, error_msg):
    r''' Test different ways of using default values for incomplete input'''
    # Construction must fail with the exact expected message
    with pytest.raises(ValueError) as e:
        md_val._ORTTrainerModelDesc(input_dict)
    assert str(e.value) == error_msg
def testDynamicLossScaler():
    '''Exercise DynamicLossScaler defaults through full up- and down-scaling cycles.

    NOTE: relies on the scaler's private _stable_steps_count attribute.
    '''
    rtol = 1e-7
    default_scaler = amp.loss_scaler.DynamicLossScaler()
    # Initial state: defaults are scale=2**16, window=2000, min=1.0, max=2**24
    train_step_info = orttrainer.TrainStepInfo(optim.LambConfig())
    assert_allclose(default_scaler.loss_scale, float(1 << 16),
                    rtol=rtol, err_msg="loss scale mismatch")
    assert default_scaler.up_scale_window == 2000
    assert_allclose(default_scaler.min_loss_scale, 1.0,
                    rtol=rtol, err_msg="min loss scale mismatch")
    assert_allclose(default_scaler.max_loss_scale, float(
        1 << 24), rtol=rtol, err_msg="max loss scale mismatch")
    # Performing 9*2000 updates to cover all branches of LossScaler.update(train_step_info.all_finite=True)
    loss_scale = float(1 << 16)
    for cycles in range(1, 10):
        # 1999 updates without overflow produces 1999 stable steps
        for i in range(1, 2000):
            new_loss_scale = default_scaler.update(train_step_info)
            assert default_scaler._stable_steps_count == i
            assert_allclose(new_loss_scale, loss_scale,
                            rtol=rtol, err_msg=f"loss scale mismatch at update {i}")
        # 2000th update without overflow doubles the loss and zero stable steps until max_loss_scale is reached
        new_loss_scale = default_scaler.update(train_step_info)
        if cycles <= 8:
            loss_scale *= 2
        assert default_scaler._stable_steps_count == 0
        assert_allclose(new_loss_scale, loss_scale,
                        rtol=rtol, err_msg="loss scale mismatch")
    # After 8 cycles, loss scale should be float(1 << 16)*(2**8)
    assert_allclose(new_loss_scale, float(1 << 16)
                    * (2**8), rtol=rtol, err_msg="loss scale mismatch")
    # After 9 cycles, loss scale reaches max_loss_scale and it is not doubled from that point on
    loss_scale = float(1 << 16)*(2**8)
    for count in range(1, 2050):
        new_loss_scale = default_scaler.update(train_step_info)
        assert default_scaler._stable_steps_count == (count % 2000)
        assert_allclose(new_loss_scale, loss_scale,
                        rtol=rtol, err_msg="loss scale mismatch")
    # Setting train_step_info.all_finite = False to test down scaling
    train_step_info.all_finite = False
    # Performing 24 updates to halve the loss scale each time
    loss_scale = float(1 << 16)*(2**8)
    for count in range(1, 25):
        new_loss_scale = default_scaler.update(train_step_info)
        loss_scale /= 2
        assert default_scaler._stable_steps_count == 0
        assert_allclose(new_loss_scale, loss_scale,
                        rtol=rtol, err_msg="loss scale mismatch")
    # After 24 updates with gradient overflow, loss scale is 1.0 (2**24 / 2**24)
    assert_allclose(new_loss_scale, 1.,
                    rtol=rtol, err_msg="loss scale mismatch")
    # After 25 updates, min_loss_scale is reached and loss scale is not halved from that point on
    for count in range(1, 5):
        new_loss_scale = default_scaler.update(train_step_info)
        assert default_scaler._stable_steps_count == 0
        assert_allclose(new_loss_scale, loss_scale,
                        rtol=rtol, err_msg="loss scale mismatch")
def testDynamicLossScalerCustomValues():
    '''DynamicLossScaler must store user-provided construction arguments verbatim.'''
    rtol = 1e-7
    scaler = amp.loss_scaler.DynamicLossScaler(automatic_update=False,
                                               loss_scale=3,
                                               up_scale_window=7,
                                               min_loss_scale=5,
                                               max_loss_scale=10)
    # Exact fields first, then the numeric ones via allclose
    assert scaler.automatic_update == False
    assert scaler.up_scale_window == 7
    assert_allclose(scaler.loss_scale, 3, rtol=rtol,
                    err_msg="loss scale mismatch")
    assert_allclose(scaler.min_loss_scale, 5, rtol=rtol,
                    err_msg="min loss scale mismatch")
    assert_allclose(scaler.max_loss_scale, 10, rtol=rtol,
                    err_msg="max loss scale mismatch")
def testTrainStepInfo():
    '''Test valid initializations of TrainStepInfo'''
    optimizer_config = optim.LambConfig()
    fetches = ['out1', 'out2']
    # Fully-specified construction must round-trip every field;
    # config-only construction must fall back to the documented defaults.
    explicit = orttrainer.TrainStepInfo(optimizer_config=optimizer_config,
                                        all_finite=False,
                                        fetches=fetches,
                                        optimization_step=123,
                                        step=456)
    defaulted = orttrainer.TrainStepInfo(optimizer_config)
    cases = [(explicit, (False, fetches, 123, 456)),
             (defaulted, (True, [], 0, 0))]
    for info, (all_finite, expected_fetches, opt_step, step) in cases:
        assert info.optimizer_config == optimizer_config
        assert info.all_finite == all_finite
        assert info.fetches == expected_fetches
        assert info.optimization_step == opt_step
        assert info.step == step
@pytest.mark.parametrize("invalid_input", [
    (-1),
    ('Hello'),
])
def testTrainStepInfoInvalidInput(invalid_input):
    '''Test invalid initialization of TrainStepInfo'''
    optimizer_config = optim.LambConfig()
    # The first positional argument must be an optimizer config instance
    with pytest.raises(AssertionError):
        orttrainer.TrainStepInfo(optimizer_config=invalid_input)
    # Every other keyword rejects the same invalid scalar values
    for field in ('all_finite', 'fetches', 'optimization_step', 'step'):
        with pytest.raises(AssertionError):
            orttrainer.TrainStepInfo(optimizer_config, **{field: invalid_input})
@pytest.mark.parametrize("optim_name,lr,alpha,default_alpha", [
('AdamOptimizer', .1, .2, None),
('LambOptimizer', .2, .3, None),
('SGDOptimizer', .3, .4, None),
('SGDOptimizer', .3, .4, .5)
])
def testOptimizerConfig(optim_name, lr, alpha, default_alpha):
'''Test initialization of _OptimizerConfig'''
defaults = {'lr': lr, 'alpha': alpha}
params = [{'params': ['fc1.weight', 'fc2.weight']}]
if default_alpha is not None:
params[0].update({'alpha': default_alpha})
else:
params[0].update({'alpha': alpha})
cfg = optim.config._OptimizerConfig(
name=optim_name, params=params, defaults=defaults)
assert cfg.name == optim_name
rtol = 1e-07
assert_allclose(defaults['lr'],
cfg.lr, rtol=rtol, err_msg="lr mismatch")
# 1:1 mapping between defaults and params's hyper parameters
for param in params:
for k, _ in param.items():
if k != 'params':
assert k in cfg.defaults, "hyper parameter {k} not present in one of the parameter params"
for k, _ in cfg.defaults.items():
for param in cfg.params:
assert k in param, "hyper parameter | |
# Copyright (c) 2017 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
import os
import pprint
import sgtk
HookBaseClass = sgtk.get_hook_baseclass()
class PhotoshopCCDocumentPublishPlugin(HookBaseClass):
"""
Plugin for publishing Photoshop documents in Shotgun.
"""
@property
def icon(self):
    """
    Path to a png icon on disk.

    The icon lives in the shared "icons" folder one level above this
    hook's folder.
    """
    hook_folder = self.disk_location
    return os.path.join(hook_folder, os.pardir, "icons", "publish.png")
@property
def name(self):
    """One line display name describing the plugin."""
    return "Publish to Shotgun"
@property
def description(self):
    """
    Verbose, multi-line description of what the plugin does. This can
    contain simple html for formatting.
    """
    # Fixed typo in the user-visible HTML: "worklfow" -> "workflow".
    loader_url = "https://support.shotgunsoftware.com/hc/en-us/articles/219033078"
    return """
    Publishes the file to Shotgun. A <b>Publish</b> entry will be
    created in Shotgun which will include a reference to the file's current
    path on disk. Other users will be able to access the published file via
    the <b><a href='%s'>Loader</a></b> so long as they have access to
    the file's location on disk.
    If the session has not been saved, validation will fail and a button
    will be provided in the logging output to save the file.
    <h3>File versioning</h3>
    If the filename contains a version number, the process will bump the
    file to the next version after publishing.
    The <code>version</code> field of the resulting <b>Publish</b> in
    Shotgun will also reflect the version number identified in the filename.
    The basic workflow recognizes the following version formats by default:
    <ul>
    <li><code>filename.v###.ext</code></li>
    <li><code>filename_v###.ext</code></li>
    <li><code>filename-v###.ext</code></li>
    </ul>
    After publishing, if a version number is detected in the file, the file
    will automatically be saved to the next incremental version number.
    For example, <code>filename.v001.ext</code> will be published and copied
    to <code>filename.v002.ext</code>
    If the next incremental version of the file already exists on disk, the
    validation step will produce a warning, and a button will be provided in
    the logging output which will allow saving the session to the next
    available version number prior to publishing.
    <br><br><i>NOTE: any amount of version number padding is supported.</i>
    <h3>Overwriting an existing publish</h3>
    A file can be published multiple times however only the most recent
    publish will be available to other users. Warnings will be provided
    during validation if there are previous publishes.
    """ % (loader_url,)
    # TODO: add link to workflow docs
@property
def settings(self):
    """
    Dictionary defining the settings that this plugin expects to receive
    through the settings parameter in the accept, validate, publish and
    finalize methods.

    Each entry maps a setting name to a dict with "type", "default" and
    "description" keys; the type string is one of the data types toolkit
    accepts as part of its environment configuration.
    """
    file_types_description = (
        "List of file types to include. Each entry in the list "
        "is a list in which the first entry is the Shotgun "
        "published file type and subsequent entries are file "
        "extensions that should be associated.")
    return {
        "File Types": {
            "type": "list",
            "default": "[]",
            "description": file_types_description,
        },
    }
@property
def item_filters(self):
    """
    List of item types that this plugin is interested in.

    Only items matching entries in this list are presented to the
    accept() method. Strings may contain glob patterns such as *.
    """
    return ["photoshop.document"]
def accept(self, settings, item):
    """
    Method called by the publisher to determine if an item is of any
    interest to this plugin. Only items matching the filters defined via the
    item_filters property will be presented to this method.

    A publish task will be generated for each item accepted here. Returns a
    dictionary with the following booleans:

    - accepted: Indicates if the plugin is interested in this value at
      all. Required.
    - enabled: If True, the plugin will be enabled in the UI, otherwise
      it will be disabled. Optional, True by default.
    - visible: If True, the plugin will be visible in the UI, otherwise
      it will be hidden. Optional, True by default.
    - checked: If True, the plugin will be checked in the UI, otherwise
      it will be unchecked. Optional, True by default.

    :param settings: Dictionary of Settings. The keys are strings, matching
        the keys returned in the settings property. The values are `Setting`
        instances.
    :param item: Item to process
    :returns: dictionary with boolean keys accepted, required and enabled
    """
    # Only items that carry a Photoshop document can be published
    document = item.properties.get("document")
    if not document:
        self.logger.warn("Could not determine the document for item")
        return {"accepted": False}
    path = _document_path(document)
    if not path:
        # the document has not been saved before (no path determined).
        # provide a save button. the document will need to be saved before
        # validation will succeed. Note: the item is still accepted here -
        # only a warning is logged; validate() is what blocks publishing.
        self.logger.warn(
            "The Photoshop document '%s' has not been saved." %
            (document.name,),
            extra=self._get_save_as_action(document)
        )
    self.logger.info(
        "Photoshop '%s' plugin accepted document: %s." %
        (self.name, document.name)
    )
    # Accept and pre-check the plugin in the UI
    return {
        "accepted": True,
        "checked": True
    }
def validate(self, settings, item):
    """
    Validates the given item to check that it is ok to publish.

    Returns a boolean to indicate validity.

    :param settings: Dictionary of Settings. The keys are strings, matching
        the keys returned in the settings property. The values are `Setting`
        instances.
    :param item: Item to process
    :returns: True if item is valid, False otherwise.
    """
    publisher = self.parent
    document = item.properties["document"]
    path = _document_path(document)

    if not path:
        save_error_message = "The Photoshop document '%s' has not been saved." % (document.name,)
        # the document still requires saving. provide a save button.
        # validation fails.
        self.logger.error(
            save_error_message,
            extra=self._get_save_as_action(document)
        )
        raise Exception(save_error_message)

    # get the path in a normalized state. no trailing separator,
    # separators are appropriate for current os, no double separators,
    # etc.
    # BUGFIX: the original called normalize() but discarded its return
    # value, so the un-normalized path was used for all lookups below.
    path = sgtk.util.ShotgunPath.normalize(path)

    # get the publish name for this file path. this will ensure we get a
    # consistent publish name when looking up existing publishes.
    publish_name = publisher.util.get_publish_name(path)

    # see if there are any other publishes of this path with a status.
    # Note the name, context, and path *must* match the values supplied to
    # register_publish in the publish phase in order for this to return an
    # accurate list of previous publishes of this file.
    publishes = publisher.util.get_conflicting_publishes(
        item.context,
        path,
        publish_name,
        filters=["sg_status_list", "is_not", None]
    )

    if publishes:
        conflict_info = (
            "If you continue, these conflicting publishes will no longer "
            "be available to other users via the loader:<br>"
            "<pre>%s</pre>" % (pprint.pformat(publishes),)
        )
        self.logger.warn(
            "Found %s conflicting publishes in Shotgun" %
            (len(publishes),),
            extra={
                "action_show_more_info": {
                    "label": "Show Conflicts",
                    "tooltip": "Show the conflicting publishes in Shotgun",
                    "text": conflict_info
                }
            }
        )

    # if the file has a version number in it, see if the next version exists
    next_version_path = publisher.util.get_next_version_path(path)
    if next_version_path and os.path.exists(next_version_path):
        # determine the next available version_number. just keep asking for
        # the next one until we get one that doesn't exist.
        while os.path.exists(next_version_path):
            next_version_path = publisher.util.get_next_version_path(
                next_version_path)

        # now extract the version number of the next available to display
        # to the user
        version = publisher.util.get_version_number(next_version_path)

        engine = publisher.engine
        version_error_message = "The next version of this file already exists on disk."
        self.logger.error(
            version_error_message,
            extra={
                "action_button": {
                    "label": "Save to v%s" % (version,),
                    "tooltip": "Save to the next available version number, "
                    "v%s" % (version,),
                    "callback": lambda: engine.save_to_path(document,
                                                            next_version_path)
                }
            }
        )
        raise Exception(version_error_message)

    self.logger.info("A Publish will be created in Shotgun and linked to:")
    self.logger.info(" %s" % (path,))
    return True
def publish(self, settings, item):
"""
Executes the publish logic for the given item and settings.
:param settings: Dictionary of Settings. The keys are strings, matching
the keys returned in the settings property. The values are `Setting`
instances.
:param item: Item to process
"""
publisher = self.parent
engine = publisher.engine
document | |
# terminis/terminis.py
# -*- coding: utf-8 -*-
import sys
try:
import curses
except ImportError:
sys.exit(
"""This program requires curses.
You can install it on Windows with:
pip install --user windows-curses"""
)
else:
curses.COLOR_ORANGE = curses.COLOR_WHITE
import random
import sched
import time
import os
import locale
import subprocess
try:
import configparser
except ImportError: # Python2
import ConfigParser as configparser
# application name, also used for the per-user config/data directory
DIR_NAME = "Terminis"
# command-line usage text printed by --help and on bad arguments
HELP_MSG = """terminis [options]
Tetris clone for terminal
--help\tshow command usage (this message)
--edit\tedit controls in text editor
--reset\treset to default controls settings
--level=n\tstart at level n (integer between 1 and 15)"""
class Rotation:
    """Spin directions; used as sign multipliers when rotating minoes."""

    COUNTERCLOCKWISE = -1
    CLOCKWISE = 1
class Point:
    """2D integer coordinate supporting vector addition via ``+``."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __add__(self, other):
        # component-wise sum; returns a new Point, operands are untouched
        return Point(other.x + self.x, other.y + self.y)
class Movement:
    # unit translations applied to a tetromino's position;
    # y grows downward in this coordinate system
    LEFT = Point(-1, 0)
    RIGHT = Point(1, 0)
    DOWN = Point(0, 1)
class Scheduler(sched.scheduler, dict):
    """A sched.scheduler whose pending events are addressable by name.

    The dict part maps an event name to its sched event object so an
    event can be cancelled or re-armed by name.
    """

    def __init__(self):
        sched.scheduler.__init__(self, time.time, time.sleep)
        dict.__init__(self)

    def repeat(self, name, delay, action, args=tuple()):
        """Run *action* every *delay* seconds, registered under *name*."""
        event = sched.scheduler.enter(self, delay, 1, self._repeat, (name, delay, action, args))
        self[name] = event

    def _repeat(self, name, delay, action, args):
        # re-arm before running so the action may cancel/replace "name"
        del self[name]
        self.repeat(name, delay, action, args)
        action(*args)

    def single_shot(self, name, delay, action, args=tuple()):
        """Run *action* once after *delay* seconds, registered under *name*."""
        event = sched.scheduler.enter(self, delay, 1, self._single_shot, (name, action, args))
        self[name] = event

    def _single_shot(self, name, action, args):
        del self[name]
        action(*args)

    def cancel(self, name):
        """Cancel the pending event registered under *name*, if any."""
        if name in self:
            sched.scheduler.cancel(self, self.pop(name))
# module-wide scheduler shared by the game loop, pieces and windows
scheduler = Scheduler()
class Tetromino:
    """Base class for the seven tetrominoes.

    Coordinates are screen-like: x grows rightward, y grows downward.
    ``SUPER_ROTATION_SYSTEM[orientation][direction]`` lists the five wall
    kicks (translations) tried in order when the piece rotates (SRS).
    """

    SUPER_ROTATION_SYSTEM = (
        {   # orientation 0: spawn
            Rotation.COUNTERCLOCKWISE: (Point(0, 0), Point(1, 0), Point(1, -1), Point(0, 2), Point(1, 2)),
            Rotation.CLOCKWISE: (Point(0, 0), Point(-1, 0), Point(-1, -1), Point(0, 2), Point(-1, 2)),
        },
        {   # orientation 1: rotated clockwise once (both directions share a table)
            Rotation.COUNTERCLOCKWISE: (Point(0, 0), Point(1, 0), Point(1, 1), Point(0, -2), Point(1, -2)),
            Rotation.CLOCKWISE: (Point(0, 0), Point(1, 0), Point(1, 1), Point(0, -2), Point(1, -2)),
        },
        {   # orientation 2: upside down
            Rotation.COUNTERCLOCKWISE: (Point(0, 0), Point(-1, 0), Point(-1, -1), Point(0, 2), Point(-1, 2)),
            Rotation.CLOCKWISE: (Point(0, 0), Point(1, 0), Point(1, -1), Point(0, 2), Point(1, 2)),
        },
        {   # orientation 3: rotated counterclockwise once
            Rotation.COUNTERCLOCKWISE: (Point(0, 0), Point(-1, 0), Point(-1, 1), Point(0, -2), Point(-1, -2)),
            # BUGFIX: fourth kick was Point(0, 2); like orientation 1, both
            # directions of orientation 3 share one SRS table, so it must
            # be Point(0, -2).
            Rotation.CLOCKWISE: (Point(0, 0), Point(-1, 0), Point(-1, 1), Point(0, -2), Point(-1, -2))
        }
    )
    lock_delay = 0.5   # seconds a landed piece may still move before locking
    fall_delay = 1     # seconds between gravity steps (tightened per level)
    color_pair = curses.COLOR_BLACK

    def __init__(self, matrix, position):
        self.matrix = matrix
        self.position = position
        self.minoes_positions = self.MINOES_POSITIONS
        self.orientation = 0
        # both flags feed T-spin detection at lock time
        self.rotation_point_5_used = False
        self.rotated_last = False
        self.hold_enabled = True

    def move_rotate(self, movement, minoes_positions):
        """Try to translate the piece; return True on success.

        On success a pending lock timer is restarted, since the piece
        moved and may no longer be resting on anything.
        """
        potential_position = self.position + movement
        if all(
            self.matrix.is_free_cell(potential_position+mino_position)
            for mino_position in minoes_positions
        ):
            self.position = potential_position
            if "lock" in scheduler:
                scheduler.cancel("lock")
                scheduler.single_shot("lock", self.lock_delay, self.matrix.lock)
            return True
        else:
            return False

    def move(self, movement, lock=True, refresh=True):
        """Translate the piece; arm the lock timer when it cannot fall."""
        if self.move_rotate(movement, self.minoes_positions):
            self.rotated_last = False
            if refresh:
                self.matrix.refresh()
            return True
        else:
            if (
                lock
                and movement == Movement.DOWN
                and "lock" not in scheduler
            ):
                # the piece just landed: start the lock countdown
                scheduler.single_shot("lock", self.lock_delay, self.matrix.lock)
            self.matrix.refresh()
            return False

    def rotate(self, direction):
        """Rotate by 90° with SRS wall kicks; return True on success."""
        rotated_minoes_positions = tuple(
            Point(-direction*mino_position.y, direction*mino_position.x)
            for mino_position in self.minoes_positions
        )
        for rotation_point, liberty_degree in enumerate(self.SUPER_ROTATION_SYSTEM[self.orientation][direction], start=1):
            if self.move_rotate(liberty_degree, rotated_minoes_positions):
                self.minoes_positions = rotated_minoes_positions
                self.orientation = (self.orientation+direction) % 4
                self.rotated_last = True
                if rotation_point == 5:
                    # using the 5th kick upgrades a mini T-spin to a full one
                    self.rotation_point_5_used = True
                self.matrix.refresh()
                return True
        else:
            return False

    def soft_drop(self):
        """Move one row down; scores 1 point for the dropped row."""
        if self.move(Movement.DOWN):
            self.matrix.game.stats.piece_dropped(1)

    def hard_drop(self):
        """Drop straight to the bottom and lock; scores 2 points per row."""
        lines = 0
        while self.move(Movement.DOWN, lock=False, refresh=False):
            lines += 2
        self.matrix.refresh()
        self.matrix.game.stats.piece_dropped(lines)
        self.matrix.lock()

    def fall(self):
        """Gravity tick: one row down."""
        self.move(Movement.DOWN)

    def t_spin(self):
        # only the T piece can perform a T-spin; see class T
        return ""
class O(Tetromino):
    # rotating O is a no-op, so it needs no wall-kick data
    SUPER_ROTATION_SYSTEM = tuple()
    MINOES_POSITIONS = (Point(0, 0), Point(1, 0), Point(0, -1), Point(1, -1))
    COLOR = curses.COLOR_YELLOW
    def rotate(self, direction):
        # the O tetromino is rotation-invariant
        return False
class I(Tetromino):
    """The I tetromino with its dedicated SRS wall-kick table.

    Unlike the other pieces, I rotates about a point between cells, so the
    first entry of each kick tuple also carries the rotation-centre shift.
    """
    SUPER_ROTATION_SYSTEM = (
        {
            Rotation.COUNTERCLOCKWISE: (Point(0, 1), Point(-1, 1), Point(2, 1), Point(-1, -1), Point(2, 2)),
            Rotation.CLOCKWISE: (Point(1, 0), Point(-1, 0), Point(2, 0), Point(-1, 1), Point(2, -2)),
        },
        {
            Rotation.COUNTERCLOCKWISE: (Point(-1, 0), Point(1, 0), Point(-2, 0), Point(1, -1), Point(-2, 2)),
            Rotation.CLOCKWISE: (Point(0, 1), Point(-1, 1), Point(2, 1), Point(-1, -1), Point(2, 2)),
        },
        {
            Rotation.COUNTERCLOCKWISE: (Point(0, -1), Point(1, -1), Point(-2, -1), Point(1, 1), Point(-2, -2)),
            Rotation.CLOCKWISE: (Point(-1, 0), Point(1, 0), Point(-2, 0), Point(1, -1), Point(-2, 2)),
        },
        {
            Rotation.COUNTERCLOCKWISE: (Point(1, 0), Point(-1, 0), Point(2, 0), Point(-1, 1), Point(2, -2)),
            # BUGFIX: the centre shift was Point(0, 1); rotating 3 -> 0 must
            # move the centre back up, mirroring the 2 -> 1 entry exactly as
            # every other transition pair in this table does.
            Rotation.CLOCKWISE: (Point(0, -1), Point(1, -1), Point(-2, -1), Point(1, 1), Point(-2, -2)),
        },
    )
    MINOES_POSITIONS = (Point(-1, 0), Point(0, 0), Point(1, 0), Point(2, 0))
    COLOR = curses.COLOR_CYAN
class T(Tetromino):
    MINOES_POSITIONS = (Point(-1, 0), Point(0, 0), Point(0, -1), Point(1, 0))
    COLOR = curses.COLOR_MAGENTA
    # the four diagonal neighbours of the T centre, indexed by orientation
    T_SLOT = (Point(-1, -1), Point(1, -1), Point(1, 1), Point(-1, 1))
    def t_spin(self):
        """Classify the lock as "T-SPIN", "MINI T-SPIN" or "" (none).

        Only counts when the last successful action was a rotation.
        a/b are the occupied-state of the two corners beside the T's front,
        c/d the two behind it (all relative to current orientation).
        """
        if self.rotated_last:
            a = not self.matrix.is_free_cell(self.position+self.T_SLOT[self.orientation])
            b = not self.matrix.is_free_cell(self.position+self.T_SLOT[(1+self.orientation)%4])
            c = not self.matrix.is_free_cell(self.position+self.T_SLOT[(3+self.orientation)%4])
            d = not self.matrix.is_free_cell(self.position+self.T_SLOT[(2+self.orientation)%4])
            # a rotation into the 5th SRS kick point always counts as full
            if self.rotation_point_5_used or (a and b and (c or d)):
                return "T-SPIN"
            elif c and d and (a or b):
                return "MINI T-SPIN"
        return ""
class L(Tetromino):
    # J/L/S/T/Z share the base-class SRS table; only shape and color differ
    MINOES_POSITIONS = (Point(-1, 0), Point(0, 0), Point(1, 0), Point(1, -1))
    COLOR = curses.COLOR_ORANGE
class J(Tetromino):
    # mirror image of the L piece
    MINOES_POSITIONS = (Point(-1, -1), Point(-1, 0), Point(0, 0), Point(1, 0))
    COLOR = curses.COLOR_BLUE
class S(Tetromino):
    # the S skew piece
    MINOES_POSITIONS = (Point(-1, 0), Point(0, 0), Point(0, -1), Point(1, -1))
    COLOR = curses.COLOR_GREEN
class Z(Tetromino):
    # mirror image of the S piece
    MINOES_POSITIONS = (Point(-1, -1), Point(0, -1), Point(0, 0), Point(1, 0))
    COLOR = curses.COLOR_RED
class Window:
    """Base curses window: border, optional centred title, piece drawing.

    Subclasses define TITLE and call refresh(); self.piece, when set, is
    drawn as two-character-wide minoes.
    """

    def __init__(self, width, height, begin_x, begin_y):
        self.window = curses.newwin(height, width, begin_y, begin_x)
        if self.TITLE:
            # centre the title on the top border line
            self.title_begin_x = (width - len(self.TITLE)) // 2 + 1
        self.piece = None
        self.refresh()

    def draw_border(self):
        self.window.erase()
        self.window.border()
        if self.TITLE:
            self.window.addstr(0, self.title_begin_x, self.TITLE, curses.A_BOLD)

    def draw_piece(self):
        if not self.piece:
            return
        # blink while the lock countdown is running
        if "lock" in scheduler:
            attr = self.piece.color_pair | curses.A_BLINK | curses.A_REVERSE
        else:
            attr = self.piece.color_pair
        for mino in self.piece.minoes_positions:
            cell = mino + self.piece.position
            self.draw_mino(cell.x, cell.y, attr)

    def draw_mino(self, x, y, attr):
        # rows above the visible matrix (y < 0) are simply not drawn
        if y >= 0:
            self.window.addstr(y, x*2 + 1, "██", attr)
class Matrix(Window):
    """The playfield: an NB_LINES x NB_COLS grid of locked mino colors.

    Empty cells hold None; filled cells hold the locking piece's
    color_pair value.
    """

    NB_COLS = 10
    NB_LINES = 21
    WIDTH = NB_COLS*2+2
    HEIGHT = NB_LINES+1
    PIECE_POSITION = Point(4, -1)   # spawn position for new pieces
    TITLE = ""

    def __init__(self, game, begin_x, begin_y):
        # centre the playfield inside the game area
        begin_x += (game.WIDTH - self.WIDTH) // 2
        begin_y += (game.HEIGHT - self.HEIGHT) // 2
        self.game = game
        self.cells = [
            [None for x in range(self.NB_COLS)]
            for y in range(self.NB_LINES)
        ]
        self.piece = None
        Window.__init__(self, self.WIDTH, self.HEIGHT, begin_x, begin_y)

    def refresh(self, paused=False):
        """Redraw the matrix; when paused, hide the field contents."""
        self.draw_border()
        if paused:
            self.window.addstr(11, 9, "PAUSE", curses.A_BOLD)
        else:
            for y, line in enumerate(self.cells):
                for x, color in enumerate(line):
                    if color is not None:
                        self.draw_mino(x, y, color)
            self.draw_piece()
        self.window.refresh()

    def is_free_cell(self, position):
        """True if *position* is inside the field and unoccupied.

        Cells above the top (y < 0) are considered free so pieces can
        spawn partially hidden.
        """
        return (
            0 <= position.x < self.NB_COLS
            and position.y < self.NB_LINES
            and not (position.y >= 0 and self.cells[position.y][position.x] is not None)
        )

    def lock(self):
        """Freeze the current piece into the grid and clear full lines."""
        # if the piece can still fall, it is not actually resting: abort
        if not self.piece.move(Movement.DOWN):
            scheduler.cancel("fall")
            t_spin = self.piece.t_spin()
            for mino_position in self.piece.minoes_positions:
                position = mino_position + self.piece.position
                if position.y >= 0:
                    self.cells[position.y][position.x] = self.piece.color_pair
                else:
                    # part of the piece locked above the field: game over
                    self.game.over()
                    return
            nb_lines_cleared = 0
            for y, line in enumerate(self.cells):
                # BUGFIX: compare against None explicitly; a falsy color
                # value (e.g. color pair 0) must still count as filled
                if all(mino is not None for mino in line):
                    self.cells.pop(y)
                    self.cells.insert(0, [None for x in range(self.NB_COLS)])
                    nb_lines_cleared += 1
            self.game.stats.piece_locked(nb_lines_cleared, t_spin)
            self.piece = None
            self.game.new_piece()
class HoldNext(Window):
    """Shared base for the small HOLD and NEXT piece-preview windows."""
    HEIGHT = 6
    # where the previewed piece is drawn inside the window
    PIECE_POSITION = Point(6, 3)
    def __init__(self, width, begin_x, begin_y):
        Window.__init__(self, width, self.HEIGHT, begin_x, begin_y)
    def refresh(self, paused=False):
        self.draw_border()
        if not paused:
            # the preview is hidden while the game is paused
            self.draw_piece()
        self.window.refresh()
class Hold(HoldNext):
    # window showing the piece currently held for later use
    TITLE = "HOLD"
class Next(HoldNext):
    # window previewing the upcoming piece
    TITLE = "NEXT"
class Stats(Window):
    """Score/level bookkeeping plus the STATS side window."""
    # base score per clear: indexed by number of lines cleared, then by
    # T-spin kind ("" = plain clear); "name" is the label shown to the player
    SCORES = (
        {"name": "", "": 0, "MINI T-SPIN": 1, "T-SPIN": 4},
        {"name": "SINGLE", "": 1, "MINI T-SPIN": 2, "T-SPIN": 8},
        {"name": "DOUBLE", "": 3, "T-SPIN": 12},
        {"name": "TRIPLE", "": 5, "T-SPIN": 16},
        {"name": "TETRIS", "": 8}
    )
    TITLE = "STATS"
    FILE_NAME = ".high_score"
    # platform-appropriate per-user data directory for the high-score file
    if sys.platform == "win32":
        DIR_PATH = os.environ.get("appdata", os.path.expanduser("~\Appdata\Roaming"))
    else:
        DIR_PATH = os.environ.get("XDG_DATA_HOME", os.path.expanduser("~/.local/share"))
    DIR_PATH = os.path.join(DIR_PATH, DIR_NAME)
    FILE_PATH = os.path.join(DIR_PATH, FILE_NAME)
def __init__(self, game, width, height, begin_x, begin_y):
    """Parse the --level option, load the high score and init the window.

    :param game: owning Game instance
    :param width/height/begin_x/begin_y: curses window geometry
    """
    for arg in sys.argv[1:]:
        if arg.startswith("--level="):
            try:
                self.level = int(arg[8:])
            except ValueError:
                sys.exit(HELP_MSG)
            else:
                # clamp to [1, 15]; decrement because new_level() below
                # immediately re-increments
                self.level = max(1, self.level)
                self.level = min(15, self.level)
                self.level -= 1
                break
    else:
        self.level = 0
    self.game = game
    self.width = width
    self.height = height
    self.goal = 0
    self.score = 0
    try:
        with open(self.FILE_PATH, "r") as f:
            self.high_score = int(f.read())
    # BUGFIX: was a bare `except:` which also swallowed KeyboardInterrupt
    # and SystemExit; only a missing/unreadable or corrupt file is expected
    except (OSError, ValueError):
        self.high_score = 0
    self.combo = -1
    self.time = time.time()
    self.lines_cleared = 0
    self.strings = []
    Window.__init__(self, width, height, begin_x, begin_y)
    self.new_level()
def refresh(self):
    """Redraw the whole stats panel (score, level, goal, messages)."""
    self.draw_border()
    self.window.addstr(2, 2, "SCORE\t{:n}".format(self.score))
    high_text = "HIGH\t{:n}".format(self.high_score)
    if self.score >= self.high_score:
        # the player is beating the record: make it stand out
        self.window.addstr(3, 2, high_text, curses.A_BLINK|curses.A_BOLD)
    else:
        self.window.addstr(3, 2, high_text)
    self.window.addstr(5, 2, "LEVEL\t%d" % self.level)
    self.window.addstr(6, 2, "GOAL\t%d" % self.goal)
    self.window.addstr(7, 2, "LINES\t%d" % self.lines_cleared)
    # transient messages are stacked just above the bottom border, centred
    first_row = self.height - len(self.strings) - 2
    for row, text in enumerate(self.strings, start=first_row):
        self.window.addstr(row, (self.width - len(text)) // 2 + 1, text)
    self.refresh_time()
def refresh_time(self):
    """Redraw the elapsed-time line (HH:MM:SS)."""
    # BUGFIX: use gmtime on the elapsed seconds. localtime() interprets the
    # duration as a local-timezone timestamp, and the old "tm_hour - 1"
    # correction only produced the right hour in UTC+1 timezones.
    t = time.gmtime(time.time() - self.time)
    self.window.addstr(4, 2, "TIME\t%02d:%02d:%02d" % (t.tm_hour, t.tm_min, t.tm_sec))
    self.window.refresh()
def new_level(self):
    """Advance one level: update gravity speed, lock delay and goal."""
    self.level += 1
    if self.level <= 20:
        # Tetris-guideline gravity curve; frozen beyond level 20
        Tetromino.fall_delay = (0.8 - (self.level - 1) * 0.007) ** (self.level - 1)
    if self.level > 15:
        # past level 15 the lock delay shrinks instead of the fall delay
        Tetromino.lock_delay = 0.5 * 0.9 ** (self.level - 15)
    self.goal += 5 * self.level
    self.refresh()
def piece_dropped(self, lines):
self.score | |
[x2Prime, x2PrimeRequest, x2PrimeLoad, x2PrimeTimeWasted, x2PrimeTime, x2PrimeReward, x2PrimeCost,
x2FinalReward, x2FinalCost, x2PrimePartiallyServed, x2PrimeServedRequests,
x2PartiallyServedRequestsAllVehicles, x2ServedRequestsAll, isItPossible] = \
updateAllRoutes(x2Prime, x2PrimeRequest, xPrimeTime, tMatrix, allRequests)
elif level == 5: # intra-swap operator
[x2Prime, x2PrimeRequest] = findBestIntraSwap(xPrime, xPrimeRequest, xPrimeTime, xPrimeServed, xPrimeReward,
xPrimeCost, tMatrix, allRequests, numOfRequests)
[x2Prime, x2PrimeRequest, x2PrimeLoad, x2PrimeTimeWasted, x2PrimeTime, x2PrimeReward, x2PrimeCost,
x2FinalReward, x2FinalCost, x2PrimePartiallyServed, x2PrimeServedRequests,
x2PartiallyServedRequestsAllVehicles, x2ServedRequestsAll, isItPossible] = \
updateAllRoutes(x2Prime, x2PrimeRequest, xPrimeTime, tMatrix, allRequests)
elif level == 6: # delete operator
[x2Prime, x2PrimeRequest, x2PrimeProfit] = findBestDeletion(xPrime, xPrimeRequest, xPrimeTime, xPrimeServed,
xPrimeReward, xPrimeCost, tMatrix, allRequests)
[x2Prime, x2PrimeRequest, x2PrimeLoad, x2PrimeTimeWasted, x2PrimeTime, x2PrimeReward, x2PrimeCost,
x2FinalReward, x2FinalCost, x2PrimePartiallyServed, x2PrimeServedRequests,
x2PartiallyServedRequestsAllVehicles, x2ServedRequestsAll, isItPossible] = \
updateAllRoutes(x2Prime, x2PrimeRequest, xPrimeTime, tMatrix, allRequests)
x2PrimeProfit = x2FinalReward - x2FinalCost
xPrimeProfit = sum(xPrimeReward) - sum(xPrimeCost)
if x2PrimeProfit > xPrimeProfit and isItPossible: # if we found an improvement
xPrime = copy.deepcopy(x2Prime)
xPrimeRequest = copy.deepcopy(x2PrimeRequest)
xPrimeTime = copy.deepcopy(x2PrimeTime)
xPrimeTimeWasted = copy.deepcopy(x2PrimeTimeWasted)
xPrimeReward = x2PrimeReward[:]
xPrimeCost = x2PrimeCost[:]
xPrimeLoad = copy.deepcopy(x2PrimeLoad)
xPrimeServed = copy.deepcopy(x2PrimeServedRequests)
xPrimeServedAll = x2ServedRequestsAll[:]
xPrimeFinalReward = x2FinalReward
xPrimeFinalCost = x2FinalCost
level = 1
xProfitPlot.append([time.clock() - start_time, x2PrimeProfit])
else: # if we did not find an improvement, move to the next operator
level += 1
xProfitPlot.append([time.clock() - start_time, xPrimeProfit])
[xPrime, xPrimeRequest, xPrimeLoad, xPrimeTimeWasted, xPrimeTime, xPrimeReward, xPrimeCost, xFinalReward,
xFinalCost, xPrimePartiallyServed, xPrimeServedRequests, xPartiallyServedRequestsAllVehicles, xServedRequestsAll,
isItPossible] = updateAllRoutes(xPrime, xPrimeRequest, xPrimeTime, tMatrix, allRequests)
return xPrime, xPrimeRequest, xPrimeLoad, xPrimeTime, xPrimeServed, xPrimeServedAll, xPrimeReward, xPrimeCost,\
dataOfImages
def checkPrecedenceConstraintsRoute(currentRoute, currentRequests, allRequests):
    """Return True when no delivery node precedes its pickup node.

    For every request appearing in *currentRequests*, the first node of
    that request on *currentRoute* must be its pickup node; if it is the
    delivery node, precedence is violated.
    """
    for request_id in currentRequests:
        first_visit = currentRoute[currentRequests.index(request_id)]
        if first_visit == allRequests[request_id - 1].deliveryNode:
            return False
    return True
def checkCapacityConstraints(consideredRequest, allowedCapacity, totalLoad, allRequests):
    """True if picking up *consideredRequest* keeps the vehicle within capacity.

    totalLoad is the load before arriving at the node;
    allRequests[...].load is the load added at the considered node.
    """
    load_after_pickup = totalLoad + allRequests[consideredRequest - 1].load
    return load_after_pickup <= allowedCapacity
def checkTimeWindowsConstraints(consideredRequest, consideredNode, currentRoute, tMatrix, allRequests, currentTime):
    """Check whether *consideredNode* can be reached within its time window.

    Returns a (feasible, waiting_time, travel_time) triple, where
    waiting_time is how long the driver idles when arriving early.
    """
    request = allRequests[consideredRequest - 1]
    # travel time from the route's last node to the candidate node
    travelTime = tMatrix[currentRoute[-1] - 1][consideredNode - 1]
    arrival = currentTime + travelTime
    # select the time window matching the role of the node in the request
    if consideredNode == request.pickupNode:
        earliest = request.pickupTimeWindowE
        latest = request.pickupTimeWindowL
    elif consideredNode == request.deliveryNode:
        earliest = request.deliveryTimeWindowE
        latest = request.deliveryTimeWindowL
    else:
        earliest = 0
        latest = 0
    if arrival < earliest:
        # early arrival: feasible, but the driver must wait for the window
        return True, earliest - arrival, travelTime
    # on time when arrival <= latest; otherwise the window was missed
    return arrival <= latest, 0, travelTime
def checkIfRouteIsPossible(currentRoute, consideredRequest, consideredNode, timeTotal, travelTime, timeWasted,
                           timeLimit, tMatrix, driverFinalDestination, allRequests, partialServedRequests,
                           driverNumber):
    """Check that appending *consideredNode* still leaves a feasible route.

    After the node's own time-window/capacity/precedence checks pass, this
    verifies that every open request's delivery node can still be visited
    (in some order) within its time window, and that the driver can reach
    the final destination within *timeLimit*.
    """
    nodesLeftToServe = []
    dummyPartiallyServedRequests = partialServedRequests[:]
    # the driver's own request is the route's end point, not a delivery to plan
    dummyPartiallyServedRequests.remove(driverNumber)
    # BUGFIX: the original guard `type(...) != 'NoneType'` compared a type
    # object against a string and was therefore always true; iterate directly.
    for openRequest in dummyPartiallyServedRequests:
        # delivery nodes that must still be visited to complete open requests
        nodesLeftToServe.append(allRequests[openRequest - 1].deliveryNode)
    if consideredNode != allRequests[consideredRequest - 1].deliveryNode:  # considering a pickup node
        # its corresponding delivery node also becomes pending
        nodesLeftToServe.append(allRequests[consideredRequest - 1].deliveryNode)
        dummyPartiallyServedRequests.append(consideredRequest)
    # every possible visiting order of the unserved delivery nodes
    permuts = list(itertools.permutations(nodesLeftToServe))
    permutsRequests = list(itertools.permutations(dummyPartiallyServedRequests))
    if permuts == [()]:
        return True  # no unserved delivery nodes left
    else:
        bestTime = float("inf")  # best completion time found so far
        for i in range(len(permuts)):
            routeCopy = currentRoute[:]
            routeCopy.append(consideredNode)
            # total time after adding the considered node itself
            theTotalTime = timeTotal + travelTime + timeWasted
            dummyTime = theTotalTime
            sequence = permuts[i]
            requestSequence = permutsRequests[i]
            # all entries must become True for this ordering to be feasible
            timeWindowConstraints = [False] * len(sequence)
            for j in range(len(sequence)):
                lastNodeVisited = routeCopy[-1]
                # travel time from the last visited node to the next in sequence
                tij = tMatrix[lastNodeVisited - 1][sequence[j] - 1]
                sequenceRequest = requestSequence[j]
                # feasibility and waiting time for just this next node
                [checkTime, theTimeWasted, dummyTravelTime] =\
                    checkTimeWindowsConstraints(sequenceRequest, sequence[j], routeCopy, tMatrix,
                                                allRequests, dummyTime)
                timeWindowConstraints[j] = checkTime
                if checkTime == False:
                    break  # this ordering is infeasible
                dummyTime += tij + theTimeWasted
                routeCopy.append(sequence[j])
            if all(timeWindowConstraints) == True:
                # finish at the driver's destination point
                dummyTime += tMatrix[routeCopy[-1] - 1][driverFinalDestination - 1]
                routeCopy.append(driverFinalDestination)
                if dummyTime < bestTime:
                    bestTime = dummyTime
        # feasible only if some ordering respects the overall time limit
        return bestTime <= timeLimit
def updateRouteInfo(currentRoute, currentRequest, currentRouteTime, tMatrix, allRequests):
    # USED TO UPDATE THE INFORMATION ON A SINGLE ROUTE
    # Walks the route node by node, rebuilding load/time/reward/cost lists
    # and re-validating capacity and time-window constraints.
    # Returns (route, requests, loads, timesWasted, times, reward, cost,
    # partiallyServed, served, isItPossible); isItPossible is False as soon
    # as any constraint is violated.
    # NOTE(review): `capacity` is a module-level global defined elsewhere.
    # ------------------------------------------------ INITIALIZATION ------------------------------------------------
    tempRoute = []
    tempRouteRequest = []
    tempRouteLoad = []
    tempRouteTimeWasted = []
    tempRouteTime = []
    tempServedRequests = []
    tempPartiallyServedRequests = []
    tempFinalReward = 0
    tempFinalCost = 0
    # seed with the route's starting node (the driver's own request)
    tempRoute.append(currentRoute[0])
    tempRouteRequest.append(currentRequest[0])
    tempRouteLoad.append(0)
    tempRouteTimeWasted.append(0)
    tempRouteTime.append(currentRouteTime[0])
    tempPartiallyServedRequests.append(currentRequest[0])
    # ----------------------------------------------------------------------------------------------------------------
    for i in range(1, len(currentRoute)):  # testing all the nodes inside the route
        # CAPACITY
        if allRequests[currentRequest[i] - 1].pickupNode == currentRoute[i]:  # if it is a pickup node
            tempRouteLoad.append(tempRouteLoad[-1] + allRequests[currentRequest[i] - 1].load)  # increase the load
            tempPartiallyServedRequests.append(currentRequest[i])
            if tempRouteLoad[-1] > capacity:  # if capacity constraints have been violated
                # exit the function with isItPossible = False
                return currentRoute, currentRequest, tempRouteLoad, tempRouteTimeWasted, tempRouteTime,\
                    tempFinalReward, tempFinalCost, tempPartiallyServedRequests, tempServedRequests, False
        # capacity constraints cannot be violated if we are at a delivery node
        elif allRequests[currentRequest[i] - 1].deliveryNode == currentRoute[i]:
            tempServedRequests.append(currentRequest[i])
            tempPartiallyServedRequests.remove(currentRequest[i])
            # if request considered is the driver's request, then no reward is given
            if currentRequest[i] == currentRequest[0]:
                tempRouteLoad.append(0)
            else:  # update the load and reward
                tempRouteLoad.append(tempRouteLoad[-1] - allRequests[currentRequest[i] - 1].load)
                tempFinalReward += allRequests[currentRequest[i] - 1].reward  # update the reward
        # TIME WINDOWS
        [timeWindowsCheck, temporaryTimeWasted, timeToTravel] =\
            checkTimeWindowsConstraints(currentRequest[i], currentRoute[i], tempRoute, tMatrix,
                                        allRequests, tempRouteTime[-1])
        tempRouteTimeWasted.append(temporaryTimeWasted)  # update time wasted list
        if timeWindowsCheck:  # if time windows constraints are not violated
            tempRouteTime.append(tempRouteTime[-1] + tempRouteTimeWasted[i] + timeToTravel)  # update time list
        else:  # time windows constraints have been violated
            # exit the function with isItPossible = False
            return currentRoute, currentRequest, tempRouteLoad, tempRouteTimeWasted, tempRouteTime, tempFinalReward,\
                tempFinalCost, tempPartiallyServedRequests, tempServedRequests, False
        # we are not considering wasted time as a cost
        # cost is calculated to be 10c per minute
        tempFinalCost += 0.0016 * timeToTravel
        tempRoute.append(currentRoute[i])
        tempRouteRequest.append(currentRequest[i])
    return tempRoute, tempRouteRequest, tempRouteLoad, tempRouteTimeWasted, tempRouteTime, tempFinalReward,\
        tempFinalCost, tempPartiallyServedRequests, tempServedRequests, True
def updateAllRoutes(currentRoute, currentRequest, currentRouteTime, tMatrix, allRequests):
# ------------------------------------------------ INITIALIZATION ------------------------------------------------
tempRoute = []
tempRouteRequest | |
authentication_type: Authentication type.
:type authentication_type: str
:param network_isolation: Optional resource information to enable network isolation for
request.
:type network_isolation: ~azure.mgmt.sql.models.NetworkIsolationSettings
"""
# validation rules: these five properties must be supplied by the caller
_validation = {
    'storage_key_type': {'required': True},
    'storage_key': {'required': True},
    'storage_uri': {'required': True},
    'administrator_login': {'required': True},
    'administrator_login_password': {'required': True},
}

# maps python attribute names to their REST API wire names and types
_attribute_map = {
    'database_name': {'key': 'databaseName', 'type': 'str'},
    'edition': {'key': 'edition', 'type': 'str'},
    'service_objective_name': {'key': 'serviceObjectiveName', 'type': 'str'},
    'max_size_bytes': {'key': 'maxSizeBytes', 'type': 'str'},
    'storage_key_type': {'key': 'storageKeyType', 'type': 'str'},
    'storage_key': {'key': 'storageKey', 'type': 'str'},
    'storage_uri': {'key': 'storageUri', 'type': 'str'},
    'administrator_login': {'key': 'administratorLogin', 'type': 'str'},
    # BUGFIX: the wire name had been replaced by the anonymization
    # placeholder '<PASSWORD>'; restore the camelCase REST key that
    # matches every sibling entry in this map.
    'administrator_login_password': {'key': 'administratorLoginPassword', 'type': 'str'},
    'authentication_type': {'key': 'authenticationType', 'type': 'str'},
    'network_isolation': {'key': 'networkIsolation', 'type': 'NetworkIsolationSettings'},
}
def __init__(self, **kwargs):
    """Populate the definition from keyword arguments.

    Required keys (storage_key_type, storage_key, storage_uri,
    administrator_login, administrator_login_password) raise KeyError
    when absent, matching the _validation map; the rest default to None.
    """
    super(ImportNewDatabaseDefinition, self).__init__(**kwargs)
    get = kwargs.get
    self.database_name = get('database_name')
    self.edition = get('edition')
    self.service_objective_name = get('service_objective_name')
    self.max_size_bytes = get('max_size_bytes')
    self.storage_key_type = kwargs['storage_key_type']
    self.storage_key = kwargs['storage_key']
    self.storage_uri = kwargs['storage_uri']
    self.administrator_login = kwargs['administrator_login']
    self.administrator_login_password = kwargs['administrator_login_password']
    self.authentication_type = get('authentication_type')
    self.network_isolation = get('network_isolation')
class InstanceFailoverGroup(ProxyResource):
    """An instance failover group.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param read_write_endpoint: Read-write endpoint of the failover group instance.
    :type read_write_endpoint: ~azure.mgmt.sql.models.InstanceFailoverGroupReadWriteEndpoint
    :param read_only_endpoint: Read-only endpoint of the failover group instance.
    :type read_only_endpoint: ~azure.mgmt.sql.models.InstanceFailoverGroupReadOnlyEndpoint
    :ivar replication_role: Local replication role of the failover group instance. Possible values
     include: "Primary", "Secondary".
    :vartype replication_role: str or ~azure.mgmt.sql.models.InstanceFailoverGroupReplicationRole
    :ivar replication_state: Replication state of the failover group instance.
    :vartype replication_state: str
    :param partner_regions: Partner region information for the failover group.
    :type partner_regions: list[~azure.mgmt.sql.models.PartnerRegionInfo]
    :param managed_instance_pairs: List of managed instance pairs in the failover group.
    :type managed_instance_pairs: list[~azure.mgmt.sql.models.ManagedInstancePairInfo]
    """
    # read-only properties are populated by the service on deserialization
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'replication_role': {'readonly': True},
        'replication_state': {'readonly': True},
    }
    # maps python attribute names to REST wire names; nested under "properties"
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'read_write_endpoint': {'key': 'properties.readWriteEndpoint', 'type': 'InstanceFailoverGroupReadWriteEndpoint'},
        'read_only_endpoint': {'key': 'properties.readOnlyEndpoint', 'type': 'InstanceFailoverGroupReadOnlyEndpoint'},
        'replication_role': {'key': 'properties.replicationRole', 'type': 'str'},
        'replication_state': {'key': 'properties.replicationState', 'type': 'str'},
        'partner_regions': {'key': 'properties.partnerRegions', 'type': '[PartnerRegionInfo]'},
        'managed_instance_pairs': {'key': 'properties.managedInstancePairs', 'type': '[ManagedInstancePairInfo]'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(InstanceFailoverGroup, self).__init__(**kwargs)
        self.read_write_endpoint = kwargs.get('read_write_endpoint', None)
        self.read_only_endpoint = kwargs.get('read_only_endpoint', None)
        # server-populated; never sent in requests
        self.replication_role = None
        self.replication_state = None
        self.partner_regions = kwargs.get('partner_regions', None)
        self.managed_instance_pairs = kwargs.get('managed_instance_pairs', None)
class InstanceFailoverGroupListResult(msrest.serialization.Model):
    """A list of instance failover groups.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar value: Array of results.
    :vartype value: list[~azure.mgmt.sql.models.InstanceFailoverGroup]
    :ivar next_link: Link to retrieve next page of results.
    :vartype next_link: str
    """
    # both properties are read-only and filled in during deserialization
    _validation = {
        'value': {'readonly': True},
        'next_link': {'readonly': True},
    }
    _attribute_map = {
        'value': {'key': 'value', 'type': '[InstanceFailoverGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(InstanceFailoverGroupListResult, self).__init__(**kwargs)
        self.value = None
        self.next_link = None
class InstanceFailoverGroupReadOnlyEndpoint(msrest.serialization.Model):
    """Read-only endpoint of a failover-group instance.

    :param failover_policy: Failover policy of the read-only endpoint for the
     failover group. Possible values include: "Disabled", "Enabled".
    :type failover_policy: str or ~azure.mgmt.sql.models.ReadOnlyEndpointFailoverPolicy
    """

    _attribute_map = {
        'failover_policy': {'key': 'failoverPolicy', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.failover_policy = kwargs.get('failover_policy')
class InstanceFailoverGroupReadWriteEndpoint(msrest.serialization.Model):
    """Read-write endpoint of a failover-group instance.

    All required parameters must be populated in order to send to Azure.

    :param failover_policy: Required. Failover policy of the read-write
     endpoint for the failover group. If failoverPolicy is Automatic then
     failoverWithDataLossGracePeriodMinutes is required. Possible values
     include: "Manual", "Automatic".
    :type failover_policy: str or ~azure.mgmt.sql.models.ReadWriteEndpointFailoverPolicy
    :param failover_with_data_loss_grace_period_minutes: Grace period before
     failover with data loss is attempted for the read-write endpoint. If
     failoverPolicy is Automatic then failoverWithDataLossGracePeriodMinutes
     is required.
    :type failover_with_data_loss_grace_period_minutes: int
    """

    _validation = {
        'failover_policy': {'required': True},
    }

    _attribute_map = {
        'failover_policy': {'key': 'failoverPolicy', 'type': 'str'},
        'failover_with_data_loss_grace_period_minutes': {'key': 'failoverWithDataLossGracePeriodMinutes', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Required: a missing 'failover_policy' raises KeyError by design.
        self.failover_policy = kwargs['failover_policy']
        self.failover_with_data_loss_grace_period_minutes = kwargs.get(
            'failover_with_data_loss_grace_period_minutes')
class InstancePool(TrackedResource):
    """An Azure SQL instance pool.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param sku: The name and tier of the SKU.
    :type sku: ~azure.mgmt.sql.models.Sku
    :param subnet_id: Resource ID of the subnet to place this instance pool in.
    :type subnet_id: str
    :param v_cores: Count of vCores belonging to this instance pool.
    :type v_cores: int
    :param license_type: The license type. Possible values are
     'LicenseIncluded' (price for SQL license is included) and 'BasePrice'
     (without SQL license price). Possible values include: "LicenseIncluded",
     "BasePrice".
    :type license_type: str or ~azure.mgmt.sql.models.InstancePoolLicenseType
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'subnet_id': {'key': 'properties.subnetId', 'type': 'str'},
        'v_cores': {'key': 'properties.vCores', 'type': 'int'},
        'license_type': {'key': 'properties.licenseType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # location/tags are consumed by the TrackedResource base class.
        super().__init__(**kwargs)
        self.sku = kwargs.get('sku')
        self.subnet_id = kwargs.get('subnet_id')
        self.v_cores = kwargs.get('v_cores')
        self.license_type = kwargs.get('license_type')
class InstancePoolEditionCapability(msrest.serialization.Model):
    """The instance pool capability.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar name: The instance pool version name.
    :vartype name: str
    :ivar supported_families: The supported families.
    :vartype supported_families: list[~azure.mgmt.sql.models.InstancePoolFamilyCapability]
    :ivar status: The status of the capability. Possible values include:
     "Visible", "Available", "Default", "Disabled".
    :vartype status: str or ~azure.mgmt.sql.models.CapabilityStatus
    :param reason: The reason for the capability not being available.
    :type reason: str
    """

    _validation = {
        'name': {'readonly': True},
        'supported_families': {'readonly': True},
        'status': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'supported_families': {'key': 'supportedFamilies', 'type': '[InstancePoolFamilyCapability]'},
        'status': {'key': 'status', 'type': 'str'},
        'reason': {'key': 'reason', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Only 'reason' is caller-settable; the rest are server-populated.
        self.name = None
        self.supported_families = None
        self.status = None
        self.reason = kwargs.get('reason')
class InstancePoolFamilyCapability(msrest.serialization.Model):
    """The instance pool family capability.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar name: Family name.
    :vartype name: str
    :ivar supported_license_types: List of supported license types.
    :vartype supported_license_types: list[~azure.mgmt.sql.models.LicenseTypeCapability]
    :ivar supported_vcores_values: List of supported virtual cores values.
    :vartype supported_vcores_values: list[~azure.mgmt.sql.models.InstancePoolVcoresCapability]
    :ivar status: The status of the capability. Possible values include:
     "Visible", "Available", "Default", "Disabled".
    :vartype status: str or ~azure.mgmt.sql.models.CapabilityStatus
    :param reason: The reason for the capability not being available.
    :type reason: str
    """

    _validation = {
        'name': {'readonly': True},
        'supported_license_types': {'readonly': True},
        'supported_vcores_values': {'readonly': True},
        'status': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'supported_license_types': {'key': 'supportedLicenseTypes', 'type': '[LicenseTypeCapability]'},
        'supported_vcores_values': {'key': 'supportedVcoresValues', 'type': '[InstancePoolVcoresCapability]'},
        'status': {'key': 'status', 'type': 'str'},
        'reason': {'key': 'reason', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Only 'reason' is caller-settable; the rest are server-populated.
        self.name = None
        self.supported_license_types = None
        self.supported_vcores_values = None
        self.status = None
        self.reason = kwargs.get('reason')
class InstancePoolListResult(msrest.serialization.Model):
    """A paged list of Azure SQL instance pools.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar value: Array of results.
    :vartype value: list[~azure.mgmt.sql.models.InstancePool]
    :ivar next_link: Link to retrieve next page of results.
    :vartype next_link: str
    """

    _validation = {
        'value': {'readonly': True},
        'next_link': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[InstancePool]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both fields are server-populated; start empty on the client.
        self.value = None
        self.next_link = None
class InstancePoolUpdate(msrest.serialization.Model):
    """An update request for an instance pool.

    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    """

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.tags = kwargs.get('tags')
class InstancePoolVcoresCapability(msrest.serialization.Model):
"""The managed instance virtual cores capability.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The virtual cores identifier.
:vartype name: str
:ivar value: The virtual cores | |
axis=2)
epsilon = 1e-3
m_pred_used = tf.log(m_pred_used + epsilon)
m_total = tf.log(m_total + epsilon)
m_loss = tf.nn.l2_loss(m_total - m_pred_used)
q_loss = tf.nn.l2_loss(
(q_total - q_pred_used) * tf.reduce_sum(q_gates, axis=2))
q_loss /= tf.to_float(batch * length_q)
m_loss /= tf.to_float(batch * length_kv)
# We would like the query groups to be equal sized. The group
# size is discrete, so we need some trick here. We add a loss
# proportional to the product of the group size and the
# predictions for that group. This encourages the predictions to
# decrease for groups that are too big.
q_group_deviation = (q_group_size / q_group_target_size) - 1.0
q_balance_loss = tf.reduce_sum(
tf.reduce_mean(q_pred_biased, axis=1) *
q_group_deviation) / tf.to_float(batch)
m_group_deviation = (m_group_size / m_group_target_size) - 1.0
m_balance_loss = tf.reduce_sum(
tf.reduce_mean(m_pred_biased, axis=1) *
m_group_deviation) / tf.to_float(batch)
# The losses in this function only propagate back to variables
# defined in this function, and the losses outside of this
# function only propagate back to variables outside of this
# function. Assuming some kind of adaptive learning algorithm,
# it should not matter how much we scale the losses in this function.
# Still we scale them down a lot so that they should not show up
# much in the overall loss for the model.
extra_loss_multiplier = 1e-3
extra_loss = q_loss + m_loss + q_balance_loss + m_balance_loss
extra_loss *= extra_loss_multiplier
# Show a bunch of summaries.
if common_layers.should_generate_summaries() and make_image_summary:
tf.summary.histogram("q_group_size", q_group_size)
tf.summary.histogram("m_group_size", m_group_size)
tf.summary.scalar("q_loss", q_loss)
tf.summary.scalar("m_loss", m_loss)
tf.summary.scalar("q_balance_loss", q_balance_loss)
tf.summary.scalar("m_balance_loss", m_balance_loss)
tf.summary.histogram("m_pred_used", m_pred_used)
tf.summary.histogram("m_total", m_total)
tf.summary.histogram("q_pred_used", q_pred_used)
tf.summary.histogram("q_total", q_total)
if make_image_summary:
# image summaries are expensive.
# So we restrict them to head_num<4, query_position<512, batch_index=0.
trunc_heads = min(4, num_heads)
trunc_length_q = tf.minimum(length_q, 512)
# We recompute the attention for the first example, in an inefficient
# way - masking. This lets us show pretty pictures.
# [trunc_heads, length_q, group]
q_gates_trunc = q_gates[:trunc_heads, :trunc_length_q, :]
# [trunc_heads, length_kv, group]
m_gates_trunc = m_gates[:trunc_heads, :, :]
grouping_mask = tf.matmul(
q_gates_trunc, m_gates_trunc, transpose_b=True)
q_trunc = q[:trunc_heads, :trunc_length_q, :]
k_trunc = kv[:trunc_heads, :, :depth_qk]
logits_trunc = tf.matmul(q_trunc, k_trunc, transpose_b=True)
if mask_right:
band = common_layers.ones_matrix_band_part(trunc_length_q, length_kv,
-1, 0)
trunc_bias = tf.expand_dims((1.0 - band) * -1e9, 0)
logits_trunc += trunc_bias
att_trunc = tf.nn.softmax(logits_trunc)
mask_coverage = tf.reduce_sum(grouping_mask * att_trunc) / (
tf.to_float(trunc_length_q) * trunc_heads)
tf.summary.scalar("coverage", mask_coverage)
att_trunc_hdr = tf.pow(att_trunc, 0.2) # for high-dynamic-range
mask_channel = grouping_mask * tf.maximum(att_trunc_hdr, 0.3)
image = tf.stack([att_trunc_hdr, mask_channel, mask_channel], axis=3)
tf.summary.image("att", image, max_outputs=trunc_heads)
# show one group for each head.
att_per_group = tf.expand_dims(weights[:trunc_heads, 0, :, :], -1)
tf.summary.image(
"att_per_group_%d",
tf.pow(att_per_group, 0.2),
max_outputs=trunc_heads)
return o, extra_loss
def dot_product_attention(q,
                          k,
                          v,
                          bias,
                          dropout_rate=0.0,
                          image_shapes=None,
                          name=None,
                          make_image_summary=True,
                          save_weights_to=None,
                          dropout_broadcast_dims=None):
  """Plain dot-product attention.

  Args:
    q: Tensor with shape [..., length_q, depth_k].
    k: Tensor with shape [..., length_kv, depth_k]; leading dimensions must
      match q.
    v: Tensor with shape [..., length_kv, depth_v]; leading dimensions must
      match q.
    bias: bias Tensor (see attention_bias()), or None.
    dropout_rate: float probability of dropping an individual attention link.
    image_shapes: optional tuple of integer scalars; see
      attention_image_summary().
    name: optional variable-scope name.
    make_image_summary: True to emit an attention image summary.
    save_weights_to: optional dict; when given, the attention weights and raw
      logits are stored under keys derived from the variable-scope name.
    dropout_broadcast_dims: optional list of ints (< rank of q) giving the
      dimensions over which dropout decisions are shared.

  Returns:
    Tensor with shape [..., length_q, depth_v].
  """
  with tf.variable_scope(
      name, default_name="dot_product_attention", values=[q, k, v]) as scope:
    # Raw similarity scores: [..., length_q, length_kv].
    scores = tf.matmul(q, k, transpose_b=True)
    if bias is not None:
      scores += common_layers.cast_like(bias, scores)
    attn = tf.nn.softmax(scores, name="attention_weights")
    if save_weights_to is not None:
      save_weights_to[scope.name] = attn
      save_weights_to[scope.name + "/logits"] = scores
    # Drop individual attention links, optionally sharing the dropout
    # decision across the broadcast dimensions (saves memory).
    attn = common_layers.dropout_with_broadcast_dims(
        attn, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
    if common_layers.should_generate_summaries() and make_image_summary:
      attention_image_summary(attn, image_shapes)
    return tf.matmul(attn, v)
def _generate_relative_positions_matrix(length, max_relative_position):
  """Build a [length, length] matrix of clipped, shifted relative offsets."""
  positions = tf.range(length)
  # offsets[i, j] = j - i, computed by broadcasting a row against a column.
  offsets = tf.expand_dims(positions, 0) - tf.expand_dims(positions, 1)
  clipped = tf.clip_by_value(offsets, -max_relative_position,
                             max_relative_position)
  # Shift into [0, 2 * max_relative_position] so each offset is a valid
  # non-negative embedding index while remaining unique.
  return clipped + max_relative_position
def _generate_relative_positions_embeddings(length, depth,
                                            max_relative_position, name):
  """Look up a [length, length, depth] table of relative-position embeddings."""
  with tf.variable_scope(name):
    offset_matrix = _generate_relative_positions_matrix(
        length, max_relative_position)
    # One embedding row per distinct clipped offset in [-max, max].
    vocab_size = max_relative_position * 2 + 1
    table = tf.get_variable("embeddings", [vocab_size, depth])
    return tf.gather(table, offset_matrix)
def _relative_attention_inner(x, y, z, transpose):
  """Relative position-aware dot-product attention inner calculation.

  This batches matrix multiply calculations to avoid unnecessary broadcasting.

  Args:
    x: Tensor with shape [batch_size, heads, length, length or depth].
    y: Tensor with shape [batch_size, heads, length, depth].
    z: Tensor with shape [length, length, depth].
    transpose: Whether to transpose inner matrices of y and z. Should be true if
      last dimension of x is depth, not length.

  Returns:
    A Tensor with shape [batch_size, heads, length, length or depth].
  """
  batch_size = tf.shape(x)[0]
  heads = x.get_shape().as_list()[1]
  length = tf.shape(x)[2]

  # xy_matmul is [batch_size, heads, length, length or depth]
  xy_matmul = tf.matmul(x, y, transpose_b=transpose)
  # The z term is shared across the batch, so x is folded to a 3-D tensor
  # with `length` leading (matching z's leading dim) before the matmul,
  # then unfolded and transposed back.
  # x_t is [length, batch_size, heads, length or depth]
  x_t = tf.transpose(x, [2, 0, 1, 3])
  # x_t_r is [length, batch_size * heads, length or depth]
  x_t_r = tf.reshape(x_t, [length, heads * batch_size, -1])
  # x_tz_matmul is [length, batch_size * heads, length or depth]
  x_tz_matmul = tf.matmul(x_t_r, z, transpose_b=transpose)
  # x_tz_matmul_r is [length, batch_size, heads, length or depth]
  x_tz_matmul_r = tf.reshape(x_tz_matmul, [length, batch_size, heads, -1])
  # x_tz_matmul_r_t is [batch_size, heads, length, length or depth]
  x_tz_matmul_r_t = tf.transpose(x_tz_matmul_r, [1, 2, 0, 3])
  return xy_matmul + x_tz_matmul_r_t
def dot_product_attention_relative(q,
                                   k,
                                   v,
                                   bias,
                                   max_relative_position,
                                   dropout_rate=0.0,
                                   image_shapes=None,
                                   name=None,
                                   make_image_summary=True):
  """Dot-product self-attention augmented with relative-position embeddings.

  Learned embeddings for the (clipped) relative offset between each query
  and each key/value position are folded into both the logits and the
  output, following Shaw et al. (2018).

  Args:
    q: a Tensor with shape [batch, heads, length, depth].
    k: a Tensor with shape [batch, heads, length, depth].
    v: a Tensor with shape [batch, heads, length, depth].
    bias: bias Tensor, or None.
    max_relative_position: an integer; offsets beyond this distance share an
      embedding.
    dropout_rate: a floating point number.
    image_shapes: optional tuple of integer scalars.
    name: an optional string.
    make_image_summary: Whether to make an attention image summary.

  Returns:
    A Tensor.

  Raises:
    ValueError: if max_relative_position is not > 0.
  """
  if not max_relative_position:
    raise ValueError("Max relative position (%s) should be > 0 when using "
                     "relative self attention." % (max_relative_position))
  with tf.variable_scope(
      name, default_name="dot_product_attention_relative", values=[q, k, v]):
    # Self-attention only: q, k and v must share one shape.
    q.get_shape().assert_is_compatible_with(k.get_shape())
    q.get_shape().assert_is_compatible_with(v.get_shape())

    depth = q.get_shape().as_list()[3]
    length = common_layers.shape_list(q)[2]
    # Separate learned tables for the key side and the value side.
    key_relations = _generate_relative_positions_embeddings(
        length, depth, max_relative_position, "relative_positions_keys")
    value_relations = _generate_relative_positions_embeddings(
        length, depth, max_relative_position, "relative_positions_values")

    logits = _relative_attention_inner(q, k, key_relations, True)
    if bias is not None:
      logits += bias
    weights = tf.nn.softmax(logits, name="attention_weights")
    weights = tf.nn.dropout(weights, 1.0 - dropout_rate)
    if not tf.get_variable_scope().reuse and make_image_summary:
      attention_image_summary(weights, image_shapes)
    return _relative_attention_inner(weights, v, value_relations, False)
def _relative_position_to_absolute_position_masked(x):
  """Helper to dot_product_self_attention_relative_v2.

  Converts a masked attention Tensor indexed by relative position into one
  indexed by absolute memory position. Input dimensions are
  [batch, heads, query_position, memory_position - query_position + length - 1];
  output dimensions are [batch, heads, query_position, memory_position].
  Behavior is undefined for regions where memory_position > query_position
  (masked attention only).

  Args:
    x: a Tensor with shape [batch, heads, length, length]

  Returns:
    a Tensor with shape [batch, heads, length, length]
  """
  batch, heads, length, _ = common_layers.shape_list(x)
  # Prepend one zero column, then reinterpret the buffer so the extra
  # elements realign each row to absolute positions; drop the first row.
  padded = tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0]])
  realigned = tf.reshape(padded, [batch, heads, 1 + length, length])
  return tf.slice(realigned, [0, 0, 1, 0], [-1, -1, -1, -1])
def _absolute_position_to_relative_position_masked(x):
"""Helper to dot_product_self_attention_relative_v2.
Rearrange an attention logits or weights Tensor.
The dimensions of the input represent:
[batch, heads, query_position, memory_position]
The dimensions of | |
k_v, d_v, c_v):
locdata()
elif operation == 'entangle':
if self.entangleaction(time_stamp, periods, k_v, d_v, c_v):
locdata()
elif operation == 'entangle_and_cross_up':
if self.crossup_entangle_action(time_stamp, periods, k_v, d_v, c_v):
locdata()
elif operation == 'entangle_and_cross_up_within_period':
if self.crossup_entangle_period_action(time_stamp, periods, duration, k_v, d_v):
locdata()
elif operation == 'entangle_within_period':
if self.entangle_period_action(time_stamp, periods, duration, k_v, d_v):
locdata()
elif operation == 'divergence_price_lower_and_k_higher':
if self.deviate_price_k_action(time_stamp, duration, k_v):
locdata()
elif operation == 'divergence_price_lower_and_k_higher_simple':
if self.deviate_price_k_s_action(time_stamp, duration, k_v, d_v):
locdata()
else:
logger.error("%s is not supported!" % operation)
else:
ret_valid = False
return ret_valid, ret_value
class OBVAction(ActionBase):
    """Volume-flow indicator over a fixed lookback window.

    NOTE(review): despite the OBV name, the per-bar term matches the
    accumulation/distribution (Chaikin money-flow) multiplier rather than
    classic On-Balance Volume — confirm that is the intent.
    """

    def __init__(self, data: pd.DataFrame, obv_period: int):
        super().__init__(data)
        # Number of most-recent bars folded into the indicator value.
        self.__obv_p = obv_period

    def executeaction(self, **kwargs):
        """Compute the indicator for the window ending at ``kwargs['index']``.

        Returns (valid, value). valid is False when there is not enough
        history before ``index`` to cover the full period.
        """
        index = kwargs['index']
        total_value = 0
        ret_valid = False
        # Not enough bars before `index` to fill the window.
        if index - (self.__obv_p - 1) < 0:
            return ret_valid, total_value
        for i in range(self.__obv_p):
            index_internal = index - i
            price_dist = self.high_ticker(index_internal) - self.low_ticker(index_internal)
            # Skip zero-range bars to avoid division by zero.
            if price_dist != 0:
                total_value += self.volume_ticker(index_internal) * \
                    ((self.close_ticker(index_internal) - self.low_ticker(index_internal)) -
                     (self.high_ticker(index_internal) - self.close_ticker(index_internal))) / price_dist
        else:
            # NOTE(review): for/else with no break inside the loop — this
            # branch always runs, so ret_valid is always True here. If a
            # zero-range bar was meant to invalidate the result, a `break`
            # is missing above; confirm against the sibling pattern in
            # OBVUpACTION.__calcv_a_obv.
            ret_valid = True
        return ret_valid, total_value
class OBVUpACTION(ActionBase):
    """Screen that keeps timestamps where the OBV value is positive and
    above its own moving average."""

    def __init__(self, data: pd.DataFrame):
        super().__init__(data)

    def __calcv_a_obv(self, index: int, period_a: int, period: int):
        """Average the OBV indicator over ``period_a`` consecutive windows.

        Returns (valid, average); valid is False when history is too short
        or any individual OBV window is invalid.
        """
        ret_valid = False
        total_value = 0
        obv_indicator = OBVAction(self._data, period)
        # Need period_a overlapping OBV windows of `period` bars each.
        if index - (period_a - 1) - (period - 1) < 0:
            return ret_valid, total_value
        for i in range(period_a):
            index_interal = index - i
            valid, obv_v = obv_indicator.executeaction(index=index_interal)
            if not valid:
                break
            total_value += obv_v
        else:
            # Only reached when every window was valid (no break).
            total_value = total_value / period_a
            ret_valid = True
        return ret_valid, total_value

    def executeaction(self, **kwargs):
        """For each timestamp in kwargs['occurrence_time'], keep the row when
        OBV > 0 and OBV is above its ``obv_a_period``-window average.

        Returns (valid, DataFrame of matching rows).
        """
        occurrences = kwargs['occurrence_time']
        obv_p = kwargs['obv_period']
        obv_a_p = kwargs['obv_a_period']
        ret_valid = False
        ret_value = pd.DataFrame(columns=columns)
        obv_indicator = OBVAction(self._data, obv_p)
        oa = occurrences.array
        for time_stamp_original in oa:
            cur_index = self.getindex(time_stamp_original)
            # Timestamp not present in this symbol's data.
            if cur_index is None:
                continue
            valid1, obv_v = obv_indicator.executeaction(index=cur_index)
            if valid1:
                valid2, obv_a_v = self.__calcv_a_obv(cur_index, obv_a_p, obv_p)
                if valid2:
                    ret_valid = True
                    if obv_v > 0 and obv_v > obv_a_v:
                        row = self._data.loc[time_stamp_original]
                        ret_value.loc[len(ret_value)] = [row['gid'], row['open'], row['close'],
                                                         row['high'], row['low'], row['volume'],
                                                         time_stamp_original, True]
        return ret_valid, ret_value
class StrategyBasedOnDayKAction(ActionBase):
    """Composite day-K-line screens over the most recent bars.

    ``executeaction`` dispatches on ``kwargs['operation']`` and, when the
    selected screen is both computable and satisfied, returns a one-row
    DataFrame describing the latest bar.
    """

    def __init__(self, data: pd.DataFrame):
        super().__init__(data)
        # Minimum history needed for a single close lookup.
        self.__min_length = 1

    def __getCloseTicker(self, index):
        """Return (valid, close) for ``index``; invalid when out of range."""
        valid = False
        ret = None
        if index < self.__min_length - 1:
            # Fixed: the original passed `index` with no %-placeholder in the
            # message, which breaks log-record formatting.
            logger.error("index %s is invalid in data for EXPEMA", index)
        else:
            ret = self.close_ticker(index)
        if ret is not None:
            valid = True
        return valid, ret

    def __calcamplitudeavg(self, period: int, percent: float):
        """True when the mean daily amplitude over ``period`` bars >= percent%.

        Amplitude of a bar is (high - low) / previous close. Needs
        ``period + 2`` rows of history.
        """
        ret_valid_t = False
        ret_value_t = False
        # `period` bars plus one extra close for the oldest denominator.
        requiredlength = period + 2
        if len(self._data) < requiredlength:
            return ret_valid_t, ret_value_t
        ret_valid_t = True
        total: float = 0
        # NOTE: starts at i=1, i.e. the window excludes the most recent bar.
        for i in range(1, requiredlength - 1):
            index: int = -1 - i
            total += (self.high_ticker(index) - self.low_ticker(index)) / self.close_ticker(index - 1)
        if total / period >= percent / 100:
            ret_value_t = True
        return ret_valid_t, ret_value_t

    def __calc_price_kavg(self, k_period, isgreater):
        """Compare the latest close against its ``k_period`` moving average.

        ``isgreater`` selects the direction: True -> close >= MA must hold,
        False -> close < MA must hold.
        """
        ret_valid_t = False
        ret_value_t = False
        if len(self._data.index) >= k_period:
            start_index = end_index = -1
            maaction = MAAction(start_index, end_index, k_period, self._data)
            valid_ma, result_ma = maaction.executeaction(fn_ticker=self.close_ticker)
            if valid_ma:
                ret_valid_t = True
                if start_index in result_ma:
                    tmp_ret = self.close_ticker(start_index) >= result_ma[start_index]
                    # XNOR: true when the comparison matches the requested direction.
                    ret_value_t = not (isgreater ^ tmp_ret)
        return ret_valid_t, ret_value_t

    def __calckavg(self, k_period, calc_period, isgreater):
        """True when the ``k_period`` MA rose on each of ``calc_period`` bars.

        With ``isgreater`` set, additionally require every close in the
        window to sit above its MA value.
        """
        ret_valid_t = False
        ret_value_t = True
        start_index = -1
        end_index = 0 - calc_period
        if len(self._data.index) >= k_period + calc_period - 1:
            maaction = MAAction(start_index, end_index, k_period, self._data)
            valid_ma, result_ma = maaction.executeaction(fn_ticker=self.close_ticker)
            if valid_ma:
                ret_valid_t = True
                if isgreater:
                    for index, avg_v in result_ma.items():
                        if self.close_ticker(index) <= avg_v:
                            ret_value_t = False
                            break
                if isgreater and not ret_value_t:
                    return ret_valid_t, ret_value_t
                # Walk backwards and require a strictly rising MA sequence.
                count = 0
                cursor = start_index
                while count < calc_period - 1:
                    cursor_p = cursor - 1
                    if cursor in result_ma and cursor_p in result_ma:
                        if result_ma[cursor] <= result_ma[cursor_p]:
                            ret_value_t = False
                            break
                    else:
                        ret_value_t = False
                        break
                    count += 1
                    cursor -= 1
        return ret_valid_t, ret_value_t

    def __calcemadif(self, dif_period, calc_period):
        """True when the EXPMA 'dif' line rose on each of ``calc_period`` bars."""
        ret_valid_t = False
        ret_value_t = True
        start_index = -1
        ema = XMAAction(self._data)
        ret_v, ema_dif_v = ema.executeaction(minlength=1,
                                             fnf=self.__getCloseTicker,
                                             weight=2 / (dif_period + 1),
                                             intvalue=0,
                                             reason='EXPMA_dif',
                                             operationtype='EMA')
        if ret_v:
            ret_valid_t = True
            count = 0
            cursor = start_index
            while count < calc_period - 1:
                cursor_p = cursor - 1
                if ema_dif_v[cursor] is not None and ema_dif_v[cursor_p] is not None:
                    if ema_dif_v[cursor] <= ema_dif_v[cursor_p]:
                        ret_value_t = False
                        break
                else:
                    ret_value_t = False
                    break
                count += 1
                cursor -= 1
        return ret_valid_t, ret_value_t

    def executeaction(self, **kwargs):
        """Run the screen named by ``kwargs['operation']``.

        Returns (valid, DataFrame); the frame holds the latest bar when the
        screen is valid and satisfied, otherwise it is empty.
        """
        operation = kwargs['operation']
        # 'amplitude_peroid' (sic) is the historical key; the correctly
        # spelled key is accepted as well for new callers.
        amplitude_period = kwargs.get('amplitude_peroid',
                                      kwargs.get('amplitude_period', 5))
        amplitude_percent = kwargs.get('amplitude_percent', 3)
        avgk_k_period = kwargs.get('avgk_period', 20)
        avgk_calc_period = kwargs.get('avgk_calc_period', 2)
        avgk_greater = kwargs.get('avgk_greater', False)
        avg_ema_dif_period = kwargs.get('avg_ema_dif_period', 12)
        # Fixed: the original also read an unused local from the wrong key
        # ('avg_ema_dif_period' copy-pasted for avg_ema_avg_period); the dead
        # assignment has been removed.
        ret_valid = False
        ret_value_bool = False
        ret_value = pd.DataFrame(columns=columns)
        if operation == 'amplitude_avg':
            ret_valid, ret_value_bool = self.__calcamplitudeavg(amplitude_period, amplitude_percent)
        elif operation == 'avg_k_go':
            ret_valid, ret_value_bool = self.__calckavg(avgk_k_period, avgk_calc_period, avgk_greater)
        elif operation == 'price_k_avg':
            ret_valid, ret_value_bool = self.__calc_price_kavg(avgk_k_period, avgk_greater)
        elif operation == 'expma_dif_go':
            ret_valid, ret_value_bool = self.__calcemadif(avg_ema_dif_period, avgk_calc_period)
        else:
            logger.error("%s is not supported!" % operation)
        if ret_valid and ret_value_bool:
            time_stamp = self._data.index[-1]
            row = self._data.loc[time_stamp]
            ret_value.loc[len(ret_value)] = [row['gid'], row['open'], row['close'],
                                             row['high'], row['low'], row['volume'],
                                             time_stamp, True]
        return ret_valid, ret_value
class EXPMACrossAction(ActionBase):
    """Screen for a golden cross of EXPMA(12) over EXPMA(50) on the last bar.

    ``executeaction`` returns (valid, DataFrame): the frame holds the latest
    bar when the cross signal fired on it, otherwise it is empty; valid is
    False when there is too little data or the indicator computation failed.
    """

    def __init__(self, data: pd.DataFrame):
        super().__init__(data)

    def executeaction(self, **kwargs):
        ret_valid = True
        ret_value = pd.DataFrame(columns=columns)
        # CROSS needs at least two bars to detect a transition.
        if len(self._data.close) < 2:
            ret_valid = False
            return ret_valid, ret_value
        try:
            expma_12, expma_50 = EXPMA(self._data.close.values)
            tmp_ret = CROSS(expma_12, expma_50)
            # Signal: the fast line crossed above the slow line on the
            # latest bar (0 -> 1 transition).
            if tmp_ret[-2] == 0 and tmp_ret[-1] == 1:
                time_stamp = self._data.index[-1]
                row = self._data.loc[time_stamp]
                ret_value.loc[len(ret_value)] = [row['gid'], row['open'], row['close'],
                                                 row['high'], row['low'], row['volume'],
                                                 time_stamp, True]
        except Exception:
            # Fixed: the original passed the exception as a stray positional
            # arg to logger.error (no placeholder -> formatting error) and
            # caught BaseException, swallowing KeyboardInterrupt/SystemExit.
            logger.exception("The EXPMA is failed")
            ret_valid = False
        return ret_valid, ret_value
class StockData:
    """In-memory registry mapping a stock symbol (stored as str) to its
    OHLCV DataFrame.

    Note: ``update``/``get`` coerce the symbol to str, while ``has_symbol``
    and ``remove`` use the symbol as given — callers should pass strings.
    """

    def __init__(self, sector: str = ''):
        self.sector = sector
        self.__data = {}

    def update(self, symbol: str, data: pd.DataFrame):
        """Insert or replace the DataFrame stored for ``symbol``."""
        self.__data[str(symbol)] = data

    def get(self, symbol: str) -> pd.DataFrame:
        """Return the DataFrame for ``symbol``.

        On a missing symbol, log the error and return an empty frame with
        the standard columns (same fallback as before).
        """
        try:
            return self.__data[str(symbol)]
        except KeyError as ee:
            # Fixed: was a bare `except Exception` and passed the exception
            # as a stray positional arg to logger.error (no %-placeholder).
            logger.error("error >>> %s", ee)
            traceback.print_exc()
            return pd.DataFrame(columns=columns)

    def has_symbol(self, symbol: str) -> bool:
        return symbol in self.__data

    def keys(self):
        return self.__data.keys()

    def clear(self):
        self.__data.clear()

    def remove(self, symbol: str) -> None:
        """Drop ``symbol`` if present; silently ignore a missing key.

        Fixed: the old signature claimed ``-> pd.DataFrame`` but nothing was
        returned; pop() also removes the need for the check-then-del dance
        and the broad exception handler.
        """
        self.__data.pop(symbol, None)
def loadsectors(context: DataContext):
    """Populate ``context.sectors`` from the local "sectors_allocation" file.

    Only runs for the China market. Each file line has the form
    ``<sector_code>:<symbol>,<symbol>,...``.
    """
    if not DataContext.iscountryChina():
        return
    filename = "sectors_allocation"
    filepath = os.path.join(r'./', filename)
    # Sector 000001 is a synthetic bucket holding the hand-picked
    # ("spotlighted") codes, zero-padded to 6 digits.
    append_value(context.sectors, '000001', [str(code).zfill(6) for code in DataContext.code_spotlighted])
    with open(filepath, 'r') as file:
        for line in file.read().splitlines():
            sector_symbols = line.split(":")
            if len(sector_symbols) > 1:
                symbols = sector_symbols[1].split(",")
                # NOTE(review): a sector containing exactly one symbol is
                # skipped by this > 1 check — confirm that is intentional.
                if len(symbols) > 1:
                    for symbol in symbols:
                        append_value(context.sectors, sector_symbols[0], symbol)
def loadsectorsfromEM():
    """Fetch today's sector membership from the EM (Choice) API and rewrite
    the local "sectors_allocation" file in ``<sector>:<sym>,<sym>,...`` form.
    """
    date_t = datetime.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0).strftime("%Y-%m-%d")
    # NOTE(review): if neither country check matches, `sectors` is unbound
    # and the loop below raises NameError — confirm callers guarantee the
    # country is set to CN or US before this runs.
    if DataContext.iscountryChina():
        sectors = sectors_CN.keys()
    elif DataContext.iscountryUS():
        sectors = sectors_US.keys()
    filename = "sectors_allocation"
    filepath = os.path.join(r'./', filename)
    with open(filepath, 'w+') as file:
        for sector_i in sectors:
            # 000001 is the synthetic "spotlighted" bucket; it is not a real
            # EM sector, so it is not fetched.
            if sector_i == '000001':
                pass
            else:
                data = c.sector(sector_i, date_t)
                if data.ErrorCode != 0:
                    logger.debug("request sector %s Error, %s" % (sector_i, data.ErrorMsg))
                else:
                    file.write('{}:'.format(sector_i))
                    symbolsinsector = []
                    for code in data.Data:
                        # EM codes look like "600000.SH"; keep the bare symbol.
                        code_l = code.split(".")
                        if len(code_l) > 1:
                            symbolsinsector.append(code_l[0])
                    file.writelines(",".join(symbolsinsector))
                    file.write('\r\n')
# Module-level counters shared (via `global`) by snapshot() and its nested
# helper: an overall fetch counter plus, for each bar size (15/30/60 min),
# a bar counter, the latest rounding result, and the first-round value.
fetchdatacounter = 0
barcounter_15 = 0
roundresult_15 = 0
firstroundresult_15 = 0
barcounter_30 = 0
roundresult_30 = 0
firstroundresult_30 = 0
barcounter_60 = 0
roundresult_60 = 0
firstroundresult_60 = 0
fetch_count = 1
def snapshot(context: DataContext):
# 1) rank sectors over previous consecutive 10 business days
# 2) start fetching data once market is open
# 3) get a snapshot of stocks depending on country every other 3 seconds according to limitation
# 4) update stock data in context
# 5) calculate indicators based on newly fetched stock data
# 6) send result to another thread to handle
global fetchdatacounter, fetch_count
global barcounter_15, roundresult_15, firstroundresult_15
global barcounter_30, roundresult_30, firstroundresult_30
global barcounter_60, roundresult_60, firstroundresult_60
current_time = datetime.datetime.now()
current_date = datetime.date.today()
if DataContext.iscountryChina():
opentime = datetime.datetime.combine(current_date, context.marketopentime)
closetime = datetime.datetime.combine(current_date, context.marketclosetime)
breakstarttime = datetime.datetime.combine(current_date, context.marketbreakstarttime)
breakstoptime = datetime.datetime.combine(current_date, context.marketbreakstoptime)
elif DataContext.iscountryUS():
opentime = datetime.datetime.combine(current_date, context.marketopentime)
closetime = datetime.datetime.combine(current_date, context.marketclosetime)
target_time = datetime.timedelta(days=0, hours=0, minutes=0, seconds=0)
symbols_exchange = []
for sector in context.markets:
symbols_exchange += context.symbols_exchange[stock_group[sector]]
symbols_original_len = len(symbols_exchange)
symbols_tmp = set(symbols_exchange)
symbols_tmp.difference_update(DataContext.invalid_stock_codes)
symbols_exchange = list(symbols_tmp)
def update_stock_data_in_context(timeout=0):
global fetchdatacounter, fetch_count
global barcounter_15, roundresult_15, firstroundresult_15
global barcounter_30, roundresult_30, firstroundresult_30
global barcounter_60, roundresult_60, firstroundresult_60
# 1) french data
logger.debug("totally scan %d stocks but the number of original stock codes is %d" %
(len(symbols_exchange), symbols_original_len))
'''
EM has been obsoleted.
stock_data = csqsnapshot_t(symbols_exchange, "NOW,VOLUME,OPEN,HIGH,LOW", "Ispandas=1")
if not isinstance(stock_data, c.EmQuantData):
'''
logger.debug("Start to fetch stock data count: {}".format(fetch_count))
starttime = time.perf_counter()
stock_data = None
try:
# FIXME: only for datasource AKShare
# stock_data = ak.stock_zh_a_spot()
# need to set | |
entities_recognition_tasks=[EntitiesRecognitionTask(model_version="bad")],
# at this moment this should cause all documents to be errors, which isn't correct behavior but I'm using it here to test document ordering with errors. :)
key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
polling_interval=self._interval()
)).result()
results_pages = []
async for p in response:
results_pages.append(p)
self.assertEqual(len(results_pages), 1)
task_types = [
"entities_recognition_results",
"key_phrase_extraction_results",
"pii_entities_recognition_results"
]
in_order = ["56", "0", "19", "1"]
for task_type in task_types:
task_results = getattr(results_pages[0], task_type)
self.assertEqual(len(task_results), 1)
results = task_results[0].results
self.assertEqual(len(results), len(docs))
for idx, resp in enumerate(results):
self.assertEqual(resp.id, in_order[idx])
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_show_stats_and_model_version_multiple_tasks(self, client):
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
async with client:
response = await (await client.begin_analyze(
docs,
entities_recognition_tasks=[EntitiesRecognitionTask(model_version="latest")],
key_phrase_extraction_tasks=[KeyPhraseExtractionTask(model_version="latest")],
pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask(model_version="latest")],
show_stats=True,
polling_interval=self._interval()
)).result()
results_pages = []
async for p in response:
results_pages.append(p)
self.assertEqual(len(results_pages), 1)
task_types = [
"entities_recognition_results",
"key_phrase_extraction_results",
"pii_entities_recognition_results"
]
for task_type in task_types:
task_results = getattr(results_pages[0], task_type)
self.assertEqual(len(task_results), 1)
results = task_results[0].results
self.assertEqual(len(results), len(docs))
# self.assertEqual(results.statistics.document_count, 5)
# self.assertEqual(results.statistics.transaction_count, 4)
# self.assertEqual(results.statistics.valid_document_count, 4)
# self.assertEqual(results.statistics.erroneous_document_count, 1)
@pytest.mark.playback_test_only
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_whole_batch_language_hint(self, client):
docs = [
u"This was the best day of my life.",
u"I did not like the hotel we stayed at. It was too expensive.",
u"The restaurant was not as good as I hoped."
]
async with client:
response = await (await client.begin_analyze(
docs,
entities_recognition_tasks=[EntitiesRecognitionTask()],
key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
language="en",
polling_interval=self._interval()
)).result()
results_pages = []
async for p in response:
results_pages.append(p)
self.assertEqual(len(results_pages), 1)
task_types = [
"entities_recognition_results",
"key_phrase_extraction_results",
"pii_entities_recognition_results"
]
for task_type in task_types:
task_results = getattr(results_pages[0], task_type)
self.assertEqual(len(task_results), 1)
results = task_results[0].results
for r in results:
self.assertFalse(r.is_error)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_whole_batch_dont_use_language_hint(self, client):
docs = [
u"This was the best day of my life.",
u"I did not like the hotel we stayed at. It was too expensive.",
u"The restaurant was not as good as I hoped."
]
async with client:
response = await (await client.begin_analyze(
docs,
entities_recognition_tasks=[EntitiesRecognitionTask()],
key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
language="",
polling_interval=self._interval()
)).result()
results_pages = []
async for p in response:
results_pages.append(p)
self.assertEqual(len(results_pages), 1)
task_types = [
"entities_recognition_results",
"key_phrase_extraction_results",
"pii_entities_recognition_results"
]
for task_type in task_types:
task_results = getattr(results_pages[0], task_type)
self.assertEqual(len(task_results), 1)
results = task_results[0].results
for r in results:
self.assertFalse(r.is_error)
@pytest.mark.playback_test_only
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_per_item_dont_use_language_hint(self, client):
docs = [{"id": "1", "language": "", "text": "I will go to the park."},
{"id": "2", "language": "", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
async with client:
response = await (await client.begin_analyze(
docs,
entities_recognition_tasks=[EntitiesRecognitionTask()],
key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
polling_interval=self._interval()
)).result()
results_pages = []
async for p in response:
results_pages.append(p)
self.assertEqual(len(results_pages), 1)
task_types = [
"entities_recognition_results",
"key_phrase_extraction_results",
"pii_entities_recognition_results"
]
for task_type in task_types:
task_results = getattr(results_pages[0], task_type)
self.assertEqual(len(task_results), 1)
results = task_results[0].results
for r in results:
self.assertFalse(r.is_error)
@pytest.mark.playback_test_only
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_whole_batch_language_hint_and_obj_input(self, client):
async def callback(resp):
language_str = "\"language\": \"de\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 3)
docs = [
TextDocumentInput(id="1", text="I should take my cat to the veterinarian."),
TextDocumentInput(id="4", text="Este es un document escrito en Español."),
TextDocumentInput(id="3", text="猫は幸せ"),
]
async with client:
response = await (await client.begin_analyze(
docs,
entities_recognition_tasks=[EntitiesRecognitionTask()],
key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
language="en",
polling_interval=self._interval()
)).result()
results_pages = []
async for p in response:
results_pages.append(p)
self.assertEqual(len(results_pages), 1)
task_types = [
"entities_recognition_results",
"key_phrase_extraction_results",
"pii_entities_recognition_results"
]
for task_type in task_types:
task_results = getattr(results_pages[0], task_type)
self.assertEqual(len(task_results), 1)
results = task_results[0].results
for r in results:
self.assertFalse(r.is_error)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_whole_batch_language_hint_and_dict_input(self, client):
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
async with client:
response = await (await client.begin_analyze(
docs,
entities_recognition_tasks=[EntitiesRecognitionTask()],
key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
language="en",
polling_interval=self._interval()
)).result()
results_pages = []
async for p in response:
results_pages.append(p)
self.assertEqual(len(results_pages), 1)
task_types = [
"entities_recognition_results",
"key_phrase_extraction_results",
"pii_entities_recognition_results"
]
for task_type in task_types:
task_results = getattr(results_pages[0], task_type)
self.assertEqual(len(task_results), 1)
results = task_results[0].results
for r in results:
self.assertFalse(r.is_error)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_whole_batch_language_hint_and_obj_per_item_hints(self, client):
docs = [
TextDocumentInput(id="1", text="I should take my cat to the veterinarian.", language="en"),
TextDocumentInput(id="2", text="Este es un document escrito en Español.", language="en"),
TextDocumentInput(id="3", text="猫は幸せ"),
]
async with client:
response = await (await client.begin_analyze(
docs,
entities_recognition_tasks=[EntitiesRecognitionTask()],
key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
language="en",
polling_interval=self._interval()
)).result()
results_pages = []
async for p in response:
results_pages.append(p)
self.assertEqual(len(results_pages), 1)
task_types = [
"entities_recognition_results",
"key_phrase_extraction_results",
"pii_entities_recognition_results"
]
for task_type in task_types:
task_results = getattr(results_pages[0], task_type)
self.assertEqual(len(task_results), 1)
results = task_results[0].results
for r in results:
self.assertFalse(r.is_error)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_whole_batch_language_hint_and_dict_per_item_hints(self, client):
docs = [{"id": "1", "language": "en", "text": "I will go to the park."},
{"id": "2", "language": "en", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
async with client:
response = await (await client.begin_analyze(
docs,
entities_recognition_tasks=[EntitiesRecognitionTask()],
key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
language="en",
polling_interval=self._interval()
)).result()
results_pages = []
async for p in response:
results_pages.append(p)
self.assertEqual(len(results_pages), 1)
task_types = [
"entities_recognition_results",
"key_phrase_extraction_results",
"pii_entities_recognition_results"
]
for task_type in task_types:
task_results = getattr(results_pages[0], task_type)
self.assertEqual(len(task_results), 1)
results = task_results[0].results
for r in results:
self.assertFalse(r.is_error)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer(client_kwargs={
"default_language": "en"
})
async def test_client_passed_default_language_hint(self, client):
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
async with client:
response = await (await client.begin_analyze(
docs,
entities_recognition_tasks=[EntitiesRecognitionTask()],
key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
polling_interval=self._interval()
)).result()
results_pages = []
async for p in response:
results_pages.append(p)
self.assertEqual(len(results_pages), 1)
task_types = [
"entities_recognition_results",
"key_phrase_extraction_results",
"pii_entities_recognition_results"
]
for task_type in task_types:
tasks = getattr(results_pages[0], task_type)
self.assertEqual(len(tasks), 1)
self.assertEqual(len(tasks[0].results), 3)
for r in tasks[0].results:
self.assertFalse(r.is_error)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_invalid_language_hint_method(self, client):
async with client:
response = await (await client.begin_analyze(
["This should fail because we're passing in an invalid language hint"],
language="notalanguage",
entities_recognition_tasks=[EntitiesRecognitionTask()],
key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
polling_interval=self._interval()
)).result()
results_pages = []
async for p in response:
results_pages.append(p)
self.assertEqual(len(results_pages), 1)
task_types = [
"entities_recognition_results",
"key_phrase_extraction_results",
"pii_entities_recognition_results"
]
for task_type in task_types:
tasks = getattr(results_pages[0], task_type)
self.assertEqual(len(tasks), 1)
for r in tasks[0].results:
self.assertTrue(r.is_error)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_invalid_language_hint_docs(self, client):
async with client:
response = await (await client.begin_analyze(
[{"id": "1", "language": "notalanguage",
"text": "This should fail because we're passing in an invalid language hint"}],
entities_recognition_tasks=[EntitiesRecognitionTask()],
key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
polling_interval=self._interval()
)).result()
results_pages = []
async for p in response:
results_pages.append(p)
self.assertEqual(len(results_pages), 1)
task_types = [
"entities_recognition_results",
"key_phrase_extraction_results",
"pii_entities_recognition_results"
]
for task_type in task_types:
tasks = getattr(results_pages[0], task_type)
self.assertEqual(len(tasks), 1)
for r in tasks[0].results:
self.assertTrue(r.is_error)
@GlobalTextAnalyticsAccountPreparer()
async def test_rotate_subscription_key(self, resource_group, location, text_analytics_account,
text_analytics_account_key):
credential = AzureKeyCredential(text_analytics_account_key)
client = TextAnalyticsClient(text_analytics_account, credential)
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
async with client:
response = await (await client.begin_analyze(
docs,
entities_recognition_tasks=[EntitiesRecognitionTask()],
key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
polling_interval=self._interval()
)).result()
self.assertIsNotNone(response)
credential.update("xxx") # Make authentication fail
with self.assertRaises(ClientAuthenticationError):
response = await (await client.begin_analyze(
docs,
entities_recognition_tasks=[EntitiesRecognitionTask()],
key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
polling_interval=self._interval()
)).result()
credential.update(text_analytics_account_key) # Authenticate successfully again
response = await (await client.begin_analyze(
docs,
entities_recognition_tasks=[EntitiesRecognitionTask()],
key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
polling_interval=self._interval()
)).result()
self.assertIsNotNone(response)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_user_agent(self, client):
async def callback(resp):
self.assertIn("azsdk-python-ai-textanalytics/{} Python/{} ({})".format(
VERSION, platform.python_version(), platform.platform()),
resp.http_request.headers["User-Agent"]
)
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
async with client:
poller = await client.begin_analyze(
docs,
entities_recognition_tasks=[EntitiesRecognitionTask()],
key_phrase_extraction_tasks=[KeyPhraseExtractionTask()],
pii_entities_recognition_tasks=[PiiEntitiesRecognitionTask()],
polling_interval=self._interval()
)
self.assertIn("azsdk-python-ai-textanalytics/{} Python/{} ({})".format(
VERSION, platform.python_version(), platform.platform()),
poller._polling_method._initial_response.http_request.headers["User-Agent"]
)
await poller.result() # need to call this before tearDown runs even though we don't need the response for the test.
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_bad_model_version_error_single_task(self, client): # TODO: verify behavior of service
docs = [{"id": "1", "language": "english", "text": "I did not like the hotel we stayed at."}]
with self.assertRaises(HttpResponseError):
async with client:
result = await (await client.begin_analyze(
docs,
entities_recognition_tasks=[EntitiesRecognitionTask(model_version="bad")],
polling_interval=self._interval()
)).result()
@pytest.mark.playback_test_only
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_bad_model_version_error_multiple_tasks(self, client): # TODO: verify behavior of service
docs = [{"id": "1", "language": "english", "text": "I did not like the hotel we stayed at."}]
async with client:
response | |
#BEGIN_HEADER
import os
import sys
import traceback
import argparse
import json
import logging
import time
from pprint import pprint
import string
import subprocess
from os import environ
from ConfigParser import ConfigParser
import re
from collections import OrderedDict
import uuid
from string import Template
# 3rd party imports
import requests
import pandas as pd
import numpy as np
# KBase imports
import biokbase.workspace.client
from biokbase.CoExpression.authclient import KBaseAuth as _KBaseAuth
import biokbase.Transform.script_utils as script_utils
def error_report(err_msg, expr, workspace_service_url, workspace_name, provenance, ws):
    """Persist a hidden KBaseReport.Report describing a failed run.

    Saves the report into ``workspace_name`` through the ``ws`` client and
    returns ``{"report_name": ..., "report_ref": ...}`` where the ref is the
    workspace ``wsid/objid/version`` triple.  (``expr`` and
    ``workspace_service_url`` are accepted for interface compatibility but are
    not consulted here.)
    """
    # Unique-ish report name derived from this host's MAC address.
    report_name = 'ErrorReport_' + str(hex(uuid.getnode()))
    report_object = {
        'objects_created': [],
        'text_message': "Failed : {0}".format(err_msg)
    }
    saved = ws.save_objects({
        'workspace': workspace_name,
        'objects': [
            {
                'type': 'KBaseReport.Report',
                'data': report_object,
                'name': report_name,
                'meta': {},
                'hidden': 1,
                'provenance': provenance
            }
        ]
    })[0]
    # Workspace object info tuple: [6]=workspace id, [0]=object id, [4]=version.
    report_ref = "{0}/{1}/{2}".format(saved[6], saved[0], saved[4])
    return {"report_name": report_name, "report_ref": report_ref}
#END_HEADER
class CoExpression:
'''
Module Name:
CoExpression
Module Description:
Co-Expression Service APIs
This module provides services in support of the coexpression network.
The modules supports retrieval of the following information:
1. Identify differentially expressed genes
2. WGCNA clustering
'''
######## WARNING FOR GEVENT USERS #######
# Since asynchronous IO can lead to methods - even the same method -
# interrupting each other, you must be *very* careful when using global
# state. A method could easily clobber the state set by another while
# the latter method is running.
#########################################
VERSION = "1.0.4"
GIT_URL = "https://github.com/sjyoo/coexpression"
GIT_COMMIT_HASH = "cd2d4e4351c6e80cf4b642f72f4fd1a5c0576617"
#BEGIN_CLASS_HEADER
FVE_2_TSV = 'trns_transform_KBaseFeatureValues_ExpressionMatrix_to_TSV'
TSV_2_FVE = 'trns_transform_TSV_Exspression_to_KBaseFeatureValues_ExpressionMatrix'
RAWEXPR_DIR = 'raw_dir'
FLTRD_DIR = 'fltr_dir'
CLSTR_DIR = 'clstr_dir'
FINAL_DIR = 'final_dir'
EXPRESS_FN = 'expression.tsv'
SAMPLE_FN = 'sample.tsv'
COEX_FILTER = 'coex_filter'
COEX_CLUSTER = 'coex_cluster2'
FLTRD_FN = 'filtered.tsv'
CLSTR_FN = 'clusters.tsv'
CSTAT_FN = 'cluster_stat.tsv'
FINAL_FN = 'filtered.json'
PVFDT_FN = 'pv_distribution.json'
GENELST_FN = 'selected.tsv'
__WS_URL = 'https://ci.kbase.us/services/ws'
__HS_URL = 'https://ci.kbase.us/services/handle_service'
__SHOCK_URL = 'https://ci.kbase.us/services/shock-api'
__PUBLIC_SHOCK_NODE = 'true'
logger = None
def _dumpExp2File(self, oexpr, odir, ofn):
try:
os.makedirs(odir)
except:
pass
try:
df = pd.DataFrame(oexpr['data']['values'], index=oexpr['data']['row_ids'], columns=oexpr['data']['col_ids'])
mask = pd.Series(df.index == 'NA').add(pd.Series(df.index == '')).values == 0 # remove 'NA' or '' (missing gene name)
df = df.iloc[mask,]
df.to_csv(path_or_buf = odir + "/" + ofn, sep='\t', na_rep = 'NA' )
except:
self.logger.error("Failed to dump expression object into tsv file:" + traceback.format_exc());
raise
def _subselectExp(self, oexpr, gl):
exp_idx = [oexpr['data']['row_ids'].index(x) for x in gl]
oexpr['data']['row_ids'] = [oexpr['data']['row_ids'][x] for x in exp_idx]
oexpr['data']['values'] = [oexpr['data']['values'][x] for x in exp_idx]
return oexpr
#END_CLASS_HEADER
    # config contains contents of config file in a hash or None if it couldn't
    # be found
    def __init__(self, config):
        """Configure service endpoints, tool paths and logging from ``config``.

        Keys absent from ``config`` leave the corresponding class-level default
        (where one exists) in effect.
        """
        #BEGIN_CONSTRUCTOR
        #pprint(config)
        # NOTE(review): unlike __WS_URL etc., there is no class-level default for
        # __AUTH_SERVICE_URL; if 'auth-service-url' is missing from config, later
        # access raises AttributeError — confirm the deploy config always sets it.
        if 'auth-service-url' in config:
            self.__AUTH_SERVICE_URL = config['auth-service-url']
        # Service endpoints (override the CI defaults declared on the class).
        if 'ws_url' in config:
            self.__WS_URL = config['ws_url']
        if 'shock_url' in config:
            self.__SHOCK_URL = config['shock_url']
        if 'hs_url' in config:
            self.__HS_URL = config['hs_url']
        # Working directories and external tool names.
        if 'cluster_dir' in config:
            self.__CLSTR_DIR = config['cluster_dir']
        if 'final_dir' in config:
            self.__FINAL_DIR = config['final_dir']
        if 'coex_filter' in config:
            self.__COEX_FILTER = config['coex_filter']
        if 'coex_cluster' in config:
            self.__COEX_CLUSTER = config['coex_cluster']
        if 'force_shock_node_2b_public' in config: # expect 'true' or 'false' string
            self.__PUBLIC_SHOCK_NODE = config['force_shock_node_2b_public']
        # logging: stream to stdout with UTC timestamps.
        self.logger = logging.getLogger('CoExpression')
        if 'log_level' in config:
            self.logger.setLevel(config['log_level'])
        else:
            self.logger.setLevel(logging.INFO)
        streamHandler = logging.StreamHandler(sys.stdout)
        formatter = logging.Formatter("%(asctime)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s")
        formatter.converter = time.gmtime
        streamHandler.setFormatter(formatter)
        self.logger.addHandler(streamHandler)
        self.logger.info("Logger was set")
        #END_CONSTRUCTOR
        pass
    def diff_p_distribution(self, ctx, args):
        """Run coex_filter over an ExpressionMatrix and save the p-value
        distribution to the caller's workspace.

        Saves two objects: a MAK.FloatDataTable holding the histogram data and a
        CoExpression.FigureProperties referencing it.

        :param ctx: KBase call context; ``ctx['token']`` is the auth token.
        :param args: expects ``workspace_name`` (may contain a ``$user_id``
            template), ``object_name``, ``method`` and
            ``out_figure_object_name``; optionally ``num_features`` and
            ``p_value``.
        :returns: ``[fig_properties]`` — a one-element list per KBase convention.
        """
        # ctx is the context object
        # return variables are: result
        #BEGIN diff_p_distribution
        # Working directories may survive from an earlier run; ignore failures.
        try:
            os.makedirs(self.RAWEXPR_DIR)
        except:
            pass
        try:
            os.makedirs(self.FLTRD_DIR)
        except:
            pass
        try:
            os.makedirs(self.FINAL_DIR)
        except:
            pass
        if self.logger is None:
            self.logger = script_utils.stderrlogger(__file__)
        result = {}
        self.logger.info("Starting conversion of KBaseFeatureValues.ExpressionMatrix to TSV")
        token = ctx['token']
        # NOTE(review): eenv is prepared with the auth token but never passed to
        # the Popen call below — confirm whether the child tool needs it.
        eenv = os.environ.copy()
        eenv['KB_AUTH_TOKEN'] = token
        param = args
        # Resolve a possible $user_id placeholder in the workspace name.
        auth_client = _KBaseAuth(self.__AUTH_SERVICE_URL)
        user_id = auth_client.get_user(token)
        workspace_name_t = Template(param['workspace_name'])
        workspace_name = workspace_name_t.substitute(user_id=user_id)
        from biokbase.workspace.client import Workspace
        ws = Workspace(url=self.__WS_URL, token=token)
        # Fetch the expression matrix and dump it to TSV for the external tool.
        expr = ws.get_objects([{'workspace': workspace_name, 'name' : param['object_name']}])[0]['data']
        self._dumpExp2File(expr, self.RAWEXPR_DIR, self.EXPRESS_FN)
        self.logger.info("Identifying differentially expressed genes")
        ## Prepare sample file
        # detect num of columns
        ncol = len(expr['data']['col_ids'])
        # force to use ANOVA if the number of sample is two
        if(ncol == 3): param['method'] = 'anova'
        # Sample file: one tab-separated column index per expression column.
        with open("{0}/{1}".format(self.RAWEXPR_DIR, self.SAMPLE_FN), 'wt') as s:
            s.write("0")
            for j in range(1,ncol):
                s.write("\t{0}".format(j))
            s.write("\n")
        ## Run coex_filter
        # NOTE(review): '-n 10' is a baked-in default; if the user supplies
        # num_features a second '-n' is appended below — presumably the tool
        # honors the last occurrence; confirm.
        cmd_coex_filter = [self.COEX_FILTER, '-i', "{0}/{1}".format(self.RAWEXPR_DIR, self.EXPRESS_FN), '-o', "{0}/{1}".format(self.FLTRD_DIR, self.FLTRD_FN),
                           '-m', param['method'], '-n', '10', '-s', "{0}/{1}".format(self.RAWEXPR_DIR, self.SAMPLE_FN),
                           '-x', "{0}/{1}".format(self.RAWEXPR_DIR, self.GENELST_FN), '-t', 'y', '-j', self.PVFDT_FN]
        if 'num_features' in param:
            cmd_coex_filter.append("-n")
            cmd_coex_filter.append(str(param['num_features']))
        if 'p_value' in param:
            cmd_coex_filter.append("-p")
            cmd_coex_filter.append(str(param['p_value']))
        tool_process = subprocess.Popen(cmd_coex_filter, stderr=subprocess.PIPE)
        stdout, stderr = tool_process.communicate()
        # Forward whatever the tool printed to our log (stderr is informational
        # here; the exit code is not checked).
        if stdout is not None and len(stdout) > 0:
            self.logger.info(stdout)
        if stderr is not None and len(stderr) > 0:
            self.logger.info(stderr)
        ## loading pvalue distribution FDT
        pvfdt = {'row_labels' :[], 'column_labels' : [], "data" : [[]]};
        pvfdt = OrderedDict(pvfdt)
        with open(self.PVFDT_FN, 'r') as myfile:
           pvfdt = json.load(myfile)
        data_obj_name = "{0}.fdt".format(param['out_figure_object_name'])
        pvfdt['id'] = data_obj_name
        # Save the histogram data table, then a FigureProperties object that
        # points at it via a wsid/objid/version reference.
        fig_properties = {"xlabel" : "-log2(p-value)", "ylabel" : "Number of features", "xlog_mode" : "-log2", "ylog_mode" : "none", "title" : "Histogram of P-values", "plot_type" : "histogram"}
        sstatus = ws.save_objects({'workspace' : workspace_name, 'objects' : [{'type' : 'MAK.FloatDataTable',
                                                                              'data' : pvfdt,
                                                                              'name' : data_obj_name}]})
        data_ref = "{0}/{1}/{2}".format(sstatus[0][6], sstatus[0][0], sstatus[0][4])
        fig_properties['data_ref'] = data_ref
        sstatus = ws.save_objects({'workspace' : workspace_name, 'objects' : [{'type' : 'CoExpression.FigureProperties',
                                                                              'data' : fig_properties,
                                                                              'name' : (param['out_figure_object_name'])}]})
        result = fig_properties
        #END diff_p_distribution
        # At some point might do deeper type checking...
        if not isinstance(result, dict):
            raise ValueError('Method diff_p_distribution return value ' +
                             'result is not type dict as required.')
        # return the results
        return [result]
def filter_genes(self, ctx, args):
# ctx is the context object
# return variables are: result
#BEGIN filter_genes
try:
os.makedirs(self.RAWEXPR_DIR)
except:
pass
try:
os.makedirs(self.FLTRD_DIR)
except:
pass
try:
os.makedirs(self.FINAL_DIR)
except:
pass
if self.logger is None:
self.logger = script_utils.stderrlogger(__file__)
result = {}
self.logger.info("Starting conversion of KBaseFeatureValues.ExpressionMatrix to TSV")
token = ctx['token']
eenv = os.environ.copy()
eenv['KB_AUTH_TOKEN'] = token
param = args
auth_client = _KBaseAuth(self.__AUTH_SERVICE_URL)
user_id = auth_client.get_user(token)
workspace_name_t = Template(param['workspace_name'])
workspace_name = workspace_name_t.substitute(user_id=user_id)
provenance = [{}]
if 'provenance' in ctx:
provenance = ctx['provenance']
provenance[0]['input_ws_objects']=[workspace_name+'/'+param['object_name']]
from biokbase.workspace.client import Workspace
ws = Workspace(url=self.__WS_URL, token=token)
expr = ws.get_objects([{'workspace': workspace_name, 'name' : param['object_name']}])[0]['data']
self._dumpExp2File(expr, self.RAWEXPR_DIR, self.EXPRESS_FN)
self.logger.info("Identifying differentially expressed genes")
## Prepare sample file
# detect num of columns
ncol = len(expr['data']['col_ids'])
# force to use ANOVA if the number of sample is two
if(ncol == 3): param['method'] = 'anova'
with open("{0}/{1}".format(self.RAWEXPR_DIR, self.SAMPLE_FN), 'wt') as s:
s.write("0")
for j in range(1,ncol):
s.write("\t{0}".format(j))
s.write("\n")
## Run coex_filter
cmd_coex_filter = [self.COEX_FILTER, '-i', "{0}/{1}".format(self.RAWEXPR_DIR, self.EXPRESS_FN), '-o', "{0}/{1}".format(self.FLTRD_DIR, self.FLTRD_FN),
'-m', param['method'], '-s', "{0}/{1}".format(self.RAWEXPR_DIR, self.SAMPLE_FN),
'-x', "{0}/{1}".format(self.RAWEXPR_DIR, self.GENELST_FN), '-t', 'y']
if 'num_features' in param:
cmd_coex_filter.append("-n")
cmd_coex_filter.append(str(param['num_features']))
if 'p_value' in param:
cmd_coex_filter.append("-p")
cmd_coex_filter.append(str(param['p_value']))
if 'p_value' not in param and 'num_features' not in param:
self.logger.error("One of p_value or num_features must be defined");
return error_report("One of p_value or num_features must be defined", expr,self.__WS_URL, workspace_name, provenance, ws)
#sys.exit(2) #TODO: No error handling in narrative so we do graceful termination
#if 'p_value' in param and 'num_features' in param:
# self.logger.error("Both of p_value and num_features cannot be defined together");
# sys.exit(3)
tool_process = subprocess.Popen(cmd_coex_filter, stderr=subprocess.PIPE)
stdout, stderr = tool_process.communicate()
if stdout is not None and len(stdout) > 0:
self.logger.info(stdout)
if stderr is not None and len(stderr) > 0:
self.logger.info(stderr)
## checking genelist
with open("{0}/{1}".format(self.RAWEXPR_DIR, self.GENELST_FN),'r') as glh:
gl = glh.readlines()
gl = [x.strip('\n') for x in gl]
if(len(gl) < 1) :
self.logger.error("No genes are selected")
return error_report("Increase p_value or specify num_features", expr,self.__WS_URL, workspace_name, provenance, ws)
#sys.exit(4)
## Upload FVE
if 'description' not in expr:
expr['description'] = "Filtered Expression Matrix"
expr['description'] += " : Filtered by '{1}' method ".format(expr['description'], param['method'])
expr = self._subselectExp(expr, gl)
ex_info = ws.save_objects({'workspace' : workspace_name, 'objects' : [{'type' : 'KBaseFeatureValues.ExpressionMatrix',
'data' : expr,
'name' : (param['out_expr_object_name'])}]})[0]
## Upload FeatureSet
fs ={'elements': {}}
fs['description'] = "FeatureSet identified by filtering method '{0}' ".format(param['method'])
fs['description'] += "from {0}/{1}".format(workspace_name, param['object_name'])
for g in gl:
if 'genome_ref' in expr:
fs['elements'][g] = [expr['genome_ref']]
else:
fs['elements'][g] = []
fs_info = ws.save_objects({'workspace' : workspace_name, 'objects' : [{'type' : 'KBaseCollections.FeatureSet',
'data' : fs,
'name' : (param['out_fs_object_name'])}]})[0]
## Create report object:
report = "Filtering | |
role %(linkrole)s to concept %(conceptTo)s and to concept %(conceptTo2)s"),
modelObject=(rel, orderRels[order]), conceptFrom=relFrom.qname, order=rel.arcElement.get("order"), linkrole=rel.linkrole, linkroleDefinition=modelXbrl.roleTypeDefinition(rel.linkrole),
conceptTo=rel.toModelObject.qname, conceptTo2=orderRels[order].toModelObject.qname)
else:
orderRels[order] = rel
if self.validateSBRNL and not relFrom.isTuple:
if relTo in localPreferredLabels:
if {None, preferredLabel} & localPreferredLabels[relTo]:
self.modelXbrl.error("SBR.NL.2.3.4.06",
_("Non-distinguished preferredLabel presentation relations from concept %(conceptFrom)s in base set role %(linkrole)s"),
modelObject=rel, conceptFrom=relFrom.qname, linkrole=rel.linkrole, conceptTo=relTo.qname)
localPreferredLabels[relTo].add(preferredLabel)
targetConceptPreferredLabels.clear()
orderRels.clear()
localPreferredLabels.clear() # clear for next relationship
for conceptPresented in conceptsPresented:
if conceptPresented in usedCalcsPresented:
usedCalcPairingsOfConcept = usedCalcsPresented[conceptPresented]
if len(usedCalcPairingsOfConcept & conceptsPresented) > 0:
usedCalcPairingsOfConcept -= conceptsPresented
# 6.15.02, 6.15.03 semantics checks for totals and calc arcs (by tree walk)
if validateLoggingSemantic:
for rootConcept in parentChildRels.rootConcepts:
self.checkCalcsTreeWalk(parentChildRels, rootConcept, isStatementSheet, False, conceptsUsed, set())
elif arcrole == XbrlConst.summationItem:
if self.validateEFMorGFM:
# 6.14.3 check for relation concept periods
fromRelationships = modelXbrl.relationshipSet(arcrole,ELR).fromModelObjects()
allElrRelSet = modelXbrl.relationshipSet(arcrole)
for relFrom, rels in fromRelationships.items():
orderRels = {}
for rel in rels:
relTo = rel.toModelObject
# 6.14.03 must have matched period types across relationshp
if isinstance(relTo, ModelConcept) and relFrom.periodType != relTo.periodType:
self.modelXbrl.error(("EFM.6.14.03", "GFM.1.07.03"),
"Calculation relationship period types mismatched in base set role %(linkrole)s from %(conceptFrom)s to %(conceptTo)s",
modelObject=rel, linkrole=rel.linkrole, conceptFrom=relFrom.qname, conceptTo=relTo.qname, linkroleDefinition=self.modelXbrl.roleTypeDefinition(ELR))
# 6.14.5 concepts used must have pres in same ext link
if relFrom in conceptsUsed and relTo in conceptsUsed:
fromObjId = relFrom.objectIndex
toObjId = relTo.objectIndex
if fromObjId < toObjId:
usedCalcsPresented[fromObjId].add(toObjId)
else:
usedCalcsPresented[toObjId].add(fromObjId)
order = rel.order
if order in orderRels and disclosureSystem.GFM:
self.modelXbrl.error(("EFM.N/A", "GFM.1.07.06"),
_("Duplicate calculations relations from concept %(conceptFrom)s for order %(order)s in base set role %(linkrole)s to concept %(conceptTo)s and to concept %(conceptTo2)s"),
modelObject=(rel, orderRels[order]), linkrole=rel.linkrole, conceptFrom=relFrom.qname, order=order,
conceptTo=rel.toModelObject.qname, conceptTo2=orderRels[order].toModelObject.qname)
else:
orderRels[order] = rel
directedCycleRels = self.directedCycle(relFrom,relFrom,fromRelationships,{relFrom})
if directedCycleRels is not None:
self.modelXbrl.error(("EFM.6.14.04", "GFM.1.07.04"),
_("Calculation relationships have a directed cycle in base set role %(linkrole)s starting from %(concept)s"),
modelObject=[relFrom] + directedCycleRels, linkrole=ELR, concept=relFrom.qname, linkroleDefinition=self.modelXbrl.roleTypeDefinition(ELR))
orderRels.clear()
# if relFrom used by fact and multiple calc networks from relFrom, test 6.15.04
if rels and relFrom in conceptsUsed:
relFromAndTos = (relFrom.objectIndex,) + tuple(sorted((rel.toModelObject.objectIndex
for rel in rels if isinstance(rel.toModelObject, ModelConcept))))
if relFromAndTos in usedCalcFromTosELR:
otherRels = usedCalcFromTosELR[relFromAndTos]
otherELR = otherRels[0].linkrole
self.modelXbrl.log("WARNING-SEMANTIC", ("EFM.6.15.04", "GFM.2.06.04"),
_("Calculation relationships should have a same set of targets in %(linkrole)s and %(linkrole2)s starting from %(concept)s"),
modelObject=[relFrom] + rels + otherRels, linkrole=ELR, linkrole2=otherELR, concept=relFrom.qname)
else:
usedCalcFromTosELR[relFromAndTos] = rels
elif self.validateSBRNL:
# find a calc relationship to get the containing document name
for modelRel in self.modelXbrl.relationshipSet(arcrole, ELR).modelRelationships:
self.modelXbrl.error("SBR.NL.2.3.9.01",
_("Calculation linkbase linkrole %(linkrole)s"),
modelObject=modelRel, linkrole=ELR)
break
elif arcrole == XbrlConst.all or arcrole == XbrlConst.notAll:
drsELRs.add(ELR)
elif arcrole == XbrlConst.dimensionDomain or arcrole == XbrlConst.dimensionDefault and \
self.validateEFMorGFM:
# 6.16.3 check domain targets in extension linkbases are domain items
fromRelationships = modelXbrl.relationshipSet(arcrole,ELR).fromModelObjects()
for relFrom, rels in fromRelationships.items():
for rel in rels:
relTo = rel.toModelObject
if not (isinstance(relTo, ModelConcept) and relTo.type is not None and relTo.type.isDomainItemType) and not self.isStandardUri(rel.modelDocument.uri):
self.modelXbrl.error(("EFM.6.16.03", "GFM.1.08.03"),
_("Definition relationship from %(conceptFrom)s to %(conceptTo)s in role %(linkrole)s requires domain item target"),
modelObject=(rel, relFrom, relTo), conceptFrom=relFrom.qname, conceptTo=(relTo.qname if relTo is not None else None), linkrole=rel.linkrole)
elif self.validateSBRNL:
if arcrole == XbrlConst.dimensionDefault:
for modelRel in self.modelXbrl.relationshipSet(arcrole).modelRelationships:
self.modelXbrl.error("SBR.NL.2.3.6.05",
_("Dimension-default in from %(conceptFrom)s to %(conceptTo)s in role %(linkrole)s is not allowed"),
modelObject=modelRel, conceptFrom=modelRel.fromModelObject.qname, conceptTo=modelRel.toModelObject.qname,
linkrole=modelRel.linkrole)
''' removed per RH 2013-01-11
if not (XbrlConst.isStandardArcrole(arcrole) or XbrlConst.isDefinitionOrXdtArcrole(arcrole)):
for modelRel in self.modelXbrl.relationshipSet(arcrole).modelRelationships:
relTo = modelRel.toModelObject
relFrom = modelRel.fromModelObject
if not ((isinstance(relFrom,ModelConcept) and isinstance(relTo,ModelConcept)) or
(relFrom.modelDocument.inDTS and
(relTo.qname == XbrlConst.qnGenLabel and modelRel.arcrole == XbrlConst.elementLabel) or
(relTo.qname == XbrlConst.qnGenReference and modelRel.arcrole == XbrlConst.elementReference) or
(relTo.qname == self.qnSbrLinkroleorder))):
self.modelXbrl.error("SBR.NL.2.3.2.07",
_("The source and target of an arc must be in the DTS from %(elementFrom)s to %(elementTo)s, in linkrole %(linkrole)s, arcrole %(arcrole)s"),
modelObject=modelRel, elementFrom=relFrom.qname, elementTo=relTo.qname,
linkrole=modelRel.linkrole, arcrole=arcrole)
'''
# definition tests (GFM only, for now)
if XbrlConst.isDefinitionOrXdtArcrole(arcrole) and disclosureSystem.GFM:
fromRelationships = modelXbrl.relationshipSet(arcrole,ELR).fromModelObjects()
for relFrom, rels in fromRelationships.items():
orderRels = {}
for rel in rels:
relTo = rel.toModelObject
order = rel.order
if order in orderRels and disclosureSystem.GFM:
self.modelXbrl.error("GFM.1.08.10",
_("Duplicate definitions relations from concept %(conceptFrom)s for order %(order)s in base set role %(linkrole)s to concept %(conceptTo)s and to concept %(conceptTo2)s"),
modelObject=(rel, relFrom, relTo), conceptFrom=relFrom.qname, order=order, linkrole=rel.linkrole,
conceptTo=rel.toModelObject.qname, conceptTo2=orderRels[order].toModelObject.qname)
else:
orderRels[order] = rel
if (arcrole not in (XbrlConst.dimensionDomain, XbrlConst.domainMember) and
rel.get("{http://xbrl.org/2005/xbrldt}usable") == "false"):
self.modelXrl.error("GFM.1.08.11",
_("Disallowed xbrldt:usable='false' attribute on %(arc)s relationship from concept %(conceptFrom)s in base set role %(linkrole)s to concept %(conceptTo)s"),
modelObject=(rel, relFrom, relTo), arc=rel.qname, conceptFrom=relFrom.qname, linkrole=rel.linkrole, conceptTo=rel.toModelObject.qname)
del localPreferredLabels # dereference
del usedCalcFromTosELR
del self.summationItemRelsSetAllELRs
self.modelXbrl.profileActivity("... filer relationships checks", minTimeToShow=1.0)
# checks on dimensions
ValidateFilingDimensions.checkDimensions(self, drsELRs)
self.modelXbrl.profileActivity("... filer dimensions checks", minTimeToShow=1.0)
for concept, hasPresentationRelationship in conceptsUsed.items():
if not hasPresentationRelationship:
self.modelXbrl.error(("EFM.6.12.03", "GFM.1.6.3"),
_("Concept used in instance %(concept)s does not participate in an effective presentation relationship"),
modelObject=[concept] + list(modelXbrl.factsByQname[concept.qname]), concept=concept.qname)
for fromIndx, toIndxs in usedCalcsPresented.items():
for toIndx in toIndxs:
fromModelObject = self.modelXbrl.modelObject(fromIndx)
toModelObject = self.modelXbrl.modelObject(toIndx)
calcRels = modelXbrl.relationshipSet(XbrlConst.summationItem) \
.fromToModelObjects(fromModelObject, toModelObject, checkBothDirections=True)
fromFacts = self.modelXbrl.factsByQname[fromModelObject.qname]
toFacts = self.modelXbrl.factsByQname[toModelObject.qname]
fromFactContexts = set(f.context.contextNonDimAwareHash for f in fromFacts if f.context is not None)
contextId = backupId = None # for EFM message
for f in toFacts:
if f.context is not None:
if f.context.contextNonDimAwareHash in fromFactContexts:
contextId = f.context.id
break
backupId = f.context.id
if contextId is None:
contextId = backupId
self.modelXbrl.error(("EFM.6.14.05", "GFM.1.7.5"),
_("Used calculation relationship from %(conceptFrom)s to %(conceptTo)s does not participate in an effective presentation relationship"),
modelObject=calcRels + [fromModelObject, toModelObject],
linkroleDefinition=self.modelXbrl.roleTypeDefinition(calcRels[0].linkrole if calcRels else None),
conceptFrom=self.modelXbrl.modelObject(fromIndx).qname, conceptTo=self.modelXbrl.modelObject(toIndx).qname, contextId=contextId)
if disclosureSystem.defaultXmlLang:
for concept, preferredLabelRels in conceptRelsUsedWithPreferredLabels.items():
for preferredLabelRel in preferredLabelRels:
preferredLabel = preferredLabelRel.preferredLabel
hasDefaultLangPreferredLabel = False
for modelLabelRel in labelsRelationshipSet.fromModelObject(concept):
modelLabel = modelLabelRel.toModelObject
if modelLabel.xmlLang.startswith(disclosureSystem.defaultXmlLang) and \
modelLabel.role == preferredLabel:
hasDefaultLangPreferredLabel = True
break
if not hasDefaultLangPreferredLabel:
self.modelXbrl.error("GFM.1.06.04", # 6.12.04 now reserved: ("EFM.6.12.04", "GFM.1.06.04"),
_("Concept %(concept)s missing %(lang)s preferred labels for role %(preferredLabel)s"),
modelObject=(preferredLabelRel, concept), concept=concept.qname, fromConcept=preferredLabelRel.fromModelObject.qname,
lang=disclosureSystem.defaultLanguage, preferredLabel=preferredLabel)
del conceptRelsUsedWithPreferredLabels
# 6 16 4, 1.16.5 Base sets of Domain Relationship Sets testing
self.modelXbrl.profileActivity("... filer preferred label checks", minTimeToShow=1.0)
''' try moving to plug-in
if self.validateSBRNL:
# check presentation link roles for generic linkbase order number
ordersRelationshipSet = modelXbrl.relationshipSet("http://www.nltaxonomie.nl/2011/arcrole/linkrole-order")
presLinkroleNumberURI = {}
presLinkrolesCount = 0
for countLinkroles in (True, False):
for roleURI, modelRoleTypes in modelXbrl.roleTypes.items():
for modelRoleType in modelRoleTypes:
if XbrlConst.qnLinkPresentationLink in modelRoleType.usedOns:
if countLinkroles:
presLinkrolesCount += 1
else:
if not ordersRelationshipSet:
modelXbrl.error("SBR.NL.2.2.3.06",
_("Presentation linkrole %(linkrole)s missing order number relationship set"),
modelObject=modelRoleType, linkrole=modelRoleType.roleURI)
else:
order = None
for orderNumRel in ordersRelationshipSet.fromModelObject(modelRoleType):
order = getattr(orderNumRel.toModelObject, "xValue", "(noPSVIvalue)")
if order in presLinkroleNumberURI:
modelXbrl.error("SBR.NL.2.2.3.06",
_("Presentation linkrole order number %(order)s of %(linkrole)s also used in %(otherLinkrole)s"),
modelObject=modelRoleType, order=order, linkrole=modelRoleType.roleURI, otherLinkrole=presLinkroleNumberURI[order])
else:
presLinkroleNumberURI[order] = modelRoleType.roleURI
if not order:
modelXbrl.error("SBR.NL.2.2.3.06",
_("Presentation linkrole %(linkrole)s missing order number"),
modelObject=modelRoleType, linkrole=modelRoleType.roleURI)
if countLinkroles and presLinkrolesCount < 2:
break # don't check order numbers if only one presentation linkrole
# check arc role definitions for labels
for arcroleURI, modelRoleTypes in modelXbrl.arcroleTypes.items():
for modelRoleType in modelRoleTypes:
if (not arcroleURI.startswith("http://xbrl.org/") and
modelRoleType.modelDocument.targetNamespace not in disclosureSystem.baseTaxonomyNamespaces and
(not modelRoleType.genLabel(lang="nl") or not modelRoleType.genLabel(lang="en"))):
modelXbrl.error("SBR.NL.2.2.4.02",
_("ArcroleType missing nl or en generic label: %(arcrole)s"),
modelObject=modelRoleType, arcrole=arcroleURI)
for domainElt in typedDomainElements:
if domainElt.modelDocument.targetNamespace not in disclosureSystem.baseTaxonomyNamespaces:
if not domainElt.genLabel(fallbackToQname=False,lang="nl"):
modelXbrl.error("SBR.NL.2.2.8.01",
_("Typed dimension domain element %(concept)s must have a generic label"),
modelObject=domainElt, concept=domainElt.qname)
if domainElt.type is not None and domainElt.type.localName == "complexType":
modelXbrl.error("SBR.NL.2.2.8.02",
_("Typed dimension domain element %(concept)s has disallowed complex content"),
modelObject=domainElt, concept=domainElt.qname)
self.modelXbrl.profileActivity("... SBR role types and type facits checks", minTimeToShow=1.0)
'''
if self.validateEFM:
for pluginXbrlMethod in pluginClassMethods("Validate.EFM.Finally"):
pluginXbrlMethod(self, conceptsUsed)
elif self.validateSBRNL:
for pluginXbrlMethod in pluginClassMethods("Validate.SBRNL.Finally"):
pluginXbrlMethod(self, conceptsUsed)
self.modelXbrl.profileActivity("... plug in '.Finally' checks", minTimeToShow=1.0)
self.modelXbrl.profileStat(_("validate{0}").format(modelXbrl.modelManager.disclosureSystem.validationType))
modelXbrl.modelManager.showStatus(_("ready"), 2000)
def isStandardUri(self, uri):
try:
return self._isStandardUri[uri]
except KeyError:
isStd = (uri in self.disclosureSystem.standardTaxonomiesDict or
(not isHttpUrl(uri) and
# try 2011-12-23 RH: if works, remove the localHrefs
# any(u.endswith(e) for u in (uri.replace("\\","/"),) for e in disclosureSystem.standardLocalHrefs)
"/basis/sbr/" in uri.replace("\\","/")
))
self._isStandardUri[uri] = isStd
return isStd
def directedCycle(self, relFrom, origin, fromRelationships, path):
if relFrom in fromRelationships:
for rel in fromRelationships[relFrom]:
relTo = rel.toModelObject
if relTo == origin:
return [rel]
if relTo not in path: # report cycle only where origin causes the cycle
path.add(relTo)
foundCycle = self.directedCycle(relTo, origin, fromRelationships, path)
if foundCycle | |
new_bbox:
transform.bounding_box = self.bounding_box
else:
axes_ind = self._get_axes_indices()
if transform.n_inputs > 1:
transform.bounding_box = [self.bounding_box[ind] for ind in axes_ind][::-1]
else:
transform.bounding_box = self.bounding_box
result = transform(*args, **kwargs)
if with_units:
if self.output_frame.naxes == 1:
result = self.output_frame.coordinates(result)
else:
result = self.output_frame.coordinates(*result)
return result
    def in_image(self, *args, **kwargs):
        """
        This method tests if one or more of the input world coordinates are
        contained within forward transformation's image and that it maps to
        the domain of definition of the forward transformation.
        In practical terms, this function tests
        that input world coordinate(s) can be converted to input frame and that
        it is within the forward transformation's ``bounding_box`` when
        defined.
        Parameters
        ----------
        args : float, array like, `~astropy.coordinates.SkyCoord` or
            `~astropy.units.Unit` coordinates to be inverted
        kwargs : dict
            keyword arguments to be passed either to ``backward_transform``
            (when defined) or to the iterative invert method.
        Returns
        -------
        result : bool, numpy.ndarray
            A single boolean value or an array of boolean values with `True`
            indicating that the WCS footprint contains the coordinate
            and `False` if input is outside the footprint.
        """
        # Force bounding-box clipping so out-of-footprint inputs invert to NaN,
        # which is then detected by np.isfinite below.
        kwargs['with_bounding_box'] = True
        kwargs['fill_value'] = np.nan
        coords = self.invert(*args, **kwargs)
        result = np.isfinite(coords)
        if self.input_frame.naxes > 1:
            # One boolean per input point: all axes must be finite.
            result = np.all(result, axis=0)
        # No bounding box to refine against, or nothing left to check.
        if self.bounding_box is None or not np.any(result):
            return result
        if self.input_frame.naxes == 1:
            # ``new_bbox`` is a module-level flag — presumably selects the newer
            # astropy modeling bounding-box API; TODO confirm.
            if new_bbox:
                x1, x2 = self.bounding_box.bounding_box()
            else:
                x1, x2 = self.bounding_box
            if len(np.shape(args[0])) > 0:
                # Array input: refine only the points still marked True.
                result[result] = (coords[result] >= x1) & (coords[result] <= x2)
            elif result:
                # Scalar input: plain interval test.
                result = (coords >= x1) and (coords <= x2)
        else:
            if len(np.shape(args[0])) > 0:
                # Progressively narrow the True mask one axis at a time; each
                # assignment shrinks the set of surviving points, and the next
                # axis is tested only on those survivors.
                for c, (x1, x2) in zip(coords, self.bounding_box):
                    result[result] = (c[result] >= x1) & (c[result] <= x2)
            elif result:
                # Scalar multi-axis input: every axis must lie in its interval.
                result = all([(c >= x1) and (c <= x2) for c, (x1, x2) in zip(coords, self.bounding_box)])
        return result
def invert(self, *args, **kwargs):
"""
Invert coordinates from output frame to input frame using analytical or
user-supplied inverse. When neither analytical nor user-supplied
inverses are defined, a numerical solution will be attempted using
:py:meth:`numerical_inverse`.
.. note::
Currently numerical inverse is implemented only for 2D imaging WCS.
Parameters
----------
args : float, array like, `~astropy.coordinates.SkyCoord` or `~astropy.units.Unit`
Coordinates to be inverted. The number of arguments must be equal
to the number of world coordinates given by ``world_n_dim``.
with_bounding_box : bool, optional
If `True` (default) values in the result which correspond to any
of the inputs being outside the bounding_box are set to
``fill_value``.
fill_value : float, optional
Output value for inputs outside the bounding_box (default is ``np.nan``).
with_units : bool, optional
If ``True`` returns a `~astropy.coordinates.SkyCoord` or
`~astropy.coordinates.SpectralCoord` object, by using the units of
the output cooridnate frame. Default is `False`.
Other Parameters
----------------
kwargs : dict
Keyword arguments to be passed to :py:meth:`numerical_inverse`
(when defined) or to the iterative invert method.
Returns
-------
result : tuple
Returns a tuple of scalar or array values for each axis.
"""
with_units = kwargs.pop('with_units', False)
if not utils.isnumerical(args[0]):
args = self.output_frame.coordinate_to_quantity(*args)
if self.output_frame.naxes == 1:
args = [args]
try:
if not self.backward_transform.uses_quantity:
args = utils.get_values(self.output_frame.unit, *args)
except (NotImplementedError, KeyError):
args = utils.get_values(self.output_frame.unit, *args)
if 'with_bounding_box' not in kwargs:
kwargs['with_bounding_box'] = True
if 'fill_value' not in kwargs:
kwargs['fill_value'] = np.nan
try:
# remove iterative inverse-specific keyword arguments:
akwargs = {k: v for k, v in kwargs.items() if k not in _ITER_INV_KWARGS}
result = self.backward_transform(*args, **akwargs)
except (NotImplementedError, KeyError):
result = self.numerical_inverse(*args, **kwargs, with_units=with_units)
if with_units and self.input_frame:
if self.input_frame.naxes == 1:
return self.input_frame.coordinates(result)
else:
return self.input_frame.coordinates(*result)
else:
return result
def numerical_inverse(self, *args, **kwargs):
"""
Invert coordinates from output frame to input frame using numerical
inverse.
.. note::
Currently numerical inverse is implemented only for 2D imaging WCS.
.. note::
This method uses a combination of vectorized fixed-point
iterations algorithm and `scipy.optimize.root`. The later is used
for input coordinates for which vectorized algorithm diverges.
Parameters
----------
args : float, array like, `~astropy.coordinates.SkyCoord` or `~astropy.units.Unit`
Coordinates to be inverted. The number of arguments must be equal
to the number of world coordinates given by ``world_n_dim``.
with_bounding_box : bool, optional
If `True` (default) values in the result which correspond to any
of the inputs being outside the bounding_box are set to
``fill_value``.
fill_value : float, optional
Output value for inputs outside the bounding_box (default is ``np.nan``).
with_units : bool, optional
If ``True`` returns a `~astropy.coordinates.SkyCoord` or
`~astropy.coordinates.SpectralCoord` object, by using the units of
the output cooridnate frame. Default is `False`.
tolerance : float, optional
*Absolute tolerance* of solution. Iteration terminates when the
iterative solver estimates that the "true solution" is
within this many pixels current estimate, more
specifically, when the correction to the solution found
during the previous iteration is smaller
(in the sense of the L2 norm) than ``tolerance``.
Default ``tolerance`` is 1.0e-5.
maxiter : int, optional
Maximum number of iterations allowed to reach a solution.
Default is 50.
quiet : bool, optional
Do not throw :py:class:`NoConvergence` exceptions when
the method does not converge to a solution with the
required accuracy within a specified number of maximum
iterations set by ``maxiter`` parameter. Instead,
simply return the found solution. Default is `True`.
Other Parameters
----------------
adaptive : bool, optional
Specifies whether to adaptively select only points that
did not converge to a solution within the required
accuracy for the next iteration. Default (`True`) is recommended.
.. note::
The :py:meth:`numerical_inverse` uses a vectorized
implementation of the method of consecutive
approximations (see ``Notes`` section below) in which it
iterates over *all* input points *regardless* until
the required accuracy has been reached for *all* input
points. In some cases it may be possible that
*almost all* points have reached the required accuracy
but there are only a few of input data points for
which additional iterations may be needed (this
depends mostly on the characteristics of the geometric
distortions for a given instrument). In this situation
it may be advantageous to set ``adaptive`` = `True` in
which case :py:meth:`numerical_inverse` will continue
iterating *only* over the points that have not yet
converged to the required accuracy.
.. note::
When ``detect_divergence`` is `True`,
:py:meth:`numerical_inverse` will automatically switch
to the adaptive algorithm once divergence has been
detected.
detect_divergence : bool, optional
Specifies whether to perform a more detailed analysis
of the convergence to a solution. Normally
:py:meth:`numerical_inverse` may not achieve the required
accuracy if either the ``tolerance`` or ``maxiter`` arguments
are too low. However, it may happen that for some
geometric distortions the conditions of convergence for
the the method of consecutive approximations used by
:py:meth:`numerical_inverse` may not be satisfied, in which
case consecutive approximations to the solution will
diverge regardless of the ``tolerance`` or ``maxiter``
settings.
When ``detect_divergence`` is `False`, these divergent
points will be detected as not having achieved the
required accuracy (without further details). In addition,
if ``adaptive`` is `False` then the algorithm will not
know that the solution (for specific points) is diverging
and will continue iterating and trying to "improve"
diverging solutions. This may result in ``NaN`` or
``Inf`` values in the return results (in addition to a
performance penalties). Even when ``detect_divergence``
is `False`, :py:meth:`numerical_inverse`, at the end of the
iterative process, will identify invalid results
(``NaN`` or ``Inf``) as "diverging" solutions and will
raise :py:class:`NoConvergence` unless the ``quiet``
parameter is set to `True`.
When ``detect_divergence`` is `True` (default),
:py:meth:`numerical_inverse` will detect points for which
current correction to the coordinates is larger than
the correction applied during the previous iteration
**if** the requested accuracy **has not yet been
achieved**. In this case, if ``adaptive`` is `True`,
these points will be excluded from further iterations and
if ``adaptive`` is `False`, :py:meth:`numerical_inverse` will
automatically switch to the adaptive algorithm. Thus, the
reported divergent solution will be the latest converging
solution computed immediately *before* divergence
has been detected.
.. note::
When accuracy has been achieved, small increases in
current corrections may be possible due to rounding
errors (when ``adaptive`` is `False`) and such
increases will be ignored.
| |
try:
scale = curve.ref_curve.scale
self._p("Using previous scale=%s" % scale, level=2)
except AttributeError:
# crude estimate that should be OK but not optimal
def _crude_scale_estimate(curve):
if callable(curve):
scale = 0.25 * (curve(0)[1]-curve(np.pi)[1])
else:
if isinstance(curve, (list, tuple)):
scale = curve[0] / 2.0
else:
scale = curve / 2.0
return scale
if self.predict_location: # don't assume inner common MOTS case
scale = _crude_scale_estimate(curve)
else: # assume we are doing neck trick with inner common MOTS
neck_info = self._get_neck_info(g)
if neck_info.has_data:
scale = abs(neck_info.z_dist)**(2/3.)
scale = min(scale, 2/3. * neck_info.smaller_z_extent)
scale = max(1e-3*neck_info.z_extent, scale)
else:
scale = _crude_scale_estimate(curve)
self._p("Using estimated scale=%s" % scale, level=2)
move = self._get_bipolar_origin(cfg)
return scale, move
def _determine_optimal_parameters(self, curve, cfg,
initial_smoothing=None):
r"""Return (close to optimal) scaling for bipolar coordinates."""
with timethis("Optimizing ref curve parameterization...",
silent=not self.timings):
scale, move = self._get_parameter_guesses(curve, cfg)
try:
result = optimize_bipolar_scaling(
curve=curve, move=move,
initial_scale=scale,
initial_smoothing=initial_smoothing,
verbose=self.verbosity > 1,
)
except IndexError as e:
# Optimization failed. Keep previous values.
self._p(" ERROR: Optimization failed (%s)" % (e,))
self._p(" Keeping previous values.")
return scale if initial_smoothing is None else scale, initial_smoothing
if initial_smoothing is None:
scale, smoothing = result, None
else:
scale, smoothing = result
self._p(" estimated optimal scale: %s" % scale)
if initial_smoothing is None:
return scale
self._p(" estimated optimal curv2 reparam smoothing: %s"
% smoothing)
return scale, smoothing
def _prepare_metric_for_computation(self, g, c):
r"""Load data and release file handles in common cases."""
# Handle just the most common cases where we trivially know which data
# will be needed. In these cases, we can load the data now and then
# release the file handle (if any).
do_load = False
what = []
if (c and self._has_prop_to_do('stability')
and "stability" not in c.user_data):
do_load = True
if (c and self._has_prop_to_do('stability_convergence')
and "stability_convergence" not in c.user_data):
do_load = True
if not c and not self._has_prop_to_do():
do_load = True
what = ["metric", "curv"]
if not c and self._has_prop_to_do('stability'):
do_load = True
what = []
if do_load:
with timethis("Loading slice data...", " elapsed: {}",
silent=not self.timings, eol=False):
g.load_data(*what)
g.release_file_handle()
def _has_prop_to_do(self, prop='any'):
r"""Return whether any or a particular property should be computed."""
props = self.props
if not isinstance(props, (list, tuple)):
props = [props]
if prop == 'any':
if len(props) == 1:
p = props[0]
return p is not None and p != 'none'
return bool(props)
return (prop in props
or ('all' in props and prop not in NEED_ACTIVATION_PROPS))
def _compute_properties(self, c, fname):
r"""Call compute_props() for the given curve.
In case properties have been computed and hence the curve data
changed, the curve is re-saved to disk with the new data.
"""
resave = False
with timethis("Computing properties...",
silent=not (self.timings and self._has_prop_to_do())):
stability_factors = self.stability_convergence_factors
if compute_props(hname=self.hname, c=c, props=self.props,
area_rtol=self.area_rtol,
min_stability_values=self.min_stability_values,
stability_convergence_factors=stability_factors,
max_multipole_n=self.max_multipole_n,
MOTS_map=self.MOTS_map, verbosity=self.verbosity,
fname=fname):
resave = True
if resave and fname:
c.save(fname, overwrite=True)
def _aux_MOTS_dir(self, hname):
r"""Get the directory (respecting the MOTS map) of auxiliary curves."""
run = self.MOTS_map.get(hname, self._run_name)
return op.join(self._parent_dir, run, hname)
def _get_neck_info(self, g):
if self._last_neck_info and self._last_neck_info.iteration == g.iteration:
return self._last_neck_info
self._last_neck_info = self._compute_neck_info(g)
return self._last_neck_info
    def _compute_neck_info(self, g):
        r"""Gather geometric data about the neck of the current reference curve.

        Locates the neck of the reference curve and compares its coordinate
        width against the two individual ('top'/'bot') MOTSs found on disk for
        the same iteration, producing the two "pinching" ratios used to decide
        whether the neck trick should be applied.

        @param g
            Metric of the current slice; only ``g.iteration`` is used here.
        """
        c = self._c_ref
        top_dir = self._aux_MOTS_dir('top')
        bot_dir = self._aux_MOTS_dir('bot')
        threshold1, threshold2 = self.neck_trick_thresholds
        neck_info = _NeckInfo(threshold1=threshold1, threshold2=threshold2,
                              iteration=g.iteration)
        # Only curves providing find_neck() can be analyzed at all.
        if not hasattr(c, 'find_neck'):
            self._p("Not an ExpansionCurve. Neck moving not available.")
            return neck_info
        try:
            x_neck, z_neck = c(c.find_neck('coord')[0])
        except ValueError:
            self._p("Neck not found.")
            return neck_info
        # Load the individual top/bottom MOTS curves of the same slice
        # iteration, skipping "CE" files.
        c_top = find_file(
            pattern="%s/top_*_it%010d*.npy" % (top_dir, g.iteration),
            skip_regex=r"_CE", load=True, verbose=self.verbosity > 1
        )
        c_bot = find_file(
            pattern="%s/bot_*_it%010d*.npy" % (bot_dir, g.iteration),
            skip_regex=r"_CE", load=True, verbose=self.verbosity > 1
        )
        # Maximum x-coordinate (i.e. coordinate width) of each individual MOTS,
        # sampled at the curve's collocation points.
        with c_top.fix_evaluator():
            x_top = max(
                c_top(la)[0] for la in c_top.h.collocation_points(lobatto=False)
            )
        with c_bot.fix_evaluator():
            x_bot = max(
                c_bot(la)[0] for la in c_bot.h.collocation_points(lobatto=False)
            )
        # Ratio 1: width of the smaller individual MOTS vs. neck width.
        pinching1 = min(x_top, x_bot) / x_neck
        self._p("Smaller MOTS's width / neck width: %s (threshold=%s)"
                % (pinching1, threshold1))
        # z-values at the parameter endpoints of each individual curve;
        # presumably lambda=0/pi correspond to the facing/outer ends of the
        # top and bottom MOTSs — TODO confirm orientation convention.
        z_top = c_top(np.pi)[1]
        z_bot = c_bot(0.0)[1]
        z_top_outer = c_top(0.0)[1]
        z_bot_outer = c_bot(np.pi)[1]
        # Ratio 2: coordinate distance between the two horizons vs. neck width.
        pinching2 = abs(z_top - z_bot) / (2*x_neck)
        self._p("Individual horizons' distance / neck width: %s (threshold=%s)"
                % (pinching2, threshold2))
        neck_info.update(
            x_top=x_top, z_top=z_top, x_bot=x_bot, z_bot=z_bot, x_neck=x_neck,
            z_neck=z_neck, z_top_outer=z_top_outer, z_bot_outer=z_bot_outer,
            pinching1=pinching1, pinching2=pinching2,
        )
        return neck_info
    def predicted_location(self, time, prev_curve, full_output=False, verbose=True):
        r"""Predict the location of a given curve at a given time.
        This predicts the location of the *anchor* point at a given time. This
        anchor is configured with the ``use_location`` property.
        @return When ``full_output=False`` (default), the translation `dz` by
            which `prev_curve` needs to be moved, or `None` if the location
            cannot be predicted (e.g. prediction inactive, or too few previous
            steps and no initial velocity). When ``full_output=True``, a tuple
            ``(z, dz)`` of the predicted z-coordinate of the anchor point and
            the translation, or ``(None, None)`` on failure.
        @param time
            Time at which the location of `prev_curve` should be predicted.
        @param prev_curve
            Curve to predict the location of.
        @param full_output
            If `True`, also return the predicted absolute z-coordinate in
            addition to the translation. Default is `False`.
        @param verbose
            Print some info. Default is `True`.
        """
        if not self.predict_location:
            return (None, None) if full_output else None
        # Keep only times strictly on the "past" side of `time` with respect
        # to the direction of tracking.
        times = sorted(self._prev_locations.keys())
        if self.backwards:
            times = [t for t in reversed(times) if t > time]
        else:
            times = [t for t in times if t < time]
        if len(times) < 2:
            # Not enough history for a finite difference; fall back to a
            # configured initial velocity (if any).
            if verbose:
                self._p("Too few previous curves to predict location.")
            if not self.initial_velocity:
                return (None, None) if full_output else None
            if verbose:
                self._p("Using given initial velocity dz/dt = %s" %
                        self.initial_velocity)
            velocity = self.initial_velocity
        else:
            # Finite-difference estimate from the two most recent locations.
            dz = self._prev_locations[times[-1]] - self._prev_locations[times[-2]]
            dt = times[-1] - times[-2]
            velocity = dz / dt
            if verbose:
                self._p("Estimated coordinate velocity dz/dt = %g/%g = %s"
                        % (dz, dt, velocity))
        # Extrapolate from the time of the previous curve's slice to `time`.
        translation = velocity * (time - prev_curve.metric.time)
        if not full_output:
            return translation
        try:
            z0 = self._prev_locations[prev_curve.metric.time]
        except KeyError:
            # Location of prev_curve not recorded; derive it from its shape.
            z0 = self._get_shape_location(prev_curve)
            if z0 is None:
                return None, None
        z_goal = z0 + translation
        return z_goal, translation
def apply_velocity(self, g, prev_curve):
    r"""Estimate the horizon's coordinate velocity and translate the curve.

    @param g
        Metric representing the time at which to estimate the MOTS
        position.
    @param prev_curve
        Previous curve to move to the estimated position.
    """
    translation = self.predicted_location(
        time=g.time, prev_curve=prev_curve, verbose=True,
    )
    if self.predict_location and translation is not None:
        self._p("Applying translation of dz = %s" % translation)
        return self._move_curve(prev_curve, translation)
    # Prediction inactive or impossible: keep the curve where it is.
    return prev_curve
def neck_trick(self, g, c):
    r"""Check and possibly perform the *neck trick*.

    @param g
        Current slice's metric.
    @param c
        Reference curve (or initial guess) to apply the neck trick to.
        Should be a .curve.expcurve.ExpansionCurve.
    """
    if not self.do_neck_trick:
        return c
    info = self._get_neck_info(g)
    if not info.has_data:
        return c
    if not info.do_move_neck:
        self._p("Neck pinching below threshold. Not moving reference curve.")
        return c
    self._p("Neck pinching above threshold. Moving...")
    # Shift the curve so its neck ends up at the configured center.
    offset = -(info.z_neck - info.z_center)
    moved = self._move_curve(c, offset)
    self._p("Neck moved by dz=%s to z=%s" % (offset, info.z_center))
    return moved
def _move_curve(self, c, delta_z):
    r"""Return a copy of `c` translated by `delta_z` along the z-axis."""
    supports_ref_offset = (
        hasattr(c, 'ref_curve') and hasattr(c.ref_curve, 'add_z_offset')
    )
    if supports_ref_offset:
        # RefParamCurve-like: shift via a copy of its reference curve.
        moved = c.copy()
        moved.ref_curve = moved.ref_curve.copy()
        moved.ref_curve.add_z_offset(delta_z)
        return moved
    # Not a RefParamCurve. Convert to parametric curve.
    moved = ParametricCurve.from_curve(c, num=c.num)
    self._p("Curve converted to ParametricCurve with resolution %s." % moved.num)
    moved.add_z_offset(delta_z)
    return moved
@classmethod
def max_constraint_along_curve(cls, *args, **kwargs):
    r"""Calls `props.max_constraint_along_curve()`.

    Pure convenience forwarder; all arguments are passed through unchanged.
    """
    return max_constraint_along_curve(*args, **kwargs)
def _get_cfg(self, g, try_no):
    r"""Construct a configuration for the given metric's slice.

    The suffix encodes the iteration (and retry number, if any) so output
    files of different slices/tries do not collide.
    """
    # NOTE(review): indentation of this method was reconstructed; the
    # placement of the two trailing `if` blocks at function level (rather
    # than inside the `strategy >= 2` branch) should be confirmed against
    # the original file.
    suffix = "it%010d" % g.iteration
    if try_no is not None:
        suffix += "_try%04d" % try_no
    c_ref = self._c_ref
    cfg = self._base_cfg(metric=g, suffix=suffix, c_ref=c_ref)
    if self.strategy >= 2:
        if c_ref and not isinstance(c_ref, numbers.Number):
            # Follow the previous curve's resolution; the reference curve
            # gets a (possibly reduced) resolution, bounded from below.
            cfg.update(
                num=c_ref.num,
                ref_num=max(
                    self.min_ref_num,
                    int(round(self.ref_num_factor*c_ref.num))
                )
            )
        else:
            # No usable previous curve: derive the resolution from the
            # configured minimum reference resolution.
            cfg.update(
                num=int(round(
                    self.min_ref_num / max(self.ref_num_factor, 0.1)
                )),
                ref_num=self.min_ref_num,
            )
    if self._should_override_resolution(g):
        cfg.update(num=self.initial_num)
    if self.ref_smoothing is not None:
        self._change_reparam_settings(
            cfg, "curv2", smoothing=self.ref_smoothing
        )
    return cfg
def _should_override_resolution(self, g):
    r"""Return whether we should override the curve resolution for this slice."""
    if self.initial_num is None:
        return False
    # With `follow_resolution`, only the very first slice uses the
    # configured `initial_num`; later slices keep their inherited value.
    return not (self.follow_resolution and not self._is_first_slice(g))
def _is_first_slice(self, g):
    r"""Return whether the given metric belongs to the first slice considered."""
    first_iteration = self.get_metric(0).iteration
    return g.iteration == first_iteration
def _change_reparam_settings(self, cfg, new_strategy=None, **new_settings):
r"""Merge new settings into current `reparam` parameters.
No check is done to ensure consistent settings, e.g. we don't remove
`smoothing` from the arguments even if `new_strategy` is incompatible.
Currently, the caller of this class has to provide | |
"""Please Not Another Compiler Compiler -- LR parser generator library.
"""
from collections import namedtuple
from operator import methodcaller
from functools import partial
import sys
import re
_str_type = globals().get("basestring", str)
def identity(x):
    """Identity function, useful as semantic action for rules."""
    return x
def namedtuple_with_visit(name, fields, doc=""):
    """Similar to `collections.namedtuple' but in addition has a `visit' method.

    The `visit` method dispatches to the visitor method named after the
    class, passing the tuple's fields as positional arguments.

    Example:

        Foo = namedtuple_with_visit("Foo", "x y")
        foo = Foo(3, 4)

        class Visitor:
            def Foo(self, x, y):
                print("Foo(%d,%d)" % (x, y))

        foo.visit(Visitor())
        # prints Foo(3,4)
    """
    def visit(self, visitor):
        return getattr(visitor, name)(*self)
    base = namedtuple(name, fields)
    namespace = {"__slots__": (), "visit": visit, "__doc__": doc}
    cls = type(name, (base,), namespace)
    visit.__doc__ = "Calls visitor.%s(%s)" % (name, ", ".join(cls._fields))
    return cls
# Parser actions stored in the generated table. Each is a namedtuple with a
# `visit` method (built by namedtuple_with_visit) for visitor-style dispatch.
Shift = namedtuple_with_visit("Shift", "state", "Represents a Shift action.")
Goto = namedtuple_with_visit("Goto", "state", "Represent a Goto action for non-terminals.")
Reduce = namedtuple_with_visit("Reduce", "rule pop nonterminal", "Represent a Reduce action.")
Accept = namedtuple_with_visit("Accept", "", "Means that the parsing is complete and succesfull.")
Conflict = namedtuple_with_visit("Conflict", "actions", "A conflict between two or more parsing actions.")
Error = namedtuple_with_visit("Error", "", "Illegal input.")
class _LR0Item(namedtuple("_LR0Item", "rule position")):
__slots__ = ()
def __str__(self):
rule = self.rule
id = rule.id
if id == -1:
rule_str = "Start rule"
else:
rule_str = "Rule %d" % id
names = rule.rhs_names[:self.position] + ["."] + rule.rhs_names[self.position:]
return rule_str + " : " + rule.lhs_name + " -> " + " ".join(names)
def reducable(self):
return self.rule.N == self.position
def is_interesting(self):
return True
return self.position > 0 or self.rule.id == -1 or self.rule.N == 0
class _LR1Item(namedtuple("_LR1Item", "lr0item lookaheads")):
    """An LR(1) item: an LR(0) core plus a frozenset of lookahead terminals."""
    __slots__ = ()

    def __str__(self):
        return str(self.lr0item) + " look-ahead: " + " ".join(lookahead.name for lookahead in self.lookaheads)

    def compute_closure(self, visited, depth=0):
        """Recursively accumulate the LR(1) closure into `visited`.

        `visited` maps LR(0) cores to the union of lookahead sets seen so
        far; recursion stops once this item adds no new lookaheads.
        """
        lr0item = self.lr0item
        visited_lookaheads = visited.get(lr0item, frozenset())
        if visited_lookaheads.issuperset(self.lookaheads):
            # Nothing new to add for this core -- avoids infinite recursion
            # on cyclic grammars.
            return
        visited[lr0item] = visited_lookaheads.union(self.lookaheads)
        rule = lr0item.rule
        position = lr0item.position
        if position < rule.N:
            next_symbol = rule.rhs[position]
            if not next_symbol.is_terminal:
                # Lookaheads for the expanded non-terminal: FIRST of the
                # remainder of this rule, plus our own lookaheads when that
                # remainder is nullable.
                lookaheads = rule.item_first[position + 1]
                if rule.item_nullable[position + 1]:
                    lookaheads = lookaheads.union(self.lookaheads)
                if lookaheads:
                    for new_lr0item in next_symbol.start_items:
                        new_item = _LR1Item(new_lr0item, lookaheads)
                        new_item.compute_closure(visited, depth+1)
def _itemset_closure(itemset):
visited = {}
for item in itemset:
item.compute_closure(visited)
return frozenset(_LR1Item(k, visited[k]) for k in visited)
def _itemset_advance(itemset, mapping):
transition_table = {}
for item in itemset:
lr0item = item.lr0item
rule = lr0item.rule
position = lr0item.position
if position < rule.N:
next_symbol = rule.rhs[position]
transition_set = transition_table.get(next_symbol, [])
transition_set.append(_LR1Item(_LR0Item(rule, position + 1), item.lookaheads))
transition_table[next_symbol] = transition_set
return {k.name: mapping(k, _itemset_closure(transition_table[k])) for k in transition_table}
def _merge_actions(action1, action2):
if action1 is None:
return action2
elif isinstance(action1, Conflict):
return Conflict(action1.actions + (action2,))
else:
return Conflict((action1, action2))
class _Rule(object):
nullable = False
def __init__(self, rule, id):
self.rule = rule
self.id = id
lhs, rhs = rule.split("->", 1)
self.lhs_name = lhs.strip()
self.rhs_names = rhs.split()
N = len(self.rhs_names)
self.N = N
self.item_nullable = [False] * N + [True]
self.item_first = [frozenset()] * (N + 1)
def __repr__(self):
return "_Rule(%s, %s)" % (repr(self.rule), self.id)
def compute_nullable(self):
rhs = self.rhs
item_nullable = self.item_nullable
nullable = True
for i in range(len(rhs))[::-1]:
sym = rhs[i]
nullable = nullable and sym.nullable
item_nullable[i] = nullable
return item_nullable[0]
def compute_first(self):
rhs = self.rhs
item_first = self.item_first
first = set()
for i in range(len(rhs))[::-1]:
sym = rhs[i]
sym_first = sym.first
if sym.nullable:
first.update(sym_first)
else:
first = set(sym_first)
item_first[i] = frozenset(sym for sym in first if sym.is_terminal)
return first
class _NonTerminal(object):
is_terminal = False
nullable = False
def __init__(self, name):
self.name = name
self.first = frozenset([self])
self.rules = []
def compute_nullable(self):
result = False
for rule in self.rules:
nullable = rule.compute_nullable()
rule.nullable = nullable
result = result or nullable
return result
def compute_first(self):
result = set(self.first)
for rule in self.rules:
first = rule.compute_first()
rule.first = first
result.update(first)
return result
class _Terminal(object):
is_terminal = True
nullable = False
start_items = frozenset()
def __init__(self, name):
self.name = name
self.first = frozenset([self])
def compute_first(self):
return self.first
def compute_nullable(self):
return True
_eof = "<eof>"
def _make_parse_table(rules, report=None):
    """Given a sequence of grammar rules, create the action table of the LR(1) machine.

    `rules` are strings of the form "Lhs -> Sym1 Sym2 ...". Symbols that
    never appear on a left-hand side become terminals. The returned table
    is a list (indexed by state) of dicts mapping symbol names to actions
    (Shift/Goto/Reduce/Accept or Conflict).

    When `report` is given, a human-readable description of the grammar
    analysis and all states is written to it.
    """
    symbols = {}
    states = []
    states_sentences = []
    # The empty itemset maps to no state at all.
    states_dict = {frozenset(): None}
    nonterminals = []
    terminals = []

    def make_nonterminal(name):
        """Create a new non-terminal object."""
        nonterm = _NonTerminal(name)
        nonterminals.append(nonterm)
        return nonterm

    def make_terminal(name):
        """Create a new terminal object."""
        term = _Terminal(name)
        terminals.append(term)
        return term

    def make_state(itemset, symbol=None):
        """Create a new state or return existing."""
        try:
            return states_dict[itemset]
        except KeyError:
            state = len(states)
            states.append(itemset)
            # Record an example sentence leading to this state: the one
            # of the state we came from, extended by `symbol`.
            if symbol is None:
                state_sentence = ()
            else:
                state_sentence = states_sentences[current_state] + (symbol,)
            states_sentences.append(state_sentence)
            states_dict[itemset] = state
            return state

    def make_action(symbol, itemset):
        """Create either a shift or a goto action."""
        assert symbol != start_symbol
        state = make_state(itemset, symbol)
        if symbol.is_terminal:
            return Shift(state)
        else:
            return Goto(state)

    def compute_fixpoint(name):
        """Compute a fixpoint function on the non-terminals."""
        compute = methodcaller("compute_" + name)
        changed = True
        while changed:
            changed = False
            for nonterm in nonterminals:
                value = compute(nonterm)
                if value != getattr(nonterm, name):
                    setattr(nonterm, name, value)
                    changed = True

    def lookup_symbol(name, create):
        """Find or create a terminal or non-terminal."""
        try:
            return symbols[name]
        except KeyError:
            result = create(name)
            symbols[name] = result
            return result

    # This terminal denotes the end of the input.
    eof = make_terminal(_eof)
    # Initialize the rule objects
    rules = [_Rule(rules[i], i) for i in range(len(rules))]
    # First register every left-hand side as a non-terminal, then resolve
    # right-hand sides (unknown names become terminals).
    for rule in rules:
        lhs = lookup_symbol(rule.lhs_name, make_nonterminal)
        rule.lhs = lhs
    for rule in rules:
        rule.rhs = [lookup_symbol(s, make_terminal) for s in rule.rhs_names]
    # Create the start rule (id -1) deriving the first rule's LHS.
    start_name = rules[0].lhs_name
    start_symbol = make_nonterminal(start_name + "'")
    start_rule = _Rule("%s' -> %s" % (start_name, start_name), -1)
    start_rule.lhs = start_symbol
    start_rule.rhs = [lookup_symbol(start_name, make_nonterminal)]
    rules.append(start_rule)
    # Distribute the rules over the non-terminals
    for rule in rules:
        rule.lhs.rules.append(rule)
    # Compute `nullable' and `first' properties on non-terminals
    compute_fixpoint("nullable")
    compute_fixpoint("first")
    # Prepare the LR0-items with the position at the start of the rule, for each non-terminal.
    for nonterm in nonterminals:
        nonterm.start_items = [_LR0Item(rule, 0) for rule in nonterm.rules]
    # The initial state (state 0).
    start_state = make_state(_itemset_closure([_LR1Item(_LR0Item(start_rule, 0), frozenset([eof]))]))
    # Create all states, and fill action table with shift and goto actions.
    # `states` grows while we iterate, which drives the exploration.
    parse_table = []
    current_state = 0
    while current_state < len(states):
        itemset = states[current_state]
        parse_table.append(_itemset_advance(itemset, make_action))
        current_state = current_state + 1
    # Add the reduce/accept actions
    for state in range(len(states)):
        action_row = parse_table[state]
        for lr1item in states[state]:
            lr0item = lr1item.lr0item
            if lr0item.reducable():
                rule = lr0item.rule
                if rule == start_rule:
                    action = Accept()
                else:
                    action = Reduce(rule.id, rule.N, rule.lhs.name)
                for lookahead in lr1item.lookaheads:
                    current_action = action_row.get(lookahead.name, None)
                    action_row[lookahead.name] = _merge_actions(current_action, action)
    # Generate a report, if requested
    if report is not None:
        write = report.write
        write("Terminals: %s\n" % " ".join(term.name for term in terminals))
        for nonterm in nonterminals:
            write("\nNon-terminal %s: nullable: %s, first: %s\n" % (nonterm.name, nonterm.nullable, " ".join(sym.name for sym in nonterm.first if sym.is_terminal)))
            for rule in nonterm.rules:
                # NOTE(review): range(max(len(rule.rhs), 1)) omits the fully
                # reduced item (dot at position N) from the report -- confirm
                # whether that is intended.
                for i in range(max(len(rule.rhs), 1)):
                    item = _LR0Item(rule, i)
                    write("%s: nullable: %s, first: %s\n" % (str(item), rule.item_nullable[i], " ".join(sym.name for sym in rule.item_first[i])))
        for state in range(len(states)):
            write("\nState %d, " % state)
            write("example sentence: %s\n" % " ".join(symbol.name for symbol in states_sentences[state]))
            sorted_items = [item for item in states[state] if item.lr0item.is_interesting()]
            sorted_items.sort(key=lambda item: item.lr0item.rule.id)
            write("LR1 itemset:\n" + "".join(" %s\n" % str(item) for item in sorted_items))
            actions = parse_table[state]
            write("Actions:\n")
            write("".join(" %s : %s\n" % (k, actions[k]) for k in actions))
    return parse_table
class FileCache(object):
    """Persist a single pickled ``(input, result)`` pair in a file.

    Used to cache a computed parse table on disk; the cache is valid as
    long as the stored input equals the requested one.
    """

    def __init__(self, filename):
        self.filename = filename

    def read_object(self):
        """Load and return the pickled object stored in the cache file."""
        import pickle
        with open(self.filename, "rb") as file:
            return pickle.load(file)

    def write_object(self, obj):
        """Atomically replace the cache file with the pickled `obj`."""
        import os.path
        import pickle
        import shutil
        import tempfile
        # On most reasonable operating systems, a move in the same directory is
        # guaranteed to be atomic.
        # This code tries to take advantge of that by first creating temporary file in
        # the destination directory, and then moving it over the destination filename.
        with tempfile.NamedTemporaryFile(dir=os.path.dirname(self.filename), delete=False) as file:
            pickle.dump(obj, file)
            name = file.name
        shutil.move(name, self.filename)

    def get(self, input, compute):
        """Return ``compute(input)``, reusing the cached result when possible.

        Cache read/write failures are deliberately non-fatal: the value is
        simply recomputed (and re-written on a best-effort basis).
        """
        import pickle
        cached_value = None
        try:
            cached_value = self.read_object()
        # BUG FIX: `PickleError` was referenced as a bare (undefined) name,
        # which raised NameError instead of being caught; it must be
        # qualified as pickle.PickleError.
        except (IOError, OSError, pickle.PickleError):
            pass
        if isinstance(cached_value, tuple) and len(cached_value) == 2 and cached_value[0] == input:
            return cached_value[1]
        result = compute(input)
        try:
            self.write_object((input, result))
        except (IOError, OSError, pickle.PickleError):
            pass
        return result
def _make_parse_table_cached(rules, cache):
    """Build the parse table through `cache`; a string is taken as a FileCache path."""
    backend = FileCache(cache) if isinstance(cache, _str_type) else cache
    return backend.get(tuple(rules), _make_parse_table)
def make_parse_table(rules, cache=None, report=None):
if cache is None or | |
#imports--------------------------------------------------------------------------------------------------------------------
import os
import glob
import sys
#import wget
import time
import subprocess
import shlex
import sys
import warnings
import random
import pickle
from Bio.SeqUtils import seq1
from Bio.PDB.PDBParser import PDBParser
from Bio import AlignIO
from sklearn.base import TransformerMixin
from sklearn.preprocessing import StandardScaler, Normalizer , MinMaxScaler , RobustScaler
from sklearn.decomposition import PCA
from sklearn.decomposition import IncrementalPCA
from scipy.ndimage import gaussian_filter
from skimage.transform import resize as imresize
import dask
import pdb
#sys.path.append('./ProFET/ProFET/feat_extract/')
#import FeatureGen
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
import h5py
#global parameters--------------------------------------------------------------------------------------------------------------------
verbose = True
#number of physico-chemical properties stored per voxel
propAmount = 12
#amino-acid byte codes accepted by ProtFeat (includes ambiguity codes B, Z, X)
legalAANames = {b'A', b'R', b'N', b'D', b'C', b'Q', b'E', b'G', b'H', b'I', b'L', b'K', b'M', b'F', b'P', b'S', b'T', b'W', b'Y', b'V', b'B', b'Z', b'X'}
#the 20 standard (unambiguous) amino-acid byte codes
unambiguousAANames = {b'A', b'R', b'N', b'D', b'C', b'Q', b'E', b'G', b'H', b'I', b'L', b'K', b'M', b'F', b'P', b'S', b'T', b'W', b'Y', b'V'}
#working on a sample? how big?
sampling = True
sampleSize = 20
#to memorize size of biggest distmat array
maxDistmatSize = 2000
maxVoxelSeqLength = 1000
maxVoxelSeqAmount = 1000
#clipping value for pdb distmat FFT components (how many components should be kept?)
maxfft = 150
distmat_keep_edge = [35, 35]
keep_edge = [20, 50, 5] #corner sizes kept from alignment FFT voxels
#clip fft aln tensor at these max freqs
#3rd dim is always the same...
clipx,clipy,clipz = [ 700 , 30 , None ]
clipSizes = [ 1000 , 50 , None ]
#batch length for calculations on pdb dataset
#1000 was too much for Sinteractive, hdf5 writing step was getting killed. 500 worked
batchlen = 3
#where is the pfam hdf5?
PfamFilepath = 'notebooks/Pfam-A.seed.h5'
#where to store the transformed pdb data
pdb_hdf5_storage = 'notebooks/results/transformed_pdb.h5'
#input data storage
pfam_hdf5_storage = 'notebooks/results/transformed_pfam.h5'
#where to save distmat originals
pdb_original_storage = 'notebooks/results/distmat_originals.h5'
#where to save voxel originals
pfam_original_storage = 'notebooks/results/voxel_originals.h5'
#where to save distmat original sizes
pdb_size_storage = 'notebooks/results/distmat_sizes.h5'
#where to save voxel original sizes
pfam_size_storage = 'notebooks/results/voxel_sizes.h5'
#function definitions--------------------------------------------------------------------------------------------------------------------
#fit the components of the output space
#y: array of stacked distmats (on the 1st axis)
#NOTE(review): the two comment lines above describe a distmat-fitting helper
#that does not appear below; the next definition is filterAA. Verify against
#the full file whether these comments are stale.
def filterAA(aa):
    """Map an amino-acid byte code onto an unambiguous, supported code.

    Asx (B) becomes Asp (D), Glx (Z) becomes Glu (E); X and anything not in
    `legalAANames` falls back to Ala (A).
    """
    if aa == b'B':
        return b'D'
    if aa == b'Z':
        return b'E'
    if aa == b'X' or aa not in legalAANames:
        return b'A'
    return aa
@dask.delayed()
def alnFileToArray(filename):
    """Read a fasta alignment and return (filtered byte array, MSA object)."""
    alnfile = filename
    msa = AlignIO.read(alnfile, format='fasta')
    # BUG FIX: iterating a SeqRecord yields *str* letters, but filterAA
    # compares byte codes (b'A', ...), so previously every residue fell
    # through to the b'A' fallback. Encode each letter to bytes first.
    align_array = np.array(
        [[filterAA(aa.encode('ascii')) for aa in str(rec.seq).upper()]
         for rec in msa],
        np.character)
    return align_array, msa
def alnArrayLineToSequence(align_array, index):
    """Decode row `index` of a byte-character alignment array into a str."""
    row = align_array[index]
    decoded = [aa.decode('utf-8') for aa in row]
    return ''.join(decoded)
#structs is a dictionary of locations of the files for structures
@dask.delayed()
def parsePDB(structs):
    """Parse PDB files, write all chain sequences to 'structs.fast' (fasta)
    and return a dict mapping 'pdbid|chain' to the chain's sequence.

    structs: dict mapping a structure id to the path of its PDB file.
    """
    parser = PDBParser()
    # Three-letter residue name -> one-letter code (includes ambiguity codes).
    converter = {'ALA': 'A', 'ASX': 'B', 'CYS': 'C', 'ASP': 'D', 'GLU': 'E', 'PHE': 'F', 'GLY': 'G',
                 'HIS': 'H', 'ILE': 'I', 'LYS': 'K', 'LEU': 'L', 'MET': 'M', 'ASN': 'N', 'PRO': 'P',
                 'GLN': 'Q', 'ARG': 'R', 'SER': 'S', 'THR': 'T', 'SEC': 'U', 'VAL': 'V', 'TRP': 'W',
                 'XAA': 'X', 'TYR': 'Y', 'GLX': 'Z'}
    structseqs = {}
    with open('structs.fast', 'w') as fastout:
        for s in structs:
            # Reuse the parser created above (it was previously shadowed by
            # a fresh PDBParser() per structure).
            Structure = parser.get_structure(s, structs[s])
            for model in Structure:
                for chain in model:
                    res = chain.get_residues()
                    # Residues with unknown names are silently skipped.
                    seq = ''.join([converter[r.get_resname()] for r in res if r.get_resname() in converter])
                    # BUG FIX: '\\n' wrote a literal backslash-n into the
                    # fasta file; a real newline is required.
                    fastout.write('>' + s + '|' + chain.id + '\n')
                    fastout.write(str(seq) + '\n')
                    structseqs[s + '|' + chain.id] = seq
    return structseqs
@dask.delayed()
def generateProtFeatDict(sequence):
    # Compute the ProFET feature dictionary for a single protein sequence.
    # NOTE(review): FeatureGen comes from the ProFET import that is
    # commented out at the top of this file; as shipped, calling this
    # raises NameError -- confirm the import before use.
    features = FeatureGen.Get_Protein_Feat(sequence)
    return features
#generate complete set of dictionary keys generated by protFET
def protFeatKeys(align_array):
    """Union of all ProFET feature keys over the sequences of one alignment."""
    dictKeys = set()
    for row in range(align_array.shape[0]):
        sequence = alnArrayLineToSequence(align_array, row)
        #sequence = str(msa[i].seq)
        #temporary fix for ProtFeat not supporting B, Z, X
        for old, new in (('B', 'D'), ('Z', 'E'), ('X', 'A'), ('.', ''), ('-', '')):
            sequence = sequence.replace(old, new)
        dictKeys |= set(generateProtFeatDict(sequence).keys())
    return dictKeys
#generate ProtFET array for given align (maxKeys: all keys of the feature dictionary, over the entire set)
def alignToProtFeat(align_array, dictKeys):
    """Build a (num_sequences x num_features) matrix of ProFET features.

    BUG FIX: rows were previously filled from each sequence's feature dict
    in *insertion* order, with missing keys appended at the end, so the
    same column could hold different features for different sequences.
    All rows now use one fixed, sorted key order; missing features are 0.
    """
    keyOrder = sorted(dictKeys)
    align_features = np.zeros((align_array.shape[0], len(keyOrder)), dtype=float)
    for i in range(align_array.shape[0]):
        sequence = alnArrayLineToSequence(align_array, i)
        #temporary fix for ProtFeat not supporting B, Z, X
        sequence = sequence.replace('B', 'D')
        sequence = sequence.replace('Z', 'E')
        sequence = sequence.replace('X', 'A')
        sequence = sequence.replace('.', '')
        sequence = sequence.replace('-', '')
        featuresDict = generateProtFeatDict(sequence)
        align_features[i, :] = [float(featuresDict.get(key, 0.0)) for key in keyOrder]
    return align_features
def generateGapMatrix(align_array):
    """Return an int matrix where 1 marks a gap character ('.' or '-')."""
    gaps = [
        [1 if align_array[i][j] in (b'.', b'-') else 0
         for j in range(align_array.shape[1])]
        for i in range(align_array.shape[0])
    ]
    return np.array(gaps)
@dask.delayed()
def generateAlignVoxel(align_array, numerical, verbose = False):
    """Build a (rows x cols x len(numerical)+1) voxel of per-residue
    physico-chemical property values; the final channel is the gap mask.

    align_array: 2-D array of amino-acid byte codes.
    numerical: list of property names, looked up in the module-level
        `properties` dict (assumed {prop: {aa_byte: value}} -- defined
        elsewhere in this file).
    """
    assert align_array is not None
    n_props = len(numerical)
    align_prop_voxel = np.zeros((align_array.shape[0], align_array.shape[1], n_props + 1), dtype=float)
    if (verbose):
        print('final voxel shape: ', align_prop_voxel.shape)
    if (verbose):
        print('initial array shape: ', align_array.shape)
    for prop_index, prop in enumerate(numerical):
        prop_values = properties[prop]
        align_prop_array = np.zeros(align_array.shape, dtype=float)
        for i in range(align_array.shape[0]):
            for j in range(align_array.shape[1]):
                bstring = align_array[i][j]
                # Ambiguity codes fall back to representative residues,
                # everything unknown falls back to Ala (matches filterAA).
                if bstring in unambiguousAANames:
                    align_prop_array[i][j] = prop_values[bstring]
                elif bstring == b'B':
                    align_prop_array[i][j] = prop_values[b'D']
                elif bstring == b'Z':
                    align_prop_array[i][j] = prop_values[b'E']
                else:
                    align_prop_array[i][j] = prop_values[b'A']
        align_prop_voxel[:, :, prop_index] = align_prop_array
    # BUG FIX: the gap plane was hard-coded to channel 12; use the last
    # channel so the function works for any number of properties.
    align_prop_voxel[:, :, n_props] = generateGapMatrix(align_array)
    if (verbose):
        print('full voxel shape: ', align_prop_voxel.shape)
    return align_prop_voxel
#builds a dictionary of distmats in the set - structs is a dictionary of all the structures (which are then subdivided into chains)
#also adds the distmats to the corresponding pdb_chain_pfam_df column
@dask.delayed()
def PDBToDistmat_single(Structure, verbose = False):
    """Return {chain: symmetric CA-CA distance matrix} for one parsed PDB
    structure, or None when no usable chain was found."""
    assert Structure is not None
    #return distmats for one pdb
    distances = {}
    for model in Structure:
        for chain in model:
            # BUG FIX: the original condition `len(chain) > 50 or
            # len(chain) < 1500` is always true; per the accompanying
            # comment, chains that are too short or too long should be
            # *skipped*, i.e. keep only 50 < len(chain) < 1500.
            if 50 < len(chain) < 1500:
                if (verbose):
                    print('chain: ', chain)
                    print(len(chain))
                # Only residues with an alpha-carbon contribute.
                res = [r for r in chain.get_residues() if 'CA' in r]
                if (len(res) > 0):
                    # Fill the strict lower triangle, then mirror it.
                    distmat = [[res2['CA'] - res1['CA'] if i > j else 0
                                for i, res1 in enumerate(res)]
                               for j, res2 in enumerate(res)]
                    distmat = np.array(distmat)
                    distmat += distmat.T
                    distances[chain] = distmat
    if len(distances) == 0:
        distances = None
    return distances
@dask.delayed()
def fourierNDarray(distmats, maxfft = 100):
    """Real N-D FFT of each distance matrix, clipped to maxfft x maxfft.

    Entries whose matrix is None are omitted from the result.
    """
    shape = (maxfft, maxfft)
    return {
        key: np.fft.rfftn(mat, s=shape)
        for key, mat in distmats.items()
        if mat is not None
    }
@dask.delayed()
def fourierNDarrayVoxels(array, clipSizes = clipSizes):
    """Real N-D FFT of `array` clipped to `clipSizes`; None passes through."""
    if array is None:
        return None
    return np.fft.rfftn(array, s=clipSizes)
@dask.delayed()
def voxelFFTCorners_noDepth(infft , keep_edge = [keep_edge[0], keep_edge[1]]):
    """Keep only the 4 outer corners of the FFT voxel in the first two axes
    (the depth axis is kept whole), for compact storage in hdf5.

    Returns a (2*keep_edge[0], 2*keep_edge[1], depth) complex array whose
    quadrants are the corresponding corners of `infft`.
    """
    # BUG FIX: `np.complex` was removed from NumPy (1.24+); the builtin
    # `complex` is the documented replacement.
    corners = np.zeros((2*keep_edge[0], 2*keep_edge[1], infft.shape[2]), complex)
    # Per-axis [low edge, high edge start] boundaries; the depth axis keeps
    # its full extent.
    boundaries = [[keep_edge[i], infft.shape[i] - keep_edge[i]] for i in range(len(keep_edge))]
    boundaries.append([0, infft.shape[2]])
    for i, x in enumerate(boundaries[0]):
        for j, y in enumerate(boundaries[1]):
            for k, z in enumerate(boundaries[2]):
                slc1 = [slice(None)] * len(infft.shape)
                slc2 = [slice(None)] * len(infft.shape)
                # Only the first two axes are sliced into corners.
                for n, corn in enumerate(zip([i, j], [x, y])):
                    if corn[0] == 0:
                        slc1[n] = slice(0, corn[1])
                        slc2[n] = slice(0, corn[1])
                    else:
                        slc1[n] = slice(keep_edge[n], None)
                        slc2[n] = slice(corn[1], None)
                # BUG FIX: modern NumPy requires tuples (not lists) for
                # multi-dimensional indexing.
                corners[tuple(slc1)] = infft[tuple(slc2)]
    return corners
@dask.delayed()
def voxelFFTCorners(infft , keep_edge = keep_edge):
#create a compact little voxel only containing the corners of the fft
#for storgage in hdf5
original_shape = infft.shape
#make a smaller voxel containing just the corners of the original
corners = np.zeros(2*np.array(keep_edge), np.complex)
boundaries = [[ keep_edge[i] , infft.shape[i] -keep_edge[i] ] for i in range(len(keep_edge))]
#print(boundaries)
#not spatial axes... prob should use nicer var names but whatever
#keep the 8 corners of the cube
for i,x in enumerate(boundaries[0]):
for j,y in enumerate(boundaries[1]):
for k,z in enumerate(boundaries[2]):
slc1 = [slice(None)] * len(infft.shape)
slc2 = [slice(None)] * len(infft.shape)
'''print('i, j, k', i, j, k)
print('x, y, z', x, y, z)'''
for n,corn in enumerate(zip([i,j,k],[x,y,z])):
#print(corn)
if corn[0] == 0:
slc1[n] = slice(0, corn[1])
#print('corner slice: ', slc1)
slc2[n] = slice(0, corn[1])
#print('voxel slice: ', slc2)
else:
slc1[n] = slice(keep_edge[n], None)
#print('corner slice: ', | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import sys
import subprocess
import os
import urllib
from zipfile import ZipFile
from shutil import rmtree
import urlparse
# Supported Spark releases and, for each, the Hadoop builds published for it
# (used to validate --spark-version / --hadoop-version).
spark_versions = \
    {
        "2.2.0": {"hadoop_versions": ["2.6", "2.7"]},
        "2.1.0": {"hadoop_versions": ["2.3", "2.4", "2.6", "2.7"]},
        "2.0.2": {"hadoop_versions": ["2.3", "2.4", "2.6", "2.7"]},
        "2.0.1": {"hadoop_versions": ["2.3", "2.4", "2.6", "2.7"]},
        "2.0.0": {"hadoop_versions": ["2.3", "2.4", "2.6", "2.7"]},
        "1.6.2": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4", "2.6"]},
        "1.6.1": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4", "2.6"]},
        "1.6.0": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4", "2.6"]},
        "1.5.2": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4", "2.6"]},
        "1.5.1": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4", "2.6"]},
        "1.5.0": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4", "2.6"]},
        "1.4.1": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4", "2.6"]},
        "1.4.0": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4", "2.6"]},
        "1.3.1": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4", "2.6"]},
        "1.3.0": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4"]},
        "1.2.2": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4"]},
        "1.2.1": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4"]},
        "1.2.0": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4"]},
        "1.1.1": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4"]},
        "1.1.0": {"hadoop_versions": ["1", "cdh4", "2.3", "2.4"]},
        "1.0.2": {"hadoop_versions": ["1", "cdh4"]},
        "1.0.1": {"hadoop_versions": ["1", "cdh4"]},
        "1.0.0": {"hadoop_versions": ["1", "cdh4"]},
    }
# Apache Toree (Jupyter kernel) download URLs, keyed by Spark major version.
toree_versions = \
    {
        "1" : "https://www.apache.org/dist/incubator/toree/0.1.0-incubating/toree-pip/apache-toree-0.1.0.tar.gz",
        "2" : "https://www.apache.org/dist/incubator/toree/0.2.0-incubating/toree-pip/toree-0.2.0.tar.gz",
        "3" : "https://www.apache.org/dist/incubator/toree/0.3.0-incubating/toree-pip/toree-0.3.0.tar.gz"
    }
# Command-line interface: the positional `action` selects the operation,
# the flags configure the Openstack instances and the optional services
# (Jupyter, Ignite, Elastic, Cassandra, ...) deployed on the cluster.
parser = argparse.ArgumentParser(description='Spark cluster deploy tools for Openstack.',
                                 formatter_class=argparse.RawDescriptionHelpFormatter,
                                 epilog='Usage real-life examples:\t\n'
                                        ' ./spark-openstack -k borisenko -i ~/.ssh/id_rsa -s 2 -t spark.large -a 20545e58-59de-4212-a83f-3703b31622cf -n computations-net -f external_network --async launch spark-cluster\n'
                                        ' ./spark-openstack --async destroy spark-cluster\n'
                                        'Look through README.md for more advanced usage examples.\n'
                                        'Apache 2.0, ISP RAS 2016 (http://ispras.ru/en).\n')
parser.add_argument('action', type=str,
                    choices=["launch", "destroy", "get-master", "config"])
parser.add_argument('cluster_name', help="Name for your cluster")
parser.add_argument('option', nargs='?')
# Openstack / instance parameters.
parser.add_argument('-k', '--key-pair')
parser.add_argument("-i", "--identity-file")
parser.add_argument("-s", "--slaves", type=int)
parser.add_argument("-n", "--virtual-network", help="Your virtual Openstack network id for cluster. If have only one network, you may not specify it")
parser.add_argument("-f", "--floating-ip-pool", help="Floating IP pool")
parser.add_argument("-t", "--instance-type")
parser.add_argument("-m", "--master-instance-type", help="master instance type, defaults to same as slave instance type")
parser.add_argument("-a", "--image-id")
parser.add_argument("-w", help="ignored")
# Software stack parameters.
parser.add_argument("--use-oracle-java", action="store_true", help="Use Oracle Java. If not set, OpenJDK is used")
parser.add_argument("--spark-worker-mem-mb", type=int, help="force worker memory value in megabytes (e.g. 14001)")
parser.add_argument("-j", "--deploy-jupyter", action='store_true', help="Should we deploy jupyter on master node.")
parser.add_argument("-jh", "--deploy-jupyterhub",action='store_true', help="Should we deploy jupyterHub on master node")
parser.add_argument("--spark-version", default="1.6.2", help="Spark version to use")
parser.add_argument("--hadoop-version", help="Hadoop version to use")
parser.add_argument("--boot-from-volume", default=False, help="Should the cluster be based on Cinder volumes. "
                                                              "Use it wisely")
parser.add_argument("--hadoop-user", default="ubuntu", help="User to use/create for cluster members")
parser.add_argument("--ansible-bin", help="path to ansible (and ansible-playbook, default='')")
parser.add_argument("--swift-username", help="Username for Swift object storage. If not specified, swift integration "
                                             "is commented out in core-site.xml. You can also use OS_SWIFT_USERNAME"
                                             "environment variable")
parser.add_argument("--swift-password", help="Username for Swift object storage. If not specified, swift integration "
                                             "is commented out in core-site.xml. You can also use OS_SWIFT_PASSWORD"
                                             "environment variable")
parser.add_argument("--nfs-share", default=[], nargs=2, metavar=("<nfs-path>", "<mount-path>"),
                    help="Should we mount some NFS share(s) on instances",
                    action='append')
parser.add_argument("--extra-jars", action="append", help="Add/replace extra jars to Spark (during launch). Jar file names must be different")
parser.add_argument("--deploy-ignite", action='store_true', help="Should we deploy Apache Ignite.")
parser.add_argument("--ignite-memory", default=50, type=float, help="Percentage of Spark worker memory to be given to Apache Ignite.")
parser.add_argument("--ignite-version", default="1.7.0", help="Apache Ignite version to use.")
parser.add_argument("--yarn", action='store_true', help="Should we deploy using Apache YARN.")
parser.add_argument("--deploy-elastic", action='store_true', help="Should we deploy ElasticSearch")
parser.add_argument("--es-heap-size", default='1g', help="ElasticSearch heap size")
parser.add_argument("--deploy-cassandra", action='store_true', help="Should we deploy Apache Cassandra")
parser.add_argument("--cassandra-version", default="2.2.10", help="Apache Cassandra version to use")
parser.add_argument("--skip-packages", action='store_true',
                    help="Skip package installation (Java, rsync, etc). Image must contain all required packages.")
parser.add_argument("--async", action="store_true",
                    help="Async Openstack operations (may not work with some Openstack environments)")
# Pass-through options for ansible.
parser.add_argument("--tags", help="Ansible: run specified tags")
parser.add_argument("--skip-tags", help="Ansible: skip specified tags")
#parser.add_argument("--step", action="store_true", help="Execute play step-by-step")
args, unknown = parser.parse_known_args()
# Forward the ansible tag filters verbatim to the playbook command line.
for tag_flag, tag_value in (("--tags", args.tags), ("--skip-tags", args.skip_tags)):
    if tag_value is not None:
        unknown.extend((tag_flag, tag_value))
# The master defaults to the same flavor as the slaves.
if args.master_instance_type is None:
    args.master_instance_type = args.instance_type
# Underscores are not valid in the generated hostnames.
if "_" in args.cluster_name:
    print("WARNING: '_' symbols in cluster name are not supported, replacing with '-'")
    args.cluster_name = args.cluster_name.replace('_', '-')
# Resolve the ansible executables, optionally rooted at --ansible-bin.
ansible_cmd = "ansible"
ansible_playbook_cmd = "ansible-playbook"
if args.ansible_bin is not None:
    ansible_cmd = os.path.join(args.ansible_bin, "ansible")
    ansible_playbook_cmd = os.path.join(args.ansible_bin, "ansible-playbook")
def get_cassandra_connector_jar(spark_version):
    """Return the local path of the spark-cassandra-connector jar for
    *spark_version* (1.6.x -> connector 1.6.8-s_2.10, else 2.0.3-s_2.11),
    downloading it into /tmp on first use.
    """
    # Bug fix: previously selected the URL from the global args.spark_version,
    # silently ignoring the spark_version parameter.
    spark_cassandra_connector_url = "http://dl.bintray.com/spark-packages/maven/datastax/spark-cassandra-connector/1.6.8-s_2.10/spark-cassandra-connector-1.6.8-s_2.10.jar" \
        if spark_version.startswith("1.6") \
        else "http://dl.bintray.com/spark-packages/maven/datastax/spark-cassandra-connector/2.0.3-s_2.11/spark-cassandra-connector-2.0.3-s_2.11.jar"
    spark_cassandra_connector_filename = "/tmp/" + os.path.basename(urlparse.urlsplit(spark_cassandra_connector_url).path)
    if not os.path.exists(spark_cassandra_connector_filename):
        print("Downloading Spark Cassandra Connector for Spark version {0}".format(spark_version))
        urllib.urlretrieve(spark_cassandra_connector_url, filename=spark_cassandra_connector_filename)
    return spark_cassandra_connector_filename
def get_elastic_jar():
    """Return the local path of the elasticsearch-hadoop jar, downloading and
    extracting it into /tmp on first use.
    """
    elastic_hadoop_url = "http://download.elastic.co/hadoop/elasticsearch-hadoop-5.5.0.zip"
    elastic_hadoop_filename = "/tmp/" + os.path.basename(urlparse.urlsplit(elastic_hadoop_url).path)
    elastic_dir = "/tmp/elasticsearch-hadoop/"
    archive_path = "elasticsearch-hadoop-5.5.0/dist/elasticsearch-hadoop-5.5.0.jar"
    elastic_path = os.path.join(elastic_dir, archive_path)
    # Fix: collapsed the redundant if/else — both branches returned elastic_path.
    if not os.path.exists(elastic_path):
        print("Downloading ElasticSearch Hadoop integration")
        urllib.urlretrieve(elastic_hadoop_url, filename=elastic_hadoop_filename)
        with ZipFile(elastic_hadoop_filename) as archive:
            archive.extract(archive_path, path=elastic_dir)
    return elastic_path
def make_extra_vars():
    """Build the --extra-vars dict handed to ansible/ansible-playbook from the
    parsed CLI args and the OpenStack OPENRC environment.

    Exits with a message when the OPENRC environment is not sourced.
    """
    extra_vars = dict()
    extra_vars["action"] = args.action
    extra_vars["n_slaves"] = args.slaves
    extra_vars["cluster_name"] = args.cluster_name
    extra_vars["os_image"] = args.image_id
    extra_vars["os_key_name"] = args.key_pair
    extra_vars["flavor"] = args.instance_type
    extra_vars["master_flavor"] = args.master_instance_type
    extra_vars["floating_ip_pool"] = args.floating_ip_pool
    extra_vars["virtual_network"] = args.virtual_network
    extra_vars["ansible_user"] = args.hadoop_user
    extra_vars["ansible_ssh_private_key_file"] = args.identity_file
    extra_vars["os_project_name"] = os.getenv('OS_PROJECT_NAME') or os.getenv('OS_TENANT_NAME')
    if not extra_vars["os_project_name"]:
        # Message fixed: was "you h aven't sources ... quiting".
        print("It seems that you haven't sourced your Openstack OPENRC file; quitting")
        exit(-1)
    extra_vars["os_auth_url"] = os.getenv('OS_AUTH_URL')
    if not extra_vars["os_auth_url"]:
        print("It seems that you haven't sourced your Openstack OPENRC file; quitting")
        exit(-1)
    extra_vars["hadoop_user"] = args.hadoop_user
    if args.action == 'launch':
        extra_vars["spark_version"] = args.spark_version
        if args.hadoop_version:
            if args.hadoop_version not in spark_versions[args.spark_version]["hadoop_versions"]:
                print("Chosen Spark version doesn't support selected Hadoop version")
                exit(-1)
            extra_vars["hadoop_version"] = args.hadoop_version
        else:
            # Default to the newest Hadoop supported by the chosen Spark.
            extra_vars["hadoop_version"] = spark_versions[args.spark_version]["hadoop_versions"][-1]
        print("Deploying Apache Spark %s with Apache Hadoop %s"
              % (extra_vars["spark_version"], extra_vars["hadoop_version"]))
    extra_vars["boot_from_volume"] = args.boot_from_volume
    # Swift credentials: CLI flag first, then environment; drop the key entirely
    # when neither is set so the playbook can test for its presence.
    extra_vars["os_swift_username"] = args.swift_username or os.getenv('OS_SWIFT_USERNAME') or None
    if not extra_vars["os_swift_username"]:
        del extra_vars["os_swift_username"]
    extra_vars["os_swift_password"] = args.swift_password or os.getenv('OS_SWIFT_PASSWORD') or None
    if not extra_vars["os_swift_password"]:
        del extra_vars["os_swift_password"]
    extra_vars["use_oracle_java"] = args.use_oracle_java
    extra_vars["deploy_jupyter"] = args.deploy_jupyter
    if args.deploy_jupyter:
        # Toree kernel version is keyed on the Spark major-version digit.
        extra_vars["toree_version"] = toree_versions[extra_vars["spark_version"][0]]
    extra_vars["deploy_jupyterhub"] = args.deploy_jupyterhub
    extra_vars["nfs_shares"] = [{"nfs_path": l[0], "mount_path": l[1]} for l in args.nfs_share]
    extra_vars["use_yarn"] = args.yarn
    #ElasticSearch deployment => --extra-args
    extra_vars["deploy_elastic"] = args.deploy_elastic
    extra_vars["es_heap_size"] = args.es_heap_size
    #Cassandra deployment => --extra-args
    extra_vars["deploy_cassandra"] = args.deploy_cassandra
    extra_vars["cassandra_version"] = args.cassandra_version
    extra_vars["skip_packages"] = args.skip_packages
    # Bug fix: "async" is a reserved word in Python 3.7+, so `args.async` is a
    # SyntaxError; read the attribute created by the --async flag via getattr.
    extra_vars["sync"] = "async" if getattr(args, "async") else "sync"
    if args.extra_jars is None:
        args.extra_jars = []
    extra_jars = list()
    def add_jar(path):
        # Record a jar by basename + absolute path for the playbook.
        extra_jars.append({"name": os.path.basename(path), "path": os.path.abspath(path)})
    for jar in args.extra_jars:
        if os.path.isdir(jar):
            for f in os.listdir(jar):
                add_jar(os.path.join(jar, f))
        else:
            add_jar(jar)
    # Obtain Cassandra connector jar if cassandra is deployed
    if args.deploy_cassandra:
        cassandra_jar = get_cassandra_connector_jar(args.spark_version)
        add_jar(cassandra_jar)
    if args.deploy_elastic:
        elastic_jar = get_elastic_jar()
        add_jar(elastic_jar)
    extra_vars["extra_jars"] = extra_jars
    extra_vars["deploy_ignite"] = args.deploy_ignite
    extra_vars["ignite_version"] = args.ignite_version
    return extra_vars
def err(msg):
    """Write *msg* to stderr and abort the process with exit status 1."""
    sys.stderr.write("%s\n" % (msg,))
    sys.exit(1)
def parse_host_ip(resp):
    """parse ansible debug output with var=hostvars[inventory_hostname].ansible_ssh_host and return host"""
    # Expected shape: <host> | SUCCESS => { "ansible_ssh_host": "<ip>" }
    around_arrow = resp.split("=>")
    if len(around_arrow) != 2:
        err("unexpected ansible output")
    key_and_value = around_arrow[1].split(":")
    if len(key_and_value) != 2:
        err("unexpected ansible output")
    quoted = key_and_value[1].split('"')
    if len(quoted) != 3:
        err("unexpected ansible output")
    return quoted[1]
def get_master_ip():
    """Ask ansible's debug module for the master host's ansible_ssh_host and
    parse the IP address out of the textual response."""
    res = subprocess.check_output([ansible_cmd,
                                   "-i", "openstack_inventory.py",
                                   "--extra-vars", repr(make_extra_vars()),
                                   "-m", "debug", "-a", "var=hostvars[inventory_hostname].ansible_ssh_host",
                                   args.cluster_name + "-master"])
    return parse_host_ip(res)
def ssh_output(host, cmd):
    """Run *cmd* on *host* as user ubuntu over ssh (host-key checking disabled)
    and return its output; check_output raises on non-zero exit."""
    return subprocess.check_output(["ssh", "-q", "-t", "-o", "StrictHostKeyChecking=no",
                                    "-o", "UserKnownHostsFile=/dev/null",
                                    "-i", args.identity_file, "ubuntu@" + host, cmd])
def ssh_first_slave(master_ip, cmd):
    """Run *cmd* on slave 1, hopping through the master; single quotes in the
    command are shell-escaped for the inner ssh invocation."""
    #can't do `head -n1 /opt/spark/conf/slaves` since it's not deployed yet
    return ssh_output(master_ip, "ssh %s-slave-1 '%s'" % (args.cluster_name, cmd.replace("'", "'\\''")))
#FIXME: copied from https://github.com/amplab/spark-ec2/blob/branch-1.5/deploy_templates.py
def get_worker_mem_mb(master_ip):
    """Memory (MB) to give each Spark worker: the explicit
    --spark-worker-mem-mb override if set, otherwise the first slave's RAM
    minus a size-dependent reserve for the OS and daemons."""
    if args.spark_worker_mem_mb is not None:
        return args.spark_worker_mem_mb
    mem_command = "cat /proc/meminfo | grep MemTotal | awk '{print $2}'"
    ram_mb = int(ssh_first_slave(master_ip, mem_command)) // 1024
    # Leave some RAM for the OS, Hadoop daemons, and system caches:
    # (threshold GB, reserve GB) pairs, largest first.
    for limit_gb, reserve_gb in ((100, 15), (60, 10), (40, 6), (20, 3), (10, 2)):
        if ram_mb > limit_gb * 1024:
            return ram_mb - reserve_gb * 1024
    return max(512, ram_mb - 1300)  # small node: keep 1.3 GB back, floor at 512 MB
def get_master_mem(master_ip):
    """Memory for the master, as a string in MB: total RAM minus a
    size-dependent reserve for the OS and daemons."""
    mem_command = "cat /proc/meminfo | grep MemTotal | awk '{print $2}'"
    ram_mb = int(ssh_output(master_ip, mem_command)) // 1024
    # Leave some RAM for the OS, Hadoop daemons, and system caches:
    # (threshold GB, reserve GB) pairs, largest first.
    for limit_gb, reserve_gb in ((100, 15), (60, 10), (40, 6), (20, 3), (10, 2)):
        if ram_mb > limit_gb * 1024:
            return str(ram_mb - reserve_gb * 1024)
    # Small node: keep 1.3 GB back, floor at 512 MB.
    return str(max(512, ram_mb - 1300))
def get_slave_cpus(master_ip):
    """Number of CPU cores on the first slave (via `nproc` over ssh)."""
    return int(ssh_first_slave(master_ip, "nproc"))
cmdline = [ansible_playbook_cmd]
cmdline.extend(unknown)
extra_vars = make_extra_vars()
if args.action == "launch":
cmdline_create = cmdline[:]
cmdline_create.extend(["create.yml", | |
"""The code editor of GraphDonkey, with a few features:
- Syntax Highlighting
- Error Highlighting
- Line Numbers
Based on https://stackoverflow.com/questions/2443358/how-to-add-lines-numbers-to-qtextedit
- Selection/Line Events (Duplicate, Copy, Cut, Comment/Uncomment, Auto-Indent, Indent/Unindent...)
- Parenthesis Matching
Based on QtQuarterly 31: https://doc.qt.io/archives/qq/QtQuarterly31.pdf
- Auto-Expand Squiggly and Square brackets
Author: <NAME>
Date: 12/14/2019
"""
from dbm import error
from PyQt5 import QtGui, QtWidgets, QtCore
from main.extra import Constants, left
from main.extra.IOHandler import IOHandler
from main.editor.Highlighter import BaseHighlighter
from main.extra.GraphicsView import GraphicsView
from main.Preferences import bool
from main.plugins import PluginLoader
from main.editor.Intellisense import Types, ICONS
import os
# Process-wide singletons: the plugin registry and the persisted preferences.
pluginloader = PluginLoader.instance()
Config = IOHandler.get_preferences()
class StatusBar(QtWidgets.QStatusBar):
    """Editor status bar: message area, cursor-position indicator and combo
    boxes for line ending, encoding, file type and render engine."""

    def __init__(self, wrapper, parent=None):
        super(StatusBar, self).__init__(parent)
        self.wrapper = wrapper
        self.statusMessage = QtWidgets.QLabel("")
        self.statusMessage.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignBaseline)
        self.positionIndicator = QtWidgets.QLabel(":")
        self.positionIndicator.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)

        # Line-ending selector; each item's data holds the actual separator.
        self.leCombo = QtWidgets.QComboBox()
        self.seps = {
            "Posix (LF)": '\n',
            "Mac OS [Pre-OSX] (CR)": '\r',
            "Windows (CRLF)": '\r\n'
        }
        for label, separator in self.seps.items():
            self.leCombo.addItem(label, separator)
        self.setLineSep(os.linesep)

        # Encoding selector; item data is the codec name.
        self.encCombo = QtWidgets.QComboBox()
        for label, codec in (("UTF-8", "utf-8"), ("UTF-16", "utf-16"),
                             ("ASCII", "ascii"), ("ISO-8859-1", "latin1")):
            self.encCombo.addItem(label, codec)

        self.ftCombo = QtWidgets.QComboBox()
        rdr = QtWidgets.QLabel("Render with:")
        rdr.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        self.rendererCombo = QtWidgets.QComboBox()

        # Permanent widgets left to right; the int is the stretch factor.
        self.addPermanentWidget(QtWidgets.QLabel(" "))
        self.addPermanentWidget(self.statusMessage, 7)
        self.addPermanentWidget(self.positionIndicator, 1)
        self.addPermanentWidget(self.leCombo, 1)
        self.addPermanentWidget(self.encCombo, 1)
        self.addPermanentWidget(self.ftCombo, 0)
        self.addPermanentWidget(rdr, 1)
        self.addPermanentWidget(self.rendererCombo, 0)
        self.addPermanentWidget(QtWidgets.QLabel(" "))

    def setLineSep(self, sep):
        """Select the combo entry whose separator equals *sep* ("" if unknown)."""
        label_by_sep = {chars: label for label, chars in self.seps.items()}
        self.leCombo.setCurrentText(label_by_sep.get(sep, ""))
class EditorWrapper(QtWidgets.QWidget):
    """Couples a CodeEditor with its StatusBar and keeps the file-type and
    render-engine selectors in sync with the loaded plugins."""

    def __init__(self, parent):
        super(EditorWrapper, self).__init__(parent)
        self._layout = QtWidgets.QGridLayout()
        self.editor = CodeEditor(self)
        self.mainwindow = parent
        self.types = []
        self.statusBar = StatusBar(self)
        # Convenience aliases for the status-bar combo boxes.
        self.filetype = self.statusBar.ftCombo
        self.engine = self.statusBar.rendererCombo
        self.linesep = self.statusBar.leCombo
        self.encoding = self.statusBar.encCombo
        self._layout.setContentsMargins(0, 0, 0, 0)
        self._layout.addWidget(self.editor, 0, 0, 1, -1)
        self.filetype.currentIndexChanged.connect(self.alter)
        self.setTypes()
        self.setLayout(self._layout)

    def setTypes(self):
        """(Re)populate the file-type combo from the plugins, keeping the
        current selection when it still exists."""
        self.types = pluginloader.getFileTypes()
        txt = self.filetype.currentText()
        self.filetype.clear()
        # Fix: loop variable renamed from `type` — don't shadow the builtin.
        for key in self.types:
            name, klass = self.types[key]
            self.filetype.addItem(name, klass)
        # TODO: determine filetype based on extension
        self.filetype.setCurrentText(txt)

    def alter(self, idx):
        """File type changed: install the matching highlighter and refresh the
        list of render engines for that type."""
        self.engine.clear()
        if idx >= 0:
            data = self.filetype.itemData(idx)
            self.editor.alter(data(self.editor.document(), self.editor))
            self.editor.highlighter.rehighlight()
            ens = pluginloader.getEnginesForFileType(self.filetype.itemText(idx))
            for en in ens:
                self.engine.addItem(en)
            self.engine.setCurrentText(Config.value("view/engine"))
            self.mainwindow.displayGraph()

    def setType(self, type):
        """Select the file type with key *type*.

        The parameter keeps its historical name (shadowing the builtin) so
        keyword callers remain compatible.
        """
        self.filetype.setCurrentText(self.types[type][0])
class CodeEditor(QtWidgets.QPlainTextEdit):
    """The GraphDonkey code editor: line numbers, syntax and error
    highlighting, bracket matching, completion and line-based edit helpers."""

    def __init__(self, parent=None):
        super(CodeEditor, self).__init__(parent)
        # `parent` is the EditorWrapper; its own parent is the main window.
        self.mainwindow = parent.parent()
        self.wrapper = parent
        self.lineNumberArea = LineNumberArea(self)
        # Keep main-window undo/redo actions in sync with the document state.
        self.undoAvailable.connect(self.mainwindow.setUndoEnabled)
        self.redoAvailable.connect(self.mainwindow.setRedoEnabled)
        self.blockCountChanged.connect(self.lineNrChanged)
        self.updateRequest.connect(self.updateLineNumberArea)
        self.cursorPositionChanged.connect(self.positionChangedSlot)
        self.textChanged.connect(self.textChangedSlot)
        self.updateLineNumberAreaWidth()
        self.highlightCurrentLine()
        self.highlighter = BaseHighlighter(self.document(), self)
        self.matches = []  # extra selections for search matches
        self.errors = []   # (start, size, message) tuples shown as tooltips
        self.setMouseTracking(True)  # required for hover error tooltips
        self.filename = ""       # path of the open file ("" = unsaved buffer)
        self.filecontents = ""   # last-saved text, compared by isSaved()
        self.completer = None
        self.setCompleter()
        self.treeView = None
        # Optimize performance of user experience; big files are slow when editing every change event
        self.stTimer = QtCore.QTimer(self)
        self.stTimer.setSingleShot(True)
        self.stTimer.timeout.connect(self.stoppedTyping)
    def textChangedSlot(self):
        """Ensure the document ends with a line ending when the
        "editor/emptyline" preference is enabled."""
        txt = self.toPlainText()
        if bool(Config.value("editor/emptyline")) and not txt.endswith(Constants.LINE_ENDING) \
                and not txt.endswith('\n'):
            curs = self.textCursor()
            curs.movePosition(QtGui.QTextCursor.End)
            curs.insertText(Constants.LINE_ENDING)
            # If the user's cursor sits at the very end of the document, step
            # back before the line ending that was just appended.
            curs = self.textCursor()
            curs.movePosition(QtGui.QTextCursor.End, QtGui.QTextCursor.KeepAnchor)
            if len(curs.selectedText()) == 0:
                curs.movePosition(QtGui.QTextCursor.PreviousCharacter)
                self.setTextCursor(curs)
    def alter(self, highlighter):
        """Replace the current highlighter (called when the file type changes);
        the old one is released via deleteLater."""
        self.highlighter.deleteLater()
        self.highlighter = highlighter
    def convert(self, engine):
        """Convert the editor contents through the current parser for *engine*,
        passing the cursor's 1-based line and column along for context."""
        curs = self.textCursor()
        return self.highlighter.parser.convert(self.toPlainText(), engine,
                                               line=curs.block().blockNumber() + 1, col=curs.columnNumber())
def stoppedTyping(self):
self.highlighter.storeErrors()
if bool(Config.value("editor/useParser", True)):
self.highlightErrors()
    def positionChangedSlot(self):
        """Cursor moved: debounce a re-parse and refresh all highlights."""
        # Restart the single-shot timer; stoppedTyping() fires only after the
        # configured quiet period (default 100 ms).
        self.stTimer.start(int(Config.value("editor/autoreparse", 100)))
        if bool(Config.value("editor/highlightCurrentLine")):
            self.highlightCurrentLine()
        if bool(Config.value("editor/parentheses")):
            self.matchBrackets()
        self.highlightMatches()
        self.updateIndicator()
        self.mainwindow.updateTitle()
    def lineNrChanged(self):
        """Block count changed: resize the line-number gutter and rehighlight."""
        self.updateLineNumberAreaWidth()
        self.highlighter.rehighlight()
def contextMenuEvent(self, event: QtGui.QContextMenuEvent):
menu = QtWidgets.QMenu(self)
menu.addAction(self.mainwindow.action_Undo)
menu.addAction(self.mainwindow.action_Redo)
menu.addSeparator()
menu.addAction(self.mainwindow.action_Copy)
menu.addAction(self.mainwindow.action_Paste)
menu.addAction(self.mainwindow.action_Cut)
menu.addAction(self.mainwindow.action_Delete)
menu.addSeparator()
menu.addAction(self.mainwindow.action_Select_All)
menu.exec_(event.globalPos())
def isSaved(self):
"""Returns True if the file was saved."""
txt = self.toPlainText()
if self.filename != "":
return txt == self.filecontents
if bool(Config.value("editor/emptyline")):
txt = txt[:-len(os.linesep)]
return txt == ""
    def save(self):
        """Remember the current text as the saved contents and refresh the
        parse-tree view if it is currently shown."""
        self.filecontents = self.toPlainText()
        if self.treeView is not None and self.treeView.isVisible():
            self.viewParseTree(False)
    def clearContents(self):
        """Remove all text by replacing the full selection (stays undoable)."""
        self.selectAll()
        self.insertPlainText("")
    def clearFile(self):
        """Detach the buffer from its file: forget path and saved contents."""
        self.filename = ""
        self.filecontents = ""
    def setCompleter(self):
        """Create and configure the auto-completion popup (a chromeless table
        view with substring, case-insensitive matching)."""
        self.completer = QtWidgets.QCompleter(self)
        table = QtWidgets.QTableView()
        table.horizontalHeader().hide()
        table.verticalHeader().hide()
        table.setShowGrid(False)
        table.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
        self.completer.setPopup(table)
        self.completer.setModel(QtGui.QStandardItemModel())
        # Match anywhere in the candidate; the model is declared pre-sorted
        # case-insensitively so the completer can binary-search it.
        self.completer.setFilterMode(QtCore.Qt.MatchContains)
        self.completer.setModelSorting(QtWidgets.QCompleter.CaseInsensitivelySortedModel)
        self.completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
        self.completer.setWrapAround(False)
        self.completer.setWidget(self)
        self.completer.setCompletionMode(QtWidgets.QCompleter.UnfilteredPopupCompletion)
def insertCompletion(self, completion):
comp, _ = self.highlighter.parser.visitor.completer.get(completion)
if len(comp) == 0:
text = completion
else: # In case of snippets, use the value (not the name)
text = comp[0][0] if comp[0][2] is None else comp[0][2]
cursor = self.textCursor()
cursor.select(QtGui.QTextCursor.WordUnderCursor)
cursor.insertText(text)
self.setTextCursor(cursor)
    def encapsulateText(self, o, c):
        """Wrap the current selection in the bracket pair *o* ... *c*.

        With an empty selection the closing character is skipped when the
        bracket data indicates the freshly inserted opener is already balanced
        by a later closing bracket. The text between the brackets ends up
        selected. All edits form one undo step.
        """
        curs = self.textCursor()
        s = curs.selectionStart()
        e = curs.selectionEnd()
        curs.beginEditBlock()
        curs.setPosition(s)
        self.setTextCursor(curs)
        self.insertPlainText(o)
        mc = True  # whether the closing character still has to be inserted
        if s == e:
            # No selection: look for an existing closing bracket that would
            # already match the opener we just inserted.
            ci = curs.block().userData().indexOf(curs.positionInBlock())
            cpos = self.getClosingBracketPos((o, c), curs.block(), ci)
            if cpos >= 0:
                curs.setPosition(cpos)
                oi = curs.block().userData().indexOf(curs.positionInBlock())
                opos = self.getOpeningBracketPos((o, c), curs.block(), oi - 1)
                # That closer belongs to our new opener (or none) -> skip insert.
                if opos == s or opos == -1:
                    mc = False
        if mc:
            curs.setPosition(e + len(o))
            self.setTextCursor(curs)
            self.insertPlainText(c)
        # Select what now lies between the brackets.
        curs.setPosition(s + len(o))
        curs.setPosition(e + len(o), QtGui.QTextCursor.KeepAnchor)
        curs.endEditBlock()
        self.setTextCursor(curs)
    def keyPressEvent(self, event: QtGui.QKeyEvent):
        """Central keyboard handling: completer navigation, smart Enter/Home,
        and paired-bracket insertion/deletion."""
        paired = pluginloader.getPairedBrackets(self.wrapper.filetype.currentText())
        if self.completer.popup().isVisible():
            # Completion popup open: Enter/Return/Tab accept, Escape cancels,
            # anything else is typed normally and re-triggers completion.
            if event.key() in [QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return, QtCore.Qt.Key_Tab]:
                completion = self.completer.popup().currentIndex().data(QtCore.Qt.UserRole)
                self.insertCompletion(completion)
                self.completer.popup().hide()
            elif event.key() == QtCore.Qt.Key_Escape:
                self.completer.popup().hide()
            else:
                QtWidgets.QPlainTextEdit.keyPressEvent(self, event)
                self.completer.popup().hide()
                self.complete()
        elif event.key() in [QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return]:
            # Smart newline: insert + auto-indent; when the cursor sits right
            # before a closing bracket at start of line, open an extra indented
            # line between the brackets.
            self.insertPlainText(Constants.LINE_ENDING)
            self.autoIndent()
            cursor = self.textCursor()
            cursor.beginEditBlock()
            pos = cursor.position()
            cursor.movePosition(QtGui.QTextCursor.EndOfLine, QtGui.QTextCursor.KeepAnchor)
            txt = cursor.selectedText().lstrip()
            cursor.setPosition(pos)
            cursor.movePosition(QtGui.QTextCursor.StartOfLine, QtGui.QTextCursor.KeepAnchor)
            if len(txt) > 0 and txt[0] in [x[1] for x in paired] and len(cursor.selectedText()) == 0:
                cursor.setPosition(pos)
                cursor.insertText(Constants.LINE_ENDING)
                cursor.movePosition(QtGui.QTextCursor.Up, QtGui.QTextCursor.KeepAnchor)
                self.setTextCursor(cursor)
                self.autoIndent()
                cursor.setPosition(pos)
                cursor.movePosition(QtGui.QTextCursor.EndOfLine)
                self.setTextCursor(cursor)
            cursor.endEditBlock()
        elif event.key() == QtCore.Qt.Key_Home:
            # Smart Home: jump to the first non-whitespace character unless the
            # text left of the cursor is whitespace only, then go to column 0.
            cursor = self.textCursor()
            end = cursor.selectionEnd()
            cursor.movePosition(QtGui.QTextCursor.StartOfLine, QtGui.QTextCursor.KeepAnchor)
            txt = cursor.selectedText()
            if len(txt.strip()) == 0 != len(txt):
                cursor.movePosition(QtGui.QTextCursor.StartOfLine)
            else:
                cursor.movePosition(QtGui.QTextCursor.StartOfLine)
                cursor.movePosition(QtGui.QTextCursor.NextWord)
            cursor.setPosition(end, QtGui.QTextCursor.KeepAnchor)
            self.setTextCursor(cursor)
        elif event.key() == QtCore.Qt.Key_Backspace:
            # When deleting between an empty bracket pair, remove both halves.
            if bool(Config.value("editor/pairedBrackets")):
                curs = self.textCursor()
                s = curs.selectionStart()
                e = curs.selectionEnd()
                if s == e:
                    curs.movePosition(QtGui.QTextCursor.PreviousCharacter, QtGui.QTextCursor.KeepAnchor)
                    prev = curs.selectedText()
                    curs.setPosition(s)
                    curs.movePosition(QtGui.QTextCursor.NextCharacter, QtGui.QTextCursor.KeepAnchor)
                    next = curs.selectedText()
                    curs.setPosition(s)
                    if (prev, next) in paired:
                        # Select both bracket characters so the default
                        # backspace handling removes the whole pair.
                        curs.movePosition(QtGui.QTextCursor.PreviousCharacter)
                        curs.movePosition(QtGui.QTextCursor.NextCharacter, QtGui.QTextCursor.KeepAnchor, 2)
                        self.setTextCursor(curs)
            QtWidgets.QPlainTextEdit.keyPressEvent(self, event)
        elif event.key() not in [QtCore.Qt.Key_Delete, QtCore.Qt.Key_Tab]:
            # NOTE(review): Delete and Tab fall through with no default
            # handling here — presumably handled via actions elsewhere; verify.
            if bool(Config.value("editor/pairedBrackets")):
                et = event.text()
                if et in [x[0] for x in paired]:  # OPEN
                    pair = [x for x in paired if et in x][0]
                    if pair[0] == pair[1]:
                        # Symmetric pair (e.g. quotes): typing the character in
                        # front of an identical one just steps over it.
                        curs = self.textCursor()
                        s = curs.selectionStart()
                        e = curs.selectionEnd()
                        if s == e:
                            curs.setPosition(e)
                            curs.movePosition(QtGui.QTextCursor.NextCharacter, QtGui.QTextCursor.KeepAnchor)
                            txt = curs.selectedText()
                            if txt != pair[0]:
                                self.encapsulateText(*pair)
                            else:
                                curs.setPosition(e)
                                curs.movePosition(QtGui.QTextCursor.NextCharacter)
                                self.setTextCursor(curs)
                        else:
                            self.encapsulateText(*pair)
                    else:
                        self.encapsulateText(*pair)
                elif et in [x[1] for x in paired]:  # CLOSE
                    # Typing a closer in front of the same closer steps over it
                    # instead of inserting a duplicate.
                    pair = [x for x in paired if et in x][0]
                    curs = self.textCursor()
                    s = curs.selectionStart()
                    e = curs.selectionEnd()
                    if s == e:
                        curs.setPosition(e)
                        curs.movePosition(QtGui.QTextCursor.NextCharacter, QtGui.QTextCursor.KeepAnchor)
                        txt = curs.selectedText()
                        if txt != pair[1]:
                            self.insertPlainText(pair[1])
                        else:
                            curs.setPosition(e)
                            curs.movePosition(QtGui.QTextCursor.NextCharacter)
                            self.setTextCursor(curs)
                    else:
                        self.insertPlainText(pair[1])
                else:
                    QtWidgets.QPlainTextEdit.keyPressEvent(self, event)
            else:
                QtWidgets.QPlainTextEdit.keyPressEvent(self, event)
    def event(self, event: QtCore.QEvent):
        """Refuse ShortcutOverride events so application-wide shortcuts are not
        swallowed by the text edit."""
        if event.type() == QtCore.QEvent.ShortcutOverride:
            return False
        return QtWidgets.QPlainTextEdit.event(self, event)
    def setText(self, text):
        """Replace the whole document contents with *text*."""
        self.document().setPlainText(text)
    def mouseMoveEvent(self, event: QtGui.QMouseEvent):
        """Show the stored parse-error message as a tooltip when the mouse
        hovers inside an error span."""
        pos = event.pos()
        cursor = self.cursorForPosition(pos)
        cursor.select(QtGui.QTextCursor.WordUnderCursor)
        tpos = cursor.position()
        for start, size, msg in self.errors:
            if start <= tpos <= start + size:
                # NOTE(review): no break — with overlapping spans the last
                # matching error's tooltip wins; confirm that is intended.
                QtWidgets.QToolTip.showText(event.globalPos(), msg, self)
        return QtWidgets.QPlainTextEdit.mouseMoveEvent(self, event)
def delete(self):
cursor = self.textCursor()
txt = cursor.selectedText()
if txt == "":
cursor.movePosition(QtGui.QTextCursor.NextCharacter, QtGui.QTextCursor.KeepAnchor)
self.setTextCursor(cursor)
self.insertPlainText("")
def _cc(self):
cursor = self.textCursor()
txt = cursor.selectedText()
if txt == "":
cursor.movePosition(QtGui.QTextCursor.StartOfLine)
cursor.movePosition(QtGui.QTextCursor.EndOfLine, QtGui.QTextCursor.KeepAnchor)
cursor.movePosition(QtGui.QTextCursor.Right, QtGui.QTextCursor.KeepAnchor)
self.setTextCursor(cursor)
    def copy(self):
        """Copy the selection, or the whole current line when none exists."""
        self._cc()
        QtWidgets.QPlainTextEdit.copy(self)
    def cut(self):
        """Cut the selection, or the whole current line when none exists."""
        self._cc()
        QtWidgets.QPlainTextEdit.cut(self)
    def duplicate(self):
        """Duplicate the selection, or the whole current line when nothing is
        selected; the inserted copy becomes the new selection."""
        cursor = self.textCursor()
        txt = cursor.selectedText()
        posE = cursor.selectionEnd()
        if txt == "":  # Duplicate line
            cursor.movePosition(QtGui.QTextCursor.StartOfLine)
            cursor.movePosition(QtGui.QTextCursor.EndOfLine, QtGui.QTextCursor.KeepAnchor)
            txt = cursor.selectedText()
            cursor.insertText(txt + Constants.LINE_ENDING + txt)
        else:  # Duplicate selection
            cursor.insertText(txt + txt)
        # Select the freshly inserted copy.
        cursor.setPosition(posE)
        cursor.setPosition(posE + len(txt), QtGui.QTextCursor.KeepAnchor)
        self.setTextCursor(cursor)
    def lines(self, func, state=None):
        """Apply ``func(line, state) -> (state, new_line)`` to every line
        touched by the selection, replacing them in place.

        *state* threads through the calls (e.g. comment() uses it to decide
        comment vs uncomment from the first line). The selection is restored
        over the transformed lines and the cursor returned. One undo step.
        """
        cursor = self.textCursor()
        posS = cursor.selectionStart()
        posE = cursor.selectionEnd()
        cursor.beginEditBlock()
        # Expand the range to whole lines.
        cursor.setPosition(posS)
        cursor.movePosition(QtGui.QTextCursor.StartOfLine)
        cursor.setPosition(posE, QtGui.QTextCursor.KeepAnchor)
        cursor.movePosition(QtGui.QTextCursor.EndOfLine, QtGui.QTextCursor.KeepAnchor)
        otxt = cursor.selectedText()
        txt = otxt.split(Constants.LINE_ENDING)
        add = 0  # length delta of the first line; shifts the selection start
        for i in range(len(txt)):
            state, line = func(txt[i], state)
            if i == 0:
                add = len(line) - len(txt[i])
            txt[i] = line
        ntxt = Constants.LINE_ENDING.join(txt)
        # Re-select the same whole-line range and replace it with the result.
        cursor.setPosition(posS)
        cursor.movePosition(QtGui.QTextCursor.StartOfLine)
        cursor.setPosition(posE, QtGui.QTextCursor.KeepAnchor)
        if cursor.selectedText() == Constants.LINE_ENDING or posS == posE:
            cursor.movePosition(QtGui.QTextCursor.EndOfLine, QtGui.QTextCursor.KeepAnchor)
        cursor.insertText(ntxt)
        # Restore a selection covering the transformed text.
        cursor.setPosition(posS + add)
        cursor.setPosition(posE + len(ntxt) - len(otxt), QtGui.QTextCursor.KeepAnchor)
        cursor.endEditBlock()
        self.setTextCursor(cursor)
        return cursor
    def comment(self):
        """Toggle line comments over the selected lines.

        The first line fixes the direction for the whole range (via *state*):
        existing "//" or "#" prefixes are stripped, otherwise "//" is added.
        """
        # TODO: identify indents before/after comment signs
        def cmnt(line, state):
            txt = line
            if txt[:2] == "//" and state in [True, None]:
                txt = txt[2:]
                state = True  # uncommenting
            elif len(txt) > 0 and txt[0] == "#" and state in [True, None]:
                txt = txt[1:]
                state = True
            elif state in [False, None]:
                txt = "//" + txt
                state = False  # commenting
            return state, txt
        self.lines(cmnt)
def indent(self):
tab = '\t'
if bool(Config.value("editor/spacesOverTabs")):
tab = ' ' * int(Config.value("editor/tabwidth"))
def func(line, state):
return state, tab + line
cursor = self.textCursor()
txt = cursor.selectedText()
if txt != "":
self.lines(func)
else:
self.insertPlainText(tab)
def unindent(self):
# 1) take all left whitespace;
# 2) replace tabs with spaces (w.r.t. Config);
# 3) remove 1 character;
# 4) reduce string length until length % tablength == 0;
# 5) this is new whitespace length
ws = (' ', '\t')
useSpaces = bool(Config.value("editor/spacesOverTabs"))
tablength = int(Config.value("editor/tabwidth"))
def func(line, state):
txt = line
lft = left(txt, ws)
if useSpaces:
lft | |
# Repository: milesluigi/ipv6gaw
#!/usr/bin/env python3
# -*- coding: utf-8 -*
"""
IPv6 Global Address Watcher. This is a script that keeps global IPv6 addresses consistent. The circumstances for
using this script are:
A) The system has interface(s) that will only have a single global IPv6 prefix.
B) The system is on a network with an ISP / Network Provider that changes that interface's IPv6 prefix often.
C) The server admin would like the system to have a consistent IPv6 address (hostbits) despite those circumstances.
Must have the ip command installed! typically located at /sbin/ip. Also must have permission to add and remove ip
addresses from the system.
The "ip token set" command, which normally is intended for this scenario, doesn't update when ISP frequently changes
the IPv6 prefix. Also, this script will delete all of the old global ipv6 addresses when a new prefix is assigned if
configured to do so; therefore, there is no chance the system keeps wrongly using the old prefixes and encounters
unnecessary downtime.
The basic overview of the script is that it starts the "ip monitor address" command and watches for new ipv6 address
assignments. If it sees a new ipv6 address that also has a new ipv6 prefix (detected by the valid lifetime) it will
assign a new ipv6 address with the new prefix and token (hostbits) configured in class Settings, and purge all global
ipv6 addresses with the old prefix, assumed to be invalid because of the new prefix.
It will not remove any additional addresses the server assigns such as addresses from privacy extensions or EUI-64, nor
will it affect IPv6 addresses that aren't global, such as ULA or link-local addresses.
"""
try:
import sys
import os
import logging
import time
import subprocess
import select
import ipaddress
import json
except ImportError as e:
logging.error("Could not import a module! Please ensure all libraries are installed.",
exc_info=True)
sys.exit(1)
__author__ = "<NAME>"
__copyright__ = "Copyright 2020"
__credits__ = ["<NAME>", ]
__license__ = "MIT"
__version__ = "0.618034"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Initial Git Upload"
class Settings:
    """Module-wide state: logger, parsed JSON config, and builders for the
    /sbin/ip command lines.

    NOTE(review): the ``if __name__ == "__main__"`` section below sits inside
    the class body, i.e. CLI parsing and config loading execute while the
    class is being defined — confirm this is intentional.
    """
    # Initialization
    log = logging.getLogger('ipv6globaladdressmonitor')
    set_log_level = False  # guards against configuring the log level twice
    config_file = ""       # explicit config path from -c; "" = search defaults
    config = {}            # parsed JSON configuration
    _ip_command = "/sbin/ip"

    if __name__ == "__main__":
        import argparse
        parser = argparse.ArgumentParser(description="IPv6 Global Address Monitor.")
        parser.add_argument("-c", "--config", help="JSON Config File to Use")
        parser.add_argument("-v", "--verbose",
                            help="Sets logging level to logging.DEBUG instead of logging.INFO", action='store_true')
        cmdargs = parser.parse_args()
        if cmdargs.verbose:
            log.setLevel(logging.DEBUG)
        else:
            log.setLevel(logging.INFO)
        set_log_level = True
        if cmdargs.config:
            config_file = cmdargs.config

    # Imported as a module (level not chosen above): default to INFO.
    if not set_log_level:
        log.setLevel(logging.INFO)
        set_log_level = True

    # Locate the configuration: the explicit -c path, else cwd, ~/.config, /etc.
    if not config_file:
        _config_file_name = "ipv6gaw.json"
        _config_file_locations = [
            os.path.join(os.getcwd(), _config_file_name),
            os.path.join(os.path.expanduser("~"), ".config", "."+_config_file_name),
            os.path.join("/etc/", _config_file_name),
        ]
    else:
        _config_file_locations = [config_file]
    for _config_file_location in _config_file_locations:
        try:
            with open(_config_file_location, "r") as f:
                config = json.loads(f.read())
            break
        except FileNotFoundError as e:
            continue
        except json.JSONDecodeError as e:
            raise UserWarning(
                f"Could not read configuration file {_config_file_location}"
            )
    else:
        # for/else: no break happened, i.e. no candidate file could be read.
        raise UserWarning(
            f"No Settings File was found. Please specify config file with -c or create one in a default directory.",
        )

    # NOTE(review): set_log_level is always True by this point (set above), so
    # this WARNING fallback looks unreachable — confirm intent.
    if not set_log_level:
        log.setLevel(logging.WARNING)
        set_log_level = True
    if not config:
        logging.warning("You may need to define settings in ipv6gaw.Settings.config prior to use via import.")

    # The helpers below take no self/cls; they are invoked as plain functions
    # through ``Settings.<name>(...)`` (effectively static methods).

    def _which_ip_command() -> list:
        # Command that streams address-change events.
        return [Settings._ip_command, "monitor", "address"]

    def _list_ip_command(interface: str) -> list:
        # Command that lists the addresses currently on *interface*.
        return [Settings._ip_command, "addr", "show", interface]

    def _add_ip_command(ip_address: str, interface: str) -> list:
        # Command that adds *ip_address* to *interface*; prefixed with sudo
        # when the configuration requests it.
        cmdsudo = []
        if Settings.config["Settings"]["use_sudo"]:
            cmdsudo = ["sudo"]
        return cmdsudo + [Settings._ip_command, "addr", "add", ip_address, "dev", interface]

    def _del_ip_command(ip_address: str, interface: str) -> list:
        # Command that removes *ip_address* from *interface*; sudo as above.
        cmdsudo = []
        if Settings.config["Settings"]["use_sudo"]:
            cmdsudo = ["sudo"]
        return cmdsudo + [Settings._ip_command, "addr", "del", ip_address, "dev", interface]
def poll_ip_monitor(time_sleep=1):
    """
    Starts a process to monitor ip addresses "ip monitor command"
    :param time_sleep: how long to sleep between checking output on the monitor_ip_command.
    :return:
    """
    # Long-running `ip monitor address` child; its stdout is read line by line.
    f = subprocess.Popen(
        Settings._which_ip_command(),
        encoding="utf8",
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    # NOTE(review): stderr is piped but never drained; a chatty `ip` could fill
    # the pipe buffer and stall the child — confirm this is acceptable.
    p = select.poll()
    p.register(f.stdout)
    while True:
        # Handle at most one pending line per wakeup, then sleep.
        if p.poll(1):
            process_new_ip_monitor_line(f.stdout.readline())
        time.sleep(time_sleep)
def process_new_ip_monitor_line(logline: str):
    """
    Inspect one line of `ip monitor address` output and, when it announces a
    new global IPv6 address on a watched interface, trigger an address review.

    :param logline: line from ip monitor command
    :return: None
    """
    # ip monitor command shows new ipv6 assignments like this:
    # 4: eth0 inet6 2001:0db8:fa57:dead::1357:9bdf/64 scope global dynamic mngtmpaddr noprefixroute
    # A line is relevant when it contains a watched interface name together
    # with the keywords "inet6", "scope" and "global".
    interfaces = Settings.config["Settings"]["interfaces_to_watch"]
    if any(
        all(keyword in logline for keyword in [interface, "inet6", "scope", "global"])
        for interface
        in interfaces
    ):
        # The token just before "inet6" is the interface name for this line.
        # (.strip() replaces the old redundant .lstrip().rstrip() chain.)
        ipv6_int_str = logline.split("inet6")[0].split()[-1].strip()
        logging.debug("This line was seen that appears to be a new IPv6 assignment: \n{0}\n".format(
            logline,
        ) + " Proceeding with reviewing IPv6 addresses."
        )
        review_and_update_ipv6_addresses(ipv6_int_str)
def review_and_update_ipv6_addresses(interface: str) -> tuple:
"""
Reviews all global IPv6 addresses, and assigns a new IPv6 address with the correct hostbits (token) if a new prefix
was found. Removes old IPv6 addresses with obsolete prefixes so the OS no longer uses them as well.
This is accomplished by listing all IPv6 addresses, then finding the one with the largest lifetime.
The function assumes that the IPv6 address with the biggest lifetime has the valid global IPv6 prefix. In typical
situations, this will always be the case. However, in the event either ISP or router changes the router
advertisements to use a different valid lifetime value, this script may behave incorrectly. Also, I recommend your
network has IPv6 snooping setup so your server isn't vulnerable from picking up a bad prefix from a malicious host,
something you should do regardless if you run this script or not.
:param interface: interface that is being reviewed
:return: tuple (ipaddress.IPv6Interface, bool) (
Assigned IPv6 address with correct hostbits (token),
if the assignment was new
)
"""
class LINE_ITERATOR_MODES:
mode_checking_for_inet6_addr = 0
mode_checking_for_lifetime_values = 1
# List the IP addresses from the interface so that we can get their lifetimes
logging.debug("Getting ip address list.")
ip_addr_stdout = subprocess.run(
Settings._list_ip_command(interface=interface),
stdout=subprocess.PIPE,
).stdout
# Iterate through the lines of output from ip addr command to get inet6 addresses and information.
inet6s = {}
inet6 = ""
line_iterator_mode = LINE_ITERATOR_MODES.mode_checking_for_inet6_addr
logging.debug("Parsing line-by-line the output of the IP address list" +
" and pulling out inet6 addresses with their lifetimes.")
for line in ip_addr_stdout.decode().splitlines():
if line_iterator_mode == LINE_ITERATOR_MODES.mode_checking_for_inet6_addr:
# Standard output if the ip addr show command has IPv6 address lines start with " inet6"
if " inet6" in line:
inet6 = line.split()[1]
# found an inet6, change iterator mode to get the lifetime values
line_iterator_mode = LINE_ITERATOR_MODES.mode_checking_for_lifetime_values
elif line_iterator_mode == LINE_ITERATOR_MODES.mode_checking_for_lifetime_values:
# Standard output for lifetime is " valid_lft 145961sec preferred_lft 0sec"
inet6_netaddr = ipaddress.ip_interface(inet6)
is_global = inet6_netaddr.is_global
# For this script's sake, consider any address with no lifetime the same as if the lifetime is 0.
if line.split()[1] == "forever":
valid_lft = 0
preferred_lft = 0
else:
valid_lft = int(line.split()[1].replace("sec", ""))
preferred_lft = int(line.split()[3].replace("sec", ""))
# Add the IPv6 address to a dictionary keyed by ip with useful information in the value.
inet6s[inet6] = (inet6_netaddr, is_global, valid_lft, preferred_lft)
# Go back to checking for more inet6 addresses
line_iterator_mode = LINE_ITERATOR_MODES.mode_checking_for_inet6_addr
# Find the IPv6 address that is a global address and has the biggest valid lifetime
logging.debug("Here are the IPv6 addresses I see: {}".format(str(inet6s)))
current_valid_ipv6, current_valid_ipv6_info = max(
[(k, inet6s[k]) for k in inet6s.keys() if inet6s[k][1] is True],
key=lambda x: (inet6s[x[0]][2])
)
logging.debug("Here is the IPv6 address with the longest valid lifetime: {0}".format(str(current_valid_ipv6)))
# Calculate wanted ipv6 address with network bits
inet6_to_add_address = current_valid_ipv6_info[0].network.network_address + \
int(ipaddress.ip_interface(Settings.config["Settings"]["ipv6_token"][interface]))
assigned_ipv6_check = [
inet_tuple[0]
for inet_tuple
in inet6s.values()
if inet_tuple[0].ip == inet6_to_add_address
]
if bool(assigned_ipv6_check):
# The expected IPv6 address has been found. No further action needed.
logging.debug("Expected IPv6 address {0} already assigned on interface {1}.".format(
str(inet6_to_add_address),
interface,
))
return (assigned_ipv6_check[0], False)
else:
# Use the IPv6 address with the longest valid lifetime and get its prefix
logging.info("Expected IPv6 address {0} not found on interface {1}.".format(
str(inet6_to_add_address),
interface,
)+" It seems a new prefix was assigned! Proceeding with update logic.")
inet6_to_add_prefix = current_valid_ipv6_info[0].network
inet6_to_add_interface = ipaddress.ip_interface(str(inet6_to_add_address) + \
"/" + str(inet6_to_add_prefix.prefixlen))
inet6_to_delete = []
inet6_to_add = str(inet6_to_add_interface)
| |
<filename>scripts/maya/nwFenixCommitter/unit_tests/test_Controller.py
# -*- coding: utf-8 -*-
"""Unit tests for the Fenix Committer."""
from nwave.effects.tools.nwFXTDTools.PipelineHelper import PipelineHelper
import zefir
from zefir.settings import get_setting_value
from fenix4maya.settings import MOTION_BLUR_SAMPLES_SETTING
import pytest
import maya.cmds as mc
import maya.mel
import os
from nwave.effects.tools.nwFenixCommitter.Settings import Settings
class TestFenixCommitter(object):
    def test_init(self, mvc):
        """Test tool initialization.

        A fresh model starts with no assets, picks up the current pipeline
        user, and the controller reports itself as valid.
        """
        model, _, controller = mvc
        assert model.assets == {}
        assert model.user == PipelineHelper.getCurrentUser()
        assert controller.valid
    def test_add_remove_assets(self, mvc, scene):
        """Test adding and removing assets.

        Covers adding nothing, adding the scene's FX assets, attempting to
        add a non-FX asset (camera), and removing none / one / all /
        already-removed assets.
        """
        model, _, controller = mvc
        controller.add_assets([])
        # Test add empty
        assert not len(model.assets)
        controller.add_assets(scene.assets)
        # Test add assets
        assert len(model.assets) == len(scene.assets)
        # Test add non fx sim asset: the camera should be rejected, so the
        # asset count must stay unchanged.
        camera = PipelineHelper.getAsset(
            'camera',
            zefir.STAGES.CAMERA_ANIMATION_DATA,
            '635', '0170'
        )
        controller.add_assets([camera])
        assert len(model.assets) == len(scene.assets)
        controller.remove_assets([])
        # Test removing no assets
        assert len(model.assets) == len(scene.assets)
        controller.remove_assets([asset.name for asset in scene.assets[:1]])
        # Test removing one asset
        assert len(model.assets) == len(scene.assets[1:])
        controller.remove_assets([asset.name for asset in scene.assets[1:]])
        # Test removing all assets
        assert len(model.assets) == 0
        # Removing assets that are no longer loaded must fail silently.
        controller.remove_assets(scene.assets)
        # Test removing non loaded assets
        assert len(model.assets) == 0
def test_udpate_asset(self, mvc, scene):
"""Test updating asset data."""
model, _, controller = mvc
controller.add_assets(scene.assets)
name = scene.assets[0].name
asset = model.assets[name]
# Test wrong asset name failing silently
controller.update_assets(['test'], commit_on_farm=False)
controller.update_assets([name], commit_on_farm=False)
assert not asset.commit_on_farm
controller.update_assets([name], commit_on_farm=True)
assert asset.commit_on_farm
controller.update_assets([name], commit_to_alembic_anim=False)
assert not asset.commit_to_alembic_anim
controller.update_assets([name], commit_to_alembic_anim=True)
assert asset.commit_to_alembic_anim
controller.update_assets([name], can_commit_to_fx_cache=False)
assert not asset.can_commit_to_fx_cache
controller.update_assets([name], can_commit_to_fx_cache=True)
assert asset.can_commit_to_fx_cache
controller.update_assets([name], commit_to_fx_cache=False)
assert not asset.commit_to_fx_cache
controller.update_assets([name], commit_to_fx_cache=True)
assert asset.commit_to_fx_cache
controller.update_assets([name], generate_alembic_from_geos=False)
assert not asset.generate_alembic_from_geos
controller.update_assets([name], generate_alembic_from_geos=True)
assert asset.generate_alembic_from_geos
controller.update_assets([name], use_local_space_for_alembic=False)
assert not asset.use_local_space_for_alembic
controller.update_assets([name], use_local_space_for_alembic=True)
assert asset.use_local_space_for_alembic
commit_text = 'test'
controller.update_assets([name], commit_text=commit_text)
assert asset.commit_text == commit_text
commit_text = ''
controller.update_assets([name], commit_text=commit_text)
assert asset.commit_text == commit_text
# Test setting all
controller.update_assets(
[scene_asset.name for scene_asset in scene.assets],
can_commit_to_fx_cache=True
)
assert all(
asset.can_commit_to_fx_cache
for asset in model.assets.values()
)
controller.update_assets(
[scene_asset.name for scene_asset in scene.assets],
can_commit_to_fx_cache=False
)
assert all(
not asset.can_commit_to_fx_cache
for asset in model.assets.values()
)
    def test_get_alembic_effect_meshes(self, mvc, scene):
        """Test getting meshes in effects group.

        Exercises the error paths (non-FX asset, missing effects node), the
        shape-deform rejection, and the happy path with geo nodes parented
        under the effects group.
        """
        _, _, controller = mvc
        # Test add non fx sim asset: a camera has no effects group and must
        # raise.
        camera = PipelineHelper.getAsset(
            'camera',
            zefir.STAGES.CAMERA_ANIMATION_DATA,
            '635', '0170'
        )
        with pytest.raises(ValueError):
            controller.get_alembic_from_geo_meshes(camera)
        # Test error when no effects node
        scene.assets[0].import_from_reference()
        effects_node = scene.assets[0].get_effects_node()
        mc.delete(effects_node.longName())
        with pytest.raises(ValueError):
            controller.get_alembic_from_geo_meshes(scene.assets[0])
        # Test reporting of shape deform: a shape named *Deformed under the
        # effects group means no alembic meshes are returned.
        scene.assets[1].import_from_reference()
        box = mc.polyCube()[0]
        mc.rename(mc.listRelatives(box, shapes=True)[0], 'boxShapeDeformed')
        effects_node = scene.assets[1].get_effects_node()
        mc.parent(box, str(effects_node))
        assert controller.get_alembic_from_geo_meshes(scene.assets[1]) is None
        # Test no geo nodes
        scene.assets[2].import_from_reference()
        assert controller.get_alembic_from_geo_meshes(scene.assets[2]) is None
        # Test geo nodes: expect the full-path shapes of the parented geos.
        effects_node = scene.assets[2].get_effects_node()
        nodes = [mc.polyCube()[0], mc.polySphere()[0]]
        mc.parent(nodes, str(effects_node))
        assert controller.get_alembic_from_geo_meshes(scene.assets[2]) == [
            mc.listRelatives(node, shapes=True, fullPath=True)[0]
            for node in nodes
        ]
def test_get_alembic_effect_attributes(self, mvc):
"""Test getting mesh render attributes."""
_, _, controller = mvc
transform = mc.createNode('transform')
box = mc.polyCube()[0]
box = mc.parent(box, transform)[0]
shape = mc.listRelatives(box, shapes=True)[0]
# Test return type
attributes = controller.get_render_attributes([transform])[0]
assert isinstance(attributes, dict)
# Test no render attributes
assert attributes.keys() == []
assert attributes.values() == []
attributes = controller.get_render_attributes([shape])[0].keys()
# Test arnold attributes
assert all(
'arnold' in mc.attributeQuery(
attribute, node=shape, categories=True
)
for attribute in attributes
)
# Test no message type attributes
assert not any(
'message' in mc.getAttr('{}.{}'.format(shape, attribute), typ=True)
for attribute in attributes
)
# Test no shader
shaders = controller.get_shaders([transform])[0]
assert shaders == []
# Test shader
shaders = controller.get_shaders([shape])[0]
assert shaders == ['initialShadingGroup']
# Test no parent
parents = controller.get_parents([transform])[0]
assert parents is None
# Test parent with mesh
parents = controller.get_parents([shape])[0]
assert transform in parents
# Test parent with transform
parents = controller.get_parents([box])[0]
assert transform in parents
# Test visibility on
mc.setAttr('{}.visibility'.format(box), 1)
value = controller.get_visibility_values([shape])[0]
assert value
# Test visibility off
mc.setAttr('{}.visibility'.format(box), 0)
value = controller.get_visibility_values([shape])[0]
assert not value
# Test no anim curve
curves = controller.get_visibility_curves([shape])[0]
assert curves is None
# Create a anim curve
mc.setKeyframe(box, at='visibility', v=1)
curves = controller.get_visibility_curves([shape])[0]
assert curves
    def test_export_alembic(self, mvc, tmpdir, mocker):
        """Test exporting geometries to alembic.

        Spies on ``mc.AbcExport`` and checks the constructed job string:
        UVs included, frame range padded by one frame on each side, motion
        blur relative samples, output file and root node.
        """
        model, _, controller = mvc
        model.frame_in = 1
        model.frame_out = 3
        model.motion_blur_in = -.25
        model.motion_blur_out = .25
        mocker.spy(mc, 'AbcExport')
        path = tmpdir.join('cube.abc')
        cube = mc.polyCube()[0]
        controller.export_alembic(str(path), [cube])
        # Test function call
        assert mc.AbcExport.called
        # Test call keyword arguments
        args = mc.AbcExport.call_args[1]
        job = args['jobArg'][0]
        assert '-uv' in job
        # Export is expected to pad the range by one frame on each side.
        frame_range = '-frameRange {} {}'.format(
            model.frame_in - 1, model.frame_out + 1
        )
        assert frame_range in job
        assert '-frameRelativeSample {}'.format(model.motion_blur_in) in job
        assert '-frameRelativeSample {}'.format(model.motion_blur_out) in job
        assert '-file {}'.format(path) in job
        assert '-root {}'.format(cube) in job
def test_import_alembic(self, mvc, tmpdir, mocker):
"""Test importing geometries from alembic."""
model, _, controller = mvc
path = '/home/mathiasc/tmp/invalid.abc'
if os.path.exists(path):
os.remove(path)
# Test wrong path
with pytest.raises(RuntimeError):
controller.import_alembic(path)
path = tmpdir.join('cube.abc')
cube = mc.polyCube()[0]
model.frame_in = 1
model.frame_out = 3
model.motion_blur_in = -.25
model.motion_blur_out = .25
controller.export_alembic(str(path), [cube])
mocker.spy(mc, 'AbcImport')
objects = controller.import_alembic(str(path))
# Test import
assert mc.AbcImport.called
args = mc.AbcImport.call_args
# Test correct path in call arguments
assert str(path) in args[0]
# Test reparent flag in call keyword arguments
assert 'reparent' in args[1]
# Test retruned geo
assert len(objects) == 1
# Test long path
assert '|' in objects[0]
    def test_set_alembic_effect_attributes(self, mvc):
        """Test settings render attributes on mesh.

        Captures attribute data (parent, visibility, shader, render attrs,
        anim curves) from one geometry, rebuilds a fresh geometry, applies
        the captured data and verifies it was restored.
        """
        _, _, controller = mvc
        transform = mc.createNode('transform')
        shader = mc.shadingNode('lambert', asShader=True)
        mc.select(cl=True)
        shading_engine = mc.sets(renderable=True, noSurfaceShader=True)
        mc.connectAttr(
            '{}.outColor'.format(shader),
            '{}.surfaceShader'.format(shading_engine)
        )
        box = mc.polyCube()[0]
        box = mc.parent(box, transform)[0]
        shape = mc.listRelatives(box, shapes=True, fullPath=True)[0]
        mc.setAttr('{}.visibility'.format(box), False)
        mc.sets(shape, e=True, forceElement=shading_engine)
        # Re-fetch the shape with a short path before capturing attributes.
        shape = mc.listRelatives(box, shapes=True)[0]
        attribute_data = controller.get_alembic_effect_attributes([shape])
        # Remove old geometries and create new ones on which to apply attrs
        mc.delete(box)
        box = mc.polyCube()[0]
        shape = mc.listRelatives(box, shapes=True, fullPath=True)[0]
        controller.set_alembic_effect_attributes([shape], attribute_data)
        # Re-fetch: applying attributes may reparent, changing the full path.
        shape = mc.listRelatives(box, shapes=True, fullPath=True)[0]
        # Test proper parenting
        assert mc.listRelatives(box, p=True, fullPath=True)[0] == \
            attribute_data['parents'][0]
        # Test visibility value
        assert mc.getAttr('{}.visibility'.format(box)) == \
            attribute_data['visibility_values'][0]
        # Test assigning shader
        assert mc.listConnections('{}.instObjGroups[0]'.format(shape)) == \
            attribute_data['shaders'][0]
        mc.delete(box)
        box = mc.polyCube()[0]
        box = mc.parent(box, transform)[0]
        shape = mc.listRelatives(box, shapes=True, fullPath=True)[0]
        # TODO test render attributes settings
        mc.setAttr('{}.aiOpaque'.format(shape), 0)
        mc.setKeyframe(box, at='visibility', v=1)
        attribute_data = controller.get_alembic_effect_attributes([shape])
        # Remove old geometries and create new ones on which to apply attrs
        mc.delete(box)
        box = mc.polyCube()[0]
        shape = mc.listRelatives(box, shapes=True, fullPath=True)[0]
        controller.set_alembic_effect_attributes([shape], attribute_data)
        shape = mc.listRelatives(box, shapes=True, fullPath=True)[0]
        # The captured visibility anim curve must be reconnected.
        assert attribute_data['visibility_curves'][0] in \
            mc.listConnections('{}.visibility'.format(box))
        assert 'aiOpaque' in attribute_data['render_attrs'][0]
        assert attribute_data['render_attrs'][0]['aiOpaque'] == 0
    def test_clean_geometries(self, mvc):
        """Test cleaning geometries.

        After cleaning, the shape should receive its mesh through a
        blendShape connection, the originals should be renamed with an
        ``_OM`` suffix, and the cleaned copy reparented under the original
        node's parent.
        """
        _, _, controller = mvc
        transform = mc.createNode('transform')
        box = mc.polyCube()[0]
        box = mc.parent(box, transform)[0]
        shape = mc.listRelatives(box, shapes=True, fullPath=True)[0]
        controller.clean_geometries([shape])
        # Test in mesh connection
        # Test blendshape connection
        connection = mc.listConnections('{}.inMesh'.format(shape))
        assert connection
        assert 'blendShape' in mc.nodeType(connection[0])
        # Test renaming: originals are kept with an ``_OM`` suffix.
        previous_shape = shape.split('|')[-1]
        assert mc.objExists('{}_OM'.format(box))
        assert mc.objExists('{}_OM'.format(previous_shape))
        # Test parenting under original node parent
        assert mc.listRelatives(
            mc.listRelatives(shape, parent=True, fullPath=True)[0],
            parent=True, fullPath=True
        )[0] == '|{}'.format(transform)
def test_cache_geometries(self, mvc, mocker):
"""Test geocaching geometries."""
model, _, controller = mvc
mocker.spy(controller, 'cache_geometries')
mocker.spy(maya.mel, 'eval')
model.frame_in = 10
model.frame_out = 20
box = mc.polyCube()[0]
shape = mc.listRelatives(box, shapes=True, fullPath=True)[0]
controller.cache_geometries([shape])
# Test geo cleaned before making cache
assert controller.cache_geometries.called
assert controller.cache_geometries.call_args[0] == ([shape],)
# Test geocache method
assert maya.mel.eval.called
args = maya.mel.eval.call_args[0][0]
assert 'cacheFile' in args
assert '-directory "{}"'.format(Settings.TEMP_DIR) in args
assert '-startTime {}'.format(model.frame_in - 1)
assert '-endTime {}'.format(model.frame_out + 1)
assert '-worldSpace'
assert '-singleCache'
assert '-attachFile'
def test_generate_effect_alembic(self, mvc, scene, mocker):
"""Test generating clean geometries via an alembic cache."""
model, view, controller = mvc
seq, shot = 999, 10
mocker.patch.object(
PipelineHelper, 'getCurrentSeqShot',
mocker.MagicMock(return_value=(seq, shot))
)
frame_in, frame_out = 10, 20
mocker.patch.object(
PipelineHelper, 'getShotFrameRange',
mocker.MagicMock(return_value=(frame_in, frame_out))
)
existing_alembic_mock = mocker.MagicMock(return_value="Overwrite")
mocker.patch.object(view, 'dialog', existing_alembic_mock)
mocker.spy(controller, 'cache_geometries')
mocker.spy(controller, 'export_alembic')
mocker.spy(controller, 'update_geometries')
mocker.spy(controller, 'remove_alembic_file')
controller.add_assets([scene.assets[0]])
asset = model.assets[scene.assets[0].name]
controller.generate_effect_alembic(asset)
# Test cache command frame range
assert model.frame_in == frame_in
assert model.frame_out == frame_out
mo_blur_in, mo_blur_out = get_setting_value(
MOTION_BLUR_SAMPLES_SETTING,
asset.asset.get_maya_commit()
)
assert model.motion_blur_in == mo_blur_in
assert model.motion_blur_out == mo_blur_out
# test no geo in in effects group
assert not controller.export_alembic.called
effects_node = PipelineHelper.getAssetEffectsNode(scene.assets[0])
box = mc.polyCube()[0]
box = mc.parent(box, effects_node)
controller.generate_effect_alembic(asset)
# test alembic export
assert controller.export_alembic.called
args = controller.export_alembic.call_args[0]
assert len(args) == 3
path, nodes, local_space = args
# Test output path
assert PipelineHelper.getCachePath(seq, shot) in path
assert 'alembic' in path
assert asset.asset.name[:asset.asset.name.find(':')] in path
# Test geo nodes
assert mc.listRelatives(box, shapes=True, fullPath=True)[0] in nodes
# Test local space
assert asset.use_local_space_for_alembic == local_space
# Test alembic import
| |
<reponame>mzymzy/dgl
"""This file contains NodeFlow samplers."""
import sys
import numpy as np
import threading
from numbers import Integral
import traceback
from ..._ffi.function import _init_api
from ..._ffi.object import register_object, ObjectBase
from ..._ffi.ndarray import empty
from ... import utils
from ...nodeflow import NodeFlow
from ... import backend as F
from ...graph import DGLGraph
from ...base import NID, EID
try:
import Queue as queue
except ImportError:
import queue
__all__ = ['NeighborSampler', 'LayerSampler', 'EdgeSampler','ChunkSampler']
class SamplerIter(object):
    """Iterator over the batches produced by a NodeFlow sampler.

    Keeps a small local buffer of batches; whenever the buffer runs dry it
    asks the wrapped sampler to fetch more, and raises ``StopIteration``
    once the sampler yields nothing further.
    """
    def __init__(self, sampler):
        super(SamplerIter, self).__init__()
        self._sampler = sampler
        self._batches = []
        self._batch_idx = 0
    def prefetch(self):
        """Pull the next bunch of batches from the sampler into the buffer."""
        fetched = self._sampler.fetch(self._batch_idx)
        self._batches.extend(fetched)
        self._batch_idx += len(fetched)
    def __next__(self):
        # Refill at most once per call; an empty fetch means exhaustion.
        if not self._batches:
            self.prefetch()
        if not self._batches:
            raise StopIteration
        return self._batches.pop(0)
class PrefetchingWrapper(object):
    """Internal shared prefetcher logic. It can be sub-classed by a Thread-based implementation
    or Process-based implementation.

    Protocol: the worker runs :meth:`run`, pushing prefetched items onto
    ``_dataq`` and a matching entry (``None`` on success, ``(exc, tb)`` on
    failure) onto ``_errorq``. The master pulls one item from each queue per
    :meth:`__next__`; pushing ``None`` onto ``_controlq`` asks the worker to
    shut down. Subclasses must create the three queues before starting the
    worker (see ThreadPrefetchingWrapper).
    """
    _dataq = None  # Data queue transmits prefetched elements
    _controlq = None  # Control queue to instruct thread / process shutdown
    _errorq = None  # Error queue to transmit exceptions from worker to master
    _checked_start = False  # True once startup has been checked by _check_start
    def __init__(self, sampler_iter, num_prefetch):
        super(PrefetchingWrapper, self).__init__()
        self.sampler_iter = sampler_iter
        assert num_prefetch > 0, 'Unbounded Prefetcher is unsupported.'
        self.num_prefetch = num_prefetch
    def run(self):
        """Method representing the process activity."""
        # Startup - Master waits for this: the first _errorq entry tells the
        # master whether the worker came up cleanly (None) or failed.
        try:
            loader_iter = self.sampler_iter
            self._errorq.put(None)
        except Exception as e:  # pylint: disable=broad-except
            tb = traceback.format_exc()
            self._errorq.put((e, tb))
        while True:
            try:  # Check control queue
                c = self._controlq.get(False)
                if c is None:
                    # Shutdown request from the master.
                    break
                else:
                    raise RuntimeError('Got unexpected control code {}'.format(repr(c)))
            except queue.Empty:
                # No control message pending; keep prefetching.
                pass
            except RuntimeError as e:
                # NOTE(review): this catches the RuntimeError raised just
                # above for an unexpected control code, reports it to the
                # master, then continues the loop — confirm this (rather
                # than terminating) is the intended behavior.
                tb = traceback.format_exc()
                self._errorq.put((e, tb))
                self._dataq.put(None)
            try:
                data = next(loader_iter)
                error = None
            except Exception as e:  # pylint: disable=broad-except
                tb = traceback.format_exc()
                error = (e, tb)
                data = None
            finally:
                # Always emit a matched (error, data) pair so the master's
                # paired gets in __next__ stay in sync.
                self._errorq.put(error)
                self._dataq.put(data)
    def __next__(self):
        next_item = self._dataq.get()
        next_error = self._errorq.get()
        if next_error is None:
            return next_item
        else:
            # Worker reported a failure: ask it to stop, then surface the
            # error (StopIteration ends iteration; anything else re-raises).
            self._controlq.put(None)
            if isinstance(next_error[0], StopIteration):
                raise StopIteration
            else:
                return self._reraise(*next_error)
    def _reraise(self, e, tb):
        # Print the worker-side traceback before re-raising in the master,
        # since the original traceback context is lost across the queue.
        print('Reraising exception from Prefetcher', file=sys.stderr)
        print(tb, file=sys.stderr)
        raise e
    def _check_start(self):
        # Block until the worker's startup message arrives; re-raise any
        # startup failure in the master. Must be called exactly once.
        assert not self._checked_start
        self._checked_start = True
        next_error = self._errorq.get(block=True)
        if next_error is not None:
            self._reraise(*next_error)
    def next(self):
        # Python 2 iterator compatibility.
        return self.__next__()
class ThreadPrefetchingWrapper(PrefetchingWrapper, threading.Thread):
    """Internal threaded prefetcher."""
    def __init__(self, *args, **kwargs):
        super(ThreadPrefetchingWrapper, self).__init__(*args, **kwargs)
        # Queues must exist before start(): run() uses them immediately.
        # Data/error queues are bounded by num_prefetch to cap buffered work.
        self._dataq = queue.Queue(self.num_prefetch)
        self._controlq = queue.Queue()
        self._errorq = queue.Queue(self.num_prefetch)
        # Daemon thread so a lingering prefetcher never blocks interpreter
        # exit.
        self.daemon = True
        self.start()
        # Block until the worker reports a clean startup (or re-raise its
        # startup error here in the master thread).
        self._check_start()
class NodeFlowSampler(object):
    '''Base class that generates NodeFlows from a graph.

    Class properties
    ----------------
    immutable_only : bool
        Whether the sampler only works on immutable graphs.
        Subclasses can override this property.
    '''
    immutable_only = False
    def __init__(
            self,
            g,
            batch_size,
            seed_nodes,
            shuffle,
            num_prefetch,
            prefetching_wrapper_class):
        self._g = g
        if self.immutable_only and not g._graph.is_readonly():
            raise NotImplementedError("This loader only support read-only graphs.")
        self._batch_size = int(batch_size)
        # Default to all nodes as seeds when none are given.
        if seed_nodes is None:
            self._seed_nodes = F.arange(0, g.number_of_nodes())
        else:
            self._seed_nodes = seed_nodes
        if shuffle:
            self._seed_nodes = F.rand_shuffle(self._seed_nodes)
        self._seed_nodes = utils.toindex(self._seed_nodes)
        if num_prefetch:
            self._prefetching_wrapper_class = prefetching_wrapper_class
        # BUG FIX: this assignment used to live inside the ``if num_prefetch:``
        # branch, so __iter__ raised AttributeError whenever prefetching was
        # disabled (num_prefetch == 0, e.g. NeighborSampler(prefetch=False)).
        self._num_prefetch = num_prefetch
    def fetch(self, current_nodeflow_index):
        '''
        Method that returns the next "bunch" of NodeFlows.
        Each worker will return a single NodeFlow constructed from a single
        batch.
        Subclasses of NodeFlowSampler should override this method.
        Parameters
        ----------
        current_nodeflow_index : int
            How many NodeFlows the sampler has generated so far.
        Returns
        -------
        list[NodeFlow]
            Next "bunch" of nodeflows to be processed.
        '''
        raise NotImplementedError
    def __iter__(self):
        it = SamplerIter(self)
        # Wrap the iterator in a background prefetcher only when requested.
        if self._num_prefetch:
            return self._prefetching_wrapper_class(it, self._num_prefetch)
        else:
            return it
    @property
    def g(self):
        # The graph NodeFlows are sampled from.
        return self._g
    @property
    def seed_nodes(self):
        # Seed nodes as a dgl index (see utils.toindex above).
        return self._seed_nodes
    @property
    def batch_size(self):
        # Number of seed nodes per generated NodeFlow.
        return self._batch_size
class NeighborSampler(NodeFlowSampler):
    r'''Create a sampler that samples neighborhood.
    It returns a generator of :class:`~dgl.NodeFlow`. This can be viewed as
    an analogy of *mini-batch training* on graph data -- the given graph represents
    the whole dataset and the returned generator produces mini-batches (in the form
    of :class:`~dgl.NodeFlow` objects).
    A NodeFlow grows from sampled nodes. It first samples a set of nodes from the given
    ``seed_nodes`` (or all the nodes if not given), then samples their neighbors
    and extracts the subgraph. If the number of hops is :math:`k(>1)`, the process is repeated
    recursively, with the neighbor nodes just sampled become the new seed nodes.
    The result is a graph we defined as :class:`~dgl.NodeFlow` that contains :math:`k+1`
    layers. The last layer is the initial seed nodes. The sampled neighbor nodes in
    layer :math:`i+1` are in layer :math:`i`. All the edges are from nodes
    in layer :math:`i` to layer :math:`i+1`.
    .. image:: https://data.dgl.ai/tutorial/sampling/NodeFlow.png
    As an analogy to mini-batch training, the ``batch_size`` here is equal to the number
    of the initial seed nodes (number of nodes in the last layer).
    The number of nodeflow objects (the number of batches) is calculated by
    ``len(seed_nodes) // batch_size`` (if ``seed_nodes`` is None, then it is equal
    to the set of all nodes in the graph).
    Note: NeighborSampler currently only supports immutable graphs.
    Parameters
    ----------
    g : DGLGraph
        The DGLGraph where we sample NodeFlows.
    batch_size : int
        The batch size (i.e, the number of nodes in the last layer)
    expand_factor : int
        The number of neighbors sampled from the neighbor list of a vertex.
        Note that no matter how large the expand_factor, the max number of sampled neighbors
        is the neighborhood size.
    num_hops : int, optional
        The number of hops to sample (i.e, the number of layers in the NodeFlow).
        Default: 1
    neighbor_type: str, optional
        Indicates the neighbors on different types of edges.
        * "in": the neighbors on the in-edges.
        * "out": the neighbors on the out-edges.
        Default: "in"
    transition_prob : str, optional
        A 1D tensor containing the (unnormalized) transition probability.
        The probability of a node v being sampled from a neighbor u is proportional to
        the edge weight, normalized by the sum over edge weights grouping by the
        destination node.
        In other words, given a node v, the probability of node u and edge (u, v)
        included in the NodeFlow layer preceding that of v is given by:
        .. math::
            p(u, v) = \frac{w_{u, v}}{\sum_{u', (u', v) \in E} w_{u', v}}
        If neighbor type is "out", then the probability is instead normalized by the sum
        grouping by source node:
        .. math::
            p(v, u) = \frac{w_{v, u}}{\sum_{u', (v, u') \in E} w_{v, u'}}
        If a str is given, the edge weight will be loaded from the edge feature column with
        the same name. The feature column must be a scalar column in this case.
        Default: None
    seed_nodes : Tensor, optional
        A 1D tensor list of nodes where we sample NodeFlows from.
        If None, the seed vertices are all the vertices in the graph.
        Default: None
    shuffle : bool, optional
        Indicates the sampled NodeFlows are shuffled. Default: False
    num_workers : int, optional
        The number of worker threads that sample NodeFlows in parallel. Default: 1
    prefetch : bool, optional
        If true, prefetch the samples in the next batch. Default: False
    add_self_loop : bool, optional
        If true, add self loop to the sampled NodeFlow.
        The edge IDs of the self loop edges are -1. Default: False
    '''
    immutable_only = True
    def __init__(
            self,
            g,
            batch_size,
            expand_factor=None,
            num_hops=1,
            neighbor_type='in',
            transition_prob=None,
            seed_nodes=None,
            shuffle=False,
            num_workers=1,
            prefetch=False,
            add_self_loop=False):
        # Prefetch depth is twice the worker count when prefetching is on.
        super(NeighborSampler, self).__init__(
            g, batch_size, seed_nodes, shuffle, num_workers * 2 if prefetch else 0,
            ThreadPrefetchingWrapper)
        assert g.is_readonly, "NeighborSampler doesn't support mutable graphs. " + \
            "Please turn it into an immutable graph with DGLGraph.readonly"
        assert isinstance(expand_factor, Integral), 'non-int expand_factor not supported'
        self._expand_factor = int(expand_factor)
        self._num_hops = int(num_hops)
        self._add_self_loop = add_self_loop
        self._num_workers = int(num_workers)
        self._neighbor_type = neighbor_type
        self._transition_prob = transition_prob
    def fetch(self, current_nodeflow_index):
        # Resolve the transition probability to a tensor: empty tensor means
        # uniform sampling; a string names an edge feature column.
        if self._transition_prob is None:
            prob = F.tensor([], F.float32)
        elif isinstance(self._transition_prob, str):
            prob = self.g.edata[self._transition_prob]
        else:
            prob = self._transition_prob
        # Delegate the actual sampling to the C API; one call produces up to
        # _num_workers batches starting at current_nodeflow_index.
        nfobjs = _CAPI_NeighborSampling(
            self.g._graph,
            self.seed_nodes.todgltensor(),
            current_nodeflow_index,  # start batch id
            self.batch_size,  # batch size
            self._num_workers,  # num batches
            self._expand_factor,
            self._num_hops,
            self._neighbor_type,
            self._add_self_loop,
            F.zerocopy_to_dgl_ndarray(prob))
        # Wrap the returned handles into NodeFlow objects.
        nflows = [NodeFlow(self.g, obj) for obj in nfobjs]
        return nflows
class LayerSampler(NodeFlowSampler):
'''Create a sampler that samples neighborhood.
This creates a NodeFlow loader that samples subgraphs from the input graph
with layer-wise sampling. This sampling method is implemented in C and can perform
sampling very efficiently.
The NodeFlow loader returns a list of NodeFlows.
The size of the NodeFlow list is the number of workers.
Note: LayerSampler currently only supprts immutable graphs.
| |
)
return display_config
    def get(self, request, *args, **kwargs):
        """Return the rendered study tables (plus edit context when the
        ``edit`` GET parameter is set) as JSON.

        Responds 404 for an unknown study UUID, 403 when editing is
        requested but disallowed for the project, and otherwise 200 —
        including on table-build failures, where ``render_error`` is set
        in the payload instead (existing TODO: log the error).
        """
        from samplesheets.plugins import get_irods_content
        timeline = get_backend_api('timeline_backend')
        irods_backend = get_backend_api('omics_irods', conn=False)
        study = Study.objects.filter(sodar_uuid=self.kwargs['study']).first()
        if not study:
            return Response(
                {
                    'render_error': 'Study not found with UUID "{}", '
                    'unable to render'.format(self.kwargs['study'])
                },
                status=404,
            )
        inv = study.investigation
        project = inv.project
        # Return extra edit mode data
        edit = bool(request.GET.get('edit'))
        allow_editing = app_settings.get_app_setting(
            APP_NAME, 'allow_editing', project=project
        )
        if edit and not allow_editing:
            return Response(
                {
                    'render_error': 'Editing not allowed in the project, '
                    'unable to render'
                },
                status=403,
            )
        ret_data = {'study': {'display_name': study.get_display_name()}}
        tb = SampleSheetTableBuilder()
        try:
            ret_data['tables'] = tb.build_study_tables(
                study, edit=edit, ui=True
            )
        except Exception as ex:
            # Raise if we are in debug mode
            if settings.DEBUG:
                raise ex
            # TODO: Log error
            # NOTE: errors are reported with HTTP 200 and a render_error
            # field; the client inspects the payload rather than the status.
            ret_data['render_error'] = str(ex)
            return Response(ret_data, status=200)
        # Get iRODS content if NOT editing and collections have been created
        if not edit:
            ret_data = get_irods_content(inv, study, irods_backend, ret_data)
        # Get/build sheet config
        sheet_config = conf_api.get_sheet_config(inv)
        # Get/build display config (only for authenticated users)
        if request.user and request.user.is_authenticated:
            display_config = self._get_display_config(
                inv, request.user, sheet_config
            )
            ret_data['display_config'] = display_config['studies'][
                str(study.sodar_uuid)
            ]
        # Set up editing
        if edit:
            ontology_backend = get_backend_api('ontologyaccess_backend')
            # Get study config
            ret_data['study_config'] = sheet_config['studies'][
                str(study.sodar_uuid)
            ]
            # Set up study edit context
            ret_data['edit_context'] = {
                'sodar_ontologies': ontology_backend.get_obo_dict(key='name')
                if ontology_backend
                else {},
                'samples': {},
                'protocols': [],
            }
            # Add sample info: map each sample arc node name to the UUIDs of
            # the assays whose arcs reference it.
            s_assays = {}
            for assay in study.assays.all().order_by('pk'):
                a_uuid = str(assay.sodar_uuid)
                for n in [a[0] for a in assay.arcs]:
                    if '-sample-' in n:
                        if n not in s_assays:
                            s_assays[n] = []
                        if a_uuid not in s_assays[n]:
                            s_assays[n].append(a_uuid)
            for sample in GenericMaterial.objects.filter(
                study=study, item_type='SAMPLE'
            ).order_by('name'):
                ret_data['edit_context']['samples'][str(sample.sodar_uuid)] = {
                    'name': sample.name,
                    'assays': s_assays[sample.unique_name]
                    if sample.unique_name in s_assays
                    else [],
                }
            # Add Protocol info
            for protocol in Protocol.objects.filter(study=study).order_by(
                'name'
            ):
                ret_data['edit_context']['protocols'].append(
                    {'uuid': str(protocol.sodar_uuid), 'name': protocol.name}
                )
            # Record the edit session start in the project timeline if the
            # timeline backend is available.
            if timeline:
                timeline.add_event(
                    project=project,
                    app_name=APP_NAME,
                    user=request.user,
                    event_name='sheet_edit_start',
                    description='start editing sheets',
                    status_type='OK',
                )
        return Response(ret_data, status=200)
class StudyLinksAjaxView(SODARBaseProjectAjaxView):
    """View to retrieve data for shortcut links from study apps"""

    # TODO: Also do this for assay apps?
    permission_required = 'samplesheets.view_sheet'

    def get(self, request, *args, **kwargs):
        """
        Return shortcut link data for the study given in the URL kwargs.

        Responds with 404 if the study or its plugin cannot be found. If
        building the study tables fails, returns 200 with a render_error
        message in the payload instead of the links.
        """
        study = Study.objects.filter(sodar_uuid=self.kwargs['study']).first()
        # Guard against an unknown study UUID: previously study.get_plugin()
        # would raise AttributeError on None, resulting in an HTTP 500
        if not study:
            return Response({'detail': 'Study not found'}, status=404)
        study_plugin = study.get_plugin()
        if not study_plugin:
            return Response(
                {'detail': 'Plugin not found for study'}, status=404
            )
        ret_data = {'study': {'display_name': study.get_display_name()}}
        tb = SampleSheetTableBuilder()
        try:
            study_tables = tb.build_study_tables(study, ui=False)
        except Exception as ex:
            # TODO: Log error
            ret_data['render_error'] = str(ex)
            return Response(ret_data, status=200)
        ret_data = study_plugin.get_shortcut_links(
            study, study_tables, **request.GET
        )
        return Response(ret_data, status=200)
class SheetWarningsAjaxView(SODARBaseProjectAjaxView):
    """View to retrieve parser warnings for sample sheets"""

    permission_required = 'samplesheets.view_sheet'

    def get(self, request, *args, **kwargs):
        """Return ISA-Tab parser warnings for the current project."""
        project = self.get_project()
        inv = Investigation.objects.filter(project=project).first()
        if inv is None:
            return Response(
                {'detail': 'Investigation not found for project'}, status=404
            )
        logger.debug(
            'Parser Warnings: {}'.format(json.dumps(inv.parser_warnings))
        )
        return Response({'warnings': inv.parser_warnings}, status=200)
class SheetCellEditAjaxView(BaseSheetEditAjaxView):
    """Ajax view to edit sample sheet cells"""

    @transaction.atomic
    def _update_cell(self, node_obj, cell, save=False):
        """
        Update a single cell in an object.

        Dispatches on the cell's header_type (and for plain fields the
        header_name) to decide which model attribute to update.

        :param node_obj: GenericMaterial or Process object
        :param cell: Cell update data from the client (dict)
        :param save: If True, save object after successful call (boolean)
        :return: String
        :raise: SheetEditException if the operation fails.
        """
        ok_msg = None
        logger.debug(
            'Editing {} "{}" ({})'.format(
                node_obj.__class__.__name__,
                node_obj.unique_name,
                node_obj.sodar_uuid,
            )
        )
        # TODO: Provide the original header as one string instead
        header_type = cell['header_type']
        header_name = cell['header_name']
        # Plain fields (no header type; mapped via EDIT_FIELD_MAP)
        if not header_type and header_name.lower() in EDIT_FIELD_MAP:
            attr_name = EDIT_FIELD_MAP[header_name.lower()]
            attr = getattr(node_obj, attr_name)
            if isinstance(attr, str):
                setattr(node_obj, attr_name, cell['value'])
            elif isinstance(attr, dict):
                # Ontology-style dict attribute: only the name is editable
                attr['name'] = cell['value']
                # TODO: Set accession and ontology once editing is allowed
            ok_msg = 'Edited field: {}'.format(attr_name)
        # Name field (special case)
        elif header_type == 'name':
            # Only DATA nodes may have an empty name
            if len(cell['value']) == 0 and cell.get('item_type') != 'DATA':
                self._raise_ex('Empty name not allowed for non-data node')
            node_obj.name = cell['value']
            # TODO: Update unique name here if needed
            ok_msg = 'Edited node name: {}'.format(cell['value'])
        # Process name and name type (special case)
        elif header_type == 'process_name':
            node_obj.name = cell['value']
            # Only set name_type when the header is a recognized process
            # name header
            if cell['header_name'] in th.PROCESS_NAME_HEADERS:
                node_obj.name_type = cell['header_name']
            ok_msg = 'Edited process name: {}{}'.format(
                cell['value'],
                ' ({})'.format(cell['header_name'])
                if cell['header_name'] in th.PROCESS_NAME_HEADERS
                else '',
            )
        # Protocol field (special case)
        elif header_type == 'protocol':
            protocol = Protocol.objects.filter(
                sodar_uuid=cell['uuid_ref']
            ).first()
            if not protocol:
                self._raise_ex(
                    'Protocol not found: "{}" ({})'.format(
                        cell['value'], cell['uuid_ref']
                    )
                )
            node_obj.protocol = protocol
            ok_msg = 'Edited protocol ref: "{}" ({})'.format(
                cell['value'], cell['uuid_ref']
            )
        # Performer (special case)
        # NOTE(review): no ok_msg is set here, so this edit is not debug
        # logged below — confirm whether this is intentional
        elif header_type == 'performer':
            node_obj.performer = cell['value']
        # Perform date (special case)
        elif header_type == 'perform_date':
            if cell['value']:
                try:
                    # Client sends ISO dates (YYYY-MM-DD)
                    node_obj.perform_date = dt.strptime(
                        cell['value'], '%Y-%m-%d'
                    )
                except ValueError as ex:
                    self._raise_ex(ex)
            else:
                # Empty value clears the date
                node_obj.perform_date = None
        # Extract label (special case)
        elif header_type == 'extract_label':
            node_obj.extract_label = cell['value']
        # JSON Attributes (characteristics, parameter values etc.)
        elif header_type in MODEL_JSON_ATTRS:
            attr = getattr(node_obj, header_type)
            # TODO: Is this actually a thing nowadays?
            if isinstance(attr[header_name], str):
                attr[header_name] = cell['value']
            else:
                attr[header_name]['value'] = self._get_attr_value(
                    node_obj, cell, header_name, header_type
                )
                # TODO: Support ontology ref in unit
                if node_obj.has_ontology_unit(
                    header_name, header_type
                ) and isinstance(attr[header_name]['unit'], dict):
                    attr[header_name]['unit']['name'] = cell.get('unit')
                elif node_obj.has_unit(header_name, header_type):
                    attr[header_name]['unit'] = cell.get('unit')
            ok_msg = 'Edited JSON attribute: {}[{}]'.format(
                header_type, header_name
            )
        else:
            self._raise_ex(
                'Editing not implemented '
                '(header_type={}; header_name={})'.format(
                    header_type, header_name
                )
            )
        if save:
            node_obj.save()
        if ok_msg:
            logger.debug(ok_msg)
        return ok_msg

    def post(self, request, *args, **kwargs):
        """
        Update one or more sample sheet cells.

        Expects ``updated_cells`` in the request data: a list of cell update
        dicts. Each cell is updated and saved individually; processing stops
        with an HTTP 500 response on the first failure.
        """
        inv = Investigation.objects.filter(
            project=self.get_project(), active=True
        ).first()
        updated_cells = request.data.get('updated_cells', [])
        for cell in updated_cells:
            logger.debug('Cell update: {}'.format(cell))
            node_obj = get_node_obj(sodar_uuid=cell['uuid'])
            # TODO: Make sure given object actually belongs in project etc.
            if not node_obj:
                err_msg = 'Object not found: {} ({})'.format(
                    cell['uuid'], cell['obj_cls']
                )
                logger.error(err_msg)
                # TODO: Return list of errors when processing in batch
                return Response({'detail': err_msg}, status=500)
            # Update cell, save immediately (now we are only editing one cell)
            try:
                self._update_cell(node_obj, cell, save=True)
            except self.SheetEditException as ex:
                return Response({'detail': str(ex)}, status=500)
        # Update investigation ontology refs
        if updated_cells:
            try:
                self._update_ontology_refs(
                    inv, self._get_ontology_names(cells=updated_cells)
                )
            except Exception as ex:
                return Response({'detail': str(ex)}, status=500)
        # TODO: Log edits in timeline here, once saving in bulk
        return Response(self.ok_data, status=200)
class SheetRowInsertAjaxView(BaseSheetEditAjaxView):
"""Ajax view for inserting rows into sample sheets"""
@classmethod
def _get_name(cls, node):
    """
    Return non-unique name for a node retrieved from the editor for a new
    row, or None if the name does not exist.

    :param node: Dict
    :return: String or None
    """
    cells = node['cells']
    if cells[0]['obj_cls'] != 'Process':  # Material
        return cells[0]['value']
    # Processes carry their name in a dedicated process_name cell
    for process_cell in cells:
        if process_cell['header_type'] == 'process_name':
            return process_cell['value']
    return None
@classmethod
def _add_node_attr(cls, node_obj, cell):
    """
    Add common node attribute from cell in a new row node.

    Handles JSON attributes (with optional units), performer, perform date
    and extract label headers. Other header types are ignored.

    :param node_obj: GenericMaterial or Process
    :param cell: Dict
    """
    header_name = cell['header_name']
    header_type = cell['header_type']
    if header_type in MODEL_JSON_ATTRS:
        attr = getattr(node_obj, header_type)
        # Check if we have ontology refs and alter value
        attr[header_name] = {
            'value': cls._get_attr_value(
                node_obj, cell, header_name, header_type
            )
        }
        # TODO: Support ontology ref in unit for real
        if node_obj.has_ontology_unit(header_name, header_type):
            # Ontology unit: stored as a dict; only the name comes from
            # the client for now
            attr[header_name]['unit'] = {
                'name': cell.get('unit'),
                'ontology_name': None,
                'accession': None,
            }
        elif (
            node_obj.has_unit(header_name, header_type)
            and cell.get('unit') != ''
        ):
            # Plain string unit
            attr[header_name]['unit'] = cell.get('unit')
        else:
            # No unit or empty unit string
            attr[header_name]['unit'] = None
        logger.debug(
            'Set {}: {} = {}'.format(
                header_type, header_name, attr[header_name]
            )
        )
    elif header_type == 'performer' and cell['value']:
        node_obj.performer = cell['value']
        logger.debug('Set performer: {}'.format(node_obj.performer))
    elif header_type == 'perform_date' and cell['value']:
        # Client sends ISO dates (YYYY-MM-DD)
        node_obj.perform_date = dt.strptime(cell['value'], '%Y-%m-%d')
        logger.debug('Set perform date: {}'.format(cell['value']))
    elif header_type == 'extract_label':
        node_obj.extract_label = cell['value']
@classmethod
def _collapse_process(cls, row_nodes, node, node_idx, comp_table, node_obj):
"""
Collapse process into an existing one.
:param row_nodes: List of dicts from editor UI
:param node: Dict from editor UI
:param comp_table: Study/assay table generated by
SampleSheetTableBuilder (dict)
:param node_obj: Unsaved Process object
:return: UUID of collapsed process (String or None)
"""
# First get the UUIDs of existing nodes in the current row
prev_new_uuid = None
next_new_uuid = None
iter_idx = 0
while iter_idx < node_idx:
if cls._get_name(row_nodes[iter_idx]):
prev_new_uuid = row_nodes[iter_idx]['cells'][0].get('uuid')
iter_idx += 1
if not prev_new_uuid:
logger.debug(
'Collapse: Previous named node in current row not found'
)
return None
iter_idx = node_idx + 1
while not next_new_uuid and iter_idx < len(row_nodes):
if cls._get_name(row_nodes[iter_idx]):
next_new_uuid = row_nodes[iter_idx]['cells'][0].get('uuid')
iter_idx += 1
if not next_new_uuid:
logger.debug('Collapse: Next named node in current row not found')
return None
# HACK: | |
<filename>lettuce/moments.py
"""
Moments and cumulants of the distribution function.
"""
import warnings
import torch
import lettuce
from lettuce.util import LettuceException, InefficientCodeWarning, get_subclasses, ExperimentalWarning
from lettuce.stencils import Stencil, D1Q3, D2Q9, D3Q27
import numpy as np
__all__ = [
"moment_tensor", "get_default_moment_transform", "Moments", "Transform", "D1Q3Transform",
"D2Q9Lallemand", "D2Q9Dellar", "D3Q27Hermite"
]
_ALL_STENCILS = get_subclasses(Stencil, module=lettuce)
def moment_tensor(e, multiindex):
    """
    Compute products of the velocity components raised to the given
    multi-indices, reduced over the last axis.

    Accepts either a torch tensor or a numpy array for ``e`` and returns the
    matching type.
    """
    exponents = multiindex[..., None, :]
    if isinstance(e, torch.Tensor):
        return torch.pow(e, exponents).prod(dim=-1)
    return np.power(e, exponents).prod(axis=-1)
def get_default_moment_transform(lattice):
    """
    Return the canonical moment transform for the lattice's stencil.

    Only D1Q3 and D2Q9 currently have a default transform; any other stencil
    raises a LettuceException.
    """
    if lattice.stencil == D1Q3:
        return D1Q3Transform(lattice)
    elif lattice.stencil == D2Q9:
        return D2Q9Lallemand(lattice)
    raise LettuceException(f"No default moment transform for lattice {lattice}.")
class Moments:
    """Common raw moments of the distribution function for a lattice.

    NOTE(review): implementation is unfinished (see TODO below); only the
    zeroth-order (rho) and first-order (j) moment rows are built.
    """

    def __init__(self, lattice):
        # Zeroth moment: all velocity components raised to the zero index
        self.rho = moment_tensor(lattice.e, lattice.convert_to_tensor(np.zeros(lattice.D)))
        # First moments: one unit multi-index per spatial dimension
        self.j = moment_tensor(lattice.e, lattice.convert_to_tensor(np.eye(lattice.D)))
        # ... TODO ...
# ... TODO ...
class Transform:
    """Base class that defines the signature for all moment (and cumulant)
    transforms.

    The base implementation is the identity transform; subclasses supply
    their own forward/inverse mappings and moment names.
    """

    def __init__(self, lattice, names=None):
        self.lattice = lattice
        if names is None:
            names = [f"m{i}" for i in range(lattice.Q)]
        self.names = names

    def __getitem__(self, moment_names):
        # Accept a single name or a tuple of names; always return a list of
        # indices into self.names
        if isinstance(moment_names, tuple):
            requested = moment_names
        else:
            requested = [moment_names]
        return [self.names.index(name) for name in requested]

    def transform(self, f):
        # Identity: populations are their own "moments" in the base class
        return f

    def inverse_transform(self, m):
        # Identity, mirroring transform()
        return m

    def equilibrium(self, m):
        """A very inefficient and basic implementation of the equilibrium moments.
        """
        warnings.warn(
            "Transform.equilibrium is a poor man's implementation of the moment equilibrium."
            "Please consider implementing the equilibrium moments for your transform by hand.",
            InefficientCodeWarning
        )
        f = self.inverse_transform(m)
        rho = self.lattice.rho(f)
        u = self.lattice.u(f)
        return self.transform(self.lattice.equilibrium(rho, u))
class D1Q3Transform(Transform):
    """Moment transform for the D1Q3 stencil with moments (rho, j, e)."""

    # Rows: density, momentum and second-order moment weights per discrete
    # velocity; column signs indicate the velocities (0, +1, -1)
    matrix = np.array([
        [1, 1, 1],
        [0, 1, -1],
        [0, 1, 1]
    ])
    # Precomputed inverse of ``matrix`` (matrix @ inverse == identity)
    inverse = np.array([
        [1, 0, -1],
        [0, 1 / 2, 1 / 2],
        [0, -1 / 2, 1 / 2]
    ])
    names = ["rho", "j", "e"]
    supported_stencils = [D1Q3]

    def __init__(self, lattice):
        super(D1Q3Transform, self).__init__(lattice, self.names)
        # Move the constant matrices to the lattice's tensor type/device
        self.matrix = self.lattice.convert_to_tensor(self.matrix)
        self.inverse = self.lattice.convert_to_tensor(self.inverse)

    def transform(self, f):
        # Populations -> moments: m = M f
        return self.lattice.mv(self.matrix, f)

    def inverse_transform(self, m):
        # Moments -> populations: f = M^{-1} m
        return self.lattice.mv(self.inverse, m)

    # def equilibrium(self, m):
    #     # TODO
    #     raise NotImplementedError
class D2Q9Dellar(Transform):
    """Dellar-style moment transform for the D2Q9 stencil.

    Moments: density, momentum (jx, jy), second-order moments (Pi_*) and
    non-hydrodynamic modes (N, Jx, Jy).
    """

    # Forward moment matrix; one row per moment in ``names`` order
    matrix = np.array(
        [[1, 1, 1, 1, 1, 1, 1, 1, 1],
         [0, 1, 0, -1, 0, 1, -1, -1, 1],
         [0, 0, 1, 0, -1, 1, 1, -1, -1],
         [-3 / 2, 3, -3 / 2, 3, -3 / 2, 3, 3, 3, 3],
         [0, 0, 0, 0, 0, 9, -9, 9, -9],
         [-3 / 2, -3 / 2, 3, -3 / 2, 3, 3, 3, 3, 3],
         [1, -2, -2, -2, -2, 4, 4, 4, 4],
         [0, -2, 0, 2, 0, 4, -4, -4, 4],
         [0, 0, -2, 0, 2, 4, 4, -4, -4]]
    )
    # Precomputed inverse of ``matrix``
    inverse = np.array(
        [[4 / 9, 0, 0, -4 / 27, 0, -4 / 27, 1 / 9, 0, 0],
         [1 / 9, 1 / 3, 0, 2 / 27, 0, -1 / 27, -1 / 18, -1 / 12, 0],
         [1 / 9, 0, 1 / 3, -1 / 27, 0, 2 / 27, -1 / 18, 0, -1 / 12],
         [1 / 9, -1 / 3, 0, 2 / 27, 0, -1 / 27, -1 / 18, 1 / 12, 0],
         [1 / 9, 0, -1 / 3, -1 / 27, 0, 2 / 27, -1 / 18, 0, 1 / 12],
         [1 / 36, 1 / 12, 1 / 12, 1 / 54, 1 / 36, 1 / 54, 1 / 36, 1 / 24, 1 / 24],
         [1 / 36, -1 / 12, 1 / 12, 1 / 54, -1 / 36, 1 / 54, 1 / 36, -1 / 24, 1 / 24],
         [1 / 36, -1 / 12, -1 / 12, 1 / 54, 1 / 36, 1 / 54, 1 / 36, -1 / 24, -1 / 24],
         [1 / 36, 1 / 12, -1 / 12, 1 / 54, -1 / 36, 1 / 54, 1 / 36, 1 / 24, -1 / 24]]
    )
    # NOTE(review): 'PI_yy' casing is inconsistent with 'Pi_xx'/'Pi_xy';
    # renaming would change __getitem__ lookup keys for callers — confirm
    # before fixing
    names = ['rho', 'jx', 'jy', 'Pi_xx', 'Pi_xy', 'PI_yy', 'N', 'Jx', 'Jy']
    supported_stencils = [D2Q9]

    def __init__(self, lattice):
        super(D2Q9Dellar, self).__init__(
            lattice, self.names
        )
        # Move the constant matrices to the lattice's tensor type/device
        self.matrix = self.lattice.convert_to_tensor(self.matrix)
        self.inverse = self.lattice.convert_to_tensor(self.inverse)

    def transform(self, f):
        # Populations -> moments: m = M f
        return self.lattice.mv(self.matrix, f)

    def inverse_transform(self, m):
        # Moments -> populations: f = M^{-1} m
        return self.lattice.mv(self.inverse, m)

    def equilibrium(self, m):
        """Return equilibrium moments; non-hydrodynamic modes stay zero."""
        warnings.warn("I am not 100% sure if this equilibrium is correct.", ExperimentalWarning)
        meq = torch.zeros_like(m)
        rho = m[0]
        jx = m[1]
        jy = m[2]
        # NOTE(review): the 9/2 and 9 prefactors presumably match the scaling
        # of the Pi rows in ``matrix`` above — confirm against the reference
        Pi_xx = jx * jx / rho * 9 / 2
        Pi_xy = jx * jy / rho * 9
        Pi_yy = jy * jy / rho * 9 / 2
        meq[0] = rho
        meq[1] = jx
        meq[2] = jy
        meq[3] = Pi_xx
        meq[4] = Pi_xy
        meq[5] = Pi_yy
        # Moments 6..8 (N, Jx, Jy) keep their zero equilibrium
        return meq
class D2Q9Lallemand(Transform):
    """Lallemand-Luo moment transform for the D2Q9 stencil.

    Moments: density, momentum (jx, jy), stress-like moments (pxx, pxy),
    energy (e), energy flux (qx, qy) and energy square (eps).
    """

    # Forward moment matrix; one row per moment in ``names`` order
    matrix = np.array(
        [[1, 1, 1, 1, 1, 1, 1, 1, 1],
         [0, 1, 0, -1, 0, 1, -1, -1, 1],
         [0, 0, 1, 0, -1, 1, 1, -1, -1],
         [0, 1, -1, 1, -1, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 1, -1, 1, -1],
         [-4, -1, -1, -1, -1, 2, 2, 2, 2],
         [0, -2, 0, 2, 0, 1, -1, -1, 1],
         [0, 0, -2, 0, 2, 1, 1, -1, -1],
         [4, -2, -2, -2, -2, 1, 1, 1, 1]]
    )
    # Precomputed inverse of ``matrix``
    inverse = np.array(
        [[1 / 9, 0, 0, 0, 0, -1 / 9, 0, 0, 1 / 9],
         [1 / 9, 1 / 6, 0, 1 / 4, 0, -1 / 36, -1 / 6, 0, -1 / 18],
         [1 / 9, 0, 1 / 6, -1 / 4, 0, -1 / 36, 0, -1 / 6, -1 / 18],
         [1 / 9, -1 / 6, 0, 1 / 4, 0, -1 / 36, 1 / 6, 0, -1 / 18],
         [1 / 9, 0, -1 / 6, -1 / 4, 0, -1 / 36, 0, 1 / 6, -1 / 18],
         [1 / 9, 1 / 6, 1 / 6, 0, 1 / 4, 1 / 18, 1 / 12, 1 / 12, 1 / 36],
         [1 / 9, -1 / 6, 1 / 6, 0, -1 / 4, 1 / 18, -1 / 12, 1 / 12, 1 / 36],
         [1 / 9, -1 / 6, -1 / 6, 0, 1 / 4, 1 / 18, -1 / 12, -1 / 12, 1 / 36],
         [1 / 9, 1 / 6, -1 / 6, 0, -1 / 4, 1 / 18, 1 / 12, -1 / 12, 1 / 36]]
    )
    names = ['rho', 'jx', 'jy', 'pxx', 'pxy', 'e', 'qx', 'qy', 'eps']
    supported_stencils = [D2Q9]

    def __init__(self, lattice):
        super(D2Q9Lallemand, self).__init__(
            lattice, self.names
        )
        # Move the constant matrices to the lattice's tensor type/device
        self.matrix = self.lattice.convert_to_tensor(self.matrix)
        self.inverse = self.lattice.convert_to_tensor(self.inverse)

    def transform(self, f):
        # Populations -> moments: m = M f
        return self.lattice.mv(self.matrix, f)

    def inverse_transform(self, m):
        # Moments -> populations: f = M^{-1} m
        return self.lattice.mv(self.inverse, m)

    def equilibrium(self, m):
        """From Lallemand and Luo"""
        warnings.warn("I am not 100% sure if this equilibrium is correct.", ExperimentalWarning)
        meq = torch.zeros_like(m)
        rho = m[0]
        jx = m[1]
        jy = m[2]
        # Model constants; NOTE(review): values presumably taken from the
        # Lallemand & Luo paper referenced in the docstring — confirm there
        c1 = -2
        alpha2 = -8
        alpha3 = 4
        gamma1 = 2 / 3
        gamma2 = 18
        gamma3 = 2 / 3
        gamma4 = -18
        # Equilibria of the non-conserved moments
        e = 1 / 4 * alpha2 * rho + 1 / 6 * gamma2 * (jx ** 2 + jy ** 2)
        eps = 1 / 4 * alpha3 * rho + 1 / 6 * gamma4 * (jx ** 2 + jy ** 2)
        qx = 1 / 2 * c1 * jx
        qy = 1 / 2 * c1 * jy
        pxx = 1 / 2 * gamma1 * (jx ** 2 - jy ** 2)
        pxy = 1 / 2 * gamma3 * (jx * jy)
        meq[0] = rho
        meq[1] = jx
        meq[2] = jy
        meq[3] = pxx
        meq[4] = pxy
        meq[5] = e
        meq[6] = qx
        meq[7] = qy
        meq[8] = eps
        return meq
"""
D3Q19 is not implemented, yet. Also, the moments should be ordered so that 1...D+1 correspond to momentum,
which is no the case for this matrix.
"""
# class D3Q19DHumieres(NaturalMomentTransform):
# matrix = np.array(
# [[1 / 1, 1, 1, 1, 1, 1, 1, 1, 1 / 1, 1, 1, 1, 1, 1, 1, 1, 1 / 1, 1, 1],
# [-30, -11, -11, -11 / 1, -11, -11, -11, 8, 8, 8, 8 / 1, 8, 8, 8, | |
RequestContext, ops_metadata_arn: OpsMetadataArn
) -> DeleteOpsMetadataResult:
raise NotImplementedError
@handler("DeleteParameter")
def delete_parameter(
self, context: RequestContext, name: PSParameterName
) -> DeleteParameterResult:
raise NotImplementedError
@handler("DeleteParameters")
def delete_parameters(
self, context: RequestContext, names: ParameterNameList
) -> DeleteParametersResult:
raise NotImplementedError
@handler("DeletePatchBaseline")
def delete_patch_baseline(
self, context: RequestContext, baseline_id: BaselineId
) -> DeletePatchBaselineResult:
raise NotImplementedError
@handler("DeleteResourceDataSync")
def delete_resource_data_sync(
self,
context: RequestContext,
sync_name: ResourceDataSyncName,
sync_type: ResourceDataSyncType = None,
) -> DeleteResourceDataSyncResult:
raise NotImplementedError
@handler("DeregisterManagedInstance")
def deregister_managed_instance(
self, context: RequestContext, instance_id: ManagedInstanceId
) -> DeregisterManagedInstanceResult:
raise NotImplementedError
@handler("DeregisterPatchBaselineForPatchGroup")
def deregister_patch_baseline_for_patch_group(
self, context: RequestContext, baseline_id: BaselineId, patch_group: PatchGroup
) -> DeregisterPatchBaselineForPatchGroupResult:
raise NotImplementedError
@handler("DeregisterTargetFromMaintenanceWindow")
def deregister_target_from_maintenance_window(
self,
context: RequestContext,
window_id: MaintenanceWindowId,
window_target_id: MaintenanceWindowTargetId,
safe: Boolean = None,
) -> DeregisterTargetFromMaintenanceWindowResult:
raise NotImplementedError
@handler("DeregisterTaskFromMaintenanceWindow")
def deregister_task_from_maintenance_window(
self,
context: RequestContext,
window_id: MaintenanceWindowId,
window_task_id: MaintenanceWindowTaskId,
) -> DeregisterTaskFromMaintenanceWindowResult:
raise NotImplementedError
@handler("DescribeActivations")
def describe_activations(
self,
context: RequestContext,
filters: DescribeActivationsFilterList = None,
max_results: MaxResults = None,
next_token: NextToken = None,
) -> DescribeActivationsResult:
raise NotImplementedError
@handler("DescribeAssociation")
def describe_association(
self,
context: RequestContext,
name: DocumentARN = None,
instance_id: InstanceId = None,
association_id: AssociationId = None,
association_version: AssociationVersion = None,
) -> DescribeAssociationResult:
raise NotImplementedError
@handler("DescribeAssociationExecutionTargets")
def describe_association_execution_targets(
self,
context: RequestContext,
association_id: AssociationId,
execution_id: AssociationExecutionId,
filters: AssociationExecutionTargetsFilterList = None,
max_results: MaxResults = None,
next_token: NextToken = None,
) -> DescribeAssociationExecutionTargetsResult:
raise NotImplementedError
@handler("DescribeAssociationExecutions")
def describe_association_executions(
self,
context: RequestContext,
association_id: AssociationId,
filters: AssociationExecutionFilterList = None,
max_results: MaxResults = None,
next_token: NextToken = None,
) -> DescribeAssociationExecutionsResult:
raise NotImplementedError
@handler("DescribeAutomationExecutions")
def describe_automation_executions(
self,
context: RequestContext,
filters: AutomationExecutionFilterList = None,
max_results: MaxResults = None,
next_token: NextToken = None,
) -> DescribeAutomationExecutionsResult:
raise NotImplementedError
@handler("DescribeAutomationStepExecutions")
def describe_automation_step_executions(
self,
context: RequestContext,
automation_execution_id: AutomationExecutionId,
filters: StepExecutionFilterList = None,
next_token: NextToken = None,
max_results: MaxResults = None,
reverse_order: Boolean = None,
) -> DescribeAutomationStepExecutionsResult:
raise NotImplementedError
@handler("DescribeAvailablePatches")
def describe_available_patches(
self,
context: RequestContext,
filters: PatchOrchestratorFilterList = None,
max_results: PatchBaselineMaxResults = None,
next_token: NextToken = None,
) -> DescribeAvailablePatchesResult:
raise NotImplementedError
@handler("DescribeDocument")
def describe_document(
self,
context: RequestContext,
name: DocumentARN,
document_version: DocumentVersion = None,
version_name: DocumentVersionName = None,
) -> DescribeDocumentResult:
raise NotImplementedError
@handler("DescribeDocumentPermission")
def describe_document_permission(
self,
context: RequestContext,
name: DocumentName,
permission_type: DocumentPermissionType,
max_results: DocumentPermissionMaxResults = None,
next_token: NextToken = None,
) -> DescribeDocumentPermissionResponse:
raise NotImplementedError
@handler("DescribeEffectiveInstanceAssociations")
def describe_effective_instance_associations(
self,
context: RequestContext,
instance_id: InstanceId,
max_results: EffectiveInstanceAssociationMaxResults = None,
next_token: NextToken = None,
) -> DescribeEffectiveInstanceAssociationsResult:
raise NotImplementedError
@handler("DescribeEffectivePatchesForPatchBaseline")
def describe_effective_patches_for_patch_baseline(
self,
context: RequestContext,
baseline_id: BaselineId,
max_results: PatchBaselineMaxResults = None,
next_token: NextToken = None,
) -> DescribeEffectivePatchesForPatchBaselineResult:
raise NotImplementedError
@handler("DescribeInstanceAssociationsStatus")
def describe_instance_associations_status(
self,
context: RequestContext,
instance_id: InstanceId,
max_results: MaxResults = None,
next_token: NextToken = None,
) -> DescribeInstanceAssociationsStatusResult:
raise NotImplementedError
@handler("DescribeInstanceInformation")
def describe_instance_information(
self,
context: RequestContext,
instance_information_filter_list: InstanceInformationFilterList = None,
filters: InstanceInformationStringFilterList = None,
max_results: MaxResultsEC2Compatible = None,
next_token: NextToken = None,
) -> DescribeInstanceInformationResult:
raise NotImplementedError
@handler("DescribeInstancePatchStates")
def describe_instance_patch_states(
self,
context: RequestContext,
instance_ids: InstanceIdList,
next_token: NextToken = None,
max_results: PatchComplianceMaxResults = None,
) -> DescribeInstancePatchStatesResult:
raise NotImplementedError
@handler("DescribeInstancePatchStatesForPatchGroup")
def describe_instance_patch_states_for_patch_group(
self,
context: RequestContext,
patch_group: PatchGroup,
filters: InstancePatchStateFilterList = None,
next_token: NextToken = None,
max_results: PatchComplianceMaxResults = None,
) -> DescribeInstancePatchStatesForPatchGroupResult:
raise NotImplementedError
@handler("DescribeInstancePatches")
def describe_instance_patches(
self,
context: RequestContext,
instance_id: InstanceId,
filters: PatchOrchestratorFilterList = None,
next_token: NextToken = None,
max_results: PatchComplianceMaxResults = None,
) -> DescribeInstancePatchesResult:
raise NotImplementedError
@handler("DescribeInventoryDeletions")
def describe_inventory_deletions(
self,
context: RequestContext,
deletion_id: UUID = None,
next_token: NextToken = None,
max_results: MaxResults = None,
) -> DescribeInventoryDeletionsResult:
raise NotImplementedError
@handler("DescribeMaintenanceWindowExecutionTaskInvocations")
def describe_maintenance_window_execution_task_invocations(
self,
context: RequestContext,
window_execution_id: MaintenanceWindowExecutionId,
task_id: MaintenanceWindowExecutionTaskId,
filters: MaintenanceWindowFilterList = None,
max_results: MaintenanceWindowMaxResults = None,
next_token: NextToken = None,
) -> DescribeMaintenanceWindowExecutionTaskInvocationsResult:
raise NotImplementedError
@handler("DescribeMaintenanceWindowExecutionTasks")
def describe_maintenance_window_execution_tasks(
self,
context: RequestContext,
window_execution_id: MaintenanceWindowExecutionId,
filters: MaintenanceWindowFilterList = None,
max_results: MaintenanceWindowMaxResults = None,
next_token: NextToken = None,
) -> DescribeMaintenanceWindowExecutionTasksResult:
raise NotImplementedError
@handler("DescribeMaintenanceWindowExecutions")
def describe_maintenance_window_executions(
self,
context: RequestContext,
window_id: MaintenanceWindowId,
filters: MaintenanceWindowFilterList = None,
max_results: MaintenanceWindowMaxResults = None,
next_token: NextToken = None,
) -> DescribeMaintenanceWindowExecutionsResult:
raise NotImplementedError
@handler("DescribeMaintenanceWindowSchedule")
def describe_maintenance_window_schedule(
self,
context: RequestContext,
window_id: MaintenanceWindowId = None,
targets: Targets = None,
resource_type: MaintenanceWindowResourceType = None,
filters: PatchOrchestratorFilterList = None,
max_results: MaintenanceWindowSearchMaxResults = None,
next_token: NextToken = None,
) -> DescribeMaintenanceWindowScheduleResult:
raise NotImplementedError
@handler("DescribeMaintenanceWindowTargets")
def describe_maintenance_window_targets(
self,
context: RequestContext,
window_id: MaintenanceWindowId,
filters: MaintenanceWindowFilterList = None,
max_results: MaintenanceWindowMaxResults = None,
next_token: NextToken = None,
) -> DescribeMaintenanceWindowTargetsResult:
raise NotImplementedError
@handler("DescribeMaintenanceWindowTasks")
def describe_maintenance_window_tasks(
self,
context: RequestContext,
window_id: MaintenanceWindowId,
filters: MaintenanceWindowFilterList = None,
max_results: MaintenanceWindowMaxResults = None,
next_token: NextToken = None,
) -> DescribeMaintenanceWindowTasksResult:
raise NotImplementedError
@handler("DescribeMaintenanceWindows")
def describe_maintenance_windows(
self,
context: RequestContext,
filters: MaintenanceWindowFilterList = None,
max_results: MaintenanceWindowMaxResults = None,
next_token: NextToken = None,
) -> DescribeMaintenanceWindowsResult:
raise NotImplementedError
@handler("DescribeMaintenanceWindowsForTarget")
def describe_maintenance_windows_for_target(
self,
context: RequestContext,
targets: Targets,
resource_type: MaintenanceWindowResourceType,
max_results: MaintenanceWindowSearchMaxResults = None,
next_token: NextToken = None,
) -> DescribeMaintenanceWindowsForTargetResult:
raise NotImplementedError
@handler("DescribeOpsItems")
def describe_ops_items(
self,
context: RequestContext,
ops_item_filters: OpsItemFilters = None,
max_results: OpsItemMaxResults = None,
next_token: String = None,
) -> DescribeOpsItemsResponse:
raise NotImplementedError
@handler("DescribeParameters")
def describe_parameters(
self,
context: RequestContext,
filters: ParametersFilterList = None,
parameter_filters: ParameterStringFilterList = None,
max_results: MaxResults = None,
next_token: NextToken = None,
) -> DescribeParametersResult:
raise NotImplementedError
@handler("DescribePatchBaselines")
def describe_patch_baselines(
self,
context: RequestContext,
filters: PatchOrchestratorFilterList = None,
max_results: PatchBaselineMaxResults = None,
next_token: NextToken = None,
) -> DescribePatchBaselinesResult:
raise NotImplementedError
@handler("DescribePatchGroupState")
def describe_patch_group_state(
self, context: RequestContext, patch_group: PatchGroup
) -> DescribePatchGroupStateResult:
raise NotImplementedError
@handler("DescribePatchGroups")
def describe_patch_groups(
self,
context: RequestContext,
max_results: PatchBaselineMaxResults = None,
filters: PatchOrchestratorFilterList = None,
next_token: NextToken = None,
) -> DescribePatchGroupsResult:
raise NotImplementedError
@handler("DescribePatchProperties")
def describe_patch_properties(
self,
context: RequestContext,
operating_system: OperatingSystem,
property: PatchProperty,
patch_set: PatchSet = None,
max_results: MaxResults = None,
next_token: NextToken = None,
) -> DescribePatchPropertiesResult:
raise NotImplementedError
@handler("DescribeSessions")
def describe_sessions(
self,
context: RequestContext,
state: SessionState,
max_results: SessionMaxResults = None,
next_token: NextToken = None,
filters: SessionFilterList = None,
) -> DescribeSessionsResponse:
raise NotImplementedError
@handler("DisassociateOpsItemRelatedItem")
def disassociate_ops_item_related_item(
self,
context: RequestContext,
ops_item_id: OpsItemId,
association_id: OpsItemRelatedItemAssociationId,
) -> DisassociateOpsItemRelatedItemResponse:
raise NotImplementedError
@handler("GetAutomationExecution")
def get_automation_execution(
self, context: RequestContext, automation_execution_id: AutomationExecutionId
) -> GetAutomationExecutionResult:
raise NotImplementedError
@handler("GetCalendarState")
def get_calendar_state(
self,
context: RequestContext,
calendar_names: CalendarNameOrARNList,
at_time: ISO8601String = None,
) -> GetCalendarStateResponse:
raise NotImplementedError
@handler("GetCommandInvocation")
def get_command_invocation(
self,
context: RequestContext,
command_id: CommandId,
instance_id: InstanceId,
plugin_name: CommandPluginName = None,
) -> GetCommandInvocationResult:
raise NotImplementedError
@handler("GetConnectionStatus")
def get_connection_status(
self, context: RequestContext, target: SessionTarget
) -> GetConnectionStatusResponse:
raise NotImplementedError
@handler("GetDefaultPatchBaseline")
def get_default_patch_baseline(
self, context: RequestContext, operating_system: OperatingSystem = None
) -> GetDefaultPatchBaselineResult:
raise NotImplementedError
@handler("GetDeployablePatchSnapshotForInstance")
def get_deployable_patch_snapshot_for_instance(
self,
context: RequestContext,
instance_id: InstanceId,
snapshot_id: SnapshotId,
baseline_override: BaselineOverride = None,
) -> GetDeployablePatchSnapshotForInstanceResult:
raise NotImplementedError
@handler("GetDocument")
def get_document(
self,
context: RequestContext,
name: DocumentARN,
version_name: DocumentVersionName = None,
document_version: DocumentVersion = None,
document_format: DocumentFormat = None,
) -> GetDocumentResult:
raise NotImplementedError
@handler("GetInventory")
def get_inventory(
self,
context: RequestContext,
filters: InventoryFilterList = None,
aggregators: InventoryAggregatorList = None,
result_attributes: ResultAttributeList = None,
next_token: NextToken = None,
max_results: MaxResults = None,
) -> GetInventoryResult:
raise NotImplementedError
@handler("GetInventorySchema")
def get_inventory_schema(
self,
context: RequestContext,
type_name: InventoryItemTypeNameFilter = None,
next_token: NextToken = None,
max_results: GetInventorySchemaMaxResults = None,
aggregator: AggregatorSchemaOnly = None,
sub_type: IsSubTypeSchema = None,
) -> GetInventorySchemaResult:
raise NotImplementedError
@handler("GetMaintenanceWindow")
def get_maintenance_window(
self, context: RequestContext, window_id: MaintenanceWindowId
) -> GetMaintenanceWindowResult:
raise NotImplementedError
@handler("GetMaintenanceWindowExecution")
def get_maintenance_window_execution(
self, context: RequestContext, window_execution_id: MaintenanceWindowExecutionId
) -> GetMaintenanceWindowExecutionResult:
raise NotImplementedError
@handler("GetMaintenanceWindowExecutionTask")
def get_maintenance_window_execution_task(
self,
context: RequestContext,
window_execution_id: MaintenanceWindowExecutionId,
task_id: MaintenanceWindowExecutionTaskId,
) -> GetMaintenanceWindowExecutionTaskResult:
raise NotImplementedError
@handler("GetMaintenanceWindowExecutionTaskInvocation")
def get_maintenance_window_execution_task_invocation(
self,
context: RequestContext,
window_execution_id: MaintenanceWindowExecutionId,
task_id: MaintenanceWindowExecutionTaskId,
invocation_id: MaintenanceWindowExecutionTaskInvocationId,
) -> GetMaintenanceWindowExecutionTaskInvocationResult:
raise NotImplementedError
@handler("GetMaintenanceWindowTask")
def get_maintenance_window_task(
self,
context: RequestContext,
window_id: MaintenanceWindowId,
window_task_id: MaintenanceWindowTaskId,
) -> GetMaintenanceWindowTaskResult:
raise NotImplementedError
@handler("GetOpsItem")
def get_ops_item(self, context: RequestContext, ops_item_id: OpsItemId) -> GetOpsItemResponse:
raise NotImplementedError
@handler("GetOpsMetadata")
def get_ops_metadata(
self,
context: RequestContext,
ops_metadata_arn: OpsMetadataArn,
max_results: GetOpsMetadataMaxResults = None,
next_token: NextToken = None,
) -> GetOpsMetadataResult:
raise NotImplementedError
@handler("GetOpsSummary")
def get_ops_summary(
self,
context: RequestContext,
sync_name: ResourceDataSyncName = None,
filters: OpsFilterList = None,
aggregators: OpsAggregatorList = None,
result_attributes: OpsResultAttributeList = None,
next_token: NextToken = None,
max_results: MaxResults = None,
) -> GetOpsSummaryResult:
raise NotImplementedError
@handler("GetParameter")
def get_parameter(
    self, context: RequestContext, name: PSParameterName, with_decryption: Boolean = None
) -> GetParameterResult:
    """Stub for the SSM ``GetParameter`` operation.

    Raises NotImplementedError until a concrete provider overrides it.
    """
    raise NotImplementedError
@handler("GetParameterHistory")
def get_parameter_history(
    self,
    context: RequestContext,
    name: PSParameterName,
    with_decryption: Boolean = None,
    max_results: MaxResults = None,
    next_token: NextToken = None,
) -> GetParameterHistoryResult:
    """Stub for the SSM ``GetParameterHistory`` operation (paginated via
    ``max_results``/``next_token``).

    Raises NotImplementedError until a concrete provider overrides it.
    """
    raise NotImplementedError
| |
group into
the right (fundamental group) factor in "WF" style.
EXAMPLES::
sage: E = ExtendedAffineWeylGroup(['E',6,1],print_tuple=True); WF = E.WF(); F = E.fundamental_group()
sage: [(x,WF.from_fundamental(x)) for x in F]
[(pi[0], (1, pi[0])), (pi[1], (1, pi[1])), (pi[6], (1, pi[6]))]
"""
return self((self.cartesian_factors()[0].one(),f))
class ExtendedAffineWeylGroupFWElement(GroupSemidirectProduct.Element):
    r"""
    The element class for the "FW" realization.
    """
    def has_descent(self, i, side='right', positive=False):
        r"""
        Return whether ``self`` has descent at `i`.

        INPUT:

        - `i` -- an affine Dynkin index.

        OPTIONAL:

        - ``side`` -- 'left' or 'right' (default: 'right')
        - ``positive`` -- True or False (default: False)

        EXAMPLES::

            sage: E = ExtendedAffineWeylGroup(['A',2,1])
            sage: x = E.FW().an_element(); x
            pi[2] * S0*S1*S2
            sage: [(i, x.has_descent(i)) for i in E.cartan_type().index_set()]
            [(0, False), (1, False), (2, True)]
        """
        # A left descent of x is a right descent of its inverse.
        elt = ~self if side == 'left' else self
        if positive:
            return not elt.has_descent(i, side='right')
        return elt.cartesian_projection(1).has_descent(i, side='right')

    def to_fundamental_group(self):
        r"""
        Return the projection of ``self`` to the fundamental group in the "FW" style.

        EXAMPLES::

            sage: E = ExtendedAffineWeylGroup(['A',2,1])
            sage: x = E.FW().from_translation(E.lattice_basis()[2]); x
            pi[2] * S1*S2
            sage: x.to_fundamental_group()
            pi[2]
        """
        # The fundamental group is the left cartesian factor in "FW".
        return self.cartesian_projection(0)

    def to_affine_weyl_right(self):
        r"""
        Project ``self`` to the right (affine Weyl group) factor in the "FW" style.

        EXAMPLES::

            sage: E = ExtendedAffineWeylGroup(['A',2,1])
            sage: x = E.FW().from_translation(E.lattice_basis()[1]); x
            pi[1] * S2*S1
            sage: x.to_affine_weyl_right()
            S2*S1
        """
        # The affine Weyl group is the right cartesian factor in "FW".
        return self.cartesian_projection(1)

    def action_on_affine_roots(self, beta):
        r"""
        Act by ``self`` on the affine root lattice element ``beta``.

        EXAMPLES::

            sage: E = ExtendedAffineWeylGroup(['A',2,1],affine="s")
            sage: x = E.FW().an_element(); x
            pi[2] * s0*s1*s2
            sage: v = RootSystem(['A',2,1]).root_lattice().an_element(); v
            2*alpha[0] + 2*alpha[1] + 3*alpha[2]
            sage: x.action_on_affine_roots(v)
            alpha[0] + alpha[1]
        """
        # Apply the affine Weyl factor first, then the fundamental group factor.
        fund = self.cartesian_projection(0)
        waff = self.cartesian_projection(1)
        return fund.act_on_affine_lattice(waff.action(beta))
class ExtendedAffineWeylGroupFW(GroupSemidirectProduct, BindableClass):
    r"""
    Extended affine Weyl group, realized as the semidirect product of the affine Weyl group
    by the fundamental group.

    INPUT:

    - `E` -- A parent with realization in :class:`ExtendedAffineWeylGroup_Class`

    EXAMPLES::

        sage: ExtendedAffineWeylGroup(['A',2,1]).FW()
        Extended affine Weyl group of type ['A', 2, 1] realized by Semidirect product of Fundamental group of type ['A', 2, 1] acting on Weyl Group of type ['A', 2, 1] (as a matrix group acting on the root lattice)
    """
    def __init__(self, E):
        r"""
        EXAMPLES::

            sage: FW = ExtendedAffineWeylGroup(['D',3,2]).FW()
            sage: TestSuite(FW).run()
        """
        # The fundamental group acts on the affine Weyl group factor via
        # ``act_on_affine_weyl``; this twist defines the semidirect product
        # with the fundamental group on the left (act_to_right=True).
        def twist(g,w):
            return g.act_on_affine_weyl(w)
        GroupSemidirectProduct.__init__(self, E.fundamental_group(), E.affine_weyl(), twist = twist, act_to_right=True, print_tuple = E._print_tuple, category=E.Realizations())
        self._style = "FW"

    def _repr_(self):
        r"""
        A string representing ``self``.

        EXAMPLES::

            sage: ExtendedAffineWeylGroup(['A',4,2]).FW()._repr_()
            "Extended affine Weyl group of type ['BC', 2, 2] realized by Semidirect product of Fundamental group of type ['BC', 2, 2] acting on Weyl Group of type ['BC', 2, 2] (as a matrix group acting on the root lattice)"
        """
        # Prefix the generic semidirect-product description with the family name.
        return self.realization_of()._repr_() + " realized by " + super(ExtendedAffineWeylGroup_Class.ExtendedAffineWeylGroupFW, self)._repr_()

    @cached_method
    def simple_reflections(self):
        r"""
        Return the family of simple reflections of ``self``.

        EXAMPLES::

            sage: ExtendedAffineWeylGroup(['A',2,1],print_tuple=True).FW().simple_reflections()
            Finite family {0: (pi[0], S0), 1: (pi[0], S1), 2: (pi[0], S2)}
        """
        E = self.realization_of()
        W = E.affine_weyl()
        # Each simple reflection lives in the affine Weyl (right) factor.
        return Family(E.cartan_type().index_set(), lambda i: self.from_affine_weyl(W.simple_reflection(i)))

    def from_affine_weyl(self, w):
        r"""
        Return the image of `w` under the map of the affine Weyl group into the right
        (affine Weyl group) factor in the "FW" style.

        EXAMPLES::

            sage: E = ExtendedAffineWeylGroup(['A',2,1],print_tuple=True)
            sage: E.FW().from_affine_weyl(E.affine_weyl().from_reduced_word([0,2,1]))
            (pi[0], S0*S2*S1)
        """
        # Pair ``w`` with the identity of the fundamental group factor.
        return self((self.cartesian_factors()[0].one(),w))

    @cached_method
    def from_fundamental(self, f):
        r"""
        Return the image of the fundamental group element `f` into ``self``.

        EXAMPLES::

            sage: E = ExtendedAffineWeylGroup(['A',2,1],print_tuple=True)
            sage: E.FW().from_fundamental(E.fundamental_group()(2))
            (pi[2], 1)
        """
        # Pair ``f`` with the identity of the affine Weyl group factor.
        return self((f,self.cartesian_factors()[1].one()))
class ExtendedAffineWeylGroupPvW0Element(GroupSemidirectProduct.Element):
    r"""
    The element class for the "PvW0" realization.
    """
    def has_descent(self, i, side='right', positive=False):
        r"""
        Return whether ``self`` has `i` as a descent.

        INPUT:

        - `i` - an affine Dynkin index

        OPTIONAL:

        - ``side`` -- 'left' or 'right' (default: 'right')
        - ``positive`` -- True or False (default: False)

        EXAMPLES::

            sage: E = ExtendedAffineWeylGroup(['A',4,2])
            sage: w = E.PvW0().from_reduced_word([0,1]); w
            t[Lambda[1]] * s1*s2
            sage: [(i, w.has_descent(i, side='left')) for i in E.cartan_type().index_set()]
            [(0, True), (1, False), (2, False)]
        """
        # Delegate to the "PW0" realization, where descents are implemented.
        PW0 = self.parent().realization_of().PW0()
        return PW0(self).has_descent(i, side=side, positive=positive)

    def dual_action(self, la):
        r"""
        Return the action of ``self`` on an element ``la`` of the dual version of the translation lattice.

        EXAMPLES::

            sage: E = ExtendedAffineWeylGroup(['A',2,1])
            sage: x = E.PvW0().an_element(); x
            t[2*Lambda[1] + 2*Lambda[2]] * s1*s2
            sage: la = E.dual_lattice().an_element(); la
            2*Lambda[1] + 2*Lambda[2]
            sage: x.dual_action(la)
            -2*Lambda[1] + 4*Lambda[2]
        """
        weyl = self.cartesian_projection(1)
        assert la in weyl.parent().domain()
        # Act by the classical Weyl part, then translate.
        translation = self.cartesian_projection(0).value
        return translation + weyl.action(la)

    def to_dual_translation_left(self):
        r"""
        The image of ``self`` under the map that projects to the dual translation lattice
        factor after factoring it to the left as in style "PvW0".

        EXAMPLES::

            sage: s = ExtendedAffineWeylGroup(['A',2,1]).PvW0().simple_reflection(0); s
            t[Lambda[1] + Lambda[2]] * s1*s2*s1
            sage: s.to_dual_translation_left()
            Lambda[1] + Lambda[2]
        """
        # ``.value`` unwraps the multiplicative (GroupExp) form of the lattice element.
        return self.cartesian_projection(0).value

    def to_dual_classical_weyl(self):
        r"""
        Return the image of ``self`` under the homomorphism that projects to the dual classical
        Weyl group factor after rewriting it in either style "PvW0" or "W0Pv".

        EXAMPLES::

            sage: s = ExtendedAffineWeylGroup(['A',2,1]).PvW0().simple_reflection(0); s
            t[Lambda[1] + Lambda[2]] * s1*s2*s1
            sage: s.to_dual_classical_weyl()
            s1*s2*s1
        """
        # The classical Weyl group is the right cartesian factor in "PvW0".
        return self.cartesian_projection(1)

    def is_translation(self):
        r"""
        Return whether ``self`` is a translation element or not.

        EXAMPLES::

            sage: PvW0 = ExtendedAffineWeylGroup(['A',2,1]).PvW0()
            sage: t = PvW0.from_reduced_word([1,2,1,0])
            sage: t.is_translation()
            True
            sage: PvW0.simple_reflection(0).is_translation()
            False
        """
        # Translations are exactly the elements with trivial Weyl factor.
        weyl_part = self.to_dual_classical_weyl()
        return weyl_part.parent().one() == weyl_part
class ExtendedAffineWeylGroupPvW0(GroupSemidirectProduct, BindableClass):
    r"""
    Extended affine Weyl group, realized as the semidirect product of the dual form of the translation lattice
    by the finite Weyl group.

    INPUT:

    - `E` -- A parent with realization in :class:`ExtendedAffineWeylGroup_Class`

    EXAMPLES::

        sage: ExtendedAffineWeylGroup(['A',2,1]).PvW0()
        Extended affine Weyl group of type ['A', 2, 1] realized by Semidirect product of Multiplicative form of Weight lattice of the Root system of type ['A', 2] acted upon by Weyl Group of type ['A', 2] (as a matrix group acting on the weight lattice)
    """
    def __init__(self, E):
        r"""
        EXAMPLES::

            sage: PvW0 = ExtendedAffineWeylGroup(['D',3,2]).PvW0()
            sage: TestSuite(PvW0).run()
        """
        # note that we have to use the multiplicative version of the translation lattice
        # and change the twist to deal with this
        def twist(w,l):
            # ``l`` is a multiplicative (GroupExp) wrapper; act on its additive
            # value and re-wrap the result.
            return E.exp_dual_lattice()(w.action(l.value))
        # act_to_right=False: the Weyl group (right factor) acts on the lattice (left factor).
        GroupSemidirectProduct.__init__(self, E.exp_dual_lattice(), E.dual_classical_weyl(), twist = twist, act_to_right=False, prefix0=E._prefixt, print_tuple = E._print_tuple, category=E.Realizations())
        self._style = "PvW0"

    def _repr_(self):
        r"""
        A string representing ``self``.

        EXAMPLES::

            sage: ExtendedAffineWeylGroup(['A',4,2]).PvW0()._repr_()
            "Extended affine Weyl group of type ['BC', 2, 2] realized by Semidirect product of Multiplicative form of Weight lattice of the Root system of type ['C', 2] acted upon by Weyl Group of type ['C', 2] (as a matrix group acting on the weight lattice)"
        """
        # Prefix the generic semidirect-product description with the family name.
        return self.realization_of()._repr_() + " realized by " + super(ExtendedAffineWeylGroup_Class.ExtendedAffineWeylGroupPvW0, self)._repr_()

    def from_dual_translation(self, la):
        r"""
        Map the dual translation lattice element ``la`` into ``self``.

        EXAMPLES::

            sage: E = ExtendedAffineWeylGroup(['A',2,1], translation="tau", print_tuple = True)
            sage: la = E.dual_lattice().an_element(); la
            2*Lambda[1] + 2*Lambda[2]
            sage: E.PvW0().from_dual_translation(la)
            (tau[2*Lambda[1] + 2*Lambda[2]], 1)
        """
        E = self.realization_of()
        # Wrap ``la`` multiplicatively and pair it with the Weyl identity.
        return self((E.exp_dual_lattice()(la),self.cartesian_factors()[1].one()))

    @cached_method
    def simple_reflections(self):
        r"""
        Return a family for the simple reflections of ``self``.

        EXAMPLES::

            sage: ExtendedAffineWeylGroup(['A',3,1]).PvW0().simple_reflections()
            Finite family {0: t[Lambda[1] + Lambda[3]] * s1*s2*s3*s2*s1, 1: s1, 2: s2, 3: s3}
        """
        E = self.realization_of()
        # Simple reflections are obtained by coercion from the "PW0" realization.
        return Family(E.cartan_type().index_set(), lambda i: self(E.PW0().simple_reflection(i)))

    def from_dual_classical_weyl(self, w):
        r"""
        Return the image of `w` under the homomorphism of the dual form of the classical Weyl group into ``self``.

        EXAMPLES::

            sage: E = ExtendedAffineWeylGroup(['A',3,1],print_tuple=True)
            sage: E.PvW0().from_dual_classical_weyl(E.dual_classical_weyl().from_reduced_word([1,2]))
            (t[0], s1*s2)
        """
        # Pair ``w`` with the identity of the translation lattice factor.
        return self((self.cartesian_factors()[0].one(),w))
class ExtendedAffineWeylGroupW0PvElement(GroupSemidirectProduct.Element):
r"""
The element class for the "W0Pv" realization.
"""
def dual_action(self, la):
    r"""
    Return the action of ``self`` on an element ``la`` of the dual version of the translation lattice.

    EXAMPLES::

        sage: E = ExtendedAffineWeylGroup(['A',2,1])
        sage: x = E.W0Pv().an_element(); x
        s1*s2 * t[2*Lambda[1] + 2*Lambda[2]]
        sage: la = E.dual_lattice().an_element(); la
        2*Lambda[1] + 2*Lambda[2]
        sage: x.dual_action(la)
        -8*Lambda[1] + 4*Lambda[2]
    """
    weyl = self.cartesian_projection(0)
    assert la in weyl.parent().domain()
    # Translate first (right factor), then act by the Weyl part (left factor).
    translation = self.cartesian_projection(1).value
    return weyl.action(translation + la)
def has_descent(self, i, side='right', positive=False):
    r"""
    Return whether ``self`` has `i` as a descent.

    INPUT:

    - `i` - an affine Dynkin index

    OPTIONAL:

    - ``side`` - 'left' or 'right' (default: 'right')
    - ``positive`` - True or False (default: False)

    EXAMPLES::

        sage: w = ExtendedAffineWeylGroup(['A',4,2]).W0Pv().from_reduced_word([0,1]); w
        s1*s2 * t[Lambda[1] - Lambda[2]]
        sage: w.has_descent(0, side='left')
        True
    """
    # Delegate to the "W0P" realization, where descents are implemented.
    W0P = self.parent().realization_of().W0P()
    return W0P(self).has_descent(i, side=side, positive=positive)
def to_dual_translation_right(self):
r"""
The image of ``self`` under the map that projects to the dual translation lattice
factor after factoring it to the right as in style "W0Pv".
EXAMPLES::
sage: | |
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
This provides a small set of effect handlers in NumPyro that are modeled
after Pyro's `poutine <http://docs.pyro.ai/en/stable/poutine.html>`_ module.
For a tutorial on effect handlers more generally, readers are encouraged to
read `Poutine: A Guide to Programming with Effect Handlers in Pyro
<http://pyro.ai/examples/effect_handlers.html>`_. These simple effect handlers
can be composed together or new ones added to enable implementation of custom
inference utilities and algorithms.
**Example**
As an example, we are using :class:`~numpyro.handlers.seed`, :class:`~numpyro.handlers.trace`
and :class:`~numpyro.handlers.substitute` handlers to define the `log_likelihood` function below.
We first create a logistic regression model and sample from the posterior distribution over
the regression parameters using :func:`~numpyro.infer.MCMC`. The `log_likelihood` function
uses effect handlers to run the model by substituting sample sites with values from the posterior
distribution and computes the log density for a single data point. The `log_predictive_density`
function computes the log likelihood for each draw from the joint posterior and aggregates the
results for all the data points, but does so by using JAX's auto-vectorize transform called
`vmap` so that we do not need to loop over all the data points.
.. doctest::
>>> import jax.numpy as jnp
>>> from jax import random, vmap
>>> from jax.scipy.special import logsumexp
>>> import numpyro
>>> import numpyro.distributions as dist
>>> from numpyro import handlers
>>> from numpyro.infer import MCMC, NUTS
>>> N, D = 3000, 3
>>> def logistic_regression(data, labels):
... coefs = numpyro.sample('coefs', dist.Normal(jnp.zeros(D), jnp.ones(D)))
... intercept = numpyro.sample('intercept', dist.Normal(0., 10.))
... logits = jnp.sum(coefs * data + intercept, axis=-1)
... return numpyro.sample('obs', dist.Bernoulli(logits=logits), obs=labels)
>>> data = random.normal(random.PRNGKey(0), (N, D))
>>> true_coefs = jnp.arange(1., D + 1.)
>>> logits = jnp.sum(true_coefs * data, axis=-1)
>>> labels = dist.Bernoulli(logits=logits).sample(random.PRNGKey(1))
>>> num_warmup, num_samples = 1000, 1000
>>> mcmc = MCMC(NUTS(model=logistic_regression), num_warmup=num_warmup, num_samples=num_samples)
>>> mcmc.run(random.PRNGKey(2), data, labels) # doctest: +SKIP
sample: 100%|██████████| 1000/1000 [00:00<00:00, 1252.39it/s, 1 steps of size 5.83e-01. acc. prob=0.85]
>>> mcmc.print_summary() # doctest: +SKIP
mean sd 5.5% 94.5% n_eff Rhat
coefs[0] 0.96 0.07 0.85 1.07 455.35 1.01
coefs[1] 2.05 0.09 1.91 2.20 332.00 1.01
coefs[2] 3.18 0.13 2.96 3.37 320.27 1.00
intercept -0.03 0.02 -0.06 0.00 402.53 1.00
>>> def log_likelihood(rng_key, params, model, *args, **kwargs):
... model = handlers.substitute(handlers.seed(model, rng_key), params)
... model_trace = handlers.trace(model).get_trace(*args, **kwargs)
... obs_node = model_trace['obs']
... return obs_node['fn'].log_prob(obs_node['value'])
>>> def log_predictive_density(rng_key, params, model, *args, **kwargs):
... n = list(params.values())[0].shape[0]
... log_lk_fn = vmap(lambda rng_key, params: log_likelihood(rng_key, params, model, *args, **kwargs))
... log_lk_vals = log_lk_fn(random.split(rng_key, n), params)
... return jnp.sum(logsumexp(log_lk_vals, 0) - jnp.log(n))
>>> print(log_predictive_density(random.PRNGKey(2), mcmc.get_samples(),
... logistic_regression, data, labels)) # doctest: +SKIP
-874.89813
"""
from collections import OrderedDict
import warnings
import numpy as np
from jax import random
import jax.numpy as jnp
import numpyro
from numpyro.distributions.distribution import COERCIONS
from numpyro.primitives import (
_PYRO_STACK,
CondIndepStackFrame,
Messenger,
apply_stack,
plate,
)
from numpyro.util import find_stack_level, not_jax_tracer
# Public effect-handler API of this module (the targets of
# ``from numpyro.handlers import *``).
__all__ = [
    "block",
    "collapse",
    "condition",
    "infer_config",
    "lift",
    "mask",
    "reparam",
    "replay",
    "scale",
    "scope",
    "seed",
    "substitute",
    "trace",
    "do",
]
class trace(Messenger):
    """
    Returns a handler that records the inputs and outputs at primitive calls
    inside `fn`.

    **Example:**

    .. doctest::

       >>> from jax import random
       >>> import numpyro
       >>> import numpyro.distributions as dist
       >>> from numpyro.handlers import seed, trace
       >>> import pprint as pp

       >>> def model():
       ...     numpyro.sample('a', dist.Normal(0., 1.))

       >>> exec_trace = trace(seed(model, random.PRNGKey(0))).get_trace()
       >>> pp.pprint(exec_trace)  # doctest: +SKIP
       OrderedDict([('a',
         {'args': (),
          'fn': <numpyro.distributions.continuous.Normal object at 0x7f9e689b1eb8>,
          'is_observed': False,
          'kwargs': {'rng_key': DeviceArray([0, 0], dtype=uint32)},
          'name': 'a',
          'type': 'sample',
          'value': DeviceArray(-0.20584235, dtype=float32)})])
    """

    def __enter__(self):
        # Install the handler first, then expose a fresh, empty trace.
        super().__enter__()
        self.trace = OrderedDict()
        return self.trace

    def postprocess_message(self, msg):
        # Helper messages (e.g. `control_flow`, `to_data`, `to_funsor`)
        # carry no name and are not recorded.
        if "name" not in msg:
            return
        site_name = msg["name"]
        assert not (
            msg["type"] == "sample" and site_name in self.trace
        ), "all sites must have unique names but got `{}` duplicated".format(site_name)
        self.trace[site_name] = msg.copy()

    def get_trace(self, *args, **kwargs):
        """
        Run the wrapped callable and return the recorded trace.

        :param `*args`: arguments to the callable.
        :param `**kwargs`: keyword arguments to the callable.
        :return: `OrderedDict` containing the execution trace.
        """
        self(*args, **kwargs)
        return self.trace
class replay(Messenger):
    """
    Given a callable `fn` and an execution trace `trace`,
    return a callable which substitutes `sample` calls in `fn` with
    values from the corresponding site names in `trace`.

    :param fn: Python callable with NumPyro primitives.
    :param trace: an OrderedDict containing execution metadata.
    :param guide_trace: deprecated alias for ``trace``.

    **Example:**

    .. doctest::

       >>> from jax import random
       >>> import numpyro
       >>> import numpyro.distributions as dist
       >>> from numpyro.handlers import replay, seed, trace

       >>> def model():
       ...     numpyro.sample('a', dist.Normal(0., 1.))

       >>> exec_trace = trace(seed(model, random.PRNGKey(0))).get_trace()
       >>> print(exec_trace['a']['value'])  # doctest: +SKIP
       -0.20584235
       >>> replayed_trace = trace(replay(model, exec_trace)).get_trace()
       >>> print(exec_trace['a']['value'])  # doctest: +SKIP
       -0.20584235
       >>> assert replayed_trace['a']['value'] == exec_trace['a']['value']
    """

    def __init__(self, fn=None, trace=None, guide_trace=None):
        # Single deprecation branch (the original checked `guide_trace is not None`
        # twice in a row); warn and fall through to the new argument name.
        if guide_trace is not None:
            warnings.warn(
                "`guide_trace` argument is deprecated. Please replace it by `trace`.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
            trace = guide_trace
        assert trace is not None
        self.trace = trace
        super(replay, self).__init__(fn)

    def process_message(self, msg):
        # Overwrite the site's value with the recorded one for matching
        # sample/plate sites; other message types pass through untouched.
        if msg["type"] in ("sample", "plate") and msg["name"] in self.trace:
            msg["value"] = self.trace[msg["name"]]["value"]
class block(Messenger):
    """
    Given a callable `fn`, return another callable that selectively hides
    primitive sites where `hide_fn` returns True from other effect handlers
    on the stack.

    :param callable fn: Python callable with NumPyro primitives.
    :param callable hide_fn: function which when given a dictionary containing
        site-level metadata returns whether it should be blocked.
    :param list hide: list of site names to hide.

    **Example:**

    .. doctest::

       >>> from jax import random
       >>> import numpyro
       >>> from numpyro.handlers import block, seed, trace
       >>> import numpyro.distributions as dist

       >>> def model():
       ...     a = numpyro.sample('a', dist.Normal(0., 1.))
       ...     return numpyro.sample('b', dist.Normal(a, 1.))

       >>> model = seed(model, random.PRNGKey(0))
       >>> block_all = block(model)
       >>> block_a = block(model, lambda site: site['name'] == 'a')
       >>> trace_block_all = trace(block_all).get_trace()
       >>> assert not {'a', 'b'}.intersection(trace_block_all.keys())
       >>> trace_block_a = trace(block_a).get_trace()
       >>> assert 'a' not in trace_block_a
       >>> assert 'b' in trace_block_a
    """

    def __init__(self, fn=None, hide_fn=None, hide=None):
        # Precedence: explicit predicate, then a name list, then "hide everything".
        if hide_fn is not None:
            predicate = hide_fn
        elif hide is not None:
            predicate = lambda msg: msg.get("name") in hide
        else:
            predicate = lambda msg: True
        self.hide_fn = predicate
        super(block, self).__init__(fn)

    def process_message(self, msg):
        # Setting "stop" prevents handlers above this one from seeing the site.
        if self.hide_fn(msg):
            msg["stop"] = True
class collapse(trace):
    """
    EXPERIMENTAL Collapses all sites in the context by lazily sampling and
    attempting to use conjugacy relations. If no conjugacy is known this will
    fail. Code using the results of sample sites must be written to accept
    Funsors rather than Tensors. This requires ``funsor`` to be installed.
    """
    # Class-level cache for the distribution->funsor coercion, created on first use.
    _coerce = None

    def __init__(self, *args, **kwargs):
        # Import funsor lazily so the dependency stays optional.
        if collapse._coerce is None:
            import funsor
            from funsor.distribution import CoerceDistributionToFunsor

            funsor.set_backend("jax")
            collapse._coerce = CoerceDistributionToFunsor("jax")
        super().__init__(*args, **kwargs)

    def process_message(self, msg):
        from funsor.terms import Funsor

        if msg["type"] == "sample":
            if msg["value"] is None:
                # Defer sampling: use the site name as a symbolic placeholder.
                msg["value"] = msg["name"]
            if isinstance(msg["fn"], Funsor) or isinstance(msg["value"], (str, Funsor)):
                # Lazy/symbolic site: stop other handlers from materializing it.
                msg["stop"] = True

    def __enter__(self):
        # Plates already open outside this handler must not be eliminated on exit.
        self.preserved_plates = frozenset(
            h.name for h in _PYRO_STACK if isinstance(h, plate)
        )
        COERCIONS.append(self._coerce)
        return super().__enter__()

    def __exit__(self, exc_type, exc_value, traceback):
        import funsor

        # Undo the coercion pushed in __enter__; it must be the top of the stack.
        _coerce = COERCIONS.pop()
        assert _coerce is self._coerce
        super().__exit__(exc_type, exc_value, traceback)
        if exc_type is not None:
            # Propagate the in-flight exception without emitting a factor.
            return
        # Convert delayed statements to pyro.factor()
        reduced_vars = []
        log_prob_terms = []
        plates = frozenset()
        for name, site in self.trace.items():
            if site["type"] != "sample":
                continue
            if not site["is_observed"]:
                # Latent sites get summed out below.
                reduced_vars.append(name)
            dim_to_name = {f.dim: f.name for f in site["cond_indep_stack"]}
            fn = funsor.to_funsor(site["fn"], funsor.Real, dim_to_name)
            value = site["value"]
            if not isinstance(value, str):
                value = funsor.to_funsor(site["value"], fn.inputs["value"], dim_to_name)
            log_prob_terms.append(fn(value=value))
            plates |= frozenset(f.name for f in site["cond_indep_stack"])
        assert log_prob_terms, "nothing to collapse"
        # Eliminate collapsed latents and any plates opened inside this handler,
        # but keep plates that were already open outside (preserved_plates).
        reduced_plates = plates - self.preserved_plates
        log_prob = funsor.sum_product.sum_product(
            funsor.ops.logaddexp,
            funsor.ops.add,
            log_prob_terms,
            eliminate=frozenset(reduced_vars) | reduced_plates,
            plates=plates,
        )
        # Register the collapsed joint log-probability as a single factor site,
        # named after the first collapsed latent variable.
        name = reduced_vars[0]
        numpyro.factor(name, log_prob.data)
class condition(Messenger):
"""
Conditions unobserved sample sites to values from `data` or `condition_fn`.
Similar to :class:`~numpyro.handlers.substitute` except that it only affects
`sample` sites and changes the `is_observed` property to `True`.
:param fn: Python callable with NumPyro primitives.
:param dict data: dictionary of `numpy.ndarray` values keyed by
site names.
:param condition_fn: callable that takes in a site dict and returns
a numpy array or `None` (in which case the handler has no side
effect).
**Example:**
.. doctest::
>>> from jax import random
>>> import numpyro
>>> from numpyro.handlers | |
# repo: desmoteo/swiss-army-keras
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend_config
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import keras_export
import abc
import contextlib
import functools
import six
import tensorflow as tf
import numpy as np
from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx
from tensorflow.python.distribute import parameter_server_strategy
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.distribute import values as ds_values
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import revived_types
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.util.tf_export import tf_export
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Optimizer
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from typeguard import typechecked
from typing import Union, Callable, List, Dict
# TODO: Remove once https://github.com/tensorflow/tensorflow/issues/44613 is resolved
def _tf_version_tuple(version):
    """Parse the leading "major.minor" of a version string into a tuple of ints.

    Lexicographic string comparison breaks for two-digit components
    (e.g. ``"2.1" < "2.5"`` even though TF 2.10 is newer than 2.5), so the
    comparison below is done numerically instead. Non-digit suffixes such as
    ``"2.10.0-rc1"`` are tolerated.
    """
    parts = []
    for piece in version.split(".")[:2]:
        digits = "".join(ch for ch in piece if ch.isdigit())
        parts.append(int(digits or 0))
    return tuple(parts)


if _tf_version_tuple(tf.__version__) > (2, 5):
    from keras.engine import keras_tensor
else:
    from tensorflow.python.keras.engine import keras_tensor
# Scalar number types accepted by hyper-parameters.
Number = Union[
    float,
    int,
    np.float16,
    np.float32,
    np.float64,
    np.int8,
    np.int16,
    np.int32,
    np.int64,
    np.uint8,
    np.uint16,
    np.uint32,
    np.uint64,
]
# Anything Keras can deserialize into an initializer/regularizer/constraint.
Initializer = Union[None, dict, str, Callable,
                    tf.keras.initializers.Initializer]
Regularizer = Union[None, dict, str, Callable,
                    tf.keras.regularizers.Regularizer]
Constraint = Union[None, dict, str, Callable, tf.keras.constraints.Constraint]
Activation = Union[None, str, Callable]
# NOTE(review): this alias shadows the `Optimizer` class imported from
# tensorflow.keras.optimizers above — confirm the shadowing is intended.
Optimizer = Union[tf.keras.optimizers.Optimizer, str]
# Values convertible to a tensor.
TensorLike = Union[
    List[Union[Number, list]],
    tuple,
    Number,
    np.ndarray,
    tf.Tensor,
    tf.SparseTensor,
    tf.Variable,
    keras_tensor.KerasTensor,
]
FloatTensorLike = Union[tf.Tensor, float, np.float16, np.float32, np.float64]
AcceptableDTypes = Union[tf.DType, np.dtype, type, int, str, None]
def _solve(a, b, c):
    """Return solution of a quadratic minimization.

    The optimization equation is:
        f(a, b, c) = argmin_w{1/2 * a * w^2 + b * w + c * |w|}
    with optimal solution
        w* = -(b - sign(b)*c)/a  if |b| > c  else  w* = 0.

    REQUIRES: Dimensionality of a and b must be same

    Args:
      a: A Tensor
      b: A Tensor
      c: A Tensor with one element.

    Returns:
      A Tensor w, which is solution for the equation
    """
    # Unclipped solution of the smooth part, shifted by the soft-threshold c.
    unclipped = (c * tf.sign(b) - b) / a
    # Zero out coordinates where |b| <= c (the subgradient optimum is 0 there).
    keep = tf.cast(tf.abs(b) > c, dtype=b.dtype)
    return keep * unclipped
@tf.keras.utils.register_keras_serializable(package="Addons")
class Yogi(tf.keras.optimizers.Optimizer):
"""Optimizer that implements the Yogi algorithm in Keras.
See Algorithm 2 of
https://papers.nips.cc/paper/8186-adaptive-methods-for-nonconvex-optimization.pdf.
"""
@typechecked
def __init__(
    self,
    learning_rate: Union[FloatTensorLike, Callable] = 0.01,
    beta1: FloatTensorLike = 0.9,
    beta2: FloatTensorLike = 0.999,
    epsilon: FloatTensorLike = 1e-3,
    l1_regularization_strength: FloatTensorLike = 0.0,
    l2_regularization_strength: FloatTensorLike = 0.0,
    initial_accumulator_value: FloatTensorLike = 1e-6,
    activation: str = "sign",
    name: str = "Yogi",
    **kwargs,
):
    """Construct a new Yogi optimizer.

    Args:
      learning_rate: A Tensor or a floating point value.
        The learning rate.
      beta1: A float value or a constant float tensor.
        The exponential decay rate for the 1st moment estimates.
      beta2: A float value or a constant float tensor.
        The exponential decay rate for the 2nd moment estimates.
      epsilon: A constant trading off adaptivity and noise.
      l1_regularization_strength: A float value, must be greater than or
        equal to zero.
      l2_regularization_strength: A float value, must be greater than or
        equal to zero.
      initial_accumulator_value: The starting value for accumulators.
        Only positive values are allowed.
      activation: Use hard sign or soft tanh to determine the sign.
      name: Optional name for the operations created when applying
        gradients. Defaults to "Yogi".
      **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`,
        `lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue`
        is clip gradients by value, `decay` is included for backward
        compatibility to allow time inverse decay of learning rate. `lr`
        is included for backward compatibility, recommended to use
        `learning_rate` instead.
    """
    super().__init__(name, **kwargs)

    # Hyper-parameters tracked by the Keras optimizer machinery
    # (`lr` kwarg kept for backward compatibility).
    self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
    self._set_hyper("decay", self._initial_decay)
    self._set_hyper("beta_1", beta1)
    self._set_hyper("beta_2", beta2)
    self._set_hyper("epsilon", epsilon)
    self._set_hyper("l1_regularization_strength",
                    l1_regularization_strength)
    self._set_hyper("l2_regularization_strength",
                    l2_regularization_strength)

    # Plain Python copies used for branching without tensor conversion.
    self._beta1 = beta1
    self._activation = activation
    self._initial_accumulator_value = initial_accumulator_value
    self._l1_regularization_strength = l1_regularization_strength
    self._l2_regularization_strength = l2_regularization_strength
def _create_slots(self, var_list):
    """See `tf.train.Optimizer._create_slots()`.

    Creates a second-moment slot "v" for every variable, and a
    first-moment slot "m" only when momentum (beta1 > 0) is used.
    """
    v_init = tf.constant_initializer(self._initial_accumulator_value)
    needs_momentum = self._beta1 > 0.0
    for variable in var_list:
        self.add_slot(variable, "v", v_init)
        if needs_momentum:
            self.add_slot(variable, "m")
def _resource_apply_dense(self, grad, var):
    """See `tf.train.Optimizer._apply_dense()`.

    Applies one Yogi update to a dense variable:
      1. second-moment update  v_t = v + (1-beta2)*sign(g^2 - v)*g^2,
      2. (optional) momentum   m_t = beta1*m + (1-beta1)*g,
      3. descent step with per-coordinate LR, then an L1/L2 prox step.

    The original implementation duplicated the sign/v-update/prox logic in
    both the momentum and no-momentum branches; it is factored out here and
    only the descent direction differs between the branches.
    """
    var_dtype = var.dtype.base_dtype
    lr_t = self._decayed_lr(var_dtype)
    beta1_t = self._get_hyper("beta_1", var_dtype)
    beta2_t = self._get_hyper("beta_2", var_dtype)
    epsilon_t = self._get_hyper("epsilon", var_dtype)
    l1_t = self._get_hyper("l1_regularization_strength", var_dtype)
    l2_t = self._get_hyper("l2_regularization_strength", var_dtype)
    local_step = tf.cast(self.iterations + 1, var_dtype)
    beta1_power = tf.pow(beta1_t, local_step)
    beta2_power = tf.pow(beta2_t, local_step)
    # Bias-corrected learning rate.
    lr = lr_t * tf.sqrt(1 - beta2_power) / (1 - beta1_power)

    # v_t = v + sign(g_t^2-v)(g_t^2)  — shared by both branches.
    v = self.get_slot(var, "v")
    grad2 = grad * grad
    if self._activation == "sign":
        sign = tf.sign(grad2 - v)
    elif self._activation == "tanh":
        # Soft sign: tanh(10*x) approximates sign(x).
        sign = tf.tanh(10 * (grad2 - v))
    else:
        raise NotImplementedError(
            "Activation function can be sign or tanh")
    v_t = v.assign_add(
        (1 - beta2_t) * sign * grad2, use_locking=self._use_locking
    )
    # Yogi effective LR
    per_coord_lr = lr / (tf.sqrt(v_t) + epsilon_t)

    # Descent direction: raw gradient without momentum, first moment otherwise.
    if self._beta1 == 0.0:
        step = grad
        moment_updates = [v_t]
    else:
        # m_t = beta1 * m + (1 - beta1) * g_t
        m = self.get_slot(var, "m")
        m_t = m.assign(
            m * beta1_t + grad * (1 - beta1_t), use_locking=self._use_locking
        )
        step = m_t
        moment_updates = [m_t, v_t]

    # Variable update
    # Step 1: Gradient descent
    new_var = var - per_coord_lr * step
    # Step 2: Prox operator
    if self._l1_regularization_strength > 0:
        new_var = _solve(1 + l2_t * per_coord_lr, -
                         new_var, l1_t * per_coord_lr)
    elif self._l2_regularization_strength > 0:
        new_var = new_var / (1 + l2_t * per_coord_lr)
    # Step 3: Update
    var_update = var.assign(new_var, use_locking=self._use_locking)

    # Create an op that groups all the above operations
    return tf.group(var_update, *moment_updates)
def _resource_apply_sparse(self, grad, var, indices):
"""Applies sparse gradients to a variable.
Args:
grad: A tensor for the `values` of `tf.IndexedSlices`.
var: A `tf.Variable` object.
indices: A tensor for the `indices` of `tf.IndexedSlices`.
Returns:
An op which updates `var` with `grad` and `indices`.
"""
var_dtype = var.dtype.base_dtype
lr_t = self._decayed_lr(var_dtype)
beta1_t = self._get_hyper("beta_1", var_dtype)
beta2_t = self._get_hyper("beta_2", var_dtype)
epsilon_t = self._get_hyper("epsilon", var_dtype)
l1_t = self._get_hyper("l1_regularization_strength", var_dtype)
l2_t = self._get_hyper("l2_regularization_strength", var_dtype)
local_step = tf.cast(self.iterations + 1, var_dtype)
beta1_power = tf.pow(beta1_t, local_step)
beta2_power = tf.pow(beta2_t, local_step)
lr = lr_t * tf.sqrt(1 - beta2_power) / (1 - beta1_power)
update_vs = []
if self._beta1 == 0.0:
# v_t = v + sign(g_t^2-v)(g_t^2)
v = self.get_slot(var, "v")
grad2 = grad * grad
v_slice = tf.gather(v, indices)
if self._activation == "sign":
sign = tf.sign(grad2 - v_slice)
elif self._activation == "tanh":
sign = tf.tanh(10 * (grad2 - v_slice))
else:
raise NotImplementedError(
"Activation function can be sign or tanh")
v_scaled_g_values = v_slice + (1 - beta2_t) * sign * grad2
v_t = self._resource_scatter_update(v, indices, v_scaled_g_values)
v_sqrt = tf.sqrt(v_scaled_g_values)
# Yogi effective LR
per_coord_lr = lr / (v_sqrt + epsilon_t)
# Variable update
# Step 1: Gradient descent
var_slice = tf.gather(var, indices)
new_var = var_slice - per_coord_lr * grad
# Step 2: Prox operator
if self._l1_regularization_strength > 0:
new_var = _solve(1 + l2_t * per_coord_lr, -
new_var, l1_t * per_coord_lr)
elif self._l2_regularization_strength > 0:
new_var = new_var / (1 + l2_t * per_coord_lr)
# Step 3: Update
var_update = self._resource_scatter_update(var, indices, | |
pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: V1Agent
"""
kwargs['_return_http_data_only'] = True
return self.update_agent_with_http_info(owner, agent_uuid, body, **kwargs) # noqa: E501
def update_agent_with_http_info(self, owner, agent_uuid, body, **kwargs):  # noqa: E501
    """Update agent  # noqa: E501

    Synchronous by default; pass ``async_req=True`` for an asynchronous call.

    >>> thread = api.update_agent_with_http_info(owner, agent_uuid, body, async_req=True)
    >>> result = thread.get()

    :param owner: Owner of the namespace (required)
    :type owner: str
    :param agent_uuid: UUID (required)
    :type agent_uuid: str
    :param body: Agent body (required)
    :type body: V1Agent
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(V1Agent, status_code(int), headers(HTTPHeaderDict))
    """
    local_var_params = {'owner': owner, 'agent_uuid': agent_uuid, 'body': body}

    # Transport-level options accepted by every generated endpoint method.
    transport_options = (
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
    )
    accepted = set(local_var_params) | set(transport_options)
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_agent" % key
            )
        local_var_params[key] = val

    # Client-side validation of required parameters.
    if self.api_client.client_side_validation:
        for required in ('owner', 'agent_uuid', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`update_agent`" % required
                )

    path_params = {
        'owner': local_var_params['owner'],
        'agent.uuid': local_var_params['agent_uuid'],
    }
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }

    response_types_map = {
        200: "V1Agent",
        204: "object",
        403: "object",
        404: "object",
    }

    return self.api_client.call_api(
        '/api/v1/orgs/{owner}/agents/{agent.uuid}', 'PUT',
        path_params,
        [],  # query params
        header_params,
        body=local_var_params.get('body'),
        post_params=[],
        files={},
        response_types_map=response_types_map,
        auth_settings=['ApiKey'],
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={},
        _request_auth=local_var_params.get('_request_auth'))
def update_agent_config(self, owner, agent_uuid, body, **kwargs):  # noqa: E501
    """Update agent config  # noqa: E501

    Synchronous by default; pass ``async_req=True`` for an asynchronous call.

    >>> thread = api.update_agent_config(owner, agent_uuid, body, async_req=True)
    >>> result = thread.get()

    :param owner: Owner of the namespace (required)
    :type owner: str
    :param agent_uuid: UUID (required)
    :type agent_uuid: str
    :param body: Agent body (required)
    :type body: V1Agent
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: V1Agent
    """
    # Convenience wrapper: callers get the parsed response object only,
    # never the (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    return self.update_agent_config_with_http_info(
        owner, agent_uuid, body, **kwargs
    )  # noqa: E501
def update_agent_config_with_http_info(self, owner, agent_uuid, body, **kwargs):  # noqa: E501
    """Update agent config  # noqa: E501

    Synchronous by default; pass ``async_req=True`` for an asynchronous call.

    >>> thread = api.update_agent_config_with_http_info(owner, agent_uuid, body, async_req=True)
    >>> result = thread.get()

    :param owner: Owner of the namespace (required)
    :type owner: str
    :param agent_uuid: UUID (required)
    :type agent_uuid: str
    :param body: Agent body (required)
    :type body: V1Agent
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(V1Agent, status_code(int), headers(HTTPHeaderDict))
    """
    local_var_params = {'owner': owner, 'agent_uuid': agent_uuid, 'body': body}

    # Transport-level options accepted by every generated endpoint method.
    transport_options = (
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
    )
    accepted = set(local_var_params) | set(transport_options)
    for key, val in six.iteritems(kwargs):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_agent_config" % key
            )
        local_var_params[key] = val

    # Client-side validation of required parameters.
    if self.api_client.client_side_validation:
        for required in ('owner', 'agent_uuid', 'body'):
            if local_var_params.get(required) is None:
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`update_agent_config`" % required
                )

    path_params = {
        'owner': local_var_params['owner'],
        'agent.uuid': local_var_params['agent_uuid'],
    }
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),
        'Content-Type': self.api_client.select_header_content_type(
            ['application/json']),
    }

    response_types_map = {
        200: "V1Agent",
        204: "object",
        403: "object",
        404: "object",
    }

    return self.api_client.call_api(
        '/api/v1/orgs/{owner}/agents/{agent.uuid}/config', 'PATCH',
        path_params,
        [],  # query params
        header_params,
        body=local_var_params.get('body'),
        post_params=[],
        files={},
        response_types_map=response_types_map,
        auth_settings=['ApiKey'],
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats={},
        _request_auth=local_var_params.get('_request_auth'))
def update_agent_token(self, owner, entity, body, **kwargs): # noqa: E501
"""Update agent token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_agent_token(owner, entity, body, async_req=True)
>>> result = thread.get()
:param owner: Owner of the namespace (required)
:type owner: str
:param entity: Rntity (required)
:type entity: str
:param body: Token body (required)
:type body: V1Token
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: V1Token
"""
kwargs['_return_http_data_only'] = True
| |
# Standard library
import datetime as dt
import time as ti
from collections import deque
from itertools import islice, tee

# Third-party
import matplotlib as mp
import matplotlib.pyplot as plt
import numpy as np
import scipy as sc
import scipy.fftpack
import scipy.signal
from mpl_toolkits.mplot3d import Axes3D
from numba import jit

# Local
import CustomPrincetonSPE_v2 as SPE
def binData(data, pixel_energy, bin_size, start_col=None, end_col=None):
    """Bin row-averaged detector data onto a regular energy grid.

    Parameters
    ----------
    data : 2-D array (frames x pixels)
        Row-averaged spectra, one row per frame.
    pixel_energy : 1-D array
        Energy of each pixel column.  The [::-1] reversals below assume this
        axis is monotonically *decreasing* (np.interp needs ascending x).
        -- assumption from the original code; TODO confirm with calibration.
    bin_size : float
        Width of the output energy bins.
    start_col, end_col : int, optional
        When both are given, clip the pixel axis to [start_col:end_col) first.

    Returns
    -------
    (energyBins, binned) : 1-D bin-center array and the (frames x bins) data.
    """
    if (start_col is not None) and (end_col is not None):
        print("Clipping Cols")
        pixel_energy = pixel_energy[start_col:end_col]
        print("Now %d" % len(pixel_energy))
        data = data[:, start_col:end_col]
    lo = int(min(pixel_energy))
    hi = int(max(pixel_energy)) + 1
    energyBins = np.arange(lo, hi, bin_size)
    # BUG FIX: the old message printed the raw first/last pixel energies,
    # which is wrong whenever the energy axis is descending or clipped;
    # report the range the bins are actually built from.
    print("Made bins from %d,%d,%g" % (lo, hi, bin_size))
    binned = np.zeros([len(data), np.size(energyBins)])
    for frame in range(len(data)):
        binned[frame] = np.interp(energyBins, pixel_energy[::-1],
                                  data[frame][::-1])
    return energyBins, binned
#@jit(nopython=False)
def collectSPE(data_sets, time_file, comment, xmin, xmax, scatter):
    """Gather frames from every SPE file, grouped by pump-probe time point.

    Even frames are pump-on shots, odd frames pump-off.  Files may contain
    different numbers of frames, so frames are accumulated per time point
    across files and only stacked into arrays at the end.  (`comment`,
    `xmin`, `xmax` and `scatter` are accepted for interface compatibility
    but are not used inside this function.)

    Returns (pump_on_total, pump_off_total), each indexed by time point.
    """
    delays = np.genfromtxt(time_file)
    num_times = len(delays)
    on_by_time = {t: [] for t in range(num_times)}
    off_by_time = {t: [] for t in range(num_times)}
    for file_index, data_file in enumerate(data_sets):
        frames = loadSPE(data_file)
        on_frames = frames[::2, :]
        off_frames = frames[1::2, :]
        for t in range(num_times):
            on_slice = on_frames[t::num_times]
            off_slice = off_frames[t::num_times]
            if file_index == 0:
                on_by_time[t] = on_slice
                off_by_time[t] = off_slice
            else:
                # Append so every file's frames land in the same time bucket.
                on_by_time[t] = np.append(on_by_time[t], on_slice, axis=0)
                off_by_time[t] = np.append(off_by_time[t], off_slice, axis=0)
    # Convert back to numpy arrays for further use.
    pump_on_total = np.asarray([on_by_time[t] for t in range(num_times)])
    pump_off_total = np.asarray([off_by_time[t] for t in range(num_times)])
    return pump_on_total, pump_off_total
def prepareTA(data_sets, time_file, comment, xmin=None, xmax=None, scatter_files=None):
    """Average SPE files into a transient-absorption (dA) spectrum and save it.

    Optionally trims the x axis and subtracts pump-scatter frames pixel by
    pixel before the pump-on/pump-off ratio is taken.  The result is written
    to '<comment>_dA.npy'.
    """
    delays = np.loadtxt(time_file)
    num_times = len(delays)
    print('Collecting data files')
    scatter_on = None
    scatter_off = None
    pump_on, pump_off = collectSPE(data_sets, time_file, comment,
                                   xmin, xmax, scatter=False)
    if scatter_files is not None:
        print('Collecting scatter files')
        scatter_on, scatter_off = collectSPE(scatter_files, time_file,
                                             comment, xmin, xmax, scatter=True)
    clean_on = avgCollectedSPE(pump_on, num_times, 'on', xmin, xmax, scatter_on)
    clean_off = avgCollectedSPE(pump_off, num_times, 'off', xmin, xmax, scatter_off)
    avg_on = np.mean(clean_on, axis=1)
    avg_off = np.mean(clean_off, axis=1)
    dA = np.log(avg_on / avg_off)
    print('Saving dA file')
    np.save('%s_dA' % comment, dA)
#@jit(nopython=False)
def avgCollectedSPE(shot_file, num_times, on_off, xmin=None, xmax=None,
                    scatter_file=None, save_shot=False):
    """Robust per-pixel average of the collected shots.

    For every pixel, frames whose absolute deviation from the pixel median
    exceeds 2x the MAD are discarded before averaging.  When scatter frames
    are supplied, their per-time-point frame average is subtracted from the
    cleaned image.  Set save_shot=True to also write 'clean_<on_off>.npy'.
    """
    print('There are %d times!' % num_times)
    print('Robust averaging pump %s shots!' % on_off)
    shots = shot_file
    scatter = None
    if scatter_file is not None:
        scatter = scatter_file
        print('Subtracting pump scatter!')
    if xmin is not None and xmax is not None:
        shots = np.array([shots[t][:, :, xmin:xmax] for t in range(num_times)])
        if scatter is not None:
            scatter = np.array([scatter[t][:, :, xmin:xmax]
                                for t in range(num_times)])
    num_y = len(shots[0][0])
    num_x = len(shots[0][0][0])
    clean_shot = np.zeros((num_times, num_y, num_x))
    for t in range(num_times):
        for row in range(num_y):
            for col in range(num_x):
                series = shots[t][:, row][:, col]
                deviation = np.abs(series - np.median(series))
                mad = np.median(deviation)
                # Guard against MAD == 0 (flat pixel): avoid divide-by-zero.
                score = deviation / (mad if mad else 1.)
                kept = series[score < 2.]
                clean_shot[t][row][col] = np.mean(kept)
        if scatter is not None:
            clean_shot[t] = clean_shot[t] - np.mean(scatter[t], axis=0)
        stamp = dt.datetime.fromtimestamp(ti.time()).strftime('%H:%M:%S')
        print('Time: %d' % t, stamp)
    if save_shot is True:
        np.save('clean_%s' % on_off, clean_shot)
    return clean_shot
def FFTFilter(data, low_cut = None, high_cut = None, order = None, fs = None):
    """Butterworth band-stop filter for suppressing a periodic artefact band.

    Designs a digital Butterworth 'stop' filter between low_cut and high_cut
    (same units as fs), applies it with lfilter, and plots the filter's
    frequency response.

    :param data: 1-D signal to filter.
    :param low_cut: lower edge of the stop band.
    :param high_cut: upper edge of the stop band.
    :param order: Butterworth filter order.
    :param fs: sampling rate used to normalise the band edges.
    :return: the filtered signal.
    """
    # BUG FIX: `import scipy as sc` does not import submodules, so
    # sc.signal raised AttributeError.  Import scipy.signal explicitly.
    import scipy.signal
    nyq = 0.5 * fs
    low = low_cut / nyq
    high = high_cut / nyq
    b, a = scipy.signal.butter(order, [low, high], btype = 'stop')
    # BUG FIX: butter() returns *digital* coefficients by default, so the
    # response must be evaluated with freqz; freqs() treats (b, a) as an
    # analog transfer function and plotted a meaningless curve.
    w, h = scipy.signal.freqz(b, a)
    y = scipy.signal.lfilter(b, a, data)
    plt.figure(103)
    # Convert rad/sample back to the caller's frequency units for plotting.
    # NOTE(review): log10(|h|) is plotted, not 20*log10(|h|); the label says
    # dB but the values are decades -- confirm intent before changing.
    plt.plot(w * nyq / np.pi, np.log10(abs(h)))
    plt.xscale('log')
    plt.title('Butterworth filter frequency response')
    plt.xlabel('Frequency')
    plt.ylabel('Amplitude [dB]')
    plt.margins(0, 0.1)
    plt.grid(which = 'both', axis = 'both')
    plt.show()
    return y
def groundStateAbsOneFile(data_filename, rows, pixel_energy, comment,
                          bckg_file = None, hamp = 0, t = 0, mean = 0,
                          bin_size = 0, xmin = None, xmax = None, sig=None,
                          low_cut = 0, high_cut = 0, order = 0, u=None,
                          window_size = None, ymin = None, ymax = None, raw=False,
                          compare = False, save_harm = False):
    """Compute and plot the ground-state absorption from a single SPE file.

    The file interleaves frames: even frames are blank (reference) shots,
    odd frames are sample shots.  Optionally subtracts a background SPE
    file, bins onto an energy grid, applies Fourier (band-stop), Hampel and
    rolling-mean filters in any combination, saves the filtered absorption
    to '<comment>_dA.txt', and plots the result between xmin and xmax.

    `sig`, `u`, `window_size` and `raw` are accepted but unused here.
    NOTE(review): assumes xmin and xmax are always given (their None
    defaults would make the argmin arithmetic below raise) and, for the
    savetxt step, that both hamp > 0 and mean > 0 -- confirm call sites.
    """
    data = loadSPE(data_filename)
    energy_axis = pixel_energy
    print('Summing over rows!')
    # Collapse the selected detector rows into one spectrum per frame.
    row_sum_data = np.sum(data[:, rows, :], 1)
    if bckg_file is not None:
        bckg = loadSPE(bckg_file)
        row_sum_bckg = np.sum(bckg[:, rows, :], 1)
    if bin_size > 0:
        print('Binning data!')
        energy_axis, row_sum_data = binData(row_sum_data, pixel_energy, bin_size)
        if bckg_file is not None:
            energy_axis, row_sum_bckg = binData(row_sum_bckg, pixel_energy, bin_size)
    # Odd frames hit the sample, even frames are the blank reference.
    samp = row_sum_data[1::2]
    blank = row_sum_data[::2]
    num_sets = len(samp)
    print('There are %d sets!' % num_sets)
    samp_avg = np.mean(samp, 0)
    blank_avg = np.mean(blank, 0)
    if bckg_file is not None:
        print('Subtracting background scatter!')
        bckg_samp = np.mean(row_sum_bckg[1::2], 0)
        bckg_blank = np.mean(row_sum_bckg[::2], 0)
        samp_avg -= bckg_samp
        blank_avg -= bckg_blank
    print('Calculating absorption!')
    # Beer-Lambert absorbance from the sample/blank transmission ratio.
    dA = -np.log10(samp_avg/blank_avg)
    # Each filter stage below defines a new variable (dAF, dAH, ...) only
    # when its flag is set; later code must use the matching combination.
    if order > 0:
        print('Fourier filtering absorption!')
        print('high_cut_max %d' %(len(energy_axis)/2))
        dAF = FFTFilter(dA, low_cut, high_cut, order, len(energy_axis))
    if hamp > 0:
        print('Hampel filtering absorption!')
        dAH = hampelFilt(dA, t, hamp)
        if order > 0:
            dAFH = hampelFilt(dAF, t, hamp)
    if mean > 0:
        print('Mean filtering absorption!')
        dAM = rollingMean(dA, mean)
        if order > 0:
            dAFM = rollingMean(dAF, mean)
        if hamp > 0:
            dAHM = rollingMean(dAH, mean)
            if order > 0:
                dAFHM = rollingMean(dAFH, mean)
    if save_harm is True:
        np.save('Harmonics_samp_%s' %comment, samp_avg)
        np.save('Harmonics_blank_%s' %comment, blank_avg)
    # Snap the requested plot limits onto the nearest energy-axis points.
    idxmin = (np.abs(energy_axis-xmin)).argmin()
    xmin = energy_axis[idxmin]
    idxmax = (np.abs(energy_axis-xmax)).argmin()
    xmax = energy_axis[idxmax]
    print(xmin, xmax)
    # NOTE(review): dAHM is only defined when hamp > 0 AND mean > 0; this
    # line raises NameError for any other flag combination -- confirm.
    np.savetxt('%s_dA.txt' %comment, dAHM[idxmax:idxmin])
    print('Here is your plot!')
    plt.figure(101)
    # compare=True overlays every filter stage that was computed; otherwise
    # only the most-filtered available curve(s) are drawn.
    if compare is True:
        plt.plot(energy_axis, dA, label = 'No filter')
        if order > 0:
            plt.plot(energy_axis, dAF, label = 'Fourier filter only')
        if hamp > 0:
            plt.plot(energy_axis, dAH, label = 'Hampel filter only')
            if order > 0:
                plt.plot(energy_axis, dAFH, label = 'Fourier + Hampel filter')
        if mean > 0:
            plt.plot(energy_axis, dAM, label = 'Mean filter only')
            if order > 0:
                plt.plot(energy_axis, dAFM, label = 'Fourier + mean filter')
            if hamp > 0:
                plt.plot(energy_axis, dAHM, label = 'Hampel + median filter')
                if (hamp > 0 and mean > 0) and (order > 0):
                    plt.plot(energy_axis, dAFHM, label = 'Fourier + Hampel + meadian filter')  # NOTE(review): 'meadian' typo in runtime label, left unchanged
    else:
        if hamp == 0 and mean == 0 and order == 0:
            plt.plot(energy_axis, dA, label = 'No filter')
        if order > 0:
            plt.plot(energy_axis, dAF, label = 'Fourier filter only')
        if hamp > 0 and mean == 0:
            plt.plot(energy_axis, dAH, label = 'Hampel filter only')
        if mean > 0 and hamp == 0:
            plt.plot(energy_axis, dAM, label = 'Mean filter only')
        if mean > 0 and hamp == 0 and order > 0:
            plt.plot(energy_axis, dAFM, label = 'Fourier + mean filter')
        if hamp > 0 and mean == 0 and order > 0:
            plt.plot(energy_axis, dAFH, label = 'Fourier + Hampel filter')
        if hamp > 0 and mean > 0:
            plt.plot(energy_axis, dAHM, label = 'Hampel + mean filter')
            if (hamp > 0 and mean > 0) and (order > 0):
                plt.plot(energy_axis, dAFHM, label = 'Fourier + Hampel + mean filter')
    plt.title('Ground State Absorption of %s' % (comment))
    plt.xlabel('Energy (eV)')
    plt.ylabel('Absobance (OD)')  # NOTE(review): 'Absobance' typo in runtime label, left unchanged
    plt.xlim(xmin, xmax)
    plt.ylim(ymin, ymax)
    plt.legend()
    plt.show()
def groundStateAbsTwoFile(samp_file, blank_file, rows, pixel_energy, comment,
bckg_samp_file = None, bckg_blank_file = None,
bin_size = 0, xmin = None, xmax = None, sig=None,
low_cut = 0, high_cut = 0, order = 0, u=None,
window_size = None, ymin = None, ymax = None, raw=False,
compare = False, hamp = 0, t = 0, mean = 0):
"""Takes in | |
# Repository: gtca/mofax
from .core import mofa_model
from .utils import *
import sys
from warnings import warn
from typing import Union, Optional, List, Iterable, Sequence
from functools import partial
import numpy as np
from scipy.stats import pearsonr
import pandas as pd
from pandas.api.types import is_numeric_dtype
import matplotlib.pyplot as plt
from matplotlib import rcParams
import seaborn as sns
from .utils import maybe_factor_indices_to_factors, _make_iterable, _is_iter
from .plot_utils import _plot_grid
### FACTORS ###
def plot_factors_scatter(
    model: mofa_model,
    x="Factor1",
    y="Factor2",
    dist=False,
    groups=None,
    group_label="group",
    color=None,
    zero_line_x=False,
    zero_line_y=False,
    linewidth=0,
    zero_linewidth=1,
    size=20,
    legend=True,
    legend_prop=None,
    palette=None,
    ncols=4,
    sharex=False,
    sharey=False,
    **kwargs,
):
    """
    Plot samples features such as factor values,
    samples metadata or covariates

    Parameters
    ----------
    model : mofa_model
        Factor model
    x : optional
        Factor or variable (metadata column, feature, or covariate) to plot along X axis (Factor1 by default).
        A list can be provided to create a grid of plots.
    y : optional
        Factor or variable (metadata column, feature, or covariate) to plot along Y axis (Factor2 by default).
        A list can be provided to create a grid of plots.
    dist : optional
        Boolean value if to add marginal distributions or histograms to the scatterplot (jointplot)
    groups : optional
        Subset of groups to consider
    group_label : optional
        Sample (cell) metadata column to be used as group assignment ('group' by default)
    color : optional
        Grouping variable by default, alternatively a feature name can be provided (when no kde).
        If a list of features is provided, they will be plot on one figure.
        Use palette argument to provide a colour map.
    zero_line_x : optional
        Boolean value if to add a vertical X=0 line
    zero_line_y : optional
        Boolean value if to add a horizontal Y=0 line
    linewidth : optional
        Linewidth argument for dots (default is 0)
    zero_linewidth : optional
        Linewidth argument for the zero line (default is 1)
    size : optional
        Size argument for dots (ms for plot, s for jointplot and scatterplot; default is 5)
    legend : optional bool
        If to show the legend (e.g. colours matching groups)
    legend_prop : optional
        The font properties of the legend
    palette : optional
        cmap describing colours, default is None (cubehelix)
        Example palette: seaborn.cubehelix_palette(8, start=.5, rot=-.75. as_cmap=True)
    ncols : optional
        Number of columns if multiple colours are defined (4 by default)
    sharex: optional
        Common X axis across plots on the grid
    sharey: optional
        Common Y axis across plots on the grid
    """
    # Process input arguments: colour by group assignment unless told otherwise.
    if group_label == "group" and color is None:
        color = "group"

    color_vars = maybe_factor_indices_to_factors(_make_iterable(color))

    assert not (
        len(color_vars) > 1 and dist
    ), "When plotting distributions, only one color can be provided"
    assert not (
        (_is_iter(x) or _is_iter(y)) and dist
    ), "When plotting distributions, only scalar x and y axes can be defined"

    if dist:
        # Get values
        z = model.fetch_values([x, y], unique=True)

        # Add group and colour information for variables not fetched yet
        # (renamed from `vars`, which shadowed the builtin).
        extra_vars = [v for v in (group_label, *color_vars) if v not in z.columns]
        if any(extra_vars):
            meta = model.fetch_values(variables=extra_vars)
            z = z.rename_axis("sample").reset_index()
            z = z.set_index("sample").join(meta).reset_index()

        # Subset groups (incl. custom groups of samples)
        if group_label and groups is not None:
            z = z[z[group_label].isin(groups)]

        g = sns.jointplot(
            x=x,
            y=y,
            hue=color_vars[0],
            data=z.sort_values(color_vars[0]) if color_vars[0] else z,
            linewidth=linewidth,
            s=size,
            # BUG FIX: `legend_str` was referenced here but never defined.
            legend=legend,
            palette=palette,
            **kwargs,
        )
        sns.despine(offset=10, trim=True, ax=g.ax_joint)
        # BUG FIX: `x_factor_label`/`y_factor_label` were undefined names;
        # label the axes with the variables actually plotted.
        g.ax_joint.set(xlabel=f"{x} value", ylabel=f"{y} value")
        if legend:
            g.ax_joint.legend(
                bbox_to_anchor=(1.4, 1), loc=2, borderaxespad=0.0, prop=legend_prop
            )
        # BUG FIX: a seaborn JointGrid has no axhline/axvline — draw on the
        # joint axes, and use zero_linewidth (linewidth defaults to 0, which
        # made the zero line invisible).
        if zero_line_y:
            g.ax_joint.axhline(
                0, ls="--", color="lightgrey", linewidth=zero_linewidth, zorder=0
            )
        if zero_line_x:
            g.ax_joint.axvline(
                0, ls="--", color="lightgrey", linewidth=zero_linewidth, zorder=0
            )
        plt.tight_layout()
    else:
        plot = partial(
            sns.scatterplot,
            s=size,
        )
        g = _plot_factors(
            plot,
            model,
            x,
            y,
            color,
            groups=groups,
            group_label=group_label,
            zero_line_x=zero_line_x,
            zero_line_y=zero_line_y,
            linewidth=linewidth,
            zero_linewidth=zero_linewidth,
            legend=legend,
            legend_prop=legend_prop,
            palette=palette,
            ncols=ncols,
            sharex=sharex,
            sharey=sharey,
            **kwargs,
        )
    return g
plot_factors = plot_factors_scatter  # public alias: plot_factors defaults to the scatter view
def _plot_factors(
    plot_func,
    model: mofa_model,
    x="Factor1",
    y="Factor2",
    color=None,
    groups=None,
    group_label="group",
    zero_line_x=False,
    zero_line_y=False,
    linewidth=0,
    zero_linewidth=1,
    legend=True,
    legend_prop=None,
    palette=None,
    ncols=4,
    sharex=False,
    sharey=False,
    **kwargs,
):
    """
    Shared implementation behind the factor scatter-style plots.

    Fetches the requested x/y/colour values from the model, joins sample
    metadata, optionally subsets groups, and delegates drawing of one panel
    per x/y combination to `plot_func` via `_plot_grid`.

    Parameters
    ----------
    plot_func : callable
        Seaborn-style plotting function used for every panel of the grid.
    model : mofa_model
        Factor model
    x : optional
        Factor or variable (metadata column, feature, or covariate) to plot along X axis (Factor1 by default).
        A list can be provided to create a grid of plots.
    y : optional
        Factor or variable (metadata column, feature, or covariate) to plot along Y axis (Factor2 by default).
        A list can be provided to create a grid of plots.
    groups : optional
        Subset of groups to consider
    group_label : optional
        Sample (cell) metadata column to be used as group assignment ('group' by default)
    color : optional
        Grouping variable by default, alternatively a feature name can be provided (when no kde).
        If a list of features is provided, they will be plot on one figure.
        Use palette argument to provide a colour map.
    zero_line_x, zero_line_y : optional
        Boolean values if to add X=0 / Y=0 lines
    linewidth : optional
        Linewidth argument for dots (default is 0)
    zero_linewidth : optional
        Linewidth argument for the zero line (default is 1)
    legend : optional bool
        If to show the legend (e.g. colours matching groups)
    legend_prop : optional
        The font properties of the legend
    palette : optional
        cmap describing colours, default is None (cubehelix)
        Example palette: seaborn.cubehelix_palette(8, start=.5, rot=-.75. as_cmap=True)
    ncols : optional
        Number of columns if multiple colours are defined (4 by default)
    sharex: optional
        Common X axis across plots on the grid
    sharey: optional
        Common Y axis across plots on the grid
    """
    # Process input arguments: colour by group assignment unless told otherwise.
    if group_label == "group" and color is None:
        color = "group"
    color_vars = maybe_factor_indices_to_factors(_make_iterable(color))

    # Accept x and y as lists to create a grid
    x_vars = maybe_factor_indices_to_factors(_make_iterable(x))
    y_vars = maybe_factor_indices_to_factors(_make_iterable(y))

    # Get values
    z = model.fetch_values([*x_vars, *y_vars], unique=True)

    # Add group and colour information for variables not fetched yet
    # (renamed from `vars`, which shadowed the builtin; the old
    # `any([not (not (i)) for i in vars])` is just `any(...)`).
    extra_vars = [v for v in (group_label, *color_vars) if v not in z.columns]
    if any(extra_vars):
        meta = model.fetch_values(variables=extra_vars)
        z = z.rename_axis("sample").reset_index()
        z = z.set_index("sample").join(meta).reset_index()

    # Subset groups (incl. custom groups of samples)
    if group_label and groups is not None:
        z = z[z[group_label].isin(groups)]

    g = _plot_grid(
        plot_func,
        z,
        x,
        y,
        color,
        zero_line_x=zero_line_x,
        zero_line_y=zero_line_y,
        linewidth=linewidth,
        zero_linewidth=zero_linewidth,
        legend=legend,
        legend_prop=legend_prop,
        palette=palette,
        ncols=ncols,
        sharex=sharex,
        sharey=sharey,
        **kwargs,
    )
    return g
def plot_factors_violin(
model: mofa_model,
factors: Union[int, List[int]] = None,
color="group",
violins=True,
dots=False,
zero_line=True,
group_label="group",
groups=None,
linewidth=0,
zero_linewidth=1,
size=20,
legend=True,
legend_prop=None,
palette=None,
alpha=None,
violins_alpha=None,
ncols=4,
sharex=False,
sharey=False,
**kwargs,
):
"""
Plot factor values as violinplots or stripplots (jitter plots)
Parameters
----------
model : mofa_model
Factor model
factors : optional
Index of a factor (or indices of factors) to use (all factors by default)
x : optional
Variable to plot along X axis (factor identity by default)
y : optional
Variable to plot along Y axis (factor value by default)
color : optional
Variable to split & colour dots by (cell group by default)
groups : optional
Subset of groups to consider
group_label : optional
Sample (cell) metadata column to be used as group assignment ('group' by default)
violins : optional
Boolean value if to add violin plots
dots : optional
Boolean value if to add dots to the plots
zero_line : optional
Boolean values if to add Z=0 line
linewidth : optional
Linewidth argument for dots (default is 0)
zero_linewidth : optional
Linewidth argument for the zero line (default is 1)
size : optional
Size argument for dots (ms for plot, s for jointplot and scatterplot; default is 5)
legend : optional bool
If to show the legend (e.g. colours matching groups)
legend_prop : optional
The font properties of the legend
palette : optional
cmap describing colours, default is None (cubehelix)
Example palette: seaborn.cubehelix_palette(8, start=.5, rot=-.75. as_cmap=True)
alpha : optional
Dots opacity
violins_alpha : optional
Violins opacity
ncols : optional
Number of columns if multiple colours are defined (4 by default)
sharex: optional
Common X axis across plots on the grid
sharey: optional
Common Y axis across plots on the grid
"""
# Process input arguments
if group_label == "group" and color is None:
color = "group"
color_vars = | |
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import networkx as nx
from warnings import warn
from PySide2.QtWidgets import *
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtSvg import QSvgGenerator
from GridCal.Engine.Core.multi_circuit import MultiCircuit
from GridCal.Engine.Devices.bus import Bus
from GridCal.Engine.Devices.line import Line
from GridCal.Engine.Devices.dc_line import DcLine
from GridCal.Engine.Devices.transformer import Transformer2W
from GridCal.Engine.Devices.vsc import VSC
from GridCal.Engine.Devices.hvdc_line import HvdcLine
from GridCal.Gui.GridEditorWidget.terminal_item import TerminalItem
from GridCal.Gui.GridEditorWidget.bus_graphics import BusGraphicItem
from GridCal.Gui.GridEditorWidget.line_graphics import LineGraphicItem
from GridCal.Gui.GridEditorWidget.dc_line_graphics import DcLineGraphicItem
from GridCal.Gui.GridEditorWidget.transformer2w_graphics import TransformerGraphicItem
from GridCal.Gui.GridEditorWidget.hvdc_graphics import HvdcGraphicItem
from GridCal.Gui.GridEditorWidget.vsc_graphics import VscGraphicItem
'''
Dependencies:
GridEditor
|
- EditorGraphicsView (Handles the drag and drop)
| |
---- DiagramScene
|
- MultiCircuit (Calculation engine)
|
- Graphic Objects: (BusGraphicItem, BranchGraphicItem, LoadGraphicItem, ...)
The graphic objects need to call the API objects and functions inside the MultiCircuit instance.
To do this the graphic objects call "parent.circuit.<function or object>"
'''
class EditorGraphicsView(QGraphicsView):
    """
    QGraphicsView that displays the DiagramScene, accepts drag & drop of
    library components (buses) and provides wheel zooming.
    """

    def __init__(self, scene, parent=None, editor=None):
        """
        Editor where the diagram is displayed
        @param scene: DiagramScene object
        @param parent: parent widget
        @param editor: GridEditor that owns this view
        """
        QGraphicsView.__init__(self, scene, parent)
        # rubber-band selection of every item intersecting the drag rectangle
        self.setDragMode(QGraphicsView.RubberBandDrag)
        self.setRubberBandSelectionMode(Qt.IntersectsItemShape)
        self.setMouseTracking(True)
        self.setInteractive(True)
        # direct reference kept in addition to self.scene()
        self.scene_ = scene
        self.setRenderHints(QPainter.Antialiasing | QPainter.SmoothPixmapTransform)
        self.editor = editor
        self.setAlignment(Qt.AlignCenter)

    def adapt_map_size(self):
        # NOTE(review): `self.map` is never assigned in this class, so calling
        # this method raises AttributeError — confirm where `map` is supposed
        # to be set before relying on it.
        w = self.size().width()
        h = self.size().height()
        print('EditorGraphicsView size: ', w, h)
        self.map.change_size(w, h)

    def dragEnterEvent(self, event):
        """
        Accept drags that carry a library component name.
        @param event:
        @return:
        """
        if event.mimeData().hasFormat('component/name'):
            event.accept()

    def dragMoveEvent(self, event):
        """
        Move element
        @param event:
        @return:
        """
        if event.mimeData().hasFormat('component/name'):
            event.accept()

    def dropEvent(self, event):
        """
        Create an element
        @param event:
        @return:
        """
        if event.mimeData().hasFormat('component/name'):
            obj_type = event.mimeData().data('component/name')
            elm = None
            # serialise 'Bus' exactly the same way LibraryModel.mimeData does,
            # so the two QByteArray payloads can be compared directly
            data = QByteArray()
            stream = QDataStream(data, QIODevice.WriteOnly)
            stream.writeQString('Bus')
            if obj_type == data:
                name = 'Bus ' + str(len(self.scene_.circuit.buses))
                obj = Bus(name=name)
                elm = BusGraphicItem(diagramScene=self.scene(), name=name, editor=self.editor, bus=obj)
                obj.graphic_obj = elm
                self.scene_.circuit.add_bus(obj)  # weird but it's the only way to have graphical-API communication
            if elm is not None:
                elm.setPos(self.mapToScene(event.pos()))
                self.scene_.addItem(elm)

    def wheelEvent(self, event):
        """
        Zoom with the mouse wheel, anchored under the cursor.
        @param event:
        @return:
        """
        self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)
        # Scale the view / do the zoom
        scale_factor = 1.15
        if event.angleDelta().y() > 0:
            # Zoom in
            self.scale(scale_factor, scale_factor)
        else:
            # Zooming out
            self.scale(1.0 / scale_factor, 1.0 / scale_factor)

    def add_bus(self, bus: Bus, explode_factor=1.0):
        """
        Add bus
        Args:
            bus: GridCal Bus object
            explode_factor: factor to position the node
        Returns:
            the created BusGraphicItem
        """
        elm = BusGraphicItem(diagramScene=self.scene(), name=bus.name, editor=self.editor, bus=bus)
        x = int(bus.x * explode_factor)
        y = int(bus.y * explode_factor)
        elm.setPos(self.mapToScene(QPoint(x, y)))
        self.scene_.addItem(elm)
        return elm
class LibraryModel(QStandardItemModel):
    """
    Item model holding the draggable icons of the library browser.

    Drags started from this model serialise the item's display text under
    the custom MIME type 'component/name' so the drop target can decode it.
    """

    def __init__(self, parent=None):
        """
        @param parent: parent QObject
        """
        QStandardItemModel.__init__(self, parent)

    def mimeTypes(self):
        """MIME types offered by drags started from this model."""
        return ['component/name']

    def mimeData(self, idxs):
        """
        Encode the display text of every valid index into the drag payload.
        @param idxs: indexes being dragged
        @return: QMimeData carrying the serialised component name
        """
        mimedata = QMimeData()
        for index in idxs:
            if not index.isValid():
                continue
            text = self.data(index, Qt.DisplayRole)
            payload = QByteArray()
            writer = QDataStream(payload, QIODevice.WriteOnly)
            writer.writeQString(text)
            mimedata.setData('component/name', payload)
        return mimedata
class DiagramScene(QGraphicsScene):
    """
    Scene hosting the schematic items.  Mouse move/release events are
    forwarded to the owning editor before the default scene handling runs.
    """

    def __init__(self, parent=None, circuit: MultiCircuit = None):
        """
        @param parent: owning GridEditor
        @param circuit: MultiCircuit calculation engine
        """
        super(DiagramScene, self).__init__(parent)
        self.parent_ = parent
        self.circuit = circuit

    def mouseMoveEvent(self, mouseEvent):
        """
        Notify the editor, then run the default scene handling.
        @param mouseEvent:
        @return:
        """
        self.parent_.scene_mouse_move_event(mouseEvent)
        super(DiagramScene, self).mouseMoveEvent(mouseEvent)

    def mouseReleaseEvent(self, mouseEvent):
        """
        Notify the editor, then run the default scene handling.
        @param mouseEvent:
        @return:
        """
        self.parent_.scene_mouse_release_event(mouseEvent)
        super(DiagramScene, self).mouseReleaseEvent(mouseEvent)
class ObjectFactory(object):
    """
    Factory for the 40x40 icons shown in the library browser.

    The pixmap/painter boilerplate shared by both icons is factored
    into a single private helper instead of being duplicated.
    """

    @staticmethod
    def _render_icon(draw):
        """Create a blank 40x40 pixmap, let `draw(painter)` paint on it,
        and return the result wrapped in a QIcon."""
        pixmap = QPixmap(40, 40)
        pixmap.fill()
        painter = QPainter(pixmap)
        draw(painter)
        painter.end()
        return QIcon(pixmap)

    def get_box(self):
        """
        @return: QIcon with a solid black square
        """
        return self._render_icon(lambda p: p.fillRect(0, 0, 40, 40, Qt.black))

    def get_circle(self):
        """
        @return: QIcon with a red circle
        """
        def draw(p):
            p.setBrush(Qt.red)
            p.drawEllipse(0, 0, 40, 40)
        return self._render_icon(draw)
class GridEditor(QSplitter):
def __init__(self, circuit: MultiCircuit):
    """
    Creates the Diagram Editor
    Args:
        circuit: MultiCircuit instance that this editor displays and edits
    """
    QSplitter.__init__(self)
    # store a reference to the multi circuit instance
    self.circuit = circuit
    # nodes distance "explosion" factor
    self.expand_factor = 1.5
    # Widget layout and child widgets:
    self.horizontalLayout = QHBoxLayout(self)
    self.object_editor_table = QTableView(self)
    self.libraryBrowserView = QListView(self)
    self.libraryModel = LibraryModel(self)
    self.libraryModel.setColumnCount(1)
    # factory producing the library icons
    object_factory = ObjectFactory()
    # initialize library of items (currently only the Bus is draggable)
    self.libItems = list()
    self.libItems.append(QStandardItem(object_factory.get_box(), 'Bus'))
    for i in self.libItems:
        self.libraryModel.appendRow(i)
    # set the objects list
    self.object_types = [dev.device_type.value for dev in circuit.objects_with_profiles]
    self.catalogue_types = ['Wires', 'Overhead lines', 'Underground lines', 'Sequence lines', 'Transformers']
    # Actual libraryView object
    self.libraryBrowserView.setModel(self.libraryModel)
    self.libraryBrowserView.setViewMode(self.libraryBrowserView.ListMode)
    self.libraryBrowserView.setDragDropMode(self.libraryBrowserView.DragOnly)
    # create all the schematic objects and replace the existing ones
    self.diagramScene = DiagramScene(self, circuit)  # scene to add to the QGraphicsView
    self.diagramView = EditorGraphicsView(self.diagramScene, parent=self, editor=self)
    # create the grid name editor
    self.frame1 = QFrame()
    self.frame1_layout = QVBoxLayout()
    self.frame1_layout.setContentsMargins(0, 0, 0, 0)
    self.name_editor_frame = QFrame()
    self.name_layout = QHBoxLayout()
    self.name_layout.setContentsMargins(0, 0, 0, 0)
    self.name_label = QLineEdit()
    self.name_label.setText(self.circuit.name)
    self.name_layout.addWidget(self.name_label)
    self.name_editor_frame.setLayout(self.name_layout)
    self.frame1_layout.addWidget(self.name_editor_frame)
    self.frame1_layout.addWidget(self.libraryBrowserView)
    self.frame1.setLayout(self.frame1_layout)
    # Add the two objects into a layout
    splitter2 = QSplitter(self)
    splitter2.addWidget(self.frame1)
    splitter2.addWidget(self.object_editor_table)
    splitter2.setOrientation(Qt.Vertical)
    self.addWidget(splitter2)
    self.addWidget(self.diagramView)
    # factor 1:10
    splitter2.setStretchFactor(0, 1)
    splitter2.setStretchFactor(1, 5)
    # branch currently being drawn by the user (None when idle)
    self.started_branch = None
    # NOTE(review): QSplitter.setStretchFactor expects an int; passing 0.1
    # may raise TypeError under PySide2 — confirm intended values.
    self.setStretchFactor(0, 0.1)
    self.setStretchFactor(1, 2000)
def start_connection(self, port: TerminalItem):
    """
    Begin drawing a branch starting at the given terminal.
    @param port: TerminalItem where the branch starts
    @return:
    """
    branch = LineGraphicItem(fromPort=port, toPort=None, diagramScene=self.diagramScene)
    branch.bus_from = port.parent
    self.started_branch = branch
    port.setZValue(0)
    port.process_callbacks(port.parent.pos() + port.pos())
def scene_mouse_move_event(self, event):
    """
    While a branch is being drawn, keep its free end under the cursor.
    @param event: scene mouse event
    @return:
    """
    if not self.started_branch:
        return
    self.started_branch.setEndPos(event.scenePos())
def _create_branch_for_connection(self):
    """
    Build the API branch object (with its graphic item attached) matching
    the pair of buses joined by self.started_branch:
      * different DC status            -> VSC
      * both DC                        -> DcLine
      * both AC, |Vnom1 - Vnom2| > 1.0 -> Transformer2W
      * both AC, similar voltage       -> Line
    Returns the API object with .graphic_obj set.
    """
    bus_from = self.started_branch.bus_from.api_object
    bus_to = self.started_branch.bus_to.api_object

    if bus_from.is_dc != bus_to.is_dc:
        # different DC status -> VSC
        name = 'VSC ' + str(len(self.circuit.vsc_converters) + 1)
        obj = VSC(bus_from=bus_from, bus_to=bus_to, name=name)
        graphic_type = VscGraphicItem
    elif bus_from.is_dc and bus_to.is_dc:
        # both buses are DC (idiom fix: was `is_dc == True` comparisons)
        name = 'Dc line ' + str(len(self.circuit.dc_lines) + 1)
        obj = DcLine(bus_from=bus_from, bus_to=bus_to, name=name)
        graphic_type = DcLineGraphicItem
    elif abs(bus_from.Vnom - bus_to.Vnom) > 1.0:
        # same DC status, different nominal voltage -> transformer
        name = 'Transformer ' + str(len(self.circuit.transformers2w) + 1)
        obj = Transformer2W(bus_from=bus_from, bus_to=bus_to, name=name)
        graphic_type = TransformerGraphicItem
    else:
        # same DC status, similar voltage -> plain AC line
        name = 'Line ' + str(len(self.circuit.lines) + 1)
        obj = Line(bus_from=bus_from, bus_to=bus_to, name=name)
        graphic_type = LineGraphicItem

    obj.graphic_obj = graphic_type(fromPort=self.started_branch.fromPort,
                                   toPort=self.started_branch.toPort,
                                   diagramScene=self.diagramScene,
                                   branch=obj)
    return obj

def scene_mouse_release_event(self, event):
    """
    Finalize the branch creation if its drawing ends in a terminal
    @param event: scene mouse event
    @return:
    """
    # Clear or finish the started connection
    if self.started_branch:
        pos = event.scenePos()
        items = self.diagramScene.items(pos)  # get the item (the terminal) at the mouse position
        for item in items:
            if type(item) is TerminalItem:  # connect only to terminals
                if item.parent is not self.started_branch.fromPort.parent:  # forbid connecting to itself
                    self.started_branch.setToPort(item)
                    item.hosting_connections.append(self.started_branch)
                    self.started_branch.bus_to = item.parent

                    obj = self._create_branch_for_connection()

                    # add the new object to the circuit
                    self.circuit.add_branch(obj)

                    # update the connection placement
                    obj.graphic_obj.fromPort.update()
                    obj.graphic_obj.toPort.update()

                    # set the connection placement
                    obj.graphic_obj.setZValue(-1)

        # drop the temporary drawing item and release the pointer
        self.started_branch.remove_widget()
        self.started_branch = None
def bigger_nodes(self):
    """
    Expand the grid: multiply bus positions by self.expand_factor.
    Works on the current selection if there is one, otherwise on every
    bus, then updates the view limits to the new bounding box.
    (The previous implementation duplicated the whole loop in both
    branches; the duplication is removed here.)
    @return:
    """
    min_x = sys.maxsize
    min_y = sys.maxsize
    max_x = -sys.maxsize
    max_y = -sys.maxsize

    # Same semantics as before: any non-empty selection (even of non-bus
    # items) restricts the operation to the selection.
    if len(self.diagramScene.selectedItems()) > 0:
        candidates = self.diagramScene.selectedItems()
    else:
        candidates = self.diagramScene.items()

    for item in candidates:
        if type(item) is BusGraphicItem:
            x = item.pos().x() * self.expand_factor
            y = item.pos().y() * self.expand_factor
            item.setPos(QPointF(x, y))

            # apply changes to the API objects
            if item.api_object is not None:
                item.api_object.x = x
                item.api_object.y = y

            max_x = max(max_x, x)
            min_x = min(min_x, x)
            max_y = max(max_y, y)
            min_y = min(min_y, y)

    # set the limits of the view
    self.set_limits(min_x, max_x, min_y, max_y)
def smaller_nodes(self):
"""
Contract the grid
@return:
"""
min_x = sys.maxsize
min_y = sys.maxsize
max_x = -sys.maxsize
max_y = -sys.maxsize
if len(self.diagramScene.selectedItems()) > 0:
# shrink selection only
for item in self.diagramScene.selectedItems():
if type(item) is BusGraphicItem:
x = item.pos().x() / self.expand_factor
y = item.pos().y() / self.expand_factor
item.setPos(QPointF(x, y))
# | |
that are being learned
# for which learning_enabled == True or ONLINE (i.e., not False or AFTER)
# Implementation Note: RecurrentTransferMechanisms are special cased as the
# AutoAssociativeMechanism should be handling learning - not the RTM itself.
if self._is_learning(context) and not isinstance(node, RecurrentTransferMechanism):
projections = set(self.projections).intersection(set(node.path_afferents))
if any([p for p in projections if
any([a for a in p.parameter_ports[MATRIX].mod_afferents
if (hasattr(a, 'learning_enabled') and a.learning_enabled in {True, ONLINE})])]):
context.replace_flag(ContextFlags.PROCESSING, ContextFlags.LEARNING)
# Execute Mechanism
if bin_execute:
_comp_ex.execute_node(node)
else:
if node is not self.controller:
if nested and node in self.get_nodes_by_role(NodeRole.INPUT):
for port in node.input_ports:
port._update(context=context)
node.execute(context=context,
runtime_params=execution_runtime_params,
)
# Reset runtime_params
# Reset any specified for Mechanism
if context.execution_id in node._runtime_params_reset:
for key in node._runtime_params_reset[context.execution_id]:
node._set_parameter_value(key, node._runtime_params_reset[context.execution_id][key],
context)
node._runtime_params_reset[context.execution_id] = {}
# Reset any specified for Mechanism's function
if context.execution_id in node.function._runtime_params_reset:
for key in node.function._runtime_params_reset[context.execution_id]:
node.function._set_parameter_value(
key,
node.function._runtime_params_reset[context.execution_id][key],
context)
node.function._runtime_params_reset[context.execution_id] = {}
# Set execution_phase for node's context back to IDLE
if self._is_learning(context):
context.replace_flag(ContextFlags.LEARNING, ContextFlags.PROCESSING)
context.remove_flag(ContextFlags.PROCESSING)
# EXECUTE A NESTED COMPOSITION ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
elif isinstance(node, Composition):
if bin_execute:
# Invoking nested composition passes data via Python
# structures. Make sure all sources get their latest values
srcs = (proj.sender.owner for proj in node.input_CIM.afferents)
for srnode in srcs:
if srnode is self.input_CIM or srnode in self.nodes:
data_loc = srnode
else:
# Consuming output from another nested composition
assert srnode.composition in self.nodes
assert srnode is srnode.composition.output_CIM
data_loc = srnode.composition
# Set current Python values to LLVM results
data = _comp_ex.extract_frozen_node_output(data_loc)
for op, v in zip(srnode.output_ports, data):
op.parameters.value._set(
v, context, skip_history=True, skip_log=True)
# Update afferent projections and input ports.
node.input_CIM._update_input_ports(context=context)
# Pass outer context to nested Composition
context.composition = node
if ContextFlags.SIMULATION_MODE in context.runmode:
is_simulating = True
context.remove_flag(ContextFlags.SIMULATION_MODE)
else:
is_simulating = False
# Run node-level compiled nested composition
# only if there are no control projections
nested_bin_execute = bin_execute \
if len(node.parameter_CIM.afferents) == 0 else False
ret = node.execute(context=context,
bin_execute=nested_bin_execute)
# Get output info from nested execution
if bin_execute:
# Update result in binary data structure
_comp_ex.insert_node_output(node, ret)
if is_simulating:
context.add_flag(ContextFlags.SIMULATION_MODE)
context.composition = self
# ANIMATE node ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if self._animate is not False and self._animate_unit == COMPONENT:
self._animate_execution(node, context)
# MANAGE INPUTS (for next execution_set)~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# FIX: 6/12/19 Deprecate?
# Handle input clamping
if node in input_nodes:
if clamp_input:
if node in pulse_clamp_inputs:
for input_port in node.input_ports:
# clamp = None --> "turn off" input node
self.input_CIM_ports[input_port][1].parameters.value._set(0, context)
# Store new value generated by node,
# then set back to frozen value for use by other nodes in execution_set
new_values[node] = node.get_output_values(context)
for i in range(len(node.output_ports)):
node.output_ports[i].parameters.value._set(frozen_values[node][i], context,
skip_history=True, skip_log=True)
# Set all nodes to new values
for node in next_execution_set:
for i in range(len(node.output_ports)):
node.output_ports[i].parameters.value._set(new_values[node][i], context,
skip_history=True, skip_log=True)
if self.controller_time_scale == TimeScale.TIME_STEP:
self._execute_controller(
relative_order=AFTER,
bin_execute=bin_execute,
_comp_ex=_comp_ex,
context=context
)
if call_after_time_step:
call_with_pruned_args(call_after_time_step, context=context)
context.remove_flag(ContextFlags.PROCESSING)
#Update matrix parameter of PathwayProjections being learned with learning_enabled==AFTER
from psyneulink.library.compositions.autodiffcomposition import AutodiffComposition
if self._is_learning(context) and not isinstance(self, AutodiffComposition):
context.execution_phase = ContextFlags.LEARNING
for projection in [p for p in self.projections if
hasattr(p, 'has_learning_projection') and p.has_learning_projection]:
matrix_parameter_port = projection.parameter_ports[MATRIX]
if any([lp for lp in matrix_parameter_port.mod_afferents if lp.learning_enabled == AFTER]):
matrix_parameter_port._update(context=context)
context.remove_flag(ContextFlags.LEARNING)
if call_after_pass:
call_with_pruned_args(call_after_pass, context=context)
if self.controller_time_scale == TimeScale.PASS:
self._execute_controller(
relative_order=AFTER,
bin_execute=bin_execute,
_comp_ex=_comp_ex,
context=context
)
# Animate output_CIM
# FIX: NOT SURE WHETHER IT CAN BE LEFT IN PROCESSING AFTER THIS -
# COORDINATE WITH REFACTORING OF PROCESSING/CONTROL CONTEXT
if self._animate is not False and SHOW_CIM in self._animate and self._animate[SHOW_CIM]:
self._animate_execution(self.output_CIM, context)
# FIX: END
# EXECUTE CONTROLLER (if controller_mode == AFTER) ************************************************************
if self.controller_time_scale == TimeScale.TRIAL:
self._execute_controller(
relative_order=AFTER,
bin_execute=bin_execute,
_comp_ex=_comp_ex,
context=context
)
execution_scheduler.get_clock(context)._increment_time(TimeScale.TRIAL)
# REPORT RESULTS ***********************************************************************************************
# Extract result here
if bin_execute:
_comp_ex.freeze_values()
_comp_ex.execute_node(self.output_CIM)
return _comp_ex.extract_node_output(self.output_CIM)
context.execution_phase = ContextFlags.PROCESSING
self.output_CIM.execute(context=context)
context.execution_phase = ContextFlags.IDLE
output_values = []
for port in self.output_CIM.output_ports:
output_values.append(port.parameters.value._get(context))
return output_values
def __call__(self, *args, **kwargs):
    """
    Convenience entry point for a Composition:
      * no arguments   -> return the most recent results (or None);
      * an inputs dict -> dispatch to learn() when any learning pathway's
                          target appears in the inputs, else to run();
      * anything else  -> raise CompositionError.
    """
    if not args and not kwargs:
        if self.results:
            return self.results[-1]
        else:
            return None
    elif (args and isinstance(args[0], dict)) or INPUTS in kwargs:
        from psyneulink.core.compositions.pathway import PathwayRole
        # BUG FIX: inputs may arrive positionally; the previous code always
        # read kwargs[INPUTS], raising KeyError for a positional inputs dict.
        inputs = args[0] if args and isinstance(args[0], dict) else kwargs[INPUTS]
        if any(PathwayRole.LEARNING in p.roles and p.target in inputs for p in self.pathways):
            return self.learn(*args, **kwargs)
        else:
            return self.run(*args, **kwargs)
    else:
        bad_args_str = ", ".join([str(arg) for arg in args] + list(kwargs.keys()))
        raise CompositionError(f"Composition ({self.name}) called with illegal argument(s): {bad_args_str}")
def _update_learning_parameters(self, context):
    """No-op here; appears to be an override hook for subclasses that apply
    learned parameter updates (e.g. autodiff compositions) — TODO confirm."""
    pass
@handle_external_context(fallback_most_recent=True)
def reset(self, values=None, include_unspecified_nodes=True, context=NotImplemented):
    """
    Reset the Composition's stateful nodes.

    values : dict {node: value}, optional
        Reset values; a node absent from the dict is reset with None
        (i.e. its own default reset behavior).
    include_unspecified_nodes : bool
        When False, only nodes present in `values` are reset.
    """
    values = values or {}
    for node in self.stateful_nodes:
        if include_unspecified_nodes or node in values:
            node.reset(values.get(node), context=context)
@handle_external_context(fallback_most_recent=True)
def initialize(self, values=None, include_unspecified_nodes=True, context=None):
    """
    Initializes the values of nodes within cycles. If `include_unspecified_nodes` is True and a value is
    provided for a given node, the node will be initialized to that value. If `include_unspecified_nodes` is
    True and a value is not provided, the node will be initialized to its default value. If
    `include_unspecified_nodes` is False, then all nodes must have corresponding initialization values. The
    `DEFAULT` keyword can be used in lieu of a numerical value to reset a node's value to its default.
    If a context is not provided, the most recent context under which the Composition has executed will be used.
    Arguments
    ----------
    values: Dict { Node: Node Value }
        A dictionary containing key-value pairs of Nodes and initialization values. Nodes within cycles that are
        not included in this dict will be initialized to their default values.
    include_unspecified_nodes: bool
        Specifies whether all nodes within cycles should be initialized or only ones specified in the provided
        values dictionary.
    context: Context
        The context under which the nodes should be initialized. context will be set to
        self.most_recent_execution_context if one is not specified.
    """
    # comp must be initialized from context before cycle values are initialized
    self._initialize_from_context(context, override=False)
    if not values:
        values = {}
    # only CYCLE / FEEDBACK_SENDER nodes can meaningfully be initialized
    cycle_nodes = set(self.get_nodes_by_role(NodeRole.CYCLE) + self.get_nodes_by_role(NodeRole.FEEDBACK_SENDER))
    for node in values:
        if node not in self.nodes:
            raise CompositionError(f"{node.name} "
                                   f"(entry in initialize values arg) is not a node in '{self.name}'")
        if node not in cycle_nodes:
            warnings.warn(
                f"A value is specified for {node.name} of {self.name} in the 'initialize_cycle_values' "
                f"argument of call to run, but it is neither part of a cycle nor a FEEDBACK_SENDER. "
                f"Its value will be overwritten when the node first executes, and therefore not used."
            )
    for node in cycle_nodes:
        if not include_unspecified_nodes:
            if node not in values:
                continue
        provided_value = values.get(node)
        # NOTE(review): `not provided_value == DEFAULT` is presumably written this
        # way (instead of `provided_value != DEFAULT`) to tolerate numpy-array
        # comparison semantics — confirm before "simplifying".
        value = provided_value if not provided_value == DEFAULT else node.defaults.value
        node.initialize(value, context)
def disable_all_history(self):
    """
    When run, disables history tracking for all Parameters of all Components used in the Composition
    (sets every parameter's history_max_length to 0).
    """
    self._set_all_parameter_properties_recursively(history_max_length=0)
def _get_processing_condition_set(self, node):
    """
    Build the run condition for `node`: one EveryNCalls(dep, 1) per member
    of the consideration set *preceding* the one containing the node, plus
    the node's own scheduler condition (or Always() if it has none).
    """
    # Find the consideration set just before the one holding `node`.
    # If the node is in the first set (or in none), this mirrors the
    # original behavior exactly.
    previous_group = []
    for consideration_set in self.scheduler.consideration_queue:
        if node in consideration_set:
            break
        previous_group = consideration_set
    # NOTE: This is not ideal; we don't need to depend on the entire
    # previous group — only our real dependencies.
    conditions = [EveryNCalls(member, 1) for member in previous_group]
    if node in self.scheduler.conditions:
        conditions.append(self.scheduler.conditions[node])
    else:
        conditions.append(Always())
    return All(*conditions)
def _input_matches_variable(self, input_value, var):
    """
    Classify how `input_value` matches the shape of `var`:
      'homogeneous'   - identical 2-D shape;
      'heterogeneous' - `var` is ragged (1-D of sequences) and every
                        corresponding entry has the same length;
      False           - otherwise.
    """
    var_shape = convert_to_np_array(var).shape
    # input_value ports are uniform
    if convert_to_np_array(input_value, dimension=2).shape == var_shape:
        return "homogeneous"
    # input_value ports have different lengths
    elif len(var_shape) == 1 and isinstance(var[0], (list, np.ndarray)):
        # BUG FIX: guard against a length mismatch — previously a shorter
        # input was accepted after only a prefix check, and a longer one
        # raised IndexError on var[i].
        if len(input_value) != len(var):
            return False
        for i in range(len(input_value)):
            if len(input_value[i]) != len(var[i]):
                return False
        return "heterogeneous"
    return False
def _is_learning(self, context):
    """Returns true if the composition can learn in the given context:
    learning is not disabled for this Composition AND the context's run
    mode includes LEARNING_MODE."""
    return (not self.disable_learning) and (ContextFlags.LEARNING_MODE in context.runmode)
def _build_variable_for_input_CIM(self, inputs):
    """
    Assign values from input dictionary to the InputPorts of the Input CIM, then execute the Input CIM

    For every InputPort of the input CIM, find the origin-node InputPort it
    maps to (via self.input_CIM_ports) and take the matching entry from
    `inputs`, falling back to the origin node's default variable.
    """
    variable = []
    for cim_input_port in self.input_CIM.input_ports:
        # locate the origin-node port whose CIM-side port is this one
        for origin_port, mapped_ports in self.input_CIM_ports.items():
            if mapped_ports[0] == cim_input_port:
                owner = origin_port.owner
                index = owner.input_ports.index(origin_port)
                if isinstance(owner, CompositionInterfaceMechanism):
                    # nested composition: address the composition itself
                    index = owner.input_ports.index(origin_port)
                    owner = owner.composition
                if owner in inputs:
                    variable.append(inputs[owner][index])
                else:
                    variable.append(owner.defaults.variable[index])
    return variable
def _assign_execution_ids(self, context=None):
"""
assigns the same execution id to each Node in the composition's processing graph as well as the CIMs.
he execution id is either specified in the user's | |
# LC709203F.py
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""LC709203F: Smart LiB Gauge Battery Fuel Gauge LSI For 1‐Cell Lithium‐ion/Polymer (Li+)"""
__author__ = "ChISL"
__copyright__ = "TBD"
__credits__ = ["ON Semiconductor"]
__license__ = "TBD"
__version__ = "0.1"
__maintainer__ = "https://chisl.io"
__email__ = "<EMAIL>"
__status__ = "Test"
from LC709203F_constants import *
# name: LC709203F
# description: Smart LiB Gauge Battery Fuel Gauge LSI For 1‐Cell Lithium‐ion/Polymer (Li+)
# manuf: ON Semiconductor
# version: 0.1
# url: http://www.onsemi.com/pub/Collateral/LC709203F-D.PDF
# date: 2017-12-29
# Derive from this class and implement read and write
class LC709203F_Base:
"""Smart LiB Gauge Battery Fuel Gauge LSI For 1‐Cell Lithium‐ion/Polymer (Li+)"""
# Register BEFORE_RSOC
# Executes RSOC initialization with sampled maximum voltage when 'hAA55 is set.
# This LSI obtains Open Circuit Voltage (OCV) reading
# 10 ms after Power-on reset to initialize RSOC (See
# Figure 7).
# Or the LSI can be forced to initialize RSOC by sending the
# Before RSOC Command (0×04 = AA55) or the Initial
# RSOC Command (0×07 = AA55). The accuracy of the
# Initialization requires the OCV reading to be taken with
# minimal load or charge, under 0.025C, on the battery. (i.e.
# less than 75 mA for 3000 mAh design capacity battery.).
# The LSI initializes RSOC by the maximum voltage
# between initialize after Power-on reset and setting the
# command when the Before RSOC command is written. (See
# Figure 8).
def setBEFORE_RSOC(self, val):
    """Write BEFORE_RSOC (16-bit): 0xAA55 forces RSOC initialization from
    the sampled maximum voltage (datasheet Figure 8)."""
    word_size = 16
    self.write(REG.BEFORE_RSOC, val, word_size)
def getBEFORE_RSOC(self):
    """Read back the BEFORE_RSOC register (16-bit)."""
    word_size = 16
    return self.read(REG.BEFORE_RSOC, word_size)
# Bits BEFORE_RSOC
# Register THERMISTOR_B
# Sets B−constant of the thermistor to be measured.
# Units: 1K
# Refer to
# the specification sheet of the thermistor for the set value to
# use.
def setTHERMISTOR_B(self, val):
    """Write THERMISTOR_B (16-bit): the B-constant of the attached
    thermistor in 1 K units; see the thermistor's datasheet for the value."""
    word_size = 16
    self.write(REG.THERMISTOR_B, val, word_size)
def getTHERMISTOR_B(self):
    """Read the THERMISTOR_B register (16-bit, 1 K units)."""
    word_size = 16
    return self.read(REG.THERMISTOR_B, word_size)
# Bits THERMISTOR_B
# Register INITIAL_RSOC
# Executes RSOC initialization when 0xAA55 is set.
# The LSI can be forced to initialize RSOC by sending the Before RSOC Command (0×04 = AA55)
# or the Initial RSOC Command (0×07 = AA55).
# The LSI initializes RSOC by the measured voltage at that time when the Initial RSOC command
# is written. (See Figure 9). The maximum time to initialize RSOC after the command is
# written is 1.5 ms.
def setINITIAL_RSOC(self, val):
    """Write INITIAL_RSOC (16-bit): 0xAA55 forces RSOC initialization from
    the voltage measured at write time (datasheet Figure 9, <=1.5 ms)."""
    word_size = 16
    self.write(REG.INITIAL_RSOC, val, word_size)
def getINITIAL_RSOC(self):
    """Read back the INITIAL_RSOC register (16-bit)."""
    word_size = 16
    return self.read(REG.INITIAL_RSOC, word_size)
# Bits INIT_RSOC
# Register CELL_TEMPERATURE
# Displays Cell Temperature, 0x0000 to 0xFFFF.
# Units: 0.1K (0.0°C = 0x0AAC)
# This register contains the cell temperature from −20_C (0×09E4) to +60_C (0×0D04) measured in 0.1_C units.
# In the Thermistor mode (0×16 = 01) the LSI measures the attached thermistor and loads the temperature into the Cell Temperature register. In the Thermistor mode, the thermistor shall be connected to the LSI as shown in Figure 2. The temperature is measured by having TSW pin to provide power into the thermistor and TSENSE pin to sense the output voltage from the thermistor. Temperature measurement timing is controlled by the LSI, and the power to the thermistor is not supplied for other reasons except to measure the temperature.
def setCELL_TEMPERATURE(self, val):
    """Write CELL_TEMPERATURE (16-bit, 0.1 K units; 0.0 deg C = 0x0AAC).
    In thermistor mode the LSI fills this register itself."""
    word_size = 16
    self.write(REG.CELL_TEMPERATURE, val, word_size)
def getCELL_TEMPERATURE(self):
    """Read CELL_TEMPERATURE (16-bit, 0.1 K units; 0.0 deg C = 0x0AAC)."""
    word_size = 16
    return self.read(REG.CELL_TEMPERATURE, word_size)
# Bits CELL_TEMPERATURE
# Register CELL_TEMPERATURE
# Sets Cell Temperature in I2C mode, h09E4 to 'h0D04.
# Units: 0.1K (0.0°C = 0x0AAC)
# This register contains the cell temperature from −20_C (0×09E4) to +60_C (0×0D04) measured in 0.1_C units.
# In the I2C mode (0×16 = 00) the temperature is provided by the host processor. During discharge/charge the register should be updates when the temperature changes more than 1_C
def setCELL_TEMPERATURE(self, val):
    """Write CELL_TEMPERATURE (16-bit): in I2C mode (0x16 = 00) the host
    supplies the temperature, range 0x09E4..0x0D04 (0.1 K units).
    NOTE(review): this redefinition shadows the identical method above
    (the thermistor-mode doc variant); only this one survives at class
    creation — consider removing one of the two."""
    word_size = 16
    self.write(REG.CELL_TEMPERATURE, val, word_size)
def getCELL_TEMPERATURE(self):
    """Read CELL_TEMPERATURE (16-bit, 0.1 K units).
    NOTE(review): duplicate of the getter defined above — only this
    definition survives at class creation."""
    word_size = 16
    return self.read(REG.CELL_TEMPERATURE, word_size)
# Bits CELL_TEMPERATURE
# Register CELL_VOLTAGGE
# Displays Cell Voltage, 'h0000 to 'hFFFF.
# Units: 1 mV Displays Cell Voltage
# This register contains the voltage on VDD 1 mV units.
def setCELL_VOLTAGGE(self, val):
    """Write the cell-voltage register (1 mV units).

    The register's documented range is 0x0000-0xFFFF, i.e. a 16-bit word
    like the device's other registers; the previous width of 8 would have
    truncated the value, so it is fixed to 16 here.
    NOTE(review): the 'VOLTAGGE' typo is kept to stay compatible with the
    REG constants and existing callers.
    """
    self.write(REG.CELL_VOLTAGGE, val, 16)
def getCELL_VOLTAGGE(self):
    """Read the cell-voltage register (1 mV units).

    Documented range is 0x0000-0xFFFF (a 16-bit word); the previous
    8-bit read width would have dropped the high byte, so it is fixed
    to 16 here.
    """
    return self.read(REG.CELL_VOLTAGGE, 16)
# Bits CELL_VOLTAGGE
# Register CURRENT_DIRECTION
# Selects Auto/Charge/Discharge mode
def setCURRENT_DIRECTION(self, val):
    """Write CURRENT_DIRECTION (16-bit): selects Auto/Charge/Discharge
    RSOC-reporting mode."""
    word_size = 16
    self.write(REG.CURRENT_DIRECTION, val, word_size)
def getCURRENT_DIRECTION(self):
    """Read CURRENT_DIRECTION (16-bit): current Auto/Charge/Discharge mode."""
    word_size = 16
    return self.read(REG.CURRENT_DIRECTION, word_size)
# Bits CURRENT_DIRECTION
# Register APA
# Adjustment Pack Application: Sets Parasitic impedance, 'h0000 to 'h00FF.
# Units: 1 mΩ
# This register is used to control the reporting of RSOC. In Auto mode the RSOC is reported as it increases or decreases. In Charge mode the RSOC is not permitted to decrease. In Discharge mode the RSOC is not permitted to increase.
# With consideration of capacity influence by temperature, we recommend operating in Auto because RSOC is affected by the cell temperature. A warm cell has more capacity than a cold cell. Be sure not to charge in the Discharge mode and discharge in the Charge mode; it will create an error.
# An example of RSOC reporting is shown in Figures 10 and 11.
def setAPA(self, val):
    """Write APA (Adjustment Pack Application): parasitic impedance,
    0x0000-0x00FF in 1 mOhm units.
    NOTE(review): written with an 8-bit width while most sibling registers
    use 16 — the documented range fits in 8 bits, but confirm against the
    datasheet's word-oriented protocol."""
    width = 8
    self.write(REG.APA, val, width)
def getAPA(self):
    """Read APA (parasitic impedance, 1 mOhm units).
    NOTE(review): 8-bit width — confirm against the datasheet's
    word-oriented protocol."""
    width = 8
    return self.read(REG.APA, width)
# Bits APA
# Register APT
# Adjustment Pack Thermistor: Sets a value to adjust temperature measurement
# delay timing, 'h0000 to 'hFFFF
def setAPT(self, val):
    """Write APT (Adjustment Pack Thermistor, 16-bit): tunes the
    temperature-measurement delay timing, 0x0000-0xFFFF."""
    word_size = 16
    self.write(REG.APT, val, word_size)
def getAPT(self):
    """Read the APT register (16-bit)."""
    word_size = 16
    return self.read(REG.APT, word_size)
# Bits APT
# Register RSOC
# Displays RSOC value based on a 0−100 scale, 'h0000 to 'h0064.
# Units: 1%.
def setRSOC(self, val):
    """Write RSOC (relative state of charge), 0-100 in 1 % units.
    NOTE(review): 8-bit width — the 0x00-0x64 range fits, but confirm
    against the device's word-oriented register protocol."""
    width = 8
    self.write(REG.RSOC, val, width)
def getRSOC(self):
    """Read RSOC (relative state of charge), 0-100 in 1 % units.
    NOTE(review): 8-bit width — confirm against the device's
    word-oriented register protocol."""
    width = 8
    return self.read(REG.RSOC, width)
# Bits RSOC
# Register ITE
# Indicator to Empty: 1% Displays RSOC value based on a 0−100 scale, 'h0000 to 'h03E8 .
# Units: 0.1%.
# This is the same as RSOC with a resolution of 0.1% over the range 0.0% to 100.0%.
def setITE(self, val):
    """Write ITE (Indicator To Empty): RSOC in 0.1 % units.

    The documented range is 0x0000-0x03E8 (0-100.0 %), which does not fit
    in 8 bits; the width is fixed to the 16-bit word used by the device's
    other registers (the previous width of 8 would have truncated values
    above 25.5 %).
    """
    self.write(REG.ITE, val, 16)
def getITE(self):
    """Read ITE (Indicator To Empty): RSOC in 0.1 % units, 0x0000-0x03E8.

    The documented range exceeds 8 bits, so the read width is fixed to the
    16-bit word used by the device's other registers (an 8-bit read would
    have dropped the high byte for values above 25.5 %).
    """
    return self.read(REG.ITE, 16)
# Bits ITE
# Register IC_VERSION
# Displays an ID number of an IC, 'h0000 to 'hFFFF.
def setIC_VERSION(self, val):
"""Set register IC_VERSION"""
self.write(REG.IC_VERSION, val, 8)
def getIC_VERSION(self):
"""Get register IC_VERSION"""
return self.read(REG.IC_VERSION, 8)
# Bits IC_VERSION
# Register CHANGE_OF_PARAM
# Change Of The Parameter Selects a battery profile, 0x0000 or 0x0001
# The LSI contains a data file comprised of two battery profiles. This register is used to select the battery profile to be used. See Table 8. Register Number of the Parameter (0x1A) contains identity of the data file.
# The Data file is loaded during final test depending on the part number ordered.
# Most of the time, battery nominal/rated voltage or charging voltage values are used to determine which profile data shall be used. Please contact ON Semiconductor if you cannot identify which profile to select.
    def setCHANGE_OF_PARAM(self, val):
        """Set register CHANGE_OF_PARAM (battery-profile select, 0x0000 or 0x0001)."""
        self.write(REG.CHANGE_OF_PARAM, val, 16)
    def getCHANGE_OF_PARAM(self):
        """Get register CHANGE_OF_PARAM (currently selected battery profile)."""
        return self.read(REG.CHANGE_OF_PARAM, 16)
# Bits CHANGE_OF_PARAM
# Register ALARM_LOW_RSOC
# Alarm Low RSO: Disable Sets RSOC threshold to generate Alarm signal.
# Units: 1%
# The ALARMB pin will be set low when the RSOC value falls below this value, will be released from low when RSOC value rises than this value. Set to Zero to disable. Figure 14.
    def setALARM_LOW_RSOC(self, val):
        """Set register ALARM_LOW_RSOC (ALARMB threshold in 1 % units; 0 disables)."""
        self.write(REG.ALARM_LOW_RSOC, val, 16)
    def getALARM_LOW_RSOC(self):
        """Get register ALARM_LOW_RSOC."""
        return self.read(REG.ALARM_LOW_RSOC, 16)
# Bits ALARM_LOW_RSOC
# Register ALARM_LOW_CELL_VOLTAGE
# Alarm Low Cell: Disable 1 mV Sets Voltage threshold to generate Alarm signal.
# Units: 1mV.
# The ALARMB pin will be set low if VDD falls below this value, will be released from low if VDD rises than this value. Set to Zero to disable. Figure 15.
    def setALARM_LOW_CELL_VOLTAGE(self, val):
        """Set register ALARM_LOW_CELL_VOLTAGE (ALARMB threshold in 1 mV units; 0 disables)."""
        self.write(REG.ALARM_LOW_CELL_VOLTAGE, val, 16)
    def getALARM_LOW_CELL_VOLTAGE(self):
        """Get register ALARM_LOW_CELL_VOLTAGE."""
        return self.read(REG.ALARM_LOW_CELL_VOLTAGE, 16)
# Bits ALARM_LOW_CELL_VOLTAGE
# Register IC_POWER_MODE
# IC Power Mode: Selects Power mode. See note 4.
# The LSI has two power modes. Sleep (0x15 = 02) or Operational mode (0x15 = 01). In the Sleep mode only I2C communication functions. In the Operational mode all functions operate with full calculation and tracking of RSOC during charge and discharge.
# If the battery is significantly charged or discharged during sleep mode, the RSOC will not be accurate. Moved charge is counted continuously to measure the RSOC in
# Operational mode. If battery is discharged or charged in the Sleep mode, the count breaks off.
# When it is switched from Sleep mode to Operational mode, RSOC calculation is continued by using the data which was measured in the previous Operational mode.
    def setIC_POWER_MODE(self, val):
        """Set register IC_POWER_MODE (01 = operational, 02 = sleep; see register comment above)."""
        self.write(REG.IC_POWER_MODE, val, 16)
    def getIC_POWER_MODE(self):
        """Get register IC_POWER_MODE."""
        return self.read(REG.IC_POWER_MODE, 16)
# Bits IC_POWER_MODE
# Register STATUS_BIT
# Status Bit: Selects Temperature obtaining method.
# This selects the Thermistor mode. Thermistor mode (0x16 = 01) the | |
"""
Prepare data for Part-GPNN model.
Need:
Node feature at different scales
Edge feature for valid edges
Adjacency matrix GT (parse graph GT)
Edge weight (corresponds to node level)
Edge label GT
"""
import json
import os
import pickle
import warnings
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import cv2
import feature_model
import metadata
import skimage.io
import torch
import torchvision
# DensePose body-part surface indices (1-24) grouped into named parts.
# Coarser entries ('Upper Body', 'Left Arm', 'Full Body', ...) are unions
# of the fine-grained index lists.
part_ids = {'Torso': [1, 2],
            'Right Hand': [3],
            'Left Hand': [4],
            'Left Foot': [5],
            'Right Foot': [6],
            'Upper Leg Right': [7, 9],
            'Upper Leg Left': [8, 10],
            'Lower Leg Right': [11, 13],
            'Lower Leg Left': [12, 14],
            'Upper Arm Left': [15, 17],
            'Upper Arm Right': [16, 18],
            'Lower Arm Left': [19, 21],
            'Lower Arm Right': [20, 22],
            'Head': [23, 24],
            'Upper Body': [1, 2, 3, 4, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
            'Lower Body': [5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
            'Left Arm': [4, 15, 17, 19, 21],
            'Right Arm': [3, 16, 18, 20, 22],
            'Left Leg': [5, 8, 10, 12, 14],
            'Right Leg': [6, 7, 9, 11, 13],
            'Full Body': [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]
            }
# Edge weight per node granularity level: finer parts carry lower weight.
__PART_WEIGHT_L1 = 0.1  # hand
__PART_WEIGHT_L2 = 0.3  # arm
__PART_WEIGHT_L3 = 0.5  # upper body
__PART_WEIGHT_L4 = 1.0  # human
part_weights = {'Torso': __PART_WEIGHT_L1,
                'Right Hand': __PART_WEIGHT_L1,
                'Left Hand': __PART_WEIGHT_L1,
                'Left Foot': __PART_WEIGHT_L1,
                'Right Foot': __PART_WEIGHT_L1,
                'Upper Leg Right': __PART_WEIGHT_L1,
                'Upper Leg Left': __PART_WEIGHT_L1,
                'Lower Leg Right': __PART_WEIGHT_L1,
                'Lower Leg Left': __PART_WEIGHT_L1,
                'Upper Arm Left': __PART_WEIGHT_L1,
                'Upper Arm Right': __PART_WEIGHT_L1,
                'Lower Arm Left': __PART_WEIGHT_L1,
                'Lower Arm Right': __PART_WEIGHT_L1,
                'Head': __PART_WEIGHT_L1,
                'Upper Body': __PART_WEIGHT_L3,
                'Lower Body': __PART_WEIGHT_L3,
                'Left Arm': __PART_WEIGHT_L2,
                'Right Arm': __PART_WEIGHT_L2,
                'Left Leg': __PART_WEIGHT_L2,
                'Right Leg': __PART_WEIGHT_L2,
                'Full Body': __PART_WEIGHT_L4
                }
part_names = list(part_ids.keys())
# Part hierarchy: each key lists its direct child parts (empty list = leaf).
part_graph = {'Torso': [],
              'Right Hand': [],
              'Left Hand': [],
              'Left Foot': [],
              'Right Foot': [],
              'Upper Leg Right': [],
              'Upper Leg Left': [],
              'Lower Leg Right': [],
              'Lower Leg Left': [],
              'Upper Arm Left': [],
              'Upper Arm Right': [],
              'Lower Arm Left': [],
              'Lower Arm Right': [],
              'Head': [],
              'Upper Body': ['Head', 'Torso', 'Left Arm', 'Right Arm'],
              'Lower Body': ['Left Leg', 'Right Leg'],
              'Left Arm': ['Upper Arm Left', 'Lower Arm Left', 'Left Hand'],
              'Right Arm': ['Upper Arm Right', 'Lower Arm Right', 'Right Hand'],
              'Left Leg': ['Upper Leg Left', 'Lower Leg Left', 'Left Foot'],
              'Right Leg': ['Upper Leg Right', 'Lower Leg Right', 'Right Foot'],
              'Full Body': ['Head', 'Torso', 'Upper Body', 'Lower Body']
              }
def get_intersection(box1, box2):
    """Return the intersection of two (x0, y0, x1, y1) boxes (may be degenerate)."""
    return np.hstack((np.maximum(box1[:2], box2[:2]), np.minimum(box1[2:], box2[2:])))


def compute_area(box):
    """Return the area of an (x0, y0, x1, y1) box, or 0.0 if it is degenerate."""
    width = box[2] - box[0]
    height = box[3] - box[1]
    if width > 0 and height > 0:
        return width * height
    return 0.0


def compute_iou(box1, box2):
    """Return the intersection-over-union of two boxes (0.0 for an empty union)."""
    intersection_area = compute_area(get_intersection(box1, box2))
    union_area = compute_area(box1) + compute_area(box2) - intersection_area
    # Fix: guard against division by zero when both boxes are degenerate.
    if union_area <= 0:
        return 0.0
    return intersection_area / union_area


def get_node_index(bbox, det_boxes):
    """Return the index of the detection best matching bbox (IoU > 0.5), or -1.

    0.5 is the standard HOI-evaluation IoU threshold for a valid match.
    """
    bbox = np.array(bbox, dtype=np.float32)
    max_iou = 0.5  # minimum IoU for a match
    max_iou_index = -1
    for i_node in range(len(det_boxes)):
        iou = compute_iou(bbox, det_boxes[i_node])
        if iou > max_iou:
            max_iou = iou
            max_iou_index = i_node
    return max_iou_index


def combine_box(box1, box2):
    """Return the smallest box enclosing both input boxes."""
    return np.hstack((np.minimum(box1[:2], box2[:2]), np.maximum(box1[2:], box2[2:])))
# ImageNet channel statistics used for input normalization.
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])


def img_to_torch(img):
    """
    input: H x W x C img iterables with range 0-255
    output: C x H x W img tensor with range 0-1, normalized, on the GPU
    """
    img = np.array(img) / 255.
    img = (img - mean) / std
    # Move the channel axis in front of the spatial axes; a single image
    # additionally gets a leading batch axis.
    if len(img.shape) == 3:
        img = np.expand_dims(img.transpose([2, 0, 1]), axis=0)
    elif len(img.shape) == 4:
        img = img.transpose([0, 3, 1, 2])
    elif len(img.shape) == 5:
        img = img.transpose([0, 1, 4, 2, 3])
    # Fix: torch.autograd.Variable is deprecated (a no-op wrapper since
    # PyTorch 0.4) — build the float32 tensor directly.
    return torch.as_tensor(img, dtype=torch.float32).cuda()
# Site-specific path configuration: `if True` selects the cluster layout,
# flip to False for the local-workstation layout.
if True:
    img_dir = '/mnt/hdd-12t/share/HICO/hico_20160224_det/images'
    densepose_path = '/mnt/hdd-12t/tengyu/DensePose/infer_out/hico-det/'
    checkpoint_dir = '/mnt/hdd-12t/tengyu/github/Part-GPNN/data/hico/model'
    save_data_path = '/mnt/hdd-12t/tengyu/github/Part-GPNN/data/hico/feature'
    mmdetection_path = '/mnt/hdd-12t/tengyu/PartGPNN/gpnn/data/hico/mmdetection'
    hico_anno_dir = '/mnt/hdd-12t/share/HICO/hico_20160224_det'
else:
    img_dir = '/home/tengyu/Data/hico/hico_20160224_det/images'
    densepose_path = '/home/tengyu/Documents/densepose/DensePoseData/infer_out/hico-det/'
    checkpoint_dir = '/home/tengyu/Documents/github/Part-GPNN/data/hico/model'
    save_data_path = '/home/tengyu/Documents/github/Part-GPNN/data/hico/feature'
    mmdetection_path = '/home/tengyu/Documents/mmdetection/outputs'
    hico_anno_dir = '/home/tengyu/Data/hico/hico_20160224_det'
# Feature extractor: ResNet-152 head sized to the HICO action classes,
# restored from the best checkpoint of a previous training run.
feature_network = feature_model.Resnet152(num_classes=len(metadata.action_classes))
feature_network.cuda()
best_model_file = os.path.join(checkpoint_dir, 'model_best.pth')
checkpoint = torch.load(best_model_file)
# Strip the DataParallel 'module.' prefix from checkpoint keys so the
# state dict loads into the non-parallel module.
for k in list(checkpoint['state_dict'].keys()):
    if k[:7] == 'module.':
        checkpoint['state_dict'][k[7:]] = checkpoint['state_dict'][k]
        del checkpoint['state_dict'][k]
feature_network.load_state_dict(checkpoint['state_dict'])
# Standard ImageNet preprocessing for the 224x224 input crops.
transform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225]),
])
input_h, input_w = 224, 224
# One-hot lookup tables: 21 body-part classes and 81 COCO object classes.
part_eye = np.eye(21)
obj_eye = np.eye(81)
for imageset in ['test', 'train']:
hake_annotation = json.JSONDecoder().decode(open(os.path.join(os.path.dirname(__file__), 'annotation', 'hico-%sing-set-image-level.json'%imageset)).read())
hico_bbox_annotation = sio.loadmat(os.path.join(hico_anno_dir, 'anno_bbox.mat'))['bbox_{}'.format(imageset)]
mmdetection_result = pickle.load(open(os.path.join(mmdetection_path, 'hico-det.%s.pkl'%imageset), 'rb'))
for img_i in range(hico_bbox_annotation.shape[1]):
filename = hico_bbox_annotation[0,img_i][0][0]
# check if human detection exists
if not os.path.exists(os.path.join(densepose_path, imageset, filename + '.pkl')):
warnings.warn('human detection missing for ' + filename)
continue
# load image
try:
image = skimage.io.imread(os.path.join(img_dir, '%s2015'%imageset, filename))
except:
warnings.warn('Image missing ' + filename)
raise
continue
img_w = image.shape[0]
img_h = image.shape[1]
if len(image.shape) == 2:
image = np.tile(np.expand_dims(image, axis=-1), [1, 1, 3])
obj_boxes_all = np.empty((0,4))
obj_classes_all = list()
part_boxes_all = np.empty((0,4))
part_classes_all = list()
human_boxes = []
human_ids = []
part_human_ids = list()
edge_boxes_all = np.empty((0,4))
edge_human_id = list()
# object detection
for c in range(2, len(metadata.coco_classes)):
for detection in mmdetection_result[filename][c-1]:
if detection[4] > 0.7:
y0,x0,y1,x1 = detection[0], detection[1], detection[2], detection[3]
obj_boxes_all = np.vstack((obj_boxes_all, np.array(detection[:4])[np.newaxis, ...]))
obj_classes_all.append(c-1)
if len(obj_classes_all) == 0:
warnings.warn('object detection missing for ' + filename)
continue
# human detection
densepose_boxes, densepose_bodies = pickle.load(open(os.path.join(densepose_path, imageset, filename + '.pkl'), 'rb'), encoding='latin-1')
for human_id in range(len(densepose_boxes[1])):
if densepose_boxes[1][human_id][4] < 0.7:
continue
for part_id, part_name in enumerate(part_names):
x, y = np.where(np.isin(densepose_bodies[1][human_id], part_ids[part_name]))
x = x + densepose_boxes[1][human_id][1]
y = y + densepose_boxes[1][human_id][0]
if len(x) > 0:
x0, x1, y0, y1 = x.min(), x.max(), y.min(), y.max()
part_boxes_all = np.vstack([part_boxes_all, np.array([[y0,x0,y1,x1]])])
part_classes_all.append(part_id)
part_human_ids.append(human_id)
if part_names[part_id] == 'Full Body':
human_boxes.append([y0,x0,y1,x1])
human_ids.append(human_id)
# Load annotation
action_labels = defaultdict(list)
bbox_annotation = hico_bbox_annotation[0,img_i]
for hoi_i in range(len(hico_bbox_annotation[0,img_i][2][0])):
invis = hico_bbox_annotation[0,img_i][2][0][hoi_i][4][0,0]
if invis == 1: continue
action = metadata.hoi_to_action[hico_bbox_annotation[0,img_i][2][0][hoi_i][0][0,0]-1]
if metadata.action_classes[action] == 'no_interaction':
continue
bbox_h = hico_bbox_annotation[0,img_i][2][0][hoi_i][1]
bbox_o = hico_bbox_annotation[0,img_i][2][0][hoi_i][2]
h_idx = hico_bbox_annotation[0,img_i][2][0][hoi_i][3][0,0]
o_idx = hico_bbox_annotation[0,img_i][2][0][hoi_i][3][0,1]
x0_h,y0_h,x1_h,y1_h = int(bbox_h['x1'][0,0][0,0]), int(bbox_h['y1'][0,0][0,0]), int(bbox_h['x2'][0,0][0,0]), int(bbox_h['y2'][0,0][0,0])
x0_o,y0_o,x1_o,y1_o = int(bbox_o['x1'][0,0][0,0]), int(bbox_o['y1'][0,0][0,0]), int(bbox_o['x2'][0,0][0,0]), int(bbox_o['y2'][0,0][0,0])
# x0,y0,x1,y1 = min(x0_h, x0_o), min(y0_h, y0_o), max(x1_h, x1_o), max(y1_h, y1_o)
human_index = get_node_index([x0_h, y0_h, x1_h, y1_h], human_boxes)
object_index = get_node_index([x0_o, y0_o, x1_o, y1_o], obj_boxes_all)
if human_index < 0 or object_index < 0:
continue
action_labels[(human_ids[human_index], object_index)].append(action)
# Prepare data
part_num = len(part_boxes_all)
obj_num = len(obj_boxes_all)
human_num = len(human_boxes)
node_num = part_num + obj_num
node_features = np.zeros([node_num, 1000])
edge_features = np.zeros([node_num, node_num, 1216])
adj_mat = np.zeros([node_num, node_num])
gt_strength_level = np.zeros([node_num, node_num])
gt_action_labels = np.zeros([node_num, node_num, len(metadata.hoi_to_action)])
# for i_node in range(node_num):
# if i_node < part_num:
# box = [int(round(x)) for x in part_boxes_all[i_node]]
# print(box)
# patch = image[box[1] : box[3] + 1, box[0] : box[2] + 1, :]
# plt.subplot(121)
# plt.imshow(image)
# plt.plot([box[0], box[2], box[2], box[0], box[0]], [box[1], box[1], box[3], box[3], box[1]])
# plt.subplot(122)
# plt.imshow(patch)
# print(part_names[part_classes_all[i_node]])
# plt.show()
# else:
# box = [int(round(x)) for x in obj_boxes_all[i_node - part_num]]
# patch = image[box[1] : box[3] + 1, box[0] : box[2] + 1, :]
# plt.subplot(121)
# plt.imshow(image)
# plt.plot([box[0], box[2], box[2], box[0], box[0]], [box[1], box[1], box[3], box[3], box[1]])
# plt.subplot(122)
# plt.imshow(patch)
# print(metadata.coco_classes[obj_classes_all[i_node - part_num] + 1])
# plt.show()
# continue
# for human_index, obj_index in action_labels.keys():
# plt.imshow(image)
# box = human_boxes[human_ids.index(human_index)]
# plt.plot([box[0], box[2], box[2], box[0], box[0]], [box[1], box[1], box[3], box[3], box[1]])
# box = obj_boxes_all[obj_index]
# plt.plot([box[0], box[2], box[2], box[0], box[0]], [box[1], box[1], box[3], box[3], box[1]])
# print([metadata.action_classes[i] for i in action_labels[(human_index, obj_index)]])
# plt.show()
# continue
# extract node features
for i_node in range(node_num):
if i_node < part_num:
box = part_boxes_all[i_node]
else:
box = obj_boxes_all[i_node - part_num]
box = np.array(box).astype(int)
img_patch = image[box[1] : box[3] + 1, box[0] : box[2] + 1, :]
img_patch = transform(cv2.resize(img_patch, (input_h, input_w), interpolation=cv2.INTER_LINEAR))
img_patch = torch.autograd.Variable(img_patch).unsqueeze(0).cuda()
feat, pred = feature_network(img_patch)
node_features[i_node] = feat.data.cpu().numpy()
part_boxes_all = np.array(part_boxes_all)
obj_boxes_all = np.array(obj_boxes_all)
if len(part_boxes_all) == 0 or len(obj_boxes_all) == 0:
warnings.warn('Zero detection result for {}'.format(filename))
continue
node_features_appd = np.zeros([node_features.shape[0], 6 + 21 + 81])
node_features_appd[:part_num,0] = (part_boxes_all[:,2] - part_boxes_all[:,0]) / img_w # relative w
node_features_appd[:part_num,1] = (part_boxes_all[:,3] - part_boxes_all[:,1]) / img_h # relative h
node_features_appd[:part_num,2] = ((part_boxes_all[:,2] + part_boxes_all[:,0]) / 2) / img_w # relative cx
node_features_appd[:part_num,3] = ((part_boxes_all[:,3] + part_boxes_all[:,1]) / 2) / img_h # relative cy
node_features_appd[:part_num,4] = (part_boxes_all[:,2] - part_boxes_all[:,0]) * (part_boxes_all[:,3] - part_boxes_all[:,1]) / (img_w * img_h) # relative area
node_features_appd[:part_num,5] = (part_boxes_all[:,2] - part_boxes_all[:,0]) / | |
<gh_stars>1-10
#!/usr/bin/env python
# Copyright 2018 Division of Medical Image Computing, German Cancer Research Center (DKFZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
'''
This preprocessing script loads nrrd files obtained by the data conversion tool: https://github.com/MIC-DKFZ/LIDC-IDRI-processing/tree/v1.0.1
After applying preprocessing, images are saved as numpy arrays and the meta information for the corresponding patient is stored
as a line in the dataframe saved as info_df.pickle.
'''
import os
import SimpleITK as sitk
import numpy as np
import random
from multiprocessing import Pool
import pandas as pd
import numpy.testing as npt
from skimage.transform import resize
import subprocess
import pickle
import json
import configs
# Global preprocessing configuration, shared by every pp_patient call.
cf = configs.configs()
def pp_patient(inputs):
    """Preprocess one patient: load the phase images and mask, optionally crop
    a positive patch, z-normalize, derive ROI labels and save the npy/meta files.

    inputs: (index, patient_directory_path) tuple as supplied by Pool.map.
    """
    #read image
    ix, path = inputs
    pid = path.split('/')[-1]
    if cf.multiphase:
        with open(os.path.abspath(cf.pp_mp_cf), 'r') as f:
            pp_mp_cf = json.load(f)
        phases = pp_mp_cf[cf.mp_setting]
    # NOTE(review): `phases` is only bound when cf.multiphase is set; the loop
    # below raises NameError otherwise — confirm this path is multiphase-only.
    concat_images = list()
    for ii in phases:
        img = sitk.ReadImage(os.path.join(path,'{}.nii.gz'.format(ii)))
        img_arr = sitk.GetArrayFromImage(img)
        print('processing {} {}'.format(pid,ii), img.GetSpacing(), img_arr.shape,np.mean(img_arr),np.std(img_arr))
        #img_arr = resample_array(img_arr, img.GetSpacing(), cf.target_spacing) #already re-sampled in prior pre-processing
        # Clip intensities to the window of interest.
        img_arr = np.clip(img_arr, -1200, 600)
        #img_arr = (1200 + img_arr) / (600 + 1200) * 255 # a+x / (b-a) * (c-d) (c, d = new)
        concat_images.append(img_arr)
    mask = sitk.ReadImage(os.path.join(cf.raw_data_dir, pid, '01_mask.nii.gz'))
    mask_arr = sitk.GetArrayFromImage(mask).astype(np.uint8)
    # Label values above 10 are treated as noise and zeroed.
    mask_arr[mask_arr>10] = 0
    concat_images.append(mask_arr)
    # Find random patch of patch size with random offset, apply to images
    if cf.pp_patches is not None:
        concat_images = generate_positive_patches(mask_arr,concat_images,cf.pp_patches,40)
    # Remove mask_arr from concat (it was appended last above and is cropped
    # together with the images by generate_positive_patches)
    mask_arr = concat_images.pop()
    # Concatenate images into singe img array
    concat = np.stack(concat_images,axis=3)
    # Z normalization of concatenated images as one multi-dimensional array
    concat = concat.astype(np.float32)
    concat = (concat - np.mean(concat)) / np.std(concat).astype(np.float16)
    print ("After concatenation ",np.mean(concat),np.std(concat),concat.dtype)
    print ("Concatenated Img Shape "+str(concat.shape))
    #Open Characteristics File
    df = pd.read_csv(os.path.join(cf.root_dir, 'raw_characteristics_gi.csv'), sep=',',converters={'PatientID': lambda x: str(x)})
    df = df[df.PatientID == pid]
    #Make Masks Array, Grab Mask ID Per Patient
    #final_rois = np.zeros_like(img_arr, dtype=np.uint8)
    mal_labels = []
    roi_ids = set([ii.split('.')[0].split('_')[0] for ii in os.listdir(path) if '01_mask.nii.gz' in ii])
    print (roi_ids)
    rix = 1
    # NOTE(review): with a single '01_mask.nii.gz' per patient this loop runs
    # at most once; `mal_list`/`final_rois` are undefined if roi_ids is empty.
    for rid in roi_ids:
        #Grab Mask Paths and Nodule IDs
        roi_id_paths = [ii for ii in os.listdir(path) if '01_mask.nii' in ii]
        print ("ROI ID Paths:"+str(roi_id_paths))
        nodule_ids = [ii.split('.')[0].split('_')[0].lstrip("0") for ii in roi_id_paths]
        print ("Nodule ID:"+str(nodule_ids))
        #Grab Severity Value From Characteristics file
        rater_labels = [1] #[df[df.ROI_ID == int(ii)].Severity.values[0] for ii in nodule_ids]
        print ("Rater Labels:"+str(rater_labels))
        ##Take Mean Severity Value
        #rater_labels.extend([0] * (4-len(rater_labels)))
        #mal_label = np.mean([ii for ii in rater_labels if ii > -1])
        mal_label = rater_labels
        mal_list = mal_label
        print ("#############Mal Label: "+str(mal_list))
        ##Read Mask Paths
        #roi_rater_list = []
        # for rp in roi_id_paths:
        rp = roi_id_paths[0]
        # roi = sitk.ReadImage(os.path.join(cf.raw_data_dir, pid, rp))
        # roi_arr = sitk.GetArrayFromImage(roi).astype(np.uint8)
        # roi_arr[roi_arr>10] = 0
        roi_arr = mask_arr
        if cf.multiphase:
            # Will need to change manually if two-phase ie img_arr = concat[:,:,0]
            img_arr = concat[:,:,:,0]
        else:
            img_arr= concat
        #roi_arr = resample_array(roi_arr, roi.GetSpacing(), cf.target_spacing)
        assert roi_arr.shape == img_arr.shape, [roi_arr.shape, img_arr.shape, pid, mask.GetSpacing()]
        for ix in range(len(img_arr.shape)):
            npt.assert_almost_equal(mask.GetSpacing()[ix], img.GetSpacing()[ix])
        #roi_rater_list.append(roi_arr)
        final_rois = roi_arr
        # roi_rater_list.extend([np.zeros_like(roi_rater_list[-1])]*(4-len(roi_id_paths)))
        # roi_raters = np.array(roi_rater_list)
        # roi_raters = np.mean(roi_raters, axis=0)
        # roi_raters[roi_raters < 0.5] = 0
        # if np.sum(roi_raters) > 0:
        #     mal_labels.append(mal_label)
        #     final_rois[roi_raters >= 0.5] = rix
        #     rix += 1
        # else:
        #     # indicate rois suppressed by majority voting of raters
        #     print('suppressed roi!', roi_id_paths)
        #     with open(os.path.join(cf.pp_dir, 'suppressed_rois.txt'), 'a') as handle:
        #         handle.write(" ".join(roi_id_paths))
    #Generate Foreground Slice Indices
    final_rois = np.around(final_rois)
    fg_slices = [ii for ii in np.unique(np.argwhere(final_rois != 0)[:, 0])]
    #Make Array From Severity
    #mal_labels = np.array(mal_label)
    # NOTE(review): mal_list holds ints (e.g. [1]), so `mal_list[0] == [0]`
    # compares an int to a list and is always False — confirm intent.
    if mal_list[0] == [0]:
        mal_labels_assert_test = []
    else:
        mal_labels_assert_test = mal_list
    print ("Print Malignancy Labels:"+str(mal_list))
    print ("Print Unique Values in ROI Array:"+str(len(np.unique(final_rois))))
    assert len(mal_labels_assert_test) + 1 == len(np.unique(final_rois)), [len(mal_labels), np.unique(final_rois), pid]
    np.save(os.path.join(cf.pp_dir, '{}_rois.npy'.format(pid)), final_rois)
    np.save(os.path.join(cf.pp_dir, '{}_img.npy'.format(pid)), concat)
    with open(os.path.join(cf.pp_dir, 'meta_info_{}.pickle'.format(pid)), 'wb') as handle:
        meta_info_dict = {'pid': pid, 'class_target': mal_list, 'spacing': img.GetSpacing(), 'fg_slices': fg_slices}
        print (meta_info_dict)
        pickle.dump(meta_info_dict, handle)
def aggregate_meta_info(exp_dir):
    """Collect all per-patient meta_info pickles in exp_dir into info_df.pickle."""
    meta_paths = [os.path.join(exp_dir, fname)
                  for fname in os.listdir(exp_dir) if 'meta_info' in fname]
    df = pd.DataFrame(columns=['pid', 'class_target', 'spacing', 'fg_slices'])
    for meta_path in meta_paths:
        with open(meta_path, 'rb') as handle:
            # Each pickle holds one row dict keyed by the columns above.
            df.loc[len(df)] = pickle.load(handle)
    df.to_pickle(os.path.join(exp_dir, 'info_df.pickle'))
    print ("aggregated meta info to df with length", len(df))
def resample_array(src_imgs, src_spacing, target_spacing):
    """Resample a volume to target_spacing with linear interpolation.

    Spacings are given in (x, y, z) order while arrays are (z, y, x), hence
    the [::-1] reversal when computing the target shape. Returns float16.

    Raises AssertionError when rounding produces an empty axis.
    """
    src_spacing = np.round(src_spacing, 3)
    target_shape = [int(src_imgs.shape[ix] * src_spacing[::-1][ix] / target_spacing[::-1][ix])
                    for ix in range(len(src_imgs.shape))]
    # Fix: the original wrapped an `assert` in a bare `except:`, which both
    # swallows unrelated errors and disappears under `python -O`. Check
    # explicitly and raise the same AssertionError payload.
    if any(dim <= 0 for dim in target_shape):
        raise AssertionError("AssertionError:", src_imgs.shape, src_spacing, target_spacing)
    img = src_imgs.astype(float)
    resampled_img = resize(img, target_shape, order=1, clip=True, mode='edge').astype('float16')
    return resampled_img
def generate_positive_patches(mask_arr,studies,patch_size,random_center_displacement):
    """Crop one patch of `patch_size` around the mask centroid from every study.

    The patch center is jittered by up to +/- random_center_displacement voxels
    per axis; candidates are redrawn until the mask is inside the patch and the
    image patch mean is acceptable. Returns the list of cropped (padded) arrays.
    """
    q = random_center_displacement
    pos_patches = list()
    # Centroid of the positive mask voxels, in (z, y, x) array order.
    where = np.where(mask_arr==1)
    z_mid = int(where[0].mean())
    y_mid = int(where[1].mean())
    x_mid = int(where[2].mean())
    print (x_mid,y_mid,z_mid)
    repeat = True
    # NOTE(review): if no jittered patch can ever satisfy the acceptance
    # checks below, this loop never terminates — confirm inputs guarantee exit.
    while repeat == True:
        # Random patch origin, clamped at 0 on each axis.
        z_start = random.randint(-(q),q)+z_mid-(patch_size[0]/2)
        if z_start < 0:
            z_start = 0
        y_start = random.randint(-(q),q)+y_mid-(patch_size[1]/2)
        if y_start < 0:
            y_start = 0
        x_start = random.randint(-(q),q)+x_mid-(patch_size[2]/2)
        if x_start < 0:
            x_start = 0
        z_end = z_start+patch_size[0]
        y_end = y_start+patch_size[1]
        x_end = x_start+patch_size[2]
        numbers = [ int(x) for x in [x_start,x_end,y_start,y_end,z_start,z_end] ]
        pos_patches = [numbers]
        print ("Positive Patches: ",pos_patches)
        Z = pos_patches[0]
        patches = list()
        for arr in studies:
            print (arr.shape,Z)
            data = arr[Z[4]:Z[5],Z[2]:Z[3],Z[0]:Z[1]]
            ## Pad if doesn't fit correct full patch size
            if np.any([data.shape[dim] < ps for dim, ps in enumerate(patch_size)]):
                new_shape = [np.max([data.shape[dim], ps]) for dim, ps in enumerate(patch_size)]
                data = pad_nd_image(data, new_shape, mode='constant')
            print ("Patch Shape ",data.shape)
            mean = np.mean(data)
            # A binary (two-valued) array is the mask; an all-zero crop means
            # the mask fell outside this candidate patch -> redraw.
            if len(np.unique(data))==2:
                if mean == 0.0:
                    print ("Mask missing "+str(mean))
                    repeat = True
                    break
            # Image patches must not be dominated by air (HU ~ -1000).
            if mean > -650:
                print ("Appropriate Mean of Patch "+str(mean))
                repeat = False
            else:
                print ("Inappropriate Mean of Patch "+str(mean))
                repeat = True
                break
            patches.append(data)
    return patches
def pad_nd_image(image, new_shape=None, mode="edge", kwargs=None, return_slicer=False, shape_must_be_divisible_by=None):
    """
    one padder to pad them all.
    :param image: nd image. can be anything
    :param new_shape: what shape do you want? new_shape does not have to have the same dimensionality as image. If
    len(new_shape) < len(image.shape) then the last axes of image will be padded. If new_shape < image.shape in any of
    the axes then we will not pad that axis, but also not crop! (interpret new_shape as new_min_shape)
    :param mode: see np.pad for documentation
    :param return_slicer: if True then this function will also return what coords you will need to use when cropping back
    to original shape
    :param shape_must_be_divisible_by: for network prediction. After applying new_shape, make sure the new shape is
    divisibly by that number (can also be a list with an entry for each axis). Whatever is missing to match that will
    be padded (so the result may be larger than new_shape if shape_must_be_divisible_by is not None)
    :param kwargs: see np.pad for documentation
    :return: padded array, plus the crop-back slicer when return_slicer is True
    """
    if kwargs is None:
        kwargs = {}
    if new_shape is not None:
        old_shape = np.array(image.shape[-len(new_shape):])
    else:
        # Only padding to a divisibility constraint: start from current shape.
        assert shape_must_be_divisible_by is not None
        assert isinstance(shape_must_be_divisible_by, (list, tuple, np.ndarray))
        new_shape = image.shape[-len(shape_must_be_divisible_by):]
        old_shape = new_shape
    num_axes_nopad = len(image.shape) - len(new_shape)
    # Never shrink an axis: new_shape acts as a minimum shape.
    new_shape = [max(new_shape[i], old_shape[i]) for i in range(len(new_shape))]
    if not isinstance(new_shape, np.ndarray):
        new_shape = np.array(new_shape)
    if shape_must_be_divisible_by is not None:
        if not isinstance(shape_must_be_divisible_by, (list, tuple, np.ndarray)):
            shape_must_be_divisible_by = [shape_must_be_divisible_by] * len(new_shape)
        else:
            assert len(shape_must_be_divisible_by) == len(new_shape)
        # Round each axis up to the next multiple of the divisor (the
        # pre-subtraction keeps already-divisible axes unchanged).
        for i in range(len(new_shape)):
            if new_shape[i] % shape_must_be_divisible_by[i] == 0:
                new_shape[i] -= shape_must_be_divisible_by[i]
        new_shape = np.array([new_shape[i] + shape_must_be_divisible_by[i] - new_shape[i] % shape_must_be_divisible_by[i] for i in range(len(new_shape))])
    difference = new_shape - old_shape
    pad_below = difference // 2
    pad_above = difference // 2 + difference % 2
    pad_list = [[0, 0]]*num_axes_nopad + list([list(i) for i in zip(pad_below, pad_above)])
    res = np.pad(image, pad_list, mode, **kwargs)
    # Fix: the original fell off the end here and implicitly returned None,
    # discarding the padded result and never honoring return_slicer.
    if not return_slicer:
        return res
    pad_list = np.array(pad_list)
    pad_list[:, 1] = np.array(res.shape) - pad_list[:, 1]
    slicer = list(slice(*i) for i in pad_list)
    return res, slicer
import logging
from amuse.community import *
from amuse.test.amusetest import TestWithMPI
from omuse.community.dales.interface import Dales
from omuse.units import units
# Channel options passed to every Dales instance in these tests; the
# commented alternatives are kept for local debugging.
# kwargs = {}
kwargs = dict(channel_type="sockets", redirection="none")
# kwargs=dict(redirection="none",debugger="gdb")
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("code").setLevel(logging.DEBUG)
def cleanup_data(rundir):
    """Remove a test run directory (or file); silently ignore a missing path.

    Fix: the original removed only the direct children with os.remove, which
    raises if the run produced any nested subdirectory. shutil.rmtree handles
    arbitrary nesting and is backward-compatible for flat directories.
    """
    import shutil
    if os.path.isdir(rundir):
        shutil.rmtree(rundir)
    elif os.path.isfile(rundir):
        os.remove(rundir)
# Unsupported test cases: aerosolrad, chem, example, fog, heterogen, hireslapse, neutral
class TestDalesInterface(TestWithMPI):
    def test_namopt_file_is_written(self):
        # commit_parameters() must write the namelist file into the cwd.
        rundir = "work0"
        os.mkdir(rundir)
        os.chdir(rundir)
        try:
            instance = Dales(**kwargs)
            instance.commit_parameters()
            assert os.path.isfile("namoptions.001")
            instance.cleanup_code()
            instance.stop()
        finally:
            # Always restore the cwd and remove the scratch directory.
            for f in os.listdir("."):
                os.remove(f)
            os.chdir("..")
            os.rmdir(rundir)
    def test_prof_file_is_written(self):
        # commit_parameters() must write the initial profile file into the cwd.
        rundir = "work1"
        os.mkdir(rundir)
        os.chdir(rundir)
        try:
            instance = Dales(**kwargs)
            instance.commit_parameters()
            assert os.path.isfile("prof.inp.001")
            instance.cleanup_code()
            instance.stop()
        finally:
            # Always restore the cwd and remove the scratch directory.
            for f in os.listdir("."):
                os.remove(f)
            os.chdir("..")
            os.rmdir(rundir)
    def test_lscale_file_is_written(self):
        # commit_parameters() must write the large-scale forcings file into the cwd.
        rundir = "work2"
        os.mkdir(rundir)
        os.chdir(rundir)
        try:
            instance = Dales(**kwargs)
            instance.commit_parameters()
            assert os.path.isfile("lscale.inp.001")
            instance.cleanup_code()
            instance.stop()
        finally:
            # Always restore the cwd and remove the scratch directory.
            for f in os.listdir("."):
                os.remove(f)
            os.chdir("..")
            os.rmdir(rundir)
    def test_set_workdir(self):
        # Dales should create the working directory passed via `workdir`.
        rundir = "work3"
        instance = Dales(workdir=rundir, **kwargs)
        instance.commit_parameters()
        assert os.path.exists(rundir)
        instance.cleanup_code()
        instance.stop()
        cleanup_data(rundir)
    def test_namopt_file_written_in_workdir(self):
        # The namelist must be written inside the configured workdir, not the cwd.
        rundir = "work4"
        instance = Dales(workdir=rundir, **kwargs)
        instance.commit_parameters()
        assert os.path.isfile(os.path.join(rundir, "namoptions.001"))
        instance.cleanup_code()
        instance.stop()
        cleanup_data(rundir)
    def test_load_rico(self):
        # Loading the bundled "rico" case should give a 126-level vertical grid.
        rundir = "work-rico"
        instance = Dales(case="rico", workdir=rundir, **kwargs)
        instance.commit_parameters()
        assert instance.get_ktot() == 126
        assert os.path.exists(rundir)
        instance.cleanup_code()
        instance.stop()
        cleanup_data(rundir)
    def test_run_rico(self):
        # Evolving the "rico" case by 10 s must advance the model clock.
        rundir = "work-rico2"
        instance = Dales(case="rico", workdir=rundir, **kwargs)
        tim = instance.get_model_time()
        instance.evolve_model(tim + (10 | units.s))
        newtim = instance.get_model_time()
        assert newtim > tim
        instance.cleanup_code()
        instance.stop()
        cleanup_data(rundir)
    def test_modify_rico(self):
        # The "rico" case must also run with a user-supplied vertical grid
        # (10 m spacing up to 2 km).
        rundir = "work-rico3"
        instance = Dales(case="rico", workdir=rundir, z=numpy.arange(5, 2000, 10) | units.m, **kwargs)
        tim = instance.get_model_time()
        instance.evolve_model(tim + (10 | units.s))
        newtim = instance.get_model_time()
        assert newtim > tim
        instance.cleanup_code()
        instance.stop()
        cleanup_data(rundir)
    def test_run_cblstrong(self):
        # Evolving the "cblstrong" case by 30 s must advance the model clock.
        rundir = "work-cblstrong"
        instance = Dales(case="cblstrong", workdir=rundir, **kwargs)
        tim = instance.get_model_time()
        instance.evolve_model(tim + (30 | units.s))
        newtim = instance.get_model_time()
        assert newtim > tim
        instance.cleanup_code()
        instance.stop()
        cleanup_data(rundir)
    def test_run_cblweak(self):
        # Evolving the "cblweak" case by 30 s must advance the model clock.
        rundir = "work-cblweak"
        instance = Dales(case="cblweak", workdir=rundir, **kwargs)
        tim = instance.get_model_time()
        instance.evolve_model(tim + (30 | units.s))
        newtim = instance.get_model_time()
        assert newtim > tim
        instance.cleanup_code()
        instance.stop()
        cleanup_data(rundir)
    def test_load_bomex(self):
        # Loading the bundled "bomex" case should create the workdir.
        rundir = "work-bomex"
        instance = Dales(case="bomex", workdir=rundir, **kwargs)
        instance.commit_parameters()
        assert os.path.exists(rundir)
        instance.cleanup_code()
        instance.stop()
        cleanup_data(rundir)
    def test_run_bomex(self):
        # Evolving the "bomex" case by one minute must advance the model clock.
        rundir = "work-bomex2"
        instance = Dales(case="bomex", workdir=rundir, **kwargs)
        tim = instance.get_model_time()
        instance.evolve_model(tim + (1 | units.minute))
        newtim = instance.get_model_time()
        assert newtim > tim
        instance.cleanup_code()
        instance.stop()
        cleanup_data(rundir)
    def test_load_sp_case(self):
        # Loading the "sp-testcase" should create the workdir.
        rundir = "work-sp"
        instance = Dales(case="sp-testcase", workdir=rundir, **kwargs)
        instance.commit_parameters()
        assert os.path.exists(rundir)
        instance.cleanup_code()
        instance.stop()
        cleanup_data(rundir)
    def test_get_grid_dimensions(self):
        # The sp-testcase grid is 200x200x160 with 200 m horizontal spacing and
        # 25 m vertical spacing (cell centers at 12.5, 37.5, ... m).
        rundir = "work-sp2"
        instance = Dales(case="sp-testcase", workdir=rundir, **kwargs)
        instance.commit_parameters()
        assert (instance.get_itot(), instance.get_jtot(), instance.get_ktot()) == (200, 200, 160)
        assert (instance.get_dx().value_in(units.m), instance.get_dy().value_in(units.m)) == (200, 200)
        assert numpy.array_equal(instance.get_zf().value_in(units.m), numpy.arange(12.5, 4000., 25.))
        instance.cleanup_code()
        instance.stop()
        cleanup_data(rundir)
    def test_run_sp_case(self):
        # Evolving the sp-testcase by 1 s must advance the model clock.
        rundir = "work-sp3"
        instance = Dales(case="sp-testcase", workdir=rundir, **kwargs)
        tim = instance.get_model_time()
        instance.evolve_model(tim + (1 | units.s))
        newtim = instance.get_model_time()
        assert newtim > tim
        instance.cleanup_code()
        instance.stop()
        cleanup_data(rundir)
def test_upscale_arm_brown_case(self):
    """Committing arm_brown on an enlarged grid should produce the forcing input file."""
    workdir = "work-arm-brown"
    dales = Dales(case="arm_brown", workdir=workdir, **kwargs)
    dales.parameters_DOMAIN.itot = 64
    dales.parameters_DOMAIN.jtot = 64
    dales.commit_parameters()
    assert os.path.isfile(os.path.join(workdir, "ls_flux.inp.001"))
    dales.cleanup_code()
    dales.stop()
    cleanup_data(workdir)
def test_grid_shapes(self):
    """Check that profile, 3D-field and surface-field accessors return
    arrays of the expected shapes.

    Bug fix: the profile-shape assertion compared a shape tuple against
    ``(instance.get_ktot())`` — which is just an int, not a 1-tuple — so it
    could never hold. It now compares against ``(ktot,)``.
    """
    rundir = "work-sp4"
    instance = Dales(case="sp-testcase", workdir=rundir, **kwargs)
    instance.parameters_DOMAIN.itot = 64
    instance.parameters_DOMAIN.jtot = 64
    tim = instance.get_model_time()
    instance.evolve_model(tim + (1 | units.s))
    itot, jtot, ktot = instance.get_itot(), instance.get_jtot(), instance.get_ktot()
    # 1D vertical profile.
    temp_profile = instance.profiles.T
    assert temp_profile.shape == (ktot,)
    # Full 3D volume fields, via the grid and via the generic getter.
    v_block = instance.fields.V
    assert v_block.shape == (itot, jtot, ktot)
    thl_block = instance.get_field("THL")
    assert thl_block.shape == (itot, jtot, ktot)
    # 2D horizontal surface slab.
    lwp_slab = instance.surface_fields.LWP
    assert lwp_slab.shape == (itot, jtot)
    instance.cleanup_code()
    instance.stop()
    cleanup_data(rundir)
def test_u_profile_bomex(self):
    """The U profile must agree between all three access paths and must equal
    the horizontal mean of the 3D U field."""
    workdir = "work-bomex3"
    dales = Dales(case="bomex", workdir=workdir, **kwargs)
    t_start = dales.get_model_time()
    dales.evolve_model(t_start + (5 | units.s))
    mps = units.m / units.s
    via_grid = dales.profiles.U.value_in(mps)
    via_named_getter = dales.get_profile_U().value_in(mps)
    via_generic_getter = dales.get_profile('U').value_in(mps)
    assert numpy.allclose(via_grid, via_named_getter, rtol=1.e-16)
    assert numpy.allclose(via_grid, via_generic_getter, rtol=1.e-16)
    # The profile is the horizontal (x, y) average of the volume field.
    u_volume = dales.fields.U.value_in(mps)
    assert numpy.allclose(via_grid, numpy.mean(u_volume, axis=(0, 1)), rtol=1.e-9)
    dales.cleanup_code()
    dales.stop()
    cleanup_data(workdir)
def test_qt_profile_sp(self):
    """QT profile equals the horizontal mean of the 3D QT field (4 workers)."""
    workdir = "work-sp5"
    dales = Dales(case="sp-testcase", workdir=workdir, number_of_workers=4, **kwargs)
    dales.parameters_DOMAIN.itot = 64
    dales.parameters_DOMAIN.jtot = 64
    t_start = dales.get_model_time()
    dales.evolve_model(t_start + (5 | units.s))
    profile = dales.profiles.QT.value_in(units.mfu)
    volume = dales.fields.QT.value_in(units.mfu)
    assert numpy.allclose(profile, numpy.mean(volume, axis=(0, 1)), rtol=1.e-9)
    dales.cleanup_code()
    dales.stop()
    cleanup_data(workdir)
def test_grid_profiles_sp(self):
    """Each profile quantity read through the grid slice must match the value
    returned by its dedicated getter function."""
    workdir = "work-sp6"
    dales = Dales(case="sp-testcase", workdir=workdir, number_of_workers=4, **kwargs)
    dales.parameters_DOMAIN.itot = 64
    dales.parameters_DOMAIN.jtot = 64
    t_start = dales.get_model_time()
    dales.evolve_model(t_start + (5 | units.s))
    # Getter functions take 1-based level indices; the grid slice is 0-based.
    levels = numpy.arange(1, 11)
    checks = [
        ("P", dales.get_presf_, units.Pa),
        ("rho", dales.get_rhof_, units.kg / units.m**3),
        ("rhob", dales.get_rhobf_, units.kg / units.m**3),
        ("A", dales.get_cloudfraction, units.m**2 / units.m**2),
        ("QL", dales.get_profile_QL, units.kg**2 / units.kg**2),
        ("QL_ice", dales.get_profile_QL_ice, units.kg**2 / units.kg**2),
        ("QR", dales.get_profile_QR, units.kg**2 / units.kg**2),
    ]
    for attr, getter, unit in checks:
        from_grid = getattr(dales.profiles[0:10], attr).value_in(unit)
        from_getter = getter(levels).value_in(unit)
        assert numpy.array_equal(from_grid, from_getter)
    dales.cleanup_code()
    dales.stop()
    cleanup_data(workdir)
def test_set_bomex_t_field(self):
    """Imposing a warm THL layer must leave a visible signature: after the
    same total simulated time, an unperturbed run is colder in that layer
    than the perturbed run, and both stay below the imposed 304 K."""
    workdir = "work-bomex4"
    dales = Dales(case="bomex", workdir=workdir, **kwargs)
    t_start = dales.get_model_time()
    dales.evolve_model(t_start + (10 | units.s))
    # Overwrite level 1 with a uniform 304 K layer.
    perturbed = numpy.copy(dales.fields.THL.value_in(units.K))
    perturbed[:, :, 1] = 304.
    dales.fields.THL = perturbed | units.K
    dales.evolve_model(t_start + (60 | units.s))
    layer_mean_perturbed = numpy.mean(dales.fields.THL.value_in(units.K)[:, :, 1], axis=(0, 1))
    dales.cleanup_code()
    dales.stop()
    cleanup_data(workdir)
    # Reference run without the perturbation, same total simulated time.
    dales = Dales(case="bomex", workdir=workdir, **kwargs)
    t_start = dales.get_model_time()
    dales.evolve_model(t_start + (70 | units.s))
    layer_mean_reference = numpy.mean(dales.fields.THL.value_in(units.K)[:, :, 1], axis=(0, 1))
    assert layer_mean_reference < layer_mean_perturbed < 304.
    dales.cleanup_code()
    dales.stop()
    cleanup_data(workdir)
def test_bomex_3d_grid_interface(self):
    """The set_field/get_field interface and the fields grid slice must agree."""
    workdir = "work-bomex45"
    dales = Dales(case="bomex", workdir=workdir, **kwargs)
    t_start = dales.get_model_time()
    dales.evolve_model(t_start + (2 | units.s))
    qblock = numpy.full((3, 3, 1), 1.234e-5) | units.mfu
    # Write via the 1-based function interface, read back both ways.
    dales.set_field("QT", qblock, imin=8, jmin=4, kmin=10)
    via_getter = dales.get_field("QT", imin=8, imax=11, jmin=4, jmax=7, kmin=10, kmax=11)
    via_grid = dales.fields[7:10, 3:6, 9:10].QT.value_in(units.mfu)
    assert numpy.array_equal(qblock, via_getter.value_in(units.mfu))
    assert numpy.array_equal(via_grid, via_getter.value_in(units.mfu))
    dales.cleanup_code()
    dales.stop()
    cleanup_data(workdir)
def test_set_bomex_t_value(self):
    """Set THL on a checkerboard pattern of columns and read values back.

    NOTE(review): the index expressions of the four assignment lines were
    corrupted in the source (mangled tokens in place of slices). They have
    been reconstructed from the assertions below — (odd, even) and
    (even, odd) columns read 302 K, (even, even) and (odd, odd) read 298 K.
    Confirm against repository history.
    """
    rundir = "work-bomex5"
    instance = Dales(case="bomex", workdir=rundir, **kwargs)
    tim = instance.get_model_time()
    instance.evolve_model(tim + (10 | units.s))
    # (odd i, even j) and (even i, odd j) columns at level 1 -> 302 K.
    instance.fields[1::2, 0::2, 1].THL = 302. | units.K
    instance.fields[0::2, 1::2, 1].THL = 302. | units.K
    # (even i, even j) and (odd i, odd j) columns at level 1 -> 298 K.
    instance.fields[0::2, 0::2, 1].THL = 298. | units.K
    instance.fields[1::2, 1::2, 1].THL = 298. | units.K
    thl1 = instance.fields.THL.value_in(units.K)[5, 6, 1]  # (odd, even)
    thl2 = instance.fields.THL.value_in(units.K)[8, 7, 1]  # (even, odd)
    thl3 = instance.fields.THL.value_in(units.K)[4, 2, 1]  # (even, even)
    thl4 = instance.fields.THL.value_in(units.K)[3, 3, 1]  # (odd, odd)
    assert thl1 == thl2 == 302.
    assert thl3 == thl4 == 298.
    instance.cleanup_code()
    instance.stop()
    cleanup_data(rundir)
def test_set_bomex_q_forcing(self):
    """A strong low-level QT forcing tendency must show up in the QT profile."""
    workdir = "work-bomex6"
    dales = Dales(case="bomex", workdir=workdir, **kwargs)
    t_start = dales.get_model_time()
    dales.evolve_model(t_start + (10 | units.s))
    heights_km = dales.get_zf().value_in(units.km)
    below = numpy.argwhere(heights_km < 2.2)
    above = numpy.argwhere(heights_km >= 2.2)
    tendency_unit = units.mfu / units.s
    dales.forcing_profiles[below].QT = 1.e-5 | tendency_unit
    dales.forcing_profiles[above].QT = 1.e-8 | tendency_unit
    assert dales.forcing_profiles.QT[0].value_in(tendency_unit) == 1.e-5
    assert dales.forcing_profiles.QT[-1].value_in(tendency_unit) == 1.e-8
    t_now = dales.get_model_time()
    dales.evolve_model(t_now + (60 | units.s))
    # The strongly forced bottom level must have moistened more than the top.
    assert dales.profiles.QT[0].value_in(units.mfu) > dales.profiles.QT[-1].value_in(units.mfu)
    dales.cleanup_code()
    dales.stop()
    cleanup_data(workdir)
def test_set_bomex_q_nudging(self):
    """A moist low-level QT nudging target must show up in the QT profile."""
    workdir = "work-bomex7"
    dales = Dales(case="bomex", workdir=workdir, **kwargs)
    t_start = dales.get_model_time()
    dales.evolve_model(t_start + (10 | units.s))
    heights_km = dales.get_zf().value_in(units.km)
    below = numpy.argwhere(heights_km < 2.2)
    above = numpy.argwhere(heights_km >= 2.2)
    dales.nudging_profiles[below].QT = 1.e-3 | units.mfu
    dales.nudging_profiles[above].QT = 1.e-6 | units.mfu
    assert dales.nudging_profiles.QT[0].value_in(units.mfu) == 1.e-3
    assert dales.nudging_profiles.QT[-1].value_in(units.mfu) == 1.e-6
    t_now = dales.get_model_time()
    dales.evolve_model(t_now + (60 | units.s))
    # The bottom level is nudged towards a much moister state than the top.
    assert dales.profiles.QT[0].value_in(units.mfu) > dales.profiles.QT[-1].value_in(units.mfu)
    dales.cleanup_code()
    dales.stop()
    cleanup_data(workdir)
def test_atex_scalar_fields(self):
    """Surface scalars (heat flux and roughness lengths) must round-trip
    through the setters, the scalar getters, the scalars grid and the
    surface fields."""
    workdir = "work-atex"
    dales = Dales(case="atex", workdir=workdir, **kwargs)
    dales.parameters_DOMAIN.itot = 32
    dales.parameters_DOMAIN.jtot = 32
    t_start = dales.get_model_time()
    dales.evolve_model(t_start + (10 | units.s))
    # Surface heat flux.
    flux_unit = units.m * units.s ** -1 * units.K
    dales.set_wt_surf(0.1 | flux_unit)
    wt = dales.get_wt_surf().value_in(flux_unit)
    wt_grid = dales.scalars.wt.value_in(flux_unit)
    assert (wt == wt_grid == 0.1)
    # Heat roughness length (read back after another step).
    dales.set_z0h_surf(0.2 | units.m)
    dales.evolve_model(t_start + (1 | units.s))
    z0h = dales.get_z0h_surf().value_in(units.m)
    z0h_grid = dales.scalars.z0h[0].value_in(units.m)
    assert (z0h == z0h_grid == 0.2)
    assert (numpy.all(dales.surface_fields.z0h.value_in(units.m) == 0.2))
    # Momentum roughness length.
    dales.set_z0m_surf(0.3 | units.m)
    z0m = dales.get_z0m_surf().value_in(units.m)
    z0m_grid = dales.scalars.z0m[0].value_in(units.m)
    assert (z0m == z0m_grid == 0.3)
    assert (numpy.all(dales.surface_fields.z0m.value_in(units.m) == 0.3))
    dales.cleanup_code()
    dales.stop()
    cleanup_data(workdir)
def test_bomex_2d_fields(self):
    """All 2D surface diagnostics must have the horizontal grid shape, and the
    total water path must be consistent with the vertically summed QT field."""
    workdir = "work-bomex8"
    dales = Dales(case="bomex", workdir=workdir, **kwargs)
    t_start = dales.get_model_time()
    dales.evolve_model(t_start + (60 | units.s))
    expected_units = {
        "TWP": units.kg / units.m ** 2,
        "ustar": units.m / units.s,
        "tskin": units.K,
        "qskin": units.mfu,
        "LE": units.W / units.m ** 2,
        "H": units.W / units.m ** 2,
        "obl": units.m,
    }
    for name, unit in expected_units.items():
        values = getattr(dales.surface_fields, name).value_in(unit)
        assert (values.shape == (dales.get_itot(), dales.get_jtot()))
    # Cross-check TWP against a crude column integral of QT.
    twp = dales.surface_fields.TWP.value_in(expected_units["TWP"])
    qt = dales.fields.QT.value_in(units.mfu)
    dz = dales.get_zf().value_in(units.m)[-1] / dales.get_ktot()
    assert (numpy.allclose(twp, numpy.sum(qt, axis=2) * dz, rtol=0.1))
    dales.cleanup_code()
    dales.stop()
    cleanup_data(workdir)
def test_bomex_3d_fields(self):
rundir = "work-bomex9"
instance = Dales(case="bomex", workdir=rundir, **kwargs)
tim = instance.get_model_time()
instance.evolve_model(tim + (60 | units.s))
rain = instance.fields.QR.value_in(units.mfu)
tot_rain = instance.scalars.QR.value_in(units.kg / units.m ** 2)
assert (tot_rain == numpy.mean(rain))
ice_volume = instance.fields.QL_ice.value_in(units.mfu)
ice_profile = instance.profiles.QL_ice.value_in(units.mfu)
assert numpy.sum(ice_volume) | |
# utils/hct/hctdb_instrhelp.py
# Copyright (C) Microsoft Corporation. All rights reserved.
# This file is distributed under the University of Illinois Open Source License. See LICENSE.TXT for details.
import argparse
import functools
import collections
from hctdb import *
# get db singletons
g_db_dxil = None

def get_db_dxil():
    """Lazily construct and return the process-wide db_dxil singleton."""
    global g_db_dxil
    if g_db_dxil is not None:
        return g_db_dxil
    g_db_dxil = db_dxil()
    return g_db_dxil
g_db_hlsl = None

def get_db_hlsl():
    """Lazily construct and return the process-wide db_hlsl singleton.

    Intrinsic definitions are read from gen_intrin_main.txt next to this
    script on first use.
    """
    global g_db_hlsl
    if g_db_hlsl is not None:
        return g_db_hlsl
    thisdir = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(thisdir, "gen_intrin_main.txt"), "r") as f:
        g_db_hlsl = db_hlsl(f)
    return g_db_hlsl
def format_comment(prefix, val):
    """Format *val* as a sequence of line comments, wrapping at 80 columns.

    Each output line is *prefix* followed by a chunk of *val*, broken at the
    last space that fits within the remaining width.

    Bug fix: when a chunk contained no space within the available width,
    ``rfind`` returned -1 and the loop never consumed any input (an infinite
    loop that also mangled the text). Over-long words are now hard-split at
    the content width.
    """
    result = ""
    line_width = 80
    content_width = line_width - len(prefix)
    while len(val):
        if len(val) < content_width:
            # Remainder fits on one line.
            result += prefix + val.strip() + "\n"
            break
        split_idx = val.rfind(" ", 0, content_width)
        if split_idx == -1:
            # No space to break on: hard-split so the loop always progresses.
            result += prefix + val[:content_width].strip() + "\n"
            val = val[content_width:]
        else:
            result += prefix + val[:split_idx].strip() + "\n"
            val = val[split_idx + 1:]
    return result
def format_rst_table(list_of_tuples):
    """Render the given row tuples as a reStructuredText simple table.

    The first tuple is treated as the header row; a '==== ====' banner is
    emitted at the top, after the header, and at the bottom.
    """
    # Column widths: the widest cell (rendered as a string) in each column.
    widths = None
    for row in list_of_tuples:
        if widths is None:
            widths = [0] * len(row)
        for col, cell in enumerate(row):
            widths[col] = max(widths[col], len(str(cell)))
    # Banner line: '=' runs separated by single spaces.
    banner = " ".join("=" * w for w in widths) + "\n"
    out = banner
    for idx, row in enumerate(list_of_tuples):
        for col, cell in enumerate(row):
            if col > 0:
                out += " "
            text = str(cell)
            out += text + " " * (widths[col] - len(text))
        # Strip the padding of the last cell before terminating the line.
        out = out.rstrip()
        out += "\n"
        if idx == 0:
            out += banner
    out += banner
    return out
def build_range_tuples(i):
    """Yield (low, high) tuples, one per contiguous run in the sorted input."""
    values = sorted(i)
    if not values:
        return
    low = high = values[0]
    for val in values[1:]:
        if val == high + 1:
            # Extends the current run.
            high = val
        else:
            yield (low, high)
            low = high = val
    yield (low, high)
def build_range_code(var, i):
    """Build a C boolean expression testing whether *var* matches any value in *i*.

    Contiguous runs become range checks; singletons become equality tests,
    all joined with '||'.
    """
    conditions = []
    for low, high in build_range_tuples(i):
        if low == high:
            conditions.append("%s == %s" % (var, str(low)))
        else:
            conditions.append("(%d <= %s && %s <= %d)" % (low, var, var, high))
    return " || ".join(conditions)
class db_docsref_gen:
    """Generates a single-page HTML reference for DXIL instructions and
    validation rules.

    Bug fixes relative to the original:
    - ``print_instruction_details`` referenced an undefined local ``db``
      when describing enum-typed operands; it now uses ``self.db``.
    - ``print_toc`` initialized ``last_category`` to ``""``, emitting a
      stray closing ``</ul>`` before the first category; it now starts as
      ``None``.
    """
    def __init__(self, db):
        # db: the DXIL database (db_dxil) to document. Instructions and
        # rules are sorted by (category, name) so the output is stable.
        self.db = db
        instrs = [i for i in self.db.instr if i.is_dxil_op]
        instrs = sorted(instrs, key=lambda v : ("" if v.category == None else v.category) + "." + v.name)
        self.instrs = instrs
        val_rules = sorted(db.val_rules, key=lambda v : ("" if v.category == None else v.category) + "." + v.name)
        self.val_rules = val_rules
    def print_content(self):
        """Emit the full HTML document to stdout."""
        self.print_header()
        self.print_body()
        self.print_footer()
    def print_header(self):
        print("<!DOCTYPE html>")
        print("<html><head><title>DXIL Reference</title>")
        print("<style>body { font-family: Verdana; font-size: small; }</style>")
        print("</head><body><h1>DXIL Reference</h1>")
        self.print_toc("Instructions", "i", self.instrs)
        self.print_toc("Rules", "r", self.val_rules)
    def print_body(self):
        self.print_instruction_details()
        self.print_valrule_details()
    def print_instruction_details(self):
        """One section per DXIL instruction: opcode, remarks and operand list."""
        print("<h2>Instruction Details</h2>")
        for i in self.instrs:
            print("<h3><a name='i%s'>%s</a></h3>" % (i.name, i.name))
            print("<div>Opcode: %d. This instruction %s.</div>" % (i.dxil_opid, i.doc))
            if i.remarks:
                # This is likely a .rst fragment, but this will do for now.
                print("<div> " + i.remarks + "</div>")
            print("<div>Operands:</div>")
            print("<ul>")
            for o in i.ops:
                if o.pos == 0:
                    print("<li>result: %s - %s</li>" % (o.llvm_type, o.doc))
                else:
                    # BUGFIX: was `db.enum_idx` (undefined local) - use self.db.
                    enum_desc = "" if o.enum_name == "" else " one of %s: %s" % (o.enum_name, ",".join(self.db.enum_idx[o.enum_name].value_names()))
                    print("<li>%d - %s: %s%s%s</li>" % (o.pos - 1, o.name, o.llvm_type, "" if o.doc == "" else " - " + o.doc, enum_desc))
            print("</ul>")
            print("<div><a href='#Instructions'>(top)</a></div>")
    def print_valrule_details(self):
        print("<h2>Rule Details</h2>")
        for i in self.val_rules:
            print("<h3><a name='r%s'>%s</a></h3>" % (i.name, i.name))
            print("<div>" + i.doc + "</div>")
            print("<div><a href='#Rules'>(top)</a></div>")
    def print_toc(self, name, aprefix, values):
        """Table of contents with one <ul> per category."""
        print("<h2><a name='" + name + "'>" + name + "</a></h2>")
        # BUGFIX: start at None so no stray </ul> precedes the first category.
        last_category = None
        for i in values:
            if i.category != last_category:
                if last_category is not None:
                    print("</ul>")
                print("<div><b>%s</b></div><ul>" % i.category)
                last_category = i.category
            print("<li><a href='#" + aprefix + "%s'>%s</a></li>" % (i.name, i.name))
        print("</ul>")
    def print_footer(self):
        print("</body></html>")
class db_instrhelp_gen:
    """A generator of instruction helper classes.

    Emits (to stdout) a C++ header of one helper struct per instruction in
    the database, each wrapping an llvm::Instruction* with identification,
    validation and typed operand accessors.
    """
    def __init__(self, db):
        # db: the DXIL instruction database (db_dxil).
        self.db = db
        # Map of LLVM scalar type names to (C++ type name, bit width).
        TypeInfo = collections.namedtuple("TypeInfo", "name bits")
        self.llvm_type_map = {
            "i1": TypeInfo("bool", 1),
            "i8": TypeInfo("int8_t", 8),
            "u8": TypeInfo("uint8_t", 8),
            "i32": TypeInfo("int32_t", 32),
            "u32": TypeInfo("uint32_t", 32)
        }
        # Fully qualified C++ helper used to identify DXIL op call sites.
        self.IsDxilOpFuncCallInst = "hlsl::OP::IsDxilOpFuncCallInst"
    def print_content(self):
        """Emit the complete generated header: banner, structs, footer."""
        self.print_header()
        self.print_body()
        self.print_footer()
    def print_header(self):
        # File banner and namespace opening for the generated header.
        print("///////////////////////////////////////////////////////////////////////////////")
        print("// //")
        print("// Copyright (C) Microsoft Corporation. All rights reserved. //")
        print("// DxilInstructions.h //")
        print("// //")
        print("// This file provides a library of instruction helper classes. //")
        print("// //")
        print("// MUCH WORK YET TO BE DONE - EXPECT THIS WILL CHANGE - GENERATED FILE //")
        print("// //")
        print("///////////////////////////////////////////////////////////////////////////////")
        print("")
        print("// TODO: add correct include directives")
        print("// TODO: add accessors with values")
        print("// TODO: add validation support code, including calling into right fn")
        print("// TODO: add type hierarchy")
        print("namespace hlsl {")
    def bool_lit(self, val):
        # Render a Python truth value as a C++ boolean literal.
        return "true" if val else "false";
    def op_type(self, o):
        # C++ type name for an operand's LLVM type; raises for unmapped types.
        if o.llvm_type in self.llvm_type_map:
            return self.llvm_type_map[o.llvm_type].name
        raise ValueError("Don't know how to describe type %s for operand %s." % (o.llvm_type, o.name))
    def op_size(self, o):
        # Bit width for an operand's LLVM type; raises for unmapped types.
        if o.llvm_type in self.llvm_type_map:
            return self.llvm_type_map[o.llvm_type].bits
        raise ValueError("Don't know how to describe type %s for operand %s." % (o.llvm_type, o.name))
    def op_const_expr(self, o):
        # C++ expression extracting a constant operand's value from the call.
        return "(%s)(llvm::dyn_cast<llvm::ConstantInt>(Instr->getOperand(%d))->getZExtValue())" % (self.op_type(o), o.pos - 1)
    def op_set_const_expr(self, o):
        # C++ expression building a ConstantInt of the operand's width from `val`.
        type_size = self.op_size(o)
        return "llvm::Constant::getIntegerValue(llvm::IntegerType::get(Instr->getContext(), %d), llvm::APInt(%d, (uint64_t)val))" % (type_size, type_size)
    def print_body(self):
        # One helper struct per non-reserved instruction: identification,
        # validation support, operand-index enum, and typed accessors.
        for i in self.db.instr:
            if i.is_reserved: continue
            if i.inst_helper_prefix:
                struct_name = "%s_%s" % (i.inst_helper_prefix, i.name)
            elif i.is_dxil_op:
                struct_name = "DxilInst_%s" % i.name
            else:
                struct_name = "LlvmInst_%s" % i.name
            if i.doc:
                print("/// This instruction %s" % i.doc)
            print("struct %s {" % struct_name)
            print(" llvm::Instruction *Instr;")
            print(" // Construction and identification")
            print(" %s(llvm::Instruction *pInstr) : Instr(pInstr) {}" % struct_name)
            print(" operator bool() const {")
            if i.is_dxil_op:
                # DXIL ops are calls to the op function with a matching opcode.
                op_name = i.fully_qualified_name()
                print(" return %s(Instr, %s);" % (self.IsDxilOpFuncCallInst, op_name))
            else:
                # Plain LLVM instructions are identified by opcode.
                print(" return Instr->getOpcode() == llvm::Instruction::%s;" % i.name)
            print(" }")
            print(" // Validation support")
            print(" bool isAllowed() const { return %s; }" % self.bool_lit(i.is_allowed))
            if i.is_dxil_op:
                print(" bool isArgumentListValid() const {")
                print(" if (%d != llvm::dyn_cast<llvm::CallInst>(Instr)->getNumArgOperands()) return false;" % (len(i.ops) - 1))
                print(" return true;")
                # TODO - check operand types
                print(" }")
            print(" // Metadata")
            print(" bool requiresUniformInputs() const { return %s; }" % self.bool_lit(i.requires_uniform_inputs))
            EnumWritten = False
            for o in i.ops:
                if o.pos > 1: # 0 is return type, 1 is DXIL OP id
                    if not EnumWritten:
                        print(" // Operand indexes")
                        print(" enum OperandIdx {")
                        EnumWritten = True
                    print(" arg_%s = %d," % (o.name, o.pos - 1))
            if EnumWritten:
                print(" };")
            AccessorsWritten = False
            for o in i.ops:
                if o.pos > 1: # 0 is return type, 1 is DXIL OP id
                    if not AccessorsWritten:
                        print(" // Accessors")
                        AccessorsWritten = True
                    print(" llvm::Value *get_%s() const { return Instr->getOperand(%d); }" % (o.name, o.pos - 1))
                    print(" void set_%s(llvm::Value *val) { Instr->setOperand(%d, val); }" % (o.name, o.pos - 1))
                    if o.is_const:
                        # Typed value accessors only for types we know how to map.
                        if o.llvm_type in self.llvm_type_map:
                            print(" %s get_%s_val() const { return %s; }" % (self.op_type(o), o.name, self.op_const_expr(o)))
                            print(" void set_%s_val(%s val) { Instr->setOperand(%d, %s); }" % (o.name, self.op_type(o), o.pos - 1, self.op_set_const_expr(o)))
            print("};")
            print("")
    def print_footer(self):
        print("} // namespace hlsl")
class db_enumhelp_gen:
    """A generator of enumeration declarations."""
    def __init__(self, db):
        # db: the DXIL database whose enums are rendered.
        self.db = db
        # Some enums should get a last enum marker.
        # Maps enum name -> name of the trailing "count" member to append.
        self.lastEnumNames = {
            "OpCode": "NumOpCodes",
            "OpCodeClass": "NumOpClasses"
        }
def print_enum(self, e, **kwargs):
print("// %s" % e.doc)
print("enum class %s : unsigned {" % e.name)
hide_val = kwargs.get("hide_val", False)
sorted_values = e.values
if kwargs.get("sort_val", True):
sorted_values = sorted(e.values, key=lambda v : ("" if v.category == None else v.category) + "." + v.name)
last_category = None
for v in sorted_values:
if v.category != last_category:
if last_category != None:
print("")
print(" // %s" % v.category)
last_category = v.category
line_format = " {name}"
if not e.is_internal and not hide_val:
line_format += " = {value}"
line_format += ","
if v.doc:
line_format += " // {doc}"
print(line_format.format(name=v.name, value=v.value, doc=v.doc))
if | |
makes sense to implement a
gradient solver. The question is: how do I make this fast with arbitrary
equations. Maybe start with a product version like linsolve and go from there
"""
pass
    def solve(self):
        """Solve the system of equations (not yet implemented placeholder)."""
        pass
# XXX make a version of linproductsolver that taylor expands in e^{a+bi} form
# see https://github.com/HERA-Team/linsolve/issues/15
class LinProductSolver:
    """Iterative Gauss-Newton solver for nonlinear product equations."""
    def __init__(self, data, sol0, wgts={}, sparse=False, **kwargs):
        """Set up a nonlinear system of equations of the form a*b + c*d = 1.0
        to linearize via Taylor expansion and solve iteratively using the Gauss-Newton algorithm.
        Args:
            data: Dictionary that maps nonlinear product equations, written as valid python-interpetable
                strings that include the variables in question, to (complex) numbers or numpy arrarys.
                Variables with trailing underscores '_' are interpreted as complex conjugates (e.g. x*y_
                parses as x * y.conj()).
            sol0: Dictionary mapping all variables (as keyword strings) to their starting guess values.
                This is the point that is Taylor expanded around, so it must be relatively close to the
                true chi^2 minimizing solution. In the same format as that produced by
                linsolve.LogProductSolver.solve() or linsolve.LinProductSolver.solve().
            wgts: Dictionary that maps equation strings from data to real weights to apply to each
                equation. Weights are treated as 1/sigma^2. All equations in the data must have a weight
                if wgts is not the default, {}, which means all 1.0s.
            sparse: Boolean (default False). If True, represents A matrix sparsely (though AtA, Aty end up dense)
                May be faster for certain systems of equations.
            **kwargs: keyword arguments of constants (python variables in keys of data that
                are not to be solved for)
        Returns:
            None
        """
        # Prefix attached to variable names of the linearized delta terms.
        # XXX make this something hard to collide with
        # see https://github.com/HERA-Team/linsolve/issues/17
        self.prepend = "d"
        self.data, self.sparse, self.keys = data, sparse, list(data.keys())
        self.wgts = verify_weights(wgts, self.keys)
        # Constants may be passed either as constants={...} or as raw kwargs.
        constants = kwargs.pop("constants", kwargs)
        # sols_kwargs holds constants plus the current solution estimate.
        self.init_kwargs, self.sols_kwargs = constants, deepcopy(constants)
        self.sols_kwargs.update(sol0)
        # Taylor-expand every equation around sol0 and build the linear solver.
        self.all_terms, self.taylors, self.taylor_keys = self.gen_taylors()
        self.build_solver(sol0)
        self.dtype = self.ls.dtype
def gen_taylors(self, keys=None):
"""Parses all terms, performs a taylor expansion, and maps equation keys to taylor expansion keys."""
if keys is None:
keys = self.keys
all_terms = [ast_getterms(ast.parse(k, mode="eval")) for k in keys]
taylors, taylor_keys = [], {}
for terms, k in zip(all_terms, keys):
taylor = taylor_expand(terms, self.init_kwargs, prepend=self.prepend)
taylors.append(taylor)
taylor_keys[k] = jointerms(taylor[len(terms) :])
return all_terms, taylors, taylor_keys
    def build_solver(self, sol0):
        """Builds a LinearSolver using the taylor expansions and all relevant constants.
        Update it with the latest solutions."""
        # Re-key data and weights by the linearized (Taylor) equation strings.
        dlin, wlin = {}, {}
        for k in self.keys:
            tk = self.taylor_keys[k]
            dlin[tk] = self.data[
                k
            ]  # in theory, this will always be replaced with data - ans0 before use
            try:
                wlin[tk] = self.wgts[k]
            except (KeyError):
                # No per-equation weight given; LinearSolver defaults to 1.0.
                pass
        self.ls = LinearSolver(
            dlin, wgts=wlin, sparse=self.sparse, constants=self.sols_kwargs
        )
        self.eq_dict = {
            eq.val: eq for eq in self.ls.eqs
        }  # maps taylor string expressions to linear equations
        # Now make sure every taylor equation has every relevant constant, even if they don't appear in the derivative terms.
        for k, terms in zip(self.keys, self.all_terms):
            for term in terms:
                for t in term:
                    t_name = get_name(t)
                    if t_name in self.sols_kwargs:
                        self.eq_dict[self.taylor_keys[k]].add_const(
                            t_name, self.sols_kwargs
                        )
        self._update_solver(sol0)
def _update_solver(self, sol):
"""Update all constants in the internal LinearSolver and its LinearEquations based on new solutions.
Also update the residuals (data - ans0) for next iteration."""
self.sol0 = sol
self.sols_kwargs.update(sol)
for eq in self.ls.eqs:
for c in list(eq.consts.values()):
if c.name in sol:
eq.consts[c.name].val = self.sols_kwargs[c.name]
self.ls.consts.update(eq.consts)
ans0 = self._get_ans0(sol)
for k in ans0:
self.ls.data[self.taylor_keys[k]] = self.data[k] - ans0[k]
def _get_ans0(self, sol, keys=None):
"""Evaluate the system of equations given input sol.
Specify keys to evaluate only a subset of the equations."""
if keys is None:
keys = self.keys
all_terms = self.all_terms
taylors = self.taylors
else:
all_terms, taylors, _ = self.gen_taylors(keys)
ans0 = {}
for k, taylor, terms in zip(keys, taylors, all_terms):
eq = self.eq_dict[self.taylor_keys[k]]
ans0[k] = np.sum([eq.eval_consts(t) for t in taylor[: len(terms)]], axis=0)
return ans0
def solve(self, rcond=None, mode="default"):
"""Executes one iteration of a LinearSolver on the taylor-expanded system of
equations, improving sol0 to get sol.
Args:
rcond: cutoff ratio for singular values useed in numpy.linalg.lstsq, numpy.linalg.pinv,
or (if sparse) as atol and btol in scipy.sparse.linalg.lsqr
Default: None (resolves to machine precision for inferred dtype)
mode: 'default', 'lsqr', 'pinv', or 'solve', selects which inverter to use, unless all equations share the same A matrix, in which case pinv is always used`.
'default': alias for 'pinv'.
'lsqr': uses numpy.linalg.lstsq to do an inversion-less solve. Usually
the fastest solver.
'solve': uses numpy.linalg.solve to do an inversion-less solve. Fastest,
but only works for fully constrained systems of equations.
'pinv': uses numpy.linalg.pinv to perform a pseudo-inverse and then solves. Can
sometimes be more numerically stable (but slower) than 'lsqr'.
All of these modes are superceded if the same system of equations applies
to all datapoints in an array. In this case, a inverse-based method is used so
that the inverted matrix can be re-used to solve all array indices.
Returns:
sol: a dictionary of complex solutions with variables as keys
"""
dsol = self.ls.solve(rcond=rcond, mode=mode)
sol = {}
for dk in dsol:
k = dk[len(self.prepend) :]
sol[k] = self.sol0[k] + dsol[dk]
return sol
def eval(self, sol, keys=None):
"""Returns a dictionary evaluating data keys to the current values given sol and consts.
Uses the stored data object unless otherwise specified."""
if type(keys) is str:
keys = [keys]
elif type(keys) is dict:
keys = list(keys.keys())
return self._get_ans0(sol, keys=keys)
def chisq(self, sol, data=None, wgts=None):
"""Compute Chi^2 = |obs - mod|^2 / sigma^2 for the specified solution. Weights are treated as 1/sigma^2.
wgts = {} means sigma = 1. Uses the stored data and weights unless otherwise overwritten."""
if data is None:
data = self.data
if wgts is None:
wgts = self.wgts
wgts = verify_weights(wgts, list(data.keys()))
return self.ls._chisq(sol, data, wgts, self.eval)
def solve_iteratively(
self, conv_crit=None, maxiter=50, mode="default", verbose=False
):
"""Repeatedly solves and updates linsolve until convergence or maxiter is reached.
Returns a meta object containing the number of iterations, chisq, and convergence criterion.
Args:
conv_crit: A convergence criterion below which to stop iterating.
Converegence is measured L2-norm of the change in the solution of all the variables
divided by the L2-norm of the solution itself.
Default: None (resolves to machine precision for inferred dtype)
maxiter: An integer maximum number of iterations to perform before quitting. Default 50.
mode: 'default', 'lsqr', 'pinv', or 'solve', selects which inverter to use, unless all equations share the same A matrix, in which case pinv is always used`.
'default': alias for 'pinv'.
'lsqr': uses numpy.linalg.lstsq to do an inversion-less solve. Usually
the fastest solver.
'solve': uses numpy.linalg.solve to do an inversion-less solve. Fastest,
but only works for fully constrained systems of equations.
'pinv': uses numpy.linalg.pinv to perform a pseudo-inverse and then solves. Can
sometimes be more numerically stable (but slower) than 'lsqr'.
All of these modes are superceded if the same system of equations applies
to all datapoints in an array. In this case, a inverse-based method is used so
that the inverted matrix can be re-used to solve all array indices.
verbose: print information about iterations
Returns: meta, sol
meta: a dictionary with metadata about the solution, including
iter: the number of iterations taken to reach convergence (or maxiter)
chisq: the chi^2 of the solution produced by the final iteration
conv_crit: the convergence criterion evaluated at the final iteration
sol: a dictionary of complex solutions with variables as keys
"""
if conv_crit is None:
conv_crit = np.finfo(self.dtype).resolution
for i in range(1, maxiter + 1):
if verbose:
print("Beginning iteration %d/%d" % (i, maxiter))
# rcond=conv_crit works because you can't get better precision than the accuracy of your inversion
# and vice versa, there's no real point in inverting with greater precision than you are shooting for
new_sol = self.solve(rcond=conv_crit, mode=mode)
deltas = [new_sol[k] - self.sol0[k] for k in new_sol.keys()]
conv = np.linalg.norm(deltas, axis=0) / np.linalg.norm(
list(new_sol.values()), axis=0
)
if np.all(conv < conv_crit) or i == maxiter:
meta = {"iter": i, | |
rew_mean, rew_range, rew_std, rew_mean_new, rew_range_new, rew_std_new
    def _add_trajs_to_new_trajs_list_memory_RW(self, produced_trajs):
        """Insert newly produced trajectories into the RAM buffer, replacing
        the worst entries ("RW" = Replace Worst).

        Returns mean/range/std of the returns over the whole buffer and over
        the newly produced batch.
        """
        # poduced_trajs = myutils.produce_trajs_from_policy(self.actor_critic, self.init_params['num_trajs_produced_each_iter'], self.init_params['produced_traj_length'], self.kwargs, self.myargs)
        produced_trajs_list_all, produced_trajs_returns_all = myutils.trajs_calc_return_no_device(produced_trajs, self.myargs.discounted_rew, self.myargs.gamma)
        # NOTE(review): `start_rew_updates` is not defined in this scope (not a
        # parameter, local, or self attribute visible here); as written this
        # raises NameError unless it happens to exist as a module global. It
        # likely should come from self or self.myargs — confirm and fix.
        if not start_rew_updates:
            # Before reward updates start: keep only positive-return trajectories.
            produced_trajs_list, produced_trajs_returns = [], []
            for traj, rew in zip(produced_trajs_list_all, produced_trajs_returns_all):
                if rew > 0:
                    produced_trajs_list.append(traj)
                    produced_trajs_returns.append(rew)
        else:
            produced_trajs_list, produced_trajs_returns = produced_trajs_list_all, produced_trajs_returns_all
        start_idx, end_idx = self._calc_start_stop_and_update_new_trajs_last_idx_v1(len(produced_trajs_list))
        if len(self.new_trajs_returns_list) > 1:
            # Sort the buffer by return (ascending) so the slice chosen by
            # _calc..._v1 on a full buffer targets the worst trajectories.
            self.new_trajs_returns_list, self.new_trajs_list = (list(t) for t in zip(*sorted(zip(self.new_trajs_returns_list, self.new_trajs_list))))
        self.new_trajs_list[start_idx:end_idx] = produced_trajs_list[:]
        self.new_trajs_returns_list[start_idx:end_idx] = produced_trajs_returns[:]
        if len(produced_trajs_returns) > 0:
            # NOTE(review): the three buffer-wide stats assigned here are dead —
            # they are unconditionally recomputed in the block below.
            rew_mean = np.mean(self.new_trajs_returns_list)
            rew_range = np.abs(np.max(self.new_trajs_returns_list) - np.min(self.new_trajs_returns_list))
            rew_std = np.std(self.new_trajs_returns_list)
            rew_mean_new = np.mean(produced_trajs_returns)
            rew_range_new = np.abs(np.max(produced_trajs_returns) - np.min(produced_trajs_returns))
            rew_std_new = np.std(produced_trajs_returns)
        else:
            rew_mean_new, rew_range_new, rew_std_new = 0, 0, 0
        if len(self.new_trajs_returns_list) > 0:
            rew_mean = np.mean(self.new_trajs_returns_list)
            rew_range = np.abs(np.max(self.new_trajs_returns_list) - np.min(self.new_trajs_returns_list))
            rew_std = np.std(self.new_trajs_returns_list)
        else:
            rew_mean, rew_range, rew_std = 0, 0, 0
        return rew_mean, rew_range, rew_std, rew_mean_new, rew_range_new, rew_std_new
    def _calc_start_stop_and_update_new_trajs_last_idx(self, num_trajs):
        """Return (start_idx, end_idx) for a FIFO insertion of num_trajs items.

        NOTE(review): end_idx here is start_idx + num_trajs - 1 (inclusive),
        whereas the _v1 variant returns an exclusive end bound; if this value
        is used directly as a slice end it drops the last element — confirm
        the intended convention.
        """
        # first in first out: on overflow, wrap the write cursor back to
        # num_opt_demo (presumably to preserve a demo prefix — confirm).
        if self.new_trajs_last_idx+num_trajs >= self.init_params["size_of_new_trajs_list"]:
            self.new_trajs_last_idx = self.myargs.num_opt_demo
        start_idx, end_idx = self.new_trajs_last_idx, self.new_trajs_last_idx+num_trajs-1
        self.new_trajs_last_idx += num_trajs
        return start_idx, end_idx
def _calc_start_stop_and_update_new_trajs_last_idx_v1(self, num_trajs):
# only replaces the last elements (of the sorted buffer)
if self.new_trajs_last_idx+num_trajs <= self.init_params["size_of_new_trajs_list"]:
start_idx, end_idx = self.new_trajs_last_idx, self.new_trajs_last_idx+num_trajs
self.new_trajs_last_idx += num_trajs
else:
start_idx, end_idx = self.init_params["size_of_new_trajs_list"]-1-num_trajs, self.init_params["size_of_new_trajs_list"]-1
return start_idx, end_idx
def grad_g_theta_update(self, overal_tr_iter_idx, num_batches, num_rew_nets, batch_size, demos_or_policy, pretrain_or_train, discounted_rew):
    """
    Run `num_batches` preference-learning updates over every reward
    network in `self.reward_objs`, append training statistics to log
    files, and optionally evaluate the networks on the held-out
    validation pairs.

    this function should only be called when it's possible to produce trajectory pairs from the buffer

    Parameters:
        overal_tr_iter_idx: training-iteration index; only written to the
            log files.
        num_batches: number of update batches per reward network.
        num_rew_nets: unused here; the networks are taken from
            `self.reward_objs`.
        batch_size: number of trajectory pairs per update.
        demos_or_policy, pretrain_or_train: not referenced in this body.
        discounted_rew: forwarded to the per-network update; if truthy,
            per-step rewards are gamma-discounted before summation.

    Returns:
        (pairs_all, returns_all, pair_select_time_total,
         rew_update_time_total, load_trajs_time_total)
    """
    # zero the gradient buffer for all the reward networks
    criterion = torch.nn.CrossEntropyLoss(weight=None, reduction='mean')
    # NOTE(review): losses_all_nets / accuracies_all_nets are populated
    # nowhere in this method -- apparently dead locals.
    losses_all_nets = []
    accuracies_all_nets = []
    # print(f"***************** Updating reward_obj: {rew_obj_idx} \n")
    loss_per_rew_net = []
    accuracy_per_rew_net = []
    pairs_all = []
    returns_all = []
    pair_select_time_total = 0
    load_trajs_time_total = 0
    rew_update_time_total = 0
    for batch_counter in range(num_batches):
        # Iterate over all reward networks for training
        for rew_obj_idx, reward_obj in enumerate(self.reward_objs):
            # One gradient step on one network; returns the sentinel
            # string "no pair" when no usable trajectory pair was drawn.
            loss_item, accuracy, pairs, returns, pair_select_time, rew_update_time, time_loading_trajs = self._grad_individual_rew_obj(batch_size, reward_obj, criterion, discounted_rew)
            pair_select_time_total += pair_select_time
            rew_update_time_total += rew_update_time
            load_trajs_time_total += time_loading_trajs
            pairs_all.extend(pairs)
            returns_all.extend(returns)
            if loss_item != "no pair":
                loss_per_rew_net.append(loss_item)
                accuracy_per_rew_net.append(accuracy)
    # Here, after all updates, we write onto the rew_losses
    # At least one network must have produced a usable pair.
    assert len(loss_per_rew_net) > 0
    mean_g = np.mean(loss_per_rew_net)
    std_g = np.std(loss_per_rew_net)
    mean_accuracy = np.mean(accuracy_per_rew_net)
    std_accuracy = np.std(accuracy_per_rew_net)
    with open(self.log_dir + f"/rew_losses.txt", "a") as file:
        file.write(f" {mean_g:.10f} {std_g:.10f} {mean_accuracy:.10f} {std_accuracy:.10f} {pair_select_time_total:>5} {rew_update_time_total:>5} {overal_tr_iter_idx:>5} \n")
    # log magnitute of reward weights
    reward_weights_list = [torch.norm(reward_obj.get_flat_weights()) for reward_obj in self.reward_objs]
    reward_weights_mean = np.mean([item.item() for item in reward_weights_list])
    reward_weights_std = np.std([item.item() for item in reward_weights_list])
    reward_weights_min = np.min([item.item() for item in reward_weights_list])
    reward_weights_max = np.max([item.item() for item in reward_weights_list])
    with open(self.log_dir + f"/rew_weights_stats.txt", "a") as file:
        file.write(f" {reward_weights_mean:.10f} {reward_weights_std:.10f} {reward_weights_min:.10f} {reward_weights_max:.10f} {overal_tr_iter_idx:>5} \n")
    # validation is performed after all batches are used for training
    if not self.myargs.skip_rew_eval:
        # Iterate over all reward networks for validation
        start = time.time()
        loss_per_rew_net = []
        accuracy_per_rew_net = []
        for rew_obj_idx, reward_obj in enumerate(self.reward_objs):
            if self.val_pairs:
                loss_item, accuracy = self._individual_rew_obj_validation(self.val_pairs, self.val_pairs_returns, reward_obj, criterion, self.myargs.discounted_rew)
                loss_per_rew_net.append(loss_item)
                accuracy_per_rew_net.append(accuracy)
        # NOTE(review): if val_pairs is empty these np.mean calls run on
        # empty lists and emit nan -- presumably val_pairs is always
        # non-empty when skip_rew_eval is false; confirm.
        mean_g = np.mean(loss_per_rew_net)
        std_g = np.std(loss_per_rew_net)
        mean_accuracy = np.mean(accuracy_per_rew_net)
        std_accuracy = np.std(accuracy_per_rew_net)
        end = time.time()
        total_time_rew_eval = end - start
        with open(self.log_dir + f"/rew_losses_val.txt", "a") as file:
            file.write(f" {mean_g:.10f} {std_g:.10f} {mean_accuracy:.10f} {std_accuracy:.10f} {total_time_rew_eval:>5} {overal_tr_iter_idx:>5} \n")
    return pairs_all, returns_all, pair_select_time_total, rew_update_time_total, load_trajs_time_total
def _individual_rew_obj_validation(self, pairs, returns, reward_obj, criterion, discounted_rew):
    """
    Evaluate one reward network on held-out trajectory pairs.

    For each pair whose ranks differ, per-step reward predictions are
    summed (gamma-discounted when `discounted_rew` is truthy) into
    trajectory scores; the two scores are concatenated with the
    higher-ranked trajectory first, so the correct class for the
    cross-entropy criterion is always column 0.

    Returns (loss, accuracy), or the sentinel pair
    ("no pair", "no pair") if no usable pair was provided.
    """
    with torch.no_grad():
        ordered_logits = []
        for (traj_i, traj_j), (rank_i, rank_j) in zip(pairs, returns):
            # ties are never expected here
            assert rank_i != rank_j
            net_in_j = reward_obj.reward_net_input_batch_traj_method(traj_j)
            net_in_i = reward_obj.reward_net_input_batch_traj_method(traj_i)
            preds_j = reward_obj.reward_net(net_in_j)
            preds_i = reward_obj.reward_net(net_in_i)
            if discounted_rew:
                num_steps = preds_j.size()[0]
                discount = torch.tensor(
                    [self.myargs.gamma**step for step in range(num_steps)],
                    device=self.device)
                discount = torch.unsqueeze(discount, dim=1)
                # element-wise multiply by gamma^t, then sum over steps
                score_j = torch.unsqueeze(torch.sum(discount * preds_j, dim=0), dim=0)
                score_i = torch.unsqueeze(torch.sum(discount * preds_i, dim=0), dim=0)
            else:
                score_j = torch.unsqueeze(torch.sum(preds_j, dim=0), dim=0)
                score_i = torch.unsqueeze(torch.sum(preds_i, dim=0), dim=0)
            # higher-ranked trajectory goes into column 0
            if rank_j > rank_i:
                ordered_logits.append(torch.cat([score_j, score_i], dim=1))
            elif rank_j < rank_i:
                ordered_logits.append(torch.cat([score_i, score_j], dim=1))
        if not ordered_logits:
            return "no pair", "no pair"
        logits = torch.cat(ordered_logits, dim=0)
        target = torch.zeros((len(ordered_logits)), dtype=torch.long,
                             requires_grad=False, device=self.device)
        accuracy = self.calc_accuracy(logits)
        loss = criterion(logits, target)
        return loss.item(), accuracy
def _grad_individual_rew_obj(self, batch_size, reward_obj, criterion, discounted_rew):
    """
    Perform one preference-learning gradient step on a single reward
    network.

    reads collected returns from self.new_trajs_returns_list and uses self.save_path_new_trajs

    Trajectory pairs come from myutils.create_pairs_distance_apart_* --
    from memory or from disk depending on `self.sample_trajs_from_memory`
    (presumably these sample pairs whose returns differ sufficiently;
    confirm in myutils). For each pair the per-step predictions are
    summed into trajectory scores, ordered higher-ranked-first, and fed
    to the cross-entropy `criterion` with target class 0; then one
    backward pass and optimizer step are applied.

    Returns:
        (loss, accuracy, pairs, returns, pair_select_time,
         rew_update_time, time_loading_trajs), or the sentinel tuple
        ("no pair", ..., pair_select_time, 0, 0) when no pair was drawn.
    """
    start = time.time()
    if self.sample_trajs_from_memory:
        pairs, returns, time_loading_trajs = myutils.create_pairs_distance_apart_device_memory(self.new_trajs_list, self.new_trajs_returns_list, batch_size, self.myargs.priority_sampling, self.device, self.init_params["difference_factor"])
    else:
        pairs, returns, time_loading_trajs = myutils.create_pairs_distance_apart_device_hardDrive(self.save_path_new_trajs, self.new_trajs_returns_list, batch_size, self.myargs.priority_sampling, self.device)
    pair_select_time = time.time() - start
    # pairs, returns = myutils.create_pairs_no_step_no_subsample(ranked_traj_list, traj_returns, batch_size, self.myargs.priority_sampling)
    # if any pair is returned, the returns should be different as this is guaranteed in myutils.create_pairs_no_step
    # pairs.extend(pairs_raw), returns.extend(returns_raw)
    # ***********************************
    start = time.time()
    # clear any leftover gradients before accumulating this batch
    reward_obj.reward_net.zero_grad()
    return_traj_preds_list = []
    if pairs:
        for (traj_i, traj_j), (rank_i, rank_j) in zip(pairs, returns):
            # return_traj = torch.zeros((num_pairs,1), requires_grad=True, device=self.device)
            # grad_theta = torch.zeros(agent.theta_size)
            # return_theta_traj_j = self.return_theta_traj_calc(traj_j return_traj_j, idx)
            # return_theta_traj_i = self.return_theta_traj_calc(traj_j return_traj_i, idx)
            # ties are never expected from the pair-creation helpers
            assert rank_i != rank_j
            reward_input_batch_j = reward_obj.reward_net_input_batch_traj_method(traj_j)
            reward_input_batch_i = reward_obj.reward_net_input_batch_traj_method(traj_i)
            reward_output_batch_j = reward_obj.reward_net(reward_input_batch_j)
            reward_output_batch_i = reward_obj.reward_net(reward_input_batch_i)
            if discounted_rew:
                # discount weights gamma^t; length taken from traj_j's output
                num_rows = reward_output_batch_j.size()[0]
                weights = torch.tensor([self.myargs.gamma**idx for idx in range(num_rows)], device=self.device)
                weights = torch.unsqueeze(weights, dim=1)
                reward_sum_j = torch.unsqueeze(torch.sum(weights * reward_output_batch_j, dim=0), dim=0) # element-wise multiplication
                reward_sum_i = torch.unsqueeze(torch.sum(weights * reward_output_batch_i, dim=0), dim=0)
            else:
                reward_sum_j = torch.unsqueeze(torch.sum(reward_output_batch_j, dim=0), dim=0)
                reward_sum_i = torch.unsqueeze(torch.sum(reward_output_batch_i, dim=0), dim=0)
            # place the higher-ranked trajectory's score in column 0
            if rank_j > rank_i:
                return_sum_pair = torch.cat([reward_sum_j, reward_sum_i], dim=1)
                return_traj_preds_list.append(return_sum_pair)
            elif rank_j < rank_i:
                return_sum_pair = torch.cat([reward_sum_i, reward_sum_j], dim=1)
                return_traj_preds_list.append(return_sum_pair)
        # update the reward function after every batch_size number of pairs
        return_traj_preds = torch.cat(return_traj_preds_list, dim=0)
        # target class 0 == the higher-ranked column by construction above
        high_return_idx = torch.zeros((len(return_traj_preds_list)), dtype=torch.long, requires_grad=False, device=self.device)
        accuracy = self.calc_accuracy(return_traj_preds)
        loss = criterion(return_traj_preds, high_return_idx)
        loss.backward()
        reward_obj.optimizer.step()
        rew_update_time = time.time() - start
        return loss.item(), accuracy, pairs, returns, pair_select_time, rew_update_time, time_loading_trajs
    else:
        return "no pair", "no pair", "no pair", "no pair", pair_select_time, 0, 0
def calc_accuracy(self, return_traj_preds):
    """
    Fraction of rows in which column 0 strictly exceeds column 1.

    By construction (see the pair-building code), column 0 holds the
    predicted return of the higher-ranked trajectory, so this is the
    pairwise ranking accuracy.

    Parameters:
        return_traj_preds: 2-D tensor of shape (num_pairs, 2).

    Returns:
        float in [0, 1]. Raises ZeroDivisionError on an empty tensor
        (callers guard against this by only calling with >= 1 pair).
    """
    num_total = return_traj_preds.size()[0]
    # Vectorized comparison replaces the per-row Python loop; .item()
    # converts the 0-d count tensor to a plain int.
    num_correct = (return_traj_preds[:, 0] > return_traj_preds[:, 1]).sum().item()
    return num_correct / num_total
def calc_sparse_reward(self, done, infos, displacement_forward_till_rew, steps_till_rew, displacement_forward_episode_total,
num_succ_run_forward, num_steps_taken_to_rew, num_succ_not_done, reward_GT, num_envs, myargs):
"""
unitsV2 uses myargs.num_steps as the maximum possible number of steps
"""
sparseness = myargs.sparseness
vel_thresh = 0
reward_sparse = torch.zeros(num_envs)
displacement_forward_step = np.zeros(num_envs)
if myargs.sparse_rew_type == "steps":
if myargs.env_name in ["InvertedPendulum-v2", "CartPole-v0"]:
for idx, done_proc in enumerate(done):
if not done_proc:
num_succ_not_done[idx] += 1
else:
num_succ_not_done[idx] = 0
if num_succ_not_done[idx] >= sparseness:
reward_sparse[idx] = 1
else:
reward_sparse[idx] = 0
elif myargs.env_name == "InvertedDoublePendulum-v2":
for idx, info in enumerate(infos):
angle_range = sparseness
angles = info["angles"]
if abs(angles[1]) < angle_range*math.pi/180:
reward_sparse[idx] = 1
else:
reward_sparse[idx] = 0
else:
for idx, info in enumerate(infos):
if info['reward_run'] > vel_thresh:
num_succ_run_forward[idx] += 1
else:
num_succ_run_forward[idx] = 0
if num_succ_run_forward[idx] >= sparseness:
reward_sparse[idx] = 1
else:
reward_sparse[idx] = 0
elif myargs.sparse_rew_type in ["unitsV2", "units"]:
for idx, info in enumerate(infos):
displacement_forward_step[idx] = info["x_position"] - info["x_position_before"]
displacement_forward_till_rew += displacement_forward_step
displacement_forward_episode_total += displacement_forward_step
steps_till_rew += np.ones(num_envs)
for idx in range(np.shape(displacement_forward_till_rew)[0]):
# reward_sparse[idx] = myargs.num_steps * (displacement_forward_till_rew[idx] // sparseness) / steps_till_rew[idx]
if displacement_forward_till_rew[idx] > sparseness:
if myargs.sparse_rew_type == "unitsV2":
reward_sparse[idx] = (2 - (steps_till_rew[idx] / myargs.num_steps)) * (displacement_forward_till_rew[idx] // sparseness)
elif myargs.sparse_rew_type == "units":
reward_sparse[idx] = displacement_forward_till_rew[idx] // sparseness
displacement_forward_till_rew[idx] = 0
steps_till_rew[idx] = 0
else:
reward_sparse[idx] = 0
# elif myargs.sparse_rew_type in ["episodic"]:
# steps_till_rew += np.ones(num_envs)
# if myargs.env_name in ["Reacher-v2", "MountainCar-v0"]:
# done = [item['done_dist'] for item in infos]
# for idx, done_proc in enumerate(done):
# if done_proc:
# reward_sparse[idx] = 1000/steps_till_rew[idx]
# steps_till_rew[idx] = 0
# else:
# reward_sparse[idx] = 0 # This is redundant, reward_sparse is zero by default
elif myargs.sparse_rew_type in ["episodic"]:
if myargs.env_name in ["MountainCar-v0", "Reacher-v2", "Acrobot-v1", | |
the end containing the metadata.
"""
with Tiff2Jp2k(
self.astronaut_ycbcr_jpeg_tif, self.temp_jp2_filename
) as j:
j.run()
j = Jp2k(self.temp_jp2_filename)
actual = j[:]
self.assertEqual(actual.shape, (512, 512, 3))
c = j.get_codestream(header_only=False)
actual = c.segment[2].code_block_size
expected = (64, 64)
self.assertEqual(actual, expected)
self.assertEqual(c.segment[2].layers, 1)
self.assertEqual(c.segment[2].num_res, 5)
at_least_one_eph = any(
isinstance(seg, glymur.codestream.EPHsegment)
for seg in c.segment
)
self.assertFalse(at_least_one_eph)
at_least_one_plt = any(
isinstance(seg, glymur.codestream.PLTsegment)
for seg in c.segment
)
self.assertFalse(at_least_one_plt)
at_least_one_sop = any(
isinstance(seg, glymur.codestream.SOPsegment)
for seg in c.segment
)
self.assertFalse(at_least_one_sop)
self.assertEqual(c.segment[2].prog_order, glymur.core.LRCP)
self.assertEqual(
c.segment[2].xform, glymur.core.WAVELET_XFORM_5X3_REVERSIBLE
)
self.assertEqual(j.box[-1].box_id, 'uuid')
self.assertEqual(j.box[-1].data['ImageWidth'], 512)
self.assertEqual(j.box[-1].data['ImageLength'], 512)
def test_geotiff(self):
    """
    SCENARIO: Convert a GeoTIFF file to JP2.
    EXPECTED RESULT: the final box is a UUID box carrying the GeoTIFF
    UUID.
    """
    with warnings.catch_warnings():
        # the conversion of this fixture emits warnings we do not care
        # about in this test
        warnings.simplefilter('ignore')
        with ir.path('tests.data', 'albers27.tif') as path:
            with Tiff2Jp2k(path, self.temp_jp2_filename) as converter:
                converter.run()
    jp2 = Jp2k(self.temp_jp2_filename)
    last_box = jp2.box[-1]
    self.assertEqual(last_box.box_id, 'uuid')
    self.assertEqual(
        last_box.uuid, UUID('b14bf8bd-083d-4b43-a5ae-8cd7d5a6ce03')
    )
def test_no_uuid(self):
    """
    SCENARIO: Convert TIFF file to JP2 with create_uuid=False, so no
    UUID box is written for the TIFF IFD.
    EXPECTED RESULT: data matches and there is no UUID box.
    """
    with Tiff2Jp2k(
        self.astronaut_ycbcr_jpeg_tif, self.temp_jp2_filename,
        create_uuid=False
    ) as converter:
        converter.run()
    jp2 = Jp2k(self.temp_jp2_filename)
    self.assertEqual(jp2[:].shape, (512, 512, 3))
    uuid_boxes = [
        box for box in jp2.box if isinstance(box, glymur.jp2box.UUIDBox)
    ]
    self.assertFalse(uuid_boxes)
def test_psnr(self):
    """
    SCENARIO: Convert TIFF file to JP2 with four quality layers given by
    PSNR values (0 meaning lossless).
    EXPECTED RESULT: the first decoded image is lossless; the remaining
    images have finite, strictly increasing PSNR.
    """
    with Tiff2Jp2k(
        self.minisblack_spp1_path, self.temp_jp2_filename,
        psnr=(30, 35, 40, 0)
    ) as j:
        j.run()
    j = Jp2k(self.temp_jp2_filename)
    d = {}
    for layer in range(4):
        j.layer = layer
        d[layer] = j[:]
    with warnings.catch_warnings():
        # MSE is zero for that first image, resulting in a divide-by-zero
        # warning
        warnings.simplefilter('ignore')
        psnr = [
            fixtures.skimage.metrics.peak_signal_noise_ratio(
                fixtures.skimage.data.moon(), d[layer]
            )
            for layer in range(4)
        ]
    # That first image should be lossless.
    self.assertTrue(np.isinf(psnr[0]))
    # None of the subsequent images should have inf PSNR.
    self.assertTrue(not np.any(np.isinf(psnr[1:])))
    # PSNR should increase for the remaining images.
    # BUG FIX: the comparison belongs inside np.all; the previous
    # np.all(np.diff(psnr[1:])) > 0 only verified that the differences
    # were nonzero (np.all returns a bool, and bool > 0 is that bool),
    # so a *decreasing* PSNR sequence would still have passed.
    self.assertTrue(np.all(np.diff(psnr[1:]) > 0))
def test_irreversible(self):
    """
    SCENARIO: Convert TIFF file to JP2 using the irreversible transform.
    EXPECTED RESULT: the codestream reports the 9x7 irreversible wavelet
    transform.
    """
    with Tiff2Jp2k(
        self.astronaut_ycbcr_jpeg_tif, self.temp_jp2_filename,
        irreversible=True
    ) as converter:
        converter.run()
    codestream = Jp2k(self.temp_jp2_filename).get_codestream(
        header_only=False
    )
    self.assertEqual(
        codestream.segment[2].xform,
        glymur.core.WAVELET_XFORM_9X7_IRREVERSIBLE
    )
def test_sop(self):
    """
    SCENARIO: Convert TIFF file to JP2 with SOP markers.
    EXPECTED RESULT: at least one SOP segment appears in the codestream.
    """
    with Tiff2Jp2k(
        self.astronaut_ycbcr_jpeg_tif, self.temp_jp2_filename, sop=True
    ) as converter:
        converter.run()
    codestream = Jp2k(self.temp_jp2_filename).get_codestream(
        header_only=False
    )
    sop_found = any(
        isinstance(segment, glymur.codestream.SOPsegment)
        for segment in codestream.segment
    )
    self.assertTrue(sop_found)
def test_progression_order(self):
    """
    SCENARIO: Convert TIFF file to JP2 with the RLCP progression order.
    EXPECTED RESULT: the codestream reports RLCP progression.
    """
    with Tiff2Jp2k(
        self.astronaut_ycbcr_jpeg_tif, self.temp_jp2_filename,
        prog='rlcp'
    ) as converter:
        converter.run()
    codestream = Jp2k(self.temp_jp2_filename).get_codestream(
        header_only=False
    )
    self.assertEqual(codestream.segment[2].prog_order, glymur.core.RLCP)
def test_eph(self):
    """
    SCENARIO: Convert TIFF file to JP2 with EPH markers.
    EXPECTED RESULT: at least one EPH segment appears in the codestream.
    """
    with Tiff2Jp2k(
        self.astronaut_ycbcr_jpeg_tif, self.temp_jp2_filename, eph=True
    ) as converter:
        converter.run()
    codestream = Jp2k(self.temp_jp2_filename).get_codestream(
        header_only=False
    )
    eph_found = any(
        isinstance(segment, glymur.codestream.EPHsegment)
        for segment in codestream.segment
    )
    self.assertTrue(eph_found)
def test_plt(self):
    """
    SCENARIO: Convert TIFF file to JP2 with PLT markers.
    EXPECTED RESULT: at least one PLT segment appears in the codestream.
    """
    with Tiff2Jp2k(
        self.astronaut_ycbcr_jpeg_tif, self.temp_jp2_filename, plt=True
    ) as converter:
        converter.run()
    codestream = Jp2k(self.temp_jp2_filename).get_codestream(
        header_only=False
    )
    plt_found = any(
        isinstance(segment, glymur.codestream.PLTsegment)
        for segment in codestream.segment
    )
    self.assertTrue(plt_found)
def test_resolutions(self):
    """
    SCENARIO: Convert TIFF file to JP2 with 4 resolution layers instead
    of the default 5.
    EXPECTED RESULT: data matches; the COD segment reports
    numres - 1 decomposition levels.
    """
    expected = 4
    with Tiff2Jp2k(
        self.astronaut_ycbcr_jpeg_tif, self.temp_jp2_filename,
        numres=expected
    ) as converter:
        converter.run()
    jp2 = Jp2k(self.temp_jp2_filename)
    self.assertEqual(jp2[:].shape, (512, 512, 3))
    codestream = jp2.get_codestream()
    # num_res counts decomposition levels, i.e. resolutions - 1
    self.assertEqual(codestream.segment[2].num_res, expected - 1)
def test_layers(self):
    """
    SCENARIO: Convert TIFF file to JP2 with three compression ratios.
    EXPECTED RESULT: data matches; the codestream reports 3 layers.
    """
    with Tiff2Jp2k(
        self.astronaut_ycbcr_jpeg_tif, self.temp_jp2_filename,
        cratios=[200, 50, 10]
    ) as converter:
        converter.run()
    jp2 = Jp2k(self.temp_jp2_filename)
    self.assertEqual(jp2[:].shape, (512, 512, 3))
    codestream = jp2.get_codestream()
    self.assertEqual(codestream.segment[2].layers, 3)
def test_codeblock_size(self):
    """
    SCENARIO: Convert TIFF file to JP2 with a 32x32 code block size.
    EXPECTED RESULT: data matches; the codestream reports the requested
    code block size.
    """
    expected = (32, 32)
    with Tiff2Jp2k(
        self.astronaut_ycbcr_jpeg_tif, self.temp_jp2_filename,
        cbsize=expected
    ) as converter:
        converter.run()
    jp2 = Jp2k(self.temp_jp2_filename)
    self.assertEqual(jp2[:].shape, (512, 512, 3))
    codestream = jp2.get_codestream()
    self.assertEqual(codestream.segment[2].code_block_size, expected)
def test_verbosity(self):
    """
    SCENARIO: Convert TIFF file to JP2 at INFO log level.
    EXPECTED RESULT: exactly one message is logged.
    """
    with Tiff2Jp2k(
        self.astronaut_ycbcr_jpeg_tif, self.temp_jp2_filename,
        verbosity=logging.INFO
    ) as converter:
        with self.assertLogs(logger='tiff2jp2', level=logging.INFO) as cm:
            converter.run()
    self.assertEqual(len(cm.output), 1)
def test_partial_strip_and_partial_tiles(self):
    """
    SCENARIO: Convert a monochromatic stripped TIFF whose last strip is
    partial; the requested 250x250 tiling gives the JP2 partial tiles.
    EXPECTED RESULT: data matches; the JP2 is 480x480 with 250x250 tiles.
    """
    with Tiff2Jp2k(
        self.minisblack_3strip_partial_last_strip, self.temp_jp2_filename,
        tilesize=(250, 250)
    ) as converter:
        converter.run()
    jp2 = Jp2k(self.temp_jp2_filename)
    np.testing.assert_array_equal(
        jp2[:], self.minisblack_2x2_partial_tiles_data
    )
    siz = jp2.get_codestream().segment[1]
    self.assertEqual((siz.xsiz, siz.ysiz), (480, 480))
    self.assertEqual((siz.xtsiz, siz.ytsiz), (250, 250))
def test_partial_last_strip(self):
    """
    SCENARIO: Convert a monochromatic TIFF whose last strip is partial,
    using an evenly dividing 240x240 tile size.
    EXPECTED RESULT: data matches; the JP2 is 480x480 with 240x240 tiles.
    """
    with Tiff2Jp2k(
        self.minisblack_3strip_partial_last_strip, self.temp_jp2_filename,
        tilesize=(240, 240)
    ) as converter:
        converter.run()
    jp2 = Jp2k(self.temp_jp2_filename)
    np.testing.assert_array_equal(jp2[:], self.minisblack_3x3_data)
    siz = jp2.get_codestream().segment[1]
    self.assertEqual((siz.xsiz, siz.ysiz), (480, 480))
    self.assertEqual((siz.xtsiz, siz.ytsiz), (240, 240))
def test_32bit(self):
    """
    SCENARIO: The TIFF sample format is 32-bit unsigned integer.
    EXPECTED RESULT: RuntimeError
    """
    data = fixtures.skimage.data.moon().astype(np.uint32)
    h, w = data.shape
    th, tw = h // 2, w // 2
    tif = libtiff.open(self.temp_tiff_filename, mode='w')
    libtiff.setField(tif, 'Photometric', libtiff.Photometric.MINISBLACK)
    libtiff.setField(tif, 'Compression', libtiff.Compression.DEFLATE)
    libtiff.setField(tif, 'SampleFormat', libtiff.SampleFormat.UINT)
    libtiff.setField(tif, 'ImageLength', data.shape[0])
    libtiff.setField(tif, 'ImageWidth', data.shape[1])
    libtiff.setField(tif, 'TileLength', th)
    libtiff.setField(tif, 'TileWidth', tw)
    libtiff.setField(tif, 'BitsPerSample', 32)
    libtiff.setField(tif, 'SamplesPerPixel', 1)
    # write the four quadrants as separate tiles
    quadrants = (
        data[:th, :tw], data[:th, tw:w], data[th:h, :tw], data[th:h, tw:w]
    )
    for tile_idx, quadrant in enumerate(quadrants):
        libtiff.writeEncodedTile(tif, tile_idx, quadrant.copy())
    libtiff.close(tif)
    with Tiff2Jp2k(self.temp_tiff_filename, self.temp_jp2_filename) as converter:
        with self.assertRaises(RuntimeError):
            converter.run()
def test_floating_point(self):
    """
    SCENARIO: The TIFF sample format is 32-bit IEEE floating point.
    EXPECTED RESULT: RuntimeError
    """
    data = fixtures.skimage.data.moon().astype(np.float32)
    h, w = data.shape
    th, tw = h // 2, w // 2
    tif = libtiff.open(self.temp_tiff_filename, mode='w')
    libtiff.setField(tif, 'Photometric', libtiff.Photometric.MINISBLACK)
    libtiff.setField(tif, 'Compression', libtiff.Compression.DEFLATE)
    libtiff.setField(tif, 'SampleFormat', libtiff.SampleFormat.IEEEFP)
    libtiff.setField(tif, 'ImageLength', data.shape[0])
    libtiff.setField(tif, 'ImageWidth', data.shape[1])
    libtiff.setField(tif, 'TileLength', th)
    libtiff.setField(tif, 'TileWidth', tw)
    libtiff.setField(tif, 'BitsPerSample', 32)
    libtiff.setField(tif, 'SamplesPerPixel', 1)
    # write the four quadrants as separate tiles
    quadrants = (
        data[:th, :tw], data[:th, tw:w], data[th:h, :tw], data[th:h, tw:w]
    )
    for tile_idx, quadrant in enumerate(quadrants):
        libtiff.writeEncodedTile(tif, tile_idx, quadrant.copy())
    libtiff.close(tif)
    with Tiff2Jp2k(self.temp_tiff_filename, self.temp_jp2_filename) as converter:
        with self.assertRaises(RuntimeError):
            converter.run()
def test_evenly_tiled(self):
    """
    SCENARIO: Convert a monochromatic TIFF that is evenly tiled 2x2,
    requesting a matching 256x256 JP2 tile size.
    EXPECTED RESULT: data matches; the JP2 is 512x512 with 256x256 tiles.
    """
    with Tiff2Jp2k(
        self.minisblack_spp1_path,
        self.temp_jp2_filename,
        tilesize=(256, 256)
    ) as converter:
        converter.run()
    jp2 = Jp2k(self.temp_jp2_filename)
    np.testing.assert_array_equal(jp2[:], self.minisblack_spp1_data)
    siz = jp2.get_codestream().segment[1]
    self.assertEqual((siz.xsiz, siz.ysiz), (512, 512))
    self.assertEqual((siz.xtsiz, siz.ytsiz), (256, 256))
def test_tiled_logging(self):
    """
    SCENARIO: Convert an evenly 2x2-tiled monochromatic TIFF to JP2 with
    logging enabled.
    EXPECTED RESULT: four messages are logged, one per tile.
    """
    with Tiff2Jp2k(
        self.minisblack_spp1_path,
        self.temp_jp2_filename,
        tilesize=(256, 256)
    ) as converter:
        with self.assertLogs(logger='tiff2jp2', level=logging.INFO) as cm:
            converter.run()
        self.assertEqual(len(cm.output), 4)
def test_minisblack__smaller_tilesize_specified(self):
    """
    SCENARIO: Convert a monochromatic TIFF that is evenly tiled 2x2 but
    request a finer 128x128 (4x4) tiling for the JP2.
    EXPECTED RESULT: data matches; the JP2 is 512x512 with 128x128 tiles.
    """
    with Tiff2Jp2k(
        self.minisblack_spp1_path, self.temp_jp2_filename,
        tilesize=(128, 128)
    ) as converter:
        converter.run()
    jp2 = Jp2k(self.temp_jp2_filename)
    np.testing.assert_array_equal(jp2[:], self.minisblack_spp1_data)
    siz = jp2.get_codestream().segment[1]
    self.assertEqual((siz.xsiz, siz.ysiz), (512, 512))
    self.assertEqual((siz.xtsiz, siz.ytsiz), (128, 128))
def test_minisblack_3strip_to_2x2(self):
    """
    SCENARIO: Convert a monochromatic TIFF stored as 3 full strips,
    requesting a 240x240 (2x2) tiling for the JP2.
    EXPECTED RESULT: data matches; the JP2 is 480x480 with 240x240 tiles.
    """
    with Tiff2Jp2k(
        self.minisblack_3_full_strips_path, self.temp_jp2_filename,
        tilesize=(240, 240)
    ) as converter:
        converter.run()
    jp2 = Jp2k(self.temp_jp2_filename)
    np.testing.assert_array_equal(jp2[:], self.minisblack_3x3_data)
    siz = jp2.get_codestream().segment[1]
    self.assertEqual((siz.xsiz, siz.ysiz), (480, 480))
    self.assertEqual((siz.xtsiz, siz.ytsiz), (240, 240))
def test_minisblack_3x3__larger_tilesize_specified(self):
"""
SCENARIO: Convert monochromatic TIFF file to JP2. The TIFF is evenly
tiled 3x3, but we want 2x2.
EXPECTED RESULT: The data matches. The JP2 file has 4 tiles.
"""
with Tiff2Jp2k(
self.minisblack_3x3_tif, self.temp_jp2_filename,
tilesize=(240, 240)
) | |
(output wpkh only, input tx1-3)
txid = tx.txid
txin_list = []
txin_utxo_list = []
txin_list.append(TxIn(txid=txid, vout=0))
desc = f'raw({str(tr_addr.locking_script)})'
txin_utxo_list.append(UtxoData(
txid=txid, vout=0, amount=txouts[0].amount, descriptor=desc))
txouts2 = [
TxOut(100000000, str(test_obj.addr_dic['main'])),
]
tx2 = Transaction.create(2, 0, txin_list, txouts2)
main_addr = test_obj.addr_dic['main']
utxos = get_utxo(btc_rpc, [fee_addr])
utxo_list = convert_bitcoin_utxos(test_obj, utxos)
tx2.fund_raw_transaction(txin_utxo_list, utxo_list, fee_addr,
target_amount=0, effective_fee_rate=20.0,
knapsack_min_change=1)
# add sign
join_utxo_list: List['UtxoData'] = []
join_utxo_list[len(join_utxo_list):len(join_utxo_list)] = txin_utxo_list
for txin in tx2.txin_list:
for utxo in utxo_list:
if utxo.outpoint == txin.outpoint:
join_utxo_list.append(utxo)
for index, txin in enumerate(tx2.txin_list):
utxo = search_utxos(test_obj, join_utxo_list, txin.outpoint)
if index == 0:
sk = main_sk
hash_type = main_addr.hash_type
else:
path = str(test_obj.path_dic[str(utxo.descriptor.data.address)])
sk = test_obj.hdwallet.get_privkey(path=path).privkey
hash_type = utxo.descriptor.data.hash_type
tx2.sign_with_privkey(txin.outpoint, hash_type,
sk, amount=utxo.amount,
sighashtype=SigHashType.ALL,
utxos=join_utxo_list)
# broadcast
print(Transaction.parse_to_json(str(tx2), network=NETWORK))
btc_rpc.sendrawtransaction(str(tx2))
# generate block
btc_rpc.generatetoaddress(2, fee_addr)
time.sleep(2)
utxos = get_utxo(btc_rpc, [str(main_addr)])
print('UTXO: {}'.format(utxos))
def test_taproot_tapscript(test_obj: 'TestBitcoin'):
    """
    Exercise spending from three taproot outputs built from different
    TaprootScriptTree layouts on regtest:

      * vout 0 -- spent via the script path of ``script1`` (CHECKSIG with
        spk1): Schnorr-sign the tapscript sighash and attach the leaf's
        control block.
      * vout 1 -- spent via the OP_TRUE leaf: no signature, only the
        control block.
      * vout 2 -- spent via the key path, using the tree-tweaked private
        key derived from ``main_sk``.

    Each spend is funded from the fee address, signed, broadcast, and
    mined (2 blocks) before the next one.
    """
    btc_rpc = test_obj.conn.get_rpc()
    main_addr = test_obj.addr_dic['main']
    main_pk, _ = SchnorrPubkey.from_pubkey(str(main_addr.pubkey))
    pkh_addr = test_obj.addr_dic['p2pkh']
    spk1, _ = SchnorrPubkey.from_pubkey(str(pkh_addr.pubkey))
    wpkh_addr = test_obj.addr_dic['p2wpkh']
    spk2, _ = SchnorrPubkey.from_pubkey(str(wpkh_addr.pubkey))
    main_path = str(test_obj.path_dic[str(main_addr)])
    main_sk = test_obj.hdwallet.get_privkey(path=main_path).privkey
    pkh_path = str(test_obj.path_dic[str(pkh_addr)])
    sk1 = test_obj.hdwallet.get_privkey(path=pkh_path).privkey
    # wpkh_path = str(test_obj.path_dic[str(wpkh_addr)])
    # sk2 = test_obj.hdwallet.get_privkey(path=wpkh_path).privkey
    # Two CHECKSIG leaves plus a bare OP_TRUE ('51') leaf.
    script1 = Script.from_asm([str(spk1), 'OP_CHECKSIG'])
    script2 = Script.from_asm([str(spk2), 'OP_CHECKSIG'])
    op_true_script = Script('51')
    op_true_sub_tree1 = TaprootScriptTree(op_true_script)
    op_true_sub_tree1.add_branch(script1)
    # Three trees over the same leaves, each rooted at a different leaf,
    # all tweaked with the same internal pubkey.
    script1_tree = TaprootScriptTree(script1)
    script1_tree.add_branches([op_true_script, script2])
    script1_tree.internal_pubkey = main_pk
    op_true_tree = TaprootScriptTree(op_true_script)
    op_true_tree.add_branches([script1, script2])
    op_true_tree.internal_pubkey = main_pk
    script2_tree = TaprootScriptTree(script2)
    script2_tree.add_branch(op_true_sub_tree1)
    script2_tree.internal_pubkey = main_pk
    tr_addr1 = AddressUtil.taproot(script1_tree, network=NETWORK)
    tr_addr2 = AddressUtil.taproot(op_true_tree, network=NETWORK)
    tr_addr3 = AddressUtil.taproot(script2_tree, network=NETWORK)
    txouts = [
        TxOut(100000, str(tr_addr1)),
        TxOut(150000, str(tr_addr2)),
        TxOut(200000, str(tr_addr3)),
    ]
    tx = Transaction.create(2, 0, [], txouts)
    # fundrawtransaction
    fee_addr = str(test_obj.addr_dic['fee'])
    fee_desc = test_obj.desc_dic[fee_addr]
    fee_sk = test_obj.hdwallet.get_privkey(path=FEE_PATH).privkey
    utxos = get_utxo(btc_rpc, [fee_addr])
    utxo_list = convert_bitcoin_utxos(test_obj, utxos)
    tx.fund_raw_transaction([], utxo_list, fee_addr,
                            target_amount=0, effective_fee_rate=2.0,
                            knapsack_min_change=0)
    # add sign
    for txin in tx.txin_list:
        utxo = search_utxos(test_obj, utxo_list, txin.outpoint)
        tx.sign_with_privkey(txin.outpoint, fee_desc.data.hash_type, fee_sk,
                             amount=utxo.amount,
                             sighashtype=SigHashType.ALL)
    # broadcast
    print(Transaction.parse_to_json(str(tx), network=NETWORK))
    btc_rpc.sendrawtransaction(str(tx))
    # generate block
    btc_rpc.generatetoaddress(2, fee_addr)
    time.sleep(2)
    txid = tx.txid
    # UTXO descriptors for the three freshly created taproot outputs.
    utxo1 = UtxoData(txid=txid, vout=0, amount=txouts[0].amount,
                     descriptor=f'raw({str(tr_addr1.locking_script)})')
    utxo2 = UtxoData(txid=txid, vout=1, amount=txouts[1].amount,
                     descriptor=f'raw({str(tr_addr2.locking_script)})')
    utxo3 = UtxoData(txid=txid, vout=2, amount=txouts[2].amount,
                     descriptor=f'raw({str(tr_addr3.locking_script)})')
    # send tapscript script1
    txin_list = []
    txin_utxo_list = []
    txin_list.append(TxIn(txid=txid, vout=0))
    txin_utxo_list.append(utxo1)
    txouts2 = [
        TxOut(txouts[0].amount, str(test_obj.addr_dic['main'])),
    ]
    tx2 = Transaction.create(2, 0, txin_list, txouts2)
    main_addr = test_obj.addr_dic['main']
    utxos = get_utxo(btc_rpc, [fee_addr])
    utxo_list = convert_bitcoin_utxos(test_obj, utxos)
    tx2.fund_raw_transaction(txin_utxo_list, utxo_list, fee_addr,
                             target_amount=0, effective_fee_rate=2.0,
                             knapsack_min_change=0)
    # add sign
    # Collect the UTXO data for every input (taproot input + fee inputs).
    join_utxo_list: List['UtxoData'] = []
    join_utxo_list[len(join_utxo_list):len(join_utxo_list)] = txin_utxo_list
    for txin in tx2.txin_list:
        for utxo in utxo_list:
            if utxo.outpoint == txin.outpoint:
                join_utxo_list.append(utxo)
    for index, txin in enumerate(tx2.txin_list):
        utxo = search_utxos(test_obj, join_utxo_list, txin.outpoint)
        if index == 0:
            # Script-path spend: Schnorr signature over the tapscript
            # sighash plus the leaf's control block.
            sk = sk1
            sighash = tx2.get_sighash(
                txin.outpoint, HashType.TAPROOT, redeem_script=script1,
                sighashtype=SigHashType.DEFAULT, utxos=join_utxo_list)
            sig = SchnorrUtil.sign(sighash, sk1)
            sign_param = SignParameter(sig, sighashtype=SigHashType.DEFAULT)
            _, _, _, control_block = script1_tree.get_taproot_data()
            tx2.add_tapscript_sign(txin.outpoint, [sign_param],
                                   script1, control_block)
        else:
            # Fee inputs: ordinary key signing via the HD wallet.
            path = str(test_obj.path_dic[str(utxo.descriptor.data.address)])
            sk = test_obj.hdwallet.get_privkey(path=path).privkey
            hash_type = utxo.descriptor.data.hash_type
            tx2.sign_with_privkey(txin.outpoint, hash_type,
                                  sk, amount=utxo.amount,
                                  sighashtype=SigHashType.ALL,
                                  utxos=join_utxo_list)
    # broadcast
    print(Transaction.parse_to_json(str(tx2), network=NETWORK))
    btc_rpc.sendrawtransaction(str(tx2))
    # generate block
    btc_rpc.generatetoaddress(2, fee_addr)
    time.sleep(2)
    # send tapscript OP_TRUE
    txin_list = []
    txin_utxo_list = []
    txin_list.append(TxIn(txid=txid, vout=1))
    txin_utxo_list.append(utxo2)
    txouts2 = [
        TxOut(txouts[1].amount, str(test_obj.addr_dic['main'])),
    ]
    tx2 = Transaction.create(2, 0, txin_list, txouts2)
    main_addr = test_obj.addr_dic['main']
    utxos = get_utxo(btc_rpc, [fee_addr])
    utxo_list = convert_bitcoin_utxos(test_obj, utxos)
    tx2.fund_raw_transaction(txin_utxo_list, utxo_list, fee_addr,
                             target_amount=0, effective_fee_rate=2.0,
                             knapsack_min_change=0)
    # add sign
    join_utxo_list = []
    join_utxo_list[len(join_utxo_list):len(join_utxo_list)] = txin_utxo_list
    for txin in tx2.txin_list:
        for utxo in utxo_list:
            if utxo.outpoint == txin.outpoint:
                join_utxo_list.append(utxo)
    for index, txin in enumerate(tx2.txin_list):
        utxo = search_utxos(test_obj, join_utxo_list, txin.outpoint)
        if index == 0:
            # OP_TRUE leaf needs no signature -- empty sign-parameter list.
            _, _, _, control_block = op_true_tree.get_taproot_data()
            tx2.add_tapscript_sign(txin.outpoint, [],
                                   op_true_script, control_block)
        else:
            path = str(test_obj.path_dic[str(utxo.descriptor.data.address)])
            sk = test_obj.hdwallet.get_privkey(path=path).privkey
            hash_type = utxo.descriptor.data.hash_type
            tx2.sign_with_privkey(txin.outpoint, hash_type,
                                  sk, amount=utxo.amount,
                                  sighashtype=SigHashType.ALL,
                                  utxos=join_utxo_list)
    # broadcast
    print(Transaction.parse_to_json(str(tx2), network=NETWORK))
    btc_rpc.sendrawtransaction(str(tx2))
    # generate block
    btc_rpc.generatetoaddress(2, fee_addr)
    time.sleep(2)
    # send tapscript internal_pubkey
    txin_list = []
    txin_utxo_list = []
    txin_list.append(TxIn(txid=txid, vout=2))
    txin_utxo_list.append(utxo3)
    txouts2 = [
        TxOut(txouts[2].amount, str(test_obj.addr_dic['main'])),
    ]
    tx2 = Transaction.create(2, 0, txin_list, txouts2)
    main_addr = test_obj.addr_dic['main']
    utxos = get_utxo(btc_rpc, [fee_addr])
    utxo_list = convert_bitcoin_utxos(test_obj, utxos)
    tx2.fund_raw_transaction(txin_utxo_list, utxo_list, fee_addr,
                             target_amount=0, effective_fee_rate=2.0,
                             knapsack_min_change=0)
    # add sign
    join_utxo_list = []
    join_utxo_list[len(join_utxo_list):len(join_utxo_list)] = txin_utxo_list
    for txin in tx2.txin_list:
        for utxo in utxo_list:
            if utxo.outpoint == txin.outpoint:
                join_utxo_list.append(utxo)
    for index, txin in enumerate(tx2.txin_list):
        utxo = search_utxos(test_obj, join_utxo_list, txin.outpoint)
        if index == 0:
            # Key-path spend: sign with the tree-tweaked private key.
            sk = script2_tree.get_privkey(main_sk)
            hash_type = tr_addr3.hash_type
        else:
            path = str(test_obj.path_dic[str(utxo.descriptor.data.address)])
            sk = test_obj.hdwallet.get_privkey(path=path).privkey
            hash_type = utxo.descriptor.data.hash_type
        tx2.sign_with_privkey(txin.outpoint, hash_type,
                              sk, amount=utxo.amount,
                              sighashtype=SigHashType.ALL,
                              utxos=join_utxo_list)
    # broadcast
    print(Transaction.parse_to_json(str(tx2), network=NETWORK))
    btc_rpc.sendrawtransaction(str(tx2))
    # generate block
    btc_rpc.generatetoaddress(2, fee_addr)
    time.sleep(2)
    utxos = get_utxo(btc_rpc, [str(main_addr)])
    print('UTXO: {}'.format(utxos))
def _spend_taproot_single_key(test_obj: 'TestBitcoin', btc_rpc, fee_addr: str,
                              txid: str, vout: int, amount: int, spend_utxo,
                              tr_addr, tr_sk) -> None:
    """Spend one taproot key-path UTXO back to the 'main' address.

    Builds a transaction spending ``txid:vout``, funds the fee from
    ``fee_addr``, signs the taproot input with a Schnorr signature made
    from ``tr_sk``, signs the remaining (fee) inputs with their wallet
    keys, then broadcasts the transaction and mines two blocks.

    :param spend_utxo: UtxoData for the taproot output being spent.
    :param tr_addr: Taproot address of the output (provides the pubkey).
    :param tr_sk: Tweaked Schnorr private key matching ``tr_addr``.
    """
    txin_list = [TxIn(txid=txid, vout=vout)]
    txin_utxo_list = [spend_utxo]
    txouts2 = [TxOut(amount, str(test_obj.addr_dic['main']))]
    tx2 = Transaction.create(2, 0, txin_list, txouts2)
    utxos = get_utxo(btc_rpc, [fee_addr])
    utxo_list = convert_bitcoin_utxos(test_obj, utxos)
    tx2.fund_raw_transaction(txin_utxo_list, utxo_list, fee_addr,
                             target_amount=0, effective_fee_rate=2.0,
                             knapsack_min_change=0)
    # Collect UTXO data for every input of the funded transaction (the
    # taproot input plus whatever fee inputs funding selected).
    join_utxo_list: List['UtxoData'] = list(txin_utxo_list)
    for txin in tx2.txin_list:
        for utxo in utxo_list:
            if utxo.outpoint == txin.outpoint:
                join_utxo_list.append(utxo)
    for index, txin in enumerate(tx2.txin_list):
        utxo = search_utxos(test_obj, join_utxo_list, txin.outpoint)
        if index == 0:
            # Taproot key-path spend: sign the sighash directly with the
            # tweaked Schnorr private key.
            sighash = tx2.get_sighash(
                txin.outpoint, HashType.TAPROOT, pubkey=tr_addr.pubkey,
                sighashtype=SigHashType.DEFAULT, utxos=join_utxo_list)
            sig = SchnorrUtil.sign(sighash, tr_sk)
            sign_param = SignParameter(sig, sighashtype=SigHashType.DEFAULT)
            tx2.add_taproot_sign(txin.outpoint, sign_param)
        else:
            # Fee input: sign with the wallet key for the utxo's address.
            path = str(test_obj.path_dic[str(utxo.descriptor.data.address)])
            sk = test_obj.hdwallet.get_privkey(path=path).privkey
            hash_type = utxo.descriptor.data.hash_type
            tx2.sign_with_privkey(txin.outpoint, hash_type,
                                  sk, amount=utxo.amount,
                                  sighashtype=SigHashType.ALL,
                                  utxos=join_utxo_list)
    # broadcast and confirm
    print(Transaction.parse_to_json(str(tx2), network=NETWORK))
    btc_rpc.sendrawtransaction(str(tx2))
    btc_rpc.generatetoaddress(2, fee_addr)
    time.sleep(2)


def test_taproot_single_key(test_obj: 'TestBitcoin'):
    """Create three taproot key-path outputs and spend each of them.

    The three outputs use Schnorr pubkeys derived from the 'main',
    'p2pkh' and 'p2wpkh' wallet addresses, tweaked via an empty
    TapBranch. Each output is then spent back to the 'main' address
    with a key-path (single signature) spend.
    """
    btc_rpc = test_obj.conn.get_rpc()
    main_addr = test_obj.addr_dic['main']
    main_pk, _ = SchnorrPubkey.from_pubkey(str(main_addr.pubkey))
    pkh_addr = test_obj.addr_dic['p2pkh']
    spk1, _ = SchnorrPubkey.from_pubkey(str(pkh_addr.pubkey))
    wpkh_addr = test_obj.addr_dic['p2wpkh']
    spk2, _ = SchnorrPubkey.from_pubkey(str(wpkh_addr.pubkey))
    main_path = str(test_obj.path_dic[str(main_addr)])
    main_sk = test_obj.hdwallet.get_privkey(path=main_path).privkey
    pkh_path = str(test_obj.path_dic[str(pkh_addr)])
    sk1 = test_obj.hdwallet.get_privkey(path=pkh_path).privkey
    wpkh_path = str(test_obj.path_dic[str(wpkh_addr)])
    sk2 = test_obj.hdwallet.get_privkey(path=wpkh_path).privkey
    # Tweak each key with an empty tap branch to get the taproot
    # addresses and their matching tweaked private keys.
    branch = TapBranch()
    tr_addr1 = AddressUtil.taproot(
        main_pk, script_tree=branch, network=NETWORK)
    tr_sk1 = branch.get_privkey(main_sk)
    tr_addr2 = AddressUtil.taproot(spk1, script_tree=branch, network=NETWORK)
    tr_sk2 = branch.get_privkey(sk1)
    tr_addr3 = AddressUtil.taproot(spk2, script_tree=branch, network=NETWORK)
    tr_sk3 = branch.get_privkey(sk2)
    txouts = [
        TxOut(100000, str(tr_addr1)),
        TxOut(150000, str(tr_addr2)),
        TxOut(200000, str(tr_addr3)),
    ]
    tx = Transaction.create(2, 0, [], txouts)
    # fundrawtransaction: pay the fee from the 'fee' address
    fee_addr = str(test_obj.addr_dic['fee'])
    fee_desc = test_obj.desc_dic[fee_addr]
    fee_sk = test_obj.hdwallet.get_privkey(path=FEE_PATH).privkey
    utxos = get_utxo(btc_rpc, [fee_addr])
    utxo_list = convert_bitcoin_utxos(test_obj, utxos)
    tx.fund_raw_transaction([], utxo_list, fee_addr,
                            target_amount=0, effective_fee_rate=2.0,
                            knapsack_min_change=0)
    # add sign for the fee inputs
    for txin in tx.txin_list:
        utxo = search_utxos(test_obj, utxo_list, txin.outpoint)
        tx.sign_with_privkey(txin.outpoint, fee_desc.data.hash_type, fee_sk,
                             amount=utxo.amount,
                             sighashtype=SigHashType.ALL)
    # broadcast the funding transaction and confirm it
    print(Transaction.parse_to_json(str(tx), network=NETWORK))
    btc_rpc.sendrawtransaction(str(tx))
    btc_rpc.generatetoaddress(2, fee_addr)
    time.sleep(2)
    # spend each taproot output back to the 'main' address
    txid = tx.txid
    for vout, (tr_addr, tr_sk) in enumerate([(tr_addr1, tr_sk1),
                                             (tr_addr2, tr_sk2),
                                             (tr_addr3, tr_sk3)]):
        spend_utxo = UtxoData(
            txid=txid, vout=vout, amount=txouts[vout].amount,
            descriptor=f'raw({str(tr_addr.locking_script)})')
        _spend_taproot_single_key(test_obj, btc_rpc, fee_addr, txid, vout,
                                  txouts[vout].amount, spend_utxo,
                                  tr_addr, tr_sk)
    utxos = get_utxo(btc_rpc, [str(main_addr)])
    print('UTXO: {}'.format(utxos))
class TestBitcoin(unittest.TestCase):
hdwallet: 'HDWallet'
# addr_dic: dict[str, 'Address']
# desc_dic: dict[str, 'Descriptor']
# path_dic: dict[str, Union[str, List[str]]]
conn: 'RpcWrapper'
    def setUp(self):
        """Build the test wallet, addresses and regtest RPC connection."""
        # Verbose RPC logging so failing regtest calls are visible.
        logging.basicConfig()
        logging.getLogger("BitcoinRPC").setLevel(logging.DEBUG)
        self.path_dic = {}  # address string -> BIP32 derivation path
        self.addr_dic = {}  # label (e.g. 'main', 'fee', 'p2pkh') -> address
        self.desc_dic = {}  # address string -> output descriptor
        # NOTE(review): '<PASSWORD>' looks like a redaction placeholder and
        # is not valid Python -- restore the real passphrase literal (or an
        # env-var lookup) before running this suite.
        self.hdwallet = HDWallet.from_mnemonic(
            MNEMONIC, passphrase=<PASSWORD>, network=NETWORK)
        # Populates path_dic/addr_dic/desc_dic for the labels used above.
        create_bitcoin_address(self)
        self.conn = RpcWrapper(
            port=18443, rpc_user='bitcoinrpc', rpc_password='password')
def test_bitcoin(self):
'''
To execute sequentially, define only one test
and call the test function in | |
nan, 10, 10, nan, 0.00, nan ],
[ nan, 20, 20, nan, 0.00, nan ],
[ nan, 30, 30, nan, 0.00, nan ],
[ nan, 40, 40, nan, 0.00, nan ],
[ nan, 50, 50, nan, 0.00, nan ],
[ nan, 60, 60, nan, 0.00, nan ],
[ nan, 70, 70, nan, 0.00, nan ],
[ nan, 80, 80, nan, 0.00, nan ],
[ nan, 90, 90, nan, 0.00, nan ],
[ nan, 100, 100, nan, 0.00, nan ],
[ nan, 200, 200, nan, 0.02, nan ],
[ nan, 300, 300, nan, 0.05, nan ],
[ nan, 400, 400, nan, 0.08, nan ],
[ nan, 500, 500, nan, 0.15, nan ],
[ nan, 600, 600, nan, 0.15, nan ],
[ nan, 700, 700, nan, 0.20, nan ],
[ nan, 800, 800, nan, 0.31, nan ],
[ nan, 900, 900, nan, 0.32, nan ],
[ nan, 1000, 1000, nan, 0.39, nan ],
[ nan, 2000, 2000, nan, 1.69, nan ],
[ nan, 3000, 3000, nan, 4.50, nan ],
[ nan, 4000, 4000, nan, 9.26, nan ],
[ nan, 5000, 5000, nan, 16.58, nan ],
[ nan, 6000, 6000, nan, 27.12, nan ],
[ nan, 7000, 7000, nan, 40.84, nan ],
[ nan, 8000, 8000, nan, 59.67, nan ],
[ nan, 9000, 9000, nan, 82.25, nan ],
[ nan, 10000, 10000, nan, 113.55, nan ],
[ nan, 12000, 12000, nan, 204.99, nan ],
[ nan, 14000, 14000, nan, 302.73, nan ],
[ nan, 16000, 16000, nan, 460.29, nan ],
])
# ------------------------------------------------------------
# file: v1.6.1/cuda7.0-k40c/zgetrf.txt
# numactl --interleave=all ./testing_zgetrf -N 100 -N 1000 --range 10:90:10 --range 100:900:100 --range 1000:9000:1000 --range 10000:20000:2000
# Columns appear to be: M, N, CPU Gflop/s, CPU time (s), GPU Gflop/s,
# GPU time (s), error -- TODO confirm against the MAGMA testing driver
# output format (nan marks columns not measured in this run).
zgetrf = array([
    [ 10, 10, nan, nan, 0.26, 0.00, nan ],
    [ 20, 20, nan, nan, 0.75, 0.00, nan ],
    [ 30, 30, nan, nan, 1.27, 0.00, nan ],
    [ 40, 40, nan, nan, 3.09, 0.00, nan ],
    [ 50, 50, nan, nan, 2.22, 0.00, nan ],
    [ 60, 60, nan, nan, 3.92, 0.00, nan ],
    [ 70, 70, nan, nan, 1.11, 0.00, nan ],
    [ 80, 80, nan, nan, 1.59, 0.00, nan ],
    [ 90, 90, nan, nan, 2.07, 0.00, nan ],
    [ 100, 100, nan, nan, 2.71, 0.00, nan ],
    [ 200, 200, nan, nan, 10.69, 0.00, nan ],
    [ 300, 300, nan, nan, 23.03, 0.00, nan ],
    [ 400, 400, nan, nan, 36.86, 0.00, nan ],
    [ 500, 500, nan, nan, 52.48, 0.01, nan ],
    [ 600, 600, nan, nan, 68.63, 0.01, nan ],
    [ 700, 700, nan, nan, 87.08, 0.01, nan ],
    [ 800, 800, nan, nan, 105.82, 0.01, nan ],
    [ 900, 900, nan, nan, 123.14, 0.02, nan ],
    [ 1000, 1000, nan, nan, 142.71, 0.02, nan ],
    [ 2000, 2000, nan, nan, 339.72, 0.06, nan ],
    [ 3000, 3000, nan, nan, 518.56, 0.14, nan ],
    [ 4000, 4000, nan, nan, 627.35, 0.27, nan ],
    [ 5000, 5000, nan, nan, 684.01, 0.49, nan ],
    [ 6000, 6000, nan, nan, 772.08, 0.75, nan ],
    [ 7000, 7000, nan, nan, 829.56, 1.10, nan ],
    [ 8000, 8000, nan, nan, 882.56, 1.55, nan ],
    [ 9000, 9000, nan, nan, 906.12, 2.15, nan ],
    [ 10000, 10000, nan, nan, 944.31, 2.82, nan ],
    [ 12000, 12000, nan, nan, 994.15, 4.63, nan ],
    [ 14000, 14000, nan, nan, 1027.60, 7.12, nan ],
    [ 16000, 16000, nan, nan, 1053.91, 10.36, nan ],
    [ 18000, 18000, nan, nan, 1063.89, 14.62, nan ],
    [ 20000, 20000, nan, nan, 1071.63, 19.91, nan ],
])
# numactl --interleave=all ./testing_zgetrf_gpu -N 100 -N 1000 --range 10:90:10 --range 100:900:100 --range 1000:9000:1000 --range 10000:20000:2000
# Same column layout as zgetrf above (GPU-interface variant).
zgetrf_gpu = array([
    [ 10, 10, nan, nan, 0.06, 0.00, nan ],
    [ 20, 20, nan, nan, 0.36, 0.00, nan ],
    [ 30, 30, nan, nan, 0.69, 0.00, nan ],
    [ 40, 40, nan, nan, 1.38, 0.00, nan ],
    [ 50, 50, nan, nan, 1.19, 0.00, nan ],
    [ 60, 60, nan, nan, 2.64, 0.00, nan ],
    [ 70, 70, nan, nan, 0.67, 0.00, nan ],
    [ 80, 80, nan, nan, 1.02, 0.00, nan ],
    [ 90, 90, nan, nan, 1.35, 0.00, nan ],
    [ 100, 100, nan, nan, 1.79, 0.00, nan ],
    [ 200, 200, nan, nan, 7.88, 0.00, nan ],
    [ 300, 300, nan, nan, 18.82, 0.00, nan ],
    [ 400, 400, nan, nan, 32.03, 0.01, nan ],
    [ 500, 500, nan, nan, 50.66, 0.01, nan ],
    [ 600, 600, nan, nan, 68.52, 0.01, nan ],
    [ 700, 700, nan, nan, 89.21, 0.01, nan ],
    [ 800, 800, nan, nan, 110.91, 0.01, nan ],
    [ 900, 900, nan, nan, 133.32, 0.01, nan ],
    [ 1000, 1000, nan, nan, 161.19, 0.02, nan ],
    [ 2000, 2000, nan, nan, 405.92, 0.05, nan ],
    [ 3000, 3000, nan, nan, 630.44, 0.11, nan ],
    [ 4000, 4000, nan, nan, 753.23, 0.23, nan ],
    [ 5000, 5000, nan, nan, 725.11, 0.46, nan ],
    [ 6000, 6000, nan, nan, 884.64, 0.65, nan ],
    [ 7000, 7000, nan, nan, 945.01, 0.97, nan ],
    [ 8000, 8000, nan, nan, 996.60, 1.37, nan ],
    [ 9000, 9000, nan, nan, 986.27, 1.97, nan ],
    [ 10000, 10000, nan, nan, 1021.82, 2.61, nan ],
    [ 12000, 12000, nan, nan, 1076.85, 4.28, nan ],
    [ 14000, 14000, nan, nan, 1110.41, 6.59, nan ],
    [ 16000, 16000, nan, nan, 1120.76, 9.75, nan ],
    [ 18000, 18000, nan, nan, 1133.20, 13.72, nan ],
    [ 20000, 20000, nan, nan, 1120.56, 19.04, nan ],
])
# ------------------------------------------------------------
# file: v1.6.1/cuda7.0-k40c/zheevd.txt
# numactl --interleave=all ./testing_zheevd -JN -N 100 -N 1000 --range 10:90:10 --range 100:900:100 --range 1000:9000:1000 --range 10000:20000:2000
# Columns appear to be: N, CPU time (s), GPU time (s) -- TODO confirm
# against the MAGMA testing_zheevd output format.
zheevd_JN = array([
    [ 10, nan, 0.0000 ],
    [ 20, nan, 0.0001 ],
    [ 30, nan, 0.0001 ],
    [ 40, nan, 0.0002 ],
    [ 50, nan, 0.0003 ],
    [ 60, nan, 0.0004 ],
    [ 70, nan, 0.0007 ],
    [ 80, nan, 0.0009 ],
    [ 90, nan, 0.0013 ],
    [ 100, nan, 0.0016 ],
    [ 200, nan, 0.0148 ],
    [ 300, nan, 0.0275 ],
    [ 400, nan, 0.0471 ],
    [ 500, nan, 0.0672 ],
    [ 600, nan, 0.0942 ],
    [ 700, nan, 0.1218 ],
    [ 800, nan, 0.1559 ],
    [ 900, nan, 0.1931 ],
    [ 1000, nan, 0.2313 ],
    [ 2000, nan, 0.8349 ],
    [ 3000, nan, 2.0546 ],
    [ 4000, nan, 3.9284 ],
    [ 5000, nan, 6.6537 ],
    [ 6000, nan, 10.3261 ],
    [ 7000, nan, 15.1780 ],
    [ 8000, nan, 21.2056 ],
    [ 9000, nan, 28.8199 ],
    [ 10000, nan, 37.7967 ],
    [ 12000, nan, 61.5253 ],
    [ 14000, nan, 93.0888 ],
    [ 16000, nan, 135.2279 ],
    [ 18000, nan, 189.0661 ],
    [ 20000, nan, 256.5293 ],
])
# numactl --interleave=all ./testing_zheevd -JV -N 100 -N 1000 --range 10:90:10 --range 100:900:100 --range 1000:9000:1000 --range 10000:20000:2000
# Same layout as zheevd_JN; -JV computes eigenvectors as well.
zheevd_JV = array([
    [ 10, nan, 0.0002 ],
    [ 20, nan, 0.0002 ],
    [ 30, nan, 0.0003 ],
    [ 40, nan, 0.0005 ],
    [ 50, nan, 0.0007 ],
    [ 60, nan, 0.0009 ],
    [ 70, nan, 0.0014 ],
    [ 80, nan, 0.0018 ],
    [ 90, nan, 0.0022 ],
    [ 100, nan, 0.0027 ],
    [ 200, nan, 0.0201 ],
    [ 300, nan, 0.0345 ],
    [ 400, nan, 0.0567 ],
    [ 500, nan, 0.0815 ],
    [ 600, nan, 0.1100 ],
    [ 700, nan, 0.1416 ],
    [ 800, nan, 0.1816 ],
    [ 900, nan, 0.2242 ],
    [ 1000, nan, 0.2697 ],
    [ 2000, nan, 1.0056 ],
    [ 3000, nan, 2.3441 ],
    [ 4000, nan, 4.5146 ],
    [ 5000, nan, 7.7003 ],
    [ 6000, nan, 12.1018 ],
    [ 7000, nan, 18.2927 ],
    [ 8000, nan, 25.3105 ],
    [ 9000, nan, 34.7899 ],
    [ 10000, nan, 45.9261 ],
    [ 12000, nan, 74.9215 ],
    [ 14000, nan, 114.9891 ],
    [ 16000, nan, 167.8138 ],
    [ 18000, nan, 236.7416 ],
    [ 20000, nan, 321.9395 ],
])
# numactl --interleave=all ./testing_zheevd_gpu -JN -N 100 -N 1000 --range 10:90:10 --range 100:900:100 --range 1000:9000:1000 --range 10000:20000:2000
# Same layout as zheevd_JN (GPU-interface variant).
zheevd_gpu_JN = array([
    [ 10, nan, 0.0001 ],
    [ 20, nan, 0.0001 ],
    [ 30, nan, 0.0002 ],
    [ 40, nan, 0.0002 ],
    [ 50, nan, 0.0003 ],
    [ 60, nan, 0.0005 ],
    [ 70, nan, 0.0008 ],
    [ 80, nan, 0.0010 ],
    [ 90, nan, 0.0014 ],
    [ 100, nan, 0.0017 ],
    [ 200, nan, 0.0148 ],
    [ 300, nan, 0.0271 ],
    [ 400, nan, 0.0465 ],
    [ 500, nan, 0.0666 ],
    [ 600, nan, 0.0933 ],
    [ 700, nan, 0.1251 ],
    [ 800, nan, 0.1540 ],
    [ 900, nan, 0.1913 ],
    [ 1000, nan, 0.2276 ],
    [ 2000, nan, 0.8259 ],
    [ 3000, nan, 2.0320 ],
    [ 4000, nan, 3.9013 ],
    [ 5000, nan, 6.5860 ],
    [ 6000, nan, 10.2309 ],
    [ 7000, nan, 14.9722 ],
    [ 8000, nan, 21.0334 ],
    [ 9000, nan, 28.4432 ],
    [ 10000, nan, 37.5093 ],
    [ 12000, nan, 60.9586 ],
    [ 14000, nan, 92.5242 ],
    [ 16000, nan, 134.1422 ],
    [ 18000, nan, 187.9327 ],
    [ 20000, nan, nan ], # malloc failed?
])
# numactl --interleave=all ./testing_zheevd_gpu -JV -N 100 -N 1000 --range 10:90:10 --range 100:900:100 --range 1000:9000:1000 --range 10000:20000:2000
zheevd_gpu_JV = array([
[ 10, nan, 0.0002 ],
[ 20, nan, 0.0002 ],
[ 30, nan, 0.0004 ],
[ 40, nan, 0.0006 ],
[ 50, nan, 0.0007 ],
[ 60, nan, 0.0010 ],
[ | |
# Repository: leonhard-s/auraxium
"""Base classes for the Auraxium object model.
These classes define shared functionality required by all object
representations of API data, and define the basic class hierarchy used
throughout the PlanetSide 2 object model.
"""
import abc
import logging
from typing import Any, ClassVar, List, Optional, Type, TypeVar, Union
import pydantic
from .models.base import RESTPayload
from ._cache import TLRUCache
from .census import Query
from .errors import PayloadError
from ._rest import RequestClient
from .types import CensusData
from ._support import deprecated
__all__ = [
'Ps2Object',
'Cached',
'Named'
]
CachedT = TypeVar('CachedT', bound='Cached')
NamedT = TypeVar('NamedT', bound='Named')
Ps2ObjectT = TypeVar('Ps2ObjectT', bound='Ps2Object')
_log = logging.getLogger('auraxium.ps2')
class Ps2Object(metaclass=abc.ABCMeta):
    """Common base class for all PS2 object representations.

    This requires that subclasses overwrite the :attr:`collection` and
    :attr:`id_field` names, which are used to tie the class to its
    corresponding API counterpart.

    .. attribute:: collection
       :type: str

       The API collection linked to this type.

    .. attribute:: id_field
       :type: str

       The field name containing the unique ID for this type.

       .. note::

          This will generally match the ``<type>_id`` convention, but
          some collections like ``outfit_member`` or ``profile_2`` use
          custom names. This attribute provides support for the latter.
    """

    collection: ClassVar[str] = 'bogus'
    _model: ClassVar[Type[RESTPayload]]
    id_field: ClassVar[str] = 'bogus_id'

    def __init__(self, data: CensusData, client: RequestClient) -> None:
        """Initialise the object.

        This sets the object's :attr:`id` attribute and populates the
        instance using the provided payload.

        :param auraxium.types.CensusData data: The census response
           dictionary to populate the object with.
        :param auraxium.Client client: The client object to use for
           requests performed via this object.
        :raises auraxium.errors.PayloadError: Raised if the payload
           cannot be validated against the type's data model.
        """
        id_ = int(str(data[self.id_field]))
        _log.debug('Instantiating <%s:%d> using payload: %s',
                   self.__class__.__name__, id_, data)
        self.id = id_
        self._client = client
        try:
            self.data = self._model(**data)
        except pydantic.ValidationError as err:
            _log.warning(
                'Encountered unsupported payload: %s\n'
                'This message means that the Auraxium data model must '
                'be updated. Please ensure you are on the latest '
                'version of the Auraxium library and report this '
                'message to the project maintainers.', data)
            raise PayloadError(
                f'Unable to instantiate {self.__class__.__name__} instance '
                f'from given payload: {err}', data) from err

    def __eq__(self, o: Any) -> bool:
        # Two objects are equal if they share the exact class and ID.
        if not isinstance(o, self.__class__):
            return False
        return self.id == o.id

    def __getattr__(self, name: str) -> Any:
        """Fallback for missing attributes.

        This allows missing attributes in the :class:`Ps2Object`
        instance to fall back to its corresponding data class.

        If the attribute cannot be found there either, an
        :exc:`AttributeError` is raised as normal.
        """
        # Fetch "data" without going through normal attribute lookup: if
        # "data" itself is missing (e.g. during unpickling/copying, which
        # may call __getattr__ before __init__ ran), a regular access
        # would re-enter __getattr__ and recurse forever.
        try:
            data = object.__getattribute__(self, 'data')
        except AttributeError:
            raise AttributeError(name) from None
        # Re-raising or propagating the inner exception would only clutter up
        # the exception traceback, so we raise one "from scratch" instead.
        if hasattr(data, name):
            return getattr(data, name)
        raise AttributeError(name)

    def __hash__(self) -> int:
        return hash((self.__class__, self.id))

    def __repr__(self) -> str:
        """Return the unique string representation of this object.

        This will take the form of ``<Class:id>``, e.g.
        ``<Weapon:108>``.
        """
        return f'<{self.__class__.__name__}:{self.id}>'

    @classmethod
    @deprecated('0.2', '0.3', replacement=':meth:`auraxium.Client.count`')
    async def count(cls, client: RequestClient, **kwargs: Any) -> int:
        """Return the number of items matching the given terms.

        :param auraxium.Client client: The client through which to
           perform the request.
        :param kwargs: Any number of query filters to apply.
        :return: The number of matching entries.
        """
        # NOTE: The following is a runtime-only compatibility hack and violates
        # type hinting. This is scheduled for removal as per the decorator.
        return await client.count(cls, **kwargs)  # type: ignore

    @classmethod
    @deprecated('0.2', '0.3', replacement=':meth:`auraxium.Client.find`')
    async def find(cls: Type[Ps2ObjectT], results: int = 10, *,
                   offset: int = 0, promote_exact: bool = False,
                   check_case: bool = True, client: RequestClient,
                   **kwargs: Any) -> List[Ps2ObjectT]:
        """Return a list of entries matching the given terms.

        This returns up to as many entries as indicated by the results
        argument. Note that it may be fewer if not enough matches are
        found.

        :param int results: The maximum number of results.
        :param int offset: The number of entries to skip. Useful for
           paginated views.
        :param bool promote_exact: If enabled, exact matches to
           non-exact searches will always come first in the return
           list.
        :param bool check_case: Whether to check case when comparing
           strings. Note that case-insensitive searches are much more
           expensive.
        :param auraxium.Client client: The client through which to
           perform the request.
        :param kwargs: Any number of filters to apply.
        :return: A list of matching entries.
        """
        # NOTE: The following is a runtime-only compatibility hack and violates
        # type hinting. This is scheduled for removal as per the decorator.
        return await client.find(  # type: ignore
            cls, results=results, offset=offset, promote_exact=promote_exact,
            check_case=check_case, **kwargs)

    @classmethod
    @deprecated('0.2', '0.3', replacement=':meth:`auraxium.Client.get`')
    async def get(cls: Type[Ps2ObjectT], client: RequestClient,
                  check_case: bool = True, **kwargs: Any
                  ) -> Optional[Ps2ObjectT]:
        """Return the first entry matching the given terms.

        Like :meth:`Ps2Object.find`, but will only return one item.

        :param auraxium.Client client: The client through which to
           perform the request.
        :param bool check_case: Whether to check case when comparing
           strings. Note that case-insensitive searches are much more
           expensive.
        :return: A matching entry, or :obj:`None` if not found.
        """
        # NOTE: The following is a runtime-only compatibility hack and violates
        # type hinting. This is scheduled for removal as per the decorator.
        return await client.get(  # type: ignore
            cls, results=1, check_case=check_case, **kwargs)

    @classmethod
    @deprecated('0.2', '0.3', replacement=':meth:`auraxium.Client.get`')
    async def get_by_id(cls: Type[Ps2ObjectT], id_: int, *,
                        client: RequestClient) -> Optional[Ps2ObjectT]:
        """Retrieve an object by its unique Census ID.

        :param int id\\_: The unique ID of the object.
        :param auraxium.Client client: The client through which to
           perform the request.
        :return: The entry with the matching ID, or :obj:`None` if not
           found.
        """
        # NOTE: The following is a runtime-only compatibility hack and violates
        # type hinting. This is scheduled for removal as per the decorator.
        return await client.get_by_id(cls, id_)  # type: ignore

    def query(self) -> Query:
        """Return a query from the current object.

        This is a utility method targeted at advanced users and
        developers. It is generally not required for most use cases.
        """
        query = Query(self.collection, service_id=self._client.service_id)
        query.add_term(field=self.id_field, value=self.id)
        return query
class Cached(Ps2Object, metaclass=abc.ABCMeta):
"""Base class for cacheable data types.
This generates a cache for each subclass that allows the storage
and retrieval of objects by ID. This cache may be customised using
keyword arguments as part of the class definition.
This customisation is done via two parameters: the cache size and
the TTU.
    The cache size defines the maximum number of items the cache may
    hold before it will discard the least recently used item for every
    new item added.
The TTU (time-to-use) will independently discard items that are
older than the given number of seconds to ensure data does not go
too far out of date.
"""
_cache: ClassVar[TLRUCache[int, Any]]
def __init__(self, data: CensusData, client: RequestClient) -> None:
"""Initialise the cached object.
After initialising this object via the parent class's
initialiser, this adds the current class to the cache.
:param auraxium.types.CensusData data: The API response to
instantiate the object from.
:param auraxium.Client client: The client used to retrieve the
object.
"""
super().__init__(data=data, client=client)
self._cache.add(self.id, self)
@classmethod
def __init_subclass__(cls, cache_size: int,
cache_ttu: float = 0.0) -> None:
"""Initialise a cacheable subclass.
This sets up the TLRU cache for the given subclass using the
keyword arguments provided in the class definitions.
:param int cache_size: The maximum number of items in the
cache. Once the cache reaches this number of items, it will
delete the least recently used item for every new item
added.
:param float cache_ttu: The time-to-use for cache items. If an
item is older than TTU allows, it will be re-fetched
regardless of how often it is accessed.
"""
super().__init_subclass__()
_log.debug('Setting up cache for %s (size: %d, ttu: %.1f sec.)',
cls.__name__, cache_size, cache_ttu)
cls._cache = TLRUCache(size=cache_size, ttu=cache_ttu,
name=f'{cls.__name__}_Cache')
@classmethod
def alter_cache(cls, size: int, ttu: Optional[float] = None) -> None:
"""Modify the class cache to use a new size and TTU.
This will update and clear the cache for the current class.
This allows customisation of the class depending on your
use-case.
:param int size: The new cache size.
:param float ttu: The new item TTU.
:raises ValueError: Raised if the size is less than 1.
"""
if size < 1:
raise ValueError(f'{size} is not a valid cache size')
cls._cache.clear()
cls._cache.size = | |
return node
single_param_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_returncode_type, [
PyrexTypes.CFuncTypeArg("obj", PyrexTypes.py_object_type, None),
],
exception_value = "-1")
    def _handle_simple_method_list_sort(self, node, function, args, is_unbound_method):
        """Call PyList_Sort() instead of the 0-argument l.sort().
        """
        # args holds only the bound list object; any explicit argument
        # (key/reverse) disables this optimisation.
        if len(args) != 1:
            return node
        # The C return code is coerced back to the expression's result type.
        return self._substitute_method_call(
            node, function, "PyList_Sort", self.single_param_func_type,
            'sort', is_unbound_method, args).coerce_to(node.type, self.current_env)
Pyx_PyDict_GetItem_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
])
    def _handle_simple_method_dict_get(self, node, function, args, is_unbound_method):
        """Replace dict.get() by a call to PyDict_GetItem().
        """
        # args[0] is the bound dict; normalise the optional default to None.
        if len(args) == 2:
            args.append(ExprNodes.NoneNode(node.pos))
        elif len(args) != 3:
            self._error_wrong_arg_count('dict.get', node, args, "2 or 3")
            return node
        # may_return_none: the default argument may well be None.
        return self._substitute_method_call(
            node, function,
            "__Pyx_PyDict_GetItemDefault", self.Pyx_PyDict_GetItem_func_type,
            'get', is_unbound_method, args,
            may_return_none = True,
            utility_code = load_c_utility("dict_getitem_default"))
Pyx_PyDict_SetDefault_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("is_safe_type", PyrexTypes.c_int_type, None),
])
    def _handle_simple_method_dict_setdefault(self, node, function, args, is_unbound_method):
        """Replace dict.setdefault() by calls to PyDict_GetItem() and PyDict_SetItem().
        """
        # Normalise the optional default argument to None.
        if len(args) == 2:
            args.append(ExprNodes.NoneNode(node.pos))
        elif len(args) != 3:
            self._error_wrong_arg_count('dict.setdefault', node, args, "2 or 3")
            return node
        # Classify the key type for the C helper: 1 = known safe builtin,
        # -1 = unknown (plain object), 0 = known unsafe. Note the substring
        # membership test against the space-separated list of type names.
        key_type = args[1].type
        if key_type.is_builtin_type:
            is_safe_type = int(key_type.name in
                               'str bytes unicode float int long bool')
        elif key_type is PyrexTypes.py_object_type:
            is_safe_type = -1  # don't know
        else:
            is_safe_type = 0  # definitely not
        args.append(ExprNodes.IntNode(
            node.pos, value=str(is_safe_type), constant_result=is_safe_type))
        return self._substitute_method_call(
            node, function,
            "__Pyx_PyDict_SetDefault", self.Pyx_PyDict_SetDefault_func_type,
            'setdefault', is_unbound_method, args,
            may_return_none=True,
            utility_code=load_c_utility('dict_setdefault'))
PyDict_Pop_func_type = PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("dict", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("key", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("default", PyrexTypes.py_object_type, None),
])
    def _handle_simple_method_dict_pop(self, node, function, args, is_unbound_method):
        """Replace dict.pop() by a call to _PyDict_Pop().
        """
        # Unlike dict.get(), a missing default is passed as NULL (not None)
        # so the helper can distinguish "no default" and raise KeyError.
        if len(args) == 2:
            args.append(ExprNodes.NullNode(node.pos))
        elif len(args) != 3:
            self._error_wrong_arg_count('dict.pop', node, args, "2 or 3")
            return node
        return self._substitute_method_call(
            node, function,
            "__Pyx_PyDict_Pop", self.PyDict_Pop_func_type,
            'pop', is_unbound_method, args,
            may_return_none=True,
            utility_code=load_c_utility('py_dict_pop'))
Pyx_BinopInt_func_types = dict(
((ctype, ret_type), PyrexTypes.CFuncType(
ret_type, [
PyrexTypes.CFuncTypeArg("op1", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("op2", PyrexTypes.py_object_type, None),
PyrexTypes.CFuncTypeArg("cval", ctype, None),
PyrexTypes.CFuncTypeArg("inplace", PyrexTypes.c_bint_type, None),
PyrexTypes.CFuncTypeArg("zerodiv_check", PyrexTypes.c_bint_type, None),
], exception_value=None if ret_type.is_pyobject else ret_type.exception_value))
for ctype in (PyrexTypes.c_long_type, PyrexTypes.c_double_type)
for ret_type in (PyrexTypes.py_object_type, PyrexTypes.c_bint_type)
)
    # obj.__add__ -> shared numeric-binop optimiser with C op name 'Add'.
    def _handle_simple_method_object___add__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('Add', node, function, args, is_unbound_method)
    # obj.__sub__ -> shared numeric-binop optimiser ('Subtract').
    def _handle_simple_method_object___sub__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('Subtract', node, function, args, is_unbound_method)
    # obj.__mul__ -> shared numeric-binop optimiser ('Multiply').
    def _handle_simple_method_object___mul__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('Multiply', node, function, args, is_unbound_method)
    # obj.__eq__ -> shared numeric-binop optimiser ('Eq').
    def _handle_simple_method_object___eq__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('Eq', node, function, args, is_unbound_method)
    # obj.__ne__ -> shared numeric-binop optimiser ('Ne').
    def _handle_simple_method_object___ne__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('Ne', node, function, args, is_unbound_method)
    # obj.__and__ -> shared numeric-binop optimiser ('And').
    def _handle_simple_method_object___and__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('And', node, function, args, is_unbound_method)
    # obj.__or__ -> shared numeric-binop optimiser ('Or').
    def _handle_simple_method_object___or__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('Or', node, function, args, is_unbound_method)
    # obj.__xor__ -> shared numeric-binop optimiser ('Xor').
    def _handle_simple_method_object___xor__(self, node, function, args, is_unbound_method):
        return self._optimise_num_binop('Xor', node, function, args, is_unbound_method)
    def _handle_simple_method_object___rshift__(self, node, function, args, is_unbound_method):
        # Only optimise shifts by an integer literal with a constant value
        # in [1, 63]; anything else falls back to the generic call.
        if len(args) != 2 or not isinstance(args[1], ExprNodes.IntNode):
            return node
        if not args[1].has_constant_result() or not (1 <= args[1].constant_result <= 63):
            return node
        return self._optimise_num_binop('Rshift', node, function, args, is_unbound_method)
    def _handle_simple_method_object___lshift__(self, node, function, args, is_unbound_method):
        # Same constant-shift guard as __rshift__ above: literal in [1, 63].
        if len(args) != 2 or not isinstance(args[1], ExprNodes.IntNode):
            return node
        if not args[1].has_constant_result() or not (1 <= args[1].constant_result <= 63):
            return node
        return self._optimise_num_binop('Lshift', node, function, args, is_unbound_method)
    # obj.__mod__ -> division-style optimiser ('Remainder'), constant-checked.
    def _handle_simple_method_object___mod__(self, node, function, args, is_unbound_method):
        return self._optimise_num_div('Remainder', node, function, args, is_unbound_method)
    # obj.__floordiv__ -> division-style optimiser ('FloorDivide').
    def _handle_simple_method_object___floordiv__(self, node, function, args, is_unbound_method):
        return self._optimise_num_div('FloorDivide', node, function, args, is_unbound_method)
    # obj.__truediv__ -> division-style optimiser ('TrueDivide').
    def _handle_simple_method_object___truediv__(self, node, function, args, is_unbound_method):
        return self._optimise_num_div('TrueDivide', node, function, args, is_unbound_method)
    # obj.__div__ (Py2-style division) -> division-style optimiser ('Divide').
    def _handle_simple_method_object___div__(self, node, function, args, is_unbound_method):
        return self._optimise_num_div('Divide', node, function, args, is_unbound_method)
def _optimise_num_div(self, operator, node, function, args, is_unbound_method):
    """Optimise a division-style binop when the divisor is a safe, non-zero
    constant literal; otherwise return the node unchanged.
    """
    if len(args) != 2:
        return node
    divisor = args[1]
    # Never optimise division by zero or a non-constant divisor.
    if not divisor.has_constant_result() or divisor.constant_result == 0:
        return node
    # Bound the constant so the generated C code stays safe:
    # 2**30 for C long arithmetic, 2**53 for exactly representable doubles.
    if isinstance(divisor, ExprNodes.IntNode):
        limit = 2 ** 30
    elif isinstance(divisor, ExprNodes.FloatNode):
        limit = 2 ** 53
    else:
        return node
    if not (-limit <= divisor.constant_result <= limit):
        return node
    return self._optimise_num_binop(operator, node, function, args, is_unbound_method)
# float-typed operand handlers: all delegate to the shared binop optimiser.

def _handle_simple_method_float___add__(self, node, function, args, is_unbound_method):
    return self._optimise_num_binop('Add', node, function, args, is_unbound_method)

def _handle_simple_method_float___sub__(self, node, function, args, is_unbound_method):
    return self._optimise_num_binop('Subtract', node, function, args, is_unbound_method)

def _handle_simple_method_float___truediv__(self, node, function, args, is_unbound_method):
    return self._optimise_num_binop('TrueDivide', node, function, args, is_unbound_method)

def _handle_simple_method_float___div__(self, node, function, args, is_unbound_method):
    return self._optimise_num_binop('Divide', node, function, args, is_unbound_method)

def _handle_simple_method_float___mod__(self, node, function, args, is_unbound_method):
    return self._optimise_num_binop('Remainder', node, function, args, is_unbound_method)

def _handle_simple_method_float___eq__(self, node, function, args, is_unbound_method):
    return self._optimise_num_binop('Eq', node, function, args, is_unbound_method)

def _handle_simple_method_float___ne__(self, node, function, args, is_unbound_method):
    return self._optimise_num_binop('Ne', node, function, args, is_unbound_method)
def _optimise_num_binop(self, operator, node, function, args, is_unbound_method):
    """
    Optimise math operators for (likely) float or small integer operations.

    Rewrites ``obj OP const`` / ``const OP obj`` into a call to a generated
    ``__Pyx_Py{Float,Int}_...`` helper when one operand is a numeric literal
    with a constant result.  Returns the original node when the pattern does
    not apply.
    """
    if len(args) != 2:
        return node
    # Only object results (or C bint results of Eq/Ne) are supported.
    if node.type.is_pyobject:
        ret_type = PyrexTypes.py_object_type
    elif node.type is PyrexTypes.c_bint_type and operator in ('Eq', 'Ne'):
        ret_type = PyrexTypes.c_bint_type
    else:
        return node

    # When adding IntNode/FloatNode to something else, assume other operand is also numeric.
    # Prefer constants on RHS as they allows better size control for some operators.
    num_nodes = (ExprNodes.IntNode, ExprNodes.FloatNode)
    if isinstance(args[1], num_nodes):
        if args[0].type is not PyrexTypes.py_object_type:
            return node
        numval = args[1]
        arg_order = 'ObjC'
    elif isinstance(args[0], num_nodes):
        if args[1].type is not PyrexTypes.py_object_type:
            return node
        numval = args[0]
        arg_order = 'CObj'
    else:
        return node

    if not numval.has_constant_result():
        return node

    is_float = isinstance(numval, ExprNodes.FloatNode)
    num_type = PyrexTypes.c_double_type if is_float else PyrexTypes.c_long_type
    if is_float:
        if operator not in ('Add', 'Subtract', 'Remainder', 'TrueDivide', 'Divide', 'Eq', 'Ne'):
            return node
    elif operator == 'Divide':
        # mixed old-/new-style division is not currently optimised for integers
        return node
    elif abs(numval.constant_result) > 2**30:
        # Cut off at an integer border that is still safe for all operations.
        return node

    if operator in ('TrueDivide', 'FloorDivide', 'Divide', 'Remainder'):
        if args[1].constant_result == 0:
            # Don't optimise division by 0. :)
            return node

    # Re-type the constant operand to the plain C type the helper expects.
    args = list(args)
    args.append((ExprNodes.FloatNode if is_float else ExprNodes.IntNode)(
        numval.pos, value=numval.value, constant_result=numval.constant_result,
        type=num_type))
    # Extra helper argument: whether the original operation was in-place (+= etc.).
    inplace = node.inplace if isinstance(node, ExprNodes.NumBinopNode) else False
    args.append(ExprNodes.BoolNode(node.pos, value=inplace, constant_result=inplace))
    if is_float or operator not in ('Eq', 'Ne'):
        # "PyFloatBinop" and "PyIntBinop" take an additional "check for zero division" argument.
        zerodivision_check = arg_order == 'CObj' and (
            not node.cdivision if isinstance(node, ExprNodes.DivNode) else False)
        args.append(ExprNodes.BoolNode(node.pos, value=zerodivision_check, constant_result=zerodivision_check))

    utility_code = TempitaUtilityCode.load_cached(
        "PyFloatBinop" if is_float else "PyIntCompare" if operator in ('Eq', 'Ne') else "PyIntBinop",
        "Optimize.c",
        context=dict(op=operator, order=arg_order, ret_type=ret_type))

    call_node = self._substitute_method_call(
        node, function,
        "__Pyx_Py%s_%s%s%s" % (
            'Float' if is_float else 'Int',
            '' if ret_type.is_pyobject else 'Bool',
            operator,
            arg_order),
        self.Pyx_BinopInt_func_types[(num_type, ret_type)],
        '__%s__' % operator[:3].lower(), is_unbound_method, args,
        may_return_none=True,
        with_none_check=False,
        utility_code=utility_code)

    # Coerce a C bint result back to the object type the caller expected.
    if node.type.is_pyobject and not ret_type.is_pyobject:
        call_node = ExprNodes.CoerceToPyTypeNode(call_node, self.current_env(), node.type)
    return call_node
### unicode type methods

# C signature shared by the Py_UNICODE_IS*() single-character predicates:
# one Py_UCS4 code point in, a C bint out.
PyUnicode_uchar_predicate_func_type = PyrexTypes.CFuncType(
    PyrexTypes.c_bint_type, [
        PyrexTypes.CFuncTypeArg("uchar", PyrexTypes.c_py_ucs4_type, None),
    ])
def _inject_unicode_predicate(self, node, function, args, is_unbound_method):
    """Replace ``uchar.isalpha()`` etc. on a single unicode character by a
    direct call to the corresponding ``Py_UNICODE_IS*()`` C macro/helper.

    Only applies when the bound object is a C unicode character that was
    coerced to a Python object; otherwise the node is returned unchanged.
    """
    if is_unbound_method or len(args) != 1:
        return node
    ustring = args[0]
    if not isinstance(ustring, ExprNodes.CoerceToPyTypeNode) or \
            not ustring.arg.type.is_unicode_char:
        return node
    uchar = ustring.arg
    method_name = function.attribute
    if method_name == 'istitle':
        # istitle() doesn't directly map to Py_UNICODE_ISTITLE()
        utility_code = UtilityCode.load_cached(
            "py_unicode_istitle", "StringTools.c")
        function_name = '__Pyx_Py_UNICODE_ISTITLE'
    else:
        utility_code = None
        function_name = 'Py_UNICODE_%s' % method_name.upper()
    func_call = self._substitute_method_call(
        node, function,
        function_name, self.PyUnicode_uchar_predicate_func_type,
        method_name, is_unbound_method, [uchar],
        utility_code=utility_code)
    if node.type.is_pyobject:
        # BUG FIX: coerce_to_pyobject() expects the environment object, not the
        # bound current_env method itself (the equivalent coercion in
        # _optimise_num_binop calls self.current_env() with parentheses).
        func_call = func_call.coerce_to_pyobject(self.current_env())
    return func_call
# All single-character unicode predicates share the same injector above.
_handle_simple_method_unicode_isalnum = _inject_unicode_predicate
_handle_simple_method_unicode_isalpha = _inject_unicode_predicate
_handle_simple_method_unicode_isdecimal = _inject_unicode_predicate
_handle_simple_method_unicode_isdigit = _inject_unicode_predicate
_handle_simple_method_unicode_islower = _inject_unicode_predicate
_handle_simple_method_unicode_isnumeric = _inject_unicode_predicate
_handle_simple_method_unicode_isspace = _inject_unicode_predicate
_handle_simple_method_unicode_istitle = _inject_unicode_predicate
_handle_simple_method_unicode_isupper = _inject_unicode_predicate
PyUnicode_uchar_conversion_func_type = PyrexTypes.CFuncType(
PyrexTypes.c_py_ucs4_type, [
PyrexTypes.CFuncTypeArg("uchar", PyrexTypes.c_py_ucs4_type, None),
])
# DISABLED: Return value can only be one character, which is not correct.
'''
def _inject_unicode_character_conversion(self, node, function, args, is_unbound_method):
if is_unbound_method or len(args) != 1:
return node
ustring = args[0]
if not isinstance(ustring, ExprNodes.CoerceToPyTypeNode) or \
not ustring.arg.type.is_unicode_char:
return node
uchar = ustring.arg
method_name = function.attribute
function_name = 'Py_UNICODE_TO%s' % method_name.upper()
func_call = self._substitute_method_call(
node, function,
function_name, self.PyUnicode_uchar_conversion_func_type,
method_name, is_unbound_method, [uchar])
if node.type.is_pyobject:
func_call = func_call.coerce_to_pyobject(self.current_env)
return func_call
#_handle_simple_method_unicode_lower = _inject_unicode_character_conversion
#_handle_simple_method_unicode_upper = _inject_unicode_character_conversion
#_handle_simple_method_unicode_title = _inject_unicode_character_conversion
'''
# C signature of PyUnicode_Splitlines(str, keepends) -> list.
PyUnicode_Splitlines_func_type = PyrexTypes.CFuncType(
    Builtin.list_type, [
        PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
        PyrexTypes.CFuncTypeArg("keepends", PyrexTypes.c_bint_type, None),
    ])
def _handle_simple_method_unicode_splitlines(self, node, function, args, is_unbound_method):
    """Rewrite unicode.splitlines([keepends]) into a direct
    PyUnicode_Splitlines() C-API call.
    """
    if len(args) in (1, 2):
        # Fill in keepends=False when the optional argument was omitted.
        self._inject_bint_default_argument(node, args, 1, False)
        return self._substitute_method_call(
            node, function,
            "PyUnicode_Splitlines", self.PyUnicode_Splitlines_func_type,
            'splitlines', is_unbound_method, args)
    self._error_wrong_arg_count('unicode.splitlines', node, args, "1 or 2")
    return node
# C signature of PyUnicode_Split(str, sep, maxsplit) -> list.
PyUnicode_Split_func_type = PyrexTypes.CFuncType(
    Builtin.list_type, [
        PyrexTypes.CFuncTypeArg("str", Builtin.unicode_type, None),
        PyrexTypes.CFuncTypeArg("sep", PyrexTypes.py_object_type, None),
        PyrexTypes.CFuncTypeArg("maxsplit", PyrexTypes.c_py_ssize_t_type, None),
    ]
)
def _handle_simple_method_unicode_split(self, node, function, args, is_unbound_method):
"""Replace unicode.split(...) by a direct call to the
corresponding C-API function.
"""
if len(args) not in (1,2,3):
self._error_wrong_arg_count('unicode.split', node, args, "1-3")
return node
if len(args) | |
"""Close connection to a currently connected snes"""
self.ctx.snes_reconnect_address = None
if self.ctx.snes_socket is not None and not self.ctx.snes_socket.closed:
asyncio.create_task(self.ctx.snes_socket.close())
return True
else:
return False
def _cmd_connect(self, address: str = "") -> bool:
    """Connect to a MultiWorld Server"""
    # Clear the stored address first; connect() runs asynchronously.
    self.ctx.server_address = None
    asyncio.create_task(connect(self.ctx, address if address else None))
    return True
def _cmd_disconnect(self) -> bool:
    """Disconnect from a MultiWorld Server"""
    # Clearing server_address prevents automatic reconnection attempts.
    self.ctx.server_address = None
    asyncio.create_task(self.ctx.disconnect())
    return True
def _cmd_received(self) -> bool:
    """List all received items"""
    self.ctx.ui_node.log_info('Received items:')
    # Enumerate from 1 so the index matches the "n/total in list" display.
    for index, item in enumerate(self.ctx.items_received, 1):
        self.ctx.ui_node.notify_item_received(self.ctx.player_names[item.player], get_item_name_from_id(item.item),
                                              get_location_name_from_address(item.location), index,
                                              len(self.ctx.items_received))
        logging.info('%s from %s (%s) (%d/%d in list)' % (
            color(get_item_name_from_id(item.item), 'red', 'bold'),
            color(self.ctx.player_names[item.player], 'yellow'),
            get_location_name_from_address(item.location), index, len(self.ctx.items_received)))
    return True
def _cmd_missing(self) -> bool:
    """List all missing location checks, from your local game state"""
    count = 0
    for name, data in Regions.location_table.items():
        # Only entries whose first element is a numeric address are real checks.
        if type(data[0]) is not int:
            continue
        if name in self.ctx.locations_checked:
            continue
        self.output('Missing: ' + name)
        count += 1
    if count:
        self.output(f"Found {count} missing location checks")
    else:
        self.output("No missing location checks found.")
    return True
def _cmd_show_items(self, toggle: str = "") -> bool:
    """Toggle showing of items received across the team"""
    # Explicit argument sets the state; no argument flips it.
    new_state = (toggle.lower() in {"1", "true", "on"}) if toggle else (not self.ctx.found_items)
    self.ctx.found_items = new_state
    self.ctx.ui_node.log_info(f"Set showing team items to {self.ctx.found_items}")
    # Update the server-side tags so it knows whether to broadcast found items.
    asyncio.create_task(self.ctx.send_msgs([['UpdateTags', get_tags(self.ctx)]]))
    return True
def _cmd_slow_mode(self, toggle: str = ""):
    """Toggle slow mode, which limits how fast you send / receive items."""
    # Explicit argument sets the state; no argument flips it.
    new_state = (toggle.lower() in {"1", "true", "on"}) if toggle else (not self.ctx.slow_mode)
    self.ctx.slow_mode = new_state
    self.ctx.ui_node.log_info(f"Setting slow mode to {self.ctx.slow_mode}")
def _cmd_web(self):
    """Open the browser-based web UI, if its server was started."""
    if self.ctx.webui_socket_port:
        webbrowser.open(f'http://localhost:5050?port={self.ctx.webui_socket_port}')
    else:
        self.output("Web UI was never started.")
def default(self, raw: str):
    # Any input that is not a recognised command is sent as chat.
    asyncio.create_task(self.ctx.send_msgs([['Say', raw]]))
async def console_loop(ctx: Context):
    """Read lines from the interactive prompt until the client exits.

    A line is first offered to any pending server-side input request;
    otherwise it is dispatched through the client command processor.
    """
    session = prompt_toolkit.PromptSession()
    commandprocessor = ClientCommandProcessor(ctx)
    while not ctx.exit_event.is_set():
        try:
            with patch_stdout():
                input_text = await session.prompt_async()
            if ctx.input_requests > 0:
                # The server asked for raw input; route this line there instead.
                ctx.input_requests -= 1
                ctx.input_queue.put_nowait(input_text)
                continue
            if not input_text:
                continue
            commandprocessor(input_text)
        except Exception as e:
            logging.exception(e)
        # Push out any SNES writes a command may have buffered.
        # NOTE(review): flushing on every prompt iteration looks deliberate but
        # unusual -- confirm it belongs inside this loop.
        await snes_flush_writes(ctx)
async def track_locations(ctx : Context, roomid, roomdata):
    """Scan save data for newly completed location checks and report them.

    Covers four storage areas: underworld room flags, overworld screen flags,
    the NPC item bitfield and a few miscellaneous byte/mask locations.
    """
    new_locations = []

    def new_check(location):
        # Record locally, notify the UI and queue the location id for the server.
        ctx.locations_checked.add(location)
        ctx.ui_node.log_info("New check: %s (%d/216)" % (location, len(ctx.locations_checked)))
        ctx.ui_node.send_location_check(ctx, location)
        new_locations.append(Regions.location_table[location][0])

    # Current underworld room: the caller already has its data.
    for location, (loc_roomid, loc_mask) in location_table_uw.items():
        if location not in ctx.locations_checked and loc_roomid == roomid and (roomdata << 4) & loc_mask != 0:
            new_check(location)

    # Other underworld rooms: read only the span covering still-unchecked rooms
    # (2 bytes of flags per room).
    uw_begin = 0x129
    uw_end = 0
    uw_unchecked = {}
    for location, (roomid, mask) in location_table_uw.items():
        if location not in ctx.locations_checked:
            uw_unchecked[location] = (roomid, mask)
            uw_begin = min(uw_begin, roomid)
            uw_end = max(uw_end, roomid + 1)
    if uw_begin < uw_end:
        uw_data = await snes_read(ctx, SAVEDATA_START + (uw_begin * 2), (uw_end - uw_begin) * 2)
        if uw_data is not None:
            for location, (roomid, mask) in uw_unchecked.items():
                offset = (roomid - uw_begin) * 2
                # Little-endian 16-bit room flag word.
                roomdata = uw_data[offset] | (uw_data[offset + 1] << 8)
                if roomdata & mask != 0:
                    new_check(location)

    # Overworld screens: one byte per screen, tested against bit 0x40.
    ow_begin = 0x82
    ow_end = 0
    ow_unchecked = {}
    for location, screenid in location_table_ow.items():
        if location not in ctx.locations_checked:
            ow_unchecked[location] = screenid
            ow_begin = min(ow_begin, screenid)
            ow_end = max(ow_end, screenid + 1)
    if ow_begin < ow_end:
        ow_data = await snes_read(ctx, SAVEDATA_START + 0x280 + ow_begin, ow_end - ow_begin)
        if ow_data is not None:
            for location, screenid in ow_unchecked.items():
                if ow_data[screenid - ow_begin] & 0x40 != 0:
                    new_check(location)

    # NPC-given items: a single little-endian 16-bit bitfield.
    if not all([location in ctx.locations_checked for location in location_table_npc.keys()]):
        npc_data = await snes_read(ctx, SAVEDATA_START + 0x410, 2)
        if npc_data is not None:
            npc_value = npc_data[0] | (npc_data[1] << 8)
            for location, mask in location_table_npc.items():
                if npc_value & mask != 0 and location not in ctx.locations_checked:
                    new_check(location)

    # Miscellaneous checks stored in the 4 bytes at 0x3c6..0x3c9.
    if not all([location in ctx.locations_checked for location in location_table_misc.keys()]):
        misc_data = await snes_read(ctx, SAVEDATA_START + 0x3c6, 4)
        if misc_data is not None:
            for location, (offset, mask) in location_table_misc.items():
                assert(0x3c6 <= offset <= 0x3c9)
                if misc_data[offset - 0x3c6] & mask != 0 and location not in ctx.locations_checked:
                    new_check(location)

    await ctx.send_msgs([['LocationChecks', new_locations]])
async def send_finished_game(ctx: Context):
    """Tell the server this slot's game is finished; log (don't raise) on failure."""
    try:
        await ctx.send_msgs([['GameFinished', '']])
    except Exception as ex:
        logging.exception(ex)
    else:
        # Only mark finished once the message was actually handed off.
        ctx.finished_game = True
async def game_watcher(ctx : Context):
    """Main SNES polling loop.

    Detects the loaded ROM (re-authenticating when it changes), throttles by
    either wall-clock or the in-game frame timer, delivers queued multiworld
    items into game RAM, answers item-scout requests and hands the current
    room data to track_locations().
    """
    prev_game_timer = 0
    perf_counter = time.perf_counter()
    while not ctx.exit_event.is_set():
        # Poll roughly every 125ms, or immediately when the watcher event fires.
        try:
            await asyncio.wait_for(ctx.watcher_event.wait(), 0.125)
        except asyncio.TimeoutError:
            pass
        ctx.watcher_event.clear()

        if not ctx.rom:
            ctx.finished_game = False
            rom = await snes_read(ctx, ROMNAME_START, ROMNAME_SIZE)
            if rom is None or rom == bytes([0] * ROMNAME_SIZE):
                continue
            ctx.rom = list(rom)
            # A different ROM invalidates any locally tracked progress.
            if not ctx.prev_rom or ctx.prev_rom != ctx.rom:
                ctx.locations_checked = set()
                ctx.locations_scouted = set()
            ctx.prev_rom = ctx.rom.copy()
            if ctx.awaiting_rom:
                await server_auth(ctx, False)

        if ctx.auth and ctx.auth != ctx.rom:
            ctx.ui_node.log_warning("ROM change detected, please reconnect to the multiworld server")
            await ctx.disconnect()

        gamemode = await snes_read(ctx, WRAM_START + 0x10, 1)
        gameend = await snes_read(ctx, SAVEDATA_START + 0x443, 1)
        game_timer = await snes_read(ctx, SAVEDATA_START + 0x42E, 4)
        if gamemode is None or gameend is None or game_timer is None or \
                (gamemode[0] not in INGAME_MODES and gamemode[0] not in ENDGAME_MODES):
            continue

        delay = 7 if ctx.slow_mode else 2
        if gameend[0]:
            if not ctx.finished_game:
                await send_finished_game(ctx)
            # Post-game: throttle by wall-clock time.
            if time.perf_counter() - perf_counter < delay:
                continue
            perf_counter = time.perf_counter()
        else:
            # In-game: throttle by the little-endian 32-bit frame timer.
            game_timer = game_timer[0] | (game_timer[1] << 8) | (game_timer[2] << 16) | (game_timer[3] << 24)
            if abs(game_timer - prev_game_timer) < (delay * 60):
                continue
            prev_game_timer = game_timer

        # BUG FIX: compare the mode *byte* against the mode collection, matching
        # the gamemode[0] checks above; comparing the bytes object against a set
        # of ints could never match.
        if gamemode[0] in ENDGAME_MODES:  # triforce room and credits
            continue

        data = await snes_read(ctx, RECV_PROGRESS_ADDR, 8)
        if data is None:
            continue

        # Layout of the 8-byte progress block, asserted against the named addresses.
        recv_index = data[0] | (data[1] << 8)
        assert RECV_ITEM_ADDR == RECV_PROGRESS_ADDR + 2
        recv_item = data[2]
        assert ROOMID_ADDR == RECV_PROGRESS_ADDR + 4
        roomid = data[4] | (data[5] << 8)
        assert ROOMDATA_ADDR == RECV_PROGRESS_ADDR + 6
        roomdata = data[6]
        assert SCOUT_LOCATION_ADDR == RECV_PROGRESS_ADDR + 7
        scout_location = data[7]

        # Deliver the next queued item once the game consumed the previous one.
        if recv_index < len(ctx.items_received) and recv_item == 0:
            item = ctx.items_received[recv_index]
            ctx.ui_node.notify_item_received(ctx.player_names[item.player], get_item_name_from_id(item.item),
                                             get_location_name_from_address(item.location), recv_index + 1,
                                             len(ctx.items_received))
            logging.info('Received %s from %s (%s) (%d/%d in list)' % (
                color(get_item_name_from_id(item.item), 'red', 'bold'), color(ctx.player_names[item.player], 'yellow'),
                get_location_name_from_address(item.location), recv_index + 1, len(ctx.items_received)))
            recv_index += 1
            snes_buffered_write(ctx, RECV_PROGRESS_ADDR, bytes([recv_index & 0xFF, (recv_index >> 8) & 0xFF]))
            snes_buffered_write(ctx, RECV_ITEM_ADDR, bytes([item.item]))
            snes_buffered_write(ctx, RECV_ITEM_PLAYER_ADDR, bytes([item.player if item.player != ctx.slot else 0]))

        # Answer a pending scout request if the location's contents are known.
        if scout_location > 0 and scout_location in ctx.locations_info:
            snes_buffered_write(ctx, SCOUTREPLY_LOCATION_ADDR, bytes([scout_location]))
            snes_buffered_write(ctx, SCOUTREPLY_ITEM_ADDR, bytes([ctx.locations_info[scout_location][0]]))
            snes_buffered_write(ctx, SCOUTREPLY_PLAYER_ADDR, bytes([ctx.locations_info[scout_location][1]]))

        await snes_flush_writes(ctx)

        # Ask the server about a not-yet-scouted location.
        if scout_location > 0 and scout_location not in ctx.locations_scouted:
            ctx.locations_scouted.add(scout_location)
            ctx.ui_node.log_info(f'Scouting item at {list(Regions.location_table.keys())[scout_location - 1]}')
            await ctx.send_msgs([['LocationScouts', [scout_location]]])
        await track_locations(ctx, roomid, roomdata)
async def run_game(romfile):
    """Launch the patched ROM via the OS default handler for the file type."""
    import webbrowser
    webbrowser.open(romfile)
async def websocket_server(websocket: websockets.WebSocketServerProtocol, path, ctx: Context):
    """Serve one Web-UI client: dispatch its JSON messages until it disconnects."""
    endpoint = Endpoint(websocket)
    ctx.ui_node.endpoints.append(endpoint)
    process_command = ClientCommandProcessor(ctx)
    try:
        async for incoming_data in websocket:
            try:
                data = json.loads(incoming_data)
                logging.debug(f"WebUIData:{data}")
                if ('type' not in data) or ('content' not in data):
                    raise Exception('Invalid data received in websocket')
                elif data['type'] == 'webStatus':
                    # Status queries: push the requested state back to the UI.
                    if data['content'] == 'connections':
                        ctx.ui_node.send_connection_status(ctx)
                    elif data['content'] == 'devices':
                        await get_snes_devices(ctx)
                    elif data['content'] == 'gameInfo':
                        ctx.ui_node.send_game_info(ctx)
                    elif data['content'] == 'checkData':
                        ctx.ui_node.send_location_check(ctx, 'Waiting for check...')
                elif data['type'] == 'webConfig':
                    if 'serverAddress' in data['content']:
                        ctx.server_address = data['content']['serverAddress']
                        await connect(ctx, data['content']['serverAddress'])
                    elif 'deviceId' in data['content']:
                        # Allow a SNES disconnect via UI sending -1 as new device
                        if data['content']['deviceId'] == "-1":
                            ctx.ui_node.manual_snes = None
                            ctx.snes_reconnect_address = None
                            await snes_disconnect(ctx)
                        else:
                            # Switching devices: drop the old connection first.
                            await snes_disconnect(ctx)
                            ctx.ui_node.manual_snes = data['content']['deviceId']
                            await snes_connect(ctx, ctx.snes_address)
                elif data['type'] == 'webControl':
                    if 'disconnect' in data['content']:
                        await ctx.disconnect()
                elif data['type'] == 'webCommand':
                    process_command(data['content'])
            except json.JSONDecodeError:
                # Ignore malformed JSON from the UI.
                pass
    except Exception as e:
        # Normal socket teardown surfaces as a WebSocketException; log anything else.
        if not isinstance(e, websockets.WebSocketException):
            logging.exception(e)
    finally:
        await ctx.ui_node.disconnect(endpoint)
async def main():
multiprocessing.freeze_support()
parser = argparse.ArgumentParser()
parser.add_argument('diff_file', default="", type=str, nargs="?",
help='Path to a Berserker Multiworld Binary Patch file')
parser.add_argument('--snes', default='localhost:8080', help='Address of the QUsb2snes server.')
parser.add_argument('--connect', default=None, help='Address of the multiworld host.')
parser.add_argument('--password', default=None, help='Password of the multiworld host.')
parser.add_argument('--loglevel', default='info', choices=['debug', 'info', 'warning', 'error', 'critical'])
parser.add_argument('--founditems', default=False, action='store_true',
help='Show items found by other players for themselves.')
parser.add_argument('--disable_web_ui', default=False, action='store_true', help="Turn off emitting a webserver for the webbrowser based user interface.")
args = parser.parse_args()
logging.basicConfig(format='%(message)s', level=getattr(logging, args.loglevel.upper(), logging.INFO))
port = None
if not args.disable_web_ui:
# Find an available port on the host system to use for hosting the websocket server
while True:
port = randrange(5000, 5999)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
if not sock.connect_ex(('localhost', port)) == 0:
break
import threading
WebUI.start_server(
port, on_start=threading.Timer(1, webbrowser.open, (f'http://localhost:5050?port={port}',)).start)
if args.diff_file:
import Patch
logging.info("Patch file was supplied. Creating sfc rom..")
meta, romfile = Patch.create_rom_file(args.diff_file)
args.connect = meta["server"]
logging.info(f"Wrote rom file to {romfile}")
adjustedromfile, adjusted = Utils.get_adjuster_settings(romfile)
if adjusted:
try:
import os
os.replace(adjustedromfile, romfile)
adjustedromfile = romfile
except | |
<gh_stars>0
#
# Copyright (c), 2018-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author <NAME> <<EMAIL>>
#
"""
This module contains base classes and helper functions for defining Pratt parsers.
"""
import sys
import re
from unicodedata import name as unicode_name
from decimal import Decimal, DecimalException
from itertools import takewhile
from abc import ABCMeta
from collections.abc import MutableSequence
#
# Simple top down parser based on Vaughan Pratt's algorithm (Top Down Operator Precedence).
#
# References:
#
# https://tdop.github.io/ (<NAME>'s "Top Down Operator Precedence" - 1973)
# http://crockford.com/javascript/tdop/tdop.html (D<NAME>rockford - 2007)
# http://effbot.org/zone/simple-top-down-parsing.htm (<NAME> - 2008)
#
# This implementation is based on a base class for tokens and a base class for parsers.
# A real parser is built with a derivation of the base parser class followed by the
# registrations of token classes for the symbols of the language.
#
# A parser can be extended by derivation, copying the reusable token classes and
# defining the additional ones. See the files xpath1_parser.py and xpath2_parser.py
# for a fully implementation example of a real parser.
#
# Parser special symbols set, that includes the TDOP's special symbols plus two
# additional special symbols for managing invalid literals and unknown symbols.
# Symbols the parser treats specially: TDOP's literal and name tokens,
# end-of-input, plus '(invalid)'/'(unknown)' for error handling.
SPECIAL_SYMBOLS = frozenset((
    '(string)', '(float)', '(decimal)', '(integer)',
    '(name)', '(end)', '(invalid)', '(unknown)'
))

# Matches a single whitespace character.
SPACE_PATTERN = re.compile(r'\s')
class ParseError(SyntaxError):
    """Raised when the TDOP parser cannot parse the given source."""
def count_leading_spaces(s):
    """Return the number of consecutive whitespace characters at the start of *s*."""
    count = 0
    for ch in s:
        if not ch.isspace():
            break
        count += 1
    return count
def symbol_to_identifier(symbol):
    """Convert a symbol string into an identifier (alphanumerics and '_' only)."""
    def char_to_name(ch):
        # Keep identifier characters; spell out anything else by its Unicode name.
        if ch.isalnum() or ch == '_':
            return ch
        return '%s_' % unicode_name(str(ch)).title()

    if symbol.isalnum():
        return symbol
    if symbol in SPECIAL_SYMBOLS:
        return symbol[1:-1]  # strip the surrounding parentheses
    if all(ch in '-_' for ch in symbol):
        joined = '_'.join(unicode_name(str(ch)).title() for ch in symbol)
        return joined.replace(' ', '').replace('-', '')

    candidate = symbol.replace('-', '_')
    if candidate.isidentifier():
        return candidate
    spelled = ''.join(char_to_name(ch) for ch in symbol).replace(' ', '').replace('-', '')
    return spelled[:-1] if spelled.endswith('_') else spelled
class MultiLabel(object):
    """A label carrying several values at once, for symbols that play more
    than one role. Equality holds against each contained value:

        label = MultiLabel('function', 'operator')
        label == 'symbol'    # False
        label == 'function'  # True
        label == 'operator'  # True
    """
    def __init__(self, *values):
        self.values = values

    def __eq__(self, other):
        # Equal to any one of the contained values.
        for value in self.values:
            if other == value:
                return True
        return False

    def __ne__(self, other):
        return all(other != value for value in self.values)

    def __repr__(self):
        return '%s%s' % (self.__class__.__name__, self.values)

    def __str__(self):
        return '__'.join(self.values).replace(' ', '_')

    def __hash__(self):
        # Hash like the underlying tuple of values.
        return hash(self.values)

    def __contains__(self, item):
        return any(item in value for value in self.values)

    def startswith(self, string):
        return any(value.startswith(string) for value in self.values)

    def endswith(self, string):
        return any(value.endswith(string) for value in self.values)
class Token(MutableSequence):
"""
Token base class for defining a parser based on Pratt's method.
Each token instance is a list-like object. The number of token's items is
the arity of the represented operator, where token's items are the operands.
Nullary operators are used for symbols, names and literals. Tokens with items
represent the other operators (unary, binary and so on).
Each token class has a *symbol*, a lbp (left binding power) value and a rbp
(right binding power) value, that are used in the sense described by the
Pratt's method. This implementation of Pratt tokens includes two extra
attributes, *pattern* and *label*, that can be used to simplify the parsing
of symbols in a concrete parser.
:param parser: The parser instance that creates the token instance.
:param value: The token value. If not provided defaults to token symbol.
:cvar symbol: the symbol of the token class.
:cvar lbp: Pratt's left binding power, defaults to 0.
:cvar rbp: Pratt's right binding power, defaults to 0.
:cvar pattern: the regex pattern used for the token class. Defaults to the \
escaped symbol. Can be customized to match more detailed conditions (eg. a \
function with its left round bracket), in order to simplify the related code.
:cvar label: defines the typology of the token class. Its value is used in \
representations of the token instance and can be used to restrict code choices \
without more complicated analysis. The label value can be set as needed by the \
parser implementation (eg. 'function', 'axis', 'constructor function' are used by \
the XPath parsers). In the base parser class defaults to 'symbol' with 'literal' \
and 'operator' as possible alternatives. If set by a tuple of values the token \
class label is transformed to a multi-value label, that means the token class can \
covers multiple roles (eg. as XPath function or axis). In those cases the definitive \
role is defined at parse time (nud and/or led methods) after the token instance creation.
"""
# Class-level defaults, overridden by each registered token class.
symbol = None  # the token identifier, key in the token table.
lbp = 0  # Pratt's left binding power
rbp = 0  # Pratt's right binding power
pattern = None  # the token regex pattern, for building the tokenizer.
label = 'symbol'  # optional label; may be a MultiLabel for multi-role symbols
def __init__(self, parser, value=None):
    """Create a token bound to *parser*; *value* defaults to the class symbol."""
    self._items = []  # operand sub-tokens; length == operator arity
    self.parser = parser
    self.value = value if value is not None else self.symbol
    self._source = parser.source
    try:
        self.span = parser.match.span()
    except AttributeError:
        # If the token is created outside the parsing phase and then
        # the source string is the empty string and match is None
        self.span = (0, 0)
# MutableSequence protocol: a token's items are its operand sub-tokens.
def __getitem__(self, i):
    return self._items[i]

def __setitem__(self, i, item):
    self._items[i] = item

def __delitem__(self, i):
    del self._items[i]

def __len__(self):
    return len(self._items)

def insert(self, i, item):
    self._items.insert(i, item)
def __str__(self):
    """Show special tokens by their value, other tokens by symbol and label."""
    if self.symbol in SPECIAL_SYMBOLS:
        kind = self.symbol[1:-1]  # e.g. '(integer)' -> 'integer'
        return '%r %s' % (self.value, kind)
    return '%r %s' % (self.symbol, self.label)
def __repr__(self):
symbol, value = self.symbol, self.value
if value != symbol:
return u'%s(value=%r)' % (self.__class__.__name__, value)
else:
return u'%s()' % self.__class__.__name__
def __eq__(self, other):
try:
return self.symbol == other.symbol and self.value == other.value
except AttributeError:
return False
@property
def arity(self):
    """The operator's arity, i.e. the number of stored operand tokens."""
    return len(self)
@property
def tree(self):
    """Returns a tree representation string."""
    symbol, length = self.symbol, len(self)
    if symbol == '(name)':
        return u'(%s)' % self.value
    elif symbol in SPECIAL_SYMBOLS:
        return u'(%r)' % self.value
    elif symbol == '(':
        # Parentheses are transparent: show the wrapped expression, '()' when empty.
        return '()' if not self else self[0].tree
    elif not length:
        return u'(%s)' % symbol
    else:
        # Prefix form: (operator operand1 operand2 ...)
        return u'(%s %s)' % (symbol, ' '.join(item.tree for item in self))
@property
def source(self):
    """Returns the source representation string."""
    symbol = self.symbol
    if symbol == '(name)':
        return self.value
    elif symbol == '(decimal)':
        return str(self.value)
    elif symbol in SPECIAL_SYMBOLS:
        return repr(self.value)
    else:
        length = len(self)
        if not length:
            return symbol
        elif length == 1:
            # Unary operator: prefix form.
            return u'%s %s' % (symbol, self[0].source)
        elif length == 2:
            # Binary operator: infix form.
            return u'%s %s %s' % (self[0].source, symbol, self[1].source)
        else:
            return u'%s %s' % (symbol, ' '.join(item.source for item in self))
@property
def position(self):
    """A tuple with the position of the token in terms of line and column."""
    # Without source text (e.g. tokens built outside parsing) no position exists.
    if not isinstance(self._source, (str, bytes)):
        return None, None
    token_index = self.span[0]
    line = self._source[:token_index].count('\n') + 1
    if line == 1:
        column = token_index + 1
    else:
        column = token_index - self._source[:token_index].rindex('\n') + 1
    # NOTE(review): the slice below indexes the whole source by *column*
    # (a 1-based line offset), not by the token's absolute offset -- verify
    # this leading-whitespace adjustment is intended.
    return line, column + count_leading_spaces(self._source[column - 1:])
def nud(self):
    """Pratt's null denotation method"""
    # Called when the token starts an expression; by default the symbol is invalid there.
    raise self.wrong_syntax()

def led(self, left):
    """Pratt's left denotation method"""
    # Called with the already-parsed left operand; by default the symbol is invalid there.
    raise self.wrong_syntax()
def evaluate(self, *args, **kwargs):
    """Evaluation method"""
    # Default is a no-op returning None; concrete token classes override this.
def iter(self, *symbols):
    """Returns a generator for iterating the token's tree."""
    # When *symbols* are given, only tokens with a matching symbol are yielded.
    if not self:
        # Leaf token.
        if not symbols or self.symbol in symbols:
            yield self
    elif len(self) == 1:
        # Unary: operator before its single operand.
        if not symbols or self.symbol in symbols:
            yield self
        yield from self[0].iter(*symbols)
    else:
        # N-ary: first operand, then the operator, then the remaining operands.
        yield from self[0].iter(*symbols)
        if not symbols or self.symbol in symbols:
            yield self
        for t in self._items[1:]:
            yield from t.iter(*symbols)
def expected(self, *symbols, message=None):
    """Raise a parse error unless this token's symbol is one of *symbols*."""
    if symbols and self.symbol not in symbols:
        raise self.wrong_syntax(message)

def unexpected(self, *symbols, message=None):
    """Raise a parse error if this token's symbol is one of *symbols*
    (or unconditionally, when no symbols are given)."""
    if not symbols or self.symbol in symbols:
        raise self.wrong_syntax(message)
def wrong_syntax(self, message=None):
if message:
return ParseError(message)
elif self.symbol not in SPECIAL_SYMBOLS:
return ParseError('unexpected %s' % self)
elif self.symbol == '(invalid)':
return ParseError('invalid literal %r' % self.value)
| |
Sets the tags of this IaasUcsdInfo.
The array of tags, which allow to add key, value meta-data to managed objects.
:param tags: The tags of this IaasUcsdInfo.
:type: list[MoTag]
"""
self._tags = tags
@property
def version_context(self):
    """
    Gets the version_context of this IaasUcsdInfo.
    The versioning info for this managed object.
    :return: The version_context of this IaasUcsdInfo.
    :rtype: MoVersionContext
    """
    # Plain accessor over the private backing attribute.
    return self._version_context

@version_context.setter
def version_context(self, version_context):
    """
    Sets the version_context of this IaasUcsdInfo.
    The versioning info for this managed object.
    :param version_context: The version_context of this IaasUcsdInfo.
    :type: MoVersionContext
    """
    # No validation is performed; the value is stored as given.
    self._version_context = version_context
@property
def ancestors(self):
    """
    Gets the ancestors of this IaasUcsdInfo.
    The array containing the MO references of the ancestors in the object containment hierarchy.
    :return: The ancestors of this IaasUcsdInfo.
    :rtype: list[MoBaseMoRef]
    """
    # Plain accessor over the private backing attribute.
    return self._ancestors

@ancestors.setter
def ancestors(self, ancestors):
    """
    Sets the ancestors of this IaasUcsdInfo.
    The array containing the MO references of the ancestors in the object containment hierarchy.
    :param ancestors: The ancestors of this IaasUcsdInfo.
    :type: list[MoBaseMoRef]
    """
    # No validation is performed; the value is stored as given.
    self._ancestors = ancestors
@property
def parent(self):
    """
    Gets the parent of this IaasUcsdInfo.
    The direct ancestor of this managed object in the containment hierarchy.
    :return: The parent of this IaasUcsdInfo.
    :rtype: MoBaseMoRef
    """
    # Plain accessor over the private backing attribute.
    return self._parent

@parent.setter
def parent(self, parent):
    """
    Sets the parent of this IaasUcsdInfo.
    The direct ancestor of this managed object in the containment hierarchy.
    :param parent: The parent of this IaasUcsdInfo.
    :type: MoBaseMoRef
    """
    # No validation is performed; the value is stored as given.
    self._parent = parent
@property
def permission_resources(self):
    """All permission resources (organizations) associated with this object.

    Permission ties resources to their associated roles/privileges; currently
    only organizations can be specified in a permission. Child objects
    contained across organizations carry the union of their parents' orgs.

    :return: The permission_resources of this IaasUcsdInfo.
    :rtype: list[MoBaseMoRef]
    """
    return self._permission_resources

@permission_resources.setter
def permission_resources(self, value):
    """Set the permission resources (organizations) of this object.

    :param value: The permission_resources of this IaasUcsdInfo.
    :type: list[MoBaseMoRef]
    """
    self._permission_resources = value
@property
def device_id(self):
    """Moid of the UCSD device connector's asset.DeviceRegistration.

    :return: The device_id of this IaasUcsdInfo.
    :rtype: str
    """
    return self._device_id

@device_id.setter
def device_id(self, value):
    """Set the Moid of the UCSD device connector's asset.DeviceRegistration.

    :param value: The device_id of this IaasUcsdInfo.
    :type: str
    """
    self._device_id = value
@property
def guid(self):
    """Unique ID with which this UCSD is registered with Intersight.

    :return: The guid of this IaasUcsdInfo.
    :rtype: str
    """
    return self._guid

@guid.setter
def guid(self, value):
    """Set the unique Intersight registration ID of this UCSD.

    :param value: The guid of this IaasUcsdInfo.
    :type: str
    """
    self._guid = value
@property
def host_name(self):
    """The UCSD host name.

    :return: The host_name of this IaasUcsdInfo.
    :rtype: str
    """
    return self._host_name

@host_name.setter
def host_name(self, value):
    """Set the UCSD host name.

    :param value: The host_name of this IaasUcsdInfo.
    :type: str
    """
    self._host_name = value
@property
def ip(self):
    """The UCSD IP address.

    :return: The ip of this IaasUcsdInfo.
    :rtype: str
    """
    return self._ip

@ip.setter
def ip(self, value):
    """Set the UCSD IP address.

    :param value: The ip of this IaasUcsdInfo.
    :type: str
    """
    self._ip = value
@property
def last_backup(self):
    """Last successful backup of this UCS Director appliance, if backup is configured.

    :return: The last_backup of this IaasUcsdInfo.
    :rtype: datetime
    """
    return self._last_backup

@last_backup.setter
def last_backup(self, value):
    """Set the timestamp of the last successful backup of this appliance.

    :param value: The last_backup of this IaasUcsdInfo.
    :type: datetime
    """
    self._last_backup = value
@property
def node_type(self):
    """Whether UCSD is deployed as Stand-alone or Multi Node.

    :return: The node_type of this IaasUcsdInfo.
    :rtype: str
    """
    return self._node_type

@node_type.setter
def node_type(self, value):
    """Set the deployment node type (Stand-alone or Multi Node).

    :param value: The node_type of this IaasUcsdInfo.
    :type: str
    """
    self._node_type = value
@property
def product_name(self):
    """The UCSD product name.

    :return: The product_name of this IaasUcsdInfo.
    :rtype: str
    """
    return self._product_name

@product_name.setter
def product_name(self, value):
    """Set the UCSD product name.

    :param value: The product_name of this IaasUcsdInfo.
    :type: str
    """
    self._product_name = value
@property
def product_vendor(self):
    """The UCSD product vendor.

    :return: The product_vendor of this IaasUcsdInfo.
    :rtype: str
    """
    return self._product_vendor

@product_vendor.setter
def product_vendor(self, value):
    """Set the UCSD product vendor.

    :param value: The product_vendor of this IaasUcsdInfo.
    :type: str
    """
    self._product_vendor = value
@property
def product_version(self):
    """The UCSD product/platform version.

    :return: The product_version of this IaasUcsdInfo.
    :rtype: str
    """
    return self._product_version

@product_version.setter
def product_version(self, value):
    """Set the UCSD product/platform version.

    :param value: The product_version of this IaasUcsdInfo.
    :type: str
    """
    self._product_version = value
@property
def status(self):
    """The UCSD status. Possible values are Active, Inactive, Unknown.

    :return: The status of this IaasUcsdInfo.
    :rtype: str
    """
    return self._status

@status.setter
def status(self, value):
    """Set the UCSD status (Active, Inactive or Unknown).

    :param value: The status of this IaasUcsdInfo.
    :type: str
    """
    self._status = value
@property
def connector_pack(self):
    """Relationship to the connector packs installed on the UCSD.

    :return: The connector_pack of this IaasUcsdInfo.
    :rtype: list[IaasConnectorPackRef]
    """
    return self._connector_pack

@connector_pack.setter
def connector_pack(self, value):
    """Set the relationship to the connector packs installed on the UCSD.

    :param value: The connector_pack of this IaasUcsdInfo.
    :type: list[IaasConnectorPackRef]
    """
    self._connector_pack = value
@property
def device_status(self):
    """Relationship to the infra accounts managed by the UCSD.

    :return: The device_status of this IaasUcsdInfo.
    :rtype: list[IaasDeviceStatusRef]
    """
    return self._device_status

@device_status.setter
def device_status(self, value):
    """Set the relationship to the infra accounts managed by the UCSD.

    :param value: The device_status of this IaasUcsdInfo.
    :type: list[IaasDeviceStatusRef]
    """
    self._device_status = value
@property
def license_info(self):
    """Relationship to license information of the UCSD.

    :return: The license_info of this IaasUcsdInfo.
    :rtype: IaasLicenseInfoRef
    """
    return self._license_info

@license_info.setter
def license_info(self, value):
    """Set the relationship to license information of the UCSD.

    :param value: The license_info of this IaasUcsdInfo.
    :type: IaasLicenseInfoRef
    """
    self._license_info = value
@property
def most_run_tasks(self):
"""
Gets the most_run_tasks of this IaasUcsdInfo.
Relationship to collection of MostRunTasks objects with cascade on delete of UcsdInfo object.
:return: The most_run_tasks | |
import collections
import itertools
from . import raw_ast, common, objects
def _astclass(name, fields):
# type is set to None for statements
return collections.namedtuple(name, ['location', 'type'] + fields)
# --- literal and string expressions ---
StrConstant = _astclass('StrConstant', ['python_string'])
StrJoin = _astclass('StrJoin', ['parts']) # there are always >=2 parts
IntConstant = _astclass('IntConstant', ['python_int'])
# --- variable and attribute access ---
GetVar = _astclass('GetVar', ['var'])
SetVar = _astclass('SetVar', ['var', 'value'])
GetAttr = _astclass('GetAttr', ['obj', 'attrname'])
SetAttr = _astclass('SetAttr', ['obj', 'attrname', 'value'])
GetFromModule = _astclass('GetFromModule', ['other_compilation', 'name'])
# --- functions and modules ---
CreateFunction = _astclass('CreateFunction', ['argvars', 'body'])
CreateLocalVar = _astclass('CreateLocalVar', ['var'])
ExportObject = _astclass('ExportObject', ['name', 'value'])
CallFunction = _astclass('CallFunction', ['function', 'args'])
# --- control flow statements ---
Return = _astclass('Return', ['value']) # value can be None
Throw = _astclass('Throw', ['value'])
IfStatement = _astclass('IfStatement', ['cond', 'if_body', 'else_body'])
IfExpression = _astclass('IfExpression', ['cond', 'true_expr', 'false_expr'])
Loop = _astclass('Loop', ['pre_cond', 'post_cond', 'incr', 'body'])
# each item of catches is an (errorvar, body) pair
TryCatch = _astclass('TryCatch', ['try_body', 'catches'])
TryFinally = _astclass('TryFinally', ['try_body', 'finally_body'])
New = _astclass('New', ['args'])
# used when creating classes
SetMethodsToClass = _astclass('SetMethodsToClass', ['klass', 'methods'])
# --- operators ---
Plus = _astclass('Plus', ['lhs', 'rhs'])
Minus = _astclass('Minus', ['lhs', 'rhs'])
PrefixMinus = _astclass('PrefixMinus', ['prefixed'])
Times = _astclass('Times', ['lhs', 'rhs'])
# Divide = _astclass('Divide', ['lhs', 'rhs'])
IntEqual = _astclass('IntEqual', ['lhs', 'rhs'])
StrEqual = _astclass('StrEqual', ['lhs', 'rhs'])
# equalities are wrapped in this for '!=' operator
BoolNegation = _astclass('BoolNegation', ['value'])
# this is a somewhat evil function
def _replace_generic_markers_with_object(node, markers):
    """Recursively replace generic markers in *node*'s type with Object."""
    object_type = objects.BUILTIN_TYPES['Object']
    node = node._replace(type=node.type.undo_generics(
        dict.fromkeys(markers, object_type)))

    for field, child in node._asdict().items():
        if field in ('location', 'type'):
            continue
        # FIXME: what if the value is a list
        # only recurse into values that look like cooked ast namedtuples
        is_cooked_node = (isinstance(child, tuple) and
                          hasattr(child, 'location') and
                          hasattr(child, 'type'))
        if is_cooked_node:
            node = node._replace(**{
                field: _replace_generic_markers_with_object(child, markers),
            })
    return node
# FIXME: this is wrong? collections.ChainMap.__iter__ source code is:
#
# def __iter__(self):
# return iter(set().union(*self.maps))
def _create_chainmap(fallback_chainmap):
return collections.ChainMap(
collections.OrderedDict(), *fallback_chainmap.maps)
# note that there is code that uses copy.copy() with Variable objects
class Variable:
    """A named, typed variable that lives at some scope nesting level."""

    def __init__(self, name, tybe, definition_location, level):
        self.name = name
        self.type = tybe
        # definition_location can be None (e.g. for built-in variables)
        self.definition_location = definition_location
        self.level = level

    def __repr__(self):
        return '<%s %r: level=%d>' % (
            type(self).__name__, self.name, self.level)
class GenericVariable(Variable):
    """A variable whose type contains generic markers (e.g. T in func[T])."""

    def __init__(self, generic_markers, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.generic_markers = generic_markers

    def __repr__(self):
        marker_names = ', '.join(
            marker.name for marker in self.generic_markers)
        return "<%s '%s[%s]': level=%d>" % (
            type(self).__name__, self.name, marker_names, self.level)
# built-in variables, visible in every scope (level 0)
BUILTIN_VARS = collections.OrderedDict([
    (name, Variable(name, tybe, None, 0))
    for name, tybe in objects.BUILTIN_VARS.items()
])
# built-in generic variables (level 0); each objects entry is a
# (type, generic marker list) pair
BUILTIN_GENERIC_VARS = collections.OrderedDict([
    (name, GenericVariable(generic_types, name, tybe, None, 0))
    for name, (tybe, generic_types) in objects.BUILTIN_GENERIC_VARS.items()
])
class _Chef:
def __init__(self, parent_chef, export_types,
             is_function=False, returntype=None):
    """Create a scope: a root scope when parent_chef is None, else a child.

    Only function scopes may have a returntype.
    """
    if is_function:
        self.is_function = True
        self.returntype = returntype
    else:
        # non-function scopes never have a return type
        assert returntype is None
        self.is_function = False
        self.returntype = None

    self.parent_chef = parent_chef
    if parent_chef is None:
        # root chef: start from the built-in names
        self.level = 0
        self.import_compilations = None
        self.import_name_mapping = None
        # these are ChainMaps to make any_chef.vars.maps[0] always work
        self.vars = collections.ChainMap(BUILTIN_VARS)
        self.types = collections.ChainMap(objects.BUILTIN_TYPES)
        self.generic_vars = collections.ChainMap(BUILTIN_GENERIC_VARS)
        self.generic_types = collections.ChainMap(
            objects.BUILTIN_GENERIC_TYPES)
    else:
        # the level can be incremented immediately after creating a Chef
        self.level = parent_chef.level
        # keys are paths, values are Compilation objects
        self.import_compilations = parent_chef.import_compilations
        # keys are names from import statements, values are paths;
        # share the parent's mapping when it has one
        self.import_name_mapping = (
            {} if parent_chef.import_name_mapping is None
            else parent_chef.import_name_mapping)
        self.vars = _create_chainmap(parent_chef.vars)
        self.types = _create_chainmap(parent_chef.types)
        self.generic_vars = _create_chainmap(parent_chef.generic_vars)
        self.generic_types = _create_chainmap(parent_chef.generic_types)

    # keys are strings, values are type objects
    self.export_types = export_types
def _create_subchef(self):
    """Return a child chef nested in this one, inheriting function-ness."""
    return _Chef(self, self.export_types,
                 is_function=self.is_function, returntype=self.returntype)
# there are multiple different kind of names:
# * types
# * generic types (FIXME: doesn't seem to check for those?)
# * variables
# * generic variables
#
# all can come from any scope
# TODO: display definition location in error message
def _check_name_not_exist(self, name, location):
    """Raise CompileError if *name* clashes with an existing type or variable."""
    clashes = [
        (self.types, "there's already a '%s' type"),
        (self.vars, "there's already a '%s' variable"),
        (self.generic_vars, "there's already a generic '%s' variable"),
    ]
    for namespace, template in clashes:
        if name in namespace:
            raise common.CompileError(template % name, location)
def _get_arguments_message(self, types):
if len(types) >= 2:
return "arguments of types (%s)" % ', '.join(t.name for t in types)
if len(types) == 1:
return "one argument of type %s" % types[0].name
assert not types
return "no arguments"
def _cook_arguments(self, raw_args, expected_types,
                    cannot_do_something, error_location):
    """Cook *raw_args* and check the resulting types against expected_types.

    Raises CompileError (prefixed with *cannot_do_something*) on mismatch.
    """
    cooked = [self.cook_expression(raw) for raw in raw_args]
    got_types = [expression.type for expression in cooked]
    if got_types != expected_types:
        verb = 'is' if len(expected_types) == 1 else 'are'
        raise common.CompileError(
            "%s with %s, because %s %s needed" % (
                cannot_do_something,
                self._get_arguments_message(got_types),
                self._get_arguments_message(expected_types),
                verb,
            ), error_location)
    return cooked
def cook_function_call(self, raw_func_call: raw_ast.FuncCall):
    """Cook a call expression, verifying the callee is actually a function."""
    function = self.cook_expression(raw_func_call.function)
    functype = function.type
    if not isinstance(functype, objects.FunctionType):
        raise common.CompileError(
            "expected a function, got %s" % functype.name,
            function.location)
    args = self._cook_arguments(
        raw_func_call.args, functype.argtypes,
        "cannot call " + functype.name, raw_func_call.location)
    return CallFunction(raw_func_call.location, functype.returntype,
                        function, args)
def get_chef_for_varname(self, varname, is_generic, error_location):
    """Return the innermost chef whose own scope defines *varname*.

    Walks from self up through parent chefs; raises CompileError when
    the variable is not found anywhere.
    """
    namespace_attr = 'generic_vars' if is_generic else 'vars'
    chef = self
    while chef is not None:
        # maps[0] is the chef's own scope (not the inherited fallbacks)
        if varname in getattr(chef, namespace_attr).maps[0]:
            return chef
        chef = chef.parent_chef

    if varname == 'this':
        raise common.CompileError(
            "'this' can be used only inside methods", error_location)
    message = "variable not found: " + varname
    if is_generic:
        message = "generic " + message
    raise common.CompileError(message, error_location)
def cook_expression(self, raw_expression):
    """Convert a raw ast expression into a cooked, type-annotated node.

    The returned cooked node always has its .type set. Raises
    common.CompileError for type mismatches and other compile-time errors.
    """
    if isinstance(raw_expression, raw_ast.String):
        return StrConstant(raw_expression.location,
                           objects.BUILTIN_TYPES['Str'],
                           raw_expression.python_string)

    if isinstance(raw_expression, raw_ast.Integer):
        return IntConstant(raw_expression.location,
                           objects.BUILTIN_TYPES['Int'],
                           raw_expression.python_int)

    if isinstance(raw_expression, raw_ast.GetAttr):
        obj = self.cook_expression(raw_expression.obj)
        try:
            tybe = obj.type.attributes[raw_expression.attrname].tybe
        except KeyError:
            raise common.CompileError(
                "%s objects have no '%s' attribute" % (
                    obj.type.name, raw_expression.attrname),
                raw_expression.location)
        return GetAttr(raw_expression.location, tybe,
                       obj, raw_expression.attrname)

    if isinstance(raw_expression, raw_ast.FuncCall):
        call = self.cook_function_call(raw_expression)
        # a call used as an expression must produce a value
        if call.function.type.returntype is None:
            raise common.CompileError(
                ("functions of type %s don't return a value"
                 % call.function.type.name),
                raw_expression.location)
        return call

    if isinstance(raw_expression, raw_ast.New):
        tybe = self.cook_type(raw_expression.tybe)
        if tybe.constructor_argtypes is None:
            raise common.CompileError(
                ("cannot create {0} objects with 'new {0}(...)'"
                 .format(tybe.name)),
                raw_expression.location)
        args = self._cook_arguments(
            raw_expression.args, tybe.constructor_argtypes,
            "cannot do 'new %s(...)'" % tybe.name, raw_expression.location)
        return New(raw_expression.location, tybe, args)

    if isinstance(raw_expression, raw_ast.FuncDefinition):
        return self.cook_function_definition(raw_expression)

    if isinstance(raw_expression, raw_ast.GetVar):
        if raw_expression.module_path is None:
            chef = self.get_chef_for_varname(
                raw_expression.varname,
                (raw_expression.generics is not None),
                raw_expression.location)
            if raw_expression.generics is None:
                var = chef.vars[raw_expression.varname]
                tybe = var.type
            else:
                # generic variable: substitute the given types for markers
                name = raw_expression.varname
                var = chef.generic_vars[name]
                tybe = objects.substitute_generics(
                    var.type, var.generic_markers,
                    list(map(self.cook_type, raw_expression.generics)),
                    raw_expression.location)
            return GetVar(raw_expression.location, tybe, var)

        assert raw_expression.generics is None, (
            "sorry, import and generics don't work together yet")
        compilation = self.import_compilations[raw_expression.module_path]
        try:
            tybe = compilation.export_types[raw_expression.varname]
        except KeyError:
            # BUG FIX: the message used to be passed unformatted, with the
            # path and varname as extra positional args to CompileError
            # (which takes message and location), so the '%s' placeholders
            # were never filled in and no location was attached
            raise common.CompileError(
                "\"%s\" doesn't export anything called '%s'" % (
                    common.path_string(raw_expression.module_path),
                    raw_expression.varname),
                raw_expression.location)
        return GetFromModule(
            raw_expression.location, tybe,
            compilation, raw_expression.varname)

    # from now on, 'this' is a variable
    # it is actually a keyword to prevent doing confusing things
    if isinstance(raw_expression, raw_ast.ThisExpression):
        chef = self.get_chef_for_varname(
            'this', False, raw_expression.location)
        return GetVar(raw_expression.location, chef.vars['this'].type,
                      chef.vars['this'])

    if isinstance(raw_expression, raw_ast.StrJoin):
        return StrJoin(
            raw_expression.location, objects.BUILTIN_TYPES['Str'],
            list(map(self.cook_expression, raw_expression.parts)))

    if isinstance(raw_expression, raw_ast.PrefixOperator):
        assert raw_expression.operator == '-'
        integer = self.cook_expression(raw_expression.expression)
        if integer.type is not objects.BUILTIN_TYPES['Int']:
            raise common.CompileError(
                "expected -Int, got -%s" % integer.type.name,
                raw_expression.location)
        return PrefixMinus(
            raw_expression.location, objects.BUILTIN_TYPES['Int'], integer)

    # TODO: make this much less hard-coded
    if isinstance(raw_expression, raw_ast.BinaryOperator):
        lhs = self.cook_expression(raw_expression.lhs)
        rhs = self.cook_expression(raw_expression.rhs)

        if raw_expression.operator == '/':
            # i want 3/2 to return 1.5 as a float or fraction object, but
            # the only number type i have now is Int
            raise common.CompileError(
                "sorry, division is not supported yet :(",
                raw_expression.location)

        # '!=' is cooked as '==' wrapped in a BoolNegation below
        if raw_expression.operator == '!=':
            fake_operator = '=='
        else:
            fake_operator = raw_expression.operator

        # TODO: add == for at least Bool
        try:
            b = objects.BUILTIN_TYPES  # pep8 line length
            klass, tybe = {
                (b['Int'], '+', b['Int']): (Plus, b['Int']),
                (b['Int'], '-', b['Int']): (Minus, b['Int']),
                (b['Int'], '*', b['Int']): (Times, b['Int']),
                (b['Int'], '==', b['Int']): (IntEqual, b['Bool']),
                (b['Str'], '==', b['Str']): (StrEqual, b['Bool']),
            }[(lhs.type, fake_operator, rhs.type)]
        except KeyError:
            raise common.CompileError(
                "wrong types: %s %s %s" % (
                    lhs.type.name, raw_expression.operator, rhs.type.name),
                lhs.location + rhs.location)

        result = klass(raw_expression.location, tybe, lhs, rhs)
        if raw_expression.operator == '!=':
            result = BoolNegation(
                raw_expression.location, objects.BUILTIN_TYPES['Bool'],
                result)
        return result

    if isinstance(raw_expression, raw_ast.IfExpression):
        cond = self.cook_expression(raw_expression.cond)
        if cond.type != objects.BUILTIN_TYPES['Bool']:
            raise common.CompileError(
                "expected Bool, got " + cond.type.name, cond.location)
        true_expr = self.cook_expression(raw_expression.true_expr)
        false_expr = self.cook_expression(raw_expression.false_expr)
        # both branches must agree on the expression's type
        if true_expr.type != false_expr.type:
            raise common.CompileError(
                "'then' value has type %s, but 'else' value has type %s"
                % (true_expr.type.name, false_expr.type.name),
                raw_expression.location)
        return IfExpression(raw_expression.location, true_expr.type,
                            cond, true_expr, false_expr)

    raise NotImplementedError(  # pragma: no cover
        "oh no: " + str(type(raw_expression)))
def cook_type(self, tybe):
if isinstance(tybe, raw_ast.GetType):
if tybe.generics is None:
if tybe.name in self.types:
return self.types[tybe.name]
it_is = "type"
else:
if tybe.name in self.generic_types:
return objects.substitute_generics(
self.generic_types[tybe.name],
self.generic_types[tybe.name].generic_types,
list(map(self.cook_type, tybe.generics)),
tybe.location)
it_is = "generic type"
raise common.CompileError(
| |
state as new_state:
new_state.constrain(expression == new_value)
# and set the PC of the new state to the concrete pc-dest
# (or other register or memory address to concrete)
setstate(new_state, new_value)
# enqueue new_state, assign new state id
new_state_id = self._put_state(new_state)
# maintain a list of children for logging purpose
children.append(new_state_id)
with self._lock:
self._busy_states.remove(state.id)
self._remove(state.id)
state._id = None
self._lock.notify_all()
self._publish("did_fork_state", new_state, expression, new_value, policy)
logger.debug("Forking current state %r into states %r", state.id, children)
@staticmethod
@deprecated("Use utils.log.set_verbosity instead.")
def verbosity(level):
    """ Sets the global verbosity level.

    This activates different logging profiles globally, depending
    on the provided numeric value.

    :param level: numeric verbosity level
    """
    set_verbosity(level)
# State storage
@Eventful.will_did("save_state")
def _save(self, state, state_id=None):
    """ Store or update a state in secondary storage under state_id.
    A fresh id is used if None is provided.
    :param state: A manticore State
    :param state_id: if not None force state_id (overwrite)
    :type state_id: int or None
    :returns: the state id used
    """
    # the workspace assigns the (possibly fresh) id; remember it on the state
    state._id = self._workspace.save_state(state, state_id=state_id)
    return state.id
@Eventful.will_did("load_state")
def _load(self, state_id):
    """ Load a state from secondary storage, with a weak in-memory cache.

    :param state_id: a state id
    :type state_id: int
    :returns: the loaded state
    """
    # the weak cache is created lazily on first use
    if not hasattr(self, "stcache"):
        self.stcache = weakref.WeakValueDictionary()
    try:
        return self.stcache[state_id]
    except KeyError:
        pass
    state = self._workspace.load_state(state_id, delete=False)
    state._id = state_id
    self.forward_events_from(state, True)
    self.stcache[state_id] = state
    return state
@Eventful.will_did("remove_state")
def _remove(self, state_id):
    """ Remove a state from secondary storage and drop any cached copy.

    :param state_id: a state id
    :type state_id: int
    """
    if not hasattr(self, "stcache"):
        self.stcache = weakref.WeakValueDictionary()
    # discard the cached entry if present
    self.stcache.pop(state_id, None)
    self._workspace.rm_state(state_id)
# Internal support for state lists
def _put_state(self, state):
    """ Enqueue *state* for exploration.

    Serializes and stores the state under a fresh id, then appends the id
    to the shared READY list and wakes any workers waiting for states.

                +-------+
    State +---->+ READY |
                +-------+
    """
    state_id = self._save(state, state_id=state.id)
    with self._lock:
        # Enqueue it in the ready state list for processing
        self._ready_states.append(state_id)
        self._lock.notify_all()
    return state_id
def _get_state(self, wait=False):
    """ Dequeue a state from the READY list and add it to the BUSY list.

    :param wait: if True, block until a state is available (or the run is
                 killed / no states can ever appear again)
    :returns: the loaded state, or None when cancelled or exhausted
    """
    with self._lock:
        # If wait is true do the conditional wait for states
        if wait:
            # if not more states in the queue, let's wait for some forks
            while not self._ready_states and not self._killed.value:
                # if a shutdown has been requested then bail
                if self.is_killed():
                    return None  # Cancelled operation
                # If there are no more READY states and no more BUSY states
                # there is no chance we will get any new state so raise
                if not self._busy_states:
                    return None  # There are no states
                # if there are actually some workers busy, wait for state forks
                logger.debug("Waiting for available states")
                self._lock.wait()
        if self._killed.value:
            return None
        # at this point we know there is at least one element
        # and we have exclusive access
        assert self._ready_states
        # make the choice under exclusive access to the shared ready list
        # state_id = self._policy.choice(list(self._ready_states)[0])
        state_id = random.choice(list(self._ready_states))
        # Move from READY to BUSY
        self._ready_states.remove(state_id)
        self._busy_states.append(state_id)
        self._lock.notify_all()
    return self._load(state_id)
@sync
def _revive_state(self, state_id):
    """ Send a BUSY state back to the READY list.

        +--------+       +------+
        | READY  +<------+ BUSY |
        +--------+       +------+
    """
    # Move from BUSY to READY and wake up waiting workers
    self._busy_states.remove(state_id)
    self._ready_states.append(state_id)
    self._lock.notify_all()
@sync
def _terminate_state(self, state_id, delete=False):
    """ Send a BUSY state to the TERMINATED list, or trash it if *delete*.

        +------+       +------------+
        | BUSY +------>+ TERMINATED |
        +---+--+       +------------+
            |
            v
           ###
    """
    # the state must currently be under analysis
    if state_id not in self._busy_states:
        raise Exception("Can not terminate. State is not being analyzed")
    self._busy_states.remove(state_id)

    if delete:
        self._remove(state_id)
    else:
        # add the state_id to the terminated list
        self._terminated_states.append(state_id)

    # wake up everyone waiting for a change in the state lists
    self._lock.notify_all()
@sync
def _kill_state(self, state_id, delete=False):
    """ Send a BUSY state to the KILLED list, or trash it if *delete*.

        +------+       +--------+
        | BUSY +------>+ KILLED |
        +---+--+       +--------+
            |
            v
           ###
    """
    # the state must currently be under analysis
    if state_id not in self._busy_states:
        raise Exception("Can not even kill it. State is not being analyzed")
    self._busy_states.remove(state_id)

    if delete:
        self._remove(state_id)
    else:
        # add the state_id to the killed list
        self._killed_states.append(state_id)

    # wake up everyone waiting for a change in the state lists
    self._lock.notify_all()
@property
@sync
def ready_states(self):
    """
    Iterator over ready states.
    It supports state changes. State changes will be saved back at each iteration.
    The state data change must be done in a loop, e.g. `for state in ready_states: ...`
    as we re-save the state when the generator comes back to the function.
    This means it is not possible to change the state used by Manticore with `states = list(m.ready_states)`.
    """
    for state_id in self._ready_states:
        state = self._load(state_id)
        yield state
        # Re-save the state in case the user changed its data; this runs
        # when the caller's loop advances to the next state
        self._save(state, state_id=state_id)
@property
def running_states(self):
    """ Deprecated alias for :attr:`ready_states`. """
    logger.warning(
        "manticore.running_states is deprecated! (You probably want manticore.ready_states)"
    )
    return self.ready_states
@property
@sync
def terminated_states(self):
    """
    Iterates over the terminated states.
    See also `ready_states`.
    """
    for state_id in self._terminated_states:
        state = self._load(state_id)
        yield state
        # Re-save the state in case the user changed its data
        self._save(state, state_id=state_id)
@property
@sync
@at_not_running
def killed_states(self):
    """
    Iterates over the cancelled/killed states.
    Only allowed when not running.
    See also `ready_states`.
    """
    for state_id in self._killed_states:
        state = self._load(state_id)
        yield state
        # Re-save the state in case the user changed its data
        self._save(state, state_id=state_id)
@property
@sync
@at_not_running
def _all_states(self):
    """ All state ids: READY + TERMINATED + KILLED.

    Only allowed when not running (while running, states can be BUSY).
    """
    return (
        tuple(self._ready_states)
        + tuple(self._terminated_states)
        + tuple(self._killed_states)
    )
@property
@sync
def all_states(self):
    """
    Iterates over the all states (ready and terminated and cancelled)
    It holds a lock so no changes state lists are allowed
    See also `ready_states`.
    """
    for state_id in self._all_states:
        state = self._load(state_id)
        yield state
        # Re-save the state in case the user changed its data
        self._save(state, state_id=state_id)
@sync
def count_states(self):
    """ Total states count (READY + TERMINATED + KILLED; not-running only) """
    return len(self._all_states)
@sync
def count_ready_states(self):
    """ Ready states count """
    return len(self._ready_states)
@sync
def count_busy_states(self):
    """ Busy states count """
    return len(self._busy_states)
@sync
def count_killed_states(self):
    """ Cancelled states count """
    return len(self._killed_states)
@sync
def count_terminated_states(self):
    """ Terminated states count """
    return len(self._terminated_states)
def generate_testcase(self, state, message="test", name="test"):
    """ Write a testcase for *state* to the workspace and let plugins report.

    :param state: the state to dump
    :param message: human-readable reason; defaults to the state's
                    termination reason when available
    :param name: filename prefix for the testcase
    :returns: the created testcase object
    """
    terminated_by = getattr(state, "_terminated_by", None)
    if message == "test" and terminated_by:
        message = str(terminated_by)
    testcase = self._output.testcase(prefix=name)
    with testcase.open_stream("pkl", binary=True) as statef:
        PickleSerializer().serialize(state, statef)
    # Let the plugins generate a state based report
    for p in self.plugins:
        p.generate_testcase(state, testcase, message)
    logger.info("Generated testcase No. %d - %s", testcase.num, message)
    return testcase
@at_not_running
def register_plugin(self, plugin):
    """ Attach *plugin* to this manticore instance and wire up its callbacks.

    Every plugin method named ``<prefix><event>_callback`` (one per valid
    prefixed event) is subscribed to the corresponding event. Afterwards,
    warnings are logged for callback-looking methods that don't follow
    the naming convention.
    """
    # Global enumeration of valid events
    assert isinstance(plugin, Plugin)
    assert plugin not in self.plugins, "Plugin instance already registered"
    assert getattr(plugin, "manticore", None) is None, "Plugin instance already owned"
    plugin.manticore = self
    self.plugins.add(plugin)
    events = Eventful.all_events()
    prefix = Eventful.prefixes
    all_events = [x + y for x, y in itertools.product(prefix, events)]
    # subscribe each <event>_callback method the plugin defines
    for event_name in all_events:
        callback_name = f"{event_name}_callback"
        callback = getattr(plugin, callback_name, None)
        if callback is not None:
            self.subscribe(event_name, callback)
    # Safety checks
    # warn about *_callback methods that don't match any known event
    for callback_name in dir(plugin):
        if callback_name.endswith("_callback"):
            event_name = callback_name[:-9]  # strip "_callback"
            if event_name not in all_events:
                logger.warning(
                    "There is no event named %s for callback on plugin %s",
                    event_name,
                    type(plugin).__name__,
                )
    # warn about methods that look like event handlers but are misnamed
    for event_name in all_events:
        for plugin_method_name in dir(plugin):
            if event_name in plugin_method_name:
                if not plugin_method_name.endswith("_callback"):
                    if (
                        plugin_method_name.startswith("on_")
                        or plugin_method_name.startswith("will_")
                        or plugin_method_name.startswith("did_")
                    ):
                        logger.warning(
                            "Plugin methods named '%s()' should end with '_callback' on plugin %s",
                            plugin_method_name,
                            type(plugin).__name__,
                        )
                if (
                    plugin_method_name.endswith("_callback")
                    and not plugin_method_name.startswith("on_")
                    and not plugin_method_name.startswith("will_")
                    and not plugin_method_name.startswith("did_")
                ):
                    logger.warning(
                        "Plugin methods named '%s()' should start with 'on_', 'will_' or 'did_' on plugin %s",
                        plugin_method_name,
                        type(plugin).__name__,
                    )
    plugin.on_register()
@at_not_running
def unregister_plugin(self, plugin):
""" Removes a plugin from manticore.
No events should be sent to it after
"""
assert plugin in self.plugins, "Plugin instance not registered"
plugin.on_unregister()
self.plugins.remove(plugin)
plugin.manticore = None
def subscribe(self, name, callback):
""" Register a callback to an event"""
from types import MethodType
| |
# <gh_stars>1-10
import gc
import os
import math
import random
import warnings
import albumentations as A
import cv2
import numpy as np
import pandas as pd
import timm
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as torchdata
from pathlib import Path
from typing import List
from albumentations.pytorch import ToTensorV2
from catalyst.core import Callback, CallbackOrder, IRunner
from catalyst.dl import Runner, SupervisedRunner
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
from sklearn import model_selection
from sklearn import metrics
from torch.optim.optimizer import Optimizer
from tqdm import tqdm
# =================================================
# Config #
# =================================================
class CFG:
    """Static experiment configuration: globals, data paths, dataset labels,
    dataloader settings, CV split, model, loss, optimizer and scheduler."""
    ######################
    # Globals #
    ######################
    seed = 1213
    epochs = 55
    train = True
    oof = True
    inference = True
    folds = [0, 1, 2, 3, 4]
    img_size = 480
    main_metric = "epoch_score"
    minimize_metric = False
    ######################
    # Data #
    ######################
    train_datadir = Path("data/Training_Set/Training")
    test_datadir = Path("data/Evaluation_Set")
    kaggledr_datadir = Path("data/KaggleDR/resized_train_cropped/resized_train_cropped")
    train_csv = "data/Training_Set/Camera_annotated.csv"
    test_csv = "data/Evaluate_camera_annotated.csv"
    ######################
    # Dataset #
    ######################
    # 29 multi-label disease columns; must stay in sync with `num_classes`.
    target_columns = [
        "Disease_Risk", "DR", "ARMD", "MH", "DN",
        "MYA", "BRVO", "TSLN", "ERM", "LS", "MS",
        "CSR", "ODC", "CRVO", "TV", "AH", "ODP",
        "ODE", "ST", "AION", "PT", "RT", "RS", "CRS",
        "EDN", "RPEC", "MHL", "RP", "OTHER"
    ]
    ######################
    # Loaders #
    ######################
    loader_params = {
        "train": {
            "batch_size": 64,
            "num_workers": 10,
            "shuffle": True
        },
        "valid": {
            "batch_size": 64,
            "num_workers": 10,
            "shuffle": False
        },
        "test": {
            "batch_size": 64,
            "num_workers": 10,
            "shuffle": False
        }
    }
    ######################
    # Split #
    ######################
    split = "MultilabelStratifiedKFold"
    split_params = {
        "n_splits": 5,
        "shuffle": True,
        "random_state": 1213
    }
    ######################
    # Model #
    ######################
    base_model_name = "tf_efficientnet_b0_ns"
    pooling = "GeM"
    pretrained = True
    num_classes = 29
    ######################
    # Criterion #
    ######################
    loss_name = "BCEFocalLoss"
    loss_params: dict = {}
    ######################
    # Optimizer #
    ######################
    optimizer_name = "SAM"
    optimizer_params = {
        "lr": 0.001
    }
    # Inner optimizer used when optimizer_name == "SAM".
    # (was assigned twice with the same value; the duplicate was removed)
    base_optimizer = "Adam"
    ######################
    # Scheduler #
    ######################
    scheduler_name = "CosineAnnealingLR"
    scheduler_params = {
        "T_max": 10
    }
# =================================================
# Utilities #
# =================================================
def set_seed(seed=42):
    """Seed every RNG in play (python, numpy, torch CPU/CUDA) for reproducibility."""
    os.environ["PYTHONHASHSEED"] = str(seed)
    for seeder in (random.seed,
                   np.random.seed,
                   torch.manual_seed,
                   torch.cuda.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed)
    # Trade cuDNN autotuning speed for deterministic kernels.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def get_device() -> torch.device:
    """Return the CUDA device when one is available, otherwise the CPU."""
    if torch.cuda.is_available():
        return torch.device("cuda")
    return torch.device("cpu")
def init_logger(log_file='train.log'):
    """Create (or reconfigure) the module logger, echoing to stderr and *log_file*.

    BUG FIX: the original appended two fresh handlers on every call, so
    repeated calls produced duplicated log lines; stale handlers are now
    removed first.
    """
    from logging import getLogger, INFO, FileHandler, Formatter, StreamHandler
    logger = getLogger(__name__)
    logger.setLevel(INFO)
    # Drop handlers left over from earlier init_logger calls.
    for old_handler in list(logger.handlers):
        logger.removeHandler(old_handler)
        old_handler.close()
    handler1 = StreamHandler()
    handler1.setFormatter(Formatter("%(message)s"))
    handler2 = FileHandler(filename=log_file)
    handler2.setFormatter(Formatter("%(message)s"))
    logger.addHandler(handler1)
    logger.addHandler(handler2)
    return logger
def prepare_model_fore_inference(model, path: Path):
    """Load the checkpoint at *path* into *model* and switch it to eval mode.

    The checkpoint is expected to hold the weights under "model_state_dict".
    """
    map_location = None if torch.cuda.is_available() else "cpu"
    checkpoint = torch.load(path, map_location=map_location)
    model.load_state_dict(checkpoint["model_state_dict"])
    model.eval()
    return model
# =================================================
# Split #
# =================================================
def get_split():
    """Instantiate the cross-validation splitter named by ``CFG.split``.

    Uses the matching class from sklearn's ``model_selection`` when it
    exists there, otherwise falls back to iterstrat's
    MultilabelStratifiedKFold.
    """
    if hasattr(model_selection, CFG.split):
        # BUG FIX: the splitter must be configured with CFG.split_params;
        # the original passed CFG.scheduler_params (CosineAnnealingLR args),
        # which would crash any sklearn splitter with an unexpected kwarg.
        return model_selection.__getattribute__(CFG.split)(**CFG.split_params)
    else:
        return MultilabelStratifiedKFold(**CFG.split_params)
# =================================================
# Dataset #
# =================================================
def crop_image_from_gray(image: np.ndarray, threshold: int = 7):
    """Crop away the dark border of a (fundus) image.

    Rows and columns whose intensity (grayscale for RGB input) never exceeds
    *threshold* are removed. For RGB input, returns the image unchanged when
    cropping would remove everything. Arrays with other ndim return None,
    matching the original behavior.
    """
    if image.ndim == 2:
        mask = image > threshold
        return image[np.ix_(mask.any(1), mask.any(0))]
    elif image.ndim == 3:
        gray_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        mask = gray_image > threshold
        rows = mask.any(1)
        cols = mask.any(0)
        if not rows.any():
            # Everything is darker than the threshold: nothing to keep.
            return image
        # np.ix_ indexes the first two axes and keeps the channel axis, so
        # all three channels are cropped in one operation (the original
        # cropped each channel separately and re-stacked them).
        return image[np.ix_(rows, cols)]
def center_crop(image: np.ndarray, ar: float):
    """Vertically crop *image* (around its center) to a height of ``ar * width``."""
    height, width, _channels = image.shape
    cropped_height = int(ar * width)
    top = (height - cropped_height) // 2
    return image[top:top + cropped_height, :, :]
class TrainDataset(torchdata.Dataset):
    """Training dataset: loads ``<ID>.png`` images from *datadir*, optionally
    border-crops them (plus a camera-dependent center crop), applies
    *transform*, and returns the multi-label target vector."""

    def __init__(self, df: pd.DataFrame, datadir: Path, target_columns: list, transform=None,
                 center_crop=True):
        self.df = df
        self.filenames = df["ID"].values
        self.datadir = datadir
        self.target_columns = target_columns
        self.labels = df[target_columns].values
        self.camera = df["camera"].values
        self.transform = transform
        self.center_crop = center_crop

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, index: int):
        filename = self.filenames[index]
        # BUG FIX: the path must be built from the row's ID; the previous
        # literal "(unknown).png" pointed every sample at the same file.
        path = self.datadir / f"{filename}.png"
        image = cv2.cvtColor(cv2.imread(str(path)), cv2.COLOR_BGR2RGB)
        camera = self.camera[index]
        if self.center_crop:
            image = crop_image_from_gray(image)
            # Camera "C2" already produces the target aspect ratio.
            if camera != "C2":
                image = center_crop(image, ar=0.834)
        if self.transform:
            augmented = self.transform(image=image)
            image = augmented["image"]
        label = torch.tensor(self.labels[index]).float()
        return {
            "ID": filename,
            "image": image,
            "targets": label
        }
class TestDataset(torchdata.Dataset):
    """Inference dataset: loads ``<ID>.png`` images with the same cropping and
    transform pipeline as training, but returns no labels."""

    def __init__(self, df: pd.DataFrame, datadir: Path, transform=None, center_crop=True):
        self.df = df
        self.filenames = df["ID"].values
        self.camera = df["camera"].values
        self.datadir = datadir
        self.transform = transform
        self.center_crop = center_crop

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, index: int):
        filename = self.filenames[index]
        # BUG FIX: build the path from the row's ID; the previous literal
        # "(unknown).png" pointed every sample at the same file.
        path = self.datadir / f"{filename}.png"
        image = cv2.cvtColor(cv2.imread(str(path)), cv2.COLOR_BGR2RGB)
        camera = self.camera[index]
        if self.center_crop:
            image = crop_image_from_gray(image)
            # Camera "C2" already produces the target aspect ratio.
            if camera != "C2":
                image = center_crop(image, ar=0.834)
        if self.transform:
            augmented = self.transform(image=image)
            image = augmented["image"]
        return {
            "ID": filename,
            "image": image
        }
# =================================================
# Transforms #
# =================================================
def get_transforms(img_size: int, mode="train"):
    """Return the albumentations pipeline for *mode* ("train"/"valid"/"test").

    Train mode adds light geometric and photometric augmentation; valid and
    test modes only resize + normalize (they are intentionally identical, so
    the two duplicate branches were merged).
    """
    # ImageNet normalization. BUG FIX: the blue-channel mean was mistyped as
    # 0.4406; the canonical ImageNet channel means are (0.485, 0.456, 0.406).
    normalize = A.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225],
        always_apply=True)
    if mode == "train":
        return A.Compose([
            A.RandomResizedCrop(
                height=img_size,
                width=img_size,
                scale=(0.9, 1.1),
                ratio=(0.9, 1.1),
                p=0.5),
            A.ShiftScaleRotate(
                shift_limit=0.1,
                scale_limit=0.1,
                rotate_limit=180,
                border_mode=cv2.BORDER_CONSTANT,
                value=0,
                mask_value=0,
                p=0.5),
            A.RandomBrightnessContrast(
                brightness_limit=0.1, contrast_limit=0.1, p=0.5),
            A.HueSaturationValue(
                hue_shift_limit=5,
                sat_shift_limit=5,
                val_shift_limit=5,
                p=0.5),
            A.Resize(img_size, img_size),
            normalize,
            ToTensorV2()
        ])
    # "valid" and "test" share the same deterministic pipeline.
    return A.Compose([
        A.Resize(img_size, img_size),
        normalize,
        ToTensorV2()
    ])
# =================================================
# Model #
# =================================================
def init_layer(layer):
    """Xavier-initialize *layer*'s weight and zero its bias when it has one."""
    nn.init.xavier_uniform_(layer.weight)
    bias = getattr(layer, "bias", None)
    if bias is not None:
        bias.data.fill_(0.)
def gem(x: torch.Tensor, p=3, eps=1e-6):
    """Generalized-mean pooling over the spatial dims of an NCHW tensor."""
    clamped = x.clamp(min=eps)
    pooled = F.avg_pool2d(clamped.pow(p), (x.size(-2), x.size(-1)))
    return pooled.pow(1. / p)
class GeM(nn.Module):
    """Generalized-mean pooling layer with a learnable exponent ``p``."""

    def __init__(self, p=3, eps=1e-6):
        super().__init__()
        # The pooling exponent is learned jointly with the network.
        self.p = nn.Parameter(torch.ones(1) * p)
        self.eps = eps

    def forward(self, x):
        return gem(x, p=self.p, eps=self.eps)

    def __repr__(self):
        p_value = self.p.data.tolist()[0]
        return f"{self.__class__.__name__}(p={p_value:.4f}, eps={self.eps})"
class TimmModel(nn.Module):
    """timm backbone with its classification head replaced for *num_classes*
    outputs and, optionally, GeM pooling swapped in."""

    def __init__(self, base_model_name="tf_efficientnet_b0_ns", pooling="GeM", pretrained=True, num_classes=24):
        super().__init__()
        self.base_model = timm.create_model(base_model_name, pretrained=pretrained)
        # Locate the head, whatever this backbone calls it, and remember its
        # attribute name so init_layer() can find the replacement later.
        if hasattr(self.base_model, "fc"):
            self._head_attr = "fc"
        elif hasattr(self.base_model, "classifier"):
            self._head_attr = "classifier"
        else:
            raise NotImplementedError
        in_features = getattr(self.base_model, self._head_attr).in_features
        setattr(self.base_model, self._head_attr, nn.Linear(in_features, num_classes))
        if pooling == "GeM":
            self.base_model.avg_pool = GeM()
        self.init_layer()

    def init_layer(self):
        # BUG FIX: the original always re-initialized `.classifier`, which
        # raised AttributeError for backbones whose head is named `.fc`.
        init_layer(getattr(self.base_model, self._head_attr))

    def forward(self, x):
        return self.base_model(x)
# =================================================
# Optimizer and Scheduler #
# =================================================
version_higher = (torch.__version__ >= "1.5.0")
class AdaBelief(Optimizer):
r"""Implements AdaBelief algorithm. Modified from Adam in PyTorch
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
weight_decouple (boolean, optional): ( default: False) If set as True, then
the optimizer uses decoupled weight decay as in AdamW
fixed_decay (boolean, optional): (default: False) This is used when weight_decouple
is set as True.
When fixed_decay == True, the weight decay is performed as
$W_{new} = W_{old} - W_{old} \times decay$.
When fixed_decay == False, the weight decay is performed as
$W_{new} = W_{old} - W_{old} \times decay \times lr$. Note that in this case, the
weight decay ratio decreases with learning rate (lr).
rectify (boolean, optional): (default: False) If set as True, then perform the rectified
update similar to RAdam
reference: AdaBelief Optimizer, adapting stepsizes by the belief in observed gradients
NeurIPS 2020 Spotlight
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False, weight_decouple=False, fixed_decay=False, rectify=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(AdaBelief, self).__init__(params, defaults)
self.weight_decouple = weight_decouple
self.rectify = rectify
self.fixed_decay = fixed_decay
if self.weight_decouple:
print('Weight decoupling enabled in AdaBelief')
if self.fixed_decay:
print('Weight decay fixed')
if self.rectify:
print('Rectification enabled in AdaBelief')
if amsgrad:
print('AMS enabled in AdaBelief')
def __setstate__(self, state):
super(AdaBelief, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def reset(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
amsgrad = group['amsgrad']
# State initialization
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(
p.data,
memory_format=torch.preserve_format) if version_higher else torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_var'] = torch.zeros_like(
p.data,
memory_format=torch.preserve_format) if version_higher else torch.zeros_like(p.data)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_var'] = torch.zeros_like(
p.data,
memory_format=torch.preserve_format) if version_higher else torch.zeros_like(p.data)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
| |
or b_on_gpu:
gpu_sm = GpuSoftmaxWithBias()(gpu_from_host(x), gpu_from_host(b))
return [host_from_gpu(gpu_sm)]
return False
#### Convolution, maxpooling
from theano.tensor.nnet import conv
@register_opt()
@local_optimizer([])
def local_gpu_conv(node):
    """
    gpu_from_host(conv) -> gpu_conv(gpu_from_host)
    conv(host_from_gpu) -> host_from_gpu(gpu_conv)
    """
    def GpuConvOp_from_ConvOp(op):
        # Translate a CPU ConvOp into the equivalent GpuConv, or return None
        # when the CPU op uses features the GPU op does not implement.
        logical_img_hw=None
        if op.imshp_logical is not None:
            logical_img_hw=op.imshp_logical[1:3]
            if logical_img_hw != op.imshp[1:3]:
                # this case is not implemented
                return None
        if op.kshp_logical is not None and op.kshp_logical != op.kshp:
            return None
        #print op.kshp, op.imshp[1:3]
        #print op.kshp_logical, logical_img_hw
        ret = GpuConv(border_mode=op.out_mode,
                subsample=(op.dx, op.dy),
                logical_img_hw=logical_img_hw,
                logical_kern_hw=op.kshp_logical,
                logical_kern_align_top=op.kshp_logical_top_aligned,
                kshp=op.kshp,
                version=op.version,
                verbose=op.verbose,
                imshp=op.imshp,
                )
        #HACK to print the number of MFlops in the profiler output.
        if hasattr(op,'flops'):
            ret.flops=op.flops
        return ret
    if node.op == gpu_from_host:
        #gpu_from_host(conv) -> gpu_conv(gpu_from_host)
        host_input = node.inputs[0]
        if host_input.owner and isinstance(host_input.owner.op, conv.ConvOp):
            gpu_conv = GpuConvOp_from_ConvOp(host_input.owner.op)
            if gpu_conv is None:
                return
            img, kern = host_input.owner.inputs
            # in some cases the ConvOp broadcasts the last 2 dimensions
            # differently than the gpu ConvOp; patternbroadcast hides that
            return [tensor.patternbroadcast(gpu_conv(gpu_from_host(img), gpu_from_host(kern)),
                                            node.outputs[0].broadcastable)]
    if isinstance(node.op, conv.ConvOp):
        #conv(host_from_gpu) -> host_from_gpu(gpu_conv)
        # One input already on the GPU is enough to justify moving the conv.
        img, kern = node.inputs
        img_on_gpu = (img.owner and img.owner.op == host_from_gpu)
        kern_on_gpu = (kern.owner and kern.owner.op == host_from_gpu)
        if img_on_gpu or kern_on_gpu:
            gpu_conv = GpuConvOp_from_ConvOp(node.op)
            if gpu_conv is None:
                return
            # in some cases the ConvOp broadcasts the last 2 dimensions
            # differently than the gpu ConvOp; patternbroadcast hides that
            return [tensor.patternbroadcast(host_from_gpu(gpu_conv(gpu_from_host(img),
                                                                   gpu_from_host(kern))),
                                            node.outputs[0].broadcastable)]
import theano.tensor.signal.downsample as downsample
@register_opt()
@local_optimizer([])
def local_gpu_downsample_factor_max(node):
    # Move max-pooling to the GPU when its input already lives there:
    #   downsample(host_from_gpu(x)) -> host_from_gpu(gpu_downsample(x))
    if isinstance(node.op, downsample.DownsampleFactorMax):
        x, = node.inputs
        if (x.owner and x.owner.op == host_from_gpu):
            gpu_ds = GpuDownsampleFactorMax(node.op.ds, node.op.ignore_border)
            return [host_from_gpu(gpu_ds(x.owner.inputs[0]))]
@register_opt()
@local_optimizer([])
def local_gpu_downsample_factor_max_grad(node):
    # Move the max-pooling gradient to the GPU when its first input is
    # already there; z and gz are explicitly transferred alongside it.
    if isinstance(node.op, downsample.DownsampleFactorMaxGrad):
        x,z,gz = node.inputs
        if (x.owner and x.owner.op == host_from_gpu):
            gpu_ds_grad = GpuDownsampleFactorMaxGrad(node.op.ds, node.op.ignore_border)
            return [host_from_gpu(gpu_ds_grad(x.owner.inputs[0], gpu_from_host(z), gpu_from_host(gz)))]
from theano.sandbox.cuda.basic_ops import gpu_join
@register_opt()
@local_optimizer([])
def local_gpu_join(node):
    """
    Inspired by the opt for convop.
    Very loose notation follows.
    Subgraphs concerned first look like
    [array of HostTensor] -> HostToGpu -> GpuToHost
    -> Join -> HostToGpu -> GpuToHost
    First we apply this Opt:
    join(host_from_gpu) -> host_from_gpu(gpu_join)
    then, as an intermediate result, there should be
    host_from_gpu(gpu_join) -> HostToGpu -> GpuToHost
    this unnecessary GpuToHost -> HostToGpu should be removed
    by other opts, leaving us with
    host_from_gpu(gpu_join)
    For intermediate places in the graph not covered by the first opt, the following could be useful:
    gpu_from_host(join) -> gpu_join(gpu_from_host)
    not implemented yet.
    """
    if isinstance(node.op, tensor.Join):
        # optimizing this case:
        # join(host_from_gpu) -> host_from_gpu(gpu_join)
        # inputs[0] is the join axis; the joined tensors start at inputs[1]
        axis_and_tensors = node.inputs
        matches = [(not t.owner is None and t.owner.op == host_from_gpu) or
            isinstance(t, gof.Constant) for t in axis_and_tensors[1:]]
        # only rewrite if every joined tensor is host_from_gpu'ified
        # (constants are acceptable too: they can be transferred for free)
        if numpy.all(matches):
            # the extra gpu_from_host introduced here will
            # be removed by further optimizations
            new_tensors = [gpu_from_host(t) for t in axis_and_tensors[1:]]
            new_a_and_t = [axis_and_tensors[0]]+new_tensors
            replacement_node = host_from_gpu(gpu_join(*new_a_and_t))
            return [replacement_node]
#Commented out because it can result in shared = dimshuffle(gemm_inplace(dimshuffle(shared)))
#which causes memory leaks (long term fix is to make the above not leak memory)
@local_optimizer([gpu_gemm_no_inplace])
def local_inplace_gemm(node):
    """Swap the non-destructive GPU GEMM for its in-place counterpart."""
    if node.op != gpu_gemm_no_inplace:
        return None
    return [gpu_gemm_inplace(*node.inputs)]
@local_optimizer([gpu_gemv_no_inplace])
def local_inplace_gemv(node):
    """Swap the non-destructive GPU GEMV for its in-place counterpart."""
    if node.op != gpu_gemv_no_inplace:
        return None
    return [gpu_gemv_inplace(*node.inputs)]
@local_optimizer([gpu_ger_no_inplace])
def local_inplace_ger(node):
    """Swap the non-destructive GPU GER for its in-place counterpart.

    BUG FIX: the tracking list given to @local_optimizer previously named
    gpu_gemm_no_inplace (a copy/paste slip from local_inplace_gemm), so this
    optimizer was keyed on the wrong op.
    """
    if node.op == gpu_ger_no_inplace:
        return [gpu_ger_inplace(*node.inputs)]
# After destroyhandler is in but before we try to make elemwise things inplace
# Try to make gpu gemm inplace
# Also, need to make the gemm optimisation(step 70) happen before the fusion of
# elemwise(step 71)
# Registered at position 70.0 so it runs after the destroy handler and
# before the GpuElemwise fusion registered at 71.00 below.
optdb.register('InplaceGpuBlasOpt',
               EquilibriumOptimizer([local_inplace_gemm,
                                     local_inplace_gemv,
                                     local_inplace_ger,
                                     ],
                                    failure_callback=EquilibriumOptimizer.warn_inplace,
                                    max_use_ratio=5),
               70.0, 'fast_run', 'inplace', 'gpu')
def get_device_type_sizes():
    """
    :return:(gpu ptr size, cpu ptr size, int sizes(gpu and cpu))
    :return type: tuple
    """
    # Memoize: the probe below queries the CUDA runtime, so do it only once.
    if hasattr(get_device_type_sizes, 'rval'):
        return get_device_type_sizes.rval
    # Conservative 64-bit defaults, used when the probe fails.
    gpu_ptr_size = 8
    cpu_ptr_size = 8
    int_size = 8
    try:
        t = cuda_ndarray.cuda_ndarray.ptr_int_size()
        gpu_ptr_size, cpu_ptr_size, int_size, gpu_int_size = t
        assert int_size == gpu_int_size
        del gpu_int_size
        del t
    except Exception as e:
        # BUG FIX (compat): `except Exception, e` is Python-2-only syntax;
        # `as e` behaves identically and is accepted by Python 2.6+ and 3.
        _logger.warning(("Optimization Warning: "
                "Got the following error, but we can ignore it. "
                "This could cause less GpuElemwise fused together.\n"
                "%s") % e)
    # locals() snapshots the sizes by name; callers index the result dict,
    # e.g. get_device_type_sizes()['int_size'].
    rval = get_device_type_sizes.rval = locals()
    return rval
def max_inputs_to_GpuElemwise(node):
    """
    return the maximum number of inputs this GpuElemwise Apply node can
    accept.
    This is needed as currently there is a limit of 256 bytes of
    parameter for the gpu function on devices with compute capability
    1.x. There is a 4 kbyte limit on devices with compute capability
    2.x (not used).
    This measures the number of parameters we put in our GPU function and
    computes the maximum number of inputs that respect the 256 byte
    limit.
    """
    type_sizes = get_device_type_sizes()
    int_size = type_sizes['int_size']
    gpu_ptr_size = type_sizes['gpu_ptr_size']
    argument_limit = 232 # some bytes are used for block and thread coords etc.
    ndim = node.inputs[0].type.ndim
    # Mandatory kernel parameters: element count, the shape, and one
    # (pointer + per-dimension strides) pair per output.
    size_param_mandatory = int_size #for numels
    size_param_mandatory += int_size * ndim # for the shape
    size_param_mandatory += sum((gpu_ptr_size + int_size * ndim)
                                for i in node.outputs)
    # Each input costs one pointer plus per-dimension stride ints.
    nb_bytes_avail = argument_limit - size_param_mandatory
    nb_bytes_per_inputs = (ndim * int_size) + gpu_ptr_size
    max_nb_inputs = nb_bytes_avail // nb_bytes_per_inputs
    # There is a case where this algorithm doesn't work. Is this related to
    # the order of the parameters to the gpu function?
    if node.inputs[0].type.ndim == 1 and max_nb_inputs > 14:
        return 14
    return max_nb_inputs
def split_huge_add_or_mul(node):
    """
    For add and mul, it can happen that we have too much input
    That will make nvcc fail compilation of our current code.
    We don't want node in the graph that can't execute
    as this break DebugMode.
    This should not happen for other GpuElemwise as their is only the fusion
    that can generate op with too much input and it check for that.
    """
    if node.op.scalar_op in (scal.add, scal.mul):
        max_nb_inputs = max_inputs_to_GpuElemwise(node)
        # Give up when even two inputs per op would not fit.
        if max_nb_inputs<=1 and len(node.inputs)>1:
            return False
        # Repeatedly fold groups of at most max_nb_inputs inputs into
        # intermediate add/mul ops until the top node is small enough.
        while len(node.inputs)>max_nb_inputs:
            inner_op = []
            for i in xrange(0,len(node.inputs),max_nb_inputs):
                inner_op.append(node.op(*node.inputs[i:i+max_nb_inputs]))
            node = node.op(*inner_op).owner
    return node
#GpuElemwise fusion
# Build a fusion pass specialized to GpuElemwise; max_inputs_to_GpuElemwise
# caps how many inputs a fused node may take (256-byte kernel-arg limit).
gpu_local_elemwise_fusion = tensor.opt.local_elemwise_fusion_op(
    GpuElemwise,
    max_inputs_to_GpuElemwise)
if config.gpu.local_elemwise_fusion:
    _logger.debug("enabling optimization fusion of gpu elemwise in fast_run")
    optdb.register('gpu_elemwise_fusion',
                   tensor.opt.FusionOptimizer(gpu_local_elemwise_fusion),
                   71.00, 'fast_run', 'fusion',
                   'local_elemwise_fusion','gpu')
else:
    # Still registered, but without the 'fast_run' tag so it only runs when
    # the fusion tags are requested explicitly.
    _logger.debug("not enabling optimization fusion of gpu elemwise in fast_run")
    optdb.register('gpu_elemwise_fusion',
                   tensor.opt.FusionOptimizer(gpu_local_elemwise_fusion),
                   71.00, 'fusion', 'local_elemwise_fusion')
#GpuElemwise inplace
# Runs at 75, after fusion (71), so fused nodes can be made destructive too.
gpu_inplace_elemwise_optimizer = tensor.opt.inplace_elemwise_optimizer_op(
    GpuElemwise)
optdb.register('gpu_inplace_elemwise_opt', gpu_inplace_elemwise_optimizer, 75,
               'fast_run', 'inplace','gpu_inplace', 'gpu')
@register_opt()
@local_optimizer([tensor.Alloc])
def local_gpualloc(node):
    # Move an Alloc to the GPU when its filled value comes from the GPU, or
    # when every client of the result will move it to the GPU anyway.
    replace=False
    if node.op == tensor.alloc:
        if node.inputs[0].owner and node.inputs[0].owner.op==host_from_gpu:#if the input was on the gpu
            replace = True
        if all([c!='output' and c.op == gpu_from_host for c,idx in node.outputs[0].clients]):#if all clients are on gpu
            replace=True
        if all([c!='output' and c.op == tensor.join and all([i.owner and i.owner.op in [host_from_gpu,tensor.alloc] for i in c.inputs[1:]]) for c,idx in node.outputs[0].clients]):#if the client is a join with input on gpu or alloc
            replace=True
    if replace:
        val = node.inputs[0]
        shp = node.inputs[1:]
        old_out = node.outputs[0]
        # Pad the value to the output rank before handing it to gpu_alloc.
        val2 = tensor.shape_padleft(val, len(shp) - val.ndim)
        new_out = host_from_gpu(gpu_alloc(val2, *shp))
        # Sigh. it's an annoying thing about theano
        # that you can't add information to the graph.
        # If for some reason it has come to light that
        # one of the dimensions is broadcastable, we have to hide that
        # or the optimization won't go through.
        if new_out.type != old_out.type:
            assert new_out.type.ndim == old_out.type.ndim
            assert new_out.type.dtype == old_out.type.dtype
            # it seems to have happened that new_out has some broadcastable
            # dimensions that old_out did not have
            for b_old,b_new in zip(old_out.type.broadcastable, new_out.type.broadcastable):
                assert b_new or (not b_old)
            new_out = tensor.patternbroadcast(new_out, old_out.broadcastable)
        #if old_out.type != new_out.type:
            #import pdb; pdb.set_trace()
        return [new_out]
def safe_to_gpu(x):
    """Transfer *x* to the GPU when it is a float32 TensorType variable;
    anything else is returned unchanged."""
    is_gpu_compatible = (isinstance(x.type, tensor.TensorType)
                         and x.type.dtype == 'float32')
    return gpu_from_host(x) if is_gpu_compatible else x
def safe_to_cpu(x):
    """Bring *x* back to the host when it lives on the GPU; anything else is
    returned unchanged."""
    return host_from_gpu(x) if isinstance(x.type, CudaNdarrayType) else x
def gpu_safe_new(x, tag = ''):
    """
    Internal function that constructs a new variable from x with the same
    type, but with a different name ( old name + tag). This function is used
    by gradient, or the R-op to construct new variables for the inputs of
    the inner graph such that there is no interference between the original
    graph and the newly constructed graph.
    """
    nw_name = x.name + tag if getattr(x, 'name', None) is not None else None
    # Constants carry their value, so a clone is enough (no rename needed).
    if isinstance(x, theano.Constant):
        return x.clone()
    fresh = x.type()
    fresh.name = nw_name
    return fresh
def gpu_reconstruct_graph(inputs, outputs, tag = None):
"""
Different interface to clone, that allows you to pass inputs.
Compared to clone, this method always replaces the inputs with
new variables of the same type, and returns those ( in the same
order as the original inputs).
"""
if tag is None:
tag = ''
nw_inputs = [gpu_safe_new(x,tag) for x in inputs]
givens = {}
for nw_x, x in zip(nw_inputs, inputs):
givens[x] | |
def_num=61,
scale=1000,
units='m/s',
),
62: Field(
name='max_pos_vertical_speed',
type=BASE_TYPES[0x83], # sint16
def_num=62,
scale=1000,
units='m/s',
),
63: Field(
name='max_neg_vertical_speed',
type=BASE_TYPES[0x83], # sint16
def_num=63,
scale=1000,
units='m/s',
),
64: Field(
name='min_heart_rate',
type=BASE_TYPES[0x02], # uint8
def_num=64,
units='bpm',
),
65: Field(
name='time_in_hr_zone',
type=BASE_TYPES[0x86], # uint32
def_num=65,
scale=1000,
units='s',
),
66: Field(
name='time_in_speed_zone',
type=BASE_TYPES[0x86], # uint32
def_num=66,
scale=1000,
units='s',
),
67: Field(
name='time_in_cadence_zone',
type=BASE_TYPES[0x86], # uint32
def_num=67,
scale=1000,
units='s',
),
68: Field(
name='time_in_power_zone',
type=BASE_TYPES[0x86], # uint32
def_num=68,
scale=1000,
units='s',
),
69: Field(
name='avg_lap_time',
type=BASE_TYPES[0x86], # uint32
def_num=69,
scale=1000,
units='s',
),
70: Field(
name='best_lap_index',
type=BASE_TYPES[0x84], # uint16
def_num=70,
),
71: Field(
name='min_altitude',
type=BASE_TYPES[0x84], # uint16
def_num=71,
scale=5,
offset=500,
units='m',
components=(
ComponentField(
name='enhanced_min_altitude',
def_num=127,
scale=5,
offset=500,
units='m',
accumulate=False,
bits=16,
bit_offset=0,
),
),
),
82: Field(
name='player_score',
type=BASE_TYPES[0x84], # uint16
def_num=82,
),
83: Field(
name='opponent_score',
type=BASE_TYPES[0x84], # uint16
def_num=83,
),
84: Field(
name='opponent_name',
type=BASE_TYPES[0x07], # string
def_num=84,
),
85: Field( # stroke_type enum used as the index
name='stroke_count',
type=BASE_TYPES[0x84], # uint16
def_num=85,
units='counts',
),
86: Field( # zone number used as the index
name='zone_count',
type=BASE_TYPES[0x84], # uint16
def_num=86,
units='counts',
),
87: Field(
name='max_ball_speed',
type=BASE_TYPES[0x84], # uint16
def_num=87,
scale=100,
units='m/s',
),
88: Field(
name='avg_ball_speed',
type=BASE_TYPES[0x84], # uint16
def_num=88,
scale=100,
units='m/s',
),
89: Field(
name='avg_vertical_oscillation',
type=BASE_TYPES[0x84], # uint16
def_num=89,
scale=10,
units='mm',
),
90: Field(
name='avg_stance_time_percent',
type=BASE_TYPES[0x84], # uint16
def_num=90,
scale=100,
units='percent',
),
91: Field(
name='avg_stance_time',
type=BASE_TYPES[0x84], # uint16
def_num=91,
scale=10,
units='ms',
),
92: Field( # fractional part of the avg_cadence
name='avg_fractional_cadence',
type=BASE_TYPES[0x02], # uint8
def_num=92,
scale=128,
units='rpm',
),
93: Field( # fractional part of the max_cadence
name='max_fractional_cadence',
type=BASE_TYPES[0x02], # uint8
def_num=93,
scale=128,
units='rpm',
),
94: Field( # fractional part of the total_cycles
name='total_fractional_cycles',
type=BASE_TYPES[0x02], # uint8
def_num=94,
scale=128,
units='cycles',
),
95: Field( # Avg saturated and unsaturated hemoglobin
name='avg_total_hemoglobin_conc',
type=BASE_TYPES[0x84], # uint16
def_num=95,
scale=100,
units='g/dL',
),
96: Field( # Min saturated and unsaturated hemoglobin
name='min_total_hemoglobin_conc',
type=BASE_TYPES[0x84], # uint16
def_num=96,
scale=100,
units='g/dL',
),
97: Field( # Max saturated and unsaturated hemoglobin
name='max_total_hemoglobin_conc',
type=BASE_TYPES[0x84], # uint16
def_num=97,
scale=100,
units='g/dL',
),
98: Field( # Avg percentage of hemoglobin saturated with oxygen
name='avg_saturated_hemoglobin_percent',
type=BASE_TYPES[0x84], # uint16
def_num=98,
scale=10,
units='%',
),
99: Field( # Min percentage of hemoglobin saturated with oxygen
name='min_saturated_hemoglobin_percent',
type=BASE_TYPES[0x84], # uint16
def_num=99,
scale=10,
units='%',
),
100: Field( # Max percentage of hemoglobin saturated with oxygen
name='max_saturated_hemoglobin_percent',
type=BASE_TYPES[0x84], # uint16
def_num=100,
scale=10,
units='%',
),
101: Field(
name='avg_left_torque_effectiveness',
type=BASE_TYPES[0x02], # uint8
def_num=101,
scale=2,
units='percent',
),
102: Field(
name='avg_right_torque_effectiveness',
type=BASE_TYPES[0x02], # uint8
def_num=102,
scale=2,
units='percent',
),
103: Field(
name='avg_left_pedal_smoothness',
type=BASE_TYPES[0x02], # uint8
def_num=103,
scale=2,
units='percent',
),
104: Field(
name='avg_right_pedal_smoothness',
type=BASE_TYPES[0x02], # uint8
def_num=104,
scale=2,
units='percent',
),
105: Field(
name='avg_combined_pedal_smoothness',
type=BASE_TYPES[0x02], # uint8
def_num=105,
scale=2,
units='percent',
),
111: Field(
name='sport_index',
type=BASE_TYPES[0x02], # uint8
def_num=111,
),
112: Field( # Total time spend in the standing position
name='time_standing',
type=BASE_TYPES[0x86], # uint32
def_num=112,
scale=1000,
units='s',
),
113: Field( # Number of transitions to the standing state
name='stand_count',
type=BASE_TYPES[0x84], # uint16
def_num=113,
),
114: Field( # Average platform center offset Left
name='avg_left_pco',
type=BASE_TYPES[0x01], # sint8
def_num=114,
units='mm',
),
115: Field( # Average platform center offset Right
name='avg_right_pco',
type=BASE_TYPES[0x01], # sint8
def_num=115,
units='mm',
),
116: Field( # Average left power phase angles. Indexes defined by power_phase_type.
name='avg_left_power_phase',
type=BASE_TYPES[0x02], # uint8
def_num=116,
scale=0.7111111,
units='degrees',
),
117: Field( # Average left power phase peak angles. Data value indexes defined by power_phase_type.
name='avg_left_power_phase_peak',
type=BASE_TYPES[0x02], # uint8
def_num=117,
scale=0.7111111,
units='degrees',
),
118: Field( # Average right power phase angles. Data value indexes defined by power_phase_type.
name='avg_right_power_phase',
type=BASE_TYPES[0x02], # uint8
def_num=118,
scale=0.7111111,
units='degrees',
),
119: Field( # Average right power phase peak angles data value indexes defined by power_phase_type.
name='avg_right_power_phase_peak',
type=BASE_TYPES[0x02], # uint8
def_num=119,
scale=0.7111111,
units='degrees',
),
120: Field( # Average power by position. Data value indexes defined by rider_position_type.
name='avg_power_position',
type=BASE_TYPES[0x84], # uint16
def_num=120,
units='watts',
),
121: Field( # Maximum power by position. Data value indexes defined by rider_position_type.
name='max_power_position',
type=BASE_TYPES[0x84], # uint16
def_num=121,
units='watts',
),
122: Field( # Average cadence by position. Data value indexes defined by rider_position_type.
name='avg_cadence_position',
type=BASE_TYPES[0x02], # uint8
def_num=122,
units='rpm',
),
123: Field( # Maximum cadence by position. Data value indexes defined by rider_position_type.
name='max_cadence_position',
type=BASE_TYPES[0x02], # uint8
def_num=123,
units='rpm',
),
124: Field( # total_distance / total_timer_time
name='enhanced_avg_speed',
type=BASE_TYPES[0x86], # uint32
def_num=124,
scale=1000,
units='m/s',
),
125: Field(
name='enhanced_max_speed',
type=BASE_TYPES[0x86], # uint32
def_num=125,
scale=1000,
units='m/s',
),
126: Field(
name='enhanced_avg_altitude',
type=BASE_TYPES[0x86], # uint32
def_num=126,
scale=5,
offset=500,
units='m',
),
127: Field(
name='enhanced_min_altitude',
type=BASE_TYPES[0x86], # uint32
def_num=127,
scale=5,
offset=500,
units='m',
),
128: Field(
name='enhanced_max_altitude',
type=BASE_TYPES[0x86], # uint32
def_num=128,
scale=5,
offset=500,
units='m',
),
129: Field( # lev average motor power during session
name='avg_lev_motor_power',
type=BASE_TYPES[0x84], # uint16
def_num=129,
units='watts',
),
130: Field( # lev maximum motor power during session
name='max_lev_motor_power',
type=BASE_TYPES[0x84], # uint16
def_num=130,
units='watts',
),
131: Field( # lev battery consumption during session
name='lev_battery_consumption',
type=BASE_TYPES[0x02], # uint8
def_num=131,
scale=2,
units='percent',
),
132: Field(
name='avg_vertical_ratio',
type=BASE_TYPES[0x84], # uint16
def_num=132,
scale=100,
units='percent',
),
133: Field(
name='avg_stance_time_balance',
type=BASE_TYPES[0x84], # uint16
def_num=133,
scale=100,
units='percent',
),
134: Field(
name='avg_step_length',
type=BASE_TYPES[0x84], # uint16
def_num=134,
scale=10,
units='mm',
),
137: Field(
name='total_anaerobic_training_effect',
type=BASE_TYPES[0x02], # uint8
def_num=137,
scale=10,
),
139: Field(
name='avg_vam',
type=BASE_TYPES[0x84], # uint16
def_num=139,
scale=1000,
units='m/s',
),
253: FIELD_TYPE_TIMESTAMP, # Sesson end time.
254: Field( # Selected bit is set for the current session.
name='message_index',
type=FIELD_TYPES['message_index'],
def_num=254,
),
},
),
19: MessageType(
name='lap',
mesg_num=19,
fields={
0: Field(
name='event',
type=FIELD_TYPES['event'],
def_num=0,
),
1: Field(
name='event_type',
type=FIELD_TYPES['event_type'],
def_num=1,
),
2: Field(
name='start_time',
type=FIELD_TYPES['date_time'],
def_num=2,
),
3: Field(
name='start_position_lat',
type=BASE_TYPES[0x85], # sint32
def_num=3,
units='semicircles',
),
4: Field(
name='start_position_long',
type=BASE_TYPES[0x85], # sint32
def_num=4,
units='semicircles',
),
5: Field(
name='end_position_lat',
type=BASE_TYPES[0x85], # sint32
def_num=5,
units='semicircles',
),
6: Field(
name='end_position_long',
type=BASE_TYPES[0x85], # sint32
def_num=6,
units='semicircles',
),
7: Field( # Time (includes pauses)
name='total_elapsed_time',
type=BASE_TYPES[0x86], # uint32
def_num=7,
scale=1000,
units='s',
),
8: Field( # Timer Time (excludes pauses)
name='total_timer_time',
type=BASE_TYPES[0x86], # uint32
def_num=8,
scale=1000,
units='s',
),
9: Field(
name='total_distance',
type=BASE_TYPES[0x86], # uint32
def_num=9,
scale=100,
units='m',
),
10: Field(
name='total_cycles',
type=BASE_TYPES[0x86], # uint32
def_num=10,
units='cycles',
subfields=(
SubField(
name='total_strides',
def_num=10,
type=BASE_TYPES[0x86], # uint32
units='strides',
ref_fields=(
ReferenceField(
name='sport',
def_num=25,
value='running',
raw_value=1,
),
ReferenceField(
name='sport',
def_num=25,
value='walking',
raw_value=11,
),
),
),
),
),
11: Field(
name='total_calories',
type=BASE_TYPES[0x84], # uint16
def_num=11,
units='kcal',
),
12: Field( # If New Leaf
name='total_fat_calories',
type=BASE_TYPES[0x84], # uint16
def_num=12,
units='kcal',
),
13: Field(
name='avg_speed',
type=BASE_TYPES[0x84], # uint16
def_num=13,
scale=1000,
units='m/s',
components=(
ComponentField(
name='enhanced_avg_speed',
def_num=110,
scale=1000,
units='m/s',
accumulate=False,
bits=16,
bit_offset=0,
),
),
),
14: Field(
name='max_speed',
type=BASE_TYPES[0x84], # uint16
def_num=14,
scale=1000,
units='m/s',
components=(
ComponentField(
name='enhanced_max_speed',
def_num=111,
scale=1000,
units='m/s',
accumulate=False,
bits=16,
bit_offset=0,
),
),
),
15: Field(
name='avg_heart_rate',
type=BASE_TYPES[0x02], # uint8
def_num=15,
units='bpm',
),
16: Field(
name='max_heart_rate',
type=BASE_TYPES[0x02], # uint8
def_num=16,
units='bpm',
),
17: Field( # total_cycles / total_timer_time if non_zero_avg_cadence otherwise total_cycles / total_elapsed_time
name='avg_cadence',
type=BASE_TYPES[0x02], # uint8
def_num=17,
units='rpm',
subfields=(
SubField(
name='avg_running_cadence',
def_num=17,
type=BASE_TYPES[0x02], # uint8
units='strides/min',
ref_fields=(
ReferenceField(
name='sport',
def_num=25,
value='running',
raw_value=1,
),
),
),
),
),
18: Field(
name='max_cadence',
type=BASE_TYPES[0x02], # uint8
def_num=18,
units='rpm',
subfields=(
SubField(
name='max_running_cadence',
def_num=18,
type=BASE_TYPES[0x02], # uint8
units='strides/min',
ref_fields=(
ReferenceField(
name='sport',
def_num=25,
value='running',
raw_value=1,
),
),
),
),
),
19: Field( # total_power / total_timer_time if non_zero_avg_power otherwise total_power / total_elapsed_time
name='avg_power',
type=BASE_TYPES[0x84], # uint16
def_num=19,
units='watts',
),
20: Field(
name='max_power',
type=BASE_TYPES[0x84], # uint16
def_num=20,
units='watts',
),
21: Field(
name='total_ascent',
type=BASE_TYPES[0x84], # uint16
def_num=21,
units='m',
),
22: Field(
name='total_descent',
type=BASE_TYPES[0x84], # uint16
def_num=22,
units='m',
),
23: Field(
name='intensity',
type=FIELD_TYPES['intensity'],
def_num=23,
),
24: Field(
name='lap_trigger',
type=FIELD_TYPES['lap_trigger'],
def_num=24,
),
25: Field(
name='sport',
type=FIELD_TYPES['sport'],
def_num=25,
),
26: Field(
name='event_group',
type=BASE_TYPES[0x02], # uint8
def_num=26,
),
32: Field( # # of lengths of swim pool
name='num_lengths',
type=BASE_TYPES[0x84], # uint16
def_num=32,
units='lengths',
),
33: Field(
name='normalized_power',
type=BASE_TYPES[0x84], # uint16
def_num=33,
units='watts',
),
34: Field(
name='left_right_balance',
type=FIELD_TYPES['left_right_balance_100'],
def_num=34,
),
35: Field(
name='first_length_index',
type=BASE_TYPES[0x84], # uint16
def_num=35,
),
37: Field(
name='avg_stroke_distance',
type=BASE_TYPES[0x84], # uint16
def_num=37,
scale=100,
units='m',
),
38: Field(
name='swim_stroke',
type=FIELD_TYPES['swim_stroke'],
def_num=38,
),
39: Field(
name='sub_sport',
type=FIELD_TYPES['sub_sport'],
def_num=39,
),
40: Field( # # of active lengths of swim pool
name='num_active_lengths',
type=BASE_TYPES[0x84], # uint16
def_num=40,
units='lengths',
),
41: Field(
name='total_work',
type=BASE_TYPES[0x86], # uint32
def_num=41,
units='J',
),
42: Field(
name='avg_altitude',
type=BASE_TYPES[0x84], # uint16
def_num=42,
| |
import collections
import datetime
import decimal
import json
import time
import unittest
import uuid
from unittest import TestCase
from unittest.util import safe_repr
import dateutil.parser
import django.test
import django.urls
import django.utils.timezone
import mock
import rest_framework.test
# noinspection PyUnresolvedReferences
from behave import * # noqa
from behave import when, step
import api.tests.factories
import jetstream.tests.tas_api_mock_utils
@step('we create a new user')
def create_new_user(context):
    """Create a regular (non-staff, non-superuser) user for the persona.

    :type context: behave.runner.Context
    """
    assert context.persona
    persona = context.persona
    new_user = api.tests.factories.UserFactory.create(
        username=persona['username'], is_staff=False, is_superuser=False
    )
    new_user.set_password(persona['password'])
    new_user.save()
    persona['user'] = new_user
@step('we create a new admin user')
def create_new_admin_user(context):
    """Create a staff superuser for the persona.

    :type context: behave.runner.Context
    """
    assert context.persona
    persona = context.persona
    admin_user = api.tests.factories.UserFactory.create(
        username=persona['username'],
        is_staff=True,
        is_superuser=True
    )
    admin_user.set_password(persona['password'])
    admin_user.save()
    persona['user'] = admin_user
@step('I log in')
def i_log_in(context):
    """Log the persona in using the stock Django model auth backend.

    Stores the API client on the persona and asserts the login succeeded.

    :type context: behave.runner.Context
    """
    assert context.persona
    persona = context.persona
    api_client = rest_framework.test.APIClient()
    persona['client'] = api_client
    backend_changes = {
        'prepend': 'django.contrib.auth.backends.ModelBackend',
        'remove': ['django_cyverse_auth.authBackends.MockLoginBackend']
    }
    with django.test.modify_settings(AUTHENTICATION_BACKENDS=backend_changes):
        logged_in = api_client.login(
            username=persona['username'], password=persona['password']
        )
        context.test.assertTrue(logged_in)
@step(
    'I log in with valid XSEDE project required and default quota plugin enabled'
)
def i_log_in_with_valid_xsede_project_required_and_default_quota_plugin_enabled(
    context
):
    """Log in via the mock auth backend with XSEDE project validation and the
    Jetstream special-allocation default-quota plugin enabled.

    Stores the client and login result on the persona, asserts the login
    succeeded, and caches the matching AtmosphereUser if not already cached.

    :type context: behave.runner.Context
    """
    assert context.persona
    client = rest_framework.test.APIClient()
    context.persona['client'] = client
    with django.test.override_settings(
        AUTHENTICATION_BACKENDS=[
            'django_cyverse_auth.authBackends.MockLoginBackend'
        ],
        ALWAYS_AUTH_USER=context.persona['username'],
        DEFAULT_QUOTA_PLUGINS=[
            'jetstream.plugins.quota.default_quota.JetstreamSpecialAllocationQuota'
        ],
    ):
        with django.test.modify_settings(
            VALIDATION_PLUGINS={
                'prepend':
                    'jetstream.plugins.auth.validation.XsedeProjectRequired',
                'remove':
                    'atmosphere.plugins.auth.validation.AlwaysAllow'
            }
        ):
            login_result = client.login(
                username=context.persona['username'],
                # BUG FIX: was `context.<PASSWORD>['password']` (a corrupted
                # placeholder); the credential lives on the persona dict.
                password=context.persona['password']
            )
            context.persona['login_result'] = login_result
            context.test.assertTrue(login_result)
            if 'user' not in context.persona:
                import core.models
                context.persona[
                    'user'
                ] = core.models.AtmosphereUser.objects.get_by_natural_key(
                    context.persona['username']
                )
            assert context.persona['user'].username == context.persona[
                'username']
@step('I try to log in with valid XSEDE project required')
def i_log_in_with_valid_xsede_project_required(context):
    """Attempt a login with the XSEDE-project-required validation plugin.

    The TACC/TAS API calls are mocked out; whether the mock behaves as if
    TAS is up is driven by the scenario outline's ``is_tas_up`` example
    column (defaults to up). The login result is stored on the persona for
    later should-succeed / should-fail assertion steps -- this step itself
    does not assert success.

    :type context: behave.runner.Context
    """
    assert context.persona
    client = rest_framework.test.APIClient()
    context.persona['client'] = client
    is_tas_up = True
    # Scenario outlines expose the current example row through behave's
    # private `_row` attribute; read the `is_tas_up` column when present.
    if hasattr(context.scenario, '_row') and context.scenario._row:
        example = dict(
            zip(context.scenario._row.headings, context.scenario._row.cells)
        )
        is_tas_up = example.get('is_tas_up', 'Yes') == 'Yes'
    with django.test.override_settings(
        AUTHENTICATION_BACKENDS=[
            'django_cyverse_auth.authBackends.MockLoginBackend'
        ],
        ALWAYS_AUTH_USER=context.persona['username'],
        DEFAULT_QUOTA_PLUGINS=[
            'jetstream.plugins.quota.default_quota.JetstreamSpecialAllocationQuota'
        ],
        VALIDATION_PLUGINS=[
            'jetstream.plugins.auth.validation.XsedeProjectRequired'
        ]
    ):
        # Replace both TACC API entry points with scenario-aware mocks.
        with mock.patch.multiple(
            'jetstream.allocation',
            tacc_api_post=mock.DEFAULT,
            tacc_api_get=mock.DEFAULT,
        ) as mock_methods:
            mock_methods[
                'tacc_api_post'
            ].side_effect = jetstream.tests.tas_api_mock_utils._make_mock_tacc_api_post(
                context, is_tas_up
            )
            mock_methods[
                'tacc_api_get'
            ].side_effect = jetstream.tests.tas_api_mock_utils._make_mock_tacc_api_get(
                context, is_tas_up
            )
            login_result = client.login(
                username=context.persona['username'],
                password=context.persona['password']
            )
            context.persona['login_result'] = login_result
@step('the login attempt should fail')
def login_should_fail(context):
    """Assert the recorded login attempt failed; drop any cached user."""
    persona = context.persona
    context.test.assertFalse(persona['login_result'])
    persona.pop('user', None)
@step('the login attempt should succeed')
def login_should_succeed(context):
    """Assert login succeeded and cache the matching AtmosphereUser."""
    persona = context.persona
    context.test.assertTrue(persona['login_result'])
    import core.models
    matched_user = core.models.AtmosphereUser.objects.get_by_natural_key(
        persona['username']
    )
    persona['user'] = matched_user
    assert matched_user.username == persona['username']
@step('I get my allocation sources from the API I should see')
def get_allocation_sources_from_api(context):
    """Fetch allocation sources from the API and compare with the table.

    Datetime values on the API side and `start_date` strings in the table
    are both rounded to whole seconds and rendered as
    '%Y-%m-%d %H:%M:%S%z' strings before comparison, so microsecond jitter
    does not break the test.

    NOTE(review): relies on Python 2-only names (`unicode`,
    `dict.iteritems`, `assertItemsEqual`) -- confirm before any Py3 port.
    """
    assert context.persona
    client = context.persona['client']
    response = client.get('/api/v2/allocation_sources')
    context.persona['response'] = response
    context.test.assertEqual(response.status_code, 200)
    api_allocation_sources = []
    # Normalize each API result down to just the table's columns.
    for raw_result in response.data['results']:
        api_result = {}
        for heading in context.table.headings:
            raw_value = raw_result[heading]
            cleaned_value = raw_value
            if isinstance(raw_value, datetime.datetime):
                rounded_datetime = raw_value.replace(microsecond=0)
                formatted_datetime = datetime.datetime.strftime(
                    rounded_datetime, u'%Y-%m-%d %H:%M:%S%z'
                )
                cleaned_value = formatted_datetime
            if heading == 'start_date':
                # a datetime formatted as a string
                parsed_datetime = dateutil.parser.parse(raw_value)
                rounded_datetime = parsed_datetime.replace(microsecond=0)
                formatted_datetime = datetime.datetime.strftime(
                    rounded_datetime, u'%Y-%m-%d %H:%M:%S%z'
                )
                cleaned_value = formatted_datetime
            api_result[heading] = cleaned_value
        api_allocation_sources.append(api_result)
    raw_expected_allocation_sources = [
        dict(zip(row.headings, row.cells)) for row in context.table
    ]
    expected_allocation_sources = []
    # Per-column coercion applied to the expected (string) table cells.
    transform_map = {
        'name': unicode,
        'compute_allowed': int,
        'start_date': str,
        'end_date': unicode,
        'compute_used': decimal.Decimal,
        'global_burn_rate': decimal.Decimal,
        'updated': str,
        'renewal_strategy': unicode,
        'user_compute_used': decimal.Decimal,
        'user_burn_rate': decimal.Decimal,
        'user_snapshot_updated': str
    }
    for raw_row in raw_expected_allocation_sources:
        clean_row = {}
        for key, value in raw_row.iteritems():
            transform = transform_map[key]
            # The literal string 'None' in the table means a null value.
            clean_row[key] = None if value == 'None' else transform(value)
        expected_allocation_sources.append(clean_row)
    context.test.maxDiff = None
    context.test.assertItemsEqual(
        expected_allocation_sources, api_allocation_sources
    )
    # For debugging, use `assertListEqual` below if `assertItemsEqual` above is not clear
    # context.test.assertListEqual(expected_allocation_sources, api_allocation_sources)
@step('we create an allocation source through the API')
def we_create_allocation_source_through_api(context):
    """POST one allocation source per table row and remember returned UUIDs."""
    assert context.persona
    persona = context.persona
    api_client = persona['client']
    for row in context.table:
        payload = {
            'renewal_strategy': row['renewal_strategy'],
            'name': row['name'],
            'compute_allowed': row['compute_allowed']
        }
        response = api_client.post('/api/v2/allocation_sources', payload)
        if 'uuid' in response.data and response.data['uuid']:
            source_ids = persona.get('allocation_source_ids', {})
            source_ids[row['name']] = response.data['uuid']
            persona['allocation_source_ids'] = source_ids
@step(
    'we assign allocation source "{allocation_source_name}" to user "{username}" via the API'
)
def assign_allocation_source_to_user_via_api(
    context, allocation_source_name, username
):
    """Link a previously created allocation source to a username via the API."""
    assert context.persona
    persona = context.persona
    source_id = persona['allocation_source_ids'][allocation_source_name]
    payload = {'username': username, 'source_id': source_id}
    persona['response'] = persona['client'].post(
        '/api/v2/user_allocation_sources', payload
    )
@step('we create a provider "{provider_location}"')
def set_up_provider(context, provider_location):
    """Create a public mock provider with its baseline credentials."""
    assert context.persona
    import core.models
    provider = api.tests.factories.ProviderFactory.create(
        location=provider_location, public=True, type__name='mock'
    )
    # Minimal credential set expected for a usable provider.
    credential_pairs = (
        ('auth_url', 'https://localhost/'),
        ('project_name', 'some_project'),
        ('region_name', 'some_region'),
        ('admin_url', 'https://localhost/'),
    )
    for cred_key, cred_value in credential_pairs:
        core.models.ProviderCredential.objects.get_or_create(
            provider=provider, key=cred_key, value=cred_value
        )
    context.persona['provider'] = provider
@step(
    'we create an account for the current persona on provider "{provider_location}"'
)
def create_jetstream_account(context, provider_location):
    """Create an account through the real `create_new_account_for` path.

    This does not use the factory directly: the cloud account driver and
    the TACC/TAS API are mocked so that the Jetstream special-allocation
    default-quota plugin is exercised end to end.
    NOTE: At the moment this step only works with Jetstream and TAS API.
    """
    assert context.persona
    import core.models
    context.test.assertIn(
        'jetstream', django.conf.settings.INSTALLED_APPS,
        'Step only works with Jetstream setup. '
        'Please use "we create an identity for the current persona ..."'
    )
    provider = core.models.Provider.objects.get(location=provider_location)
    user = context.persona['user']
    with mock.patch(
        'service.driver.get_account_driver', autospec=True
    ) as mock_get_account_driver:
        mock_account_driver = mock.MagicMock(provider)

        # Stand-in for AccountDriver.create_account: build the identity with
        # the test factory instead of talking to a real cloud.
        def mock_create_account_method(
            username,
            # BUG FIX: was `password=<PASSWORD>` (a corrupted placeholder);
            # the mock never uses the password, so default it to None.
            password=None,
            project_name=None,
            role_name=None,
            quota=None,
            max_quota=False
        ):
            factory_identity = api.tests.factories.IdentityFactory.create_identity(
                created_by=user, provider=provider, quota=quota
            )
            return factory_identity
        mock_account_driver.create_account = mock.MagicMock(
            side_effect=mock_create_account_method
        )
        mock_get_account_driver.return_value = mock_account_driver
        with mock.patch.multiple(
            'jetstream.allocation',
            tacc_api_post=mock.DEFAULT,
            tacc_api_get=mock.DEFAULT,
        ) as mock_methods:
            mock_methods[
                'tacc_api_post'
            ].side_effect = jetstream.tests.tas_api_mock_utils._make_mock_tacc_api_post(
                context
            )
            mock_methods[
                'tacc_api_get'
            ].side_effect = jetstream.tests.tas_api_mock_utils._make_mock_tacc_api_get(
                context
            )
            with django.test.override_settings(
                DEFAULT_QUOTA_PLUGINS=[
                    'jetstream.plugins.quota.default_quota.JetstreamSpecialAllocationQuota'
                ]
            ):
                import core.plugins
                # Force the plugin manager to re-read the overridden setting.
                core.plugins.DefaultQuotaPluginManager.list_of_classes = getattr(
                    django.conf.settings, 'DEFAULT_QUOTA_PLUGINS', []
                )
                new_identity = core.models.user.create_new_account_for(
                    provider, user
                )
    context.persona['user_identity'] = new_identity
@step(
    'we create an identity for the current persona on provider "{provider_location}"'
)
def create_identity(context, provider_location):
    """Attach a factory-built identity on the named provider to the persona."""
    assert context.persona
    import core.models
    matching_provider = core.models.Provider.objects.get(
        location=provider_location
    )
    context.persona['user_identity'] = (
        api.tests.factories.IdentityFactory.create_identity(
            created_by=context.persona['user'], provider=matching_provider
        )
    )
@step('I should have the following quota on provider "{}"')
def should_have_quota_on_provider(context, provider_location):
    """Compare the persona identity's quota fields to the expected table."""
    assert context.persona
    import core.models
    provider = core.models.Provider.objects.get(location=provider_location)
    identity = core.models.Identity.objects.get(
        created_by__username=context.persona['username'], provider=provider
    )
    # Table rows are (quota attribute name, expected integer value).
    expected_quota = {row[0]: int(row[1]) for row in context.table.rows}
    actual_quota_dict = {
        key: getattr(identity.quota, key) for key in expected_quota
    }
    context.test.assertDictEqual(expected_quota, actual_quota_dict)
@step(
    'we make the current identity the admin on provider "{provider_location}"'
)
def we_make_the_current_identity_the_admin_on_provider(
    context, provider_location
):
    """Register the persona's identity as the admin account on a provider.

    Creates the AccountProvider link and sets the admin key/secret
    credentials on the identity.
    """
    assert context.persona
    import core.models
    provider = core.models.Provider.objects.get(location=provider_location)
    user_identity = context.persona['user_identity']
    core.models.AccountProvider.objects.get_or_create(
        provider=provider, identity=user_identity
    )
    core.models.Identity.update_credential(
        user_identity, 'key', 'admin', replace=True
    )
    # FIX: the original issued this exact call twice; once suffices since
    # replace=True makes it idempotent.
    core.models.Identity.update_credential(
        user_identity, 'secret', 'adminsecret', replace=True
    )
@step('we create a provider machine for current persona')
def create_provider_machine(context):
    """Build a provider machine owned by the persona's user and identity."""
    assert context.persona
    persona = context.persona
    machine = api.tests.factories.ProviderMachineFactory.create_provider_machine(
        persona['user'], persona['user_identity']
    )
    persona['provider_machine'] = machine
@step('I get the projects via the API')
def get_projects_api(context):
    """Fetch the project list and stash the response on the persona."""
    assert context.persona
    context.persona['response'] = context.persona['client'].get(
        '/api/v2/projects'
    )
@step('I create a project called "{project_name}" via the API')
def create_project_api(context, project_name):
    """POST a new project owned by the persona's username group."""
    assert context.persona
    persona = context.persona
    from core.models import AtmosphereUser
    assert isinstance(persona['user'], AtmosphereUser)
    # The owner group shares the user's username by convention.
    payload = {
        'name': project_name,
        'description': project_name,
        'owner': persona['user'].username
    }
    persona['response'] = persona['client'].post('/api/v2/projects', payload)
@when(
    'I create a volume with name "{volume_name}" and size {volume_size:d} using API'
)
def create_volume_api(context, volume_name, volume_size):
    """POST a volume via the v1 API with the storage-quota check stubbed out."""
    assert context.persona
    persona = context.persona
    identity = persona['user_identity']
    import core.models
    context.test.assertIsInstance(identity, core.models.Identity)
    context.test.assertIsInstance(identity.provider, core.models.Provider)
    volume_url = '/api/v1/provider/{}/identity/{}/volume'.format(
        identity.provider.uuid, identity.uuid
    )
    with mock.patch(
        'service.volume.check_over_storage_quota', autospec=True
    ) as quota_check:
        quota_check.return_value = True
        persona['response'] = persona['client'].post(
            volume_url, {'name': volume_name, 'size': volume_size},
            format='json'
        )
@step('I get the volumes via the API')
def get_volumes_api(context):
    """GET the persona's volumes with the storage-quota check stubbed out."""
    assert context.persona
    persona = context.persona
    identity = persona['user_identity']
    import core.models
    context.test.assertIsInstance(identity, core.models.Identity)
    context.test.assertIsInstance(identity.provider, core.models.Provider)
    volume_url = '/api/v1/provider/{}/identity/{}/volume'.format(
        identity.provider.uuid, identity.uuid
    )
    with mock.patch(
        'service.volume.check_over_storage_quota', autospec=True
    ) as quota_check:
        quota_check.return_value = True
        persona['response'] = persona['client'].get(volume_url)
@step('we get the project volumes via the API')
def get_project_volumes_api(context):
    """GET a single project-volume association by its stored id."""
    assert context.persona
    persona = context.persona
    identity = persona['user_identity']
    import core.models
    context.test.assertIsInstance(identity, core.models.Identity)
    context.test.assertIsInstance(identity.provider, core.models.Provider)
    persona['response'] = persona['client'].get(
        '/api/v2/project_volumes/{}'.format(persona['project_volume_id'])
    )
@when(
    'I associate volume "{volume_id_var}" with project "{project_id_var}" via the API'
)
def associate_volume_with_project(context, volume_id_var, project_id_var):
    """POST a project/volume association; the ids come from persona variables."""
    assert context.persona
    persona = context.persona
    identity = persona['user_identity']
    import core.models
    context.test.assertIsInstance(identity, core.models.Identity)
    context.test.assertIsInstance(identity.provider, core.models.Provider)
    payload = {
        'project': persona[project_id_var],
        'volume': persona[volume_id_var]
    }
    persona['response'] = persona['client'].post(
        '/api/v2/project_volumes', payload, format='json'
    )
@step('we create an active instance')
def create_active_instance(context):
    """Create an instance plus an 'active' status-history entry for it."""
    assert context.persona
    persona = context.persona
    machine = persona['provider_machine']
    import core.models
    context.test.assertIsInstance(machine, core.models.ProviderMachine)
    instance = api.tests.factories.InstanceFactory.create(
        name='Instance in active',
        provider_alias=uuid.uuid4(),
        source=machine.instance_source,
        created_by=persona['user'],
        created_by_identity=persona['user_identity'],
        start_date=django.utils.timezone.now()
    )
    status = api.tests.factories.InstanceStatusFactory.create(name='active')
    size = api.tests.factories.SizeFactory.create(
        name='single_cpu_size',
        provider=machine.provider,
        cpu=1,
        disk=100,
        root=10,
        mem=4096
    )
    api.tests.factories.InstanceHistoryFactory.create(
        status=status,
        activity='',
        instance=instance,
        size=size
    )
    persona['active_instance'] = instance
@step('I set "{key}" to attribute "{attribute}" of "{persona_var}"')
@step('I set "{key}" to another variable "{persona_var}"')
def set_key_to_persona_var_and_attribute(
    context, key, persona_var, attribute=None
):
    """Copy a persona variable (or one of its attributes) under a new key."""
    assert context.persona is not None, u'no persona is setup'
    source = context.persona[persona_var]
    context.persona[key] = getattr(source, attribute) if attribute else source
@step(u'I set "{key}" to key "{other_key}" of "{persona_var}"')
def set_key_to_key_of_persona_var(context, key, persona_var, other_key):
assert context.persona
context.test.assertIn(persona_var, context.persona)
context.test.assertIsInstance(context.persona[persona_var], dict)
context.test.assertIn(other_key, | |
ax.grid(True, axis="x")
if y_grid:
ax.grid(True, axis="y")
self._add_axis_labels(ax)
if "hue" in self.variables and legend:
# TODO if possible, I would like to move the contour
# intensity information into the legend too and label the
# iso proportions rather than the raw density values
artist_kws = {}
artist = partial(mpl.patches.Patch)
self._add_legend(
ax, artist, True, False, "layer", 1, artist_kws, {},
)
    def plot_univariate_density(
        self,
        multiple,
        common_norm,
        common_grid,
        fill,
        legend,
        estimate_kws,
        plot_kws,
        ax,
    ):
        """Draw one-dimensional KDE curves, one per hue subset.

        `multiple` controls how hue levels combine ("layer", "stack", or
        "fill"); `common_norm` / `common_grid` control joint normalization
        and the shared evaluation grid; `fill` selects filled polygons vs.
        line curves. `estimate_kws` go to the KDE estimator and `plot_kws`
        to the matplotlib artists; curves are drawn directly onto `ax`.
        """
        # Preprocess the matplotlib keyword dictionaries
        if fill:
            artist = mpl.collections.PolyCollection
        else:
            artist = mpl.lines.Line2D
        plot_kws = _normalize_kwargs(plot_kws, artist)
        # Input checking
        _check_argument("multiple", ["layer", "stack", "fill"], multiple)
        # Check for log scaling on the data axis
        data_axis = getattr(ax, f"{self.data_variable}axis")
        log_scale = data_axis.get_scale() == "log"
        # Always share the evaluation grid when stacking
        if "hue" in self.variables and multiple in ("stack", "fill"):
            common_grid = True
        # Do the computation
        densities = self._compute_univariate_density(
            self.data_variable,
            common_norm,
            common_grid,
            estimate_kws,
            log_scale
        )
        # Note: raises when no hue and multiple != layer. A problem?
        densities, baselines = self._resolve_multiple(densities, multiple)
        # Control the interaction with autoscaling by defining sticky_edges
        # i.e. we don't want autoscale margins below the density curve
        sticky_density = (0, 1) if multiple == "fill" else (0, np.inf)
        if multiple == "fill":
            # Filled plots should not have any margins
            sticky_support = densities.index.min(), densities.index.max()
        else:
            sticky_support = []
        # Handle default visual attributes: without a hue mapping, probe
        # matplotlib with a throwaway "scout" artist to resolve the default
        # color from the property cycle.
        if "hue" not in self.variables:
            if fill:
                if self.var_types[self.data_variable] == "datetime":
                    # Avoid drawing empty fill_between on date axis
                    # https://github.com/matplotlib/matplotlib/issues/17586
                    scout = None
                    default_color = plot_kws.pop(
                        "color", plot_kws.pop("facecolor", None)
                    )
                    if default_color is None:
                        default_color = "C0"
                else:
                    scout = ax.fill_between([], [], **plot_kws)
                    default_color = tuple(scout.get_facecolor().squeeze())
                    plot_kws.pop("color", None)
            else:
                scout, = ax.plot([], [], **plot_kws)
                default_color = scout.get_color()
            if scout is not None:
                scout.remove()
        default_alpha = .25 if multiple == "layer" else .75
        alpha = plot_kws.pop("alpha", default_alpha)  # TODO make parameter?
        # Now iterate through the subsets and draw the densities
        # We go backwards so stacked densities read from top-to-bottom
        for sub_vars, _ in self._semantic_subsets("hue", reverse=True):
            # Extract the support grid and density curve for this level
            key = tuple(sub_vars.items())
            try:
                density = densities[key]
            except KeyError:
                continue
            support = density.index
            fill_from = baselines[key]
            # Modify the matplotlib attributes from semantic mapping
            if "hue" in self.variables:
                color = self._hue_map(sub_vars["hue"])
            else:
                color = default_color
            artist_kws = self._artist_kws(
                plot_kws, fill, False, multiple, color, alpha
            )
            # Either plot a curve with observation values on the x axis
            if "x" in self.variables:
                if fill:
                    artist = ax.fill_between(
                        support, fill_from, density, **artist_kws
                    )
                else:
                    artist, = ax.plot(support, density, **artist_kws)
                artist.sticky_edges.x[:] = sticky_support
                artist.sticky_edges.y[:] = sticky_density
            # Or plot a curve with observation values on the y axis
            else:
                if fill:
                    artist = ax.fill_betweenx(
                        support, fill_from, density, **artist_kws
                    )
                else:
                    artist, = ax.plot(density, support, **artist_kws)
                artist.sticky_edges.x[:] = sticky_density
                artist.sticky_edges.y[:] = sticky_support
        # --- Finalize the plot ----
        default_x = default_y = ""
        if self.data_variable == "x":
            default_y = "Density"
        if self.data_variable == "y":
            default_x = "Density"
        self._add_axis_labels(ax, default_x, default_y)
        if "hue" in self.variables and legend:
            if fill:
                artist = partial(mpl.patches.Patch)
            else:
                artist = partial(mpl.lines.Line2D, [], [])
            self._add_legend(
                ax, artist, fill, False, multiple, alpha, plot_kws, {},
            )
    def plot_bivariate_density(
        self,
        common_norm,
        fill,
        levels,
        thresh,
        color,
        legend,
        cbar,
        cbar_ax,
        cbar_kws,
        estimate_kws,
        contour_kws,
        ax,
    ):
        """Draw a two-dimensional KDE as (optionally filled) contours.

        `levels` is either a count of contour levels or explicit
        iso-proportion values in [0, 1]; `thresh` sets the lowest drawn
        proportion. With a hue semantic, one contour set is drawn per
        subset, optionally normalized jointly (`common_norm`). The `cbar*`
        parameters add a colorbar showing iso-density (not iso-proportion)
        values.
        """
        contour_kws = contour_kws.copy()
        estimator = KDE(**estimate_kws)
        if "hue" not in self.variables:
            common_norm = False
        # See other notes about GH2135
        cols = list(self.variables)
        all_data = self.plot_data[cols].dropna()
        # Check for log scaling on either axis
        scalex = ax.xaxis.get_scale() == "log"
        scaley = ax.yaxis.get_scale() == "log"
        log_scale = scalex, scaley
        # Loop through the subsets and estimate the KDEs
        densities, supports = {}, {}
        for sub_vars, sub_data in self._semantic_subsets("hue", from_comp_data=True):
            # Extract the data points from this sub set and remove nulls
            sub_data = sub_data[cols].dropna()
            observations = sub_data[["x", "y"]]
            # Extract the weights for this subset of observations
            if "weights" in self.variables:
                weights = sub_data["weights"]
            else:
                weights = None
            # Check that KDE will not error out
            variance = observations[["x", "y"]].var()
            if not variance.all() or variance.isna().any():
                msg = "Dataset has 0 variance; skipping density estimate."
                warnings.warn(msg, UserWarning)
                continue
            # Estimate the density of observations at this level
            observations = observations["x"], observations["y"]
            density, support = estimator(*observations, weights=weights)
            # Transform the support grid back to the original scale
            # NOTE(review): log_scale is always a 2-tuple here, so this
            # `is not None` check always passes.
            if log_scale is not None:
                xx, yy = support
                if log_scale[0]:
                    xx = np.power(10, xx)
                if log_scale[1]:
                    yy = np.power(10, yy)
                support = xx, yy
            # Apply a scaling factor so that the integral over all subsets is 1
            if common_norm:
                density *= len(sub_data) / len(all_data)
            key = tuple(sub_vars.items())
            densities[key] = density
            supports[key] = support
        # Define a grid of iso-proportion levels
        if isinstance(levels, Number):
            levels = np.linspace(thresh, 1, levels)
        else:
            if min(levels) < 0 or max(levels) > 1:
                raise ValueError("levels must be in [0, 1]")
        # Transform from iso-proportions to iso-densities
        if common_norm:
            common_levels = self._quantile_to_level(
                list(densities.values()), levels,
            )
            draw_levels = {k: common_levels for k in densities}
        else:
            draw_levels = {
                k: self._quantile_to_level(d, levels)
                for k, d in densities.items()
            }
        # Get a default single color from the attribute cycle
        scout, = ax.plot([], color=color)
        default_color = scout.get_color()
        scout.remove()
        # Define the coloring of the contours
        if "hue" in self.variables:
            for param in ["cmap", "colors"]:
                if param in contour_kws:
                    msg = f"{param} parameter ignored when using hue mapping."
                    warnings.warn(msg, UserWarning)
                    contour_kws.pop(param)
        else:
            coloring_given = set(contour_kws) & {"cmap", "colors"}
            if fill and not coloring_given:
                cmap = self._cmap_from_color(default_color)
                contour_kws["cmap"] = cmap
            if not fill and not coloring_given:
                contour_kws["colors"] = [default_color]
        # Choose the function to plot with
        # TODO could add a pcolormesh based option as well
        # Which would look something like element="raster"
        if fill:
            contour_func = ax.contourf
        else:
            contour_func = ax.contour
        # Loop through the subsets again and plot the data
        for sub_vars, _ in self._semantic_subsets("hue"):
            if "hue" in sub_vars:
                color = self._hue_map(sub_vars["hue"])
                if fill:
                    contour_kws["cmap"] = self._cmap_from_color(color)
                else:
                    contour_kws["colors"] = [color]
            key = tuple(sub_vars.items())
            if key not in densities:
                continue
            density = densities[key]
            xx, yy = supports[key]
            label = contour_kws.pop("label", None)
            cset = contour_func(
                xx, yy, density,
                levels=draw_levels[key],
                **contour_kws,
            )
            if "hue" not in self.variables:
                cset.collections[0].set_label(label)
        # Add a color bar representing the contour heights
        # Note: this shows iso densities, not iso proportions
        # See more notes in histplot about how this could be improved
        if cbar:
            cbar_kws = {} if cbar_kws is None else cbar_kws
            ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)
        # --- Finalize the plot
        self._add_axis_labels(ax)
        if "hue" in self.variables and legend:
            # TODO if possible, I would like to move the contour
            # intensity information into the legend too and label the
            # iso proportions rather than the raw density values
            artist_kws = {}
            if fill:
                artist = partial(mpl.patches.Patch)
            else:
                artist = partial(mpl.lines.Line2D, [], [])
            self._add_legend(
                ax, artist, fill, False, "layer", 1, artist_kws, {},
            )
def plot_univariate_ecdf(self, estimate_kws, legend, plot_kws, ax):
# TODO see notes elsewhere about GH2135
cols = list(self.variables)
estimator = ECDF(**estimate_kws)
# Set the draw style to step the right way for the data varible
drawstyles = dict(x="steps-post", y="steps-pre")
plot_kws["drawstyle"] = drawstyles[self.data_variable]
# Loop through the subsets, transform and plot the data
for sub_vars, sub_data in self._semantic_subsets(
"hue", reverse=True, from_comp_data=True,
):
# Compute the ECDF
sub_data = sub_data[cols].dropna()
if sub_data.empty:
continue
observations = sub_data[self.data_variable]
weights = sub_data.get("weights", None)
stat, vals = estimator(observations, weights)
# Assign attributes based on semantic mapping
artist_kws = plot_kws.copy()
if "hue" in self.variables:
artist_kws["color"] = self._hue_map(sub_vars["hue"])
# Work out the orientation of the plot
if self.data_variable == "x":
plot_args = vals, stat
stat_variable = "y"
else:
plot_args = stat, vals
stat_variable = "x"
if estimator.stat == "count":
top_edge = len(observations)
else:
top_edge = 1
# Draw the line for this subset
artist, = ax.plot(*plot_args, **artist_kws)
sticky_edges = getattr(artist.sticky_edges, stat_variable)
sticky_edges[:] = 0, top_edge
# --- Finalize the plot ----
stat = estimator.stat.capitalize()
default_x = default_y = ""
if self.data_variable == "x":
default_y = stat
if self.data_variable == | |
<filename>gmso/tests/test_forcefield.py
import lxml
import pytest
import unyt as u
from lxml.etree import DocumentInvalid
from sympy import sympify
from gmso.core.forcefield import ForceField
from gmso.exceptions import (
ForceFieldParseError,
MissingAtomTypesError,
MissingPotentialError,
)
from gmso.tests.base_test import BaseTest
from gmso.tests.utils import allclose_units_mixed, get_path
class TestForceField(BaseTest):
@pytest.fixture
def ff(self):
return ForceField(get_path("ff-example0.xml"))
@pytest.fixture
def named_groups_ff(self):
return ForceField(get_path("ff-example1.xml"))
@pytest.fixture
def opls_ethane_foyer(self):
return ForceField(
get_path(filename=get_path("oplsaa-ethane_foyer.xml"))
)
def test_ff_name_version_from_xml(self, ff):
assert ff.name == "ForceFieldOne"
assert ff.version == "0.4.1"
def test_scaling_factors_from_xml(self, ff):
assert ff.scaling_factors["nonBonded14Scale"] == 0.67
assert ff.scaling_factors["electrostatics14Scale"] == 0.5
    @pytest.mark.parametrize(
        "unit_name,unit_value",
        [
            ("energy", u.Unit(u.K * u.kb)),
            ("mass", u.gram / u.mol),
            ("temperature", u.K),
            ("charge", u.coulomb),
            ("angle", u.rad),
            ("time", u.ps),
            ("distance", u.nm),
        ],
    )
    def test_units_from_xml(self, ff, unit_name, unit_value):
        """Each unit dimension declared in the XML maps to the expected unyt unit."""
        # The fixture declares exactly the seven dimensions parametrized above.
        assert len(ff.units.keys()) == 7
        assert ff.units[unit_name] == unit_value
    def test_ff_atomtypes_from_xml(self, ff):
        """Atom types parse from XML with correct parameters, metadata, and expressions."""
        assert len(ff.atom_types) == 3
        assert "Ar" in ff.atom_types
        assert "Xe" in ff.atom_types
        # --- Ar: Buckingham-style parameters and metadata ---
        assert sympify("r") in ff.atom_types["Ar"].independent_variables
        assert ff.atom_types["Ar"].parameters["A"] == u.unyt_quantity(
            0.1, u.kcal / u.mol
        )
        assert ff.atom_types["Ar"].parameters["B"] == u.unyt_quantity(4.0, u.nm)
        assert ff.atom_types["Ar"].parameters["C"] == u.unyt_quantity(
            0.5, u.kcal / u.mol * u.nm ** 6
        )
        assert ff.atom_types["Ar"].mass == u.unyt_quantity(39.948, u.amu)
        assert ff.atom_types["Ar"].charge == u.unyt_quantity(0.0, u.coulomb)
        assert ff.atom_types["Ar"].description == "Argon atom"
        assert ff.atom_types["Ar"].definition == "Ar"
        assert ff.atom_types["Ar"].expression == sympify(
            "(A*exp(-B/r) - C/r**6)"
        )
        # --- Xe: same expression family, different parameter values ---
        assert sympify("r") in ff.atom_types["Xe"].independent_variables
        assert "A" in ff.atom_types["Xe"].parameters
        assert ff.atom_types["Xe"].parameters["A"] == u.unyt_quantity(
            0.2, u.kcal / u.mol
        )
        assert ff.atom_types["Xe"].parameters["B"] == u.unyt_quantity(5.0, u.nm)
        assert ff.atom_types["Xe"].parameters["C"] == u.unyt_quantity(
            0.3, u.kcal / u.mol * u.nm ** 6
        )
        assert ff.atom_types["Xe"].mass == u.unyt_quantity(131.293, u.amu)
        assert ff.atom_types["Xe"].charge == u.unyt_quantity(0.0, u.coulomb)
        assert ff.atom_types["Xe"].description == "Xenon atom"
        assert ff.atom_types["Xe"].definition == "Xe"
        assert ff.atom_types["Xe"].expression == sympify(
            "(A*exp(-B/r) - C/r**6)"
        )
        # --- Li: only the (non-zero) charge is checked here ---
        assert ff.atom_types["Li"].charge == u.unyt_quantity(1.0, u.coulomb)
    def test_ff_bondtypes_from_xml(self, ff):
        """Bond types parse with correct parameters and ordered member types."""
        assert len(ff.bond_types) == 2
        assert "Ar~Ar" in ff.bond_types
        assert "Xe~Xe" in ff.bond_types
        # Ar~Ar harmonic bond parameters
        assert sympify("r") in ff.bond_types["Ar~Ar"].independent_variables
        assert ff.bond_types["Ar~Ar"].parameters["r_eq"] == u.unyt_quantity(
            10.0, u.nm
        )
        assert ff.bond_types["Ar~Ar"].parameters["k"] == u.unyt_quantity(
            10000, u.kJ / u.mol
        )
        assert ff.bond_types["Ar~Ar"].member_types == ("Ar", "Ar")
        # Xe~Xe harmonic bond parameters
        assert sympify("r") in ff.bond_types["Xe~Xe"].independent_variables
        assert ff.bond_types["Xe~Xe"].parameters["r_eq"] == u.unyt_quantity(
            10.0, u.nm
        )
        assert ff.bond_types["Xe~Xe"].parameters["k"] == u.unyt_quantity(
            20000, u.kJ / u.mol
        )
        assert ff.bond_types["Xe~Xe"].member_types == ("Xe", "Xe")
    def test_ff_angletypes_from_xml(self, ff):
        """Angle types parse with correct parameters and member identifiers."""
        assert len(ff.angle_types) == 2
        assert "Ar~Ar~Ar" in ff.angle_types
        assert "Xe~Xe~Xe" in ff.angle_types
        # Ar~Ar~Ar: identified by member *types*
        assert sympify("r") in ff.angle_types["Ar~Ar~Ar"].independent_variables
        assert ff.angle_types["Ar~Ar~Ar"].parameters["r_eq"] == u.unyt_quantity(
            10.0, u.nm
        )
        assert ff.angle_types["Ar~Ar~Ar"].parameters["z"] == u.unyt_quantity(
            100, u.kJ / u.mol
        )
        assert ff.angle_types["Ar~Ar~Ar"].member_types == ("Ar", "Ar", "Ar")
        # Xe~Xe~Xe: identified by member *classes* -- presumably the XML
        # declares this angle by atom class rather than type; confirm there.
        assert sympify("r") in ff.angle_types["Xe~Xe~Xe"].independent_variables
        assert ff.angle_types["Xe~Xe~Xe"].parameters["r_eq"] == u.unyt_quantity(
            10.0, u.nm
        )
        assert ff.angle_types["Xe~Xe~Xe"].parameters["z"] == u.unyt_quantity(
            20, u.kJ / u.mol
        )
        assert ff.angle_types["Xe~Xe~Xe"].member_classes == ("Xe", "Xe", "Xe")
    def test_ff_dihedraltypes_from_xml(self, ff):
        """Dihedral types parse with correct parameters and member classes."""
        assert len(ff.dihedral_types) == 2
        assert "Xe~Xe~Xe~Xe" in ff.dihedral_types
        assert "Ar~Ar~Ar~Ar" in ff.dihedral_types
        # Ar~Ar~Ar~Ar dihedral
        assert (
            sympify("r")
            in ff.dihedral_types["Ar~Ar~Ar~Ar"].independent_variables
        )
        assert ff.dihedral_types["Ar~Ar~Ar~Ar"].parameters[
            "r_eq"
        ] == u.unyt_quantity(10.0, u.nm)
        assert ff.dihedral_types["Ar~Ar~Ar~Ar"].parameters[
            "z"
        ] == u.unyt_quantity(100, u.kJ / u.mol)
        assert ff.dihedral_types["Ar~Ar~Ar~Ar"].member_classes == (
            "Ar",
            "Ar",
            "Ar",
            "Ar",
        )
        # Xe~Xe~Xe~Xe dihedral
        assert (
            sympify("r")
            in ff.dihedral_types["Xe~Xe~Xe~Xe"].independent_variables
        )
        assert ff.dihedral_types["Xe~Xe~Xe~Xe"].parameters[
            "r_eq"
        ] == u.unyt_quantity(10.0, u.nm)
        assert ff.dihedral_types["Xe~Xe~Xe~Xe"].parameters[
            "z"
        ] == u.unyt_quantity(20, u.kJ / u.mol)
        assert ff.dihedral_types["Xe~Xe~Xe~Xe"].member_classes == (
            "Xe",
            "Xe",
            "Xe",
            "Xe",
        )
    def test_ff_impropertypes_from_xml(self, ff):
        """The single improper type parses with correct parameters and member types."""
        assert len(ff.improper_types) == 1
        assert "Xe~Xe~Xe~Xe" in ff.improper_types
        assert (
            sympify("r")
            in ff.improper_types["Xe~Xe~Xe~Xe"].independent_variables
        )
        assert ff.improper_types["Xe~Xe~Xe~Xe"].parameters[
            "r_eq"
        ] == u.unyt_quantity(10.0, u.nm)
        assert ff.improper_types["Xe~Xe~Xe~Xe"].parameters[
            "z"
        ] == u.unyt_quantity(20, u.kJ / u.mol)
        assert ff.improper_types["Xe~Xe~Xe~Xe"].member_types == (
            "Xe",
            "Xe",
            "Xe",
            "Xe",
        )
    def test_ff_pairpotentialtypes_from_xml(self, ff):
        """The single pair-potential type parses with correct parameters."""
        assert len(ff.pairpotential_types) == 1
        assert "Xe~Xe" in ff.pairpotential_types
        assert (
            sympify("r")
            in ff.pairpotential_types["Xe~Xe"].independent_variables
        )
        assert ff.pairpotential_types["Xe~Xe"].parameters[
            "sigma"
        ] == u.unyt_quantity(10.0, u.nm)
        assert ff.pairpotential_types["Xe~Xe"].parameters[
            "k"
        ] == u.unyt_quantity(0.1, u.kJ / u.mol)
        assert ff.pairpotential_types["Xe~Xe"].member_types == ("Xe", "Xe")
    def test_ff_charmm_xml(self):
        """CHARMM-style XML parses, including list-valued (multi-term) dihedral parameters."""
        charm_ff = ForceField(get_path("trimmed_charmm.xml"))
        assert charm_ff.name == "topologyCharmm"
        assert "*~CS~SS~*" in charm_ff.dihedral_types
        # Test list of parameters
        assert isinstance(
            charm_ff.dihedral_types["*~CE1~CE1~*"].parameters["k"], list
        )
        # This ensures that even though the parameters is a list, they can be hashed (by equality checks)
        assert (
            charm_ff.dihedral_types["*~CE1~CE1~*"]
            == charm_ff.dihedral_types["*~CE1~CE1~*"]
        )
        assert len(charm_ff.dihedral_types["*~CE1~CE1~*"].parameters["k"]) == 2
        # Test Correct Parameter Values
        assert charm_ff.dihedral_types["*~CE1~CE1~*"].parameters["k"] == [
            u.unyt_quantity(0.6276, u.kJ),
            u.unyt_quantity(35.564, u.kJ),
        ]
def test_non_unique_params(self):
with pytest.raises(DocumentInvalid):
ForceField(get_path("ff-example-nonunique-params.xml"))
def test_missing_params(self):
with pytest.raises(ForceFieldParseError):
ForceField(get_path("ff-example-missing-parameter.xml"))
def test_elementary_charge_to_coulomb(self, ff):
elementary_charge = ff.atom_types["Li"].charge.to(u.elementary_charge)
assert elementary_charge.units == u.Unit(u.elementary_charge)
def test_atomclass_groups_charm_buck_ff(self):
ff = ForceField(get_path("opls_charmm_buck.xml"))
assert len(ff.atom_class_groups["CT"]) == 2
    def test_ff_periodic_dihedrals_from_alphanumeric_symbols(self):
        """Potentials keyed by alphanumeric parameter symbols parse correctly."""
        ff = ForceField(get_path("opls_charmm_buck.xml"))
        assert "A" in ff.atom_types["buck_O"].parameters
        # len() of "c0" raises TypeError -- presumably the parameter is a
        # scalar quantity here rather than a list; confirm against the XML.
        with pytest.raises(TypeError):
            assert len(
                ff.dihedral_types["opls_140~*~*~opls_140"].parameters["c0"]
            )
        assert len(ff.dihedral_types["NH2~CT1~C~O"].parameters["delta"]) == 1
def test_ff_from_etree(self):
ff_etree = lxml.etree.parse(get_path("opls_charmm_buck.xml"))
ff = ForceField(ff_etree)
assert ff
def test_ff_from_etree_iterable(self):
ff_etrees = [
lxml.etree.parse(get_path("opls_charmm_buck.xml")),
lxml.etree.parse(get_path("trimmed_charmm.xml")),
]
ff = ForceField(ff_etrees)
assert ff
def test_ff_mixed_type_error(self):
with pytest.raises(TypeError):
ff = ForceField([5, "20"])
    def test_named_potential_groups(self, named_groups_ff):
        """Potentials land in the named groups declared in the XML, with expected sizes."""
        assert named_groups_ff.potential_groups["BuckinghamPotential"]
        assert (
            named_groups_ff.angle_types["Xe~Xe~Xe"]
            in named_groups_ff.potential_groups["HarmonicAngle"].values()
        )
        # Group cardinalities as declared in ff-example1.xml
        assert len(named_groups_ff.potential_groups["BuckinghamPotential"]) == 3
        assert len(named_groups_ff.potential_groups["HarmonicBond"]) == 2
        assert len(named_groups_ff.potential_groups["HarmonicAngle"]) == 2
        assert len(named_groups_ff.potential_groups["PeriodicProper"]) == 2
        assert len(named_groups_ff.potential_groups["RBProper"]) == 1
        assert len(named_groups_ff.potential_groups["LJ"]) == 1
def test_potential_types_by_expression(self, named_groups_ff):
atom_types_grouped_by_expression = (
named_groups_ff.group_atom_types_by_expression()
)
bond_types_grouped_by_expression = (
named_groups_ff.group_bond_types_by_expression()
)
angle_types_grouped_by_expression = (
named_groups_ff.group_angle_types_by_expression()
)
dihedral_types_grouped_by_expression = (
named_groups_ff.group_dihedral_types_by_expression()
)
improper_types_grouped_by_expression = (
named_groups_ff.group_improper_types_by_expression()
)
pairpotential_types_grouped_by_expression = (
named_groups_ff.group_pairpotential_types_by_expression()
)
assert (
len(atom_types_grouped_by_expression["A*exp(-B/r) - C/r**6"]) == 3
)
assert len(bond_types_grouped_by_expression["0.5*k*(r - r_eq)**2"]) == 2
assert (
len(angle_types_grouped_by_expression["0.5*z*(r - r_eq)**2"]) == 2
)
assert (
len(dihedral_types_grouped_by_expression["0.5*z*(r - r_eq)**2"])
== 2
)
assert (
len(improper_types_grouped_by_expression["0.5*z*(r - r_eq)**2"])
== 1
)
assert (
len(
pairpotential_types_grouped_by_expression[
"4*k*(-sigma**6/r**6 + sigma**12/r**12)"
]
)
== 1
)
def test_forcefield_missing_atom_types(self):
with pytest.raises(MissingAtomTypesError):
ff = ForceField(
get_path(filename=get_path("ff_missing_atom_types.xml"))
)
def test_forcefield_missing_atom_types_non_strict(self):
ff = ForceField(
get_path(filename=get_path("ff_missing_atom_types.xml")),
strict=False,
)
    def test_forcefeld_get_potential_atom_type(self, opls_ethane_foyer):
        """get_potential('atom_type', ...) returns the full opls_135 atom type.

        NOTE(review): "forcefeld" in the method name is a typo; renaming
        would change the collected pytest id, so it is left as-is.
        """
        at = opls_ethane_foyer.get_potential("atom_type", key=["opls_135"])
        assert at.expression == sympify(
            "ep * ((sigma/r)**12 - (sigma/r)**6) + q / (e0 * r)"
        )
        params = at.parameters
        assert "ep" in params
        assert "sigma" in params
        assert "e0" in params
        assert sympify("r") in at.independent_variables
        # Parameter values in the order returned by params.values()
        assert allclose_units_mixed(
            params.values(),
            [
                0.276144 * u.kJ / u.mol,
                0.35 * u.nm,
                8.8542e-12 * u.Unit("A**2*s**4/(kg*m**3)"),
                -0.18 * u.C,
            ],
        )
    def test_forcefield_get_parameters_atom_type(self, opls_ethane_foyer):
        """get_parameters('atom_type', ...) returns the opls_140 parameter values."""
        params = opls_ethane_foyer.get_parameters("atom_type", key=["opls_140"])
        assert allclose_units_mixed(
            params.values(),
            [
                0.12552 * u.kJ / u.mol,
                0.25 * u.nm,
                8.8542e-12 * u.Unit("A**2*s**4/(kg*m**3)"),
                0.06 * u.C,
            ],
        )
    def test_forcefield_get_parameters_atom_type_copy(self, opls_ethane_foyer):
        """copy=True and copy=False return numerically identical parameters."""
        params = opls_ethane_foyer.get_parameters(
            "atom_type", key=["opls_140"], copy=False
        )
        params_copy = opls_ethane_foyer.get_parameters(
            "atom_type", key=["opls_140"], copy=True
        )
        # NOTE(review): this only checks value equality; an identity check
        # (e.g. that copy=True yields a distinct object) would also pin the
        # copy semantics -- consider adding one.
        assert allclose_units_mixed(params.values(), params_copy.values())
    def test_forcefield_get_potential_bond_type(self, opls_ethane_foyer):
        """get_potential('bond_type', ...) returns the harmonic C-H bond type."""
        bt = opls_ethane_foyer.get_potential(
            "bond_type", key=["opls_135", "opls_140"]
        )
        assert bt.name == "BondType-Harmonic-2"
        params = bt.parameters
        assert "k" in params
        assert "r_eq" in params
        assert sympify("r") in bt.independent_variables
        assert allclose_units_mixed(
            params.values(), [284512.0 * u.kJ / u.nm ** 2, 0.109 * u.nm]
        )
def test_forcefield_get_potential_bond_type_reversed(
self, opls_ethane_foyer
):
assert opls_ethane_foyer.get_potential(
"bond_type", ["opls_135", "opls_140"]
) == opls_ethane_foyer.get_potential(
"bond_type", ["opls_140", "opls_135"]
)
    def test_forcefield_get_parameters_bond_type(self, opls_ethane_foyer):
        """get_parameters('bond_type', ...) returns the C-C bond parameters."""
        params = opls_ethane_foyer.get_parameters(
            "bond_type", key=["opls_135", "opls_135"]
        )
        assert allclose_units_mixed(
            params.values(), [224262.4 * u.kJ / u.nm ** 2, 0.1529 * u.nm]
        )
    def test_forcefield_get_potential_angle_type(self, opls_ethane_foyer):
        """get_potential('angle_type', ...) returns the harmonic C-C-H angle type."""
        at = opls_ethane_foyer.get_potential(
            "angle_type", key=["opls_135", "opls_135", "opls_140"]
        )
        assert at.name == "AngleType-Harmonic-1"
        params = at.parameters
        assert "k" in params
        assert "theta_eq" in params
        assert sympify("theta") in at.independent_variables
        assert allclose_units_mixed(
            params.values(),
            [313.8 * u.kJ / u.radian ** 2, 1.932079482 * u.radian],
        )
def test_forcefield_get_potential_angle_type_reversed(
self, opls_ethane_foyer
):
assert opls_ethane_foyer.get_potential(
"angle_type", ["opls_135", "opls_135", "opls_140"]
) == opls_ethane_foyer.get_potential(
"angle_type", ["opls_140", "opls_135", "opls_135"]
)
    def test_forcefield_get_parameters_angle_type(self, opls_ethane_foyer):
        """get_parameters('angle_type', ...) returns the H-C-H angle parameters."""
        params = opls_ethane_foyer.get_parameters(
            "angle_type", key=["opls_140", "opls_135", "opls_140"]
        )
        assert allclose_units_mixed(
            params.values(),
            [276.144 * u.kJ / u.radian ** 2, 1.8814649337 * u.radian],
        )
    def test_forcefield_get_potential_dihedral_type(self, opls_ethane_foyer):
        """get_potential('dihedral_type', ...) returns the RB proper dihedral type."""
        dt = opls_ethane_foyer.get_potential(
            "dihedral_type",
            key=["opls_140", "opls_135", "opls_135", "opls_140"],
        )
        assert dt.name == "DihedralType-RB-Proper-1"
        params = dt.parameters
        # Ryckaert-Bellemans coefficients c0..c5
        assert "c0" in params
        assert "c1" in params
        assert "c2" in params
        assert "c3" in params
        assert "c4" in params
        assert "c5" in params
        assert sympify("phi") in dt.independent_variables
        assert allclose_units_mixed(
            params.values(),
            [0.6276, 1.8828, 0.0, -2.5104, 0.0, 0.0] * u.kJ / u.mol,
        )
    def test_forcefield_get_parameters_dihedral_type(self, opls_ethane_foyer):
        """get_parameters('dihedral_type', ...) returns the RB coefficients."""
        params = opls_ethane_foyer.get_parameters(
            "dihedral_type",
            key=["opls_140", "opls_135", "opls_135", "opls_140"],
        )
        assert allclose_units_mixed(
            params.values(),
            [0.6276, 1.8828, 0.0, -2.5104, 0.0, 0.0] * u.kJ / u.mol,
        )
def test_forcefield_get_potential_non_exisistent_group(
self, opls_ethane_foyer
):
with pytest.raises(ValueError):
opls_ethane_foyer.get_potential("non_group", ["a", "b", "c"])
def test_forcefield_get_potential_non_string_key(self, opls_ethane_foyer):
with pytest.raises(TypeError):
opls_ethane_foyer.get_potential("atom_type", key=[111])
def test_get_atom_type_missing(self, opls_ethane_foyer):
with pytest.raises(MissingPotentialError):
opls_ethane_foyer._get_atom_type("opls_359", warn=False)
with pytest.warns(UserWarning):
opls_ethane_foyer._get_atom_type("opls_359", warn=True)
def test_get_bond_type_missing(self, opls_ethane_foyer):
with pytest.raises(MissingPotentialError):
opls_ethane_foyer._get_bond_type(
["opls_359", "opls_600"], warn=False
)
with pytest.warns(UserWarning):
opls_ethane_foyer._get_bond_type(
["opls_359", "opls_600"], warn=True
)
def test_get_angle_type_missing(self, opls_ethane_foyer):
with pytest.raises(MissingPotentialError):
opls_ethane_foyer._get_angle_type(
["opls_359", "opls_600", "opls_700"], warn=False
)
with pytest.warns(UserWarning):
opls_ethane_foyer._get_angle_type(
["opls_359", "opls_600", "opls_700"], warn=True
)
def test_get_dihedral_type_missing(self, opls_ethane_foyer):
with pytest.raises(MissingPotentialError):
opls_ethane_foyer._get_dihedral_type(
["opls_359", "opls_600", "opls_700", "opls_800"], warn=False
)
with pytest.warns(UserWarning):
opls_ethane_foyer._get_dihedral_type(
["opls_359", "opls_600", "opls_700", "opls_800"], warn=True
)
def test_get_improper_type_missing(self, opls_ethane_foyer):
with pytest.raises(MissingPotentialError):
opls_ethane_foyer._get_improper_type(
["opls_359", "opls_600", "opls_700", "opls_800"], warn=False
)
with pytest.warns(UserWarning):
| |
None,
env_variables: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
handlers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StandardAppVersionHandlerArgs']]]]] = None,
inbound_services: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
instance_class: Optional[pulumi.Input[str]] = None,
libraries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StandardAppVersionLibraryArgs']]]]] = None,
manual_scaling: Optional[pulumi.Input[pulumi.InputType['StandardAppVersionManualScalingArgs']]] = None,
noop_on_destroy: Optional[pulumi.Input[bool]] = None,
project: Optional[pulumi.Input[str]] = None,
runtime: Optional[pulumi.Input[str]] = None,
runtime_api_version: Optional[pulumi.Input[str]] = None,
service: Optional[pulumi.Input[str]] = None,
threadsafe: Optional[pulumi.Input[bool]] = None,
version_id: Optional[pulumi.Input[str]] = None,
vpc_access_connector: Optional[pulumi.Input[pulumi.InputType['StandardAppVersionVpcAccessConnectorArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = StandardAppVersionArgs.__new__(StandardAppVersionArgs)
__props__.__dict__["automatic_scaling"] = automatic_scaling
__props__.__dict__["basic_scaling"] = basic_scaling
__props__.__dict__["delete_service_on_destroy"] = delete_service_on_destroy
if deployment is None and not opts.urn:
raise TypeError("Missing required property 'deployment'")
__props__.__dict__["deployment"] = deployment
if entrypoint is None and not opts.urn:
raise TypeError("Missing required property 'entrypoint'")
__props__.__dict__["entrypoint"] = entrypoint
__props__.__dict__["env_variables"] = env_variables
__props__.__dict__["handlers"] = handlers
__props__.__dict__["inbound_services"] = inbound_services
__props__.__dict__["instance_class"] = instance_class
__props__.__dict__["libraries"] = libraries
__props__.__dict__["manual_scaling"] = manual_scaling
__props__.__dict__["noop_on_destroy"] = noop_on_destroy
__props__.__dict__["project"] = project
if runtime is None and not opts.urn:
raise TypeError("Missing required property 'runtime'")
__props__.__dict__["runtime"] = runtime
__props__.__dict__["runtime_api_version"] = runtime_api_version
if service is None and not opts.urn:
raise TypeError("Missing required property 'service'")
__props__.__dict__["service"] = service
__props__.__dict__["threadsafe"] = threadsafe
__props__.__dict__["version_id"] = version_id
__props__.__dict__["vpc_access_connector"] = vpc_access_connector
__props__.__dict__["name"] = None
super(StandardAppVersion, __self__).__init__(
'gcp:appengine/standardAppVersion:StandardAppVersion',
resource_name,
__props__,
opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            automatic_scaling: Optional[pulumi.Input[pulumi.InputType['StandardAppVersionAutomaticScalingArgs']]] = None,
            basic_scaling: Optional[pulumi.Input[pulumi.InputType['StandardAppVersionBasicScalingArgs']]] = None,
            delete_service_on_destroy: Optional[pulumi.Input[bool]] = None,
            deployment: Optional[pulumi.Input[pulumi.InputType['StandardAppVersionDeploymentArgs']]] = None,
            entrypoint: Optional[pulumi.Input[pulumi.InputType['StandardAppVersionEntrypointArgs']]] = None,
            env_variables: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            handlers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StandardAppVersionHandlerArgs']]]]] = None,
            inbound_services: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            instance_class: Optional[pulumi.Input[str]] = None,
            libraries: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StandardAppVersionLibraryArgs']]]]] = None,
            manual_scaling: Optional[pulumi.Input[pulumi.InputType['StandardAppVersionManualScalingArgs']]] = None,
            name: Optional[pulumi.Input[str]] = None,
            noop_on_destroy: Optional[pulumi.Input[bool]] = None,
            project: Optional[pulumi.Input[str]] = None,
            runtime: Optional[pulumi.Input[str]] = None,
            runtime_api_version: Optional[pulumi.Input[str]] = None,
            service: Optional[pulumi.Input[str]] = None,
            threadsafe: Optional[pulumi.Input[bool]] = None,
            version_id: Optional[pulumi.Input[str]] = None,
            vpc_access_connector: Optional[pulumi.Input[pulumi.InputType['StandardAppVersionVpcAccessConnectorArgs']]] = None) -> 'StandardAppVersion':
        """
        Get an existing StandardAppVersion resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['StandardAppVersionAutomaticScalingArgs']] automatic_scaling: Automatic scaling is based on request rate, response latencies, and other application metrics.
               Structure is documented below.
        :param pulumi.Input[pulumi.InputType['StandardAppVersionBasicScalingArgs']] basic_scaling: Basic scaling creates instances when your application receives requests. Each instance will be shut down when the application becomes idle. Basic scaling is ideal for work that is intermittent or driven by user activity.
               Structure is documented below.
        :param pulumi.Input[bool] delete_service_on_destroy: If set to `true`, the service will be deleted if it is the last version.
        :param pulumi.Input[pulumi.InputType['StandardAppVersionDeploymentArgs']] deployment: Code and application artifacts that make up this version.
               Structure is documented below.
        :param pulumi.Input[pulumi.InputType['StandardAppVersionEntrypointArgs']] entrypoint: The entrypoint for the application.
               Structure is documented below.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] env_variables: Environment variables available to the application.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StandardAppVersionHandlerArgs']]]] handlers: An ordered list of URL-matching patterns that should be applied to incoming requests.
               The first matching URL handles the request and other request handlers are not attempted.
               Structure is documented below.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] inbound_services: A list of the types of messages that this application is able to receive.
               Each value may be one of `INBOUND_SERVICE_MAIL`, `INBOUND_SERVICE_MAIL_BOUNCE`, `INBOUND_SERVICE_XMPP_ERROR`, `INBOUND_SERVICE_XMPP_MESSAGE`, `INBOUND_SERVICE_XMPP_SUBSCRIBE`, `INBOUND_SERVICE_XMPP_PRESENCE`, `INBOUND_SERVICE_CHANNEL_PRESENCE`, and `INBOUND_SERVICE_WARMUP`.
        :param pulumi.Input[str] instance_class: Instance class that is used to run this version. Valid values are
               AutomaticScaling: F1, F2, F4, F4_1G
               BasicScaling or ManualScaling: B1, B2, B4, B4_1G, B8
               Defaults to F1 for AutomaticScaling and B2 for ManualScaling and BasicScaling. If no scaling is specified, AutomaticScaling is chosen.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['StandardAppVersionLibraryArgs']]]] libraries: Configuration for third-party Python runtime libraries that are required by the application.
               Structure is documented below.
        :param pulumi.Input[pulumi.InputType['StandardAppVersionManualScalingArgs']] manual_scaling: A service with manual scaling runs continuously, allowing you to perform complex initialization and rely on the state of its memory over time.
               Structure is documented below.
        :param pulumi.Input[str] name: Full Serverless VPC Access Connector name e.g. /projects/my-project/locations/us-central1/connectors/c1.
               (NOTE(review): this description appears copy-pasted from vpc_access_connector; `name` is the version
               resource's own name in the API -- confirm against the provider schema.)
        :param pulumi.Input[bool] noop_on_destroy: If set to `true`, the application version will not be deleted.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        :param pulumi.Input[str] runtime: Desired runtime. Example python27.
        :param pulumi.Input[str] runtime_api_version: The version of the API in the given runtime environment.
               Please see the app.yaml reference for valid values at `https://cloud.google.com/appengine/docs/standard/<language>/config/appref`
               Substitute `<language>` with `python`, `java`, `php`, `ruby`, `go` or `nodejs`.
        :param pulumi.Input[str] service: AppEngine service resource
        :param pulumi.Input[bool] threadsafe: Whether multiple requests can be dispatched to this version at once.
        :param pulumi.Input[str] version_id: Relative name of the version within the service. For example, `v1`. Version names can contain only lowercase letters, numbers, or hyphens. Reserved names,"default", "latest", and any name with the prefix "ah-".
        :param pulumi.Input[pulumi.InputType['StandardAppVersionVpcAccessConnectorArgs']] vpc_access_connector: Enables VPC connectivity for standard apps.
               Structure is documented below.
        """
        # Merge the caller's options with the looked-up provider id.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Build the state bag directly, bypassing __init__ validation.
        __props__ = _StandardAppVersionState.__new__(_StandardAppVersionState)
        __props__.__dict__["automatic_scaling"] = automatic_scaling
        __props__.__dict__["basic_scaling"] = basic_scaling
        __props__.__dict__["delete_service_on_destroy"] = delete_service_on_destroy
        __props__.__dict__["deployment"] = deployment
        __props__.__dict__["entrypoint"] = entrypoint
        __props__.__dict__["env_variables"] = env_variables
        __props__.__dict__["handlers"] = handlers
        __props__.__dict__["inbound_services"] = inbound_services
        __props__.__dict__["instance_class"] = instance_class
        __props__.__dict__["libraries"] = libraries
        __props__.__dict__["manual_scaling"] = manual_scaling
        __props__.__dict__["name"] = name
        __props__.__dict__["noop_on_destroy"] = noop_on_destroy
        __props__.__dict__["project"] = project
        __props__.__dict__["runtime"] = runtime
        __props__.__dict__["runtime_api_version"] = runtime_api_version
        __props__.__dict__["service"] = service
        __props__.__dict__["threadsafe"] = threadsafe
        __props__.__dict__["version_id"] = version_id
        __props__.__dict__["vpc_access_connector"] = vpc_access_connector
        return StandardAppVersion(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="automaticScaling")
    def automatic_scaling(self) -> pulumi.Output[Optional['outputs.StandardAppVersionAutomaticScaling']]:
        """
        Automatic scaling is based on request rate, response latencies, and other application metrics.
        Structure is documented below.
        """
        # Read-only output resolved from the resource state via pulumi.get.
        return pulumi.get(self, "automatic_scaling")
    @property
    @pulumi.getter(name="basicScaling")
    def basic_scaling(self) -> pulumi.Output[Optional['outputs.StandardAppVersionBasicScaling']]:
        """
        Basic scaling creates instances when your application receives requests. Each instance will be shut down when the application becomes idle. Basic scaling is ideal for work that is intermittent or driven by user activity.
        Structure is documented below.
        """
        # Read-only output resolved from the resource state via pulumi.get.
        return pulumi.get(self, "basic_scaling")
    @property
    @pulumi.getter(name="deleteServiceOnDestroy")
    def delete_service_on_destroy(self) -> pulumi.Output[Optional[bool]]:
        """
        If set to `true`, the service will be deleted if it is the last version.
        """
        # Read-only output resolved from the resource state via pulumi.get.
        return pulumi.get(self, "delete_service_on_destroy")
    @property
    @pulumi.getter
    def deployment(self) -> pulumi.Output['outputs.StandardAppVersionDeployment']:
        """
        Code and application artifacts that make up this version.
        Structure is documented below.
        """
        # Required output (non-Optional annotation); resolved via pulumi.get.
        return pulumi.get(self, "deployment")
    @property
    @pulumi.getter
    def entrypoint(self) -> pulumi.Output['outputs.StandardAppVersionEntrypoint']:
        """
        The entrypoint for the application.
        Structure is documented below.
        """
        # Required output (non-Optional annotation); resolved via pulumi.get.
        return pulumi.get(self, "entrypoint")
    @property
    @pulumi.getter(name="envVariables")
    def env_variables(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Environment variables available to the application.
        """
        # Read-only output resolved from the resource state via pulumi.get.
        return pulumi.get(self, "env_variables")
    @property
    @pulumi.getter
    def handlers(self) -> pulumi.Output[Sequence['outputs.StandardAppVersionHandler']]:
        """
        An ordered list of URL-matching patterns that should be applied to incoming requests.
        The first matching URL handles the request and other request handlers are not attempted.
        Structure is documented below.
        """
        # Read-only output resolved from the resource state via pulumi.get.
        return pulumi.get(self, "handlers")
    @property
    @pulumi.getter(name="inboundServices")
    def inbound_services(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        A list of the types of messages that this application is able to receive.
        Each value may be one of `INBOUND_SERVICE_MAIL`, `INBOUND_SERVICE_MAIL_BOUNCE`, `INBOUND_SERVICE_XMPP_ERROR`, `INBOUND_SERVICE_XMPP_MESSAGE`, `INBOUND_SERVICE_XMPP_SUBSCRIBE`, `INBOUND_SERVICE_XMPP_PRESENCE`, `INBOUND_SERVICE_CHANNEL_PRESENCE`, and `INBOUND_SERVICE_WARMUP`.
        """
        # Read-only output resolved from the resource state via pulumi.get.
        return pulumi.get(self, "inbound_services")
    @property
    @pulumi.getter(name="instanceClass")
    def instance_class(self) -> pulumi.Output[str]:
        """
        Instance class that is used to run this version. Valid values are
        AutomaticScaling: F1, F2, F4, F4_1G
        BasicScaling or ManualScaling: B1, B2, B4, B4_1G, B8
        Defaults to F1 for AutomaticScaling and B2 for ManualScaling and BasicScaling. If no scaling is specified, AutomaticScaling is chosen.
        """
        # Read-only output resolved from the resource state via pulumi.get.
        return pulumi.get(self, "instance_class")
    @property
    @pulumi.getter
    def libraries(self) -> pulumi.Output[Optional[Sequence['outputs.StandardAppVersionLibrary']]]:
        """
        Configuration for third-party Python runtime libraries that are required by the application.
        Structure is documented below.
        """
        # Read-only output resolved from the resource state via pulumi.get.
        return pulumi.get(self, "libraries")
    @property
    @pulumi.getter(name="manualScaling")
    def manual_scaling(self) -> pulumi.Output[Optional['outputs.StandardAppVersionManualScaling']]:
        """
        A service with manual scaling runs continuously, allowing you to perform complex initialization and rely on the state of its memory over time.
        Structure is documented below.
        """
        # Read-only output resolved from the resource state via pulumi.get.
        return pulumi.get(self, "manual_scaling")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Full Serverless VPC Access Connector name e.g. /projects/my-project/locations/us-central1/connectors/c1.

        NOTE(review): this description appears copy-pasted from
        vpc_access_connector; `name` here is the version resource's own
        name as returned by the API. Confirm against the provider schema.
        """
        return pulumi.get(self, "name")
@property
@pulumi.getter(name="noopOnDestroy")
def noop_on_destroy(self) -> pulumi.Output[Optional[bool]]:
"""
If set to `true`, the application version will | |
on success
type: str
sample: OL7_X86_64_STANDARD_10
build_spec_file:
description:
- The path to the build specification file for this Environment. The default location if not specified is build_spec.yaml
returned: on success
type: str
sample: build_spec_file_example
stage_execution_timeout_in_seconds:
description:
- Timeout for the Build Stage Execution. Value in seconds.
returned: on success
type: int
sample: 56
build_source_collection:
description:
- ""
returned: on success
type: complex
contains:
items:
description:
- Collection of Build sources. In case of UPDATE operation, replaces existing Build sources list. Merging with existing
Build Sources is not supported.
returned: on success
type: complex
contains:
name:
description:
- Name of the Build source. This must be unique within a BuildSourceCollection. The name can be used by
customers to locate the working directory pertinent to this repository.
returned: on success
type: str
sample: name_example
connection_type:
description:
- The type of Source Provider.
returned: on success
type: str
sample: GITHUB
repository_url:
description:
- Url for repository
returned: on success
type: str
sample: repository_url_example
branch:
description:
- branch name
returned: on success
type: str
sample: branch_example
repository_id:
description:
- The Devops Code Repository Id
returned: on success
type: str
sample: "ocid1.repository.oc1..xxxxxxEXAMPLExxxxxx"
connection_id:
description:
- Connection identifier pertinent to GITHUB source provider
returned: on success
type: str
sample: "ocid1.connection.oc1..xxxxxxEXAMPLExxxxxx"
primary_build_source:
description:
- Name of the BuildSource in which the build_spec.yml file need to be located. If not specified, the 1st entry in the
BuildSource collection will be chosen as Primary.
returned: on success
type: str
sample: primary_build_source_example
steps:
description:
- The details about all the steps in a Build Stage
returned: on success
type: complex
contains:
name:
description:
- Name of the step.
returned: on success
type: str
sample: name_example
state:
description:
- State of the step.
returned: on success
type: str
sample: WAITING
time_started:
description:
- Time when the step started.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_finished:
description:
- Time when the step finished.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
exported_variables:
description:
- ""
returned: on success
type: complex
contains:
items:
description:
- List of exported variables
returned: on success
type: complex
contains:
name:
description:
- Name of the parameter (Case-sensitive).
returned: on success
type: str
sample: name_example
value:
description:
- value of the argument
returned: on success
type: str
sample: value_example
delivered_artifacts:
description:
- ""
returned: on success
type: complex
contains:
items:
description:
- List of Artifacts delivered via DeliverArtifactStage
returned: on success
type: complex
contains:
deploy_artifact_id:
description:
- The OCID of the deploy artifact definition
returned: on success
type: str
sample: "ocid1.deployartifact.oc1..xxxxxxEXAMPLExxxxxx"
output_artifact_name:
description:
- Name of the output artifact defined in the build spec
returned: on success
type: str
sample: output_artifact_name_example
artifact_type:
description:
- Type of Artifact Delivered
returned: on success
type: str
sample: GENERIC_ARTIFACT
artifact_repository_id:
description:
- The OCID of the artifact registry repository used by the DeliverArtifactStage
returned: on success
type: str
sample: "ocid1.artifactrepository.oc1..xxxxxxEXAMPLExxxxxx"
delivered_artifact_id:
description:
- The OCID of the artifact pushed by the DeliverArtifactStage
returned: on success
type: str
sample: "ocid1.deliveredartifact.oc1..xxxxxxEXAMPLExxxxxx"
path:
description:
- Path of the repository where artifact was pushed
returned: on success
type: str
sample: path_example
version:
description:
- Version of the artifact pushed
returned: on success
type: str
sample: version_example
delivered_artifact_hash:
description:
- The Hash of the OCIR artifact pushed by the DeliverArtifactStage
returned: on success
type: str
sample: delivered_artifact_hash_example
image_uri:
description:
- The imageUri of the OCIR artifact pushed by the DeliverArtifactStage
returned: on success
type: str
sample: image_uri_example
artifact_override_parameters:
description:
- ""
returned: on success
type: complex
contains:
items:
description:
- List of artifact override arguments at the time of deployment.
returned: on success
type: complex
contains:
deploy_artifact_id:
description:
- The OCID of the artifact to which this parameter applies.
returned: on success
type: str
sample: "ocid1.deployartifact.oc1..xxxxxxEXAMPLExxxxxx"
name:
description:
- Name of the parameter (case-sensitive).
returned: on success
type: str
sample: name_example
value:
description:
- Value of the parameter.
returned: on success
type: str
sample: value_example
deployment_id:
description:
- Identifier of the Deployment Triggered.
returned: on success
type: str
sample: "ocid1.deployment.oc1..xxxxxxEXAMPLExxxxxx"
commit_info:
description:
- ""
returned: on success
type: complex
contains:
repository_url:
description:
- Repository URL
returned: on success
type: str
sample: repository_url_example
repository_branch:
description:
- Name of the repository branch.
returned: on success
type: str
sample: repository_branch_example
commit_hash:
description:
- Commit Hash pertinent to the repository URL and Branch specified.
returned: on success
type: str
sample: commit_hash_example
build_outputs:
description:
- ""
returned: on success
type: complex
contains:
exported_variables:
description:
- ""
returned: on success
type: complex
contains:
items:
description:
- List of exported variables
returned: on success
type: complex
contains:
name:
description:
- Name of the parameter (Case-sensitive).
returned: on success
type: str
sample: name_example
value:
description:
- value of the argument
returned: on success
type: str
sample: value_example
delivered_artifacts:
description:
- ""
returned: on success
type: complex
contains:
items:
description:
- List of Artifacts delivered via DeliverArtifactStage
returned: on success
type: complex
contains:
deploy_artifact_id:
description:
- The OCID of the deploy artifact definition
returned: on success
type: str
sample: "ocid1.deployartifact.oc1..xxxxxxEXAMPLExxxxxx"
output_artifact_name:
description:
- Name of the output artifact defined in the build spec
returned: on success
type: str
sample: output_artifact_name_example
artifact_type:
description:
- Type of Artifact Delivered
returned: on success
type: str
sample: GENERIC_ARTIFACT
artifact_repository_id:
description:
- The OCID of the artifact registry repository used by the DeliverArtifactStage
returned: on success
type: str
sample: "ocid1.artifactrepository.oc1..xxxxxxEXAMPLExxxxxx"
delivered_artifact_id:
description:
- The OCID of the artifact pushed by the DeliverArtifactStage
returned: on success
type: str
sample: "ocid1.deliveredartifact.oc1..xxxxxxEXAMPLExxxxxx"
path:
description:
- Path of the repository where artifact was pushed
returned: on success
type: str
sample: path_example
version:
description:
- Version of the artifact pushed
returned: on success
type: str
sample: version_example
delivered_artifact_hash:
description:
- The Hash of the OCIR artifact pushed by the DeliverArtifactStage
returned: on success
type: str
sample: delivered_artifact_hash_example
image_uri:
description:
- The imageUri of the OCIR artifact pushed by the DeliverArtifactStage
returned: on success
type: str
sample: image_uri_example
artifact_override_parameters:
description:
- ""
returned: on success
type: complex
contains:
items:
description:
- List of artifact override arguments at the time of deployment.
returned: on success
type: complex
contains:
deploy_artifact_id:
description:
- The OCID of the artifact to which this parameter applies.
returned: on success
type: str
sample: "ocid1.deployartifact.oc1..xxxxxxEXAMPLExxxxxx"
name:
description:
- Name of the parameter (case-sensitive).
returned: on success
type: str
sample: name_example
value:
description:
- Value of the parameter.
returned: on success
type: str
sample: value_example
freeform_tags:
description:
- "Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. See L(Resource
Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). Example: `{\\"bar-key\\": \\"value\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace. See L(Resource
Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
system_tags:
description:
- "Usage of system tag keys. These predefined keys are scoped to namespaces. See L(Resource
Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). Example: `{\\"orcl-cloud\\": {\\"free-tier-retained\\":
\\"true\\"}}`"
returned: on success
type: dict
sample: {}
sample: {
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"project_id": "ocid1.project.oc1..xxxxxxEXAMPLExxxxxx",
"build_pipeline_id": "ocid1.buildpipeline.oc1..xxxxxxEXAMPLExxxxxx",
"build_run_source": {
"source_type": "MANUAL",
"trigger_id": "ocid1.trigger.oc1..xxxxxxEXAMPLExxxxxx",
"trigger_info": {
"display_name": "display_name_example",
"actions": [{
"type": "TRIGGER_BUILD_PIPELINE",
"filter": {
"trigger_source": "DEVOPS_CODE_REPOSITORY",
"events": [],
"include": {
"head_ref": "head_ref_example",
"base_ref": "base_ref_example"
}
},
"build_pipeline_id": "ocid1.buildpipeline.oc1..xxxxxxEXAMPLExxxxxx"
}]
},
"repository_id": "ocid1.repository.oc1..xxxxxxEXAMPLExxxxxx"
},
"build_run_arguments": {
"items": [{
"name": "name_example",
"value": "value_example"
}]
},
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00",
"lifecycle_state": "ACCEPTED",
"lifecycle_details": "lifecycle_details_example",
"build_run_progress": {
"time_started": "2013-10-20T19:20:30+01:00",
"time_finished": "2013-10-20T19:20:30+01:00",
"build_pipeline_stage_run_progress": {
"stage_display_name": "stage_display_name_example",
"build_pipeline_stage_type": "BUILD",
"build_pipeline_stage_id": "ocid1.buildpipelinestage.oc1..xxxxxxEXAMPLExxxxxx",
"time_started": "2013-10-20T19:20:30+01:00",
"time_finished": "2013-10-20T19:20:30+01:00",
"status": "ACCEPTED",
"build_pipeline_stage_predecessors": {
"items": [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
}]
},
"actual_build_runner_shape": "actual_build_runner_shape_example",
"actual_build_runner_shape_config": {
"ocpus": 1.2,
"memory_in_gbs": 1.2
},
"image": "OL7_X86_64_STANDARD_10",
"build_spec_file": "build_spec_file_example",
"stage_execution_timeout_in_seconds": 56,
"build_source_collection": {
"items": [{
"name": "name_example",
"connection_type": "GITHUB",
"repository_url": "repository_url_example",
"branch": "branch_example",
"repository_id": "ocid1.repository.oc1..xxxxxxEXAMPLExxxxxx",
"connection_id": "ocid1.connection.oc1..xxxxxxEXAMPLExxxxxx"
}]
},
"primary_build_source": "primary_build_source_example",
"steps": [{
"name": "name_example",
"state": "WAITING",
"time_started": "2013-10-20T19:20:30+01:00",
"time_finished": "2013-10-20T19:20:30+01:00"
}],
"exported_variables": {
"items": [{
"name": "name_example",
| |
# Source repository: JackToppen/stem-cell-patterning (Python)
import random as r
import csv
import cv2
import pickle
import math
import psutil
from backend import *
class Simulation:
""" This class makes sure any subclasses have the necessary
attributes to run a simulation.
"""
def __init__(self, name: str, output_path: str):
    """Base simulation state: output paths, agent/step counters, array and
    graph registries, and parameters read from templates/general.yaml.

    - name: simulation name, used to build the output directory and file prefixes
    - output_path: root directory that will contain the simulation directory

    NOTE(review): os, np, and template_params come from `from backend import *`.
    """
    # set name and separator
    self.name = name
    self.separator = os.path.sep
    # make the following paths
    self.main_path = output_path + self.name + self.separator    # path to main simulation directory
    self.templates_path = os.path.abspath("templates") + self.separator    # path to the YAML template directory
    self.images_path = self.main_path + name + "_images" + self.separator    # path to images output directory
    self.values_path = self.main_path + name + "_values" + self.separator    # path to CSV output directory
    # hold the running number of agents and the step to begin at (updated by continuation mode)
    self.number_agents = 0
    self.current_step = 0
    self.beginning_step = 1
    # hold the names of the agent arrays and the names of any graphs (each agent is a node)
    self.agent_array_names = list()
    self.graph_names = list()
    # store the runtimes of methods with @record_time decorator
    self.method_times = dict()
    """
    The following instance variables can be updated through template files located in the "templates"
    directory. The values must be specified using YAML syntax.

    (general.yaml)
    1   # How many frames per second of the output video that collects all step images? Ex. 6
    2   fps: 6
    3

    (simulation.py)
    keys = template_params(paths.templates + "general.yaml")
    self.fps = keys["fps"]
    """
    # get values from general YAML file
    keys = template_params(self.templates_path + "general.yaml")    # read keys from general.yaml
    self.num_to_start = keys["num_to_start"]
    self.cuda = keys["cuda"]
    self.end_step = keys["end_step"]
    self.size = np.array(keys["size"])
    self.output_values = keys["output_values"]
    self.output_images = keys["output_images"]
    self.record_initial_step = keys["record_initial_step"]
    self.image_quality = keys["image_quality"]
    self.video_quality = keys["video_quality"]
    self.fps = keys["fps"]
def agent_initials(self):
    """ Adds agents into the simulation and specifies the values each agent
        starts with. Agent arrays default to 1-dim float64 zeros; see
        agent_array() for dtype/shape/initial-condition options, and
        add_agents() for the "agent_type" grouping mechanism.
    """
    # seed the simulation with the configured number of agents
    self.add_agents(self.num_to_start)
    # random 3D positions within the space, and a constant radius of 5 per agent
    self.agent_array("locations", override=np.random.rand(self.number_agents, 3) * self.size)
    self.agent_array("radii", func=lambda: 5)
def steps(self):
    """ Runs the simulation loop. Optionally records the initial state, then
        for every step from beginning_step through end_step logs runtime info
        and writes the step image, CSV values, temporary state, and data
        output; finally assembles a video from all step images.
    """
    # if True, record starting values/image for the simulation
    if self.record_initial_step:
        self.record_initials()
    # iterate over all steps specified
    for step_number in range(self.beginning_step, self.end_step + 1):
        self.current_step = step_number
        # records step run time and prints the current step and number of agents
        self.info()
        # save multiple forms of information about the simulation at the current step
        self.step_image()
        self.step_values()
        self.temp()
        self.data()
    # ends the simulation by creating a video from all of the step images
    self.create_video()
def add_agents(self, number, agent_type=None):
    """ Registers `number` additional agents with the simulation.
        - number: how many agents to add
        - agent_type: optional string marker; the (begin, end) slice of the
          new agents is remembered so initial conditions can later be applied
          to just this group
    """
    # remember where this group's slice starts, then grow the agent count
    start = self.number_agents
    self.number_agents = start + number
    if agent_type is None:
        return
    # lazily create the type registry, then record the slice for this marker
    if not hasattr(self, "agent_types"):
        self.agent_types = dict()
    self.agent_types[agent_type] = (start, self.number_agents)
def agent_array(self, array_name, agent_type=None, dtype=float, vector=None, func=None, override=None):
    """ Adds an agent array to the simulation used to hold values for all agents.
        - array_name: the name of the instance variable made for the agent array
        - agent_type: string marker from add_agents(); limits func to that slice
        - dtype: the data type of the array
        - vector: if 2-dimensional, the length of the vector for each agent
        - func: a zero-argument function called once per index to produce the
          initial value
        - override: pass an existing array instead of generating a new one;
          its first dimension must equal the current number of agents
    """
    # if using existing array
    if override is not None:
        # make sure array has correct length
        if override.shape[0] != self.number_agents:
            raise Exception("Length of override array does not match number of agents in simulation!")
        # create instance variable and add array name to holder
        else:
            self.__dict__[array_name] = override
            self.agent_array_names.append(array_name)
    # otherwise check if instance variable exists and try to make new array
    elif not hasattr(self, array_name):
        # add array name to holder
        self.agent_array_names.append(array_name)
        # get the dimensions of the array
        if vector is None:
            size = self.number_agents    # 1-dimensional array
        else:
            size = (self.number_agents, vector)    # 2-dimensional array (1-dimensional of vectors)
        # if using object types, make NoneType array, otherwise make array of zeros
        if dtype == str or dtype == object:
            self.__dict__[array_name] = np.empty(size, dtype=object)
        else:
            self.__dict__[array_name] = np.zeros(size, dtype=dtype)
        # only apply initial condition if not NoneType
        # NOTE(review): indentation was lost in this copy of the file; the
        # initial-condition block is placed here so func applies only when a
        # new array is created (never to an override) -- confirm upstream
        if func is not None:
            # get bounds for applying initial conditions to array
            if agent_type is None:
                begin = 0
                end = self.number_agents
            else:
                begin = self.agent_types[agent_type][0]
                end = self.agent_types[agent_type][1]
            # iterate through array applying function
            for i in range(begin, end):
                self.__dict__[array_name][i] = func()
def agent_graph(self, graph_name):
    """ Adds a graph to the simulation with one node per current agent.
        - graph_name: the name of the instance variable made for the graph

        NOTE(review): Graph is provided by `from backend import *`.
    """
    # create the instance variable, then register the graph's name
    setattr(self, graph_name, Graph(self.number_agents))
    self.graph_names.append(graph_name)
def assign_bins(self, max_agents, distance):
    """ Generalizes agent locations to bins within a lattice imposed on
        the agent space, used for accelerating neighbor searches.
        - max_agents: the current maximum number of agents that can fit
          into a bin
        - distance: the radius of search length

        Returns (bins, bins_help, bin_locations, max_agents); max_agents may
        have grown if a bin overflowed and the placement was re-run.
    """
    # run until all agents have been put into bins
    while True:
        # calculate the dimensions of the bins array and the bins helper array, include extra bins for agents that
        # may fall outside of the simulation space
        bins_help_size = np.ceil(self.size / distance).astype(int) + 3
        bins_size = np.append(bins_help_size, max_agents)
        # create the bins arrays
        bins_help = np.zeros(bins_help_size, dtype=int)    # holds the number of agents in each bin
        bins = np.zeros(bins_size, dtype=int)    # holds the indices of each agent in a bin
        # generalize the agent locations to bin indices and offset by 1 to prevent missing agents outside space
        bin_locations = np.floor_divide(self.locations, distance).astype(int) + 1
        # use JIT function from backend.py to speed up placement of agents
        # NOTE(review): assign_bins_jit is provided by `from backend import *`
        bins, bins_help = assign_bins_jit(self.number_agents, bin_locations, bins, bins_help, max_agents)
        # break the loop if all agents were accounted for, otherwise revalue the maximum number of agents and run
        # one more time
        current_max_agents = np.amax(bins_help)
        if max_agents >= current_max_agents:
            break
        else:
            max_agents = current_max_agents * 2    # double to prevent continual updating
    return bins, bins_help, bin_locations, max_agents
@record_time
def get_neighbors(self, graph_name, distance, clear=True):
""" Finds all neighbors, within fixed radius, for each each agent.
- graph_name: name of the instance variable pointing to the graph
- distance: the radius of search length
- clear: if removing existing edges, otherwise all edges are saved
"""
# get graph object reference and if desired, remove all existing edges in the graph
graph = self.__dict__[graph_name]
if clear:
graph.delete_edges(None)
# assign each of the agents to bins, updating the max agents in a bin (if necessary)
bins, bins_help, bin_locations, graph.max_agents = self.assign_bins(graph.max_agents, distance)
# run until all edges are accounted for
while True:
# get the total amount of edges able to be stored and make the following arrays
length = self.number_agents * graph.max_neighbors
edges = np.zeros((length, 2), dtype=int) # hold all edges
if_edge = np.zeros(length, dtype=bool) # say if each edge exists
edge_count = np.zeros(self.number_agents, dtype=int) # hold count of edges per agent
# if using CUDA GPU
if self.cuda:
# allow the following arrays to be passed to the GPU
edges = cuda.to_device(edges)
if_edge = cuda.to_device(if_edge)
edge_count = cuda.to_device(edge_count)
# specify threads-per-block and blocks-per-grid values
tpb = 72
bpg = math.ceil(self.number_agents / tpb)
# call the CUDA | |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import telegram
from telegram.error import Unauthorized, TelegramError
import random
# player count the single base deck is sized for; Deck adds proportionally
# more cards for games above this threshold
THRESHOLD_PLAYERS = 10

# the bot's API token is kept out of source control in api_key.txt
with open("api_key.txt", 'r') as f:
    TOKEN = f.read().rstrip()

# module-level Bot instance shared by all games
bot = telegram.Bot(token=TOKEN)
class Player:
    """One participant in a game: a seat number (id) and the cards held."""

    def __init__(self, id, hand):
        self.hand = hand
        self.id = id

    def get_hand(self):
        return self.hand

    def get_id(self):
        return self.id

    def remove_card(self, id):
        """Pop and return the card at position id, or None if out of range."""
        if not 0 <= id < len(self.hand):
            return None
        return self.hand.pop(id)

    def get_formatted_hand(self):
        """Render the hand as a numbered list suitable for a chat message."""
        entries = ["({0}) {1}\n".format(i, card) for i, card in enumerate(self.hand)]
        return "Your current hand:\n\n" + "".join(entries)

    def add_card(self, c):
        """Append a newly drawn card."""
        self.hand.append(c)

    def insert_card(self, c, i):
        """Put a card back at position i (used when a play is rejected)."""
        self.hand.insert(i, c)

    def set_hand(self, hand):
        """Replace the whole hand (used by swap/rotate rules)."""
        self.hand = hand
class Card:
    """A single Uno card.

    Values 0-9 are number cards; 10 = Skip, 11 = Reverse, 12 = Draw Two,
    each carrying a color 'R', 'Y', 'G', or 'B'. Value 13 is a Wild and 14 a
    Wild Draw Four; both start with an empty color that is assigned once the
    card is played.
    """

    # display suffixes for action cards; the leading space is intentional
    # because the color letter is prepended
    _ACTION_NAMES = {10: " Skip", 11: " Reverse", 12: " Draw Two",
                     13: " Wild", 14: " Wild Draw Four"}

    def __init__(self, value, color):
        self.value = value
        self.color = color

    def get_color(self):
        return self.color

    def get_value(self):
        return self.value

    def check_valid_color(self):
        """True if the card carries one of the four real colors."""
        return self.color in ['R', 'Y', 'G', 'B']

    def check_valid_value(self):
        """True for any non-negative value."""
        return self.value >= 0

    def is_wild(self):
        """True for Wild (13) and Wild Draw Four (14)."""
        return self.value in (13, 14)

    # Just for wilds.
    def set_color(self, c):
        """Assign a color; silently ignored for non-wild cards."""
        if self.is_wild():
            self.color = c

    def __str__(self):
        if self.value < 10:
            return self.color + str(self.value)
        return self.color + self._ACTION_NAMES.get(self.value, "")
class Deck:
    """The Uno draw pile and discard pile.

    One deck batch holds, per color pass over R/Y/G/B: two copies of each
    number card 0-9, one each of Skip (10), Reverse (11) and Draw Two (12),
    plus one colorless Wild (13) and Wild Draw Four (14) per pass (4 of each).
    Games with more than THRESHOLD_PLAYERS players get one extra batch per
    additional player so there are enough cards to deal.
    """

    def __init__(self, num_players):
        # deck: face-down draw pile (drawn from the end);
        # played: face-up discard pile (last element is the top card)
        self.deck = []
        self.played = []
        # one base batch, plus one extra batch per player above the threshold.
        # Bug fix: the extra-player loop previously created cards from the
        # *player surplus* index (Card(i, c)) instead of the card value j,
        # flooding oversized games with 0/1-valued cards only.
        batches = 1 + max(0, num_players - THRESHOLD_PLAYERS)
        for _ in range(batches):
            self._add_batch()
        random.shuffle(self.deck)

    def _add_batch(self):
        # append one full deck's worth of cards (see class docstring)
        for value in range(0, 15):
            for color in ['R', 'Y', 'G', 'B']:
                if value < 10:
                    self.deck.append(Card(value, color))
                    self.deck.append(Card(value, color))
                elif value < 13:
                    self.deck.append(Card(value, color))
                else:
                    self.deck.append(Card(value, ''))

    def double_deck(self):
        """Refill a fully exhausted game (both piles empty) with a fresh batch."""
        if len(self.deck) <= 0 and len(self.played) <= 0:
            self._add_batch()
            random.shuffle(self.deck)

    def reshuffle(self):
        """Turn the discard pile (minus its top card) back into the draw pile.

        Bug fix: the old code did `self.deck = random.shuffle(...)`, but
        random.shuffle shuffles in place and returns None; it also replaced
        the `played` *list* with a bare Card. Both corrupted the piles the
        first time the draw pile ran dry.
        """
        if len(self.deck) <= 0 < len(self.played):
            self.deck = self.played[:-1]
            random.shuffle(self.deck)
            self.played = [self.played[-1]]

    def draw_card(self):
        """Draw one card, replenishing or recycling the piles as needed."""
        if len(self.deck) <= 0 and len(self.played) <= 0:
            self.double_deck()
        if len(self.deck) <= 0:
            self.reshuffle()
        return self.deck.pop()

    def draw_n_cards(self, n):
        """Draw n cards as a list."""
        return [self.draw_card() for _ in range(n)]

    def get_topmost_card(self):
        """Return the top of the discard pile, or None if nothing was played."""
        if len(self.played) > 0:
            return self.played[-1]
        return None

    def draw_hand(self):
        """Draw a starting hand of 7 cards."""
        return [self.draw_card() for _ in range(7)]

    def play_card(self, c):
        """Put a structurally valid card onto the discard pile."""
        if c.is_wild():
            self.played.append(c)
            return
        if c.check_valid_color() and c.check_valid_value():
            self.played.append(c)

    def check_valid_play(self, c):
        """True if c may legally be played on the current top card."""
        top = self.get_topmost_card()
        if top is None:
            return True
        return top.get_color() == c.get_color() or top.get_value() == c.get_value() or c.is_wild()

    def return_card(self, c):
        """Put a card back at the bottom of the draw pile.

        Bug fix: wild cards (empty color) failed check_valid_color and were
        silently dropped, shrinking the deck whenever the initial-card draw
        rejected a wild. They are now returned as well.
        """
        if c.is_wild() or (c.check_valid_color() and c.check_valid_value()):
            self.deck.insert(0, c)

    def set_wild(self, c):
        """Assign color c ('r'/'y'/'g'/'b', any case) to a wild on top of the pile."""
        top = self.get_topmost_card()
        if c.lower() in ['r', 'y', 'g', 'b'] and top is not None and top.is_wild():
            self.played[-1].set_color(c.upper())
class Game:
def __init__(self, chat_id, players):
    """ Create a game bound to one Telegram group chat.
        - chat_id: chat to which all game messages are sent
        - players: dict mapping Telegram user id -> display name
    """
    self.turn = 0                      # Player.id of the player whose turn it is
    self.players = {}                  # user id -> Player
    self.players_and_names = players   # user id -> display name
    self.players_and_ready = {}        # user id -> has sent /ready
    self.ready_to_play = False         # set once all players are ready
    self.deck = Deck(len(players))     # piles sized for the player count
    # wild-card state: who must still pick a color
    self.waiting_for_wild = False
    self.waiting_for_wild_id = ""
    self.waiting_for_wild_name = ""
    # uno state: a player is down to one card and may be called out
    self.uno_pending = False
    self.uno_pending_id = ""
    self.skip_pending = False          # a Skip was played but not yet consumed
    self.dir = False                   # NOTE(review): never read in this chunk -- possibly dead
    self.reversed = False              # play-direction flag (True after an odd number of Reverses)
    self.draw_fours_pending = 0        # stacked Wild Draw Four count
    self.draw_twos_pending = 0         # stacked Draw Two count
    self.chat_id = chat_id
    self.hpt_lap = -1                  # NOTE(review): "hpt" purpose is not evident from this chunk
    self.advanced_rules = False        # enables the 0 (rotate) and 7 (swap) rules
    # seven-card state (advanced rules): who must still choose a swap target
    self.waiting_for_seven = False
    self.waiting_for_seven_id = ""
    self.waiting_for_seven_name = ""
    self.last_num_cards_drawn = 0
    # seat each player in insertion order and deal a starting hand
    count = 0
    for user_id, name in players.items():
        self.send_message(name + " has been added to the game.\n")
        self.players[user_id] = Player(count, self.deck.draw_hand())
        self.players_and_ready[user_id] = False
        count += 1
    self.send_message("Everything has been set up. Waiting for players to /ready.\n")
def send_message(self, text):
    """ Send text to this game's chat via the module-level bot.

        Bug fix: the old handler caught TelegramError only to `raise e`,
        which rebinds the exception and (on Python 2) discards the original
        traceback; a bare `raise` re-raises cleanly. Callers still see the
        same TelegramError.
    """
    try:
        bot.send_message(chat_id=self.chat_id, text=text)
    except TelegramError:
        raise
def set_hpt_lap(self, lap):
    """Record the hpt lap marker (NOTE(review): "hpt" meaning not evident here)."""
    self.hpt_lap = lap

def get_hpt_lap(self):
    """Return the hpt lap marker."""
    return self.hpt_lap

def get_players_and_ready(self):
    """Return the user-id -> ready-flag mapping."""
    return self.players_and_ready
def set_ready_to_play(self, val):
    """Set the ready flag; non-boolean values are rejected with a chat notice."""
    if val not in (False, True):
        self.send_message("Ready to play must be a Boolean value.")
        return
    self.ready_to_play = val

def get_ready_to_play(self):
    """Return whether the game has been started via /ready."""
    return self.ready_to_play

def set_advanced_rules(self, val):
    """Enable/disable advanced (0/7) rules; non-booleans are rejected."""
    if val not in (False, True):
        self.send_message("Advanced rules to play must be a Boolean value.")
        return
    self.advanced_rules = val

def is_advanced_rules(self):
    """Return whether advanced rules are active."""
    return self.advanced_rules
def play_initial_card(self):
    """Flip the starting card, requiring a plain number card (value < 10).

    Action and wild cards are returned to the bottom of the draw pile and a
    replacement is drawn until a number card comes up.
    """
    if self.deck.get_topmost_card() is not None:
        self.send_message("The starting card has already been played.")
        return
    card = self.deck.draw_card()
    while card.value >= 10:
        self.deck.return_card(card)
        card = self.deck.draw_card()
    self.deck.play_card(card)
def check_for_win(self):
    """Return the user id of a player with an empty hand, or None.

    No winner is reported while a wild color choice is pending.
    """
    if self.waiting_for_wild:
        return None
    for user_id, player in self.players.items():
        if len(player.get_hand()) == 0:
            return user_id
    return None
def get_player_id_by_num(self, n):
    """Return the user id of the player seated at number n, or ""."""
    for user_id, player in self.players.items():
        if player.get_id() == n:
            return user_id
    return ""

def get_player_name_by_num(self, n):
    """Return the display name of the player seated at number n, or ""."""
    for user_id, player in self.players.items():
        if player.get_id() == n:
            return self.players_and_names[user_id]
    return ""

def get_player_by_num(self, n):
    """Return the Player object seated at number n, or None."""
    for player in self.players.values():
        if player.get_id() == n:
            return player
    return None
def play_zero(self):
    """Rotate every hand one seat in the current play direction (advanced 0 rule).

    Bug fix: the old implementation special-cased `i == len(self.players) - 1`
    even when iterating in reverse, so the reversed rotation restored the
    first-visited player's own hand instead of completing the cycle. Hands are
    now snapshotted first and reassigned, which is direction-safe and keeps
    the forward behavior identical (player i receives player (i+1)'s hand
    forward, player (i-1)'s hand when reversed).
    """
    count = len(self.players)
    step = -1 if self.reversed else 1
    # snapshot every hand before any reassignment
    hands = {i: self.get_player_by_num(i).get_hand() for i in range(count)}
    for i in range(count):
        self.get_player_by_num(i).set_hand(hands[(i + step) % count])
def play_seven(self, user_id_1, user_id_2):
    """Swap hands between the seven-player and a chosen player (advanced 7 rule).

    - user_id_1: the player who played the seven and must swap
    - user_id_2: the player chosen to swap with
    Returns True on a successful swap, False (with a chat notice) otherwise.

    Bug fix: the old code called .get_id() on the looked-up player before
    verifying it existed, so an unknown user_id_1 crashed with an
    AttributeError instead of producing the "You don't seem to exist!" reply.
    Existence is now checked first.
    """
    player_1 = self.players.get(user_id_1)
    player_2 = self.players.get(user_id_2)
    if player_1 is None:
        self.send_message("You don't seem to exist!")
        return False
    if player_1.get_id() != self.turn:
        self.send_message("It is not currently your turn!")
        return False
    if not self.waiting_for_seven:
        self.send_message("A seven is not on top of the played pile.")
        return False
    if user_id_1 != self.waiting_for_seven_id:
        self.send_message("You cannot swap! Waiting for %s to swap." % self.waiting_for_seven_name)
        return False
    if player_2 is None:
        self.send_message("The player you chose doesn't seem to exist!")
        return False
    if user_id_1 == user_id_2:
        self.send_message("You cannot swap hands with yourself!")
        return False
    # perform the swap and clear the pending-seven state
    hand_1 = player_1.get_hand()
    player_1.set_hand(player_2.get_hand())
    player_2.set_hand(hand_1)
    self.waiting_for_seven = False
    self.waiting_for_seven_id = ""
    self.waiting_for_seven_name = ""
    return True
def play_card(self, id, card_id):
    """Attempt to play the card at index card_id from player id's hand.

    Returns True if the card was played (and applies its effect), False
    otherwise (an explanatory message is sent to the chat).

    Bug fix: the uno-pending and seven-pending guards previously sent their
    "You cannot play a card" message but then FELL THROUGH and played the
    card anyway; both now return False as the message promises.
    """
    player = self.players.get(id)
    if player is None:
        self.send_message("You don't seem to exist!")
        return False
    if player.get_id() != self.turn:
        self.send_message("It is not currently your turn!")
        return False
    if self.advanced_rules and self.waiting_for_seven:
        self.send_message("You cannot play a card; waiting for %s to choose a player for swapping hands." %
                          self.waiting_for_seven_name)
        return False
    if self.waiting_for_wild:
        self.send_message("You cannot play a card; waiting for %s to set the wild color." %
                          self.waiting_for_wild_name)
        return False
    if self.uno_pending:
        self.send_message("You cannot play a card; Uno is pending.")
        return False
    if self.waiting_for_seven:
        self.send_message("You cannot play a card; a seven is pending.")
        return False
    # take the card out of the hand; put it back if the play is illegal
    card = player.remove_card(card_id)
    if card is None:
        self.send_message("You cannot remove the card with this ID.")
        return False
    if not self.deck.check_valid_play(card):
        self.send_message("This is not a valid card.")
        player.insert_card(card, card_id)
        return False
    self.deck.play_card(card)
    # apply card effects
    if self.advanced_rules and card.get_value() == 0:
        self.play_zero()
        self.send_message("Everyone's hands have rotated!")
    if self.advanced_rules and card.get_value() == 7:
        self.waiting_for_seven = True
        self.waiting_for_seven_id = id
        self.waiting_for_seven_name = self.players_and_names[id]
        self.send_message("Now choose a player with whom you'll swap hands using /seven [player_num]!")
    if card.is_wild():
        self.waiting_for_wild = True
        self.waiting_for_wild_id = id
        self.waiting_for_wild_name = self.players_and_names[id]
        self.send_message("Now choose a color using /wild R, Y, G, or B!")
    if card.get_value() == 10:
        self.skip_pending = True
        return True
    if card.get_value() == 11:
        self.reversed = not self.reversed
        self.send_message("The direction of the game has been reversed!")
    if card.get_value() == 12:
        self.draw_twos_pending += 1
    if card.get_value() == 14:
        self.draw_fours_pending += 1
    return True
def is_uno_pending(self):
    """True while an Uno call is unresolved."""
    return self.uno_pending

def is_skip_pending(self):
    """True while a played Skip has yet to take effect."""
    return self.skip_pending

def is_seven_pending(self):
    """True while a played seven awaits a hand-swap choice."""
    return self.waiting_for_seven

def set_skip_pending(self, val):
    """Set the skip-pending flag; non-boolean values are rejected with a notice."""
    if val not in (False, True):
        self.send_message("Skip pending must be a Boolean value.")
        return
    self.skip_pending = val
def check_uno_caller(self, id):
if self.players_and_names.get(id) is None:
self.send_message("You are not in the game!")
return -1
if id != self.uno_pending_id:
self.players[self.uno_pending_id].add_card(self.deck.draw_card())
self.uno_pending = False
self.uno_pending_id = ""
return | |
airportname FROM `{0}` WHERE lower(city) = 'lyon' AND country = 'France'".format(self.bucket_name), server=self.master)
# Stop session
results = self.run_cbq_query(query="SELECT ADVISOR({{'action': 'stop', 'session': '{0}'}})".format(stopped_session), server=self.master)
# List sessions
results = self.run_cbq_query(query="SELECT ADVISOR({'action':'list'}) as List", server=self.master)
all_sessions = dict()
for task in results['results'][0]['List']:
all_sessions[task['tasks_cache']['state']] = task['tasks_cache']['name']
self.assertEqual(len(all_sessions), 3)
self.assertEqual(all_sessions['scheduled'], active_session)
self.assertEqual(all_sessions['cancelled'], stopped_session)
self.assertEqual(all_sessions['completed'], completed_session)
except Exception as e:
self.log.error("Advisor session failed: {0}".format(e))
self.fail()
def test_list_session_active(self):
    """Listing with status 'active' must return only the still-scheduled session."""
    try:
        def start_session(duration):
            # Start an advisor session and return its id.
            res = self.run_cbq_query(
                query=f"SELECT ADVISOR({{'action': 'start', 'duration': '{duration}', 'query_count': 2 }})",
                server=self.master)
            return res['results'][0]['$1']['session']

        active_session = start_session('99h')
        completed_session = start_session('50ms')
        stopped_session = start_session('1600m')

        airport_query = f"SELECT airportname FROM `{self.bucket_name}` WHERE lower(city) = 'lyon' AND country = 'France'"
        for _ in range(2):
            self.run_cbq_query(query=airport_query, server=self.master)

        # Stop session
        self.run_cbq_query(
            query=f"SELECT ADVISOR({{'action': 'stop', 'session': '{stopped_session}'}})",
            server=self.master)

        # List ACTIVE sessions
        listing = self.run_cbq_query(
            query="SELECT ADVISOR({'action':'list', 'status':'active'}) as List",
            server=self.master)
        sessions_by_state = {
            task['tasks_cache']['state']: task['tasks_cache']['name']
            for task in listing['results'][0]['List']}
        self.assertEqual(len(sessions_by_state), 1)
        self.assertEqual(sessions_by_state['scheduled'], active_session)
    except Exception as e:
        self.log.error(f"Advisor session failed: {e}")
        self.fail()
def test_list_session_completed(self):
    """Listing with status 'completed' must return only the session that ran out."""
    try:
        def start_session(duration):
            # Start an advisor session and return its id.
            res = self.run_cbq_query(
                query=f"SELECT ADVISOR({{'action': 'start', 'duration': '{duration}', 'query_count': 2 }})",
                server=self.master)
            return res['results'][0]['$1']['session']

        active_session = start_session('99h')
        completed_session = start_session('50ms')
        stopped_session = start_session('1600m')

        airport_query = f"SELECT airportname FROM `{self.bucket_name}` WHERE lower(city) = 'lyon' AND country = 'France'"
        for _ in range(2):
            self.run_cbq_query(query=airport_query, server=self.master)

        # Stop session
        self.run_cbq_query(
            query=f"SELECT ADVISOR({{'action': 'stop', 'session': '{stopped_session}'}})",
            server=self.master)

        # List COMPLETED sessions
        listing = self.run_cbq_query(
            query="SELECT ADVISOR({'action':'list', 'status':'completed'}) as List",
            server=self.master)
        sessions_by_state = {
            task['tasks_cache']['state']: task['tasks_cache']['name']
            for task in listing['results'][0]['List']}
        self.assertEqual(len(sessions_by_state), 1)
        self.assertEqual(sessions_by_state['completed'], completed_session)
    except Exception as e:
        self.log.error(f"Advisor session failed: {e}")
        self.fail()
def test_list_session_all(self):
    """Listing with status 'all' must return scheduled, cancelled and completed sessions."""
    try:
        def start_session(duration):
            # Start an advisor session and return its id.
            res = self.run_cbq_query(
                query=f"SELECT ADVISOR({{'action': 'start', 'duration': '{duration}', 'query_count': 2 }})",
                server=self.master)
            return res['results'][0]['$1']['session']

        active_session = start_session('99h')
        completed_session = start_session('50ms')
        stopped_session = start_session('1600m')

        airport_query = f"SELECT airportname FROM `{self.bucket_name}` WHERE lower(city) = 'lyon' AND country = 'France'"
        for _ in range(2):
            self.run_cbq_query(query=airport_query, server=self.master)

        # Stop session
        self.run_cbq_query(
            query=f"SELECT ADVISOR({{'action': 'stop', 'session': '{stopped_session}'}})",
            server=self.master)

        # List ALL sessions
        listing = self.run_cbq_query(
            query="SELECT ADVISOR({'action':'list', 'status':'all'}) as List",
            server=self.master)
        sessions_by_state = {
            task['tasks_cache']['state']: task['tasks_cache']['name']
            for task in listing['results'][0]['List']}
        self.assertEqual(len(sessions_by_state), 3)
        self.assertEqual(sessions_by_state['scheduled'], active_session)
        self.assertEqual(sessions_by_state['cancelled'], stopped_session)
        self.assertEqual(sessions_by_state['completed'], completed_session)
    except Exception as e:
        self.log.error(f"Advisor session failed: {e}")
        self.fail()
def test_start_session_duration_value(self):
    """Every equivalent spelling of one hour must normalize to a delay of '1h0m0s'."""
    one_hour_spellings = ['3600000000000ns','3600000000us','3600000ms','3600s','60m', '1h']
    try:
        for spelling in one_hour_spellings:
            start = self.run_cbq_query(
                query=f"SELECT ADVISOR({{'action': 'start', 'duration': '{spelling}'}})",
                server=self.master)
            session = start['results'][0]['$1']['session']
            listing = self.run_cbq_query(
                query="SELECT ADVISOR({'action':'list'}) as List",
                server=self.master)
            self.assertEqual(listing['results'][0]['List'][0]['tasks_cache']['delay'], '1h0m0s')
            # Abort so the next iteration starts from a clean slate.
            self.run_cbq_query(
                query=f"SELECT ADVISOR({{'action':'abort', 'session':'{session}'}}) as Abort",
                server=self.master)
    except Exception as e:
        self.log.error(f"Advisor session failed: {e}")
        self.fail()
def test_session_duration_completed(self):
    """Every equivalent spelling of 1.8s must complete and report delay '1.8s'."""
    spellings = ['1800000000ns','1800000us','1800ms','1.8s','0.03m', '0.0005h']
    try:
        for spelling in spellings:
            start = self.run_cbq_query(
                query=f"SELECT ADVISOR({{'action': 'start', 'duration': '{spelling}'}})",
                server=self.master)
            session = start['results'][0]['$1']['session']
            # Give the short-lived session time to run out.
            self.sleep(3)
            complete = self.run_cbq_query(
                query="SELECT ADVISOR({'action':'list','status':'completed'}) as List",
                server=self.master)
            task = complete['results'][0]['List'][0]['tasks_cache']
            self.assertEqual(task['delay'], '1.8s')
            self.assertEqual(task['name'], session)
            self.assertEqual(task['state'], "completed")
            # Purge so the next iteration sees a single completed entry.
            self.run_cbq_query(
                query=f"SELECT ADVISOR({{'action':'purge', 'session':'{session}'}}) as Purge",
                server=self.master)
    except Exception as e:
        self.log.error(f"Advisor session failed: {e}")
        self.fail()
def test_session_response_below(self):
    """With a 'response' threshold below the observed query time, each statement
    should be collected by the session (run_count of 1)."""
    thresholds = ['100000000ns','100000us','100ms','0.1s', '0.000027h']
    queries = [
        f"SELECT airportname FROM `{self.bucket_name}` WHERE type = 'airport' AND lower(city) = '{city}' AND country = 'France'"
        for city in ('lyon', 'grenoble', 'nice')]
    try:
        for threshold in thresholds:
            start = self.run_cbq_query(
                query=f"SELECT ADVISOR({{'action': 'start', 'duration': '60s', 'response': '{threshold}'}})",
                server=self.master)
            session = start['results'][0]['$1']['session']
            for airport_query in queries:
                self.run_cbq_query(query=airport_query, server=self.master)
            self.run_cbq_query(
                query=f"SELECT ADVISOR({{'action':'stop', 'session':'{session}'}}) as Stop",
                server=self.master)
            get = self.run_cbq_query(
                query=f"SELECT ADVISOR({{'action':'get', 'session':'{session}'}}) as Get",
                server=self.master)
            run_count = get['results'][0]['Get'][0][0]['recommended_indexes'][0]['statements'][0]['run_count']
            self.assertEqual(run_count, 1)
            self.run_cbq_query(
                query=f"SELECT ADVISOR({{'action':'purge', 'session':'{session}'}}) as Purge",
                server=self.master)
    except Exception as e:
        self.log.error(f"Advisor session failed: {e}")
        self.fail()
def test_session_response_above(self):
    """With a 'response' threshold above the observed query time, the session
    should collect nothing and return empty advice."""
    thresholds = ['9000000000000ns','9000000000us','9000000ms','9000s', '0.25h']
    queries = [
        f"SELECT airportname FROM `{self.bucket_name}` WHERE type = 'airport' AND lower(city) = '{city}' AND country = 'France'"
        for city in ('lyon', 'grenoble', 'nice')]
    try:
        for threshold in thresholds:
            start = self.run_cbq_query(
                query=f"SELECT ADVISOR({{'action': 'start', 'duration': '60s', 'response': '{threshold}'}})",
                server=self.master)
            session = start['results'][0]['$1']['session']
            for airport_query in queries:
                self.run_cbq_query(query=airport_query, server=self.master)
            self.run_cbq_query(
                query=f"SELECT ADVISOR({{'action':'stop', 'session':'{session}'}}) as Stop",
                server=self.master)
            get = self.run_cbq_query(
                query=f"SELECT ADVISOR({{'action':'get', 'session':'{session}'}}) as Get",
                server=self.master)
            self.assertEqual(get['results'][0]['Get'][0], [[]])
            self.run_cbq_query(
                query=f"SELECT ADVISOR({{'action':'purge', 'session':'{session}'}}) as Purge",
                server=self.master)
    except Exception as e:
        self.log.error(f"Advisor session failed: {e}")
        self.fail()
def test_session_profile(self):
    """A session started with 'profile' must only collect queries run by that user.

    NOTE(review): the user's name/password values look like scrubbed
    placeholders; the literals only need to match what create_users() sets up.
    """
    self.users = [{"id": "johnDoe", "name": "<NAME>", "password": "<PASSWORD>"}]
    self.create_users()
    user_id = self.users[0]['id']
    user_pwd = self.users[0]['password']
    self.run_cbq_query(query=f"GRANT admin to {user_id}", server=self.master)
    query1 = f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
    query2 = f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "grenoble" AND country = "France"'
    query3 = f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "nice" AND country = "France"'
    try:
        start = self.run_cbq_query(
            query=f"SELECT ADVISOR({{'action': 'start', 'duration': '180s', 'profile': '{user_id}'}})",
            server=self.master)
        session = start['results'][0]['$1']['session']
        # Run the profiled user's query twice as that user.
        for _ in range(2):
            self.run_cbq_query(query=query1, username=user_id, password=user_pwd, server=self.master)
        # Run a different query as the current (non-profiled) user.
        self.run_cbq_query(query=query2, server=self.master)
        self.run_cbq_query(
            query=f"SELECT ADVISOR({{'action':'stop', 'session':'{session}'}}) as Stop",
            server=self.master)
        get = self.run_cbq_query(
            query=f"SELECT ADVISOR({{'action':'get', 'session':'{session}'}}) as Get",
            server=self.master)
        # Only the profiled user's statements may appear in the advice.
        for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
            for statement in index['statements']:
                self.assertEqual(statement['statement'], query1)
    except Exception as e:
        self.log.error(f"Advisor session failed: {e}")
        self.fail()
def test_session_all(self):
    """Exercise a session started with every option at once (duration, profile,
    query_count, response): run the profiled query five times as the profiled
    user plus one query as the current user, verify the collected advice, then
    purge the session and check the listing is empty.

    NOTE(review): the user's name/password values look like scrubbed
    placeholders; the literals only need to match what create_users() sets up.
    """
    self.users = [{"id": "joaoDoe", "name": "<NAME>", "password": "<PASSWORD>"}]
    self.create_users()
    user_id = self.users[0]['id']
    user_pwd = self.users[0]['password']
    grant = self.run_cbq_query(query=f"GRANT admin to {user_id}",server=self.master)
    query1=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
    query2=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "grenoble" AND country = "France"'
    query3=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "nice" AND country = "France"'
    try:
        start = self.run_cbq_query(query="SELECT ADVISOR({{'action':'start', 'duration':'40m', 'profile': '{0}', 'query_count':5, 'response':'50ms'}})".format(self.users[0]['id']), server=self.master)
        session = start['results'][0]['$1']['session']
        # Run query as other user.
        # Bug fix: these calls previously passed a bare, unexpanded
        # <PASSWORD> placeholder token, which is a syntax error; the
        # intended value is the user's password (see the call just below).
        results = self.run_cbq_query(query=query1, username=user_id, password=user_pwd, server=self.master)
        results = self.run_cbq_query(query=query1, username=user_id, password=user_pwd, server=self.master)
        # Run query as current user
        results = self.run_cbq_query(query=query2, server=self.master)
        # Run query as other user
        results = self.run_cbq_query(query=query1, username=user_id, password=user_pwd, server=self.master)
        results = self.run_cbq_query(query=query1, username=user_id, password=user_pwd, server=self.master)
        results = self.run_cbq_query(query=query1, username=user_id, password=user_pwd, server=self.master)
        results = self.run_cbq_query(query=query1, username=user_id, password=user_pwd, server=self.master)
        # Stop and get session
        stop = self.run_cbq_query(query="SELECT ADVISOR({{'action':'stop', 'session':'{0}'}}) as Stop".format(session), server=self.master)
        get = self.run_cbq_query(query="SELECT ADVISOR({{'action':'get', 'session':'{0}'}}) as Get".format(session), server=self.master)
        # Check advise: only query1 should be collected, with 5 profiled runs.
        for index in get['results'][0]['Get'][0][0]['recommended_indexes']:
            for statement in index['statements']:
                self.assertEqual(statement['statement'], query1)
                self.assertEqual(statement['run_count'], 5)
        # Purge and list session
        purge = self.run_cbq_query(query=f"SELECT ADVISOR({{'action':'purge', 'session':'{session}'}}) as Get", server=self.master)
        list_all = self.run_cbq_query(query="SELECT ADVISOR({'action':'list', 'status': 'all'}) as List", server=self.master)
        self.assertEqual(list_all['results'][0]['List'],[])
    except Exception as e:
        self.log.error("Advisor session failed: {0}".format(e))
        self.fail()
def test_session_cbo(self):
    """With optimizer statistics present, session advice should carry both an
    index recommendation and an update-statistics recommendation."""
    advise_index = "CREATE INDEX adv_lower_city_country_type ON `travel-sample`(lower(`city`),`country`) WHERE `type` = 'airport'"
    advise_stats = "UPDATE STATISTICS FOR `travel-sample`(lower(`city`), `country`, `type`)"
    airport_query = f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
    # update stats to ensure CBO is used
    self.run_cbq_query(query=f"update statistics for `{self.bucket_name}`(type)", server=self.master)
    try:
        start = self.run_cbq_query(
            query="SELECT ADVISOR({'action':'start', 'duration':'40m'})",
            server=self.master)
        session = start['results'][0]['$1']['session']
        self.run_cbq_query(query=airport_query, server=self.master)
        self.run_cbq_query(
            query=f"SELECT ADVISOR({{'action':'stop', 'session':'{session}'}}) as Stop",
            server=self.master)
        advice = self.run_cbq_query(
            query=f"SELECT ADVISOR({{'action':'get', 'session':'{session}'}}) as Get",
            server=self.master)
        # Every recommended index must match the expected CBO-aware advice.
        for index in advice['results'][0]['Get'][0][0]['recommended_indexes']:
            self.assertEqual(index['index'], advise_index)
            self.assertEqual(index['update_statistics'], advise_stats)
    except Exception as e:
        self.log.error(f"Advisor session failed: {e}")
        self.fail()
def test_session_query_txn(self):
query1=f'SELECT airportname FROM `{self.bucket_name}` WHERE type = "airport" AND lower(city) = "lyon" AND country = "France"'
close_txn = ['ROLLBACK WORK', 'COMMIT']
try:
for rollback_or_commit in | |
R.reduce_by(sum_values, 0)
sum_by_type = reduce_to_sums_by(by_type)
eq(R.into(
{},
R.compose(sum_by_type, R.map(R.adjust(R.multiply(10), 1))),
sum_input),
{"A": 800, "B": 800, "C": 500})
def describe_reduced():
    # Tests for R.reduced, which wraps a value to signal early termination
    # of an R.reduce fold ("short-circuit").
    @pytest.fixture
    def stop_if_gte_10():
        # Step function that marks the running total as reduced once it
        # reaches 10, so the fold stops there.
        def _fn(acc, v):
            result = acc + v
            if result >= 10:
                result = R.reduced(result)
            return result
        return _fn

    def it_wraps_a_value():
        v = {}
        eq(R.reduced(v)._transducer_value, v)

    def it_flags_value_as_reduced():
        eq(R.reduced({})._transducer_reduced, True)

    def it_short_circuits_reduce(stop_if_gte_10):
        eq(R.reduce(stop_if_gte_10, 0, [1, 2, 3, 4, 5]), 10)
def describe_reduce_right():
    # Tests for R.reduce_right (right fold).
    @pytest.fixture
    def avg():
        return lambda a, b: (a + b) / 2

    def it_folds_lists_in_the_right_order():
        eq(R.reduce_right(lambda a, b: a + b, "", ["a", "b", "c", "d"]), "abcd")

    def it_folds_subtract_over_arrays_in_the_right_order():
        # 1 - (2 - (3 - (4 - 0))) == -2
        eq(R.reduce_right(lambda a, b: a - b, 0, [1, 2, 3, 4]), -2)

    def it_folds_simple_functions_over_arrays_with_the_supplied_accumulator(avg):
        eq(R.reduce_right(avg, 54, [12, 4, 10, 6]), 12)

    def it_returns_the_accumulator_for_an_empty_array(avg):
        eq(R.reduce_right(avg, 0, []), 0)

    def it_is_curried(avg):
        something = R.reduce_right(avg, 54)
        rcat = R.reduce_right(R.concat, "")
        eq(something([12, 4, 10, 6]), 12)
        eq(rcat(["1", "2", "3", "4"]), "1234")

    def it_correctly_reports_the_arity_of_curried_versions(avg):
        something = R.reduce_right(avg, 0)
        eq(get_arity(something), 1)
def describe_aperture():
    # Tests for R.aperture (sliding windows of n consecutive elements).
    @pytest.fixture
    def seven_ls():
        return [1, 2, 3, 4, 5, 6, 7]

    def it_creates_a_list_of_n_tuples_from_a_list(seven_ls):
        eq(R.aperture(1, seven_ls), [[1], [2], [3], [4], [5], [6], [7]])
        eq(R.aperture(2, seven_ls), [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7]])
        eq(R.aperture(3, seven_ls), [[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7]])
        eq(R.aperture(4, [1, 2, 3, 4]), [[1, 2, 3, 4]])

    def it_returns_an_empty_list_when_n_gt_list_length(seven_ls):
        # Note: uses ad-hoc lists rather than the seven_ls fixture.
        eq(R.aperture(6, [1, 2, 3]), [])
        eq(R.aperture(1, []), [])

    def it_is_curried(seven_ls):
        pairwise = R.aperture(2)
        eq(pairwise(seven_ls), [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7]])

    def it_can_act_as_a_transducer(seven_ls):
        eq(R.into([], R.aperture(2), seven_ls), [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7]])
def describe_append():
    # Tests for R.append (add one element at the end of a list).
    def it_adds_the_element_to_the_end_of_the_list():
        eq(R.append("z", ["x", "y"]), ["x", "y", "z"])
        # A list argument is appended as a single (nested) element.
        eq(R.append(["a", "z"], ["x", "y"]), ["x", "y", ["a", "z"]])

    def it_works_on_empty_list():
        eq(R.append(1, []), [1])

    def it_is_curried():
        eq(inspect.isfunction(R.append(4)), True)
        eq(R.append(1)([4, 3, 2]), [4, 3, 2, 1])
def describe_chain():
    # Tests for R.chain (monadic bind: map then flatten one level).
    @pytest.fixture
    def add1():
        return lambda x: [x + 1]

    @pytest.fixture
    def dec():
        return lambda x: [x - 1]

    @pytest.fixture
    def times2():
        return lambda x: [x * 2]

    def it_maps_a_function_over_a_nested_list_and_returns_the_shallow_flattened_result(
            times2):
        eq(R.chain(times2, [1, 2, 3, 1, 0, 10, -3, 5, 7]), [2, 4, 6, 2, 0, 20, -6, 10, 14])
        eq(R.chain(times2, [1, 2, 3]), [2, 4, 6])

    def it_does_not_flatten_recursively():
        # Only one level of nesting is removed.
        f = lambda xs: [xs[0]] if len(xs) else []
        eq(R.chain(f, [[1], [[2], 100], [], [3, [4]]]), [1, [2], 3])

    def it_maps_a_function_into_a_shallow_flat_result(into_array, times2):
        eq(into_array(R.chain(times2), [1, 2, 3, 4]), [2, 4, 6, 8])

    def it_interprets_function_as_a_monad():
        h = lambda r: r * 2
        f = lambda a: lambda r: r + a
        bound = R.chain(f, h)
        # // (>>=) :: (r -> a) -> (a -> r -> b) -> (r -> b)
        # // h >>= f = \w -> f (h w) w
        eq(bound(10), (10 * 2) + 10)
        eq(R.chain(R.append, R.head)([1, 2, 3]), [1, 2, 3, 1])

    def it_dispatches_to_objects_that_implement_chain(add1):
        # Objects exposing a chain() method are dispatched to directly.
        class MappingWithChain(collections.UserDict):
            def chain(self, fn):
                return fn(self["x"])
        obj = MappingWithChain(x=100)
        eq(R.chain(add1, obj), [101])

    def it_dispatches_to_transformer_objects(add1):
        eq(_is_transformer(R.chain(add1, list_xf)), True)

    def it_composes(times2, dec):
        mdouble = R.chain(times2)
        mdec = R.chain(dec)
        eq(mdec(mdouble([10, 20, 30])), [19, 39, 59])

    def it_can_compose_transducer_style(times2, dec, into_array):
        mdouble = R.chain(times2)
        mdec = R.chain(dec)
        xcomp = R.compose(mdec, mdouble)
        eq(into_array(xcomp, [10, 20, 30]), [18, 38, 58])

    def it_is_curried(add1):
        flat_inc = R.chain(add1)
        eq(flat_inc([1, 2, 3, 4, 5, 6]), [2, 3, 4, 5, 6, 7])

    def it_correctly_reports_the_arity_of_curried_versions(add1):
        inc = R.chain(add1)
        eq(get_arity(inc), 1)
def describe_contains():
    # Tests for R.contains (membership test with R.equals semantics).
    def it_returns_true_if_an_element_is_in_a_list():
        eq(R.contains(7, [1, 2, 3, 9, 8, 7, 100, 200, 300]), True)

    def it_returns_false_if_an_element_is_not_in_a_list():
        eq(R.contains(99, [1, 2, 3, 9, 8, 7, 100, 200, 300]), False)

    def it_returns_false_for_the_empty_list():
        eq(R.contains(1, []), False)

    def it_has_r_equals_semantics(just):
        # NaN equals NaN and wrapped values compare by value, unlike `==`.
        eq(R.contains(float("nan"), [float("nan")]), True)
        eq(R.contains(just([42]), [just([42])]), True)

    def it_is_curried():
        eq(inspect.isfunction(R.contains(7)), True)
        eq(R.contains(7)([1, 2, 3]), False)
        eq(R.contains(7)([1, 2, 7, 3]), True)

    def it_is_curried_like_a_binary_operator_that_accepts_an_initial_placeholdern():
        is_digit = R.contains(R.__, ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"])
        eq(inspect.isfunction(is_digit), True)
        eq(is_digit("0"), True)
        eq(is_digit("1"), True)
        eq(is_digit("x"), False)
def describe_drop():
    # Tests for R.drop (skip the first n elements).
    def it_skips_the_first_n_elements_from_a_list_returning_the_remainder():
        eq(R.drop(3, ["a", "b", "c", "d", "e", "f", "g"]), ["d", "e", "f", "g"])

    def it_returns_an_empty_array_if_n_is_too_large():
        eq(R.drop(20, ["a", "b", "c", "d", "e", "f", "g"]), [])

    def it_returns_an_equivalent_list_if_n_is_lte_0():
        eq(R.drop(0, [1, 2, 3]), [1, 2, 3])
        eq(R.drop(-1, [1, 2, 3]), [1, 2, 3])
        eq(R.drop(float("-inf"), [1, 2, 3]), [1, 2, 3])

    def it_never_returns_the_input_array(not_equal):
        # Identity check via id(): the result must be a fresh list object.
        xs = [1, 2, 3]
        eq(not_equal(id(R.drop(0, xs)), id(xs)), True)
        eq(not_equal(id(R.drop(-1, xs)), id(xs)), True)

    def it_can_operate_on_strings():
        eq(R.drop(3, "Ramda"), "da")
        eq(R.drop(4, "Ramda"), "a")
        eq(R.drop(5, "Ramda"), "")
        eq(R.drop(6, "Ramda"), "")

    def it_dispatches_when_given_a_transformer_in_list_position():
        drop3 = R.drop(3)
        eq(drop3(list_xf).n, 3)
        eq(drop3(list_xf).xf, list_xf)

    def it_can_act_as_a_transducer(into_array):
        drop3 = R.drop(3)
        eq(into_array(drop3, [1, 2, 3, 4]), [4])
        eq(into_array(drop3, [1, 2, 3]), [])
def describe_drop_last():
    # Tests for R.drop_last (skip the last n elements).
    def it_skips_the_last_n_elements_from_a_list_returning_the_remainder():
        eq(R.drop_last(3, ["a", "b", "c", "d", "e", "f", "g"]), ["a", "b", "c", "d"])

    def it_returns_an_empty_array_if_n_is_too_large():
        eq(R.drop_last(20, ["a", "b", "c", "d", "e", "f", "g"]), [])

    def it_returns_an_equivalent_list_if_n_is_lte_0():
        eq(R.drop_last(0, [1, 2, 3]), [1, 2, 3])
        eq(R.drop_last(-1, [1, 2, 3]), [1, 2, 3])
        eq(R.drop_last(float("-inf"), [1, 2, 3]), [1, 2, 3])

    def it_never_returns_the_input_array(not_equal):
        # NOTE(review): unlike describe_drop's twin test, this compares the
        # lists themselves rather than id() values — verify that not_equal
        # checks identity here, otherwise this assertion cannot hold for
        # equal contents.
        xs = [1, 2, 3]
        eq(not_equal(R.drop_last(0, xs), xs), True)
        eq(not_equal(R.drop_last(-1, xs), xs), True)

    def it_can_operate_on_strings():
        eq(R.drop_last(3, "Ramda"), "Ra")

    def it_is_curried():
        drop_last2 = R.drop_last(2)
        eq(drop_last2(["a", "b", "c", "d", "e"]), ["a", "b", "c"])
        eq(drop_last2(["x", "y", "z"]), ["x"])

    def it_dispatches_when_given_a_transformer_in_list_position():
        drop_last2 = R.drop_last(2)
        eq(drop_last2(list_xf).n, 2)
        eq(drop_last2(list_xf).full, False)
        eq(drop_last2(list_xf).xf, list_xf)

    def it_can_act_as_a_transducer(into_array):
        drop_last2 = R.drop_last(2)
        eq(into_array(drop_last2, [1, 2, 3, 4]), [1, 2])
        eq(into_array(drop_last2, []), [])
def describe_drop_last_while():
    # Tests for R.drop_last_while (drop trailing elements while the
    # predicate holds).
    def it_skips_elements_while_the_function_reports_true():
        eq(R.drop_last_while(lambda x: x >= 5, [1, 3, 5, 7, 9]), [1, 3])

    def it_returns_an_empty_list_for_an_empty_list():
        # NOTE(review): the two assertions below are identical; the second
        # was presumably meant to cover a different input (e.g. a string,
        # as in the upstream Ramda suite) — confirm and fix upstream.
        eq(R.drop_last_while(lambda _: False, []), [])
        eq(R.drop_last_while(lambda _: False, []), [])

    def it_starts_at_the_right_arg_and_acknowledges_none():
        # The scan runs right-to-left and stops at the first None.
        sublist = R.drop_last_while(lambda x: x is not None, [1, 3, None, 5, 7])
        eq(len(sublist), 3)
        eq(sublist[0], 1)
        eq(sublist[1], 3)
        eq(sublist[2], None)

    def it_is_curried():
        drop_gt_7 = R.drop_last_while(lambda x: x > 7)
        eq(drop_gt_7([1, 3, 5, 7, 9]), [1, 3, 5, 7])
        eq(drop_gt_7([1, 3, 5]), [1, 3, 5])

    def it_can_act_as_a_transducer(into_array):
        drop_lt_7 = R.drop_last_while(lambda x: x < 7)
        eq(into_array(drop_lt_7, [1, 3, 5, 7, 9, 1, 2]), [1, 3, 5, 7, 9])
        eq(into_array(drop_lt_7, [1, 3, 5]), [])
def describe_drop_repeats_with():
    # Tests for R.drop_repeats_with (drop consecutive duplicates judged by a
    # custom predicate).
    @pytest.fixture
    def objs():
        return [{"i": 1}, {"i": 2}, {"i": 3}, {"i": 4}, {"i": 5}, {"i": 3}]

    @pytest.fixture
    def objs2():
        # Same sequence as objs but with consecutive duplicates inserted.
        return [
            {"i": 1}, {"i": 1}, {"i": 1}, {"i": 2}, {"i": 3},
            {"i": 3}, {"i": 4}, {"i": 4}, {"i": 5}, {"i": 3}]

    @pytest.fixture
    def eq_i():
        # Two dicts count as duplicates when their "i" props are equal.
        return R.eq_props("i")

    def it_removes_repeated_elements_based_on_predicate(objs, objs2, eq_i):
        eq(R.drop_repeats_with(eq_i, objs2), objs)
        eq(R.drop_repeats_with(eq_i, objs), objs)

    def it_keeps_elements_from_the_left(eq_i):
        eq(
            R.drop_repeats_with(
                eq_i,
                [{"i": 1, "n": 1}, {"i": 1, "n": 2}, {"i": 1, "n": 3},
                 {"i": 4, "n": 1}, {"i": 4, "n": 2}]),
            [{"i": 1, "n": 1}, {"i": 4, "n": 1}])

    def it_returns_an_empty_array_for_an_empty_array(eq_i):
        eq(R.drop_repeats_with(eq_i, []), [])

    def it_is_curried(objs, objs2, eq_i):
        eq(inspect.isfunction(R.drop_repeats_with(eq_i)), True)
        eq(R.drop_repeats_with(eq_i)(objs), objs)
        eq(R.drop_repeats_with(eq_i)(objs2), objs)

    def it_can_act_as_a_transducer(eq_i, objs, objs2):
        eq(R.into([], R.drop_repeats_with(eq_i), objs2), objs)
def describe_drop_repeats():
    # Tests for R.drop_repeats (drop consecutive duplicates using R.equals).
    @pytest.fixture
    def objs():
        return [1, 2, 3, 4, 5, 3, 2]

    @pytest.fixture
    def objs2():
        # Same sequence as objs but with consecutive duplicates inserted.
        return [1, 2, 2, 2, 3, 4, 4, 5, 5, 3, 2, 2]

    def it_removes_repeated_elements(objs, objs2):
        eq(R.drop_repeats(objs2), objs)
        eq(R.drop_repeats(objs), objs)

    def it_returns_an_empty_array_for_an_empty_array():
        eq(R.drop_repeats([]), [])

    def it_can_act_as_a_transducer(objs2, objs):
        eq(R.into([], R.drop_repeats, objs2), objs)

    def it_has_equals_semantics(just):
        # The signed-zero cases from the upstream suite are left disabled.
        # eq(get_arity(R.drop_repeats([0, -0])), 2)
        # eq(get_arity(R.drop_repeats([-0, 0])), 2)
        eq(len(R.drop_repeats([float("nan"), float("nan")])), 1)
        eq(len(R.drop_repeats([just([42]), just([42])])), 1)
def describe_drop_while():
    # Tests for R.drop_while (drop leading elements while the predicate holds).
    def it_skips_elements_while_the_function_reports_true():
        eq(R.drop_while(lambda x: x < 5, [1, 3, 5, 7, 9]), [5, 7, 9])

    def it_returns_an_empty_list_for_an_empty_list():
        # NOTE(review): the two assertions below are identical; the second
        # was presumably meant to cover a different input (e.g. a string,
        # as in the upstream Ramda suite) — confirm and fix upstream.
        eq(R.drop_while(lambda _: True, []), [])
        eq(R.drop_while(lambda _: True, []), [])

    def it_starts_at_the_right_arg_and_acknowledges_none():
        # The scan stops at the first None, which is kept in the result.
        sublist = R.drop_while(lambda x: x is not None, [1, 3, None, 5, 7])
        eq(len(sublist), 3)
        eq(sublist[0], None)
        eq(sublist[1], 5)
        eq(sublist[2], 7)

    def it_is_curried():
        drop_lt_7 = R.drop_while(lambda x: x < 7)
        eq(drop_lt_7([1, 3, 5, 7, 9]), [7, 9])
        eq(drop_lt_7([2, 4, 6, 8, 10]), [8, 10])

    def it_can_act_as_a_transducer(into_array):
        eq(into_array(R.drop_while(lambda x: x < 7), [1, 3, 5, 7, 9]), [7, 9])
def describe_ends_with():
    # Tests for R.ends_with (suffix check on strings and lists).
    def it_should_return_true_when_a_string_ends_with_the_provided_value():
        eq(R.ends_with("c", "abc"), True)

    def it_should_return_true_when_a_long_string_ends_with_the_provided_value():
        eq(R.ends_with("ology", "astrology"), True)

    def it_should_return_false_when_a_string_does_not_end_with_the_provided_value():
        eq(R.ends_with("b", "abc"), False)

    def it_should_return_false_when_a_long_string_does_not_end_with_the_provided_value():
        eq(R.ends_with("olog", "astrology"), False)

    def it_should_return_true_when_an_array_ends_with_the_provided_value():
        eq(R.ends_with(["c"], ["a", "b", "c"]), True)

    def it_should_return_true_when_an_array_ends_with_the_provided_values():
        eq(R.ends_with(["b", "c"], ["a", "b", "c"]), True)

    def it_should_return_false_when_an_array_does_not_end_with_the_provided_value():
        eq(R.ends_with(["b"], ["a", "b", "c"]), False)

    def it_should_return_false_when_an_array_does_not_end_with_the_provided_values():
        eq(R.ends_with(["a", "b"], ["a", "b", "c"]), False)
def describe_find():
obj1 = {"x": 100}
obj2 = {"x": 200}
a = [11, 10, 9, "cow", obj1, 8, 7, 100, 200, 300, obj2, 4, 3, 2, 1, 0]
def it_returns_the_first_element_that_satisfies_the_predicate(
even, is_str, gt100, x_gt100):
eq(R.find(even, a), 10)
eq(R.find(gt100, a), 200)
eq(R.find(is_str, a), "cow")
eq(R.find(x_gt100, a), obj2)
def it_transduces_the_first_element_that_satisfies_the_predicate_into_an_array(
into_array, even, gt100, is_str, x_gt100):
eq(into_array(R.find(even), a), | |
# Created byMartin.cz
# Copyright (c) <NAME>. All rights reserved.
import numpy
from pero.enums import *
from pero.properties import *
from pero import Frame
from pero import MarkerLegend
from . series import Series
from . import utils
class Rectangles(Series):
"""
Abstract base class for various types of rectangular data series. To specify
the position of labels and active points used for tooltips the 'anchor'
property must be set.
Any property of the rectangle line and fill can be dynamic, expecting the
raw data point as a 'source'. By this, the line and fill can be set
independently for each data point. However, be sure that all dynamic
properties return reasonable value for UNDEF to be used for legend. If raw
'data' property is not specified a sequence of internal raw data is created
as ((x,y),) coordinates according to the 'anchor' property.
Properties:
data: tuple, list, numpy.ndarray or UNDEF
Specifies the sequence of the raw data points.
anchor: str
Specifies the position within rectangles to be used to display
labels and tooltip as any item from the pero.POSITION_LRTBC enum.
x_offset: tuple, list, numpy.ndarray or UNDEF
Specifies the sequence of the x-offsets in data units.
y_offset: tuple, list, numpy.ndarray or UNDEF
Specifies the sequence of the y-offsets in data units.
spacing: int, float or tuple
Specifies the absolute spacing added around each rectangle in
device units, as a single value or values for individual sides
starting from top.
width_limit: int or float
Specifies the minimum display width of the rectangle.
height_limit: int or float
Specifies the minimum display height of the rectangle.
line properties:
Includes pero.LineProperties to specify the outline.
fill properties:
Includes pero.FillProperties to specify the fill.
"""
data = SequenceProperty(UNDEF, dynamic=False)
anchor = EnumProperty(POS_CENTER, enum=POSITION_LRTBC, dynamic=False)
x_offset = Property(UNDEF, dynamic=False)
y_offset = Property(UNDEF, dynamic=False)
spacing = QuadProperty(0, dynamic=False)
width_limit = NumProperty(1, dynamic=False)
height_limit = NumProperty(1, dynamic=False)
line = Include(LineProperties)
fill = Include(FillProperties)
def __init__(self, **overrides):
    """Initializes a new instance of the Rectangles series base."""
    
    # provide a default square-marker legend unless the caller supplied one
    if 'legend' not in overrides:
        overrides['legend'] = MarkerLegend(
            text = lambda d: d.title,
            show_marker = True,
            show_line = False,
            marker = MARKER_SQUARE,
            marker_line_color = lambda d: d.color.darker(0.2),
            marker_fill_color = lambda d: d.color)
    
    # init base
    super().__init__(**overrides)
    
    # init coordinate buffers
    self._left_data = []
    self._right_data = []
    self._top_data = []
    self._bottom_data = []
    self._x_data = []
    self._y_data = []
    self._raw_data = []
    self._limits = None
    
    # extract data
    self.extract_data()
    
    # freeze the data-derived properties after extraction
    self.lock_property('data')
    self.lock_property('anchor')
    self.lock_property('x_offset')
    self.lock_property('y_offset')
    
    # these coordinate properties may not exist on every subclass,
    # so locking them must not raise
    for name in ('x', 'y', 'width', 'height', 'left', 'right', 'top', 'bottom'):
        self.lock_property(name, raise_error=False)
def get_limits(self, x_range=None, y_range=None, exact=False):
    """Gets current data limits using whole range or specified crops."""
    
    # no data extracted yet
    if self._limits is None:
        return None
    
    limits = self._limits
    
    # crop by combining the limits of the top-left and bottom-right
    # corner coordinates within the requested ranges
    if x_range or y_range:
        
        crops = (x_range, y_range)
        
        top_left = utils.calc_limits_unsorted(
            data = (self._left_data, self._top_data),
            crops = crops,
            extend = False)
        
        bottom_right = utils.calc_limits_unsorted(
            data = (self._right_data, self._bottom_data),
            crops = crops,
            extend = False)
        
        limits = utils.combine_limits(top_left, bottom_right)
    
    # finalize limits
    return self.finalize_limits(limits, exact)
def get_labels(self, canvas=None, source=UNDEF, **overrides):
    """Gets series labels."""
    
    xs = self._x_data
    ys = self._y_data
    raw = self._raw_data
    
    return self.prepare_labels(xs, ys, raw)
def get_tooltip(self, x, y, limit):
    """Gets nearest data point tooltip."""
    
    coords = (self._x_data, self._y_data, self._raw_data)
    
    return self.prepare_tooltip(*coords, x, y, limit)
def extract_data(self):
    """Extracts coordinates from raw data.

    Fills the left/right/top/bottom/x/y buffers, derives whichever of
    them were not given explicitly (centers from edges, or edges from
    center plus width/height), applies anchoring and offsets, and caches
    the full-range limits in ``self._limits``.
    """
    # reset buffers
    self._left_data = []
    self._right_data = []
    self._top_data = []
    self._bottom_data = []
    self._x_data = []
    self._y_data = []
    self._raw_data = []
    self._limits = None
    # get data size (common length across all coordinate properties)
    size = utils.extract_data_size(self, 'data', 'x', 'y', 'width', 'height', 'left', 'right', 'top', 'bottom')
    # extract data
    self._left_data, left_raw = utils.extract_data(self, 'left', self.data, size, self.x_mapper)
    self._right_data, right_raw = utils.extract_data(self, 'right', self.data, size, self.x_mapper)
    self._top_data, top_raw = utils.extract_data(self, 'top', self.data, size, self.y_mapper)
    self._bottom_data, bottom_raw = utils.extract_data(self, 'bottom', self.data, size, self.y_mapper)
    self._x_data, x_raw = utils.extract_data(self, 'x', self.data, size, self.x_mapper)
    self._y_data, y_raw = utils.extract_data(self, 'y', self.data, size, self.y_mapper)
    width_data, width_raw = utils.extract_data(self, 'width', self.data, size)
    height_data, height_raw = utils.extract_data(self, 'height', self.data, size)
    # calc missing data: centers as edge midpoints, edges as center +/- half-size
    if not self.has_property('x'):
        self._x_data = 0.5*(self._left_data + self._right_data)
        x_raw = self._x_data
    if not self.has_property('y'):
        self._y_data = 0.5*(self._top_data + self._bottom_data)
        y_raw = self._y_data
    if not self.has_property('left'):
        self._left_data = self._x_data - 0.5*width_data
        left_raw = self._left_data
    if not self.has_property('right'):
        self._right_data = self._x_data + 0.5*width_data
        right_raw = self._right_data
    if not self.has_property('top'):
        # data y-axis points up here: top edge gets the larger y value
        self._top_data = self._y_data + 0.5*height_data
        top_raw = self._top_data
    if not self.has_property('bottom'):
        self._bottom_data = self._y_data - 0.5*height_data
        bottom_raw = self._bottom_data
    # set anchor and raw data: tooltips/labels snap to the chosen edge
    if self.anchor == POS_LEFT:
        self._x_data = self._left_data
        self._raw_data = numpy.array([left_raw, y_raw]).T
    elif self.anchor == POS_RIGHT:
        self._x_data = self._right_data
        self._raw_data = numpy.array([right_raw, y_raw]).T
    elif self.anchor == POS_TOP:
        self._y_data = self._top_data
        self._raw_data = numpy.array([x_raw, top_raw]).T
    elif self.anchor == POS_BOTTOM:
        self._y_data = self._bottom_data
        self._raw_data = numpy.array([x_raw, bottom_raw]).T
    else:
        self._raw_data = numpy.array([x_raw, y_raw]).T
    # set raw data (explicit source data overrides the assembled pairs above)
    if self.data is not UNDEF:
        self._raw_data = numpy.array(self.data)
    # apply offset (scalar or per-point sequence) to all x-related buffers
    if self.x_offset is not UNDEF:
        if isinstance(self.x_offset, (tuple, list)):
            self.x_offset = numpy.array(self.x_offset)
        self._x_data = self._x_data + self.x_offset
        self._left_data = self._left_data + self.x_offset
        self._right_data = self._right_data + self.x_offset
    if self.y_offset is not UNDEF:
        if isinstance(self.y_offset, (tuple, list)):
            self.y_offset = numpy.array(self.y_offset)
        self._y_data = self._y_data + self.y_offset
        self._top_data = self._top_data + self.y_offset
        self._bottom_data = self._bottom_data + self.y_offset
    # init full limits (min/max over both edges, tolerating swapped edges)
    if len(self._raw_data) > 0:
        self._limits = (
            (min(self._left_data.min(), self._right_data.min()),
                max(self._left_data.max(), self._right_data.max())),
            (min(self._bottom_data.min(), self._top_data.min()),
                max(self._bottom_data.max(), self._top_data.max())))
def draw(self, canvas, source=UNDEF, **overrides):
    """Uses given canvas to draw the series.

    Scales the rectangle edges into device coordinates, applies
    spacing, clips rectangles fully outside the view frame and draws
    the rest with per-point pen/brush settings.
    """
    # check if visible
    if not self.is_visible(source, overrides):
        return
    # get properties
    tag = self.get_property('tag', source, overrides)
    x_scale = self.get_property('x_scale', source, overrides)
    y_scale = self.get_property('y_scale', source, overrides)
    spacing = self.get_property('spacing', source, overrides)
    width_limit = self.get_property('width_limit', source, overrides)
    height_limit = self.get_property('height_limit', source, overrides)
    frame = self.get_property('frame', source, overrides)
    color = self.get_property('color', source, overrides)
    # get data
    left_data = self._left_data
    right_data = self._right_data
    top_data = self._top_data
    bottom_data = self._bottom_data
    raw_data = self._raw_data
    # normalize optional properties
    # spacing indices used below: [0]=top, [1]=right, [2]=bottom, [3]=left
    spacing = spacing or (0, 0, 0, 0)
    width_limit = width_limit or 0
    height_limit = height_limit or 0
    # check data
    if len(left_data) == 0:
        return
    # scale coords from data units into device units
    left_data = x_scale.scale(left_data)
    right_data = x_scale.scale(right_data)
    top_data = y_scale.scale(top_data)
    bottom_data = y_scale.scale(bottom_data)
    # get default colors (line is a slightly darker shade of the fill)
    default_line_color = color.darker(0.2)
    default_fill_color = color
    # start drawing group
    with canvas.group(tag, "series"):
        # draw rectangles
        for i, data in enumerate(raw_data):
            # get coords (device y grows downwards after scaling)
            x = left_data[i] + spacing[3]
            y = top_data[i] + spacing[0]
            width = right_data[i] - spacing[1] - x
            height = bottom_data[i] - spacing[2] - y
            # normalize rectangles whose edges came in swapped
            if width < 0:
                width *= -1
                x -= width
            if height < 0:
                height *= -1
                y -= height
            # apply clipping: skip rects not intersecting the view frame
            bbox = Frame(x, y, width, height)
            if not frame.overlaps(bbox):
                continue
            # set pen and brush
            canvas.set_pen_by(self, source=data, overrides=overrides)
            canvas.set_brush_by(self, source=data, overrides=overrides)
            # set default colors when the point defines none of its own
            if self.get_property('line_color', source=data, overrides=overrides) is UNDEF:
                canvas.line_color = default_line_color
            if self.get_property('fill_color', source=data, overrides=overrides) is UNDEF:
                canvas.fill_color = default_fill_color
            # draw rectangle (width_limit/height_limit act as a minimum size)
            canvas.draw_rect(x, y, max(width_limit, width), max(height_limit, height))
class Rects(Rectangles):
"""
This type of series plots raw data as individual rectangles defined by the
center 'x' and 'y' coordinates, 'width' and 'height'. Data can be provided
either directly by specifying the 'x', 'y', 'width' and 'height' properties
or as a sequence of raw 'data' points together with 'x', 'y', 'width' and
'height' coordinates selectors. All the coordinates are expected to be in
real data units.
Properties:
x: int, float, tuple, list, numpy.ndarray, callable, None or UNDEF
Specifies the sequence of center x-coordinates in real data units or
a function to retrieve the coordinates from the raw data.
y: int, float, tuple, list, numpy.ndarray, callable, None or UNDEF
Specifies the sequence of center y-coordinates in real data units or
a function to retrieve the coordinates from the | |
<reponame>ayesha-omarali/sentry<gh_stars>0
"""
sentry.coreapi
~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
# TODO: We should make the API a class, and UDP/HTTP just inherit from it
# This will make it so we can more easily control logging with various
# metadata (rather than generic log messages which aren't useful).
from __future__ import absolute_import, print_function
import base64
import jsonschema
import logging
import re
import six
import zlib
from collections import MutableMapping
from django.core.exceptions import SuspiciousOperation
from django.utils.crypto import constant_time_compare
from gzip import GzipFile
from six import BytesIO
from time import time
from sentry import filters
from sentry.cache import default_cache
from sentry.interfaces.base import get_interface
from sentry.event_manager import EventManager
from sentry.models import ProjectKey
from sentry.tasks.store import preprocess_event, \
preprocess_event_from_reprocessing
from sentry.utils import json
from sentry.utils.auth import parse_auth_header
from sentry.utils.http import origin_from_request
from sentry.utils.data_filters import is_valid_ip, \
is_valid_release, is_valid_error_message, FilterStatKeys
from sentry.utils.strings import decompress
from sentry.utils.canonical import CANONICAL_TYPES
_dist_re = re.compile(r'^[a-zA-Z0-9_.-]+$')
class APIError(Exception):
    """Base error for client API failures.

    Subclasses override ``http_status``, ``msg`` and ``name`` to
    describe the specific failure; instances may override ``msg`` and
    ``name`` per call site.
    """
    http_status = 400
    msg = 'Invalid request'
    name = None

    def __init__(self, msg=None, name=None):
        if msg:
            self.msg = msg
        # BUG FIX: this previously tested ``self.name`` (the class
        # attribute, None by default), so a caller-supplied ``name``
        # was silently discarded.
        if name:
            self.name = name

    def __str__(self):
        return self.msg or ''
class APIUnauthorized(APIError):
    """Raised when no valid authentication is present (HTTP 401)."""
    http_status = 401
    msg = 'Unauthorized'
class APIForbidden(APIError):
    """Raised when the request is authenticated but not allowed (HTTP 403)."""
    http_status = 403
class APIRateLimited(APIError):
    """Raised when event creation is rejected by rate limiting (HTTP 429)."""
    http_status = 429
    msg = 'Creation of this event was denied due to rate limiting'
    name = 'rate_limit'
    def __init__(self, retry_after=None):
        # seconds the client should wait before retrying; may be None
        self.retry_after = retry_after
class Auth(object):
    """Parsed client authentication values (from header or query string)."""
    def __init__(self, auth_vars, is_public=False):
        # SDK identifier string (e.g. raven-python/x.y)
        self.client = auth_vars.get('sentry_client')
        # protocol version, normalized to text (may be the string 'None')
        self.version = six.text_type(auth_vars.get('sentry_version'))
        self.secret_key = auth_vars.get('sentry_secret')
        self.public_key = auth_vars.get('sentry_key')
        # public requests are those arriving with a CORS origin
        self.is_public = is_public
class ClientContext(object):
    """Mutable holder for per-request client metadata used in logging."""

    def __init__(self, agent=None, version=None, project_id=None, ip_address=None):
        self.agent = agent              # user-agent (i.e. raven-python)
        self.version = version          # protocol version
        self.project_id = project_id
        self.project = None             # bound lazily via bind_project()
        self.ip_address = ip_address

    def bind_project(self, project):
        """Attach a resolved project instance and sync its id."""
        self.project = project
        self.project_id = project.id

    def bind_auth(self, auth):
        """Copy agent/version information from an Auth object."""
        self.agent = auth.client
        self.version = auth.version

    def get_tags_context(self):
        """Return the tag dict attached to log records for this context."""
        return {
            'project': self.project_id,
            'agent': self.agent,
            'protocol': self.version,
        }
class ClientLogHelper(object):
    """Wrapper around the ``sentry.api`` logger that enriches every
    record with project/agent/protocol metadata from a ClientContext."""

    def __init__(self, context):
        self.context = context
        self.logger = logging.getLogger('sentry.api')

    def _dispatch(self, method, args, kwargs):
        # route through _metadata so every record carries context info
        method(*args, **self._metadata(**kwargs))

    def debug(self, *a, **k):
        self._dispatch(self.logger.debug, a, k)

    def info(self, *a, **k):
        self._dispatch(self.logger.info, a, k)

    def warning(self, *a, **k):
        self._dispatch(self.logger.warning, a, k)

    def error(self, *a, **k):
        self._dispatch(self.logger.error, a, k)

    def _metadata(self, tags=None, extra=None, **kwargs):
        """Inject context tags into the ``tags``/``extra`` kwargs (in place
        when the caller passed dicts) and return the enriched kwargs."""
        tags = tags or {}
        extra = extra or {}
        ctx = self.context
        # label the project either by org/slug or by bare id
        if ctx.project:
            label = '%s/%s' % (ctx.project.organization.slug, ctx.project.slug)
        else:
            label = 'id=%s' % (ctx.project_id, )
        tags.update(ctx.get_tags_context())
        tags['project'] = label
        extra['tags'] = tags
        extra['agent'] = ctx.agent
        extra['protocol'] = ctx.version
        extra['project'] = label
        kwargs['extra'] = extra
        return kwargs
class ClientApiHelper(object):
    """Bundles per-request helpers for the event ingestion API:
    authentication parsing, payload decoding/decompression, inbound
    filtering and handing events off to the preprocessing pipeline."""

    def __init__(self, agent=None, version=None, project_id=None, ip_address=None):
        # context carries client metadata; log enriches records with it
        self.context = ClientContext(
            agent=agent,
            version=version,
            project_id=project_id,
            ip_address=ip_address,
        )
        self.log = ClientLogHelper(self.context)

    def auth_from_request(self, request):
        """Extract sentry auth data from the query string or auth headers.

        Raises SuspiciousOperation when more than one auth payload is
        present and APIUnauthorized when none is found.
        """
        # collect sentry_* GET parameters as the first candidate payload
        result = {k: request.GET[k] for k in six.iterkeys(
            request.GET) if k[:7] == 'sentry_'}
        if request.META.get('HTTP_X_SENTRY_AUTH', '')[:7].lower() == 'sentry ':
            if result:
                raise SuspiciousOperation(
                    'Multiple authentication payloads were detected.')
            result = parse_auth_header(request.META['HTTP_X_SENTRY_AUTH'])
        elif request.META.get('HTTP_AUTHORIZATION', '')[:7].lower() == 'sentry ':
            if result:
                raise SuspiciousOperation(
                    'Multiple authentication payloads were detected.')
            result = parse_auth_header(request.META['HTTP_AUTHORIZATION'])
        if not result:
            raise APIUnauthorized('Unable to find authentication information')
        # requests that carry an origin are treated as public (browser) clients
        origin = self.origin_from_request(request)
        auth = Auth(result, is_public=bool(origin))
        # default client to user agent
        if not auth.client:
            auth.client = request.META.get('HTTP_USER_AGENT')
        return auth

    def origin_from_request(self, request):
        """
        Returns either the Origin or Referer value from the request headers.
        """
        # some browsers send the literal string 'null' as the Origin
        if request.META.get('HTTP_ORIGIN') == 'null':
            return 'null'
        return origin_from_request(request)

    def project_key_from_auth(self, auth):
        """Resolve and validate the ProjectKey for ``auth``.

        Raises APIUnauthorized on any validation failure.
        """
        if not auth.public_key:
            raise APIUnauthorized('Invalid api key')
        # Make sure the key even looks valid first, since it's
        # possible to get some garbage input here causing further
        # issues trying to query it from cache or the database.
        if not ProjectKey.looks_like_api_key(auth.public_key):
            raise APIUnauthorized('Invalid api key')
        try:
            pk = ProjectKey.objects.get_from_cache(public_key=auth.public_key)
        except ProjectKey.DoesNotExist:
            raise APIUnauthorized('Invalid api key')
        # a secret key may not be present which will be validated elsewhere
        if not constant_time_compare(pk.secret_key, auth.secret_key or pk.secret_key):
            raise APIUnauthorized('Invalid api key')
        if not pk.is_active:
            raise APIUnauthorized('API key is disabled')
        if not pk.roles.store:
            raise APIUnauthorized('Key does not allow event storage access')
        return pk

    def project_id_from_auth(self, auth):
        """Convenience wrapper returning only the project id."""
        return self.project_key_from_auth(auth).project_id

    def decode_data(self, encoded_data):
        """Decode raw bytes as UTF-8; failures surface as APIError."""
        try:
            return encoded_data.decode('utf-8')
        except UnicodeDecodeError as e:
            # This error should be caught as it suggests that there's a
            # bug somewhere in the client's code.
            self.log.debug(six.text_type(e), exc_info=True)
            raise APIError('Bad data decoding request (%s, %s)' %
                           (type(e).__name__, e))

    def decompress_deflate(self, encoded_data):
        """zlib-decompress the payload and decode it as UTF-8."""
        try:
            return zlib.decompress(encoded_data).decode('utf-8')
        except Exception as e:
            # This error should be caught as it suggests that there's a
            # bug somewhere in the client's code.
            self.log.debug(six.text_type(e), exc_info=True)
            raise APIError('Bad data decoding request (%s, %s)' %
                           (type(e).__name__, e))

    def decompress_gzip(self, encoded_data):
        """gunzip the payload and decode it as UTF-8."""
        try:
            fp = BytesIO(encoded_data)
            try:
                f = GzipFile(fileobj=fp)
                return f.read().decode('utf-8')
            finally:
                # NOTE(review): if GzipFile() itself raised, ``f`` is unbound
                # here and this close raises NameError (masked by the outer
                # except) — confirm whether that is acceptable.
                f.close()
        except Exception as e:
            # This error should be caught as it suggests that there's a
            # bug somewhere in the client's code.
            self.log.debug(six.text_type(e), exc_info=True)
            raise APIError('Bad data decoding request (%s, %s)' %
                           (type(e).__name__, e))

    def decode_and_decompress_data(self, encoded_data):
        """Try zlib decompression first, then fall back to plain base64."""
        try:
            try:
                return decompress(encoded_data).decode('utf-8')
            except zlib.error:
                return base64.b64decode(encoded_data).decode('utf-8')
        except Exception as e:
            # This error should be caught as it suggests that there's a
            # bug somewhere in the client's code.
            self.log.debug(six.text_type(e), exc_info=True)
            raise APIError('Bad data decoding request (%s, %s)' %
                           (type(e).__name__, e))

    def safely_load_json_string(self, json_string):
        """Parse a JSON object string; anything else raises APIError."""
        try:
            if isinstance(json_string, six.binary_type):
                json_string = json_string.decode('utf-8')
            obj = json.loads(json_string)
            # the top-level payload must be a JSON object
            assert isinstance(obj, dict)
        except Exception as e:
            # This error should be caught as it suggests that there's a
            # bug somewhere in the client's code.
            self.log.debug(six.text_type(e), exc_info=True)
            raise APIError('Bad data reconstructing object (%s, %s)' %
                           (type(e).__name__, e))
        return obj

    def parse_client_as_sdk(self, value):
        """Split a client string like ``raven-python/5.0`` into name/version.

        Returns an empty dict when the string cannot be parsed.
        """
        if not value:
            return {}
        try:
            name, version = value.split('/', 1)
        except ValueError:
            try:
                # fall back to a space separator
                name, version = value.split(' ', 1)
            except ValueError:
                return {}
        return {
            'name': name,
            'version': version,
        }

    def should_filter(self, project, data, ip_address=None):
        """
        returns (result: bool, reason: string or None)

        Result is True if an event should be filtered
        The reason for filtering is passed along as a string
        so that we can store it in metrics
        """
        if ip_address and not is_valid_ip(project, ip_address):
            return (True, FilterStatKeys.IP_ADDRESS)
        release = data.get('release')
        if release and not is_valid_release(project, release):
            return (True, FilterStatKeys.RELEASE_VERSION)
        message_interface = data.get('sentry.interfaces.Message', {})
        error_message = message_interface.get('formatted', ''
                                              ) or message_interface.get('message', '')
        if error_message and not is_valid_error_message(project, error_message):
            return (True, FilterStatKeys.ERROR_MESSAGE)
        # also test each exception's "type: value" string against the filter
        for exception_interface in data.get('sentry.interfaces.Exception', {}).get('values', []):
            message = u': '.join(filter(None, map(exception_interface.get, ['type', 'value'])))
            if message and not is_valid_error_message(project, message):
                return (True, FilterStatKeys.ERROR_MESSAGE)
        # finally run all registered project-level filters
        for filter_cls in filters.all():
            filter_obj = filter_cls(project)
            if filter_obj.is_enabled() and filter_obj.test(data):
                return (True, six.text_type(filter_obj.id))
        return (False, None)

    def validate_data(self, data):
        # base implementation is a pass-through; subclasses override
        return data

    def ensure_does_not_have_ip(self, data):
        """Strip client IP addresses from the event payload, in place."""
        if 'sentry.interfaces.Http' in data:
            if 'env' in data['sentry.interfaces.Http']:
                data['sentry.interfaces.Http']['env'].pop('REMOTE_ADDR', None)
        if 'sentry.interfaces.User' in data:
            data['sentry.interfaces.User'].pop('ip_address', None)
        if 'sdk' in data:
            data['sdk'].pop('client_ip', None)

    def insert_data_to_database(self, data, start_time=None, from_reprocessing=False):
        """Cache the event payload and enqueue the preprocessing task."""
        if start_time is None:
            start_time = time()
        # we might be passed some subclasses of dict that fail dumping
        if isinstance(data, DOWNGRADE_DATA_TYPES):
            data = dict(data.items())
        # cache key layout is e:<event_id>:<project>
        cache_key = 'e:{1}:{0}'.format(data['project'], data['event_id'])
        default_cache.set(cache_key, data, timeout=3600)
        task = from_reprocessing and \
            preprocess_event_from_reprocessing or preprocess_event
        task.delay(cache_key=cache_key, start_time=start_time,
                   event_id=data['event_id'])
class MinidumpApiHelper(ClientApiHelper):
    """API helper variant used for native crash (minidump) uploads."""

    def origin_from_request(self, request):
        # We don't use an origin here
        return None

    def auth_from_request(self, request):
        """Authenticate via the ``sentry_key`` query parameter only."""
        public_key = request.GET.get('sentry_key')
        if not public_key:
            raise APIUnauthorized('Unable to find authentication information')
        # Minidump requests are always "trusted". We at this point only
        # use is_public to identify requests that have an origin set (via
        # CORS)
        auth = Auth({'sentry_key': public_key}, is_public=False)
        auth.client = 'sentry-minidump'
        return auth
class SecurityApiHelper(ClientApiHelper):
report_interfaces = ('sentry.interfaces.Csp', 'hpkp', 'expectct', 'expectstaple')
def origin_from_request(self, request):
    """Always returns ``None``.

    For security reports the origin can only be parsed out of the
    request body, so the CORS check is deferred until after parsing
    instead of being done at dispatch() time.
    """
    return None
def auth_from_request(self, request):
    """Build a public Auth object from the ``sentry_key`` query parameter."""
    public_key = request.GET.get('sentry_key')
    if not public_key:
        raise APIUnauthorized('Unable to find authentication information')
    # security reports always count as public; CORS is checked after parsing
    auth = Auth({'sentry_key': public_key}, is_public=True)
    auth.client = request.META.get('HTTP_USER_AGENT')
    return auth
def should_filter(self, project, data, ip_address=None):
    """Filter security reports via their interface, then fall back to
    the generic filters of the base class."""
    # the report payload sits under one of the known interface keys;
    # let that interface decide whether to drop it
    for name in self.report_interfaces:
        if name in data:
            interface = get_interface(name)
            if interface.to_python(data[name]).should_filter(project):
                return (True, FilterStatKeys.INVALID_CSP)
    return super(SecurityApiHelper, self).should_filter(project, data, ip_address)
def validate_data(self, data):
try:
interface = get_interface(data.pop('interface'))
report = data.pop('report')
except KeyError:
raise APIForbidden('No report or interface data')
# To support testing, we can either accept a buillt interface instance, or the raw data in
# which case we build the instance ourselves
try:
instance = report | |
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
VERBOSE_RERUN_MSG = "Use -v or -vv option for more info."
SPINNER_COLOR = "green"
class VersionCmdTexts:
    """User-facing texts for the ``nctl version`` command."""
    HELP = "Displays the version of the installed nctl application."
    INITIAL_PLATFORM_VERSION = "Failed to get platform version."
    # shown when kubectl cannot reach the cluster; fixed typo "Falied" -> "Failed"
    KUBECTL_INT_ERROR_MSG = "Failed to check platform version. This may occur for example due to invalid path to " \
                            "kubectl config, invalid k8s credentials or k8s cluster being unavailable. Check your " \
                            "KUBECONFIG environment variable and make sure that the k8s cluster is online."
    OTHER_ERROR_MSG = "Unexpected error occurred during platform version check."
    TABLE_APP_ROW_NAME = "nctl application"
    TABLE_PLATFORM_ROW_NAME = "nauta platform"
    TABLE_HEADERS = ["Component", "Version"]
class MountCmdTexts:
    """User-facing texts for the ``nctl mount`` command and its list variant."""
    HELP = "Displays a command that can be used to mount client's folders on his/her local machine."
    # template printed together with the generated mount command; the user
    # substitutes the <MOUNTPOINT>/<NAUTA_FOLDER> placeholders manually
    MAIN_MSG = """Use the following command to mount those folders:
- replace <MOUNTPOINT> with a proper location on your local machine)
- replace <NAUTA_FOLDER> with one of the following:
- input - User's private input folder (read/write)
(can be accessed as /mnt/input/home from training script).
- output - User's private output folder (read/write)
(can be accessed as /mnt/output/home from training script).
- input-shared - Shared input folder (read/write)
(can be accessed as /mnt/input/root/public from training script).
- output-shared - Shared output folder (read/write)
(can be accessed as /mnt/output/root/public from training script).
- input-output-ro - Full input and output directories, read only.
Additionally, each experiment has a special folder that can be accessed
as /mnt/output/experiment from training script. This folder is shared by Samba
as output/<EXPERIMENT_NAME>.
--------------------------------------------------------------------"""
    HELP_L = "Displays a list of nauta folders mounted on a local machine. If run using admin credentials, displays " \
             "mounts of all users."
    USER_IS_ADMIN_ERROR_MSG = "NAUTA doesn't create shares for administrators. Please execute this command as a " \
                              "regular user."
    ADMIN_CHECK_ERROR_MSG = "Problems detected while verifying that current user is an administrator."
    GET_MOUNT_COMMAND_ERROR_MSG = "Error detected while gathering data needed for mounting Samba share."
    UNMOUNT_COMMAND_MSG = "Use following command to unmount previously mounted folder:"
    UNMOUNT_OPTIONS_MSG = "In case of problems with unmounting (disconnected disk etc.) try out -f (force) or -l " \
                          "(lazy) options. For more info about these options refer to man umount."
    UNMOUNT_OPTIONS_OSX_MSG = "In case of problems with unmounting (disconnected disk etc.) try out -f (force) " \
                              "option. For more info about these options refer to man umount."
    MOUNTS_LIST_COMMAND_ERROR_MSG = "Error detected while gathering list of mounted shares."
class CmdsCommonTexts:
    """Texts shared by several nctl commands."""
    INVALID_REGEX_ERROR_MSG = "Regular expression provided for name filtering is invalid: {name}"
    OTHER_ERROR_MSG = "Failed to get experiments list."
class VerifyCmdTexts:
    """User-facing texts for the ``nctl verify`` command (dependency checks)."""
    HELP = "Command verifies whether all external components required by nctl are installed in proper versions. " \
           "If something is missing, the application displays detailed information about it."
    KUBECTL_NOT_INSTALLED_ERROR_MSG = "kubectl is not installed."
    KUBECTL_INVALID_VERSION_ERROR_MSG = "the installed version of kubectl ({installed_version}) is not " \
                                        "supported, supported version {supported_versions_sign} {expected_version}"
    GET_K8S_NAMESPACE_ERROR_MSG = "Failed to get current Kubernetes namespace."
    VERSION_CHECKING_MSG = "Checking version of {dependency_name}. Installed version: ({installed_version}). " \
                           "Supported version {supported_versions_sign} {expected_version}."
    DEPENDENCY_VERIFICATION_SUCCESS_MSG = "{dependency_name} verified successfully."
    INVALID_VERSION_WARNING_MSG = "Warning: the installed version of {dependency_name} ({installed_version}) is " \
                                  "not supported, supported version {supported_versions_sign} " \
                                  "{expected_version}"
    DEPENDENCY_NOT_INSTALLED_ERROR_MSG = "{dependency_name} is not installed. Check installation manual for more " \
                                         "information."
    DEPENDENCY_VERSION_CHECK_ERROR_MSG = "Failed to get {dependency_name} version."
    DEPENDENCY_VERIFICATION_OTHER_ERROR_MSG = "{dependency_name} - exception during verification."
    OS_SUPPORTED_MSG = "This OS is supported."
    CHECKING_CONNECTION_TO_CLUSTER_MSG = "Checking connection to the cluster..."
    CHECKING_OS_MSG = "Checking operating system..."
    VERIFYING_DEPENDENCY_MSG = "Verifying {dependency_name} ..."
    CHECKING_PORT_FORWARDING_FROM_CLUSTER_MSG = "Checking port forwarding from cluster..."
class UserCmdTexts:
    """Top-level help text for the ``nctl user`` command group."""
    HELP = "Command for creating/deleting/listing users of the platform. Can only be run by a platform " \
           "administrator."
class UserListCmdTexts:
    """User-facing texts for the ``nctl user list`` command."""
    HELP = "List users."
    HELP_C = "If given - command displays c last rows."
    TABLE_HEADERS = ["Name", "Creation date", "Date of last submitted job", "Number of running jobs",
                     "Number of queued jobs"]
    OTHER_ERROR_MSG = "Failed to get users list."
class UserCreateCmdTexts:
    """User-facing texts for the ``nctl user create`` command."""
    SHORT_HELP = "Command used to create a new user on the platform. Can only be run by a platform administrator."
    HELP = """
Command used to create a new user on the platform. Can only be run by a platform administrator.
USERNAME - is a name of user which should be created.
"""
    HELP_L = "If given - content of the generated user's config file is displayed on the screen only."
    HELP_F = "Name of file where user's configuration will be stored. If not given configuration is stored in the " \
             "config. file."
    ADD_USER_ERROR_MSG = "User {username} has not been created."
    REMOVE_USER_ERROR_MSG = "Additional error appeared when the system tried to remove artifacts of a non-created " \
                            "{username} user. Please contact an administrator to completely remove those artifacts."
    F_L_OPTIONS_EXCLUSION_ERROR_MSG = "Both -f/--filename and -l/--list_only options cannot be given. Please " \
                                      "choose one of them."
    NAME_VALIDATION_ERROR_MSG = "Error detected while validating user name: {username}."
    USER_NOT_ADMIN_ERROR_MSG = "Only administrators can create new users."
    USER_ALREADY_EXISTS_ERROR_MSG = "User {username} already exists."
    USER_BEING_REMOVED_ERROR_MSG = "User {username} is still being removed."
    USER_VERIFICATION_ERROR_MSG = "Problems detected while verifying user with user name: {username}."
    PASSWORD_GATHER_ERROR_MSG = "The app encountered problems while gathering user's password."
    # fixed typo "certifcate" -> "certificate"
    CERT_GATHER_ERROR_MSG = "The app encountered problems while gathering server's certificate authority."
    USER_ADD_ERROR_MSG = "Error detected while adding of a user."
    USER_CREATION_SUCCESS_MSG = "User {username} has been added successfully."
    USER_NOT_READY_ERROR_MSG = "User {username} is still not ready."
    CONFIG_CREATION_ERROR_MSG = "Problems during creation of the kubeconfig with user's configuration."
    LIST_ONLY_HEADER = "Please use the following kubectl config to connect to this user.\n" \
                       "----------------------------------------------------------------"
    CONFIG_SAVE_SUCCESS_MSG = "Configuration has been saved to the (unknown) file."
    CONFIG_SAVE_FAIL_MSG = "File with configuration wasn't saved."
    CONFIG_SAVE_FAIL_INSTRUCTIONS_MSG = "Content of the generated config file is as follows. Please copy it " \
                                        "to a file manually."
    CREATING_USER_PROGRESS_MSG = "Creating user {username}..."
class UserDeleteCmdTexts:
    """User-facing texts for the ``nctl user delete`` command."""
    SHORT_HELP = "Command used to delete a user from the platform. Can be only run by a platform administrator."
    HELP = """
Command used to delete a user from the platform. Can be only run by a platform administrator.
USERNAME - is a name of user which should be deleted.
"""
    HELP_PR = "If this option is added, the command removes all of client's artifacts."
    USER_NOT_ADMIN_ERROR_MSG = "Only administrators can delete users."
    # fixed grammar: "doesn't exists" -> "doesn't exist"
    USER_NOT_EXISTS_ERROR_MSG = "User {username} doesn't exist."
    USER_BEING_REMOVED_ERROR_MSG = "User is still being removed."
    USER_PRESENCE_VERIFICATION_ERROR_MSG = "Problems during verifying users presence."
    DELETE_CONFIRM_MSG = "User {username} is about to be deleted. Do you want to continue?"
    DELETE_ABORT_MSG = "Operation of deleting of a user was aborted."
    PURGE_ERROR_MSG = "Some artifacts belonging to a user weren't removed."
    DELETE_IN_PROGRESS_MSG = "User is still being deleted. Please check status of this user in a while."
    DELETE_SUCCESS_MSG = "User {username} has been deleted."
    PROXY_ERROR_LOG_MSG = "Error during closing of a proxy for elasticsearch."
    PROXY_ERROR_USER_MSG = "Elasticsearch proxy hasn't been closed properly. Check whether it still exists, if " \
                           "yes - close it manually."
    # fixed garbled message "Error during deleting a user of a user."
    OTHER_ERROR_LOG_MSG = "Error during deleting of a user."
    OTHER_ERROR_USER_MSG = "User hasn't been deleted due to technical reasons."
    DELETION_CHECK_PRESENCE = "Checking presence of a user that is going to be deleted..."
    DELETION_START_DELETING = "Deleting of a user is starting now..."
    DELETION_START_PURGING = "Purging of a user is starting now..."
    DELETION_VERIFICATION_OF_DELETING = "Verifying, whether a user has been deleted properly..."
    DELETION_DELETING_NAMESPACE = "- deleting user's namespace"
    DELETION_DELETING_USERS_OBJECTS = "- deleting user's objects"
    DELETION_DELETING_USERS_EXPERIMENTS = "- deleting user experiments' logs"
class LaunchCmdTexts:
HELP = "Command for launching web user-interface or tensorboard. It | |
following channels have been marked as bad:", self.raw.info["bads"])
if save_to_raw:
# raw needs to be reloaded for this with the mastoid channels still present
mne_data_root = os.path.join(self.data_root, "eeg", "mne")
tmp = load_raw(
self.subject,
mne_data_root=mne_data_root,
interpolate_bad_channels=False,
reference_mastoids=False,
)
tmp.info["bads"] = bads
mne_data_filepath = os.path.join(
mne_data_root, "{}-raw.fif".format(self.subject)
)
log.info(
"Updating bad channel information in: {}".format(mne_data_filepath)
)
tmp.save(mne_data_filepath, overwrite=True, verbose=False)
def interpolate_bad_channels(self):
    """Interpolate the channels listed in ``raw.info['bads']`` in place.

    This overwrites the channel data; reload the raw data to undo.
    Prints a notice when there is nothing to interpolate.
    """
    bads = self.raw.info["bads"]
    if bads:
        # Logger.warn is a deprecated alias of Logger.warning
        log.warning(
            "The following channels are interpolated: {}. "
            "This overwrites the channel data. "
            "To undo this, the raw data needs to be reloaded.".format(bads)
        )
        self.raw.interpolate_bads()
    else:
        print("No bad channels that need to be interpolated.")
def plot_bad_channel_topo(self):
    """Plot a topographic map with the bad channels highlighted.

    Uses the first sample of the first 64 channels as the map values and
    overlays a red marker mask on the positions of the bad channels.
    """
    # indices of channels currently marked as bad
    bads = [self.raw.ch_names.index(ch) for ch in self.raw.info["bads"]]
    # print bads
    # topo = np.zeros((64), dtype=float)
    topo = self.raw[0:64, 0][0].squeeze()
    # print topo.shape
    # boolean mask selecting the bad channels for highlighting
    mask = np.zeros(64, dtype=bool)
    mask[bads] = True
    mask_params = dict(marker="", markeredgecolor="r", linewidth=0, markersize=4)
    # sensor positions come from the Biosemi 64-channel layout
    layout = Biosemi64Layout()
    pos = layout.projected_xy_coords()
    # print pos.shape
    plt.figure(figsize=(5, 5))
    mne.viz.plot_topomap(
        topo,
        pos,
        res=2,
        sensors="k.",
        names=layout.channel_names(),
        show_names=True,
        cmap="RdBu_r",
        vmin=-1,
        vmax=1,
        # vmin=vmin, vmax=vmax,
        axis=plt.gca(),
        contours=False,
        mask=mask,
        mask_params=mask_params,
    )
## check the trial events
def check_trial_events(self, verbose=False, plot_fig=False):
    """Find trial trigger events in the stim channel and cache them.

    Must run before downsampling so that event sample indices still
    match the raw sampling rate. Caches ``self.trial_events`` and
    ``self.trial_event_times`` (in seconds).
    """
    # assert self.filtered is False
    assert self.downsampled is False
    raw = self.raw
    # shortest_event=0 keeps even single-sample triggers
    trial_events = mne.find_events(raw, stim_channel="STI 014", shortest_event=0)
    if verbose:
        print(trial_events)
    if plot_fig:
        plt.figure(figsize=(17, 10))
        axes = plt.gca()
        mne.viz.plot_events(
            trial_events, raw.info["sfreq"], raw.first_samp, axes=axes
        )
    print("1st event at ", raw.times[trial_events[0, 0]])
    print("last event at ", raw.times[trial_events[-1, 0]])
    # convert event sample indices to seconds
    trial_event_times = raw.times[trial_events[:, 0]]
    self.trial_events = trial_events
    self.trial_event_times = trial_event_times
def check_trial_audio_onset_merge(
    self, use_audio_onsets=True, verbose=None, plot_fig=False
):
    """Preview the trial/audio-onset event merge without modifying raw.

    Must run before downsampling so event samples match the raw rate.
    """
    # assert self.filtered is False
    assert self.downsampled is False
    raw = self.raw
    ## check whether trial and audio events are merged correctly
    # inplace=False -> the stim channel of raw is left untouched here
    merged_events = merge_trial_and_audio_onsets(
        raw, use_audio_onsets=use_audio_onsets, inplace=False
    )
    if verbose:
        for event in merged_events:
            print(event)
    if plot_fig:
        plt.figure(figsize=(17, 10))
        axes = plt.gca()
        mne.viz.plot_events(
            merged_events, raw.info["sfreq"], raw.first_samp, axes=axes
        )
def merge_trial_and_audio_onsets(self, use_audio_onsets=True):
    """Merge audio-onset events into the trial events, in place on raw.

    The pre-merge events are kept in ``self.orig_trial_events``;
    ``self.trial_events``/``self.trial_event_times`` are recomputed from
    the modified stim channel.
    """
    raw = self.raw
    # save original events
    self.orig_trial_events = self.trial_events
    # merge — calls the module-level helper of the same name (different
    # signature than this method), modifying raw's stim channel
    merge_trial_and_audio_onsets(
        raw, use_audio_onsets=use_audio_onsets, inplace=True
    )
    # recompute trial_events and times
    trial_events = mne.find_events(raw, stim_channel="STI 014", shortest_event=0)
    trial_event_times = raw.times[trial_events[:, 0]]
    self.trial_events = trial_events
    self.trial_event_times = trial_event_times
def check_trial_event_consistency(self):
    """Check that every event ends before the next event starts.

    Expected stimulus lengths come from the stimuli metadata; events
    with id >= 1000 are treated as single-sample markers. Overlaps are
    logged as warnings and counted.
    """
    meta = load_stimuli_metadata(self.data_root, self.stimuli_version)
    sfreq = self.raw.info["sfreq"]
    n_errors = 0
    # iterate over all events except the last (pairwise comparison)
    for i, event in enumerate(self.trial_events[:-1]):
        event_id = event[2]
        start = event[0]
        if event_id < 1000:
            stim_id, cond = decode_event_id(event_id)
            # conditions 1/2 include the cue clicks in the stimulus length
            if cond in [1, 2]:
                field = "length_with_cue"
            else:
                field = "length_without_cue"
            # expected duration of the stimulus, in samples
            sample_len = sfreq * meta[stim_id][field]
        else:
            sample_len = 1
        next_start = self.trial_events[i + 1, 0]
        if next_start < start + sample_len:
            expected_len = sample_len / float(sfreq)
            event_len = (next_start - start) / float(sfreq)
            log.warn(
                "warning: event {} starts before expected end of {}".format(
                    self.trial_events[i + 1], event
                )
            )
            log.warn(
                "expected length: {:.3f}s, real length: {:.3f}s, delta: {:.3f}s".format(
                    expected_len, event_len, expected_len - event_len
                )
            )
            n_errors += 1
    log.info("{} problems detected.".format(n_errors))
def check_psd(self, fmax=None, average=True):
raw = self.raw
## check PSD
# plot PSD of raw data
if fmax is None:
fmax = raw.info["sfreq"]
plt.figure(figsize=(17, 5))
axes = plt.gca()
raw.plot_psd(
average=average,
area_mode="range",
tmax=10.0,
ax=axes,
picks=self.eeg_picks,
fmax=fmax,
)
def check_channel(self, ch_num):
raw = self.raw
## have a look at 1st channel
channel = raw[ch_num, :][0].squeeze()
print(channel.shape)
plt.figure(figsize=(17, 4))
plt.plot(channel)
###################### bandpass filtering - this will change raw ######################
    def bandpass_filter(self):
        """Band-pass filter ``self.raw`` in place (0.5-30 Hz) and mark it filtered.

        NOTE(review): method="fft" is a legacy MNE spelling for FIR filtering;
        recent MNE versions expect method="fir" — confirm against the pinned
        MNE version before upgrading.
        """
        raw = self.raw
        ## apply bandpass filter, use 4 processes to speed things up
        raw.filter(
            0.5,
            30,
            picks=self.eeg_picks,
            filter_length="10s",
            l_trans_bandwidth=0.5,
            h_trans_bandwidth=0.5,
            method="fft",
            n_jobs=4,
            verbose=True,
        )
        # Downstream steps (epoching, ICA) assert on this flag.
        self.filtered = True
## generate events epochs after bandpass !
    def generate_beat_events(self, verbose=None):
        """Generate beat events and epochs; run after filtering, before downsampling.

        Uses cached ``self.trial_events`` when present, otherwise re-reads them
        from the stim channel. Stores the result in ``self.beat_epochs``.
        """
        assert self.filtered is True
        assert self.downsampled is False
        raw = self.raw
        ## generate beat events and epochs before downsampling
        # read trial events
        if hasattr(self, "trial_events"):
            trial_events = self.trial_events
        else:
            trial_events = mne.find_events(
                raw, stim_channel="STI 014", shortest_event=0
            )
        # print(trial_events)
        # generate simple beat events with same ID (10000)
        # (this calls the module-level generate_beat_events helper that this
        # method shadows by name)
        beat_events = generate_beat_events(
            trial_events,
            version=self.stimuli_version,
            beat_event_id_generator=simple_beat_event_id_generator,
            verbose=verbose,
        )
        # FIXME: read from settings
        picks = mne.pick_types(
            raw.info, meg=False, eeg=True, eog=True, stim=True, exclude=[]
        )
        event_id = None # any
        tmin = -0.2 # start of each epoch (200ms before the trigger)
        tmax = 0.8 # end of each epoch (600ms after the trigger) - longest beat is 0.57s long
        detrend = 0 # remove dc; NOTE(review): defined but not passed to mne.Epochs — confirm intent
        # reject = dict(eog=250e-6) # TODO: optionally reject epochs
        beat_epochs = mne.Epochs(
            raw,
            beat_events,
            event_id,
            tmin,
            tmax,
            preload=True,
            proj=False,
            picks=picks,
            verbose=False,
        )
        print(beat_epochs)
        self.beat_epochs = beat_epochs
# ## compute EOG epochs before downsampling
def find_eog_events(self, verbose=None, plot_fig=False):
assert self.filtered is True
assert self.downsampled is False
raw = self.raw
# check for EOG artifacts:
# NOTE: this should NOT be done after resampling!
eog_event_id = 5000
eog_events = mne.preprocessing.find_eog_events(raw, eog_event_id)
if plot_fig:
plt.figure(figsize=(17, 0.5))
axes = plt.gca()
mne.viz.plot_events(
eog_events, raw.info["sfreq"], raw.first_samp, axes=axes
)
# create epochs around EOG events
picks = mne.pick_types(
raw.info, meg=False, eeg=True, eog=True, stim=True, exclude=[]
) # FIXME
tmin = -0.5
tmax = 0.5
eog_epochs = mne.Epochs(
raw,
events=eog_events,
event_id=eog_event_id,
tmin=tmin,
tmax=tmax,
proj=False,
picks=picks,
preload=True,
verbose=False,
)
self.eog_events = eog_events
self.eog_epochs = eog_epochs
###################### down-sampling - this will change raw ######################
    def downsample(self):
        """Resample raw (with events) and all cached epochs to ``self.downsample_sfreq``.

        Epochs must already exist (see generate_beat_events / find_eog_events);
        event-based processing on raw should happen *before* this call because
        resampling jitters trigger timings (see the printed warning).
        """
        raw = self.raw
        sfreq = self.downsample_sfreq
        print(
            """
        from doc:
        WARNING: The intended purpose of this function is primarily to speed
        up computations (e.g., projection calculation) when precise timing
        of events is not required, as downsampling raw data effectively
        jitters trigger timings. It is generally recommended not to epoch
        downsampled data, but instead epoch and then downsample, as epoching
        downsampled data jitters triggers.
        NOTE: event onset collisions will be reported as warnings
        in that case, it might be a good idea to pick either the trial onset or audio onset events
        and delete the other ones before downsampling
        """
        )
        print("down-sampling raw and events stim channel ...")
        # res_type="sinc_best": highest-quality (slowest) resampler mode.
        fast_resample_mne(
            raw, sfreq, res_type="sinc_best", preserve_events=True, verbose=True
        )
        # fast_resample_mne(raw, sfreq, res_type='sinc_fastest', preserve_events=True, verbose=False)
        # stim_picks = pick_types(
        #     self.info, meg=False, ref_meg=False, stim=True, exclude=[]
        # )
        # stim_picks = np.asanyarray(stim_picks)
        # self.raw.resample(
        #     sfreq,
        #     stim_picks=stim_picks
        # )
        # resample epochs
        print("down-sampling epochs ...")
        self.eog_epochs.resample(sfreq)
        self._downsample_epochs()
        print("TODO: down-sampling events (not in stim channel) ...")
        # TODO: resample events
        self.downsampled = True
def _downsample_epochs(self):
sfreq = self.downsample_sfreq
self.beat_epochs.resample(sfreq)
def check_resampled_trial_events(self, plot_fig=False, verbose=None):
assert self.downsampled is True
raw = self.raw
trial_event_times = self.trial_event_times
resampled_trial_events = mne.find_events(
raw, stim_channel="STI 014", shortest_event=0
)
# print resampled_trial_events
if plot_fig:
plt.figure(figsize=(17, 10))
axes = plt.gca()
mne.viz.plot_events(
resampled_trial_events, raw.info["sfreq"], raw.first_samp, axes=axes
) # , color=color, event_id=event_id)
resampled_trial_event_times = raw.times[resampled_trial_events[:, 0]]
# print resampled_trial_event_times
diff = resampled_trial_event_times - trial_event_times
print(
"event onset jitter (min, mean, max):", diff.min(), diff.mean(), diff.max()
)
diff = np.asarray(diff * 1000, dtype=int)
if verbose:
for i, event in enumerate(resampled_trial_events):
print(event, diff[i])
############################ ICA aux functions ############################
# override to change ICA behavior
    def _get_ica_data(self):
        """Return the data ICA is fitted to; override to fit raw data instead."""
        # return self.raw # fit to raw data
        return self.beat_epochs # fit to epochs
def compute_ica(self, method="extended-infomax", random_seed=42, verbose=None):
data = self._get_ica_data()
random_state = np.random.RandomState(random_seed)
###############################################################################
# 1) Fit ICA model using the FastICA algorithm
# Other available choices are `infomax` or `extended-infomax`
# We pass a float value between 0 and 1 to select n_components based on the
# percentage of variance explained by the PCA components.
# ica = ICA(n_components=0.95, method='fastica', random_state=random_state) # capture 95% of variance
ica = ICA(
n_components=1.0, method=method, random_state=random_state, verbose=verbose
) # capture full variance
# ica = ICA(n_components=20, method='fastica', random_state=random_state)
# tstep = Length of data chunks for artifact rejection in seconds.
# ica.fit(raw, picks=eeg_picks, tstep=1.0, verbose=True)
ica.fit(data)
self.ica = ica
## aux functions to be moved to lib
def plot_ica_components(self, picks=None, topomap_size=3.5):
ica = self.ica
if picks is None:
n_components = ica.mixing_matrix_.shape[1]
picks = list(range(n_components))
if len(picks) == 0:
print("nothing selected for plotting")
return
ica.plot_components(
picks=picks, ch_type="eeg", title="", colorbar=True, show=False
)
axes = plt.gcf()
axes.set_size_inches(
min(len(picks), 5) * topomap_size, max(len(picks) / 5.0, 1) * topomap_size
)
plt.show()
def inspect_source_psd(self, ic):
data = self._get_ica_data()
source = self.ica._transform_epochs(data, concatenate=True)[ic]
sfreq = data.info["sfreq"]
plt.figure()
plt.psd(source, Fs=sfreq, NFFT=128, noverlap=0, pad_to=None)
plt.show()
## aux function to score EEG channels by EOG correlation
# FIXME: can this take raw OR epoch input?
def find_eog_artifact_sources(self, plot=True, verbose=None):
| |
tn == all1(N):
print("all ones found:wn:{}, tn:{}, nbt:{}, tni:{}".format(wn,tn,nbt,tni))
#if the number of tiles is smaller than max_fcn,
#and there is a tile in the ALL-1, the bit that should be
#set to one is not the last one, but the ones next to the
#previous tile number
#example, tiles 1 tile in ALL-1
print("nb_tiles {}".format(nbt))
if len(sorted_tile_list) == 1:
#just the all-1 message was received all other fragments were lost
#bitmap sould be [0,0,0,0,0,1]
for _ in range(max_fcn):
#add zeros to all other tiles
bl.append(0)
#add the last bit as 1 -> fcn = 7
#bl.append(1)
if nbt >= 1:
print("a tile in the last fragment")
if max_fcn - 1 != tni:
print("tile is the last of the packet, should be number {}".format(tni))
#if more than one tile is in the ALL-1 message
for _ in range(nbt):
print("append 1")
bl.append(1)
break
else:
while tni-1 > 0:
bl.append(0)
tni -= 1
if nbt == 1:
print("append 1")
bl.append(1)
break
# regular
if wni < wn:
print("MBL00 wn:tn:nb=", wni, tni, bl)
while wni < wn:
bl = bit_list.setdefault(wni, [])
while tni > 0:
print("padding")
bl.append(0)
tni -= 1
bl.append(0)
wni += 1
tni = max_fcn
print("MBL01 wn:tn:nb=", wni, tni, bl)
print("MBL1 nb=", nbt)
assert wni == wn
bl = bit_list.setdefault(wni, [])
while tni > tn:
bl.append(0)
tni -= 1
print("MBL2 wn:tn:nb=", wni, tni, bl)
for _ in range(nbt):
bl.append(1)
if tni == 0:
print("MBL3 wn:tn:nb=", wni, tni, bl)
wni += 1
bl = bit_list.setdefault(wni, [])
tni = max_fcn
else:
print("MBL4 wn:tn:nb=", wni, tni, bl)
tni -= 1
if len(bl) != max_fcn:
#bitmap should be larger
print("max_fcn:{} bl:{} tni:{} ".format(max_fcn, bl,tni))
while tni != 0:
bl.append(0)
tni -= 1
print("tni:{}".format(tni))
print("max_fcn:{} bl:{} tni:{} ".format(max_fcn, bl,tni))
print("bl:{}".format(bl))
return bit_list
def find_missing_tiles_mic_ko_yes_all_1(tile_list, N, window_size):
    """ find missing tiles in the tile_list when the MIC is KO and the
    all-1 fragment has been received.
    return the set of bitmaps for each window in which any tiles are missing.
    the bitmap is transformed into BitBuffer like below.
        [
            (0, BitBuffer([1, 1, 1, 1, 1, 1, 0])),
            (2, BitBuffer([1, 1, 1, 0, 0, 0, 1])),
        ]
    In this example, the bitmap will be "1110001".
    Unlike find_missing_tiles(), the bit list is built with
    make_bit_list_mic_ko() so the tiles carried by the all-1 fragment are
    accounted for even when the regular bit list would come back empty
    (i.e. no tile appears to be missing although the MIC is KO).
    """
    bit_list = make_bit_list_mic_ko(tile_list, N, window_size)
    # FIX: "lenght" typo corrected in the log message below.
    print('find_missing_tiles_mic_ko_yes_all_1 - bit_list -> {}, length: {}'.format(
        bit_list, len(bit_list)))
    ret = []
    # Only windows with at least one missing tile (a 0 bit) are reported.
    for win, bits in sorted(bit_list.items()):
        print(" i, all(i[1]) {},{}".format((win, bits), all(bits)))
        if not all(bits):
            ret.append((win, BitBuffer(bits)))
    print("find_missing_tiles_mic_ko_yes_all_1 ret -> {}".format(ret))
    return ret
def make_bit_list_mic_ko(tile_list, N, window_size):
    """ make a bit list for each window from the tile_list, and return it.
    the bit list is going to be used to make the bitmap.
    Variant of make_bit_list() used when the MIC is KO: the tiles carried
    by the all-1 fragment are accounted for explicitly.
    The tile_list passed in the argument should be formed like below.
    Note that if nb_tiles doesn't exist, it is assumed as one.
        [
            { "w-num": 0, "t-num": 6, "nb_tiles": 3 },
            { "w-num": 0, "t-num": 3, "nb_tiles": 3 },
            { "w-num": 2, "t-num": 5, "nb_tiles": 2 },
            { "w-num": 2, "t-num": 7, "nb_tiles": 1 },
        ]
    In this example, the bit list will be
        {
            0: [1, 1, 1, 1, 1, 1, 0],
            1: [0, 0, 0, 0, 0, 0, 0],
            2: [1, 1, 1, 0, 0, 0, 1],
        }
    """
    print("make_bit_list_mic_ko")
    max_fcn = window_size - 1
    bit_list = {}
    wni = 0
    sorted_tile_list = sort_tile_list(tile_list, N)
    # main loop: walk the sorted tiles, tracking current window (wni) and
    # expected tile number (tni, counting down from max_fcn).
    tni = max_fcn
    for t in sorted_tile_list:
        bl = bit_list.setdefault(wni, [])
        wn = t["w-num"]
        tn = t["t-num"]
        nbt = t.get("nb_tiles", 1)
        # all-1 fragment (tn is the all-ones FCN)
        if tn == all1(N):
            print("all ones found:wn:{}, tn:{}, nbt:{}, tni:{}".format(wn,tn,nbt,tni))
            #if the number of tiles is smaller than max_fcn,
            #and there is a tile in the ALL-1, the bit that should be
            #set to one is not the last one, but the ones next to the
            #previous tile number
            #example, tiles 1 tile in ALL-1
            #input('Only received the all-1, added the 1 for the tiles and the last pos')
            print("nb_tiles {} len(sorted_tile_list):{}".format(nbt,len(sorted_tile_list)))
            if len(sorted_tile_list) == 1:
                #just the all-1 message was received all other fragments were lost
                #bitmap should be [0,0,0,0,0,1] if only one tile in the all-1 message
                for _ in range(max_fcn-nbt+1):
                    #add zeros to all other tiles, until the tiles in the all-1
                    bl.append(0)
                for _ in range(nbt):
                    #add ones for the tiles in the ALL-1
                    print("append 1")
                    bl.append(1)
                #add the last bit as 1 -> fcn = 7
                #bl.append(1)
                print(bl)
                #input('Only received the all-1, added the 1 for the tiles and the last pos')
                break
            elif nbt >= 1:
                #more fragments arrived and not only the all-1
                print("a tile in the last fragment but we dont know the tile number")
                if max_fcn - 1 != tni:
                    #checks the position, if all other tiles have arrived
                    print("tile is the last of the packet, should be number {}".format(tni))
                    for _ in range(tni):
                        bl.append(0)
                    #if more than one tile is in the ALL-1 message
                    for _ in range(nbt):
                        print("append 1")
                        bl.append(1)
                    print(bl)
                    #input('added the 1 for the tiles and the last pos')
                    break
                else:
                    while tni-1 > 0:
                        bl.append(0)
                        tni -= 1
                    if nbt == 1:
                        #will never be True
                        print("append 1")
                        bl.append(1)
                    break
        # legacy all-1 handling kept for reference:
        # if tn == all1(N):
        #     while tni > 0:
        #         bl.append(0)
        #         tni -= 1
        #     if nbt == 1:
        #         bl.append(1)
        #     break
        # regular fragment: first pad any fully-missed windows with zeros
        if wni < wn:
            print("MBL00 wn:tn:nb=", wni, tni, bl)
            while wni < wn:
                bl = bit_list.setdefault(wni, [])
                while tni > 0:
                    bl.append(0)
                    tni -= 1
                bl.append(0)
                wni += 1
                tni = max_fcn
            print("MBL01 wn:tn:nb=", wni, tni, bl)
        print("MBL1 nb=", nbt)
        #assert wni == wn
        bl = bit_list.setdefault(wni, [])
        # zeros for tiles missing before this one within the window
        while tni > tn:
            bl.append(0)
            tni -= 1
        print("MBL2 wn:tn:nb=", wni, tni, bl)
        # ones for the tiles carried by this fragment, rolling over windows
        for _ in range(nbt):
            bl.append(1)
            if tni == 0:
                print("MBL3 wn:tn:nb=", wni, tni, bl)
                wni += 1
                bl = bit_list.setdefault(wni, [])
                tni = max_fcn
            else:
                print("MBL4 wn:tn:nb=", wni, tni, bl)
                tni -= 1
    return bit_list
"""
max_fcn = window_size - 1
bit_list = {}
wni = 0
sorted_tile_list = sort_tile_list(tile_list, N)
# main
tni = max_fcn
for t in sorted_tile_list:
print("t:{}".format(t))
bl = bit_list.setdefault(wni, [])
wn = t["w-num"]
tn = t["t-num"]
nbt = t.get("nb_tiles", 1)
# all-1
if tn == all1(N):
print("all ones found:wn:{}, tn:{}, nbt:{}, tni:{}".format(wn,tn,nbt,tni))
#if the number of tiles is smaller than max_fcn,
#and there is a tile in the ALL-1, the bit that should be
#set to one is not the last one, but the ones next to the
#previous tile number
#example, tiles 1 tile in ALL-1
print("nb_tiles {}".format(nbt))
if len(sorted_tile_list) == 1:
#just the all-1 message was received all other fragments were lost
#bitmap sould be [0,0,0,0,0,1]
for _ in range(max_fcn-1):
#add zeros to all other tiles
bl.append(0)
#add the last bit as 1 -> fcn = 7
#bl.append(1)
if nbt >= 1:
print("a | |
<filename>meerk40t/core/node/elem_image.py
import threading
from copy import copy
from PIL.Image import DecompressionBombError
from meerk40t.core.node.node import Node
from meerk40t.core.units import UNITS_PER_INCH
from meerk40t.image.imagetools import RasterScripts
from meerk40t.svgelements import Matrix
class ImageNode(Node):
"""
ImageNode is the bootstrapped node type for the 'elem image' type.
ImageNode contains a main matrix, main image. A processed image and a processed matrix.
The processed matrix must be concated with the main matrix to be accurate.
"""
    def __init__(
        self,
        image=None,
        matrix=None,
        overscan=None,
        direction=None,
        dpi=500,
        operations=None,
        **kwargs,
    ):
        """Create an 'elem image' node.

        @param image: source PIL image
        @param matrix: placement matrix for the source image
        @param overscan: overscan setting carried for raster planning
        @param direction: raster direction setting
        @param dpi: dots-per-inch used to derive device step sizes
        @param operations: list of raster-script operation dicts (see
            process_image); defaults to an empty list
        """
        super(ImageNode, self).__init__(type="elem image", **kwargs)
        self.image = image
        self.matrix = matrix
        # Processed results; active_image/active_matrix prefer these when set.
        self.processed_image = None
        self.processed_matrix = None
        self.process_image_failed = False
        # Status text shown while a background update is running.
        self.text = None
        self._needs_update = False
        self._update_thread = None
        # NOTE(review): this lock is created but never acquired anywhere in
        # this class — confirm whether process_image needs it.
        self._update_lock = threading.Lock()
        self.settings = kwargs
        self.overscan = overscan
        self.direction = direction
        self.dpi = dpi
        # Real device steps; set by preprocess() or defaulted in process_image().
        self.step_x = None
        self.step_y = None
        self.lock = False
        # Image adjustment defaults consumed by process_image().
        self.invert = False
        self.red = 1.0
        self.green = 1.0
        self.blue = 1.0
        self.lightness = 1.0
        self.view_invert = False
        self.dither = True
        self.dither_type = "Floyd-Steinberg"
        if operations is None:
            operations = list()
        self.operations = operations
    def __copy__(self):
        """Shallow-copy this node.

        Only the matrix is duplicated; ``image``, ``operations`` and
        ``settings`` are shared with the original — mutating the copy's
        operations list also affects this node.
        """
        return ImageNode(
            image=self.image,
            matrix=copy(self.matrix),
            overscan=self.overscan,
            direction=self.direction,
            dpi=self.dpi,
            operations=self.operations,
            **self.settings,
        )
def __repr__(self):
return "%s('%s', %s, %s)" % (
self.__class__.__name__,
self.type,
str(self.image),
str(self._parent),
)
@property
def active_image(self):
if self.processed_image is not None:
return self.processed_image
else:
return self.image
    @property
    def active_matrix(self):
        """Placement matrix for the active image.

        The processed matrix (if any) must be concatenated with the main
        matrix to be accurate.
        """
        if self.processed_matrix is None:
            return self.matrix
        return self.processed_matrix * self.matrix
    def preprocess(self, context, matrix, commands):
        """
        Preprocess step during the cut planning stages.
        We require a context to calculate the correct step values relative to the device

        @param context: device context providing dpi_to_steps()
        @param matrix: additional placement matrix applied to this node
        @param commands: planning command list (unused here)
        """
        self.step_x, self.step_y = context.device.dpi_to_steps(self.dpi)
        self.matrix *= matrix
        # Bounds must be recomputed for the new matrix before reprocessing.
        self._bounds_dirty = True
        self.process_image()
@property
def bounds(self):
if self._bounds_dirty:
image_width, image_height = self.active_image.size
matrix = self.active_matrix
x0, y0 = matrix.point_in_matrix_space((0, 0))
x1, y1 = matrix.point_in_matrix_space((image_width, image_height))
x2, y2 = matrix.point_in_matrix_space((0, image_height))
x3, y3 = matrix.point_in_matrix_space((image_width, 0))
self._bounds_dirty = False
self._bounds = (
min(x0, x1, x2, x3),
min(y0, y1, y2, y3),
max(x0, x1, x2, x3),
max(y0, y1, y2, y3),
)
return self._bounds
def default_map(self, default_map=None):
default_map = super(ImageNode, self).default_map(default_map=default_map)
default_map.update(self.settings)
image = self.active_image
default_map["width"] = image.width
default_map["height"] = image.height
default_map["element_type"] = "Image"
default_map["matrix"] = self.matrix
default_map["dpi"] = self.dpi
default_map["overscan"] = self.overscan
default_map["direction"] = self.direction
return default_map
def drop(self, drag_node):
# Dragging element into element.
if drag_node.type.startswith("elem"):
self.insert_sibling(drag_node)
return True
return False
def revalidate_points(self):
bounds = self.bounds
if bounds is None:
return
if len(self._points) < 9:
self._points.extend([None] * (9 - len(self._points)))
self._points[0] = [bounds[0], bounds[1], "bounds top_left"]
self._points[1] = [bounds[2], bounds[1], "bounds top_right"]
self._points[2] = [bounds[0], bounds[3], "bounds bottom_left"]
self._points[3] = [bounds[2], bounds[3], "bounds bottom_right"]
cx = (bounds[0] + bounds[2]) / 2
cy = (bounds[1] + bounds[3]) / 2
self._points[4] = [cx, cy, "bounds center_center"]
self._points[5] = [cx, bounds[1], "bounds top_center"]
self._points[6] = [cx, bounds[3], "bounds bottom_center"]
self._points[7] = [bounds[0], cy, "bounds center_left"]
self._points[8] = [bounds[2], cy, "bounds center_right"]
    def update_point(self, index, point):
        """Snap points derive from bounds; direct edits are unsupported."""
        return False
    def add_point(self, point, index=None):
        """Snap points derive from bounds; adding points is unsupported."""
        return False
    def update(self, context):
        """Schedule (re)processing of the image on a background thread.

        Safe to call repeatedly: while a worker is already running only the
        ``_needs_update`` flag is raised and the worker loops another pass.
        """
        self._needs_update = True
        self.text = "Processing..."
        context.signal("refresh_scene", "Scene")
        if self._update_thread is None:
            def clear(result):
                # Completion callback: publish status and allow rescheduling.
                if self.process_image_failed:
                    self.text = "Process image could not exist in memory."
                else:
                    self.text = None
                self._needs_update = False
                self._update_thread = None
                context.signal("refresh_scene", "Scene")
                context.signal("image updated", self)
            # Drop stale results; active_image falls back to the source image
            # until the worker publishes a new processed image.
            self.processed_image = None
            self.processed_matrix = None
            self._update_thread = context.threaded(
                self.process_image_thread, result=clear, daemon=True
            )
    def process_image_thread(self):
        """Background worker: reprocess while update requests are pending.

        Loops so that requests arriving mid-processing trigger another pass.
        """
        while self._needs_update:
            self._needs_update = False
            self.process_image()
            # Unset cache.
            self.wx_bitmap_image = None
            self.cache = None
    def process_image(self):
        """Build ``processed_image``/``processed_matrix`` from the source image.

        Pipeline: weighted grayscale conversion (red/green/blue/lightness),
        optional inversion, rescale to device steps (actualize), the
        configured raster-script operations, background masking, and final
        dithering to 1-bit. On MemoryError/DecompressionBombError the
        ``process_image_failed`` flag is set and the method returns early.
        """
        if self.step_x is None:
            # preprocess() was not run; derive steps from dpi alone.
            step = UNITS_PER_INCH / self.dpi
            self.step_x = step
            self.step_y = step
        from PIL import Image, ImageEnhance, ImageFilter, ImageOps
        from meerk40t.image.actualize import actualize
        from meerk40t.image.imagetools import dither
        image = self.image
        main_matrix = self.matrix
        # Luma weights (ITU-R 601) scaled by the per-channel user factors.
        r = self.red * 0.299
        g = self.green * 0.587
        b = self.blue * 0.114
        v = self.lightness
        c = r + g + b
        try:
            c /= v
            r = r / c
            g = g / c
            b = b / c
        except ZeroDivisionError:
            # lightness == 0: keep the unnormalized weights.
            pass
        if image.mode != "L":
            image = image.convert("RGB")
            image = image.convert("L", matrix=[r, g, b, 1.0])
        if self.invert:
            image = image.point(lambda e: 255 - e)
        # Calculate device real step.
        step_x, step_y = self.step_x, self.step_y
        if (
            main_matrix.a != step_x
            or main_matrix.b != 0.0
            or main_matrix.c != 0.0
            or main_matrix.d != step_y
        ):
            try:
                image, actualized_matrix = actualize(
                    image,
                    main_matrix,
                    step_x=step_x,
                    step_y=step_y,
                    inverted=self.invert,
                )
            except (MemoryError, DecompressionBombError):
                self.process_image_failed = True
                return
        else:
            actualized_matrix = Matrix(main_matrix)
        # Mask of non-background pixels (background is 0 when inverted,
        # 255 otherwise); reapplied after the operations below.
        if self.invert:
            empty_mask = image.convert("L").point(lambda e: 0 if e == 0 else 255)
        else:
            empty_mask = image.convert("L").point(lambda e: 0 if e == 255 else 255)
        # Process operations.
        for op in self.operations:
            name = op["name"]
            if name == "crop":
                try:
                    if op["enable"] and op["bounds"] is not None:
                        crop = op["bounds"]
                        left = int(crop[0])
                        upper = int(crop[1])
                        right = int(crop[2])
                        lower = int(crop[3])
                        image = image.crop((left, upper, right, lower))
                except KeyError:
                    pass
            elif name == "edge_enhance":
                try:
                    if op["enable"]:
                        if image.mode == "P":
                            image = image.convert("L")
                        image = image.filter(filter=ImageFilter.EDGE_ENHANCE)
                except KeyError:
                    pass
            elif name == "auto_contrast":
                try:
                    if op["enable"]:
                        if image.mode not in ("RGB", "L"):
                            # Auto-contrast raises NotImplementedError if P
                            # Auto-contrast raises OSError if not RGB, L.
                            image = image.convert("L")
                        image = ImageOps.autocontrast(image, cutoff=op["cutoff"])
                except KeyError:
                    pass
            elif name == "tone":
                try:
                    if op["enable"] and op["values"] is not None:
                        if image.mode == "L":
                            image = image.convert("P")
                        tone_values = op["values"]
                        if op["type"] == "spline":
                            spline = ImageNode.spline(tone_values)
                        else:
                            tone_values = [q for q in tone_values if q is not None]
                            spline = ImageNode.line(tone_values)
                        # Pad/truncate the lookup table to exactly 256 entries.
                        if len(spline) < 256:
                            spline.extend([255] * (256 - len(spline)))
                        if len(spline) > 256:
                            spline = spline[:256]
                        image = image.point(spline)
                        if image.mode != "L":
                            image = image.convert("L")
                except KeyError:
                    pass
            elif name == "contrast":
                try:
                    if op["enable"]:
                        if op["contrast"] is not None and op["brightness"] is not None:
                            contrast = ImageEnhance.Contrast(image)
                            c = (op["contrast"] + 128.0) / 128.0
                            image = contrast.enhance(c)
                            brightness = ImageEnhance.Brightness(image)
                            b = (op["brightness"] + 128.0) / 128.0
                            image = brightness.enhance(b)
                except KeyError:
                    pass
            elif name == "gamma":
                try:
                    if op["enable"] and op["factor"] is not None:
                        if image.mode == "L":
                            gamma_factor = float(op["factor"])
                            def crimp(px):
                                # Clamp to the valid 0-255 pixel range.
                                px = int(round(px))
                                if px < 0:
                                    return 0
                                if px > 255:
                                    return 255
                                return px
                            if gamma_factor == 0:
                                gamma_lut = [0] * 256
                            else:
                                gamma_lut = [
                                    crimp(pow(i / 255, (1.0 / gamma_factor)) * 255)
                                    for i in range(256)
                                ]
                            image = image.point(gamma_lut)
                            if image.mode != "L":
                                image = image.convert("L")
                except KeyError:
                    pass
            elif name == "unsharp_mask":
                try:
                    if (
                        op["enable"]
                        and op["percent"] is not None
                        and op["radius"] is not None
                        and op["threshold"] is not None
                    ):
                        unsharp = ImageFilter.UnsharpMask(
                            radius=op["radius"],
                            percent=op["percent"],
                            threshold=op["threshold"],
                        )
                        image = image.filter(unsharp)
                except (KeyError, ValueError): # Value error if wrong type of image.
                    pass
            elif name == "halftone":
                try:
                    if op["enable"]:
                        image = RasterScripts.halftone(
                            image,
                            sample=op["sample"],
                            angle=op["angle"],
                            oversample=op["oversample"],
                            black=op["black"],
                        )
                except KeyError:
                    pass
        if empty_mask is not None:
            background = Image.new(image.mode, image.size, "white")
            background.paste(image, mask=empty_mask)
            image = background # Mask exists use it to remove any pixels that were pure reject.
        if self.dither and self.dither_type is not None:
            if self.dither_type != "Floyd-Steinberg":
                image = dither(image, self.dither_type)
            image = image.convert("1")
        # processed_matrix is relative to the main matrix (see active_matrix).
        inverted_main_matrix = Matrix(main_matrix).inverse()
        self.processed_matrix = actualized_matrix * inverted_main_matrix
        self.processed_image = image
        # self.matrix = actualized_matrix
        self.altered()
        self.process_image_failed = False
    @staticmethod
    def line(p):
        """Piecewise-linear interpolation of control points into a 0-255 LUT.

        @param p: list of (x, y) integer control points, sorted by x.
        @return: list of y values; 0 before the first point, 255 after the last.

        NOTE(review): the final append makes the table 257 entries long
        (callers truncate to 256), and ``round(int(...))`` looks like it was
        meant to be ``int(round(...))`` — confirm before changing.
        """
        N = len(p) - 1
        try:
            # Segment slopes; fall back to slope 1 on a vertical segment.
            m = [(p[i + 1][1] - p[i][1]) / (p[i + 1][0] - p[i][0]) for i in range(0, N)]
        except ZeroDivisionError:
            m = [1] * N
        # b = y - mx
        b = [p[i][1] - (m[i] * p[i][0]) for i in range(0, N)]
        r = list()
        for i in range(0, p[0][0]):
            r.append(0)
        for i in range(len(p) - 1):
            x0 = p[i][0]
            x1 = p[i + 1][0]
            range_list = [int(round((m[i] * x) + b[i])) for x in range(x0, x1)]
            r.extend(range_list)
        for i in range(p[-1][0], 256):
            r.append(255)
        r.append(round(int(p[-1][1])))
        return r
@staticmethod
def spline(p):
"""
Spline interpreter.
Returns all integer locations between different spline interpolation values
@param p: points to be quad spline interpolated.
@return: integer y values for given spline points.
"""
try:
N = len(p) - 1
w = [(p[i + 1][0] - | |
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import print_function
import argparse, json, os, itertools, random, shutil, copy
import time
import re
import question_engine as qeng
"""
Generate synthetic questions and answers for CLEVR images. Input is a single
JSON file containing ground-truth scene information for all images, and output
is a single JSON file containing all generated questions, answers, and programs.
Questions are generated by expanding templates. Each template contains a single
program template and one or more text templates, both with the same set of typed
slots; by convention <Z> = Size, <C> = Color, <M> = Material, <S> = Shape.
Program templates may contain special nodes that expand into multiple functions
during instantiation; for example a "filter" node in a program template will
expand into a combination of "filter_size", "filter_color", "filter_material",
and "filter_shape" nodes after instantiation, and a "filter_unique" node in a
template will expand into some combination of filtering nodes followed by a
"unique" node.
Templates are instantiated using depth-first search; we are looking for template
instantiations where (1) each "unique" node actually refers to a single object,
(2) constraints in the template are satisfied, and (3) the answer to the question
passes our rejection sampling heuristics.
To efficiently handle (1) and (2), we keep track of partial evaluations of the
program during each step of template expansion. This together with the use of
composite nodes in program templates (filter_unique, relate_filter_unique) allow
us to efficiently prune the search space and terminate early when we know that
(1) or (2) will be violated.
"""
# Command-line interface. @-prefixed arguments are read from a file.
parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
# Inputs
parser.add_argument('--input_scene_file', default='../output/CLEVR_scenes.json',
    help="JSON file containing ground-truth scene information for all images " +
         "from render_images.py")
parser.add_argument('--metadata_file', default='metadata.json',
    help="JSON file containing metadata about functions")
parser.add_argument('--synonyms_json', default='synonyms.json',
    help="JSON file defining synonyms for parameter values")
parser.add_argument('--synonyms_action_json', default='synonyms_action.json',
    help="JSON file defining synonyms for action parameter values")
parser.add_argument('--template_dir', default='CLEVR_1.0_templates',
    help="Directory containing JSON templates for questions")
# FIX: corrected "tempaltes" typo in the help string below.
parser.add_argument('--action_template_dir', default='CLEVR_action_templates',
    help="Directory containing JSON templates for action questions")
# Output
parser.add_argument('--output_questions_file',
    default='../output/CLEVR_questions.json',
    help="The output file to write containing generated questions")
# Control which and how many images to process
parser.add_argument('--scene_start_idx', default=0, type=int,
    help="The image at which to start generating questions; this allows " +
         "question generation to be split across many workers")
parser.add_argument('--num_scenes', default=0, type=int,
    help="The number of images for which to generate questions. Setting to 0 " +
         "generates questions for all scenes in the input file starting from " +
         "--scene_start_idx")
# Control the number of questions per image; we will attempt to generate
# templates_per_image * instances_per_template questions per image.
parser.add_argument('--templates_per_image', default=10, type=int,
    help="The number of different templates that should be instantiated " +
         "on each image")
parser.add_argument('--instances_per_template', default=1, type=int,
    help="The number of times each template should be instantiated on an image")
# Misc
parser.add_argument('--reset_counts_every', default=250, type=int,
    help="How often to reset template and answer counts. Higher values will " +
         "result in flatter distributions over templates and answers, but " +
         "will result in longer runtimes.")
parser.add_argument('--verbose', action='store_true',
    help="Print more verbose output")
parser.add_argument('--time_dfs', action='store_true',
    help="Time each depth-first search; must be given with --verbose")
parser.add_argument('--profile', action='store_true',
    help="If given then run inside cProfile")
parser.add_argument('--action', default=0, type=int,
    help="Generate action related questions based on action templates.")
# args = parser.parse_args()
# Answer markers used when instantiating action templates.
SIZE_CHANGED = "size_changed"
SIZE_UNCHANGED = "size_unchanged"
COLOR_CHANGED = "color_changed"
COLOR_UNCHANGED = "color_unchanged"
MAT_CHANGED = "mat_changed"
MAT_UNCHANGED = "mat_unchanged"
def precompute_filter_options(scene_struct, metadata):
    """Precompute, for every partial attribute tuple, the matching objects.

    Keys of the resulting map are (size, color, material, shape) tuples where
    any entry may be None (wildcard); values are sets of object indices. The
    map is stored on scene_struct['_filter_options'].
    """
    attribute_map = {}
    if metadata['dataset'] == 'CLEVR-v1.0':
        attr_keys = ['size', 'color', 'material', 'shape']
    else:
        assert False, 'Unrecognized dataset'
    # Every attribute subset corresponds to a 0/1 mask over attr_keys.
    # Reversing each product tuple reproduces the little-endian bit order
    # of the original counter-based enumeration.
    masks = [
        list(bits)[::-1]
        for bits in itertools.product((0, 1), repeat=len(attr_keys))
    ]
    for object_idx, obj in enumerate(scene_struct['objects']):
        if metadata['dataset'] == 'CLEVR-v1.0':
            keys = [tuple(obj[k] for k in attr_keys)]
        for mask in masks:
            for key in keys:
                # Keep attributes where the mask bit is set, wildcard the rest.
                masked_key = tuple(
                    a if b == 1 else None for a, b in zip(key, mask)
                )
                attribute_map.setdefault(masked_key, set()).add(object_idx)
    scene_struct['_filter_options'] = attribute_map
def find_filter_options(object_idxs, scene_struct, metadata):
    """Restrict the precomputed filter options to a subset of object indices.

    Returns a dict mapping each (size, color, material, shape) key to the
    sorted list of indices from object_idxs that match that filter.
    """
    if '_filter_options' not in scene_struct:
        precompute_filter_options(scene_struct, metadata)
    candidates = set(object_idxs)
    return {
        key: sorted(candidates & members)
        for key, members in scene_struct['_filter_options'].items()
    }
def add_empty_filter_options(attribute_map, metadata, num_to_add):
    """Add num_to_add filter keys that match NO objects (negative options).

    Each added key maps to an empty list. Keys are random attribute tuples
    (entries may be None) not already present in attribute_map.
    Note: loops until enough distinct unused keys are found, so num_to_add
    must not exceed the number of remaining unused combinations.
    """
    if metadata['dataset'] == 'CLEVR-v1.0':
        attr_keys = ['Size', 'Color', 'Material', 'Shape']
    else:
        assert False, 'Unrecognized dataset'
    attr_vals = [metadata['types'][t] + [None] for t in attr_keys]
    if '_filter_options' in metadata:
        attr_vals = metadata['_filter_options']
    target_size = len(attribute_map) + num_to_add
    while len(attribute_map) < target_size:
        # BUG FIX: this must be a tuple. A bare generator expression is
        # hashed by identity, so the membership test was always False and
        # the map was polluted with unusable generator-object keys.
        k = tuple(random.choice(v) for v in attr_vals)
        if k not in attribute_map:
            attribute_map[k] = []
def find_relate_filter_options(object_idx, scene_struct, metadata,
                               unique=False, include_zero=False, trivial_frac=0.1):
    """Find (relationship, filter) pairs applicable relative to object_idx.

    Returns a dict mapping (relationship, filters) to the sorted indices of
    related objects matching the filter. Nontrivial combinations are always
    kept; a random trivial_frac-sized share of trivial ones (where the
    filter output is entirely contained in the related set) is mixed in.
    """
    options = {}
    if '_filter_options' not in scene_struct:
        precompute_filter_options(scene_struct, metadata)
    # TODO: Right now this is only looking for nontrivial combinations; in some
    # cases I may want to add trivial combinations, either where the intersection
    # is empty or where the intersection is equal to the filtering output.
    trivial_options = {}
    for rel_name, per_object in scene_struct['relationships'].items():
        related = set(per_object[object_idx])
        for filters, matched in scene_struct['_filter_options'].items():
            overlap = related & matched
            if unique and len(overlap) != 1:
                continue
            if not include_zero and not overlap:
                continue
            # "Trivial" means the filter output lies entirely in the related set.
            bucket = trivial_options if overlap == matched else options
            bucket[(rel_name, filters)] = sorted(overlap)
    num_trivial = int(round(len(options) * trivial_frac / (1 - trivial_frac)))
    sampled_trivial = list(trivial_options.items())
    random.shuffle(sampled_trivial)
    options.update(sampled_trivial[:num_trivial])
    return options
def node_shallow_copy(node):
    """Copy a program node, keeping only 'type', 'inputs', and — when the
    node has them — 'side_inputs'. Field values are shared, not copied."""
    copied = {
        'type': node['type'],
        'inputs': node['inputs'],
    }
    if 'side_inputs' in node:
        copied['side_inputs'] = node['side_inputs']
    return copied
def other_heuristic(text, param_vals):
    """
    Post-processing heuristic to handle the word "other"
    """
    # Nothing to do unless the text actually says "other"/"another".
    if ' other ' not in text and ' another ' not in text:
        return text
    expected_keys = {
        '<Z>', '<C>', '<M>', '<S>',
        '<Z2>', '<C2>', '<M2>', '<S2>',
    }
    if param_vals.keys() != expected_keys:
        return text
    # "other" only makes sense when the two referenced objects could be the
    # same; if any attribute pair is filled in on both sides and differs,
    # the word has to go.
    must_remove = False
    for base in ('<Z>', '<C>', '<M>', '<S>'):
        partner = base[:-1] + '2>'  # e.g. '<Z>' -> '<Z2>'
        first_val = param_vals.get(base, None)
        second_val = param_vals.get(partner, None)
        if first_val != '' and second_val != '' and first_val != second_val:
            if args.verbose:
                print('other has got to go! %s = %s but %s = %s'
                      % (base, first_val, partner, second_val))
            must_remove = True
            break
    if not must_remove:
        return text
    # str.replace is a no-op when the substring is absent.
    text = text.replace(' other ', ' ')
    text = text.replace(' another ', ' a ')
    return text
def instantiate_templates_dfs(scene_struct, template, metadata, answer_counts,
synonyms, max_instances=None, verbose=False):
param_name_to_type = {p['name']: p['type'] for p in template['params']}
initial_state = {
'nodes': [node_shallow_copy(template['nodes'][0])],
'vals': {},
'input_map': {0: 0},
'next_template_node': 1,
}
states = [initial_state]
final_states = []
while states:
state = states.pop()
# Check to make sure the current state is valid
q = {'nodes': state['nodes']}
outputs = qeng.answer_question(q, metadata, scene_struct, all_outputs=True)
answer = outputs[-1]
if answer == '__INVALID__': continue
# Check to make sure constraints are satisfied for the current state
skip_state = False
for constraint in template['constraints']:
if constraint['type'] == 'NEQ':
p1, p2 = constraint['params']
v1, v2 = state['vals'].get(p1), state['vals'].get(p2)
if v1 is not None and v2 is not None and v1 != v2:
if verbose:
print('skipping due to NEQ constraint')
print(constraint)
print(state['vals'])
skip_state = True
break
elif constraint['type'] == 'NULL':
p = constraint['params'][0]
p_type = param_name_to_type[p]
v = state['vals'].get(p)
if v is not None:
skip = False
if p_type == 'Shape' and v != 'thing': skip = True
if p_type != 'Shape' and v != '': skip = True
if skip:
if verbose:
print('skipping due to NULL constraint')
print(constraint)
print(state['vals'])
skip_state = True
break
elif constraint['type'] == 'OUT_NEQ':
i, j = constraint['params']
i = state['input_map'].get(i, None)
j = state['input_map'].get(j, None)
if i is not None and j is not None and outputs[i] | |
'Yichun, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u4f0a\u6625\u5e02')},
'861800451':{'en': 'Harbin, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861800450':{'en': 'Harbin, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861800453':{'en': 'Mudanjiang, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u7261\u4e39\u6c5f\u5e02')},
'861800452':{'en': 'Qiqihar, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9f50\u9f50\u54c8\u5c14\u5e02')},
'861800455':{'en': 'Suihua, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u7ee5\u5316\u5e02')},
'861800454':{'en': 'Jiamusi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u4f73\u6728\u65af\u5e02')},
'861800457':{'en': 'Da <NAME>, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5174\u5b89\u5cad\u5730\u533a')},
'861800456':{'en': 'Heihe, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9ed1\u6cb3\u5e02')},
'861768755':{'en': 'Yulin, Guangxi', 'zh': u('\u5e7f\u897f\u7389\u6797\u5e02')},
'861768754':{'en': 'Wuzhou, Guangxi', 'zh': u('\u5e7f\u897f\u68a7\u5dde\u5e02')},
'861768757':{'en': 'Liuzhou, Guangxi', 'zh': u('\u5e7f\u897f\u67f3\u5dde\u5e02')},
'861768756':{'en': 'Liuzhou, Guangxi', 'zh': u('\u5e7f\u897f\u67f3\u5dde\u5e02')},
'861768751':{'en': 'Nanning, Guangxi', 'zh': u('\u5e7f\u897f\u5357\u5b81\u5e02')},
'861768750':{'en': 'Nanning, Guangxi', 'zh': u('\u5e7f\u897f\u5357\u5b81\u5e02')},
'861768753':{'en': 'Guilin, Guangxi', 'zh': u('\u5e7f\u897f\u6842\u6797\u5e02')},
'861768752':{'en': 'Liuzhou, Guangxi', 'zh': u('\u5e7f\u897f\u67f3\u5dde\u5e02')},
'861768759':{'en': 'Beihai, Guangxi', 'zh': u('\u5e7f\u897f\u5317\u6d77\u5e02')},
'861768758':{'en': 'Yulin, Guangxi', 'zh': u('\u5e7f\u897f\u7389\u6797\u5e02')},
'861812764':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')},
'861812765':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861805113':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861812766':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861812767':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861812760':{'en': 'Yunfu, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e91\u6d6e\u5e02')},
'861811656':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861812761':{'en': 'Yunfu, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e91\u6d6e\u5e02')},
'861813578':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861813579':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861812762':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')},
'861813572':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861813573':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6fee\u9633\u5e02')},
'861813570':{'en': 'Sh<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861812763':{'en': 'Me<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')},
'861813576':{'en': 'Z<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861813577':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861813574':{'en': 'Sh<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861813575':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861804738':{'en': 'Ordos, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u9102\u5c14\u591a\u65af\u5e02')},
'861804739':{'en': 'Ordos, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u9102\u5c14\u591a\u65af\u5e02')},
'861804730':{'en': 'Wuhai, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u6d77\u5e02')},
'861804731':{'en': 'Wuhai, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u6d77\u5e02')},
'861804732':{'en': 'Wuhai, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u6d77\u5e02')},
'861804733':{'en': 'Wuhai, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u6d77\u5e02')},
'861804734':{'en': 'Wuhai, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u6d77\u5e02')},
'861804735':{'en': 'Wuhai, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u6d77\u5e02')},
'861804736':{'en': 'Wuhai, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u6d77\u5e02')},
'861804737':{'en': 'Ordos, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u9102\u5c14\u591a\u65af\u5e02')},
'861804226':{'en': 'Quzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u8862\u5dde\u5e02')},
'861804227':{'en': 'Huzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e56\u5dde\u5e02')},
'861804224':{'en': '<NAME>', 'zh': u('\u6d59\u6c5f\u7701\u7ecd\u5174\u5e02')},
'861804225':{'en': 'Shaoxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u7ecd\u5174\u5e02')},
'861804222':{'en': '<NAME>', 'zh': u('\u6d59\u6c5f\u7701\u91d1\u534e\u5e02')},
'861804223':{'en': 'Jiaxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5609\u5174\u5e02')},
'861804220':{'en': 'Jinhua, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u91d1\u534e\u5e02')},
'861804221':{'en': 'Jinhua, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u91d1\u534e\u5e02')},
'861804228':{'en': 'Lishui, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u4e3d\u6c34\u5e02')},
'861804229':{'en': 'Zhoushan, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u821f\u5c71\u5e02')},
'861814239':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'861813232':{'en': 'Handan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u90af\u90f8\u5e02')},
'861805179':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861805178':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861808909':{'en': 'Lhasa, Tibet', 'zh': u('\u897f\u85cf\u62c9\u8428\u5e02')},
'861808908':{'en': '<NAME>', 'zh': u('\u897f\u85cf\u62c9\u8428\u5e02')},
'861805171':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861805170':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861805173':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861805172':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861805175':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861805174':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861805177':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861805176':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861809508':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u96c5\u5b89\u5e02')},
'861809509':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u96c5\u5b89\u5e02')},
'861802778':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861802779':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861802776':{'en': 'Maoming, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861802777':{'en': 'Maoming, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861802774':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861802775':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861802772':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')},
'861802773':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861802770':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')},
'861802771':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6885\u5dde\u5e02')},
'861807779':{'en': 'Nanning, Guangxi', 'zh': u('\u5e7f\u897f\u5357\u5b81\u5e02')},
'861807778':{'en': 'Nanning, Guangxi', 'zh': u('\u5e7f\u897f\u5357\u5b81\u5e02')},
'861801081':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u5ba3\u57ce\u5e02')},
'861801080':{'en': 'Huangshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9ec4\u5c71\u5e02')},
'861801083':{'en': 'Xuancheng, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5ba3\u57ce\u5e02')},
'861801082':{'en': 'Xuancheng, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5ba3\u57ce\u5e02')},
'861801085':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861801084':{'en': 'Xuancheng, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5ba3\u57ce\u5e02')},
'861801087':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861801086':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861801089':{'en': 'MaAnshan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u9a6c\u978d\u5c71\u5e02')},
'861801088':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'86180573':{'en': 'Jiaxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5609\u5174\u5e02')},
'86180572':{'en': 'Huzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e56\u5dde\u5e02')},
'86180575':{'en': 'Shaoxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u7ecd\u5174\u5e02')},
'86180574':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'86180577':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'86180576':{'en': 'Taizhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u53f0\u5dde\u5e02')},
'861811453':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861811452':{'en': 'Suzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861811451':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861811450':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861811457':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861811456':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861811455':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861811454':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861811459':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861811458':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861770563':{'en': 'Xuancheng, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5ba3\u57ce\u5e02')},
'861770562':{'en': 'Tongling, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u94dc\u9675\u5e02')},
'861770561':{'en': 'Huaibei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5317\u5e02')},
'861770560':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861770567':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861770566':{'en': 'Chizhou, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6c60\u5dde\u5e02')},
'861770565':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861770564':{'en': 'LiuAn, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861810498':{'en': 'Fuxin, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u961c\u65b0\u5e02')},
'861810499':{'en': 'Liaoyang, Liaoning', 'zh': u('\u8fbd\u5b81\u7701\u8fbd\u9633\u5e02')},
'861770569':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861770568':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861802198':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861802199':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861802192':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861802193':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')},
'861802190':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861802191':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861802196':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')},
'861802197':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861802194':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')},
'861802195':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')},
'861810146':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861810147':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861810144':{'en': 'Yangzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861810145':{'en': 'Yangzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u626c\u5dde\u5e02')},
'861810142':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861810143':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861810140':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6dee\u5b89\u5e02')},
'861810141':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u76d0\u57ce\u5e02')},
'861810148':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861810149':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861800853':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u5b89\u987a\u5e02')},
'861800598':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u4e09\u660e\u5e02')},
'861800599':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u5357\u5e73\u5e02')},
'861813305':{'en': 'An<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861800590':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861800591':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861800592':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u53a6\u95e8\u5e02')},
'861800593':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u5b81\u5fb7\u5e02')},
'861800594':{'en': 'Putian, Fujian', 'zh': u('\u798f\u5efa\u7701\u8386\u7530\u5e02')},
'861800595':{'en': 'Quanzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6cc9\u5dde\u5e02')},
'861800596':{'en': 'Zhangzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6f33\u5dde\u5e02')},
'861800597':{'en': 'Long<NAME>ian', 'zh': u('\u798f\u5efa\u7701\u9f99\u5ca9\u5e02')},
'861771212':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861771213':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861771210':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861771211':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861771216':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861771217':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861771214':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861771215':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861771218':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861771219':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5f90\u5dde\u5e02')},
'861814034':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u7ef5\u9633\u5e02')},
'861769839':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'861769838':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'861769833':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861769832':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861769831':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861769830':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861769837':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5357\u9633\u5e02')},
'861769836':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861769835':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861769834':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5b89\u9633\u5e02')},
'861770528':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861812898':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861812899':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861812896':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861812897':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861812894':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6cb3\u6e90\u5e02')},
'861812895':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6cb3\u6e90\u5e02')},
'861812892':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861812893':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861812890':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861812891':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861809166':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u6c49\u4e2d\u5e02')},
'861812095':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861809167':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u6c49\u4e2d\u5e02')},
'861813304':{'en': '<NAME>ui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861810542':{'en': 'Qingdao, Shandong', 'zh': u('\u5c71\u4e1c\u7701\u9752\u5c9b\u5e02')},
'861809164':{'en': 'Hanzhong, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6c49\u4e2d\u5e02')},
'861812096':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861812097':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861812094':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861809165':{'en': 'Hanzhong, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6c49\u4e2d\u5e02')},
'861812092':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861812093':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861812090':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861812091':{'en': 'Fuzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u798f\u5dde\u5e02')},
'861809162':{'en': 'Hanzhong, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6c49\u4e2d\u5e02')},
'861812098':{'en': 'Putian, Fujian', 'zh': u('\u798f\u5efa\u7701\u8386\u7530\u5e02')},
'861809163':{'en': 'Hanzhong, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6c49\u4e2d\u5e02')},
'861804033':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861804032':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861804031':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861804030':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861804037':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861804036':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861804035':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861804034':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861804039':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6cf8\u5dde\u5e02')},
'861804038':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u81ea\u8d21\u5e02')},
'861809901':{'en': '<NAME>', 'zh': u('\u65b0\u7586\u5854\u57ce\u5730\u533a')},
'861813381':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'861812099':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u8386\u7530\u5e02')},
'861813380':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'86181240':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'86181241':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'86181242':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'86181243':{'en': 'Dongguan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e1c\u839e\u5e02')},
'86181245':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'86181246':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'86181247':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861813649':{'en': 'Nanjing, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861813382':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'861810618':{'en': 'Wuxi, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861802347':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861802346':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861802345':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861802344':{'en': 'Zhongshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e2d\u5c71\u5e02')},
'861802343':{'en': 'Zhongshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e2d\u5c71\u5e02')},
'861802342':{'en': 'Zhongshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e2d\u5c71\u5e02')},
'861802341':{'en': 'Zhongshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e2d\u5c71\u5e02')},
'861802340':{'en': 'Zhongshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4e2d\u5c71\u5e02')},
'861802349':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861802348':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861804213':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861804744':{'en': 'Ulanqab, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u5170\u5bdf\u5e03\u5e02')},
'861804747':{'en': 'Ulanqab, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u5170\u5bdf\u5e03\u5e02')},
'861809319':{'en': 'Lanzhou, Gansu', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861809318':{'en': 'Lanzhou, Gansu', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861804746':{'en': 'Ulanqab, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u5170\u5bdf\u5e03\u5e02')},
'861809311':{'en': 'Lanzhou, Gansu', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861804741':{'en': 'Ulanqab, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u5170\u5bdf\u5e03\u5e02')},
'861809313':{'en': 'Baiyin, Gansu', 'zh': u('\u7518\u8083\u7701\u767d\u94f6\u5e02')},
'861809312':{'en': 'Wuwei, Gansu', 'zh': u('\u7518\u8083\u7701\u6b66\u5a01\u5e02')},
'861809315':{'en': 'Lanzhou, Gansu', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861809314':{'en': 'Baiyin, Gansu', 'zh': u('\u7518\u8083\u7701\u767d\u94f6\u5e02')},
'861809317':{'en': 'Lanzhou, Gansu', 'zh': u('\u7518\u8083\u7701\u5170\u5dde\u5e02')},
'861804740':{'en': 'Ulanqab, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u5170\u5bdf\u5e03\u5e02')},
'861804743':{'en': 'Ulanqab, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u5170\u5bdf\u5e03\u5e02')},
'861804742':{'en': 'Ulanqab, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u5170\u5bdf\u5e03\u5e02')},
'861809622':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u5b9c\u5bbe\u5e02')},
'861810152':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'86180324':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'86180325':{'en': 'Tangshan, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5510\u5c71\u5e02')},
'861811158':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861811159':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'86180320':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'86180321':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'86180322':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'86180323':{'en': 'Zhangjiakou, Hebei', 'zh': u('\u6cb3\u5317\u7701\u5f20\u5bb6\u53e3\u5e02')},
'861811152':{'en': 'YaAn, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u96c5\u5b89\u5e02')},
'861811153':{'en': 'Panzhihua, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u6500\u679d\u82b1\u5e02')},
'861811150':{'en': 'Deyang, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5fb7\u9633\u5e02')},
'861811151':{'en': 'Deyang, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u5fb7\u9633\u5e02')},
'86180328':{'en': 'Shijiazhuang, Hebei', 'zh': u('\u6cb3\u5317\u7701\u77f3\u5bb6\u5e84\u5e02')},
'861811157':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861811154':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861811155':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861806196':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5bbf\u8fc1\u5e02')},
'861806197':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861806194':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861806195':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u8fde\u4e91\u6e2f\u5e02')},
'861806192':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u82cf\u5dde\u5e02')},
'861806193':{'en': '<NAME>', | |
%02x received %02x' % (chsum, calc_chsum))
def check_coredump_trigger_before_print(self, line):  # type: (bytes) -> None
    """Advance the core-dump capture state machine for one serial line.

    Watches for the UART prompt/start/end markers; between start and end
    the line is appended to the (muted) core dump buffer.
    """
    if self._decode_coredumps == COREDUMP_DECODE_DISABLE:
        return
    if COREDUMP_UART_PROMPT in line:
        yellow_print('Initiating core dump!')
        # Answer the prompt so the chip proceeds with the dump.
        self.event_queue.put((TAG_KEY, '\n'))
    elif COREDUMP_UART_START in line:
        yellow_print('Core dump started (further output muted)')
        self._reading_coredump = COREDUMP_READING
        self._coredump_buffer = b''
        self._output_enabled = False
    elif COREDUMP_UART_END in line:
        self._reading_coredump = COREDUMP_DONE
        yellow_print('\nCore dump finished!')
        self.process_coredump()
    elif self._reading_coredump == COREDUMP_READING:
        kb = 1024
        size_before_kb = len(self._coredump_buffer) // kb
        self._coredump_buffer += line.replace(b'\r', b'') + b'\n'
        size_after_kb = len(self._coredump_buffer) // kb
        # Progress indicator: only print when another whole kB arrived.
        if size_after_kb > size_before_kb:
            yellow_print('Received %3d kB...' % (size_after_kb), newline='\r')
def check_coredump_trigger_after_print(self):  # type: () -> None
    """Re-enable muted output once a finished core dump has been handled."""
    if self._decode_coredumps == COREDUMP_DECODE_DISABLE:
        return
    # Re-enable output after the last line of core dump has been consumed
    done = self._reading_coredump == COREDUMP_DONE
    if done and not self._output_enabled:
        self._reading_coredump = COREDUMP_IDLE
        self._output_enabled = True
        self._coredump_buffer = b''
def process_coredump(self):  # type: () -> None
    """Decode the core dump collected in self._coredump_buffer.

    The base64 dump is written to a temporary file, then either handed to a
    connected WebSocket client (IDE integration) or decoded locally by
    running espcoredump.py and printing its report. If decoding fails, the
    raw buffer is printed so nothing is lost.

    Raises:
        NotImplementedError: if the decode mode is not COREDUMP_DECODE_INFO.
    """
    if self._decode_coredumps != COREDUMP_DECODE_INFO:
        raise NotImplementedError('process_coredump: %s not implemented' % self._decode_coredumps)
    coredump_script = os.path.join(os.path.dirname(__file__), '..', 'components', 'espcoredump', 'espcoredump.py')
    coredump_file = None
    try:
        # On Windows, the temporary file can't be read unless it is closed.
        # Set delete=False and delete the file manually later.
        with tempfile.NamedTemporaryFile(mode='wb', delete=False) as coredump_file:
            coredump_file.write(self._coredump_buffer)
            coredump_file.flush()
        if self.websocket_client:
            self._output_enabled = True
            yellow_print('Communicating through WebSocket')
            self.websocket_client.send({'event': 'coredump',
                                        'file': coredump_file.name,
                                        'prog': self.elf_file})
            yellow_print('Waiting for debug finished event')
            self.websocket_client.wait([('event', 'debug_finished')])
            yellow_print('Communications through WebSocket is finished')
        else:
            cmd = [sys.executable,
                   coredump_script,
                   'info_corefile',
                   '--core', coredump_file.name,
                   '--core-format', 'b64',
                   self.elf_file
                   ]
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
            self._output_enabled = True
            self._print(output)
            self._output_enabled = False  # Will be reenabled in check_coredump_trigger_after_print
    except subprocess.CalledProcessError as e:
        yellow_print('Failed to run espcoredump script: {}\n{}\n\n'.format(e, e.output))
        self._output_enabled = True
        self._print(COREDUMP_UART_START + b'\n')
        self._print(self._coredump_buffer)
        # end line will be printed in handle_serial_input
    finally:
        if coredump_file is not None:
            try:
                os.unlink(coredump_file.name)
            except OSError as e:
                # Fixed typo: message previously said "remote" instead of
                # "remove" (compare the matching panic-output cleanup message).
                yellow_print('Couldn\'t remove temporary core dump file ({})'.format(e))
def check_panic_decode_trigger(self, line):  # type: (bytes) -> None
    """Track panic ("Guru Meditation") output and decode it once complete.

    NOTE: the `if` statements below intentionally fall through (no elif):
    the line that matches PANIC_START must also be appended to the buffer,
    and the line containing PANIC_END both gets buffered and triggers the
    decode step in the same call.
    """
    if self._decode_panic == PANIC_DECODE_DISABLE:
        return
    # Entering a panic: the start marker is matched as a regex on the
    # ASCII-decoded text (undecodable bytes are ignored).
    if self._reading_panic == PANIC_IDLE and re.search(PANIC_START, line.decode('ascii', errors='ignore')):
        self._reading_panic = PANIC_READING
        yellow_print('Stack dump detected')
    # Mute normal output once the raw stack dump section begins.
    if self._reading_panic == PANIC_READING and PANIC_STACK_DUMP in line:
        self._output_enabled = False
    if self._reading_panic == PANIC_READING:
        # Accumulate panic text with normalized line endings.
        self._panic_buffer += line.replace(b'\r', b'') + b'\n'
    if self._reading_panic == PANIC_READING and PANIC_END in line:
        self._reading_panic = PANIC_IDLE
        self._output_enabled = True
        self.process_panic_output(self._panic_buffer)
        self._panic_buffer = b''
def process_panic_output(self, panic_output): # type: (bytes) -> None
panic_output_decode_script = os.path.join(os.path.dirname(__file__), '..', 'tools', 'gdb_panic_server.py')
panic_output_file = None
try:
# On Windows, the temporary file can't be read unless it is closed.
# Set delete=False and delete the file manually later.
with tempfile.NamedTemporaryFile(mode='wb', delete=False) as panic_output_file:
panic_output_file.write(panic_output)
panic_output_file.flush()
cmd = [self.toolchain_prefix + 'gdb',
'--batch', '-n',
self.elf_file,
'-ex', "target remote | \"{python}\" \"{script}\" --target {target} \"{output_file}\""
.format(python=sys.executable,
script=panic_output_decode_script,
target=self.target,
output_file=panic_output_file.name),
'-ex', 'bt']
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
yellow_print('\nBacktrace:\n\n')
self._print(output)
except subprocess.CalledProcessError as e:
yellow_print('Failed to run gdb_panic_server.py script: {}\n{}\n\n'.format(e, e.output))
self._print(panic_output)
finally:
if panic_output_file is not None:
try:
os.unlink(panic_output_file.name)
except OSError as e:
yellow_print('Couldn\'t remove temporary panic output file ({})'.format(e))
def run_gdb(self): # type: () -> None
with self: # disable console control
normal_print('')
try:
cmd = ['%sgdb' % self.toolchain_prefix,
'-ex', 'set serial baud %d' % self.serial.baudrate,
'-ex', 'target remote %s' % self.serial.port,
'-ex', 'interrupt', # monitor has already parsed the first 'reason' command, need a second
self.elf_file]
process = subprocess.Popen(cmd, cwd='.')
process.wait()
except OSError as e:
red_print('%s: %s' % (' '.join(cmd), e))
except KeyboardInterrupt:
pass # happens on Windows, maybe other OSes
finally:
try:
# on Linux, maybe other OSes, gdb sometimes seems to be alive even after wait() returns...
process.terminate()
except Exception:
pass
try:
# also on Linux, maybe other OSes, gdb sometimes exits uncleanly and breaks the tty mode
subprocess.call(['stty', 'sane'])
except Exception:
pass # don't care if there's no stty, we tried...
self.prompt_next_action('gdb exited')
def output_enable(self, enable): # type: (bool) -> None
self._output_enabled = enable
def output_toggle(self): # type: () -> None
self._output_enabled = not self._output_enabled
yellow_print('\nToggle output display: {}, Type Ctrl-T Ctrl-Y to show/disable output again.'.format(
self._output_enabled))
def toggle_logging(self): # type: () -> None
if self._log_file:
self.stop_logging()
else:
self.start_logging()
def start_logging(self): # type: () -> None
if not self._log_file:
name = 'log.{}.{}.txt'.format(os.path.splitext(os.path.basename(self.elf_file))[0],
datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
try:
self._log_file = open(name, 'wb+')
yellow_print('\nLogging is enabled into file {}'.format(name))
except Exception as e:
red_print('\nLog file {} cannot be created: {}'.format(name, e))
def stop_logging(self): # type: () -> None
if self._log_file:
try:
name = self._log_file.name
self._log_file.close()
yellow_print('\nLogging is disabled and file {} has been closed'.format(name))
except Exception as e:
red_print('\nLog file cannot be closed: {}'.format(e))
finally:
self._log_file = None
def _print(self, string, console_printer=None): # type: (Union[str, bytes], Optional[Callable]) -> None
if console_printer is None:
console_printer = self.console.write_bytes
if self._output_enabled:
console_printer(string)
if self._log_file:
try:
if isinstance(string, type(u'')):
string = string.encode()
self._log_file.write(string) # type: ignore
except Exception as e:
red_print('\nCannot write to file: {}'.format(e))
# don't fill-up the screen with the previous errors (probably consequent prints would fail also)
self.stop_logging()
    def handle_commands(self, cmd, chip):  # type: (int, str) -> None
        """Dispatch a monitor command code (CMD_*) to the matching serial/console action.

        :param cmd: one of the CMD_* command codes
        :param chip: chip name used to look up reset/boot timing via get_chip_config()
        :raises RuntimeError: for an unknown command code

        NOTE(review): ``high = False`` / ``low = True`` is deliberately
        inverted — presumably because pyserial's setRTS/setDTR(True) drives
        the physical line low through the usual auto-reset transistor
        circuitry; confirm against the board wiring before changing.
        """
        config = get_chip_config(chip)
        reset_delay = config['reset']
        enter_boot_set = config['enter_boot_set']
        enter_boot_unset = config['enter_boot_unset']
        # Inverted naming on purpose — see the docstring note above.
        high = False
        low = True
        if cmd == CMD_STOP:
            # Shut down both reader threads; the monitor loop then exits.
            self.console_reader.stop()
            self.serial_reader.stop()
        elif cmd == CMD_RESET:
            # Pulse the EN line (via RTS) to hard-reset the chip.
            self.serial.setRTS(low)
            self.serial.setDTR(self.serial.dtr)  # usbser.sys workaround
            time.sleep(reset_delay)
            self.serial.setRTS(high)
            self.serial.setDTR(self.serial.dtr)  # usbser.sys workaround
            # low == True here, so this re-enables console output after reset.
            self.output_enable(low)
        elif cmd == CMD_MAKE:
            self.run_make('encrypted-flash' if self.encrypted else 'flash')
        elif cmd == CMD_APP_FLASH:
            self.run_make('encrypted-app-flash' if self.encrypted else 'app-flash')
        elif cmd == CMD_OUTPUT_TOGGLE:
            self.output_toggle()
        elif cmd == CMD_TOGGLE_LOGGING:
            self.toggle_logging()
        elif cmd == CMD_ENTER_BOOT:
            # Hold IO0 low while releasing reset to enter the serial bootloader.
            self.serial.setDTR(high)  # IO0=HIGH
            self.serial.setRTS(low)  # EN=LOW, chip in reset
            self.serial.setDTR(self.serial.dtr)  # usbser.sys workaround
            time.sleep(enter_boot_set)  # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.1
            self.serial.setDTR(low)  # IO0=LOW
            self.serial.setRTS(high)  # EN=HIGH, chip out of reset
            self.serial.setDTR(self.serial.dtr)  # usbser.sys workaround
            time.sleep(enter_boot_unset)  # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.05
            self.serial.setDTR(high)  # IO0=HIGH, done
        else:
            raise RuntimeError('Bad command data %d' % cmd)  # type: ignore
def main(): # type: () -> None
parser = argparse.ArgumentParser('idf_monitor - a serial output monitor for esp-idf')
parser.add_argument(
'--port', '-p',
help='Serial port device',
default=os.environ.get('ESPTOOL_PORT', '/dev/ttyUSB0')
)
parser.add_argument(
'--disable-address-decoding', '-d',
help="Don't print lines about decoded addresses from the application ELF file",
action='store_true',
default=True if os.environ.get('ESP_MONITOR_DECODE') == 0 else False
)
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate',
type=int,
default=os.getenv('IDF_MONITOR_BAUD', os.getenv('MONITORBAUD', 115200)))
parser.add_argument(
'--make', '-m',
help='Command to run make',
type=str, default='make')
parser.add_argument(
'--encrypted',
help='Use encrypted targets while running make',
action='store_true')
parser.add_argument(
'--toolchain-prefix',
help='Triplet prefix to add before cross-toolchain names',
default=DEFAULT_TOOLCHAIN_PREFIX)
parser.add_argument(
'--eol',
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help='End of line to use when sending to the serial port',
default='CR')
parser.add_argument(
'elf_file', help='ELF file of application',
type=argparse.FileType('rb'))
parser.add_argument(
'--print_filter',
help='Filtering string',
default=DEFAULT_PRINT_FILTER)
parser.add_argument(
'--decode-coredumps',
choices=[COREDUMP_DECODE_INFO, COREDUMP_DECODE_DISABLE],
default=COREDUMP_DECODE_INFO,
help='Handling of core dumps found in serial output'
)
parser.add_argument(
'--decode-panic',
choices=[PANIC_DECODE_BACKTRACE, PANIC_DECODE_DISABLE],
default=PANIC_DECODE_DISABLE,
help='Handling of panic handler info found in serial output'
)
parser.add_argument(
'--target',
help='Target name (used when stack dump decoding is enabled)',
default=os.environ.get('IDF_TARGET', 'esp32')
)
parser.add_argument(
'--revision',
help='Revision of the target',
type=int,
default=0
)
parser.add_argument(
'--ws',
default=os.environ.get('ESP_IDF_MONITOR_WS', None),
help='WebSocket URL for communicating with IDE tools for debugging purposes'
)
args = parser.parse_args()
# GDB uses CreateFile to open COM port, which requires the COM name to be r'\\.\COMx' if the COM
# number is larger than 10
if os.name == 'nt' and args.port.startswith('COM'):
args.port = args.port.replace('COM', r'\\.\COM')
yellow_print('--- WARNING: GDB cannot open serial ports accessed as COMx')
yellow_print('--- Using %s instead...' % args.port)
elif args.port.startswith('/dev/tty.') and sys.platform == 'darwin':
args.port = args.port.replace('/dev/tty.', '/dev/cu.')
yellow_print('--- WARNING: Serial ports accessed as /dev/tty.* will hang gdb if launched.')
yellow_print('--- Using %s instead...' % args.port)
serial_instance = serial.serial_for_url(args.port, args.baud,
do_not_open=True)
serial_instance.dtr = False
serial_instance.rts = False
args.elf_file.close() # don't need this as a file
# remove the parallel jobserver arguments from MAKEFLAGS, as any
# parent make is only running 1 job (monitor), so we can re-spawn
# all of the child makes we need (the -j argument remains part of
# MAKEFLAGS)
try:
makeflags = os.environ['MAKEFLAGS']
makeflags = re.sub(r'--jobserver[^ =]*=[0-9,]+ ?', '', makeflags)
os.environ['MAKEFLAGS'] = makeflags
except KeyError:
pass # not running a make jobserver
# Pass the actual used port to callee of idf_monitor (e.g. make) | |
# Copyright 2018 New Vector
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
from unittest.mock import Mock, patch
from urllib.parse import quote
from twisted.internet import defer
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import UserTypes
from synapse.api.room_versions import RoomVersion, RoomVersions
from synapse.appservice import ApplicationService
from synapse.rest.client import login, register, room, user_directory
from synapse.server import HomeServer
from synapse.storage.roommember import ProfileInfo
from synapse.types import create_requester
from synapse.util import Clock
from tests import unittest
from tests.storage.test_user_directory import GetUserDirectoryTables
from tests.test_utils.event_injection import inject_member_event
from tests.unittest import override_config
class UserDirectoryTestCase(unittest.HomeserverTestCase):
"""Tests the UserDirectoryHandler.
We're broadly testing two kinds of things here.
1. Check that we correctly update the user directory in response
to events (e.g. join a room, leave a room, change name, make public)
2. Check that the search logic behaves as expected.
The background process that rebuilds the user directory is tested in
tests/storage/test_user_directory.py.
"""
servlets = [
login.register_servlets,
synapse.rest.admin.register_servlets,
register.register_servlets,
room.register_servlets,
]
def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
config = self.default_config()
config["update_user_directory"] = True
self.appservice = ApplicationService(
token="<PASSWORD>",
hostname="test",
id="1234",
namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]},
# Note: this user does not match the regex above, so that tests
# can distinguish the sender from the AS user.
sender="@as_main:test",
)
mock_load_appservices = Mock(return_value=[self.appservice])
with patch(
"synapse.storage.databases.main.appservice.load_appservices",
mock_load_appservices,
):
hs = self.setup_test_homeserver(config=config)
return hs
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastore()
self.handler = hs.get_user_directory_handler()
self.event_builder_factory = self.hs.get_event_builder_factory()
self.event_creation_handler = self.hs.get_event_creation_handler()
self.user_dir_helper = GetUserDirectoryTables(self.store)
def test_normal_user_pair(self) -> None:
"""Sanity check that the room-sharing tables are updated correctly."""
alice = self.register_user("alice", "pass")
alice_token = self.login(alice, "pass")
bob = self.register_user("bob", "pass")
bob_token = self.login(bob, "pass")
public = self.helper.create_room_as(
alice,
is_public=True,
extra_content={"visibility": "public"},
tok=alice_token,
)
private = self.helper.create_room_as(alice, is_public=False, tok=alice_token)
self.helper.invite(private, alice, bob, tok=alice_token)
self.helper.join(public, bob, tok=bob_token)
self.helper.join(private, bob, tok=bob_token)
# Alice also makes a second public room but no-one else joins
public2 = self.helper.create_room_as(
alice,
is_public=True,
extra_content={"visibility": "public"},
tok=alice_token,
)
# The user directory should reflect the room memberships above.
users, in_public, in_private = self.get_success(
self.user_dir_helper.get_tables()
)
self.assertEqual(users, {alice, bob})
self.assertEqual(in_public, {(alice, public), (bob, public), (alice, public2)})
self.assertEqual(
in_private,
{(alice, bob, private), (bob, alice, private)},
)
# The next four tests (test_excludes_*) all setup
# - A normal user included in the user dir
# - A public and private room created by that user
# - A user excluded from the room dir, belonging to both rooms
# They match similar logic in storage/test_user_directory. But that tests
# rebuilding the directory; this tests updating it incrementally.
def test_excludes_support_user(self) -> None:
alice = self.register_user("alice", "pass")
alice_token = self.login(alice, "pass")
support = "@support1:test"
self.get_success(
self.store.register_user(
user_id=support, password_hash=<PASSWORD>, user_type=UserTypes.SUPPORT
)
)
public, private = self._create_rooms_and_inject_memberships(
alice, alice_token, support
)
self._check_only_one_user_in_directory(alice, public)
def test_excludes_deactivated_user(self) -> None:
admin = self.register_user("admin", "pass", admin=True)
admin_token = self.login(admin, "pass")
user = self.register_user("naughty", "pass")
# Deactivate the user.
channel = self.make_request(
"PUT",
f"/_synapse/admin/v2/users/{user}",
access_token=admin_token,
content={"deactivated": True},
)
self.assertEqual(channel.code, 200)
self.assertEqual(channel.json_body["deactivated"], True)
# Join the deactivated user to rooms owned by the admin.
# Is this something that could actually happen outside of a test?
public, private = self._create_rooms_and_inject_memberships(
admin, admin_token, user
)
self._check_only_one_user_in_directory(admin, public)
def test_excludes_appservices_user(self) -> None:
# Register an AS user.
user = self.register_user("user", "<PASSWORD>")
token = self.login(user, "<PASSWORD>")
as_user = self.register_appservice_user("as_user_potato", self.appservice.token)
# Join the AS user to rooms owned by the normal user.
public, private = self._create_rooms_and_inject_memberships(
user, token, as_user
)
self._check_only_one_user_in_directory(user, public)
def test_excludes_appservice_sender(self) -> None:
user = self.register_user("user", "<PASSWORD>")
token = self.login(user, "<PASSWORD>")
room = self.helper.create_room_as(user, is_public=True, tok=token)
self.helper.join(room, self.appservice.sender, tok=self.appservice.token)
self._check_only_one_user_in_directory(user, room)
def test_user_not_in_users_table(self) -> None:
"""Unclear how it happens, but on matrix.org we've seen join events
for users who aren't in the users table. Test that we don't fall over
when processing such a user.
"""
user1 = self.register_user("user1", "<PASSWORD>")
token1 = self.login(user1, "<PASSWORD>")
room = self.helper.create_room_as(user1, is_public=True, tok=token1)
# Inject a join event for a user who doesn't exist
self.get_success(inject_member_event(self.hs, room, "@not-a-user:test", "join"))
# Another new user registers and joins the room
user2 = self.register_user("user2", "pass")
token2 = self.login(user2, "pass")
self.helper.join(room, user2, tok=token2)
# The dodgy event should not have stopped us from processing user2's join.
in_public = self.get_success(self.user_dir_helper.get_users_in_public_rooms())
self.assertEqual(set(in_public), {(user1, room), (user2, room)})
def test_excludes_users_when_making_room_public(self) -> None:
# Create a regular user and a support user.
alice = self.register_user("alice", "pass")
alice_token = self.login(alice, "pass")
support = "@support1:test"
self.get_success(
self.store.register_user(
user_id=support, password_hash=None, user_type=UserTypes.SUPPORT
)
)
# Make a public and private room containing Alice and the support user
public, initially_private = self._create_rooms_and_inject_memberships(
alice, alice_token, support
)
self._check_only_one_user_in_directory(alice, public)
# Alice makes the private room public.
self.helper.send_state(
initially_private,
"m.room.join_rules",
{"join_rule": "public"},
tok=alice_token,
)
users, in_public, in_private = self.get_success(
self.user_dir_helper.get_tables()
)
self.assertEqual(users, {alice})
self.assertEqual(in_public, {(alice, public), (alice, initially_private)})
self.assertEqual(in_private, set())
    def test_switching_from_private_to_public_to_private(self) -> None:
        """Check we update the room sharing tables when switching a room
        from private to public, then back again to private."""
        # Alice and Bob share a private room.
        alice = self.register_user("alice", "pass")
        alice_token = self.login(alice, "pass")
        bob = self.register_user("bob", "pass")
        bob_token = self.login(bob, "pass")
        room = self.helper.create_room_as(alice, is_public=False, tok=alice_token)
        self.helper.invite(room, alice, bob, tok=alice_token)
        self.helper.join(room, bob, tok=bob_token)
        # The user directory should reflect this.
        def check_user_dir_for_private_room() -> None:
            """Assert the tables show exactly: Alice and Bob sharing one private room."""
            users, in_public, in_private = self.get_success(
                self.user_dir_helper.get_tables()
            )
            self.assertEqual(users, {alice, bob})
            self.assertEqual(in_public, set())
            self.assertEqual(in_private, {(alice, bob, room), (bob, alice, room)})
        check_user_dir_for_private_room()
        # Alice makes the room public.
        self.helper.send_state(
            room,
            "m.room.join_rules",
            {"join_rule": "public"},
            tok=alice_token,
        )
        # The user directory should be updated accordingly
        users, in_public, in_private = self.get_success(
            self.user_dir_helper.get_tables()
        )
        self.assertEqual(users, {alice, bob})
        self.assertEqual(in_public, {(alice, room), (bob, room)})
        self.assertEqual(in_private, set())
        # Alice makes the room private.
        self.helper.send_state(
            room,
            "m.room.join_rules",
            {"join_rule": "invite"},
            tok=alice_token,
        )
        # The user directory should be updated accordingly
        # (i.e. back to the same state as before the room ever went public).
        check_user_dir_for_private_room()
def _create_rooms_and_inject_memberships(
self, creator: str, token: str, joiner: str
) -> Tuple[str, str]:
"""Create a public and private room as a normal user.
Then get the `joiner` into those rooms.
"""
# TODO: Duplicates the same-named method in UserDirectoryInitialPopulationTest.
public_room = self.helper.create_room_as(
creator,
is_public=True,
# See https://github.com/matrix-org/synapse/issues/10951
extra_content={"visibility": "public"},
tok=token,
)
private_room = self.helper.create_room_as(creator, is_public=False, tok=token)
# HACK: get the user into these rooms
self.get_success(inject_member_event(self.hs, public_room, joiner, "join"))
self.get_success(inject_member_event(self.hs, private_room, joiner, "join"))
return public_room, private_room
def _check_only_one_user_in_directory(self, user: str, public: str) -> None:
"""Check that the user directory DB tables show that:
- only one user is in the user directory
- they belong to exactly one public room
- they don't share a private room with anyone.
"""
users, in_public, in_private = self.get_success(
self.user_dir_helper.get_tables()
)
self.assertEqual(users, {user})
self.assertEqual(in_public, {(user, public)})
self.assertEqual(in_private, set())
def test_handle_local_profile_change_with_support_user(self) -> None:
support_user_id = "@support:test"
self.get_success(
self.store.register_user(
user_id=support_user_id, password_hash=None, user_type=UserTypes.SUPPORT
)
)
regular_user_id = "@regular:test"
self.get_success(
self.store.register_user(user_id=regular_user_id, password_hash=None)
)
self.get_success(
self.handler.handle_local_profile_change(
support_user_id, ProfileInfo("I love support me", None)
)
)
profile = self.get_success(self.store.get_user_in_directory(support_user_id))
self.assertIsNone(profile)
display_name = "display_name"
profile_info = ProfileInfo(avatar_url="avatar_url", display_name=display_name)
self.get_success(
self.handler.handle_local_profile_change(regular_user_id, profile_info)
)
profile = self.get_success(self.store.get_user_in_directory(regular_user_id))
self.assertTrue(profile["display_name"] == display_name)
def test_handle_local_profile_change_with_deactivated_user(self) -> None:
# create user
r_user_id = "@regular:test"
self.get_success(
self.store.register_user(user_id=r_user_id, password_hash=None)
)
# update profile
display_name = "<NAME>"
profile_info = ProfileInfo(avatar_url="avatar_url", display_name=display_name)
self.get_success(
self.handler.handle_local_profile_change(r_user_id, profile_info)
)
# profile is in directory
profile = self.get_success(self.store.get_user_in_directory(r_user_id))
self.assertTrue(profile["display_name"] == display_name)
# deactivate user
self.get_success(self.store.set_user_deactivated_status(r_user_id, True))
self.get_success(self.handler.handle_local_user_deactivated(r_user_id))
# profile is not in directory
profile = self.get_success(self.store.get_user_in_directory(r_user_id))
self.assertIsNone(profile)
# update profile after deactivation
self.get_success(
self.handler.handle_local_profile_change(r_user_id, profile_info)
)
# profile is furthermore not in directory
profile = self.get_success(self.store.get_user_in_directory(r_user_id))
self.assertIsNone(profile)
def test_handle_local_profile_change_with_appservice_user(self) -> None:
# create user
as_user_id = self.register_appservice_user(
"as_user_alice", self.appservice.token
)
# profile is not in directory
profile = self.get_success(self.store.get_user_in_directory(as_user_id))
self.assertIsNone(profile)
# update profile
profile_info = ProfileInfo(avatar_url="avatar_url", display_name="<PASSWORD>")
self.get_success(
self.handler.handle_local_profile_change(as_user_id, profile_info)
)
# profile is still not in directory
profile = self.get_success(self.store.get_user_in_directory(as_user_id))
self.assertIsNone(profile)
def test_handle_local_profile_change_with_appservice_sender(self) -> None:
# profile is not | |
socket client.
updateAccountTime()
updateAccountValue()
updatePortfolio()
'''
if not self.connected:
raise RuntimeError('IB client is not connected to TWS')
# TODO: check self.IB_acct_id before using it
# request IB host (e.g. TWS) push account info to IB client (socket client)
self.connection.reqAccountUpdates(True, self.context.account.account_id)
return
def disable_account_info_update(self):
''' Turn off auto account update, meaning IB socket host will stop pushing account info
to IB socket client.
'''
if not self.connected:
raise RuntimeError('IB client is not connected to TWS')
# TODO: check self.IB_acct_id before using it
# stop IB host (e.g. TWS) to push account info to IB client (socket client)
self.connection.reqAccountUpdates(False, self.context.account.account_id)
return
#
# Fundamental Data Methods
#
def get_financial_statements(self, symbol, max_wait_time=20):
''' Get a company's financial statements
:param:
symbol: stock symbol string, e.g. 'IBM'; or a IB contract object
:return:
a string of financial statements
'''
if not self.connected:
raise RuntimeError('IB client is not connected to TWS')
if isinstance(symbol, Contract):
contract = symbol
elif isinstance(symbol, str):
contract = new_stock_contract(symbol)
else:
raise TypeError("contract must be a contract object or string (for U.S. stocks only).")
__id = self.__get_new_request_id()
request = RequestDetails('reqFundamentalData', 'ReportsFinStatements', contract)
response = ResponseDetails()
self.ipc_msg_dict[__id] = (request, response)
self.connection.reqFundamentalData(__id, contract, 'ReportsFinStatements')
response.event.wait(max_wait_time)
raw_xml = None
if response.event.is_set():
if response.status == ResponseDetails.STATUS_FINISHED:
# covert from xml to dest. format
raw_xml = copy(response.fundamental_data)
else:
pass # raise RuntimeError('get_financial_statements: reqFundamentalData got error. Security=%s Reason:%s' % (symbol, response.error_msg))
else:
# Timeout
pass # ('get_financial_statements: reqFundamentalData is timeout. Security=%s' % symbol)
status = response.status
self.ipc_msg_dict.pop(__id)
return status, raw_xml
def get_company_ownership(self, symbol, max_wait_time=60.0 * 5):
''' Get a company's ownership report
:param:
symbol: stock symbol string, e.g. 'IBM'
max_wait_time: max number of seconds to wait before raise timeout
:return:
a string of ownership report
'''
if not self.connected:
raise RuntimeError('IB client is not connected to TWS')
if isinstance(symbol, Contract):
contract = symbol
elif isinstance(symbol, str):
# For US stock only
contract = new_stock_contract(symbol)
else:
raise TypeError("contract must be a contract object or string (for U.S. stocks only).")
__id = self.__get_new_request_id()
request = RequestDetails('reqFundamentalData', 'ReportsOwnership', contract)
response = ResponseDetails()
self.ipc_msg_dict[__id] = (request, response)
self.connection.reqFundamentalData(__id, contract, 'ReportsOwnership')
response.event.wait(max_wait_time)
report = None
if response.event.is_set():
if response.status == ResponseDetails.STATUS_FINISHED:
# covert from xml to dest. format
report = parse_ownership_report(response.fundamental_data)
else:
pass # ('get_company_ownership: reqFundamentalData got error. Security=%s Reason:%s' % (symbol, response.error_msg))
else:
pass # ('get_company_ownership: reqFundamentalData is timeout. Security=%s' % symbol)
status = response.status
self.ipc_msg_dict.pop(__id)
return status, report
def get_analyst_estimates(self, symbol, max_wait_time=20):
''' Get analyst estimates report for a company
:param:
symbol: stock symbol string, e.g. 'IBM'; or a IB contract object
:return:
a string of financial statements
'''
if not self.connected:
raise RuntimeError('IB client is not connected to TWS')
if isinstance(symbol, Contract):
contract = symbol
elif isinstance(symbol, str):
contract = new_stock_contract(symbol)
else:
raise TypeError("contract must be a contract object or string (for U.S. stocks only).")
__id = self.__get_new_request_id()
request = RequestDetails('reqFundamentalData', 'RESC-Analyst Estimates', contract)
response = ResponseDetails()
self.ipc_msg_dict[__id] = (request, response)
self.connection.reqFundamentalData(__id, contract, 'RESC')
response.event.wait(max_wait_time)
report = None
if response.event.is_set():
if response.status == ResponseDetails.STATUS_FINISHED:
# covert from xml to dest. format
report = parse_analyst_estimates(response.fundamental_data)
else:
pass # ('get_analyst_estimates: reqFundamentalData got error. Security=%s Reason:%s' % (symbol, response.error_msg))
else:
pass # ('get_analyst_estimates: reqFundamentalData is timeout. Security=%s' % symbol)
status = response.status
self.ipc_msg_dict.pop(__id)
return status, report
def get_company_overview(self, symbol, max_wait_time=10):
''' Get company overview infomration
:param:
symbol: stock symbol string, e.g. 'IBM'; or a IB contract object
:return:
a string of financial statements
'''
# ReportsFinSummary Financial summary
if not self.connected:
raise RuntimeError('IB client is not connected to TWS')
if isinstance(symbol, Contract):
contract = symbol
elif isinstance(symbol, str):
contract = new_stock_contract(symbol)
else:
raise TypeError("contract must be a contract object or string (for U.S. stocks only).")
__id = self.__get_new_request_id()
request = RequestDetails('reqFundamentalData', 'ReportSnapshot-Company overview', contract)
response = ResponseDetails()
self.ipc_msg_dict[__id] = (request, response)
# ReportSnapshot Company's financial overview
self.connection.reqFundamentalData(__id, contract, 'ReportSnapshot')
response.event.wait(max_wait_time)
report = None
if response.event.is_set():
if response.status == ResponseDetails.STATUS_FINISHED:
# TODO: covert from xml to dest. format
report = response.fundamental_data
else:
pass # ('get_analyst_estimates: reqFundamentalData got error. Security=%s Reason:%s' % (symbol, response.error_msg))
else:
pass # ('get_analyst_estimates: reqFundamentalData is timeout. Security=%s' % symbol)
status = response.status
self.ipc_msg_dict.pop(__id)
return status, report
def get_financial_summary(self, symbol, max_wait_time=10):
''' Get company finanical summary information, such as revenue history, net profit, and dividends history.
:param:
symbol: stock symbol string, e.g. 'IBM'; or a IB contract object
:return:
a string of financial statements
'''
if not self.connected:
raise RuntimeError('IB client is not connected to TWS')
if isinstance(symbol, Contract):
contract = symbol
elif isinstance(symbol, str):
contract = new_stock_contract(symbol)
else:
raise TypeError("contract must be a contract object or string (for U.S. stocks only).")
__id = self.__get_new_request_id()
request = RequestDetails('reqFundamentalData', 'ReportsFinSummary-Financial summary', contract)
response = ResponseDetails()
self.ipc_msg_dict[__id] = (request, response)
self.connection.reqFundamentalData(__id, contract, 'ReportsFinSummary')
response.event.wait(max_wait_time)
report = None
if response.event.is_set():
if response.status == ResponseDetails.STATUS_FINISHED:
# TODO: covert from xml to dest. format
report = response.fundamental_data
else:
pass
else:
pass
status = response.status
self.ipc_msg_dict.pop(__id)
return status, report
def get_financial_ratios(self, symbol, max_wait_time=5):
''' Get analyst estimates report for a company
:param:
symbol: stock symbol string, e.g. 'IBM'
:return:
a string of financial statements
'''
if not self.connected:
raise RuntimeError('IB client is not connected to TWS')
if isinstance(symbol, Contract):
contract = symbol
elif isinstance(symbol, str):
contract = new_stock_contract(symbol)
else:
raise TypeError("contract must be a contract object or string (for U.S. stocks only).")
__id = self.__get_new_request_id()
request = RequestDetails('reqFundamentalData', 'RESC-Analyst Estimates', contract)
response = ResponseDetails()
self.ipc_msg_dict[__id] = (request, response)
# 258 - financial ratios
'''
TTMNPMGN=16.1298;NLOW=80.6;TTMPRCFPS=6.26675;TTMGROSMGN=60.76731;TTMCFSHR=15.004
46;QCURRATIO=1.42071;TTMREV=259842;TTMINVTURN=5.28024;TTMOPMGN=14.22711;TTMPR2RE
V=1.39703;AEPSNORM=8.55;TTMNIPEREM=144524.1;EPSCHNGYR=8.47727;TTMPRFCFPS=62.4260
6;TTMRECTURN=19.99938;TTMPTMGN=17.88125;QCSHPS=40.50882;TTMFCF=5815;
LATESTADATE=2016-12-31;APTMGNPCT=17.88125;AEBTNORM=46463;TTMNIAC=33008;NetDebt_I=152080;
PRYTDPCTR=-1.55563;TTMEBITD=53326;AFEEPSNTM=0;PR2TANBK=5.01599;EPSTRENDGR=-
15.53209;QTOTD2EQ=72.60778;TTMFCFSHR=1.50625;QBVPS=110.0867;NPRICE=94.1;YLD5YAVG
=3.88751;REVTRENDGR=51.11774;TTMEPSXCLX=8.54981;QTANBVPS=18.75999;PRICE2BK=0.854
78;MKTCAP=363007.5;TTMPAYRAT=31.32574;TTMINTCOV=-99999.99;TTMDIVSHR=2.585;TTMREVCHG=55.81794;
TTMROAPCT=4.09615;TTMROEPCT=7.73685;
TTMREVPERE=896006.9;APENORM=11.00585;TTMROIPCT=5.51924;REVCHNGYR=-
6.66885;CURRENCY=HKD;DIVGRPCT=-8.33887;TTMEPSCHG=-32.80548;PEEXCLXOR=11.00609;QQUICKRATI=1.30087;
TTMREVPS=67.30638;BETA=0.90979;TTMEBT=46463;ADIV5YAVG=3.1048;ANIACNORM=33008;QLTD2EQ=55.46377;NHIG=103.9
'''
report = None
self.connection.reqMktData(__id, contract, "258", False)
response.event.wait(max_wait_time)
if response.event.is_set():
if response.status == ResponseDetails.STATUS_FINISHED:
# TODO: convert the format to a table alike
report = response.tick_str
return report
def get_dividends_info(self, symbol, max_wait_time=5):
''' Get analyst estimates report for a company
:param:
symbol: stock symbol string, e.g. 'IBM'
:return:
a string of financial statements
'''
if not self.connected:
raise RuntimeError('IB client is not connected to TWS')
if isinstance(symbol, Contract):
contract = symbol
elif isinstance(symbol, str):
contract = new_stock_contract(symbol)
else:
raise TypeError("contract must be a contract object or string (for U.S. stocks only).")
__id = self.__get_new_request_id()
request = RequestDetails('reqFundamentalData', 'RESC-Analyst Estimates', contract)
response = ResponseDetails()
self.ipc_msg_dict[__id] = (request, response)
# IB Dividends ("456")
#
# This tick type provides four different comma-separated elements:
# The sum of dividends for the past 12 months (0.83 in the example below).
# The sum of dividends for the next 12 months (0.92 from the example below).
# The next dividend date (20130219 in the example below).
# The next single dividend amount (0.23 from the example below).
# Example: 0.83,0.92,20130219,0.23
self.connection.reqMktData(__id, contract, "456", False)
result = None
response.event.wait(max_wait_time)
if response.event.is_set():
if response.status == ResponseDetails.STATUS_FINISHED:
# TODO: convert the format
result = set(response.tick_str.split(','))
self.ipc_msg_dict.pop(__id)
return result
def get_contract_details(self, contract, max_wait_time=5):
""" Get contract details for a specified contract
Args:
contract: a legal IBPY Contract object or a string for U.S. stock only
Returns:
status: a reference to the tick data dictionary which will be updated with latest quote.
contract_details: a contractDetails instance
"""
if isinstance(contract, Contract):
pass
elif isinstance(contract, str):
contract = new_stock_contract(contract)
else:
raise TypeError("contract must be a contract object or string (for U.S. stocks only).")
if not self.connected:
raise RuntimeError('IB client is not connected to TWS')
__id = self.__get_new_request_id()
request = RequestDetails('reqContractDetails', '', contract)
response = ResponseDetails()
self.ipc_msg_dict[__id] = (request, response)
# False - indicating request live quotes instead of a snapshot
self.connection.reqContractDetails(__id, contract)
response.event.wait(max_wait_time)
contract_details = None
if response.event.is_set():
if response.status == ResponseDetails.STATUS_FINISHED:
if len(response.contract_list) > 0:
contract_details = copy(response.contract_list[0])
else:
pass
else:
pass
status = response.status
self.ipc_msg_dict.pop(__id)
return status, contract_details
def get_full_contract(self, contract):
""" Subscribe tick data for a specified contract
Args:
contract: a legal IBPY Contract object or a string for U.S. stock only
Returns:
tickerId: the ID of this request. this ID could be used to cancel request later.
tick_data: a reference to the tick data dictionary which will be updated with | |
,
u'耱' : [u'm'] ,
u'㢶' : [u'b'] ,
u'綸' : [u'l', u'g'] ,
u'圿' : [u'j'] ,
u'桁' : [u'h'] ,
u'䗈' : [u'm'] ,
u'蛊' : [u'g'] ,
u'灑' : [u'x', u's', u'l'] ,
u'滚' : [u'g'] ,
u'塡' : [u't'] ,
u'鵣' : [u'c', u'l'] ,
u'㗨' : [u'a'] ,
u'盪' : [u'd'] ,
u'诬' : [u'w'] ,
u'敳' : [u'a'] ,
u'建' : [u'j'] ,
u'鏼' : [u's'] ,
u'螉' : [u'w'] ,
u'砌' : [u'q'] ,
u'岏' : [u'y'] ,
u'謖' : [u's'] ,
u'澙' : [u'x'] ,
u'䀜' : [u'x', u'm', u'g'] ,
u'錦' : [u'j'] ,
u'瞩' : [u'z'] ,
u'蚳' : [u'c'] ,
u'笶' : [u's'] ,
u'徹' : [u'c'] ,
u'詀' : [u'c', u'z'] ,
u'滃' : [u'w'] ,
u'䍆' : [u'z'] ,
u'鉐' : [u's'] ,
u'盓' : [u'y'] ,
u'臝' : [u'l'] ,
u'穠' : [u'n'] ,
u'廣' : [u'g'] ,
u'槭' : [u'q', u'c', u's'] ,
u'䉰' : [u'y'] ,
u'鵺' : [u'y'] ,
u'燽' : [u'c'] ,
u'䬁' : [u'y'] ,
u'騋' : [u'l'] ,
u'斊' : [u'q'] ,
u'匑' : [u'g'] ,
u'戛' : [u'j'] ,
u'䶚' : [u'q'] ,
u'㬡' : [u'l'] ,
u'鲤' : [u'l'] ,
u'䨫' : [u'm'] ,
u'喪' : [u's'] ,
u'蔵' : [u'c', u'z'] ,
u'撴' : [u'd'] ,
u'刻' : [u'k'] ,
u'㶺' : [u't'] ,
u'浅' : [u'q'] ,
u'䳄' : [u'c'] ,
u'㩋' : [u's'] ,
u'畕' : [u'j'] ,
u'哔' : [u'b'] ,
u'葟' : [u'h'] ,
u'柞' : [u'z'] ,
u'嵥' : [u'j'] ,
u'汯' : [u'h'] ,
u'修' : [u'x'] ,
u'黸' : [u'l'] ,
u'瑿' : [u'y'] ,
u'埾' : [u'j'] ,
u'月' : [u'y'] ,
u'宋' : [u's'] ,
u'颍' : [u'y'] ,
u'优' : [u'y'] ,
u'谚' : [u'y'] ,
u'悝' : [u'h', u'k', u'l'] ,
u'在' : [u'z'] ,
u'鐪' : [u'l'] ,
u'䢭' : [u'y'] ,
u'趯' : [u'y', u't'] ,
u'㼸' : [u'r'] ,
u'簺' : [u's'] ,
u'傽' : [u'z'] ,
u'閿' : [u'w'] ,
u'䑊' : [u'w'] ,
u'㣍' : [u't'] ,
u'饌' : [u'x', u'z'] ,
u'総' : [u'z'] ,
u'慜' : [u'm'] ,
u'䗟' : [u'y'] ,
u'髡' : [u'k'] ,
u'㑪' : [u'c'] ,
u'䥬' : [u'b'] ,
u'蹮' : [u'x'] ,
u'拱' : [u'g'] ,
u'兼' : [u'j'] ,
u'㗿' : [u'x'] ,
u'陾' : [u'r'] ,
u'脇' : [u'x'] ,
u'㦌' : [u'x', u't'] ,
u'纎' : [u'x'] ,
u'吕' : [u'l'] ,
u'椗' : [u'd'] ,
u'䚞' : [u'd'] ,
u'鮠' : [u'w'] ,
u'㰥' : [u'h'] ,
u'焧' : [u'c'] ,
u'掰' : [u'b'] ,
u'夷' : [u'y'] ,
u'鸹' : [u'g'] ,
u'䯀' : [u'n'] ,
u'裂' : [u'l'] ,
u'晉' : [u'j'] ,
u'叐' : [u'b'] ,
u'乙' : [u'y'] ,
u'荛' : [u'y', u'r'] ,
u'㯠' : [u'q', u'x'] ,
u'磢' : [u'q', u'c'] ,
u'噩' : [u'e'] ,
u'歫' : [u'j'] ,
u'䃲' : [u'p'] ,
u'藴' : [u'y'] ,
u'獻' : [u'x', u's'] ,
u'趁' : [u'c', u'z'] ,
u'樄' : [u'c'] ,
u'䚇' : [u's', u'z'] ,
u'閑' : [u'x'] ,
u'爔' : [u'x'] ,
u'脞' : [u'q', u'c'] ,
u'綡' : [u'l'] ,
u'娤' : [u'z'] ,
u'㚧' : [u'y'] ,
u'貫' : [u'w', u'g'] ,
u'椮' : [u's'] ,
u'钻' : [u'z'] ,
u'聈' : [u'y'] ,
u'糋' : [u'j'] ,
u'奎' : [u'k'] ,
u'㗑' : [u'b'] ,
u'桘' : [u'c', u'z', u'd'] ,
u'䓛' : [u'g', u'f'] ,
u'鯥' : [u'l'] ,
u'灨' : [u'g'] ,
u'蝲' : [u'l'] ,
u'揵' : [u'q', u'j'] ,
u'塸' : [u'o'] ,
u'蠃' : [u'l', u'g'] ,
u'澂' : [u'c'] ,
u'䄉' : [u'e'] ,
u'逓' : [u'd'] ,
u'瞒' : [u'm'] ,
u'蚜' : [u'y'] ,
u'砣' : [u't'] ,
u'徢' : [u'x'] ,
u'輭' : [u'r'] ,
u'溬' : [u'q'] ,
u'䀳' : [u'a', u'l'] ,
u'霽' : [u'j'] ,
u'皼' : [u'g'] ,
u'藆' : [u'j'] ,
u'罍' : [u'l'] ,
u'廌' : [u'z'] ,
u'蹗' : [u'l'] ,
u'淖' : [u'c', u'z', u'n'] ,
u'䝝' : [u'y'] ,
u'陧' : [u'n'] ,
u'痦' : [u'p', u'w'] ,
u'蓰' : [u'x'] ,
u'繷' : [u'n'] ,
u'巶' : [u'z'] ,
u'㝽' : [u'c', u'z', u's'] ,
u'洀' : [u'p', u'z'] ,
u'誅' : [u'z'] ,
u'㠎' : [u'q', u'j'] ,
u'甐' : [u'l'] ,
u'冓' : [u'g'] ,
u'銕' : [u'y', u't'] ,
u'崠' : [u'd'] ,
u'㦣' : [u'w'] ,
u'鸢' : [u'y'] ,
u'窥' : [u'k'] ,
u'昲' : [u'f'] ,
u'䊵' : [u'q'] ,
u'乂' : [u'y'] ,
u'譄' : [u'z'] ,
u'柇' : [u'h'] ,
u'噒' : [u'l'] ,
u'鍔' : [u'e'] ,
u'俗' : [u's'] ,
u'裙' : [u'q'] ,
u'筤' : [u'l'] ,
u'埧' : [u'j'] ,
u'郩' : [u'x'] ,
u'䍴' : [u'r', u'd', u'w'] ,
u'㿷' : [u'z'] ,
u'鱶' : [u'x'] ,
u'磹' : [u'd'] ,
u'撆' : [u'p'] ,
u'帍' : [u'h'] ,
u'鬏' : [u'j'] ,
u'䲖' : [u'c'] ,
u'覘' : [u'c', u'z', u'd', u'j'] ,
u'挟' : [u'x', u'j'] ,
u'咦' : [u'y'] ,
u'醨' : [u'l'] ,
u'䬯' : [u'n'] ,
u'萱' : [u'x'] ,
u'㲶' : [u'l'] ,
u'禸' : [u'r'] ,
u'匿' : [u'n'] ,
u'汁' : [u'x', u's', u'z'] ,
u'䇈' : [u'h', u'g'] ,
u'苊' : [u'e'] ,
u'㭏' : [u'w'] ,
u'瑑' : [u'z'] ,
u'櫚' : [u'l'] ,
u'屡' : [u'l'] ,
u'饣' : [u's'] ,
u'狪' : [u't'] ,
u'迬' : [u'w'] ,
u'慳' : [u'q'] ,
u'嫺' : [u'x'] ,
u'韼' : [u'p'] ,
u'莉' : [u'c', u'l'] ,
u'簌' : [u's'] ,
u'墏' : [u'q'] ,
u'㔒' : [u'x'] ,
u'輖' : [u'z'] ,
u'殙' : [u'h'] ,
u'霦' : [u'b'] ,
u'玩' : [u'w'] ,
u'芳' : [u'f'] ,
u'缶' : [u'g', u'f'] ,
u'容' : [u'r'] ,
u'㐼' : [u'c', u'z'] ,
u'蹀' : [u'd'] ,
u'櫃' : [u'g'] ,
u'限' : [u'x', u'w'] ,
u'狓' : [u'p'] ,
u'藝' : [u'y'] ,
u'繠' : [u'r'] ,
u'嫣' : [u'y'] ,
u'㝦' : [u'y', u'j'] ,
u'淭' : [u'q'] ,
u'䙰' : [u'l'] ,
u'饺' : [u'j'] ,
u'痽' : [u'd'] ,
u'企' : [u'q'] ,
u'鸋' : [u'n'] ,
u'憊' : [u'b'] ,
u'圑' : [u'f'] ,
u'㚐' : [u't'] ,
u'昛' : [u'j'] ,
u'䦚' : [u'h', u'k'] ,
u'颤' : [u'c', u'z', u's'] ,
u'丫' : [u'y'] ,
u'冪' : [u'm'] ,
u'脵' : [u'g'] ,
u'悴' : [u'c'] ,
u'嘻' : [u'x'] ,
u'㦺' : [u'r'] ,
u'楅' : [u'b'] ,
u'䣄' : [u't'] ,
u'煕' : [u'x'] ,
u'僔' : [u'z'] ,
u'聟' : [u'x'] ,
u'揞' : [u'a'] ,
u'奥' : [u'a'] ,
u'㣤' : [u'j'] ,
u'桯' : [u'x', u't'] ,
u'䯮' : [u'n'] ,
u'灿' : [u'c'] ,
u'挈' : [u'q'] ,
u'律' : [u'l'] ,
u'鲍' : [u'b'] ,
u'蠚' : [u'h', u'r'] ,
u'撝' : [u'h'] ,
u'匨' : [u'c', u'z'] ,
u'逪' : [u'c'] ,
u'覯' : [u'g'] ,
u'砺' : [u'l'] ,
u'咽' : [u'y'] ,
u'醿' : [u'm'] ,
u'䁊' : [u'w'] ,
u'㳍' : [u'b'] ,
u'鵌' : [u'y', u't'] ,
u'秏' : [u'h'] ,
u'敜' : [u'n'] ,
u'䇟' : [u'j'] ,
u'黡' : [u'y'] ,
u'䵬' : [u't'] ,
u'詮' : [u'q'] ,
u'曱' : [u'y'] ,
u'啼' : [u't'] ,
u'鉾' : [u'm'] ,
u'䠅' : [u'k', u't'] ,
u'蔇' : [u'x', u'j'] ,
u'㶌' : [u'l'] ,
u'窎' : [u'd'] ,
u'倕' : [u'c'] ,
u'洗' : [u'x'] ,
u'䊞' : [u'z'] ,
u'龠' : [u'y'] ,
u'㠥' : [u'l'] ,
u'甧' : [u's'] ,
u'枰' : [u'p'] ,
u'崷' : [u'q'] ,
u'騹' : [u'q', u'l'] ,
u'俀' : [u't'] ,
u'賂' : [u'l'] ,
u'扉' : [u'f'] ,
u'埐' : [u'j'] ,
u'铒' : [u'e'] ,
u'䩙' : [u'x'] ,
u'蝛' : [u'w'] ,
u'糢' : [u'm'] ,
u'剩' : [u's'] ,
u'潫' : [u'w'] ,
u'䓲' : [u's', u'r'] ,
u'致' : [u'z'] ,
u'㩹' : [u'z', u'd'] ,
u'睻' : [u'x'] ,
u'醁' : [u'l'] ,
u'瘄' : [u'c'] ,
u'媇' : [u'q'] ,
u'㼊' : [u't'] ,
u'覑' : [u'p'] ,
u'渔' : [u'y'] ,
u'鴞' : [u'x'] ,
u'憡' : [u'c'] ,
u'䘤' : [u's'] ,
u'邫' : [u'b'] ,
u'甮' : [u'f'] ,
u'妱' : [u'z'] ,
u'袻' : [u'e'] ,
u'派' : [u'p', u'b', u'm'] ,
u'鱈' : [u'x'] ,
u'惋' : [u'w'] ,
u'䕎' : [u'f'] ,
u'瑘' : [u'y'] ,
u'壛' : [u'y'] ,
u'㵞' : [u'c'] ,
u'蟥' : [u'h'] ,
u'汨' : [u'm'] ,
u'魲' : [u'l'] ,
u'翵' : [u'q'] ,
u'鐃' : [u'n'] ,
u'玂' : [u'q'] ,
u'崉' : [u't'] ,
u'㲈' : [u's'] ,
u'谓' : [u'w'] ,
u'殒' : [u'y'] ,
u'㔙' : [u'p', u'b'] ,
u'骜' : [u'a'] ,
u'搣' : [u'm'] ,
u'䎢' : [u'q'] ,
u'錭' : [u't', u'd'] ,
u'犬' : [u'q'] ,
u'尳' : [u'g'] ,
u'謽' : [u'j'] ,
u'檼' : [u'y'] ,
u'㑃' : [u'a'] ,
u'駆' : [u'q'] ,
u'捍' : [u'h'] ,
u'䋌' : [u'j'] ,
u'鉗' : [u'q', u'a'] ,
u'燖' : [u'q', u'x'] ,
u'孝' : [u'x'] ,
u'㫜' : [u'd', u't'] ,
u'詧' : [u'c'] ,
u'槦' : [u'y'] ,
u'飰' : [u'f'] ,
u'扷' : [u'a'] ,
u'䇶' : [u'j'] ,
u'焀' : [u'h'] ,
u'喃' : [u'n'] ,
u'隅' : [u'y'] ,
u'椐' : [u'j'] ,
u'䶓' : [u'q', u'j'] ,
u'躕' : [u'c'] ,
u'䄠' : [u's'] ,
u'舢' : [u's'] ,
u'暥' : [u'y'] ,
u'㤰' : [u'z'] ,
u'稲' : [u'd'] ,
u'庵' : [u'a'] ,
u'获' : [u'h'] ,
u'剂' : [u'j'] ,
u'㛅' : [u'e'] ,
u'靄' : [u'a'] ,
u'篇' : [u'p'] ,
u'䩒' : [u'y'] ,
u'轔' : [u'l'] ,
u'受' : [u's'] ,
u'铙' : [u'n'] ,
u'䯧' : [u'q'] ,
u'賩' : [u'c'] ,
u'彴' : [u'z'] ,
u'聶' : [u'y', u's', u'z', u'n'] ,
u'擹' : [u't'] ,
u'碆' : [u'b'] ,
u'䈍' : [u'm'] ,
u'蜏' : [u'y'] ,
u'傖' : [u'c'] ,
u'閘' : [u'y', |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.